CombinedText stringlengths 4 3.42M |
|---|
import os
import time
import zmq
import bquery
import bcolz
import traceback
import tempfile
import zipfile
import shutil
import boto
import redis
import binascii
import logging
import random
import bqueryd
import socket
from bqueryd.messages import msg_factory, WorkerRegisterMessage, ErrorMessage, BusyMessage, StopMessage, \
DoneMessage, FileDownloadProgress
# Recognised on-disk data formats: full bcolz ctables and sharded ctable pieces.
DATA_FILE_EXTENSION = '.bcolz'
DATA_SHARD_FILE_EXTENSION = '.bcolzs'
POLLING_TIMEOUT = 5000  # timeout in ms : how long to wait for network poll, this also affects frequency of seeing new controllers and datafiles
WRM_DELAY = 20  # how often in seconds to send a WorkerRegisterMessage
# Keep bcolz single-threaded inside each worker process
# (presumably to avoid thread oversubscription across workers -- confirm).
bcolz.set_nthreads(1)
class WorkerNode(object):
    """A bqueryd worker process.

    Registers itself with every controller listed in Redis, announces the
    bcolz data files it can serve, and executes calc/download/movebcolz
    requests received over ZeroMQ.

    :param data_dir: directory containing the .bcolz/.bcolzs files to serve.
    :param redis_url: Redis instance holding the controller registry set.
    :param loglevel: logging level for this worker's child logger.
    :raises Exception: if data_dir does not exist or is not a directory.
    """

    def __init__(self, data_dir=bqueryd.DEFAULT_DATA_DIR, redis_url='redis://127.0.0.1:6379/0', loglevel=logging.DEBUG):
        if not os.path.exists(data_dir) or not os.path.isdir(data_dir):
            # BUGFIX: corrected 'difrectory' typo in the error message.
            raise Exception("Datadir %s is not a valid directory" % data_dir)
        # Random hex id; doubles as the ZeroMQ socket identity below.
        self.worker_id = binascii.hexlify(os.urandom(8))
        self.node_name = socket.gethostname()
        self.data_dir = data_dir
        self.data_files = set()
        context = zmq.Context()
        self.socket = context.socket(zmq.ROUTER)
        self.socket.setsockopt(zmq.LINGER, 0)  # discard unsent messages on close
        self.socket.identity = self.worker_id
        self.redis_server = redis.from_url(redis_url)
        # controller address -> {'last_seen', 'last_sent', 'address'}
        self.controllers = {}
        self.poller = zmq.Poller()
        self.poller.register(self.socket, zmq.POLLIN)
        self.check_controllers()
        self.last_wrm = 0  # timestamp of the last WorkerRegisterMessage broadcast
        self.start_time = time.time()
        self.logger = bqueryd.logger.getChild('worker '+self.worker_id)
        self.logger.setLevel(loglevel)
def check_controllers(self):
    # Check the Redis set of controllers to see if any new ones have appeared,
    # Also register with them if so.
    listed_controllers = list(self.redis_server.smembers(bqueryd.REDIS_SET_KEY))
    current_controllers = []
    new_controllers = []
    # Iterate over a copy (Python 2 keys() returns a list) so entries can be
    # deleted from self.controllers while looping.
    for k in self.controllers.keys()[:]:
        if k not in listed_controllers:
            # Controller vanished from Redis: forget it and drop the connection.
            del self.controllers[k]
            self.socket.disconnect(k)
        else:
            current_controllers.append(k)
    new_controllers = [c for c in listed_controllers if c not in current_controllers]
    for controller_address in new_controllers:
        self.socket.connect(controller_address)
        self.controllers[controller_address] = {'last_seen': 0, 'last_sent': 0, 'address': controller_address}

def check_datafiles(self):
    """Rescan data_dir for *.bcolz / *.bcolzs entries; return True when any
    file appeared that was not seen before (files are never removed from
    self.data_files here)."""
    has_new_files = False
    for data_file in [filename for filename in os.listdir(self.data_dir) if
                      filename.endswith(DATA_FILE_EXTENSION) or filename.endswith(DATA_SHARD_FILE_EXTENSION)]:
        if data_file not in self.data_files:
            self.data_files.add(data_file)
            has_new_files = True
    if len(self.data_files) < 1:
        self.logger.debug('Data directory %s has no files like %s or %s' % (
            self.data_dir, DATA_FILE_EXTENSION, DATA_SHARD_FILE_EXTENSION))
    return has_new_files

def prepare_wrm(self):
    """Assemble a WorkerRegisterMessage advertising this worker's identity,
    node name, served data files, known controllers and uptime."""
    wrm = WorkerRegisterMessage()
    wrm['worker_id'] = self.worker_id
    wrm['node'] = self.node_name
    wrm['data_files'] = list(self.data_files)
    wrm['data_dir'] = self.data_dir
    wrm['controllers'] = self.controllers.values()
    wrm['uptime'] = int(time.time() - self.start_time)
    return wrm

def heartbeat(self):
    """At most every WRM_DELAY seconds: refresh the controller list, rescan
    data files, and (re)send a WorkerRegisterMessage to controllers that are
    stale or whenever new data files were found."""
    since_last_wrm = time.time() - self.last_wrm
    if since_last_wrm > WRM_DELAY:
        self.check_controllers()
        has_new_files = self.check_datafiles()
        self.last_wrm = time.time()
        wrm = self.prepare_wrm()
        for controller, data in self.controllers.items():
            if has_new_files or (time.time() - data['last_seen'] > WRM_DELAY):
                self.socket.send_multipart([controller, wrm.to_json()])
                data['last_sent'] = time.time()
                # self.logger.debug("heartbeat to %s" % data['address'])

def go(self):
    """Main loop: heartbeat, poll the ROUTER socket, and handle one message
    at a time until a 'kill' message clears self.running."""
    self.running = True
    while self.running:
        self.heartbeat()
        for sock, event in self.poller.poll(timeout=POLLING_TIMEOUT):
            if event & zmq.POLLIN:
                tmp = self.socket.recv_multipart()
                # ROUTER delivers [sender_identity, payload]; anything else is malformed.
                if len(tmp) != 2:
                    self.logger.critical('Received a msg with len != 2, something seriously wrong. ')
                    continue
                sender, msg_buf = tmp
                msg = msg_factory(msg_buf)
                data = self.controllers.get(sender)
                if not data:
                    self.logger.critical('Received a msg from %s - this is an unknown sender' % sender)
                    continue
                data['last_seen'] = time.time()
                # self.logger.debug('Received from %s' % sender)
                # TODO Notify Controllers that we are busy, no more messages to be sent
                self.send_to_all(BusyMessage())
                # The above busy notification is not perfect as other messages might be on their way already
                # but for long-running queries it will at least ensure other controllers
                # don't try and overuse this node by filling up a queue
                try:
                    tmp = self.handle(msg)
                except Exception, e:
                    # Never let a handler error kill the worker: ship the
                    # traceback back to the requester instead.
                    tmp = ErrorMessage(msg)
                    tmp['payload'] = traceback.format_exc()
                    self.logger.debug(tmp['payload'])
                self.send_to_all(DoneMessage())  # Send an empty message to all controllers, this flags you as 'Done'
                if tmp:
                    self.socket.send_multipart([sender, tmp.to_json()])
    self.logger.debug('Stopping')

def send_to_all(self, msg):
    # Broadcast a message to every currently-known controller.
    for controller in self.controllers:
        self.socket.send_multipart([controller, msg.to_json()])

def handle_calc(self, msg):
    """Run a groupby/aggregation (or raw column fetch) over a local ctable.

    args: [filename, groupby_col_list, aggregation_list, where_terms_list]
    kwargs: expand_filter_column, aggregate (default True)
    The todataframe() result is attached to the message as binary 'result'.
    """
    args, kwargs = msg.get_args_kwargs()
    self.logger.debug('doing calc %s' % args)
    filename = args[0]
    groupby_col_list = args[1]
    aggregation_list = args[2]
    where_terms_list = args[3]
    expand_filter_column = kwargs.get('expand_filter_column')
    aggregate = kwargs.get('aggregate', True)
    # create rootdir
    rootdir = os.path.join(self.data_dir, filename)
    if not os.path.exists(rootdir):
        raise Exception('Path %s does not exist' % rootdir)
    ct = bquery.ctable(rootdir=rootdir, mode='r')
    ct.auto_cache = False
    # prepare filter
    if not where_terms_list:
        bool_arr = None
    else:
        bool_arr = ct.where_terms(where_terms_list)
    # expand filter column check
    if expand_filter_column:
        bool_arr = ct.is_in_ordered_subgroups(basket_col=expand_filter_column, bool_arr=bool_arr)
    # retrieve & aggregate if needed
    if aggregate:
        # aggregate by groupby parameters
        result_ctable = ct.groupby(groupby_col_list, aggregation_list, bool_arr=bool_arr)
        buf = result_ctable.todataframe()
    else:
        # direct result from the ctable; materialise only the needed columns
        column_list = groupby_col_list + [x[0] for x in aggregation_list]
        if bool_arr is not None:
            # sum(bool_arr) == number of selected rows
            ct = bcolz.fromiter(ct[column_list].where(bool_arr), ct[column_list].dtype, sum(bool_arr))
        else:
            ct = bcolz.fromiter(ct[column_list], ct[column_list].dtype, ct.len)
        buf = ct[column_list].todataframe()
    msg.add_as_binary('result', buf)
    return msg

def file_downloader_callback(self, msg):
    """Build a boto progress callback that relays FileDownloadProgress
    messages for *msg* back to its source controller."""
    def _fn(progress, size):
        args, kwargs = msg.get_args_kwargs()
        filename = kwargs.get('filename')
        ticket = msg.get('ticket')
        addr = str(msg.get('source'))
        self.logger.debug('At %s of %s for %s %s :: %s' % (progress, size, ticket, addr, filename))
        tmp = FileDownloadProgress(msg)
        tmp['filename'] = filename
        tmp['progress'] = progress
        tmp['size'] = size
        self.socket.send_multipart([addr, tmp.to_json()])
    return _fn
def handle_download(self, msg):
    """Download a zip from S3 into a temp file, unpack it into the ticket's
    staging directory under bqueryd.INCOMING, and attach the staging path to
    the message as 'result'.

    kwargs must contain 'filename' (the S3 key) and 'bucket'.
    :raises Exception: when either kwarg is missing.
    """
    args, kwargs = msg.get_args_kwargs()
    filename = kwargs.get('filename')
    bucket = kwargs.get('bucket')
    if not (filename and bucket):
        raise Exception('[filename, bucket] args are all required')
    # get file from S3 (credentials come from the boto environment/config)
    s3_conn = boto.connect_s3()
    s3_bucket = s3_conn.get_bucket(bucket, validate=False)
    k = s3_bucket.get_key(filename, validate=False)
    fd, tmp_filename = tempfile.mkstemp(dir=bqueryd.INCOMING)
    # BUGFIX: mkstemp returns an open OS-level descriptor that was never
    # closed, leaking one fd per download. boto re-opens the file by name,
    # so the descriptor is not needed.
    os.close(fd)
    the_callback = self.file_downloader_callback(msg)
    k.get_contents_to_filename(tmp_filename, cb=the_callback)
    try:
        # unzip the tmp file into INCOMING/<ticket>/<filename>
        ticket = msg['ticket']
        ticket_path = os.path.join(bqueryd.INCOMING, ticket)
        if not os.path.exists(ticket_path):
            os.mkdir(ticket_path)
        temp_path = os.path.join(bqueryd.INCOMING, ticket, filename)
        # if the signature already exists, first remove it.
        if os.path.exists(temp_path):
            shutil.rmtree(temp_path, ignore_errors=True)
        with zipfile.ZipFile(tmp_filename, 'r', allowZip64=True) as myzip:
            myzip.extractall(temp_path)
        self.logger.debug("Downloaded %s" % temp_path)
    finally:
        # Always remove the downloaded zip, even if extraction fails.
        if os.path.exists(tmp_filename):
            os.remove(tmp_filename)
    msg.add_as_binary('result', temp_path)
    # Final callback with -1/-1 -- presumably the sentinel for 'download
    # complete' on the controller side; confirm against controller code.
    the_callback(-1, -1)
    return msg
def handle_movebcolz(self, msg):
    # A notification from the controller that all files are downloaded on all nodes,
    # the files in this ticket can be moved into place
    self.logger.debug('movebcolz %s' % msg)
    ticket = msg['ticket']
    ticket_path = os.path.join(bqueryd.INCOMING, ticket)
    if not os.path.exists(ticket_path):
        self.logger.debug('%s does not exist' % ticket_path)
        return
    for filename in os.listdir(ticket_path):
        prod_path = os.path.join(bqueryd.DEFAULT_DATA_DIR, filename)
        # Replace any existing production copy of this file.
        if os.path.exists(prod_path):
            shutil.rmtree(prod_path, ignore_errors=True)
        ready_path = os.path.join(ticket_path, filename)
        # NOTE(review): os.rename only works when INCOMING and
        # DEFAULT_DATA_DIR are on the same filesystem -- confirm deployment.
        os.rename(ready_path, prod_path)
    shutil.rmtree(ticket_path, ignore_errors=True)
    # TODO add some error handling when something goes wrong in this movemessage, send the exception to the
    # calling controller and hang it on the download ticket

def handle(self, msg):
    """Dispatch an incoming message by type.

    Returns the reply message to send back, or None (kill / movebcolz),
    in which case go() sends nothing.
    """
    if msg.isa('kill'):
        self.running = False
        # Also send a message to all your controllers, that you are stopping
        self.send_to_all(StopMessage())
        for k in self.controllers:
            self.socket.disconnect(k)
        return
    elif msg.isa('info'):
        msg = self.prepare_wrm()
    elif msg.isa('sleep'):
        # Diagnostic message: block this worker for args[0] seconds.
        args, kwargs = msg.get_args_kwargs()
        time.sleep(float(args[0]))
        snore = 'z'*random.randint(1,20)
        self.logger.debug(snore)
        msg.add_as_binary('result', snore)
    elif msg.isa('download'):
        msg = self.handle_download(msg)
    elif msg.isa('movebcolz'):
        # handle_movebcolz returns None, so no reply is sent for these.
        msg = self.handle_movebcolz(msg)
    else:
        # Anything unrecognised is assumed to be a calc request.
        msg = self.handle_calc(msg)
    return msg
Less verbose logging for download progress: emit the per-chunk debug line only at the start of a download (progress == 0) instead of on every boto callback.
import os
import time
import zmq
import bquery
import bcolz
import traceback
import tempfile
import zipfile
import shutil
import boto
import redis
import binascii
import logging
import random
import bqueryd
import socket
from bqueryd.messages import msg_factory, WorkerRegisterMessage, ErrorMessage, BusyMessage, StopMessage, \
DoneMessage, FileDownloadProgress
DATA_FILE_EXTENSION = '.bcolz'
DATA_SHARD_FILE_EXTENSION = '.bcolzs'
POLLING_TIMEOUT = 5000 # timeout in ms : how long to wait for network poll, this also affects frequency of seeing new controllers and datafiles
WRM_DELAY = 20 # how often in seconds to send a WorkerRegisterMessage
bcolz.set_nthreads(1)
class WorkerNode(object):
    """A bqueryd worker process.

    Registers itself with every controller listed in Redis, announces the
    bcolz data files it can serve, and executes calc/download/movebcolz
    requests received over ZeroMQ.
    """

    def __init__(self, data_dir=bqueryd.DEFAULT_DATA_DIR, redis_url='redis://127.0.0.1:6379/0', loglevel=logging.DEBUG):
        if not os.path.exists(data_dir) or not os.path.isdir(data_dir):
            # BUGFIX: corrected 'difrectory' typo in the error message.
            raise Exception("Datadir %s is not a valid directory" % data_dir)
        # Random hex id; doubles as the ZeroMQ socket identity below.
        self.worker_id = binascii.hexlify(os.urandom(8))
        self.node_name = socket.gethostname()
        self.data_dir = data_dir
        self.data_files = set()
        context = zmq.Context()
        self.socket = context.socket(zmq.ROUTER)
        self.socket.setsockopt(zmq.LINGER, 0)  # discard unsent messages on close
        self.socket.identity = self.worker_id
        self.redis_server = redis.from_url(redis_url)
        # controller address -> {'last_seen', 'last_sent', 'address'}
        self.controllers = {}
        self.poller = zmq.Poller()
        self.poller.register(self.socket, zmq.POLLIN)
        self.check_controllers()
        self.last_wrm = 0  # timestamp of last WorkerRegisterMessage broadcast
        self.start_time = time.time()
        self.logger = bqueryd.logger.getChild('worker '+self.worker_id)
        self.logger.setLevel(loglevel)

    def check_controllers(self):
        # Check the Redis set of controllers to see if any new ones have appeared,
        # also register with them if so.
        listed_controllers = list(self.redis_server.smembers(bqueryd.REDIS_SET_KEY))
        current_controllers = []
        # Iterate over a snapshot so we can delete entries while looping
        # (list(...) also works on Python 3, unlike keys()[:]).
        for k in list(self.controllers.keys()):
            if k not in listed_controllers:
                del self.controllers[k]
                self.socket.disconnect(k)
            else:
                current_controllers.append(k)
        new_controllers = [c for c in listed_controllers if c not in current_controllers]
        for controller_address in new_controllers:
            self.socket.connect(controller_address)
            self.controllers[controller_address] = {'last_seen': 0, 'last_sent': 0, 'address': controller_address}

    def check_datafiles(self):
        """Rescan data_dir for *.bcolz / *.bcolzs entries; return True when
        any file appeared that was not seen before."""
        has_new_files = False
        for data_file in [filename for filename in os.listdir(self.data_dir) if
                          filename.endswith(DATA_FILE_EXTENSION) or filename.endswith(DATA_SHARD_FILE_EXTENSION)]:
            if data_file not in self.data_files:
                self.data_files.add(data_file)
                has_new_files = True
        if len(self.data_files) < 1:
            self.logger.debug('Data directory %s has no files like %s or %s' % (
                self.data_dir, DATA_FILE_EXTENSION, DATA_SHARD_FILE_EXTENSION))
        return has_new_files

    def prepare_wrm(self):
        """Assemble a WorkerRegisterMessage advertising this worker."""
        wrm = WorkerRegisterMessage()
        wrm['worker_id'] = self.worker_id
        wrm['node'] = self.node_name
        wrm['data_files'] = list(self.data_files)
        wrm['data_dir'] = self.data_dir
        wrm['controllers'] = self.controllers.values()
        wrm['uptime'] = int(time.time() - self.start_time)
        return wrm

    def heartbeat(self):
        """At most every WRM_DELAY seconds: refresh controllers, rescan data
        files, and re-register with stale controllers or on new files."""
        since_last_wrm = time.time() - self.last_wrm
        if since_last_wrm > WRM_DELAY:
            self.check_controllers()
            has_new_files = self.check_datafiles()
            self.last_wrm = time.time()
            wrm = self.prepare_wrm()
            for controller, data in self.controllers.items():
                if has_new_files or (time.time() - data['last_seen'] > WRM_DELAY):
                    self.socket.send_multipart([controller, wrm.to_json()])
                    data['last_sent'] = time.time()
                    # self.logger.debug("heartbeat to %s" % data['address'])

    def go(self):
        """Main loop: heartbeat, poll the ROUTER socket, handle one message
        at a time until a 'kill' message clears self.running."""
        self.logger.debug('Starting')
        self.running = True
        while self.running:
            self.heartbeat()
            for sock, event in self.poller.poll(timeout=POLLING_TIMEOUT):
                if event & zmq.POLLIN:
                    tmp = self.socket.recv_multipart()
                    # ROUTER delivers [sender_identity, payload]; reject anything else.
                    if len(tmp) != 2:
                        self.logger.critical('Received a msg with len != 2, something seriously wrong. ')
                        continue
                    sender, msg_buf = tmp
                    msg = msg_factory(msg_buf)
                    data = self.controllers.get(sender)
                    if not data:
                        self.logger.critical('Received a msg from %s - this is an unknown sender' % sender)
                        continue
                    data['last_seen'] = time.time()
                    # self.logger.debug('Received from %s' % sender)
                    # TODO Notify Controllers that we are busy, no more messages to be sent
                    self.send_to_all(BusyMessage())
                    # The above busy notification is not perfect as other messages might be on their way already
                    # but for long-running queries it will at least ensure other controllers
                    # don't try and overuse this node by filling up a queue
                    try:
                        tmp = self.handle(msg)
                    except Exception:
                        # Python 2.6+/3-compatible except syntax; the bound
                        # name was unused. Ship the traceback to the requester
                        # instead of dying.
                        tmp = ErrorMessage(msg)
                        tmp['payload'] = traceback.format_exc()
                        self.logger.debug(tmp['payload'])
                    self.send_to_all(DoneMessage())  # Send an empty message to all controllers, this flags you as 'Done'
                    if tmp:
                        self.socket.send_multipart([sender, tmp.to_json()])
        self.logger.debug('Stopping')

    def send_to_all(self, msg):
        # Broadcast a message to every currently-known controller.
        for controller in self.controllers:
            self.socket.send_multipart([controller, msg.to_json()])

    def handle_calc(self, msg):
        """Run a groupby/aggregation (or raw column fetch) over a local ctable.

        args: [filename, groupby_col_list, aggregation_list, where_terms_list]
        kwargs: expand_filter_column, aggregate (default True)
        The todataframe() result is attached as binary 'result'.
        """
        args, kwargs = msg.get_args_kwargs()
        self.logger.debug('doing calc %s' % args)
        filename = args[0]
        groupby_col_list = args[1]
        aggregation_list = args[2]
        where_terms_list = args[3]
        expand_filter_column = kwargs.get('expand_filter_column')
        aggregate = kwargs.get('aggregate', True)
        # create rootdir
        rootdir = os.path.join(self.data_dir, filename)
        if not os.path.exists(rootdir):
            raise Exception('Path %s does not exist' % rootdir)
        ct = bquery.ctable(rootdir=rootdir, mode='r')
        ct.auto_cache = False
        # prepare filter
        if not where_terms_list:
            bool_arr = None
        else:
            bool_arr = ct.where_terms(where_terms_list)
        # expand filter column check
        if expand_filter_column:
            bool_arr = ct.is_in_ordered_subgroups(basket_col=expand_filter_column, bool_arr=bool_arr)
        # retrieve & aggregate if needed
        if aggregate:
            # aggregate by groupby parameters
            result_ctable = ct.groupby(groupby_col_list, aggregation_list, bool_arr=bool_arr)
            buf = result_ctable.todataframe()
        else:
            # direct result from the ctable; materialise only needed columns
            column_list = groupby_col_list + [x[0] for x in aggregation_list]
            if bool_arr is not None:
                ct = bcolz.fromiter(ct[column_list].where(bool_arr), ct[column_list].dtype, sum(bool_arr))
            else:
                ct = bcolz.fromiter(ct[column_list], ct[column_list].dtype, ct.len)
            buf = ct[column_list].todataframe()
        msg.add_as_binary('result', buf)
        return msg

    def file_downloader_callback(self, msg):
        """Build a boto progress callback that relays FileDownloadProgress
        messages for *msg* back to its source controller. Logs only the
        first callback (progress == 0) to keep the logs quiet."""
        def _fn(progress, size):
            args, kwargs = msg.get_args_kwargs()
            filename = kwargs.get('filename')
            ticket = msg.get('ticket')
            addr = str(msg.get('source'))
            if progress == 0:
                self.logger.debug('At %s of %s for %s %s :: %s' % (progress, size, ticket, addr, filename))
            tmp = FileDownloadProgress(msg)
            tmp['filename'] = filename
            tmp['progress'] = progress
            tmp['size'] = size
            self.socket.send_multipart([addr, tmp.to_json()])
        return _fn

    def handle_download(self, msg):
        """Download a zip from S3, unpack it into INCOMING/<ticket>/<filename>,
        and attach the staging path to the message as 'result'.

        kwargs must contain 'filename' (the S3 key) and 'bucket'.
        """
        args, kwargs = msg.get_args_kwargs()
        filename = kwargs.get('filename')
        bucket = kwargs.get('bucket')
        if not (filename and bucket):
            raise Exception('[filename, bucket] args are all required')
        # get file from S3
        s3_conn = boto.connect_s3()
        s3_bucket = s3_conn.get_bucket(bucket, validate=False)
        k = s3_bucket.get_key(filename, validate=False)
        fd, tmp_filename = tempfile.mkstemp(dir=bqueryd.INCOMING)
        # BUGFIX: close the OS-level descriptor returned by mkstemp to avoid
        # leaking one fd per download; boto re-opens the file by name.
        os.close(fd)
        the_callback = self.file_downloader_callback(msg)
        k.get_contents_to_filename(tmp_filename, cb=the_callback)
        try:
            # unzip the tmp file to the filename
            ticket = msg['ticket']
            ticket_path = os.path.join(bqueryd.INCOMING, ticket)
            if not os.path.exists(ticket_path):
                os.mkdir(ticket_path)
            temp_path = os.path.join(bqueryd.INCOMING, ticket, filename)
            # if the signature already exists, first remove it.
            if os.path.exists(temp_path):
                shutil.rmtree(temp_path, ignore_errors=True)
            with zipfile.ZipFile(tmp_filename, 'r', allowZip64=True) as myzip:
                myzip.extractall(temp_path)
            self.logger.debug("Downloaded %s" % temp_path)
        finally:
            # Always remove the downloaded zip, even if extraction fails.
            if os.path.exists(tmp_filename):
                os.remove(tmp_filename)
        msg.add_as_binary('result', temp_path)
        the_callback(-1, -1)
        return msg

    def handle_movebcolz(self, msg):
        # A notification from the controller that all files are downloaded on
        # all nodes; the files in this ticket can be moved into place.
        self.logger.debug('movebcolz %s' % msg['ticket'])
        ticket = msg['ticket']
        ticket_path = os.path.join(bqueryd.INCOMING, ticket)
        if not os.path.exists(ticket_path):
            self.logger.debug('%s does not exist' % ticket_path)
            return
        for filename in os.listdir(ticket_path):
            prod_path = os.path.join(bqueryd.DEFAULT_DATA_DIR, filename)
            if os.path.exists(prod_path):
                shutil.rmtree(prod_path, ignore_errors=True)
            ready_path = os.path.join(ticket_path, filename)
            os.rename(ready_path, prod_path)
        shutil.rmtree(ticket_path, ignore_errors=True)
        # TODO add some error handling when something goes wrong in this movemessage, send the exception to the
        # calling controller and hang it on the download ticket

    def handle(self, msg):
        """Dispatch an incoming message by type; return the reply message,
        or None (kill / movebcolz) when nothing should be sent back."""
        if msg.isa('kill'):
            self.running = False
            # Also send a message to all your controllers, that you are stopping
            self.send_to_all(StopMessage())
            for k in self.controllers:
                self.socket.disconnect(k)
            return
        elif msg.isa('info'):
            msg = self.prepare_wrm()
        elif msg.isa('sleep'):
            # Diagnostic message: block this worker for args[0] seconds.
            args, kwargs = msg.get_args_kwargs()
            time.sleep(float(args[0]))
            snore = 'z'*random.randint(1,20)
            self.logger.debug(snore)
            msg.add_as_binary('result', snore)
        elif msg.isa('download'):
            msg = self.handle_download(msg)
        elif msg.isa('movebcolz'):
            msg = self.handle_movebcolz(msg)
        else:
            msg = self.handle_calc(msg)
        return msg
# -*- coding: utf-8 -*-
from kivy.lang import Builder
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ListProperty, BooleanProperty
from kivy.animation import Animation
from kivymd.theming import ThemableBehavior
Builder.load_string('''
<MDSpinner>:
canvas.before:
PushMatrix
Rotate:
angle: self._rotation_angle
origin: self.center
canvas:
Color:
rgba: self.color
a: self._alpha
Line:
circle: self.center_x, self.center_y, self.width / 2, \
self._angle_start, self._angle_end
cap: 'square'
width: dp(2)
canvas.after:
PopMatrix
''')
class MDSpinner(ThemableBehavior, Widget):
    """:class:`MDSpinner` is an implementation of the circular progress
    indicator in Google's Material Design.

    It can be used either as an indeterminate indicator that loops while
    the user waits for something to happen, or as a determinate indicator.

    Set :attr:`determinate` to **True** to activate determinate mode, and
    :attr:`determinate_time` to set the duration of the animation.
    """
    determinate = BooleanProperty(False)
    """:attr:`determinate` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to False
    """

    determinate_time = NumericProperty(2)
    """:attr:`determinate_time` is a :class:`~kivy.properties.NumericProperty`
    and defaults to 2
    """

    active = BooleanProperty(True)
    """Use :attr:`active` to start or stop the spinner.

    :attr:`active` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to True
    """

    color = ListProperty([])
    """:attr:`color` is a :class:`~kivy.properties.ListProperty` and
    defaults to 'self.theme_cls.primary_color'
    """

    # Internal animation state driven by the Animation objects below and
    # consumed by the kv canvas instructions.
    _alpha = NumericProperty(0)
    _rotation_angle = NumericProperty(360)
    _angle_start = NumericProperty(0)
    _angle_end = NumericProperty(8)

    def __init__(self, **kwargs):
        super(MDSpinner, self).__init__(**kwargs)
        self.color = self.theme_cls.primary_color
        self._alpha_anim_in = Animation(_alpha=1, duration=.8, t='out_quad')
        self._alpha_anim_out = Animation(_alpha=0, duration=.3, t='out_quad')
        # When the fade-out finishes (determinate mode) reset all state.
        self._alpha_anim_out.bind(on_complete=self._reset)
        self.theme_cls.bind(primary_color=self._update_color)
        if self.determinate:
            self._start_determinate()
        else:
            self._start_loop()

    def _update_color(self, *args):
        # Follow theme changes so the spinner matches the primary color.
        self.color = self.theme_cls.primary_color

    def _start_determinate(self, *args):
        # One-shot sweep: fade in, rotate to 0, grow the arc to a full circle
        # over determinate_time, then fade out (which triggers _reset).
        self._alpha_anim_in.start(self)
        _rot_anim = Animation(_rotation_angle=0,
                              duration=self.determinate_time * .7,
                              t='out_quad')
        _rot_anim.start(self)
        _angle_start_anim = Animation(_angle_end=360,
                                      duration=self.determinate_time,
                                      t='in_out_quad')
        _angle_start_anim.bind(on_complete=lambda *x: \
            self._alpha_anim_out.start(self))
        _angle_start_anim.start(self)

    def _start_loop(self, *args):
        # Indeterminate cycle: grow the arc end by 270 degrees, then
        # _anim_back pulls the start angle after it; the two alternate via
        # their on_complete callbacks.
        if self._alpha == 0:
            _rot_anim = Animation(_rotation_angle=0,
                                  duration=2,
                                  t='linear')
            _rot_anim.start(self)
        self._alpha = 1
        self._alpha_anim_in.start(self)
        _angle_start_anim = Animation(_angle_end=self._angle_end + 270,
                                      duration=.6,
                                      t='in_out_cubic')
        _angle_start_anim.bind(on_complete=self._anim_back)
        _angle_start_anim.start(self)

    def _anim_back(self, *args):
        _angle_back_anim = Animation(_angle_start=self._angle_end - 8,
                                     duration=.6,
                                     t='in_out_cubic')
        _angle_back_anim.bind(on_complete=self._start_loop)
        _angle_back_anim.start(self)

    def on__rotation_angle(self, *args):
        # Restart the 360 -> 0 rotation each time it reaches zero so the
        # spinner keeps turning indefinitely in indeterminate mode.
        if self._rotation_angle == 0:
            self._rotation_angle = 360
            if not self.determinate:
                _rot_anim = Animation(_rotation_angle=0,
                                      duration=2)
                _rot_anim.start(self)

    def _reset(self, *args):
        # Cancel everything and return to the initial (invisible) state.
        Animation.cancel_all(self, '_angle_start', '_rotation_angle',
                             '_angle_end', '_alpha')
        self._angle_start = 0
        self._angle_end = 8
        self._rotation_angle = 360
        self._alpha = 0
        self.active = False

    def on_active(self, *args):
        if not self.active:
            self._reset()
        else:
            if self.determinate:
                self._start_determinate()
            else:
                self._start_loop()
MDSpinner: switch the canvas instruction from Line to SmoothLine (antialiased), and adjust the stroke width from dp(2) to dp(2.25), so the spinner arc renders more smoothly.
# -*- coding: utf-8 -*-
from kivy.lang import Builder
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ListProperty, BooleanProperty
from kivy.animation import Animation
from kivymd.theming import ThemableBehavior
Builder.load_string('''
<MDSpinner>:
canvas.before:
PushMatrix
Rotate:
angle: self._rotation_angle
origin: self.center
canvas:
Color:
rgba: self.color
a: self._alpha
SmoothLine:
circle: self.center_x, self.center_y, self.width / 2, \
self._angle_start, self._angle_end
cap: 'square'
width: dp(2.25)
canvas.after:
PopMatrix
''')
class MDSpinner(ThemableBehavior, Widget):
    """:class:`MDSpinner` is an implementation of the circular progress
    indicator in Google's Material Design.

    It can be used either as an indeterminate indicator that loops while
    the user waits for something to happen, or as a determinate indicator.

    Set :attr:`determinate` to **True** to activate determinate mode, and
    :attr:`determinate_time` to set the duration of the animation.
    """
    determinate = BooleanProperty(False)
    """:attr:`determinate` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to False
    """

    determinate_time = NumericProperty(2)
    """:attr:`determinate_time` is a :class:`~kivy.properties.NumericProperty`
    and defaults to 2
    """

    active = BooleanProperty(True)
    """Use :attr:`active` to start or stop the spinner.

    :attr:`active` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to True
    """

    color = ListProperty([])
    """:attr:`color` is a :class:`~kivy.properties.ListProperty` and
    defaults to 'self.theme_cls.primary_color'
    """

    # Internal animation state consumed by the kv canvas instructions.
    _alpha = NumericProperty(0)
    _rotation_angle = NumericProperty(360)
    _angle_start = NumericProperty(0)
    _angle_end = NumericProperty(8)

    def __init__(self, **kwargs):
        super(MDSpinner, self).__init__(**kwargs)
        self.color = self.theme_cls.primary_color
        self._alpha_anim_in = Animation(_alpha=1, duration=.8, t='out_quad')
        self._alpha_anim_out = Animation(_alpha=0, duration=.3, t='out_quad')
        self._alpha_anim_out.bind(on_complete=self._reset)
        self.theme_cls.bind(primary_color=self._update_color)
        if self.determinate:
            self._start_determinate()
        else:
            self._start_loop()

    def _update_color(self, *args):
        self.color = self.theme_cls.primary_color

    def _start_determinate(self, *args):
        # One-shot sweep over determinate_time; fade-out triggers _reset.
        self._alpha_anim_in.start(self)
        _rot_anim = Animation(_rotation_angle=0,
                              duration=self.determinate_time * .7,
                              t='out_quad')
        _rot_anim.start(self)
        _angle_start_anim = Animation(_angle_end=360,
                                      duration=self.determinate_time,
                                      t='in_out_quad')
        _angle_start_anim.bind(on_complete=lambda *x: \
            self._alpha_anim_out.start(self))
        _angle_start_anim.start(self)

    def _start_loop(self, *args):
        # Indeterminate cycle: _start_loop and _anim_back alternate via
        # their on_complete callbacks.
        if self._alpha == 0:
            _rot_anim = Animation(_rotation_angle=0,
                                  duration=2,
                                  t='linear')
            _rot_anim.start(self)
        self._alpha = 1
        self._alpha_anim_in.start(self)
        _angle_start_anim = Animation(_angle_end=self._angle_end + 270,
                                      duration=.6,
                                      t='in_out_cubic')
        _angle_start_anim.bind(on_complete=self._anim_back)
        _angle_start_anim.start(self)

    def _anim_back(self, *args):
        _angle_back_anim = Animation(_angle_start=self._angle_end - 8,
                                     duration=.6,
                                     t='in_out_cubic')
        _angle_back_anim.bind(on_complete=self._start_loop)
        _angle_back_anim.start(self)

    def on__rotation_angle(self, *args):
        # Re-arm the 360 -> 0 rotation whenever it completes.
        if self._rotation_angle == 0:
            self._rotation_angle = 360
            if not self.determinate:
                _rot_anim = Animation(_rotation_angle=0,
                                      duration=2)
                _rot_anim.start(self)

    def _reset(self, *args):
        # Cancel all animations and return to the initial invisible state.
        Animation.cancel_all(self, '_angle_start', '_rotation_angle',
                             '_angle_end', '_alpha')
        self._angle_start = 0
        self._angle_end = 8
        self._rotation_angle = 360
        self._alpha = 0
        self.active = False

    def on_active(self, *args):
        if not self.active:
            self._reset()
        else:
            if self.determinate:
                self._start_determinate()
            else:
                self._start_loop()
|
from __future__ import absolute_import, unicode_literals
import copy
import logging
import time
from functools import update_wrapper
from django.db.models.fields import Field
from django.utils.translation import ugettext_lazy as _
from concurrency import forms
from concurrency.api import get_revision_of_object
from concurrency.config import conf
from concurrency.core import ConcurrencyOptions
from concurrency.utils import refetch
try:
    from django.db.models.signals import class_prepared, post_migrate
except ImportError:
    # BUGFIX: was a bare `except:`, which also swallowed SystemExit/
    # KeyboardInterrupt and unrelated errors. Django < 1.7 has no
    # post_migrate; fall back to post_syncdb under the same name.
    from django.db.models.signals import class_prepared, post_syncdb as post_migrate

logger = logging.getLogger(__name__)

# Epoch offset (2000-01-01 local time) subtracted from microsecond timestamps
# so version numbers stay comfortably inside a BigIntegerField.
OFFSET = int(time.mktime((2000, 1, 1, 0, 0, 0, 0, 0, 0)))
def class_prepared_concurrency_handler(sender, **kwargs):
    # Runs for every prepared model class. For concurrency-enabled models:
    # give subclasses their own deep copy of the inherited meta, honour an
    # inner ConcurrencyMeta.enabled flag, wrap the model's save machinery
    # unless manual wrapping was requested, and expose
    # get_concurrency_version on the model.
    if hasattr(sender, '_concurrencymeta'):
        if sender != sender._concurrencymeta.base:
            # Subclass: detach its meta from the base model's copy.
            origin = getattr(sender._concurrencymeta.base, '_concurrencymeta')
            local = copy.deepcopy(origin)
            setattr(sender, '_concurrencymeta', local)
        if hasattr(sender, 'ConcurrencyMeta'):
            sender._concurrencymeta.enabled = getattr(sender.ConcurrencyMeta, 'enabled')
        if not (sender._concurrencymeta.manually):
            sender._concurrencymeta._field.wrap_model(sender)
        setattr(sender, 'get_concurrency_version', get_revision_of_object)
def post_syncdb_concurrency_handler(sender, **kwargs):
    # After migrate/syncdb, (re)create the concurrency triggers on every
    # configured database alias.
    from concurrency.triggers import create_triggers
    from django.db import connections
    databases = [alias for alias in connections]
    create_triggers(databases)


class_prepared.connect(class_prepared_concurrency_handler, dispatch_uid='class_prepared_concurrency_handler')

# TriggerVersionField instances registered for trigger creation.
_TRIGGERS = []

if not conf.MANUAL_TRIGGERS:
    post_migrate.connect(post_syncdb_concurrency_handler, dispatch_uid='post_syncdb_concurrency_handler')
class VersionField(Field):
    """Base class for concurrency version fields.

    Stores the record revision in a BigIntegerField column and wraps the
    model's ``_do_update`` so UPDATEs are filtered on the old version value
    (optimistic locking): a concurrent modification makes the filtered
    UPDATE match zero rows, which is routed to ``conf._callback``.
    """

    def __init__(self, **kwargs):
        verbose_name = kwargs.get('verbose_name', None)
        name = kwargs.get('name', None)
        db_tablespace = kwargs.get('db_tablespace', None)
        db_column = kwargs.get('db_column', None)
        help_text = kwargs.get('help_text', _('record revision number'))
        super(VersionField, self).__init__(verbose_name, name,
                                           help_text=help_text,
                                           default=0,
                                           db_tablespace=db_tablespace,
                                           db_column=db_column)

    def deconstruct(self):
        # NOTE(review): migrations serialise default=1 while get_default()
        # returns 0 -- presumably intentional upstream; confirm before changing.
        name, path, args, kwargs = super(VersionField, self).deconstruct()
        kwargs['default'] = 1
        return name, path, args, kwargs

    def get_default(self):
        return 0

    def get_internal_type(self):
        return "BigIntegerField"

    def to_python(self, value):
        return int(value)

    def validate(self, value, model_instance):
        # Version values are managed internally; skip normal field validation.
        pass

    def formfield(self, **kwargs):
        kwargs['form_class'] = self.form_class
        kwargs['widget'] = forms.VersionField.widget
        return super(VersionField, self).formfield(**kwargs)

    def contribute_to_class(self, cls, name, virtual_only=False):
        super(VersionField, self).contribute_to_class(cls, name)
        # Only the first concrete model in a hierarchy gets the bookkeeping;
        # subclasses are handled by class_prepared_concurrency_handler.
        if hasattr(cls, '_concurrencymeta') or cls._meta.abstract:
            return
        setattr(cls, '_concurrencymeta', ConcurrencyOptions())
        cls._concurrencymeta._field = self
        cls._concurrencymeta.base = cls

    def _set_version_value(self, model_instance, value):
        setattr(model_instance, self.attname, int(value))

    def pre_save(self, model_instance, add):
        # On INSERT assign a fresh version; UPDATEs bump it in _wrap_do_update.
        if add:
            value = self._get_next_version(model_instance)
            self._set_version_value(model_instance, value)
        return getattr(model_instance, self.attname)

    @classmethod
    def wrap_model(cls, model, force=False):
        # Idempotent unless force=True.
        if not force and model._concurrencymeta.versioned_save:
            return
        cls._wrap_model_methods(model, force)
        model._concurrencymeta.versioned_save = True

    @classmethod
    def _wrap_model_methods(cls, model, force=False):
        old_do_update = getattr(model, '_do_update')
        setattr(model, '_do_update', model._concurrencymeta._field._wrap_do_update(old_do_update))

    def _wrap_do_update(self, func):
        # Wrap Model._do_update: bump the version entry inside `values` and
        # add the old version to the UPDATE's WHERE clause so a concurrent
        # write makes the UPDATE match nothing.
        def _do_update(model_instance, base_qs, using, pk_val, values, update_fields, forced_update):
            version_field = model_instance._concurrencymeta._field
            old_version = get_revision_of_object(model_instance)
            if not version_field.model._meta.abstract:
                if version_field.model is not base_qs.model:
                    # Multi-table inheritance save against another table:
                    # defer to the original implementation.
                    return func(model_instance, base_qs, using, pk_val, values, update_fields, forced_update)
            for i, (field, _1, value) in enumerate(values):
                if field == version_field:
                    new_version = field._get_next_version(model_instance)
                    values[i] = (field, _1, new_version)
                    field._set_version_value(model_instance, new_version)
                    break
            if values:
                if model_instance._concurrencymeta.enabled and \
                        conf.ENABLED and \
                        not getattr(model_instance, '_concurrency_disabled', False) and \
                        old_version:
                    filter_kwargs = {'pk': pk_val, version_field.attname: old_version}
                    updated = base_qs.filter(**filter_kwargs)._update(values) >= 1
                    if not updated:
                        # Conflict: restore the old version and let the
                        # configured callback decide what happens.
                        version_field._set_version_value(model_instance, old_version)
                        updated = conf._callback(model_instance)
                else:
                    filter_kwargs = {'pk': pk_val}
                    updated = base_qs.filter(**filter_kwargs)._update(values) >= 1
            else:
                updated = base_qs.filter(pk=pk_val).exists()
            return updated
        return update_wrapper(_do_update, func)
class IntegerVersionField(VersionField):
    """
    Version Field that returns a "unique" version number for the record.

    The version number is produced using time.time() * 1000000, to get the
    benefits of microsecond precision if the system clock provides it.
    """
    form_class = forms.VersionField

    def _get_next_version(self, model_instance):
        # At least one past the stored value, but normally the current
        # microsecond timestamp relative to OFFSET.
        current = int(getattr(model_instance, self.attname, 0))
        timestamped = int(time.time() * 1000000) - OFFSET
        return max(current + 1, timestamped)
class AutoIncVersionField(VersionField):
    """
    Version Field increment the revision number each commit
    """
    form_class = forms.VersionField

    def _get_next_version(self, model_instance):
        # Simple monotonic increment of the stored revision.
        previous = getattr(model_instance, self.attname, 0)
        return int(previous) + 1
class TriggerVersionField(VersionField):
    """
    Version Field increment the revision number each commit.

    The increment is expected to happen server-side (see `check`, which warns
    when the database trigger is missing, and `post_syncdb_concurrency_handler`,
    which creates triggers); in Python the field never changes the stored
    value itself — `pre_save` / `_get_next_version` are no-ops, and the
    wrapped `save` only mirrors the increment on the in-memory instance.
    """
    form_class = forms.VersionField

    def __init__(self, *args, **kwargs):
        # Optional explicit trigger name; currently unused by the
        # `trigger_name` property (see the commented-out return there).
        self._trigger_name = kwargs.pop('trigger_name', None)
        super(TriggerVersionField, self).__init__(*args, **kwargs)

    def contribute_to_class(self, cls, name, virtual_only=False):
        if not cls._meta.abstract:
            # Register concrete fields so triggers can be created for them.
            _TRIGGERS.append(self)
        super(TriggerVersionField, self).contribute_to_class(cls, name)

    def check(self, **kwargs):
        # Django system check: emit W001 when the trigger is missing on the
        # write database for this model.
        errors = []
        model = self.model
        from django.db import router, connections
        from concurrency.triggers import factory
        from django.core.checks import Warning
        alias = router.db_for_write(model)
        connection = connections[alias]
        f = factory(connection)
        if not f.get_trigger(self):
            errors.append(
                Warning(
                    'Missed trigger for field {}'.format(self),
                    hint=None,
                    obj=None,
                    id='concurrency.W001',
                )
            )
        return errors

    @property
    def trigger_name(self):
        from concurrency.triggers import get_trigger_name
        return get_trigger_name(self)
        # return self._trigger_name

    def _get_next_version(self, model_instance):
        # always returns the same value
        return int(getattr(model_instance, self.attname, 1))

    def pre_save(self, model_instance, add):
        # always returns the same value
        return 1

    @staticmethod
    def _increment_version_number(obj):
        # Mirror the server-side increment on the Python instance.
        old_value = get_revision_of_object(obj)
        setattr(obj, obj._concurrencymeta._field.attname, int(old_value) + 1)

    @classmethod
    def _wrap_model_methods(cls, model, force=False):
        # On top of the base wrapping, also wrap `save` so the in-memory
        # version tracks the increment performed by the database.
        super(TriggerVersionField, cls)._wrap_model_methods(model, force)
        # old_do_update = getattr(model, '_do_update')
        # setattr(model, '_do_update', model._concurrencymeta._field._wrap_do_update(old_do_update))
        old_save = getattr(model, 'save')
        setattr(model, 'save', model._concurrencymeta._field._wrap_save(old_save))

    @staticmethod
    def _wrap_save(func):
        def inner(self, force_insert=False, force_update=False, using=None, **kwargs):
            # `refetch=True` reloads the instance after saving so the version
            # reflects the value actually produced by the database.
            reload = kwargs.pop('refetch', False)
            ret = func(self, force_insert, force_update, using, **kwargs)
            TriggerVersionField._increment_version_number(self)
            if reload:
                ret = refetch(self)
                setattr(self,
                        self._concurrencymeta._field.attname,
                        get_revision_of_object(ret))
            return ret

        return update_wrapper(inner, func)
# South (legacy Django migrations) introspection rules, registered only when
# South is importable. If the import fails but South is listed in
# INSTALLED_APPS, re-raise so the misconfiguration is visible.
try:
    from south.modelsinspector import add_introspection_rules

    rules = [
        (
            (IntegerVersionField, AutoIncVersionField, TriggerVersionField),
            [], {"verbose_name": ["verbose_name", {"default": None}],
                 "name": ["name", {"default": None}],
                 "help_text": ["help_text", {"default": ''}],
                 "db_column": ["db_column", {"default": None}],
                 "db_tablespace": ["db_tablespace", {"default": None}],
                 "default": ["default", {"default": 1}],
                 "manually": ["manually", {"default": False}]})
    ]
    add_introspection_rules(rules, [r"^concurrency\.fields\.IntegerVersionField",
                                    r"^concurrency\.fields\.AutoIncVersionField"])
except ImportError as e:
    from django.conf import settings

    if 'south' in settings.INSTALLED_APPS:
        raise e
concurrency.fields: remove the unused `force` argument from `_wrap_model_methods`.
from __future__ import absolute_import, unicode_literals
import copy
import logging
import time
from functools import update_wrapper
from django.db.models.fields import Field
from django.utils.translation import ugettext_lazy as _
from concurrency import forms
from concurrency.api import get_revision_of_object
from concurrency.config import conf
from concurrency.core import ConcurrencyOptions
from concurrency.utils import refetch
# Django >= 1.7 provides `post_migrate`; older versions only have
# `post_syncdb`, which is aliased to the same name. Catch ImportError
# specifically rather than a bare `except:`, which would also swallow
# unrelated failures (e.g. SystemExit/KeyboardInterrupt).
try:
    from django.db.models.signals import class_prepared, post_migrate
except ImportError:
    from django.db.models.signals import class_prepared, post_syncdb as post_migrate

logger = logging.getLogger(__name__)

# Seconds since the Unix epoch for 2000-01-01; subtracted by
# IntegerVersionField to keep timestamp-based version numbers smaller.
OFFSET = int(time.mktime((2000, 1, 1, 0, 0, 0, 0, 0, 0)))
def class_prepared_concurrency_handler(sender, **kwargs):
    # `class_prepared` hook: propagate concurrency metadata from the base
    # model to each subclass and, unless manual wrapping was requested,
    # wrap the model's save machinery.
    if hasattr(sender, '_concurrencymeta'):
        if sender != sender._concurrencymeta.base:
            # Subclass: give it its own deep copy of the base's metadata so
            # per-model changes do not leak back to the base model.
            origin = getattr(sender._concurrencymeta.base, '_concurrencymeta')
            local = copy.deepcopy(origin)
            setattr(sender, '_concurrencymeta', local)
        if hasattr(sender, 'ConcurrencyMeta'):
            # An inner ConcurrencyMeta class can toggle checking per model.
            sender._concurrencymeta.enabled = getattr(sender.ConcurrencyMeta, 'enabled')
        if not (sender._concurrencymeta.manually):
            sender._concurrencymeta._field.wrap_model(sender)
        setattr(sender, 'get_concurrency_version', get_revision_of_object)
def post_syncdb_concurrency_handler(sender, **kwargs):
    # After migrate/syncdb, (re)create the database triggers used by
    # TriggerVersionField on every configured database alias.
    from concurrency.triggers import create_triggers
    from django.db import connections

    databases = [alias for alias in connections]
    create_triggers(databases)


class_prepared.connect(class_prepared_concurrency_handler, dispatch_uid='class_prepared_concurrency_handler')

# Concrete fields registered by TriggerVersionField.contribute_to_class.
_TRIGGERS = []

# Automatic trigger creation can be opted out of via MANUAL_TRIGGERS.
if not conf.MANUAL_TRIGGERS:
    post_migrate.connect(post_syncdb_concurrency_handler, dispatch_uid='post_syncdb_concurrency_handler')
class VersionField(Field):
    """ Base class for concurrency version fields.

    Stores the record revision as a big integer and wires the optimistic
    locking machinery into the model (see `contribute_to_class`,
    `wrap_model` and `_wrap_do_update`).
    """

    def __init__(self, **kwargs):
        verbose_name = kwargs.get('verbose_name', None)
        name = kwargs.get('name', None)
        db_tablespace = kwargs.get('db_tablespace', None)
        db_column = kwargs.get('db_column', None)
        help_text = kwargs.get('help_text', _('record revision number'))

        super(VersionField, self).__init__(verbose_name, name,
                                           help_text=help_text,
                                           default=0,
                                           db_tablespace=db_tablespace,
                                           db_column=db_column)

    def deconstruct(self):
        # Migration serialization uses default=1 (note: this differs from the
        # runtime default of 0 used by __init__/get_default).
        name, path, args, kwargs = super(VersionField, self).deconstruct()
        kwargs['default'] = 1
        return name, path, args, kwargs

    def get_default(self):
        return 0

    def get_internal_type(self):
        return "BigIntegerField"

    def to_python(self, value):
        return int(value)

    def validate(self, value, model_instance):
        # The version value is managed by the field itself; never reject it.
        pass

    def formfield(self, **kwargs):
        kwargs['form_class'] = self.form_class
        kwargs['widget'] = forms.VersionField.widget
        return super(VersionField, self).formfield(**kwargs)

    def contribute_to_class(self, cls, name, virtual_only=False):
        super(VersionField, self).contribute_to_class(cls, name)
        if hasattr(cls, '_concurrencymeta') or cls._meta.abstract:
            # Metadata already present (inherited) or abstract model: nothing
            # to attach here.
            return
        setattr(cls, '_concurrencymeta', ConcurrencyOptions())
        cls._concurrencymeta._field = self
        cls._concurrencymeta.base = cls

    def _set_version_value(self, model_instance, value):
        setattr(model_instance, self.attname, int(value))

    def pre_save(self, model_instance, add):
        # Assign a fresh version only on INSERT; updates are versioned by the
        # wrapped _do_update below.
        if add:
            value = self._get_next_version(model_instance)
            self._set_version_value(model_instance, value)
        return getattr(model_instance, self.attname)

    @classmethod
    def wrap_model(cls, model, force=False):
        if not force and model._concurrencymeta.versioned_save:
            # Already wrapped: avoid wrapping _do_update twice.
            return
        cls._wrap_model_methods(model)
        model._concurrencymeta.versioned_save = True

    @classmethod
    def _wrap_model_methods(cls, model):
        old_do_update = getattr(model, '_do_update')
        setattr(model, '_do_update', model._concurrencymeta._field._wrap_do_update(old_do_update))

    def _wrap_do_update(self, func):
        # Returns a replacement for Model._do_update implementing optimistic
        # locking: the UPDATE is additionally filtered on the previously-read
        # version, so a concurrent modification matches zero rows.
        def _do_update(model_instance, base_qs, using, pk_val, values, update_fields, forced_update):
            version_field = model_instance._concurrencymeta._field
            old_version = get_revision_of_object(model_instance)
            if not version_field.model._meta.abstract:
                if version_field.model is not base_qs.model:
                    # Version field belongs to another model (e.g. MTI parent):
                    # defer to the wrapped original implementation.
                    return func(model_instance, base_qs, using, pk_val, values, update_fields, forced_update)
            # Bump the version both in the UPDATE values and on the instance.
            for i, (field, _1, value) in enumerate(values):
                if field == version_field:
                    new_version = field._get_next_version(model_instance)
                    values[i] = (field, _1, new_version)
                    field._set_version_value(model_instance, new_version)
                    break
            if values:
                if model_instance._concurrencymeta.enabled and \
                        conf.ENABLED and \
                        not getattr(model_instance, '_concurrency_disabled', False) and \
                        old_version:
                    # Optimistic update: match on pk AND the previous version.
                    filter_kwargs = {'pk': pk_val, version_field.attname: old_version}
                    updated = base_qs.filter(**filter_kwargs)._update(values) >= 1
                    if not updated:
                        # Conflict: restore the old version on the instance and
                        # let the configured callback decide the outcome.
                        version_field._set_version_value(model_instance, old_version)
                        updated = conf._callback(model_instance)
                else:
                    # Checking disabled (or no prior version): plain update.
                    filter_kwargs = {'pk': pk_val}
                    updated = base_qs.filter(**filter_kwargs)._update(values) >= 1
            else:
                # Nothing to update; report whether the row exists at all.
                updated = base_qs.filter(pk=pk_val).exists()
            return updated

        return update_wrapper(_do_update, func)
class IntegerVersionField(VersionField):
    """
    Version Field that returns a "unique" version number for the record.

    The version number is produced using time.time() * 1000000, to get the
    benefits of microsecond if the system clock provides them.
    """
    form_class = forms.VersionField

    def _get_next_version(self, model_instance):
        # Take whichever is larger: a simple increment of the current value,
        # or a microsecond timestamp relative to the 2000-01-01 offset.
        current = int(getattr(model_instance, self.attname, 0))
        stamp = int(time.time() * 1000000) - OFFSET
        return max(current + 1, stamp)
class AutoIncVersionField(VersionField):
    """
    Version Field increment the revision number each commit
    """
    form_class = forms.VersionField

    def _get_next_version(self, model_instance):
        # Plain monotonic counter: previous value (default 0) plus one.
        previous = getattr(model_instance, self.attname, 0)
        return int(previous) + 1
class TriggerVersionField(VersionField):
    """
    Version Field increment the revision number each commit.

    The increment is expected to happen server-side (see `check`, which warns
    when the database trigger is missing, and `post_syncdb_concurrency_handler`,
    which creates triggers); in Python the field never changes the stored
    value itself — `pre_save` / `_get_next_version` are no-ops, and the
    wrapped `save` only mirrors the increment on the in-memory instance.
    """
    form_class = forms.VersionField

    def __init__(self, *args, **kwargs):
        # Optional explicit trigger name; currently unused by the
        # `trigger_name` property (see the commented-out return there).
        self._trigger_name = kwargs.pop('trigger_name', None)
        super(TriggerVersionField, self).__init__(*args, **kwargs)

    def contribute_to_class(self, cls, name, virtual_only=False):
        if not cls._meta.abstract:
            # Register concrete fields so triggers can be created for them.
            _TRIGGERS.append(self)
        super(TriggerVersionField, self).contribute_to_class(cls, name)

    def check(self, **kwargs):
        # Django system check: emit W001 when the trigger is missing on the
        # write database for this model.
        errors = []
        model = self.model
        from django.db import router, connections
        from concurrency.triggers import factory
        from django.core.checks import Warning
        alias = router.db_for_write(model)
        connection = connections[alias]
        f = factory(connection)
        if not f.get_trigger(self):
            errors.append(
                Warning(
                    'Missed trigger for field {}'.format(self),
                    hint=None,
                    obj=None,
                    id='concurrency.W001',
                )
            )
        return errors

    @property
    def trigger_name(self):
        from concurrency.triggers import get_trigger_name
        return get_trigger_name(self)
        # return self._trigger_name

    def _get_next_version(self, model_instance):
        # always returns the same value
        return int(getattr(model_instance, self.attname, 1))

    def pre_save(self, model_instance, add):
        # always returns the same value
        return 1

    @staticmethod
    def _increment_version_number(obj):
        # Mirror the server-side increment on the Python instance.
        old_value = get_revision_of_object(obj)
        setattr(obj, obj._concurrencymeta._field.attname, int(old_value) + 1)

    @classmethod
    def _wrap_model_methods(cls, model):
        # On top of the base wrapping, also wrap `save` so the in-memory
        # version tracks the increment performed by the database.
        super(TriggerVersionField, cls)._wrap_model_methods(model)
        # old_do_update = getattr(model, '_do_update')
        # setattr(model, '_do_update', model._concurrencymeta._field._wrap_do_update(old_do_update))
        old_save = getattr(model, 'save')
        setattr(model, 'save', model._concurrencymeta._field._wrap_save(old_save))

    @staticmethod
    def _wrap_save(func):
        def inner(self, force_insert=False, force_update=False, using=None, **kwargs):
            # `refetch=True` reloads the instance after saving so the version
            # reflects the value actually produced by the database.
            reload = kwargs.pop('refetch', False)
            ret = func(self, force_insert, force_update, using, **kwargs)
            TriggerVersionField._increment_version_number(self)
            if reload:
                ret = refetch(self)
                setattr(self,
                        self._concurrencymeta._field.attname,
                        get_revision_of_object(ret))
            return ret

        return update_wrapper(inner, func)
# South (legacy Django migrations) introspection rules, registered only when
# South is importable. If the import fails but South is listed in
# INSTALLED_APPS, re-raise so the misconfiguration is visible.
try:
    from south.modelsinspector import add_introspection_rules

    rules = [
        (
            (IntegerVersionField, AutoIncVersionField, TriggerVersionField),
            [], {"verbose_name": ["verbose_name", {"default": None}],
                 "name": ["name", {"default": None}],
                 "help_text": ["help_text", {"default": ''}],
                 "db_column": ["db_column", {"default": None}],
                 "db_tablespace": ["db_tablespace", {"default": None}],
                 "default": ["default", {"default": 1}],
                 "manually": ["manually", {"default": False}]})
    ]
    add_introspection_rules(rules, [r"^concurrency\.fields\.IntegerVersionField",
                                    r"^concurrency\.fields\.AutoIncVersionField"])
except ImportError as e:
    from django.conf import settings

    if 'south' in settings.INSTALLED_APPS:
        raise e
|
"""Dashboard app views"""
import os
import time
import json
from django.contrib.messages import add_message, ERROR, WARNING
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import Http404, HttpResponse, HttpResponseRedirect
from logbook import Logger
from kitchen.backends.lchef import (get_nodes, get_nodes_extended, get_roles,
get_role_groups, get_environments,
filter_nodes, group_nodes_by_host,
inject_plugin_data, RepoError, plugins as PLUGINS)
from kitchen.dashboard import graphs
from kitchen.settings import (SHOW_VIRT_VIEW, SHOW_HOST_NAMES, SHOW_LINKS,
REPO, SYNCDATE_FILE)
log = Logger(__name__)
def _get_data(request, env, roles, virt, group_by_host=False):
    """Returns processed repository data, filtering nodes based on given args

    Args:
        request: Django request, used only to attach warning messages.
        env: environment name to filter on ('' disables the filter).
        roles: role filter string ('' disables the filter).
        virt: virtualization filter value, or None for no filter.
        group_by_host: when True, group nodes under their physical host and
            apply the role filter during grouping instead of in filter_nodes.
    """
    data = {'filter_env': env, 'filter_roles': roles, 'filter_virt': virt}
    data['roles'] = get_roles()
    roles_groups = get_role_groups(data['roles'])
    data['roles_groups'] = roles_groups
    data['virt_roles'] = ['host', 'guest']
    # Get environments before we filter nodes
    data['nodes'] = get_nodes()
    data['nodes_extended'] = get_nodes_extended(data['nodes'])
    data['environments'] = get_environments(data['nodes_extended'])
    # When grouping by host, roles are applied by group_nodes_by_host below,
    # not by filter_nodes.
    roles_to_filter = '' if group_by_host else data['filter_roles']
    if data['filter_env'] or roles_to_filter or data['filter_virt']:
        data['nodes_extended'] = filter_nodes(data['nodes_extended'],
                                              data['filter_env'],
                                              roles_to_filter,
                                              data['filter_virt'])
    if group_by_host:
        data['nodes_extended'] = group_nodes_by_host(
            data['nodes_extended'], roles=data['filter_roles'])
    inject_plugin_data(data['nodes_extended'])
    if not data['nodes_extended']:
        add_message(request, WARNING,
                    "There are no nodes that fit the supplied criteria.")
    return data
def _show_repo_sync_date(request):
    """Shows the sync date, which will be the modified date of a file"""
    try:
        minutes_since_sync = (time.time() - os.stat(SYNCDATE_FILE).st_mtime) / 60
    except OSError:
        # A missing/unreadable sync-date file means the sync itself failed.
        add_message(request, ERROR, "There have been errors while "
                                    "syncing the repo")
        return
    threshold = REPO['SYNC_PERIOD'] * 2.5
    if minutes_since_sync > threshold:
        add_message(request, WARNING, "The {0} repo is getting out of "
                                      "sync. Last pull was {1} minutes "
                                      "ago.".format(REPO['NAME'], int(minutes_since_sync)))
def _set_options(options):
"""Sets default options if none are given"""
if options is None:
# Set defaults
options = ''
if SHOW_HOST_NAMES:
options += 'show_hostnames,'
return options
def main(request):
    """Default main view showing a list of nodes"""
    _show_repo_sync_date(request)
    data = {}
    try:
        data = _get_data(
            request,
            request.GET.get('env', REPO['DEFAULT_ENV']),
            request.GET.get('roles', ''),
            request.GET.get('virt', REPO['DEFAULT_VIRT']),
        )
    except RepoError as e:
        add_message(request, ERROR, str(e))
    else:
        # Successful fetch: serialise nodes for the template and expose the
        # display toggles.
        data.update({
            'nodes': json.dumps(data['nodes']),
            'show_virt': SHOW_VIRT_VIEW,
            'show_links': SHOW_LINKS,
            'query_string': request.META['QUERY_STRING'],
        })
    return render_to_response(
        'main.html', data, context_instance=RequestContext(request))
def virt(request):
    """Displays a view where the nodes are grouped by physical host"""
    _show_repo_sync_date(request)
    data = {}
    try:
        # virt filter is None: grouping needs both hosts and their guests.
        data = _get_data(request,
                         request.GET.get('env', REPO['DEFAULT_ENV']),
                         request.GET.get('roles', ''),
                         None, group_by_host=True)
    except RepoError as e:
        add_message(request, ERROR, str(e))
    else:
        # Serialise the raw node list for client-side use.
        data['nodes'] = json.dumps(data['nodes'])
        data['show_links'] = SHOW_LINKS
        data['query_string'] = request.META['QUERY_STRING']
    return render_to_response('virt.html',
                              data, context_instance=RequestContext(request))
def graph(request):
    """Graph view where users can visualize graphs of their nodes
    generated using Graphviz open source graph visualization library
    """
    _show_repo_sync_date(request)
    data = {}
    options = _set_options(request.GET.get('options'))
    env_filter = request.GET.get('env', REPO['DEFAULT_ENV'])
    try:
        # Filter to guest nodes for the graph.
        data = _get_data(request, env_filter, request.GET.get('roles', ''),
                         'guest')
    except RepoError as e:
        add_message(request, ERROR, str(e))
    else:
        if env_filter:
            # Render the node map; success is False when generation fails.
            success, msg = graphs.generate_node_map(
                data['nodes_extended'], data.get('roles', []),
                'show_hostnames' in options)
            data['draw_graph'] = success
            if not success:
                add_message(request, ERROR, msg)
        else:
            # An environment is required to build a meaningful graph.
            add_message(request, WARNING, "Please select an environment")
    data['show_hostnames'] = 'show_hostnames' in options
    data['query_string'] = request.META['QUERY_STRING']
    return render_to_response('graph.html',
                              data, context_instance=RequestContext(request))
def plugins(request, name, method, plugin_type='list'):
    """Plugin interface view which either responds with the page created by
    the plugin method, or raises a 404 HTTP error.

    Args:
        name: registered plugin name (key in PLUGINS).
        method: attribute of the plugin to run; must be marked as a view.
        plugin_type: 'list' (default) or 'v'/'virt' for host-grouped plugins.
    Returns:
        The HttpResponse produced by the plugin method.
    Raises:
        Http404: unknown plugin/method, wrong type, failed run, or a result
            that is not an HttpResponse.
    """
    try:
        plugin = PLUGINS[name]
    except KeyError:
        raise Http404("Requested plugin '{0}' not found".format(name))
    try:
        func = getattr(plugin, method)
    except AttributeError:
        raise Http404("Plugin '{0}' has no method '{1}'".format(name, method))
    if not getattr(func, '__is_view__', False):
        raise Http404("Plugin method '{0}.{1}' is not defined as a view".format(name, method))
    nodes = get_nodes()
    nodes = get_nodes_extended(nodes)
    if plugin_type in ('v', 'virt'):
        if func.__p_type__ != 'virt':
            raise Http404("Plugin '{0}.{1}' has wrong type".format(name, method))
        nodes = group_nodes_by_host(nodes, roles=None)
    elif func.__p_type__ != 'list':
        raise Http404("Plugin '{0}.{1}' has wrong type".format(name, method))
    inject_plugin_data(nodes)
    try:
        result = func(request, nodes)
    except TypeError:
        raise Http404("Failed running plugin '{0}.{1}'".format(name, method))
    # Accept any HttpResponse subclass, not only redirects: plugins may
    # legitimately render pages or return other response types.
    if not isinstance(result, HttpResponse):
        raise Http404("Plugin '{0}.{1}' returned unexpected result: {2}".format(name, method, result))
    return result
kitchen.dashboard.views: allow plugins to return any kind of HttpResponse
object, not only redirects.
"""Dashboard app views"""
import os
import time
import json
from django.contrib.messages import add_message, ERROR, WARNING
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import Http404, HttpResponse
from logbook import Logger
from kitchen.backends.lchef import (get_nodes, get_nodes_extended, get_roles,
get_role_groups, get_environments,
filter_nodes, group_nodes_by_host,
inject_plugin_data, RepoError, plugins as PLUGINS)
from kitchen.dashboard import graphs
from kitchen.settings import (SHOW_VIRT_VIEW, SHOW_HOST_NAMES, SHOW_LINKS,
REPO, SYNCDATE_FILE)
log = Logger(__name__)
def _get_data(request, env, roles, virt, group_by_host=False):
    """Returns processed repository data, filtering nodes based on given args

    Args:
        request: Django request, used only to attach warning messages.
        env: environment name to filter on ('' disables the filter).
        roles: role filter string ('' disables the filter).
        virt: virtualization filter value, or None for no filter.
        group_by_host: when True, group nodes under their physical host and
            apply the role filter during grouping instead of in filter_nodes.
    """
    data = {'filter_env': env, 'filter_roles': roles, 'filter_virt': virt}
    data['roles'] = get_roles()
    roles_groups = get_role_groups(data['roles'])
    data['roles_groups'] = roles_groups
    data['virt_roles'] = ['host', 'guest']
    # Get environments before we filter nodes
    data['nodes'] = get_nodes()
    data['nodes_extended'] = get_nodes_extended(data['nodes'])
    data['environments'] = get_environments(data['nodes_extended'])
    # When grouping by host, roles are applied by group_nodes_by_host below,
    # not by filter_nodes.
    roles_to_filter = '' if group_by_host else data['filter_roles']
    if data['filter_env'] or roles_to_filter or data['filter_virt']:
        data['nodes_extended'] = filter_nodes(data['nodes_extended'],
                                              data['filter_env'],
                                              roles_to_filter,
                                              data['filter_virt'])
    if group_by_host:
        data['nodes_extended'] = group_nodes_by_host(
            data['nodes_extended'], roles=data['filter_roles'])
    inject_plugin_data(data['nodes_extended'])
    if not data['nodes_extended']:
        add_message(request, WARNING,
                    "There are no nodes that fit the supplied criteria.")
    return data
def _show_repo_sync_date(request):
    """Shows the sync date, which will be the modified date of a file"""
    try:
        # Age of the last successful sync, in minutes.
        sync_age = (time.time() - os.stat(SYNCDATE_FILE).st_mtime) / 60
    except OSError:
        # A missing/unreadable sync-date file means the sync itself failed.
        add_message(request, ERROR, "There have been errors while "
                                    "syncing the repo")
    else:
        # Warn when the last pull is older than 2.5 sync periods.
        sync_lim = REPO['SYNC_PERIOD'] * 2.5
        if sync_age > sync_lim:
            add_message(request, WARNING, "The {0} repo is getting out of "
                                          "sync. Last pull was {1} minutes "
                                          "ago.".format(REPO['NAME'], int(sync_age)))
def _set_options(options):
"""Sets default options if none are given"""
if options is None:
# Set defaults
options = ''
if SHOW_HOST_NAMES:
options += 'show_hostnames,'
return options
def main(request):
    """Default main view showing a list of nodes"""
    _show_repo_sync_date(request)
    data = {}
    try:
        data = _get_data(request,
                         request.GET.get('env', REPO['DEFAULT_ENV']),
                         request.GET.get('roles', ''),
                         request.GET.get('virt', REPO['DEFAULT_VIRT']))
    except RepoError as e:
        # Repository problems are reported to the user instead of a 500.
        add_message(request, ERROR, str(e))
    else:
        # Serialise the raw node list for client-side use.
        data['nodes'] = json.dumps(data['nodes'])
        data['show_virt'] = SHOW_VIRT_VIEW
        data['show_links'] = SHOW_LINKS
        data['query_string'] = request.META['QUERY_STRING']
    return render_to_response('main.html',
                              data, context_instance=RequestContext(request))
def virt(request):
    """Displays a view where the nodes are grouped by physical host"""
    _show_repo_sync_date(request)
    data = {}
    try:
        # No virt filter here: grouping needs both hosts and their guests.
        data = _get_data(
            request,
            request.GET.get('env', REPO['DEFAULT_ENV']),
            request.GET.get('roles', ''),
            None,
            group_by_host=True,
        )
    except RepoError as e:
        add_message(request, ERROR, str(e))
    else:
        data.update({
            'nodes': json.dumps(data['nodes']),
            'show_links': SHOW_LINKS,
            'query_string': request.META['QUERY_STRING'],
        })
    return render_to_response(
        'virt.html', data, context_instance=RequestContext(request))
def graph(request):
    """Graph view where users can visualize graphs of their nodes
    generated using Graphviz open source graph visualization library
    """
    _show_repo_sync_date(request)
    data = {}
    options = _set_options(request.GET.get('options'))
    env_filter = request.GET.get('env', REPO['DEFAULT_ENV'])
    try:
        # Filter to guest nodes for the graph.
        data = _get_data(request, env_filter, request.GET.get('roles', ''),
                         'guest')
    except RepoError as e:
        add_message(request, ERROR, str(e))
    else:
        if env_filter:
            # Render the node map; success is False when generation fails.
            success, msg = graphs.generate_node_map(
                data['nodes_extended'], data.get('roles', []),
                'show_hostnames' in options)
            data['draw_graph'] = success
            if not success:
                add_message(request, ERROR, msg)
        else:
            # An environment is required to build a meaningful graph.
            add_message(request, WARNING, "Please select an environment")
    data['show_hostnames'] = 'show_hostnames' in options
    data['query_string'] = request.META['QUERY_STRING']
    return render_to_response('graph.html',
                              data, context_instance=RequestContext(request))
def plugins(request, name, method, plugin_type='list'):
    """Dispatch to a plugin view method and return its HTTP response.

    Raises Http404 when the plugin or method is missing, is not marked as a
    view, has the wrong type for the requested mode, fails to run, or
    returns something that is not an HttpResponse.
    """
    try:
        plugin = PLUGINS[name]
    except KeyError:
        raise Http404("Requested plugin '{0}' not found".format(name))
    try:
        view_func = getattr(plugin, method)
    except AttributeError:
        raise Http404("Plugin '{0}' has no method '{1}'".format(name, method))
    if not getattr(view_func, '__is_view__', False):
        raise Http404("Plugin method '{0}.{1}' is not defined as a view".format(name, method))
    nodes = get_nodes_extended(get_nodes())
    if plugin_type in ('v', 'virt'):
        if view_func.__p_type__ != 'virt':
            raise Http404("Plugin '{0}.{1}' has wrong type".format(name, method))
        nodes = group_nodes_by_host(nodes, roles=None)
    elif view_func.__p_type__ != 'list':
        raise Http404("Plugin '{0}.{1}' has wrong type".format(name, method))
    inject_plugin_data(nodes)
    try:
        result = view_func(request, nodes)
    except TypeError:
        raise Http404("Failed running plugin '{0}.{1}'".format(name, method))
    if isinstance(result, HttpResponse):
        return result
    raise Http404("Plugin '{0}.{1}' returned unexpected result: {2}".format(name, method, result))
|
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
`prometheus.py`
Prometheus backend implementation.
"""
import json
import logging
import os
import pprint
from prometheus_http_client import Prometheus
from slo_generator.backends.base import MetricBackend
LOGGER = logging.getLogger(__name__)
class PrometheusBackend(MetricBackend):
    """Backend for querying metrics from Prometheus.

    Keyword Args:
        client (obj, optional): Existing Prometheus client to reuse.
        url (str, optional): Prometheus URL, exported as PROMETHEUS_URL.
        headers (dict, optional): HTTP headers, exported as PROMETHEUS_HEAD.
    """

    def __init__(self, **kwargs):
        # `client` is optional: default to None instead of raising KeyError
        # when the caller does not supply one (the branch below already
        # handles the missing-client case).
        self.client = kwargs.pop('client', None)
        if not self.client:
            url = kwargs.get('url')
            headers = kwargs.get('headers')
            if url:
                os.environ['PROMETHEUS_URL'] = url
            if headers:
                # prometheus_http_client reads its config from env variables.
                os.environ['PROMETHEUS_HEAD'] = json.dumps(headers)
            LOGGER.debug(f'Prometheus URL: {url}')
            LOGGER.debug(f'Prometheus headers: {headers}')
            self.client = Prometheus()

    def query_sli(self, **kwargs):
        """Query SLI value from a given PromQL expression.

        Args:
            kwargs (dict):
                timestamp (int): Timestamp to query.
                window (int): Window to query (in seconds).
                measurement (dict):
                    expression (str): PromQL expression.

        Returns:
            float: SLI value.
        """
        window = kwargs['window']
        measurement = kwargs['measurement']
        expr = measurement['expression']
        # Substitute the duration placeholder with the actual window size.
        expression = expr.replace("[window]", f"[{window}s]")
        data = self.query(expression)
        LOGGER.debug(
            f"Expression: {expression} | Result: {pprint.pformat(data)}")
        try:
            sli_value = float(data['data']['result'][0]['value'][1])
        except IndexError:
            # No timeseries matched the expression: report an SLI of 0.
            sli_value = 0
        LOGGER.debug(f"SLI value: {sli_value}")
        return sli_value

    def good_bad_ratio(self, **kwargs):
        """Compute good bad ratio from two metric filters.

        Args:
            kwargs (dict):
                window (str): Query window.
                measurement (dict): Measurement config
                    filter_good (str): PromQL query for good events.
                    filter_bad (str, optional): PromQL query for bad events.
                    filter_valid (str, optional): PromQL query for valid events.

        Note:
            At least one of `filter_bad` or `filter_valid` is required.

        Returns:
            tuple: A tuple of (good_event_count, bad_event_count).
        """
        window = kwargs['window']
        filter_good = kwargs['measurement']['filter_good']
        filter_bad = kwargs['measurement'].get('filter_bad')
        filter_valid = kwargs['measurement'].get('filter_valid')

        # Replace window by its value in the error budget policy step
        expr_good = filter_good.replace('[window]', f'[{window}s]')
        res_good = self.query(expr_good)
        good_event_count = PrometheusBackend.count(res_good)

        if filter_bad:
            expr_bad = filter_bad.replace('[window]', f'[{window}s]')
            res_bad = self.query(expr_bad)
            bad_event_count = PrometheusBackend.count(res_bad)
        elif filter_valid:
            expr_valid = filter_valid.replace('[window]', f'[{window}s]')
            res_valid = self.query(expr_valid)
            bad_event_count = \
                PrometheusBackend.count(res_valid) - good_event_count
        else:
            # Single message string: the original passed two arguments to
            # Exception, producing an args tuple instead of a readable message.
            raise Exception(
                "Oneof `filter_bad` or `filter_valid` is needed in your SLO "
                "configuration file")

        LOGGER.debug(f'Good events: {good_event_count} | '
                     f'Bad events: {bad_event_count}')
        return (good_event_count, bad_event_count)

    def query(self, filter):
        """Run an instant PromQL query.

        Args:
            filter (str): PromQL expression. (Name kept for interface
                compatibility although it shadows the builtin.)

        Returns:
            dict: Parsed Prometheus JSON response.
        """
        timeseries = self.client.query(metric=filter)
        timeseries = json.loads(timeseries)
        LOGGER.debug(pprint.pformat(timeseries))
        return timeseries

    @staticmethod
    def count(timeseries):
        """Count event in Prometheus timeseries.

        Args:
            timeseries (dict): Prometheus query response.

        Returns:
            int: Event count.
        """
        # TODO: Note that this function could be replaced by using the
        # `count_over_time` function that Prometheus provides.
        try:
            return len(timeseries['data']['result'][0]['values'])
        except (IndexError, KeyError) as exception:
            LOGGER.warning("Couldn't find any values in timeseries response")
            LOGGER.debug(exception)
            return 0  # no events in timeseries
slo_generator.backends.prometheus: fix the comment format by rewording the TODO as a plain Note.
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
`prometheus.py`
Prometheus backend implementation.
"""
import json
import logging
import os
import pprint
from prometheus_http_client import Prometheus
from slo_generator.backends.base import MetricBackend
LOGGER = logging.getLogger(__name__)
class PrometheusBackend(MetricBackend):
    """Backend for querying metrics from Prometheus.

    Keyword Args:
        client (obj, optional): Existing Prometheus client to reuse.
        url (str, optional): Prometheus URL, exported as PROMETHEUS_URL.
        headers (dict, optional): HTTP headers, exported as PROMETHEUS_HEAD.
    """

    def __init__(self, **kwargs):
        # `client` is optional: default to None instead of raising KeyError
        # when the caller does not supply one (the branch below already
        # handles the missing-client case).
        self.client = kwargs.pop('client', None)
        if not self.client:
            url = kwargs.get('url')
            headers = kwargs.get('headers')
            if url:
                os.environ['PROMETHEUS_URL'] = url
            if headers:
                # prometheus_http_client reads its config from env variables.
                os.environ['PROMETHEUS_HEAD'] = json.dumps(headers)
            LOGGER.debug(f'Prometheus URL: {url}')
            LOGGER.debug(f'Prometheus headers: {headers}')
            self.client = Prometheus()

    def query_sli(self, **kwargs):
        """Query SLI value from a given PromQL expression.

        Args:
            kwargs (dict):
                timestamp (int): Timestamp to query.
                window (int): Window to query (in seconds).
                measurement (dict):
                    expression (str): PromQL expression.

        Returns:
            float: SLI value.
        """
        window = kwargs['window']
        measurement = kwargs['measurement']
        expr = measurement['expression']
        # Substitute the duration placeholder with the actual window size.
        expression = expr.replace("[window]", f"[{window}s]")
        data = self.query(expression)
        LOGGER.debug(
            f"Expression: {expression} | Result: {pprint.pformat(data)}")
        try:
            sli_value = float(data['data']['result'][0]['value'][1])
        except IndexError:
            # No timeseries matched the expression: report an SLI of 0.
            sli_value = 0
        LOGGER.debug(f"SLI value: {sli_value}")
        return sli_value

    def good_bad_ratio(self, **kwargs):
        """Compute good bad ratio from two metric filters.

        Args:
            kwargs (dict):
                window (str): Query window.
                measurement (dict): Measurement config
                    filter_good (str): PromQL query for good events.
                    filter_bad (str, optional): PromQL query for bad events.
                    filter_valid (str, optional): PromQL query for valid events.

        Note:
            At least one of `filter_bad` or `filter_valid` is required.

        Returns:
            tuple: A tuple of (good_event_count, bad_event_count).
        """
        window = kwargs['window']
        filter_good = kwargs['measurement']['filter_good']
        filter_bad = kwargs['measurement'].get('filter_bad')
        filter_valid = kwargs['measurement'].get('filter_valid')

        # Replace window by its value in the error budget policy step
        expr_good = filter_good.replace('[window]', f'[{window}s]')
        res_good = self.query(expr_good)
        good_event_count = PrometheusBackend.count(res_good)

        if filter_bad:
            expr_bad = filter_bad.replace('[window]', f'[{window}s]')
            res_bad = self.query(expr_bad)
            bad_event_count = PrometheusBackend.count(res_bad)
        elif filter_valid:
            expr_valid = filter_valid.replace('[window]', f'[{window}s]')
            res_valid = self.query(expr_valid)
            bad_event_count = \
                PrometheusBackend.count(res_valid) - good_event_count
        else:
            # Single message string: the original passed two arguments to
            # Exception, producing an args tuple instead of a readable message.
            raise Exception(
                "Oneof `filter_bad` or `filter_valid` is needed in your SLO "
                "configuration file")

        LOGGER.debug(f'Good events: {good_event_count} | '
                     f'Bad events: {bad_event_count}')
        return (good_event_count, bad_event_count)

    def query(self, filter):
        """Run an instant PromQL query.

        Args:
            filter (str): PromQL expression. (Name kept for interface
                compatibility although it shadows the builtin.)

        Returns:
            dict: Parsed Prometheus JSON response.
        """
        timeseries = self.client.query(metric=filter)
        timeseries = json.loads(timeseries)
        LOGGER.debug(pprint.pformat(timeseries))
        return timeseries

    @staticmethod
    def count(timeseries):
        """Count event in Prometheus timeseries.

        Args:
            timeseries (dict): Prometheus query response.

        Returns:
            int: Event count.
        """
        # Note: this function could be replaced by using the `count_over_time`
        # function that Prometheus provides.
        try:
            return len(timeseries['data']['result'][0]['values'])
        except (IndexError, KeyError) as exception:
            LOGGER.warning("Couldn't find any values in timeseries response")
            LOGGER.debug(exception)
            return 0  # no events in timeseries
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db.models import Q
from functools import reduce
import operator
from .models import Well
class Search():
    @staticmethod
    def well_search(well='', addr='', legal='', owner='', lat_long_box=None, query_limit=1000):
        """
        Search for wells

        :param well: the identification plate number or well tag number
        :param addr: part of the street address or site area of the well
        :param legal: part of the legal plan, legal district lot or pid
        :param owner: part of the owner's full name
        :param lat_long_box: an object including 'start_corner' and 'end_corner',
            each a latitude-longitude pair describing extreme corners of an extent
            rectangle from within which the wells are to be selected
        :param query_limit: the number of wells the caller will display
        :returns: QuerySet of Well objects or None if no matching records found.
        """
        # Fix: declared @staticmethod. The original definition had no `self`
        # parameter, so calling it on a Search *instance* silently bound the
        # instance to `well`; class-level calls are unaffected.
        well_results = None
        q_list = []
        if well:
            q_list.append(Q(identification_plate_number=well) | Q(well_tag_number=well))
        if addr:
            q_list.append(Q(street_address__icontains=addr) | Q(city__icontains=addr))
        if legal:
            # PIDs are matched without leading zeroes.
            pid = legal.lstrip('0')
            q_list.append(Q(legal_plan__icontains=legal) |
                          Q(legal_district_lot__icontains=legal) | Q(legal_pid=pid))
        if owner:
            q_list.append(Q(owner_full_name__icontains=owner))
        # If there is a lat_long_box, then a user has drawn a box on the map
        # to limit their query to within the box.
        if lat_long_box and lat_long_box['start_corner'] and lat_long_box['end_corner']:
            delimiter = ','
            start_corner = lat_long_box['start_corner'].split(delimiter)
            end_corner = lat_long_box['end_corner'].split(delimiter)
            # Casting to floats serves as last-minute sanitisation.
            start_lat = float(start_corner[0])
            start_long = float(start_corner[1])
            end_lat = float(end_corner[0])
            end_long = float(end_corner[1])
            # The minimum and maximum latitude values should behave as expected
            max_lat = max(start_lat, end_lat)
            min_lat = min(start_lat, end_lat)
            # Corners may arrive in either order, so take min/max of the
            # longitudes as well. NOTE(review): no absolute-value normalisation
            # is applied, so an erroneously-positive longitude for BC will
            # simply not match -- confirm intended handling.
            max_long = max(start_long, end_long)
            min_long = min(start_long, end_long)
            q_list.append(Q(latitude__gt=min_lat) & Q(latitude__lt=max_lat)
                          & Q(longitude__gt=min_long) & Q(longitude__lt=max_long))
        if q_list:
            # If there are too many results, we return one plus the query limit
            # to engage post-query logic in views.py
            well_results = Well.objects.distinct().filter(
                reduce(operator.and_, q_list)).order_by('well_tag_number', 'when_created')[:(query_limit + 1)]
        return well_results
# Comments on well_search (documented variant of the module follows below).
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db.models import Q
from functools import reduce
import operator
from .models import Well
class Search():
    @staticmethod
    def well_search(well='', addr='', legal='', owner='', lat_long_box=None, query_limit=1000):
        """
        Search for wells

        :param well: the identification plate number or well tag number
        :param addr: part of the street address or site area of the well
        :param legal: part of the legal plan, legal district lot or pid
        :param owner: part of the owner's full name
        :param lat_long_box: an object including 'start_corner' and 'end_corner',
            each a latitude-longitude pair describing extreme corners of an extent
            rectangle from within which the wells are to be selected
        :param query_limit: the number of wells the caller will display
        :returns: QuerySet of Well objects or None if no matching records found.
        """
        # Fix: declared @staticmethod. The original definition had no `self`
        # parameter, so calling it on a Search *instance* silently bound the
        # instance to `well`; class-level calls are unaffected.
        well_results = None
        q_list = []
        if well:
            q_list.append(Q(identification_plate_number=well) | Q(well_tag_number=well))
        if addr:
            q_list.append(Q(street_address__icontains=addr) | Q(city__icontains=addr))
        if legal:
            # PIDs are matched without leading zeroes.
            pid = legal.lstrip('0')
            q_list.append(Q(legal_plan__icontains=legal) |
                          Q(legal_district_lot__icontains=legal) | Q(legal_pid=pid))
        if owner:
            q_list.append(Q(owner_full_name__icontains=owner))
        # If there is a lat_long_box, then a user has drawn a box on the map
        # to limit their query to within the box.
        if lat_long_box and lat_long_box['start_corner'] and lat_long_box['end_corner']:
            delimiter = ','
            start_corner = lat_long_box['start_corner'].split(delimiter)
            end_corner = lat_long_box['end_corner'].split(delimiter)
            # Casting to floats serves as last-minute sanitisation.
            start_lat = float(start_corner[0])
            start_long = float(start_corner[1])
            end_lat = float(end_corner[0])
            end_long = float(end_corner[1])
            # The minimum and maximum latitude values should behave as expected
            max_lat = max(start_lat, end_lat)
            min_lat = min(start_lat, end_lat)
            # Corners may arrive in either order, so take min/max of the
            # longitudes as well. NOTE(review): no absolute-value normalisation
            # is applied, so an erroneously-positive longitude for BC will
            # simply not match -- confirm intended handling.
            max_long = max(start_long, end_long)
            min_long = min(start_long, end_long)
            q_list.append(Q(latitude__gt=min_lat) & Q(latitude__lt=max_lat)
                          & Q(longitude__gt=min_long) & Q(longitude__lt=max_long))
        if q_list:
            # If there are too many results, we return one plus the query limit
            # to engage post-query logic in views.py
            well_results = Well.objects.distinct().filter(
                reduce(operator.and_, q_list)).order_by('well_tag_number', 'when_created')[:(query_limit + 1)]
        return well_results
# ---------------------------------------------------------------------------
# (The MIT License)
#
# Copyright (c) 2016 Kura
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
blackhole.smtp.
This module contains the Smtp protocol.
"""
import asyncio
import base64
from email.utils import make_msgid as message_id
import inspect
import logging
import random
from blackhole.config import Config
from blackhole.utils import mailname
logger = logging.getLogger('blackhole.smtp')
class Smtp(asyncio.StreamReaderProtocol):
    """The class responsible for handling SMTP/SMTPS commands."""

    bounce_responses = {
        450: 'Requested mail action not taken: mailbox unavailable',
        451: 'Requested action aborted: local error in processing',
        452: 'Requested action not taken: insufficient system storage',
        458: 'Unable to queue message',
        521: 'Machine does not accept mail',
        550: 'Requested action not taken: mailbox unavailable',
        551: 'User not local',
        552: 'Requested mail action aborted: exceeded storage allocation',
        553: 'Requested action not taken: mailbox name not allowed',
        571: 'Blocked',
    }
    """A dictionary of response codes and messages for bouncing mail."""

    # Dynamic-switch state; None means "fall back to the configuration".
    _delay = None
    _max_delay = 60
    """This is the maximum delay, to mitigate DoS risks."""
    _mode = None

    def __init__(self):
        """
        Initialise the SMTP protocol.

        .. note::

           Loads the configuration, defines the server's FQDN and generates
           an RFC 2822 Message-ID.
        """
        self.loop = asyncio.get_event_loop()
        super().__init__(
            asyncio.StreamReader(loop=self.loop),
            client_connected_cb=self._client_connected_cb,
            loop=self.loop)
        self.config = Config()
        self.fqdn = mailname()
        self.message_id = message_id()

    def connection_made(self, transport):
        """
        Tie a connection to blackhole to the SMTP protocol.

        :param transport:
        :type transport: :any:`asyncio.transport.Transport`
        """
        super().connection_made(transport)
        self.peer = transport.get_extra_info('peername')
        logger.debug('Peer %s connected', repr(self.peer))
        self.transport = transport
        self.connection_closed = False
        self._handler_coroutine = self.loop.create_task(self._handle_client())

    def _client_connected_cb(self, reader, writer):
        """
        Callback that binds a stream reader and writer to the SMTP Protocol.

        :param reader:
        :type reader: :any:`asyncio.streams.StreamReader`
        :param writer:
        :type writer: :any:`asyncio.streams.StreamWriter`
        """
        self._reader = reader
        self._writer = writer

    def connection_lost(self, exc):
        """
        Callback for when a connection is closed or lost.

        :param exc:
        :type exc:
        """
        logger.debug('Peer %s disconnected', repr(self.peer))
        super().connection_lost(exc)
        # Fix: write the flag the receive loops actually read. The original
        # set ``self._connection_closed``, which nothing checks, so
        # ``_handle_client``/``wait``/``do_DATA`` never observed the closure.
        self.connection_closed = True

    async def _handle_client(self):
        """
        Handle a client connection.

        This method greets the client and then accepts and handles each line
        the client sends, passing off to the currect verb handler.
        """
        await self.greet()
        while not self.connection_closed:
            line = await self.wait()
            logger.debug('RECV %s', line)
            line = line.decode('utf-8').rstrip('\r\n')
            handler = self.lookup_handler(line)
            if handler:
                await handler()
            else:
                await self.push(502, '5.5.2 Command not recognised')

    def get_auth_members(self):
        """
        Get a list of available AUTH mechanisms.

        :returns: :any:`list` -- AUTH mechanisms.
        """
        members = inspect.getmembers(self, predicate=inspect.ismethod)
        cmds = []
        for cmd, _ in members:
            if cmd.startswith('auth_'):
                cmd = cmd.replace('auth_', '').replace('_', '-')
                cmds.append(cmd)
        return cmds

    def lookup_auth_handler(self, line):
        """
        Look up a handler for the received AUTH mechanism.

        :param line:
        :type line: :any:`str`
        :returns: :any:`blackhole.smtp.Smtp.auth_MECHANISM`.
        """
        parts = line.split(' ')
        if len(parts) < 2:
            return None
        mechanism = parts[1].upper()
        if mechanism == 'CRAM-MD5':
            return self.auth_CRAM_MD5
        if mechanism not in self.get_auth_members():
            return self.do_UNKNOWN
        # AUTH PLAIN may carry the credentials inline as a third token.
        if len(parts) == 3 and mechanism == 'PLAIN':
            return self._auth_success
        return getattr(self, 'auth_{}'.format(mechanism.upper()),
                       self.do_UNKNOWN)

    async def help_AUTH(self):
        """
        Send help for AUTH mechanisms.

        https://blackhole.io/index.html#help-verb
        """
        mechanisms = ' '.join(self.get_auth_members())
        await self.push(250, 'Syntax: AUTH {}'.format(mechanisms))

    async def auth_LOGIN(self):
        """Handle an AUTH LOGIN request."""
        await self.push(334, 'VXNlcm5hbWU6')
        line = await self.wait()
        logger.debug('RECV %s', line)
        await self._auth_success()

    async def auth_CRAM_MD5(self):
        """Handle an AUTH CRAM-MD5 request."""
        message_id = base64.b64encode(self.message_id.encode('utf-8'), b'==')
        await self.push(334, message_id)
        line = await self.wait()
        logger.debug('RECV %s', line)
        await self._auth_success()

    async def wait(self):
        """
        Wait for data from the client.

        :returns: :any:`bytes` -- the received line, or ``b''`` on timeout
                  or when the connection is already closed.

        .. note::

           Also handles client timeouts if they wait too long before sending
           data. -- https://blackhole.io/configuration-options.html#timeout
        """
        # Fix: pre-bind ``line`` so the timeout branch (which never assigns
        # it) returns an empty line instead of raising UnboundLocalError.
        line = b''
        while not self.connection_closed:
            try:
                line = await asyncio.wait_for(self._reader.readline(),
                                              self.config.timeout,
                                              loop=self.loop)
            except asyncio.TimeoutError:
                await self.timeout()
            return line
        return line

    async def auth_PLAIN(self):
        """Handle an AUTH PLAIN request."""
        await self.push(334, ' ')
        line = await self.wait()
        logger.debug('RECV %s', line)
        await self._auth_success()

    async def _auth_success(self):
        """Send an authentication successful response."""
        await self.push(235, '2.7.0 Authentication successful')

    async def timeout(self):
        """
        Timeout a client connection.

        Sends the 421 timeout message to the client and closes the connection.

        https://blackhole.io/configuration-options.html#timeout
        """
        logger.debug('%s timed out, no data received for %d seconds',
                     repr(self.peer), self.config.timeout)
        await self.push(421, 'Timeout')
        await self.close()

    async def close(self):
        """Close the connection from the client."""
        logger.debug('Closing connection: %s', repr(self.peer))
        if self._writer:
            self._writer.close()
        # Fix: flag the closure on ``connection_closed`` (the attribute the
        # receive loops test), not the never-read ``_connection_closed``.
        self.connection_closed = True

    def lookup_handler(self, line):
        """
        Look up the SMTP VERB against a handler.

        :param line:
        :type line: :any:`str` -- e.g. HELO blackhole.io
        :returns: :any:`blackhole.smtp..Smtp.do_VERB`,
                  :any:`blackhole.smtp.Smtp.auth_MECHANISM` or
                  :any:`blackhole.smtp..Smtp.help_VERB`.
        """
        parts = line.split(None, 1)
        if parts:
            if parts[0].lower() == 'help':
                return self.lookup_help_handler(parts)
            if parts[0].lower() == 'auth':
                return self.lookup_auth_handler(line)
            else:
                return self.lookup_verb_handler(parts[0])
        return self.do_UNKNOWN

    def lookup_help_handler(self, parts):
        """
        Look up a help handler for the SMTP VERB.

        https://blackhole.io/index.html#help-verb

        :param parts:
        :type parts: :any:`list`
        :returns: :any:`blackhole.smtp.Smtp.help_VERB`.
        """
        if len(parts) > 1:
            cmd = 'help_{}'.format(parts[1].upper())
        else:
            cmd = 'do_HELP'
        return getattr(self, cmd, self.help_UNKNOWN)

    def lookup_verb_handler(self, verb):
        """
        Look up a handler for the SMTP VERB.

        :param verb:
        :type verb: :any:`str`
        :returns: :any:`blackhole.smtp.Smtp.do_VERB`.
        """
        return getattr(self, 'do_{}'.format(verb.upper()), self.do_UNKNOWN)

    async def push(self, code, msg):
        """
        Write a response code and message to the client.

        :param code:
        :type code: :any:`int` -- SMTP code, i.e. 250.
        :param msg:
        :type msg: :any:`str` -- The message for the SMTP code.
        """
        response = "{} {}\r\n".format(code, msg).encode('utf-8')
        logger.debug('SEND %s', response)
        self._writer.write(response)
        await self._writer.drain()

    async def greet(self):
        """Send a greeting to the client."""
        await self.push(220, '{} ESMTP'.format(self.fqdn))

    def get_help_members(self):
        """
        Get a list of HELP handlers for verbs.

        https://blackhole.io/index.html#help-verb

        :returns: :any:`list` -- help handler names.
        """
        members = inspect.getmembers(self, predicate=inspect.ismethod)
        cmds = []
        for cmd, _ in members:
            if cmd.startswith('help_') and not cmd == 'help_UNKNOWN':
                cmds.append(cmd.replace('help_', ''))
        return cmds

    async def do_HELP(self):
        """
        Send a response to the HELP verb.

        https://blackhole.io/index.html#help-verb
        """
        msg = ' '.join(self.get_help_members())
        await self.push(250, 'Supported commands: {}'.format(msg))

    async def help_HELO(self):
        """
        Send help for HELO verb.

        https://blackhole.io/index.html#help-verb
        """
        await self.push(250, 'Syntax: HELO domain.tld')

    async def do_HELO(self):
        """Send response to HELO verb."""
        await self.push(250, 'OK')

    async def help_EHLO(self):
        """
        Send help for the EHLO verb.

        https://blackhole.io/index.html#help-verb
        """
        await self.push(250, 'Syntax: EHLO domain.tld')

    async def do_EHLO(self):
        """Send response to EHLO verb."""
        response = "250-{}\r\n".format(self.fqdn).encode('utf-8')
        self._writer.write(response)
        logger.debug('SENT %s', response)
        auth = ' '.join(self.get_auth_members())
        responses = ('250-HELP', '250-PIPELINING', '250-AUTH {}'.format(auth),
                     '250-SIZE {}'.format(self.config.max_message_size),
                     '250-VRFY', '250-ETRN', '250-ENHANCEDSTATUSCODES',
                     '250-8BITMIME', '250-SMTPUTF8', '250 DSN', )
        for response in responses:
            response = "{}\r\n".format(response).encode('utf-8')
            logger.debug("SENT %s", response)
            self._writer.write(response)
        await self._writer.drain()

    async def help_MAIL(self):
        """
        Send help for the MAIL TO verb.

        https://blackhole.io/index.html#help-verb
        """
        await self.push(250, 'Syntax: MAIL FROM: <address>')

    async def do_MAIL(self):
        """Send response to MAIL TO verb."""
        await self.push(250, '2.1.0 OK')

    async def help_RCPT(self):
        """
        Send response to the RCPT TO verb.

        https://blackhole.io/index.html#help-verb
        """
        await self.push(250, 'Syntax: RCPT TO: <address>')

    async def do_RCPT(self):
        """Send response to RCPT TO verb."""
        await self.push(250, '2.1.5 OK')

    async def help_DATA(self):
        """
        Send help for the DATA verb.

        https://blackhole.io/index.html#help-verb
        """
        await self.push(250, 'Syntax: DATA')

    def process_header(self, line):
        """
        Process dynamic switch email headers.

        Reads x-blackhole-delay and x-blackhole-mode headers and re-configures
        on-the-fly how the email is handled based on these headers.

        https://blackhole.io/dynamic-switches.html

        :param line:
        :type line: :any:`str` -- an email header
        """
        logger.debug('HEADER RECV: %s', line)
        if self.config.dynamic_switch is False:
            logger.debug('Dynamic switches disabled, ignoring')
            return
        key, value = line.split(':')
        key, value = key.lower().strip(), value.lower().strip()
        if key == 'x-blackhole-delay':
            self.delay = value
        if key == 'x-blackhole-mode':
            self.mode = value

    async def response_from_mode(self):
        """
        Send a response based on the configured response mode.

        https://blackhole.io/dynamic-switches.html#dynamic-delay-switches
        https://blackhole.io/configuration-options.html#mode
        https://blackhole.io/modes.html

        Response mode is configured in configuration file and can be overridden
        by email headers, if enabled.
        """
        logger.debug('MODE: %s', self.mode)
        if self.mode == 'bounce':
            key = random.choice(list(self.bounce_responses.keys()))
            await self.push(key, self.bounce_responses[key])
        elif self.mode == 'random':
            resps = {250: '2.0.0 OK: queued as {}'.format(self.message_id), }
            resps.update(self.bounce_responses)
            key = random.choice(list(resps.keys()))
            await self.push(key, resps[key])
        else:
            msg = '2.0.0 OK: queued as {}'.format(self.message_id)
            await self.push(250, msg)

    async def do_DATA(self):
        r"""
        Send response to DATA verb and wait for mail data.

        This method will also implement timeout management and handling after
        receiving the DATA command and no new data is received.

        This method also implements the delay functionality, delaying a
        response after the final '\r\n.\r\n' line. --
        https://blackhole.io/configuration-options.html#delay
        https://blackhole.io/dynamic-switches.html#dynamic-delay-switches

        This method implements restrictions on message sizes. --
        https://blackhole.io/configuration-options.html#max-message-size
        """
        await self.push(354, 'End data with <CR><LF>.<CR><LF>')
        on_body = False
        msg = []
        while not self.connection_closed:
            line = await self.wait()
            logger.debug('RECV %s', line)
            msg.append(line)
            # Dynamic switches only apply while still in the header section.
            if line.lower().startswith(b'x-blackhole') and on_body is False:
                self.process_header(line.decode('utf-8').rstrip('\n'))
            if len(b''.join(msg)) > self.config.max_message_size:
                await self.push(552, 'Message size exceeds fixed maximum '
                                     'message size')
                return
            if line == b'\n':
                on_body = True
            if line == b'.\r\n':
                break
        if self.delay:
            logger.debug('DELAYING RESPONSE: %s seconds', self.delay)
            await asyncio.sleep(self.delay)
        await self.response_from_mode()

    async def do_STARTTLS(self):
        """STARTTLS is not implemented."""
        # It's currently not possible to implement STARTTLS due to lack of
        # support in asyncio. - https://bugs.python.org/review/23749/
        await self.do_NOT_IMPLEMENTED()

    async def help_NOOP(self):
        """
        Send help for the NOOP verb.

        https://blackhole.io/index.html#help-verb
        """
        await self.push(250, 'Syntax: NOOP')

    async def do_NOOP(self):
        """Send response to the NOOP verb."""
        await self.push(250, '2.0.0 OK')

    async def help_RSET(self):
        """
        Send help for the RSET verb.

        https://blackhole.io/index.html#help-verb
        """
        await self.push(250, 'Syntax: RSET')

    async def do_RSET(self):
        """
        Send response to the RSET verb.

        A new message id is generated and assigned.
        """
        old_msg_id = self.message_id
        self.message_id = message_id()
        logger.debug('%s is now %s', old_msg_id, self.message_id)
        await self.push(250, '2.0.0 OK')

    async def help_VRFY(self):
        """
        Send help for the VRFY verb.

        https://blackhole.io/index.html#help-verb
        """
        await self.push(250, 'Syntax: VRFY <address>')

    async def do_VRFY(self):
        """Send response to the VRFY verb."""
        await self.push(252, '2.0.0 OK')

    async def help_ETRN(self):
        """
        Send help for the ETRN verb.

        https://blackhole.io/index.html#help-verb
        """
        await self.push(250, 'Syntax: ETRN')

    async def do_ETRN(self):
        """Send response to the ETRN verb."""
        await self.push(250, 'Queueing started')

    async def help_QUIT(self):
        """
        Send help for the QUIT verb.

        https://blackhole.io/index.html#help-verb
        """
        await self.push(250, 'Syntax: QUIT')

    async def do_QUIT(self):
        """
        Send response to the QUIT verb.

        Closes the client connection.
        """
        await self.push(221, '2.0.0 Goodbye')
        self._handler_coroutine.cancel()
        await self.close()

    async def do_NOT_IMPLEMENTED(self):
        """Send a not implemented response."""
        await self.push(500, 'Not implemented')

    async def help_UNKNOWN(self):
        """Send available help verbs when an invalid verb is received."""
        msg = ' '.join(self.get_help_members())
        await self.push(501, 'Supported commands: {}'.format(msg))

    async def do_UNKNOWN(self):
        """Send response to unknown verb."""
        await self.push(502, '5.5.2 Command not recognised')

    @property
    def delay(self):
        """
        Delay after the DATA command completes.

        Value is in seconds, with a maximum value of 60 seconds.

        https://blackhole.io/configuration-options.html#delay
        https://blackhole.io/dynamic-switches.html#dynamic-delay-switches

        :returns: :any:`int` or :any:`None`
        """
        if self._delay is not None:
            return self._delay
        if self.config.delay is not None:
            return self.config.delay
        return None

    @delay.setter
    def delay(self, values):
        logger.debug('DELAY: Dymanic delay enabled')
        value = values.split(',')
        if len(value) == 2:
            self._delay_range(value)
        elif len(value) == 1:
            self._delay_single(value[0])
        else:
            logger.debug('DELAY: Invalid value(s): %s. Skipping', values)
            return

    def _delay_range(self, value):
        """
        Generate a delay from a range provided in the email header.

        https://blackhole.io/configuration-options.html#delay
        https://blackhole.io/dynamic-switches.html#dynamic-delay-switches

        :param value:
        :type value: :any:`str` -- a list of minimum and maximum values as a
                     string. i.e. (10, 20).

        .. note::

           Converted from a string of a list to a list of integers.
        """
        min_delay, max_delay = value
        min_delay, max_delay = min_delay.strip(), max_delay.strip()
        try:
            min_delay = int(min_delay)
            max_delay = int(max_delay)
        except ValueError:
            logger.debug('DELAY: Unable to convert %s, %s to integers. '
                         'Skipping', min_delay, max_delay)
            self._delay = None
            return
        if min_delay < 0 or max_delay < 0:
            logger.debug('DELAY: A value is less than 0: %s, %s. Skipping',
                         min_delay, max_delay)
            self._delay = None
            return
        if min_delay > max_delay:
            logger.debug('Min cannot be greater than max')
            self._delay = None
            return
        if max_delay > self._max_delay:
            logger.debug('DELAY: %s is higher than %s. %s is the hard coded '
                         'maximum delay for security.', max_delay,
                         self._max_delay, self._max_delay)
            max_delay = self._max_delay
        self._delay = random.randint(min_delay, max_delay)
        logger.debug('DELAY: Set to %s from range %s-%s', self._delay,
                     min_delay, max_delay)
        return

    def _delay_single(self, value):
        """
        Generate a delay from a value provided in an email header.

        https://blackhole.io/configuration-options.html#delay
        https://blackhole.io/dynamic-switches.html#dynamic-delay-switches

        :param value:
        :type value: :any:`str` -- time in seconds as a string.

        .. note:

           Converted from a string to an integer.
        """
        try:
            value = int(value)
        except ValueError:
            logger.debug('DELAY: Unable to convert %s to an integer. Skipping',
                         value)
            self._delay = None
            return
        logger.debug(value)
        if value < 0:
            logger.debug('DELAY: %s is less than 0. Skipping', value)
            self._delay = None
            return
        if value > self._max_delay:
            logger.debug('DELAY: %s is higher than %s. %s is the hard coded '
                         'maximum delay for security.', value, self._max_delay,
                         self._max_delay)
            self._delay = self._max_delay
            return
        logger.debug('DELAY: Set to %s', value)
        self._delay = value

    @property
    def mode(self):
        """
        How to respond to an email, based on configuration.

        Reponse is configured in the configuration file or configured from
        email headers, if configured to allow that option.

        https://blackhole.io/configuration-options.html#mode
        https://blackhole.io/dynamic-switches.html#dynamic-mode-switches

        :returns: :any:`str`
        """
        if self._mode is not None:
            return self._mode
        return self.config.mode

    @mode.setter
    def mode(self, value):
        if value not in ['accept', 'bounce', 'random']:
            logger.debug('MODE: %s is an invalid. Allowed modes: (accept, '
                         'bounce, random)', value)
            self._mode = None
            return
        logger.debug('MODE: Dynamic mode enabled. Mode set to %s', value)
        self._mode = value
# SIZE check in MAIL FROM (variant of the module follows below).
# (The MIT License)
#
# Copyright (c) 2016 Kura
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
blackhole.smtp.
This module contains the Smtp protocol.
"""
import asyncio
import base64
from email.utils import make_msgid as message_id
import inspect
import logging
import random
from blackhole.config import Config
from blackhole.utils import mailname
logger = logging.getLogger('blackhole.smtp')
class Smtp(asyncio.StreamReaderProtocol):
"""The class responsible for handling SMTP/SMTPS commands."""
bounce_responses = {
450: 'Requested mail action not taken: mailbox unavailable',
451: 'Requested action aborted: local error in processing',
452: 'Requested action not taken: insufficient system storage',
458: 'Unable to queue message',
521: 'Machine does not accept mail',
550: 'Requested action not taken: mailbox unavailable',
551: 'User not local',
552: 'Requested mail action aborted: exceeded storage allocation',
553: 'Requested action not taken: mailbox name not allowed',
571: 'Blocked',
}
"""A dictionary of response codes and messages for bouncing mail."""
_delay = None
_max_delay = 60
"""This is the maximum delay, to mitigate DoS risks."""
_mode = None
def __init__(self):
"""
Initialise the SMTP protocol.
.. note::
Loads the configuration, defines the server's FQDN and generates
an RFC 2822 Message-ID.
"""
self.loop = asyncio.get_event_loop()
super().__init__(
asyncio.StreamReader(loop=self.loop),
client_connected_cb=self._client_connected_cb,
loop=self.loop)
self.config = Config()
self.fqdn = mailname()
self.message_id = message_id()
def connection_made(self, transport):
"""
Tie a connection to blackhole to the SMTP protocol.
:param transport:
:type transport: :any:`asyncio.transport.Transport`
"""
super().connection_made(transport)
self.peer = transport.get_extra_info('peername')
logger.debug('Peer %s connected', repr(self.peer))
self.transport = transport
self.connection_closed = False
self._handler_coroutine = self.loop.create_task(self._handle_client())
def _client_connected_cb(self, reader, writer):
"""
Callback that binds a stream reader and writer to the SMTP Protocol.
:param reader:
:type reader: :any:`asyncio.streams.StreamReader`
:param writer:
:type writer: :any:`asyncio.streams.StreamWriter`
"""
self._reader = reader
self._writer = writer
def connection_lost(self, exc):
"""
Callback for when a connection is closed or lost.
:param exc:
:type exc:
"""
logger.debug('Peer %s disconnected', repr(self.peer))
super().connection_lost(exc)
self._connection_closed = True
async def _handle_client(self):
"""
Handle a client connection.
This method greets the client and then accepts and handles each line
the client sends, passing off to the currect verb handler.
"""
await self.greet()
while not self.connection_closed:
line = await self.wait()
logger.debug('RECV %s', line)
line = line.decode('utf-8').rstrip('\r\n')
self._line = line
handler = self.lookup_handler(line)
if handler:
await handler()
else:
await self.push(502, '5.5.2 Command not recognised')
def get_auth_members(self):
"""
Get a list of available AUTH mechanisms.
:returns: :any:`list` -- AUTH mechanisms.
"""
members = inspect.getmembers(self, predicate=inspect.ismethod)
cmds = []
for cmd, _ in members:
if cmd.startswith('auth_'):
cmd = cmd.replace('auth_', '').replace('_', '-')
cmds.append(cmd)
return cmds
def lookup_auth_handler(self, line):
"""
Look up a handler for the received AUTH mechanism.
:param line:
:type line: :any:`str`
:returns: :any:`blackhole.smtp.Smtp.auth_MECHANISM`.
"""
parts = line.split(' ')
if len(parts) < 2:
return None
mechanism = parts[1].upper()
if mechanism == 'CRAM-MD5':
return self.auth_CRAM_MD5
if mechanism not in self.get_auth_members():
return self.do_UNKNOWN
if len(parts) == 3 and mechanism == 'PLAIN':
return self._auth_success
return getattr(self, 'auth_{}'.format(mechanism.upper()),
self.do_UNKNOWN)
async def help_AUTH(self):
"""
Send help for AUTH mechanisms.
https://blackhole.io/index.html#help-verb
"""
mechanisms = ' '.join(self.get_auth_members())
await self.push(250, 'Syntax: AUTH {}'.format(mechanisms))
async def auth_LOGIN(self):
"""Handle an AUTH LOGIN request."""
await self.push(334, 'VXNlcm5hbWU6')
line = await self.wait()
logger.debug('RECV %s', line)
await self._auth_success()
async def auth_CRAM_MD5(self):
"""Handle an AUTH CRAM-MD5 request."""
message_id = base64.b64encode(self.message_id.encode('utf-8'), b'==')
await self.push(334, message_id)
line = await self.wait()
logger.debug('RECV %s', line)
await self._auth_success()
async def wait(self):
"""
Wait for data from the client.
:returns: :any:`str`
.. note::
Also handles client timeouts if they wait too long before sending
data. -- https://blackhole.io/configuration-options.html#timeout
"""
while not self.connection_closed:
try:
line = await asyncio.wait_for(self._reader.readline(),
self.config.timeout,
loop=self.loop)
except asyncio.TimeoutError:
await self.timeout()
return line
async def auth_PLAIN(self):
"""Handle an AUTH PLAIN request."""
await self.push(334, ' ')
line = await self.wait()
logger.debug('RECV %s', line)
await self._auth_success()
async def _auth_success(self):
"""Send an authentication successful response."""
await self.push(235, '2.7.0 Authentication successful')
async def timeout(self):
"""
Timeout a client connection.
Sends the 421 timeout message to the client and closes the connection.
https://blackhole.io/configuration-options.html#timeout
"""
logger.debug('%s timed out, no data received for %d seconds',
repr(self.peer), self.config.timeout)
await self.push(421, 'Timeout')
await self.close()
async def close(self):
"""Close the connection from the client."""
logger.debug('Closing connection: %s', repr(self.peer))
if self._writer:
self._writer.close()
self._connection_closed = True
def lookup_handler(self, line):
"""
Look up the SMTP VERB against a handler.
:param line:
:type line: :any:`str` -- e.g. HELO blackhole.io
:returns: :any:`blackhole.smtp..Smtp.do_VERB`,
:any:`blackhole.smtp.Smtp.auth_MECHANISM` or
:any:`blackhole.smtp..Smtp.help_VERB`.
"""
parts = line.split(None, 1)
if parts:
if parts[0].lower() == 'help':
return self.lookup_help_handler(parts)
if parts[0].lower() == 'auth':
return self.lookup_auth_handler(line)
else:
return self.lookup_verb_handler(parts[0])
return self.do_UNKNOWN
def lookup_help_handler(self, parts):
"""
Look up a help handler for the SMTP VERB.
https://blackhole.io/index.html#help-verb
:param parts:
:type parts: :any:`list`
:returns: :any:`blackhole.smtp.Smtp.help_VERB`.
"""
if len(parts) > 1:
cmd = 'help_{}'.format(parts[1].upper())
else:
cmd = 'do_HELP'
return getattr(self, cmd, self.help_UNKNOWN)
def lookup_verb_handler(self, verb):
"""
Look up a handler for the SMTP VERB.
:param verb:
:type verb: :any:`str`
:returns: :any:`blackhole.smtp.Smtp.do_VERB`.
"""
return getattr(self, 'do_{}'.format(verb.upper()), self.do_UNKNOWN)
async def push(self, code, msg):
"""
Write a response code and message to the client.
:param code:
:type code: :any:`int` -- SMTP code, i.e. 250.
:param msg:
:type msg: :any:`str` -- The message for the SMTP code.
"""
response = "{} {}\r\n".format(code, msg).encode('utf-8')
logger.debug('SEND %s', response)
self._writer.write(response)
await self._writer.drain()
async def greet(self):
"""Send a greeting to the client."""
await self.push(220, '{} ESMTP'.format(self.fqdn))
def get_help_members(self):
"""
Get a list of HELP handlers for verbs.
https://blackhole.io/index.html#help-verb
:returns: :any:`list` -- help handler names.
"""
members = inspect.getmembers(self, predicate=inspect.ismethod)
cmds = []
for cmd, _ in members:
if cmd.startswith('help_') and not cmd == 'help_UNKNOWN':
cmds.append(cmd.replace('help_', ''))
return cmds
async def do_HELP(self):
"""
Send a response to the HELP verb.
https://blackhole.io/index.html#help-verb
"""
msg = ' '.join(self.get_help_members())
await self.push(250, 'Supported commands: {}'.format(msg))
async def help_HELO(self):
"""
Send help for HELO verb.
https://blackhole.io/index.html#help-verb
"""
await self.push(250, 'Syntax: HELO domain.tld')
async def do_HELO(self):
"""Send response to HELO verb."""
await self.push(250, 'OK')
async def help_EHLO(self):
"""
Send help for the EHLO verb.
https://blackhole.io/index.html#help-verb
"""
await self.push(250, 'Syntax: EHLO domain.tld')
async def do_EHLO(self):
"""Send response to EHLO verb."""
response = "250-{}\r\n".format(self.fqdn).encode('utf-8')
self._writer.write(response)
logger.debug('SENT %s', response)
auth = ' '.join(self.get_auth_members())
responses = ('250-HELP', '250-PIPELINING', '250-AUTH {}'.format(auth),
'250-SIZE {}'.format(self.config.max_message_size),
'250-VRFY', '250-ETRN', '250-ENHANCEDSTATUSCODES',
'250-8BITMIME', '250-SMTPUTF8', '250 DSN', )
for response in responses:
response = "{}\r\n".format(response).encode('utf-8')
logger.debug("SENT %s", response)
self._writer.write(response)
await self._writer.drain()
async def help_MAIL(self):
"""
Send help for the MAIL TO verb.
https://blackhole.io/index.html#help-verb
"""
await self.push(250, 'Syntax: MAIL FROM: <address>')
async def _size_in_from(self):
"""
Look for the SIZE= parameter in MAIL FROM.
If a SIZE= parameter is found, we'll send a 552 response is the
size provided is larger than max_message_size.
:returns: :any:`bool`
"""
if 'size=' in self._line.lower():
size = self._line.split('size=')[1]
if size.isdigit() and int(size) > self.config.max_message_size:
await self.push(552, 'Message size exceeds fixed maximum '
'message size')
return False
return True
async def do_MAIL(self):
"""Send response to MAIL TO verb."""
if self._size_in_from() is False:
return
await self.push(250, '2.1.0 OK')
async def help_RCPT(self):
"""
Send response to the RCPT TO verb.
https://blackhole.io/index.html#help-verb
"""
await self.push(250, 'Syntax: RCPT TO: <address>')
async def do_RCPT(self):
"""Send response to RCPT TO verb."""
await self.push(250, '2.1.5 OK')
async def help_DATA(self):
"""
Send help for the DATA verb.
https://blackhole.io/index.html#help-verb
"""
await self.push(250, 'Syntax: DATA')
def process_header(self, line):
"""
Process dynamic switch email headers.
Reads x-blackhole-delay and x-blackhole-mode headers and re-configures
on-the-fly how the email is handled based on these headers.
https://blackhole.io/dynamic-switches.html
:param line:
:type line: :any:`str` -- an email header
"""
logger.debug('HEADER RECV: %s', line)
if self.config.dynamic_switch is False:
logger.debug('Dynamic switches disabled, ignoring')
return
key, value = line.split(':')
key, value = key.lower().strip(), value.lower().strip()
if key == 'x-blackhole-delay':
self.delay = value
if key == 'x-blackhole-mode':
self.mode = value
async def response_from_mode(self):
"""
Send a response based on the configured response mode.
https://blackhole.io/dynamic-switches.html#dynamic-delay-switches
https://blackhole.io/configuration-options.html#mode
https://blackhole.io/modes.html
Response mode is configured in configuration file and can be overridden
by email headers, if enabled.
"""
logger.debug('MODE: %s', self.mode)
if self.mode == 'bounce':
key = random.choice(list(self.bounce_responses.keys()))
await self.push(key, self.bounce_responses[key])
elif self.mode == 'random':
resps = {250: '2.0.0 OK: queued as {}'.format(self.message_id), }
resps.update(self.bounce_responses)
key = random.choice(list(resps.keys()))
await self.push(key, resps[key])
else:
msg = '2.0.0 OK: queued as {}'.format(self.message_id)
await self.push(250, msg)
    async def do_DATA(self):
        r"""
        Send response to DATA verb and wait for mail data.

        This method will also implement timeout management and handling
        after receiving the DATA command and no new data is received.

        This method also implements the delay functionality, delaying a
        response after the final '\r\n.\r\n' line. --
        https://blackhole.io/configuration-options.html#delay
        https://blackhole.io/dynamic-switches.html#dynamic-delay-switches

        This method implements restrictions on message sizes. --
        https://blackhole.io/configuration-options.html#max-message-size
        """
        await self.push(354, 'End data with <CR><LF>.<CR><LF>')
        # on_body flips to True once the blank header/body separator is seen
        on_body = False
        msg = []
        while not self.connection_closed:
            line = await self.wait()
            logger.debug('RECV %s', line)
            msg.append(line)
            # dynamic switches are honoured in the headers only, never in
            # the message body
            if line.lower().startswith(b'x-blackhole') and on_body is False:
                self.process_header(line.decode('utf-8').rstrip('\n'))
            # reject as soon as the accumulated message exceeds the limit
            if len(b''.join(msg)) > self.config.max_message_size:
                await self.push(552, 'Message size exceeds fixed maximum '
                                'message size')
                return
            if line == b'\n':
                # blank line marks the end of the headers
                on_body = True
            if line == b'.\r\n':
                # a lone dot terminates the DATA payload
                break
        if self.delay:
            # optional artificial delay before acknowledging the message
            logger.debug('DELAYING RESPONSE: %s seconds', self.delay)
            await asyncio.sleep(self.delay)
        await self.response_from_mode()
async def do_STARTTLS(self):
"""STARTTLS is not implemented."""
# It's currently not possible to implement STARTTLS due to lack of
# support in asyncio. - https://bugs.python.org/review/23749/
await self.do_NOT_IMPLEMENTED()
async def help_NOOP(self):
"""
Send help for the NOOP verb.
https://blackhole.io/index.html#help-verb
"""
await self.push(250, 'Syntax: NOOP')
async def do_NOOP(self):
"""Send response to the NOOP verb."""
await self.push(250, '2.0.0 OK')
async def help_RSET(self):
"""
Send help for the RSET verb.
https://blackhole.io/index.html#help-verb
"""
await self.push(250, 'Syntax: RSET')
async def do_RSET(self):
"""
Send response to the RSET verb.
A new message id is generated and assigned.
"""
old_msg_id = self.message_id
self.message_id = message_id()
logger.debug('%s is now %s', old_msg_id, self.message_id)
await self.push(250, '2.0.0 OK')
async def help_VRFY(self):
"""
Send help for the VRFY verb.
https://blackhole.io/index.html#help-verb
"""
await self.push(250, 'Syntax: VRFY <address>')
async def do_VRFY(self):
"""Send response to the VRFY verb."""
await self.push(252, '2.0.0 OK')
async def help_ETRN(self):
"""
Send help for the ETRN verb.
https://blackhole.io/index.html#help-verb
"""
await self.push(250, 'Syntax: ETRN')
async def do_ETRN(self):
"""Send response to the ETRN verb."""
await self.push(250, 'Queueing started')
async def help_QUIT(self):
"""
Send help for the QUIT verb.
https://blackhole.io/index.html#help-verb
"""
await self.push(250, 'Syntax: QUIT')
async def do_QUIT(self):
"""
Send response to the QUIT verb.
Closes the client connection.
"""
await self.push(221, '2.0.0 Goodbye')
self._handler_coroutine.cancel()
await self.close()
async def do_NOT_IMPLEMENTED(self):
"""Send a not implemented response."""
await self.push(500, 'Not implemented')
async def help_UNKNOWN(self):
"""Send available help verbs when an invalid verb is received."""
msg = ' '.join(self.get_help_members())
await self.push(501, 'Supported commands: {}'.format(msg))
async def do_UNKNOWN(self):
"""Send response to unknown verb."""
await self.push(502, '5.5.2 Command not recognised')
@property
def delay(self):
"""
Delay after the DATA command completes.
Value is in seconds, with a maximum value of 60 seconds.
https://blackhole.io/configuration-options.html#delay
https://blackhole.io/dynamic-switches.html#dynamic-delay-switches
:returns: :any:`int` or :any:`None`
"""
if self._delay is not None:
return self._delay
if self.config.delay is not None:
return self.config.delay
return None
@delay.setter
def delay(self, values):
logger.debug('DELAY: Dymanic delay enabled')
value = values.split(',')
if len(value) == 2:
self._delay_range(value)
elif len(value) == 1:
self._delay_single(value[0])
else:
logger.debug('DELAY: Invalid value(s): %s. Skipping', values)
return
def _delay_range(self, value):
"""
Generate a delay from a range provided in the email header.
https://blackhole.io/configuration-options.html#delay
https://blackhole.io/dynamic-switches.html#dynamic-delay-switches
:param value:
:type value: :any:`str` -- a list of minimum and maximum values as a
string. i.e. (10, 20).
.. note::
Converted from a string of a list to a list of integers.
"""
min_delay, max_delay = value
min_delay, max_delay = min_delay.strip(), max_delay.strip()
try:
min_delay = int(min_delay)
max_delay = int(max_delay)
except ValueError:
logger.debug('DELAY: Unable to convert %s, %s to integers. '
'Skipping', min_delay, max_delay)
self._delay = None
return
if min_delay < 0 or max_delay < 0:
logger.debug('DELAY: A value is less than 0: %s, %s. Skipping',
min_delay, max_delay)
self._delay = None
return
if min_delay > max_delay:
logger.debug('Min cannot be greater than max')
self._delay = None
return
if max_delay > self._max_delay:
logger.debug('DELAY: %s is higher than %s. %s is the hard coded '
'maximum delay for security.', max_delay,
self._max_delay, self._max_delay)
max_delay = self._max_delay
self._delay = random.randint(min_delay, max_delay)
logger.debug('DELAY: Set to %s from range %s-%s', self._delay,
min_delay, max_delay)
return
def _delay_single(self, value):
"""
Generate a delay from a value provided in an email header.
https://blackhole.io/configuration-options.html#delay
https://blackhole.io/dynamic-switches.html#dynamic-delay-switches
:param value:
:type value: :any:`str` -- time in seconds as a string.
.. note:
Converted from a string to an integer.
"""
try:
value = int(value)
except ValueError:
logger.debug('DELAY: Unable to convert %s to an integer. Skipping',
value)
self._delay = None
return
logger.debug(value)
if value < 0:
logger.debug('DELAY: %s is less than 0. Skipping', value)
self._delay = None
return
if value > self._max_delay:
logger.debug('DELAY: %s is higher than %s. %s is the hard coded '
'maximum delay for security.', value, self._max_delay,
self._max_delay)
self._delay = self._max_delay
return
logger.debug('DELAY: Set to %s', value)
self._delay = value
@property
def mode(self):
"""
How to respond to an email, based on configuration.
Reponse is configured in the configuration file or configured from
email headers, if configured to allow that option.
https://blackhole.io/configuration-options.html#mode
https://blackhole.io/dynamic-switches.html#dynamic-mode-switches
:returns: :any:`str`
"""
if self._mode is not None:
return self._mode
return self.config.mode
@mode.setter
def mode(self, value):
if value not in ['accept', 'bounce', 'random']:
logger.debug('MODE: %s is an invalid. Allowed modes: (accept, '
'bounce, random)', value)
self._mode = None
return
logger.debug('MODE: Dynamic mode enabled. Mode set to %s', value)
self._mode = value
|
# coding=utf-8
#
# Copyright 2014-2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import functools
import hashlib
import json
import logging as std_logging
import os

from eventlet import greenthread
from time import strftime
from time import time

from requests import HTTPError

from neutron.plugins.common import constants as plugin_const
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lib.exceptions import InvalidConfigurationOption
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import importutils

from f5.bigip import ManagementRoot
from f5_openstack_agent.lbaasv2.drivers.bigip.cluster_manager import \
    ClusterManager
from f5_openstack_agent.lbaasv2.drivers.bigip import constants_v2 as f5const
from f5_openstack_agent.lbaasv2.drivers.bigip.esd_filehandler import \
    EsdTagProcessor
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5ex
from f5_openstack_agent.lbaasv2.drivers.bigip.lbaas_builder import \
    LBaaSBuilder
from f5_openstack_agent.lbaasv2.drivers.bigip.lbaas_driver import \
    LBaaSBaseDriver
from f5_openstack_agent.lbaasv2.drivers.bigip import network_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.network_service import \
    NetworkServiceBuilder
from f5_openstack_agent.lbaasv2.drivers.bigip import resource_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.service_adapter import \
    ServiceModelAdapter
from f5_openstack_agent.lbaasv2.drivers.bigip import ssl_profile
from f5_openstack_agent.lbaasv2.drivers.bigip import stat_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.system_helper import \
    SystemHelper
from f5_openstack_agent.lbaasv2.drivers.bigip.tenants import \
    BigipTenantManager
from f5_openstack_agent.lbaasv2.drivers.bigip.utils import serialized
from f5_openstack_agent.lbaasv2.drivers.bigip.virtual_address import \
    VirtualAddress
# module-level logger
LOG = logging.getLogger(__name__)
# prefix for Linux network namespaces managed for LBaaS
NS_PREFIX = 'qlbaas-'
__VERSION__ = '0.1.1'
# configuration objects specific to iControl® driver
# XXX see /etc/neutron/services/f5/f5-openstack-agent.ini
OPTS = [  # XXX maybe we should make this a dictionary
    # --- BIG-IQ and Keystone credentials ---
    cfg.StrOpt(
        'bigiq_hostname',
        help='The hostname (name or IP address) to use for the BIG-IQ host'
    ),
    cfg.StrOpt(
        'bigiq_admin_username',
        default='admin',
        help='The admin username to use for BIG-IQ authentication',
    ),
    cfg.StrOpt(
        'bigiq_admin_password',
        default='[Provide password in config file]',
        secret=True,
        help='The admin password to use for BIG-IQ authentication'
    ),
    cfg.StrOpt(
        'openstack_keystone_uri',
        default='http://192.0.2.248:5000/',
        help='The admin password to use for BIG-IQ authentication'
    ),
    cfg.StrOpt(
        'openstack_admin_username',
        default='admin',
        help='The admin username to use for authentication '
             'with the Keystone service'
    ),
    cfg.StrOpt(
        'openstack_admin_password',
        default='[Provide password in config file]',
        secret=True,
        help='The admin password to use for authentication'
             ' with the Keystone service'
    ),
    cfg.StrOpt(
        'bigip_management_username',
        default='admin',
        help='The admin username that the BIG-IQ will use to manage '
             'discovered BIG-IPs'
    ),
    cfg.StrOpt(
        'bigip_management_password',
        default='[Provide password in config file]',
        secret=True,
        help='The admin password that the BIG-IQ will use to manage '
             'discovered BIG-IPs'
    ),
    # --- device topology and L2/L3 orchestration ---
    cfg.StrOpt(
        'f5_device_type', default='external',
        help='What type of device onboarding'
    ),
    cfg.StrOpt(
        'f5_ha_type', default='pair',
        help='Are we standalone, pair(active/standby), or scalen'
    ),
    cfg.ListOpt(
        'f5_external_physical_mappings', default=['default:1.1:True'],
        help='Mapping between Neutron physical_network to interfaces'
    ),
    cfg.StrOpt(
        'f5_vtep_folder', default='Common',
        help='Folder for the VTEP SelfIP'
    ),
    cfg.StrOpt(
        'f5_vtep_selfip_name', default=None,
        help='Name of the VTEP SelfIP'
    ),
    cfg.ListOpt(
        'advertised_tunnel_types', default=['gre', 'vxlan'],
        help='tunnel types which are advertised to other VTEPs'
    ),
    cfg.BoolOpt(
        'f5_populate_static_arp', default=False,
        help='create static arp entries based on service entries'
    ),
    cfg.StrOpt(
        'vlan_binding_driver',
        default=None,
        help='driver class for binding vlans to device ports'
    ),
    cfg.StrOpt(
        'interface_port_static_mappings',
        default=None,
        help='JSON encoded static mapping of'
             'devices to list of '
             'interface and port_id'
    ),
    cfg.StrOpt(
        'l3_binding_driver',
        default=None,
        help='driver class for binding l3 address to l2 ports'
    ),
    cfg.StrOpt(
        'l3_binding_static_mappings', default=None,
        help='JSON encoded static mapping of'
             'subnet_id to list of '
             'port_id, device_id list.'
    ),
    cfg.BoolOpt(
        'f5_route_domain_strictness', default=False,
        help='Strict route domain isolation'
    ),
    cfg.BoolOpt(
        'f5_common_external_networks', default=True,
        help='Treat external networks as common'
    ),
    # --- iControl connectivity ---
    cfg.StrOpt(
        'icontrol_vcmp_hostname',
        help='The hostname (name or IP address) to use for vCMP Host '
             'iControl access'
    ),
    cfg.StrOpt(
        'icontrol_hostname',
        default="10.190.5.7",
        help='The hostname (name or IP address) to use for iControl access'
    ),
    cfg.StrOpt(
        'icontrol_username', default='admin',
        help='The username to use for iControl access'
    ),
    cfg.StrOpt(
        'icontrol_password', default='admin', secret=True,
        help='The password to use for iControl access'
    ),
    cfg.IntOpt(
        'icontrol_connection_timeout', default=30,
        help='How many seconds to timeout a connection to BIG-IP'
    ),
    cfg.IntOpt(
        'icontrol_connection_retry_interval', default=10,
        help='How many seconds to wait between retry connection attempts'
    ),
    cfg.DictOpt(
        'common_network_ids', default={},
        help='network uuid to existing Common networks mapping'
    ),
    cfg.StrOpt(
        'icontrol_config_mode', default='objects',
        help='Whether to use iapp or objects for bigip configuration'
    ),
    cfg.IntOpt(
        'max_namespaces_per_tenant', default=1,
        help='How many routing tables the BIG-IP will allocate per tenant'
             ' in order to accommodate overlapping IP subnets'
    ),
    # --- certificate manager / Keystone for Barbican ---
    cfg.StrOpt(
        'cert_manager',
        default=None,
        help='Class name of the certificate mangager used for retrieving '
             'certificates and keys.'
    ),
    cfg.StrOpt(
        'auth_version',
        default=None,
        help='Keystone authentication version (v2 or v3) for Barbican client.'
    ),
    cfg.StrOpt(
        'os_project_id',
        default='service',
        help='OpenStack project ID.'
    ),
    cfg.StrOpt(
        'os_auth_url',
        default=None,
        help='OpenStack authentication URL.'
    ),
    cfg.StrOpt(
        'os_username',
        default=None,
        help='OpenStack user name for Keystone authentication.'
    ),
    cfg.StrOpt(
        'os_user_domain_name',
        default=None,
        help='OpenStack user domain name for Keystone authentication.'
    ),
    cfg.StrOpt(
        'os_project_name',
        default=None,
        help='OpenStack project name for Keystone authentication.'
    ),
    cfg.StrOpt(
        'os_project_domain_name',
        default=None,
        help='OpenStack domain name for Keystone authentication.'
    ),
    cfg.StrOpt(
        'os_password',
        default=None,
        help='OpenStack user password for Keystone authentication.'
    ),
    # --- hierarchical port binding / SSL profiles / misc ---
    cfg.StrOpt(
        'f5_network_segment_physical_network', default=None,
        help='Name of physical network to use for discovery of segment ID'
    ),
    cfg.IntOpt(
        'f5_network_segment_polling_interval', default=10,
        help='Seconds between periodic scans for disconnected virtual servers'
    ),
    cfg.IntOpt(
        'f5_network_segment_gross_timeout', default=300,
        help='Seconds to wait for a virtual server to become connected'
    ),
    cfg.StrOpt(
        'f5_parent_ssl_profile',
        default='clientssl',
        help='Parent profile used when creating client SSL profiles '
             'for listeners with TERMINATED_HTTPS protocols.'
    ),
    cfg.StrOpt(
        'os_tenant_name',
        default=None,
        help='OpenStack tenant name for Keystone authentication (v2 only).'
    ),
    cfg.BoolOpt(
        'trace_service_requests',
        default=False,
        help='Log service object.'
    )
]
def is_operational(method):
    """Decorator ensuring the driver is operational before provisioning.

    When the driver is not operational the wrapped call is skipped and a
    reconnect is attempted via ``_init_bigips()`` (returning None).
    IOErrors from the wrapped method are logged and re-raised.
    """
    # functools.wraps preserves the wrapped method's __name__/__doc__,
    # which the original wrapper lost.
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        instance = args[0]
        if instance.operational:
            try:
                return method(*args, **kwargs)
            except IOError as ioe:
                LOG.error('IO Error detected: %s' % method.__name__)
                LOG.error(str(ioe))
                raise ioe
        else:
            LOG.error('Cannot execute %s. Not operational. Re-initializing.'
                      % method.__name__)
            instance._init_bigips()
    return wrapper
class iControlDriver(LBaaSBaseDriver):
'''gets rpc plugin from manager (which instantiates, via importutils'''
    def __init__(self, conf, registerOpts=True):
        """Set up driver state, helpers and mode flags from configuration.

        :param conf: oslo.config object carrying the agent settings.
        :param registerOpts: when False, a test can turn off config option
            handling and set the options manually instead.
        """
        super(iControlDriver, self).__init__(conf)
        self.conf = conf
        if registerOpts:
            self.conf.register_opts(OPTS)
        self.initialized = False
        self.hostnames = None
        self.device_type = conf.f5_device_type
        self.plugin_rpc = None  # overrides base, same value
        self.agent_report_state = None  # overrides base, same value
        self.operational = False  # overrides base, same value
        self.driver_name = 'f5-lbaasv2-icontrol'
        #
        # BIG-IP® containers
        #
        # BIG-IPs which are currently active, keyed by hostname
        self.__bigips = {}
        self.__last_connect_attempt = None
        # HA and traffic group validation
        self.ha_validated = False
        self.tg_initialized = False
        # traffic groups discovered from BIG-IPs for service placement
        self.__traffic_groups = []
        # base configurations to report to Neutron agent state reports
        self.agent_configurations = {}  # overrides base, same value
        self.agent_configurations['device_drivers'] = [self.driver_name]
        self.agent_configurations['icontrol_endpoints'] = {}
        # service component managers (populated by _init_bigip_managers)
        self.tenant_manager = None
        self.cluster_manager = None
        self.system_helper = None
        self.lbaas_builder = None
        self.service_adapter = None
        self.vlan_binding = None
        self.l3_binding = None
        self.cert_manager = None  # overrides register_OPTS
        # server helpers
        self.stat_helper = stat_helper.StatHelper()
        self.network_helper = network_helper.NetworkHelper()
        # f5-sdk helpers
        self.vs_manager = resource_helper.BigIPResourceHelper(
            resource_helper.ResourceType.virtual)
        self.pool_manager = resource_helper.BigIPResourceHelper(
            resource_helper.ResourceType.pool)
        try:
            # debug logging of service requests received by driver
            if self.conf.trace_service_requests:
                path = '/var/log/neutron/service/'
                if not os.path.exists(path):
                    os.makedirs(path)
                self.file_name = path + strftime("%H%M%S-%m%d%Y") + '.json'
                # seed the trace file with an (almost) empty JSON list
                with open(self.file_name, 'w') as fp:
                    fp.write('[{}] ')
            # driver mode settings - GRM vs L2 adjacent
            if self.conf.f5_global_routed_mode:
                LOG.info('WARNING - f5_global_routed_mode enabled.'
                         ' There will be no L2 or L3 orchestration'
                         ' or tenant isolation provisioned. All vips'
                         ' and pool members must be routable through'
                         ' pre-provisioned SelfIPs.')
                self.conf.use_namespaces = False
                self.conf.f5_snat_mode = True
                self.conf.f5_snat_addresses_per_subnet = 0
                self.agent_configurations['tunnel_types'] = []
                self.agent_configurations['bridge_mappings'] = {}
            else:
                self.agent_configurations['tunnel_types'] = \
                    self.conf.advertised_tunnel_types
                for net_id in self.conf.common_network_ids:
                    LOG.debug('network %s will be mapped to /Common/%s'
                              % (net_id, self.conf.common_network_ids[net_id]))
                self.agent_configurations['common_networks'] = \
                    self.conf.common_network_ids
                LOG.debug('Setting static ARP population to %s'
                          % self.conf.f5_populate_static_arp)
                self.agent_configurations['f5_common_external_networks'] = \
                    self.conf.f5_common_external_networks
                f5const.FDB_POPULATE_STATIC_ARP = \
                    self.conf.f5_populate_static_arp
            # parse the icontrol_hostname setting
            self._init_bigip_hostnames()
            # instantiate the managers
            self._init_bigip_managers()
            self.initialized = True
            LOG.debug('iControlDriver loaded successfully')
        except Exception as exc:
            LOG.error("exception in intializing driver %s" % str(exc))
            self._set_agent_status(False)
def connect(self):
# initialize communications wiht BIG-IP via iControl
try:
self._init_bigips()
except Exception as exc:
LOG.error("exception in intializing communicatoins to BIG-IPs %s"
% str(exc))
self._set_agent_status(False)
def _init_bigip_managers(self):
if self.conf.vlan_binding_driver:
try:
self.vlan_binding = importutils.import_object(
self.conf.vlan_binding_driver, self.conf, self)
except ImportError:
LOG.error('Failed to import VLAN binding driver: %s'
% self.conf.vlan_binding_driver)
if self.conf.l3_binding_driver:
print('self.conf.l3_binding_driver')
try:
self.l3_binding = importutils.import_object(
self.conf.l3_binding_driver, self.conf, self)
except ImportError:
LOG.error('Failed to import L3 binding driver: %s'
% self.conf.l3_binding_driver)
else:
LOG.debug('No L3 binding driver configured.'
' No L3 binding will be done.')
if self.conf.cert_manager:
try:
self.cert_manager = importutils.import_object(
self.conf.cert_manager, self.conf)
except ImportError as import_err:
LOG.error('Failed to import CertManager: %s.' %
import_err.message)
raise
except Exception as err:
LOG.error('Failed to initialize CertManager. %s' % err.message)
# re-raise as ImportError to cause agent exit
raise ImportError(err.message)
self.service_adapter = ServiceModelAdapter(self.conf)
self.tenant_manager = BigipTenantManager(self.conf, self)
self.cluster_manager = ClusterManager()
self.system_helper = SystemHelper()
self.lbaas_builder = LBaaSBuilder(self.conf, self)
if self.conf.f5_global_routed_mode:
self.network_builder = None
else:
self.network_builder = NetworkServiceBuilder(
self.conf.f5_global_routed_mode,
self.conf,
self,
self.l3_binding)
def _init_bigip_hostnames(self):
# Validate and parse bigip credentials
if not self.conf.icontrol_hostname:
raise InvalidConfigurationOption(
opt_name='icontrol_hostname',
opt_value='valid hostname or IP address'
)
if not self.conf.icontrol_username:
raise InvalidConfigurationOption(
opt_name='icontrol_username',
opt_value='valid username'
)
if not self.conf.icontrol_password:
raise InvalidConfigurationOption(
opt_name='icontrol_password',
opt_value='valid password'
)
self.hostnames = self.conf.icontrol_hostname.split(',')
self.hostnames = [item.strip() for item in self.hostnames]
self.hostnames = sorted(self.hostnames)
# initialize per host agent_configurations
for hostname in self.hostnames:
self.__bigips[hostname] = bigip = type('', (), {})()
bigip.hostname = hostname
bigip.status = 'creating'
bigip.status_message = 'creating BIG-IP from iControl hostnames'
self.agent_configurations[
'icontrol_endpoints'][hostname] = {}
self.agent_configurations[
'icontrol_endpoints'][hostname]['failover_state'] = \
'undiscovered'
self.agent_configurations[
'icontrol_endpoints'][hostname]['status'] = 'unknown'
self.agent_configurations[
'icontrol_endpoints'][hostname]['status_message'] = ''
def _init_bigips(self):
# Connect to all BIG-IP®s
if self.operational:
LOG.debug('iControl driver reports connection is operational')
return
LOG.debug('initializing communications to BIG-IPs')
try:
# setup logging options
if not self.conf.debug:
# sudslog = std_logging.getLogger('suds.client')
# sudslog.setLevel(std_logging.FATAL)
requests_log = std_logging.getLogger(
"requests.packages.urllib3")
requests_log.setLevel(std_logging.ERROR)
requests_log.propagate = False
else:
requests_log = std_logging.getLogger(
"requests.packages.urllib3")
requests_log.setLevel(std_logging.DEBUG)
requests_log.propagate = True
self.__last_connect_attempt = datetime.datetime.now()
for hostname in self.hostnames:
# connect to each BIG-IP and set it status
bigip = self._open_bigip(hostname)
if bigip.status == 'active':
# set the status down until we assure initialized
bigip.status = 'initializing'
bigip.status_message = 'initializing HA viability'
LOG.debug('initializing HA viability %s' % hostname)
device_group_name = None
if not self.ha_validated:
device_group_name = self._validate_ha(bigip)
LOG.debug('HA validated from %s with DSG %s' %
(hostname, device_group_name))
self.ha_validated = True
if not self.tg_initialized:
self._init_traffic_groups(bigip)
LOG.debug('known traffic groups initialized',
' from %s as %s' %
(hostname, self.__traffic_groups))
self.tg_initialized = True
LOG.debug('initializing bigip %s' % hostname)
self._init_bigip(bigip, hostname, device_group_name)
LOG.debug('initializing agent configurations %s'
% hostname)
self._init_agent_config(bigip)
# Assure basic BIG-IP HA is operational
LOG.debug('validating HA state for %s' % hostname)
bigip.status = 'validating_HA'
bigip.status_message = 'validating the current HA state'
if self._validate_ha_operational(bigip):
LOG.debug('setting status to active for %s' % hostname)
bigip.status = 'active'
bigip.status_message = 'BIG-IP ready for provisioning'
self._post_init()
else:
LOG.debug('setting status to error for %s' % hostname)
bigip.status = 'error'
bigip.status_message = 'BIG-IP is not operational'
self._set_agent_status(False)
else:
LOG.error('error opening BIG-IP %s - %s:%s'
% (hostname, bigip.status, bigip.status_message))
self._set_agent_status(False)
except Exception as exc:
LOG.error('Invalid agent configuration: %s' % exc.message)
raise
self._set_agent_status(force_resync=True)
def _init_errored_bigips(self):
try:
errored_bigips = self.get_errored_bigips_hostnames()
if errored_bigips:
LOG.debug('attempting to recover %s BIG-IPs' %
len(errored_bigips))
for hostname in errored_bigips:
# try to connect and set status
bigip = self._open_bigip(hostname)
if bigip.status == 'active':
# set the status down until we assure initialized
bigip.status = 'initializing'
bigip.status_message = 'initializing HA viability'
LOG.debug('initializing HA viability %s' % hostname)
LOG.debug('proceeding to initialize %s' % hostname)
device_group_name = None
if not self.ha_validated:
device_group_name = self._validate_ha(bigip)
LOG.debug('HA validated from %s with DSG %s' %
(hostname, device_group_name))
self.ha_validated = True
if not self.tg_initialized:
self._init_traffic_groups(bigip)
LOG.debug('known traffic groups initialized',
' from %s as %s' %
(hostname, self.__traffic_groups))
self.tg_initialized = True
LOG.debug('initializing bigip %s' % hostname)
self._init_bigip(bigip, hostname, device_group_name)
LOG.debug('initializing agent configurations %s'
% hostname)
self._init_agent_config(bigip)
# Assure basic BIG-IP HA is operational
LOG.debug('validating HA state for %s' % hostname)
bigip.status = 'validating_HA'
bigip.status_message = \
'validating the current HA state'
if self._validate_ha_operational(bigip):
LOG.debug('setting status to active for %s'
% hostname)
bigip.status = 'active'
bigip.status_message = \
'BIG-IP ready for provisioning'
self._post_init()
self._set_agent_status(True)
else:
LOG.debug('setting status to error for %s'
% hostname)
bigip.status = 'error'
bigip.status_message = 'BIG-IP is not operational'
self._set_agent_status(False)
else:
LOG.debug('there are no BIG-IPs with error status')
except Exception as exc:
LOG.error('Invalid agent configuration: %s' % exc.message)
raise
    def _open_bigip(self, hostname):
        """Open an iControl connection to *hostname* and record its status.

        Returns the object stored in the device map for this hostname: a
        ManagementRoot session on success (with ad-hoc status attributes
        stashed on it), otherwise a placeholder object whose ``status``
        is 'error'.
        """
        try:
            bigip = self.__bigips[hostname]
            # only 'creating' or 'error' devices may (re)connect; anything
            # else already has a live or in-progress session
            if bigip.status not in ['creating', 'error']:
                LOG.debug('BIG-IP %s status invalid %s to open a connection'
                          % (hostname, bigip.status))
                return bigip
            bigip.status = 'connecting'
            bigip.status_message = 'requesting iControl endpoint'
            LOG.info('opening iControl connection to %s @ %s' %
                     (self.conf.icontrol_username, hostname))
            # NOTE: this rebinds 'bigip' -- the placeholder above is replaced
            # in the device map by the ManagementRoot session
            bigip = ManagementRoot(hostname,
                                   self.conf.icontrol_username,
                                   self.conf.icontrol_password,
                                   timeout=f5const.DEVICE_CONNECTION_TIMEOUT)
            bigip.status = 'active'
            bigip.status_message = 'connected to BIG-IP'
            self.__bigips[hostname] = bigip
            return bigip
        except Exception as exc:
            LOG.exception('could not communicate with ' +
                          'iControl device: %s' % hostname)
            # keep an error placeholder so a later retry can find it
            errbigip = type('', (), {})()
            errbigip.hostname = hostname
            errbigip.status = 'error'
            errbigip.status_message = str(exc)[:80]
            self.__bigips[hostname] = errbigip
            return errbigip
    def _init_bigip(self, bigip, hostname, check_group_name=None):
        """Validate and prepare one connected BIG-IP for provisioning.

        Verifies software version, provisioned extra management memory,
        HA sync state, device-group membership and the required common
        networks, then seeds the per-device bookkeeping attributes used
        by the service builders.  On any failure the device is marked
        errored and the exception re-raised.

        :param bigip: open iControl connection object.
        :param hostname: management address of the device.
        :param check_group_name: expected sync-failover device group;
            a mismatch raises BigIPClusterInvalidHA.
        :returns: the same ``bigip``, fully initialized.
        """
        try:
            major_version, minor_version = self._validate_bigip_version(
                bigip, hostname)
            device_group_name = None
            # extra management memory must be provisioned for LBaaS use
            extramb = self.system_helper.get_provision_extramb(bigip)
            if int(extramb) < f5const.MIN_EXTRA_MB:
                raise f5ex.ProvisioningExtraMBValidateFailed(
                    'Device %s BIG-IP not provisioned for '
                    'management LARGE.' % hostname)
            # clustered HA modes must not report a Standalone sync status
            if self.conf.f5_ha_type == 'pair' and \
                    self.cluster_manager.get_sync_status(bigip) == \
                    'Standalone':
                raise f5ex.BigIPClusterInvalidHA(
                    'HA mode is pair and bigip %s in standalone mode'
                    % hostname)
            if self.conf.f5_ha_type == 'scalen' and \
                    self.cluster_manager.get_sync_status(bigip) == \
                    'Standalone':
                raise f5ex.BigIPClusterInvalidHA(
                    'HA mode is scalen and bigip %s in standalone mode'
                    % hostname)
            if self.conf.f5_ha_type != 'standalone':
                # the device must belong to a sync-failover device group,
                # and to the expected one when check_group_name is given
                device_group_name = \
                    self.cluster_manager.get_device_group(bigip)
                if not device_group_name:
                    raise f5ex.BigIPClusterInvalidHA(
                        'HA mode is %s and no sync failover '
                        'device group found for device %s.'
                        % (self.conf.f5_ha_type, hostname))
                if check_group_name and device_group_name != check_group_name:
                    raise f5ex.BigIPClusterInvalidHA(
                        'Invalid HA. Device %s is in device group'
                        ' %s but should be in %s.'
                        % (hostname, device_group_name, check_group_name))
                bigip.device_group_name = device_group_name
            if self.network_builder:
                # every configured common network must already exist
                for network in self.conf.common_network_ids.values():
                    if not self.network_builder.vlan_exists(bigip,
                                                            network,
                                                            folder='Common'):
                        raise f5ex.MissingNetwork(
                            'Common network %s on %s does not exist'
                            % (network, bigip.hostname))
            bigip.device_name = self.cluster_manager.get_device_name(bigip)
            bigip.mac_addresses = self.system_helper.get_mac_addresses(bigip)
            LOG.debug("Initialized BIG-IP %s with MAC addresses %s" %
                      (bigip.device_name, ', '.join(bigip.mac_addresses)))
            bigip.device_interfaces = \
                self.system_helper.get_interface_macaddresses_dict(bigip)
            # per-device caches consumed by the service builders
            bigip.assured_networks = {}
            bigip.assured_tenant_snat_subnets = {}
            bigip.assured_gateway_subnets = []
            if self.conf.f5_ha_type != 'standalone':
                self.cluster_manager.disable_auto_sync(
                    device_group_name, bigip)
            # validate VTEP SelfIPs
            if not self.conf.f5_global_routed_mode:
                self.network_builder.initialize_tunneling(bigip)
            # Turn off tunnel syncing between BIG-IP
            # as our VTEPs properly use only local SelfIPs
            if self.system_helper.get_tunnel_sync(bigip) == 'enable':
                self.system_helper.set_tunnel_sync(bigip, enabled=False)
            LOG.debug('connected to iControl %s @ %s ver %s.%s'
                      % (self.conf.icontrol_username, hostname,
                         major_version, minor_version))
        except Exception as exc:
            bigip.status = 'error'
            bigip.status_message = str(exc)[:80]
            raise
        return bigip
    def _post_init(self):
        """Finish driver initialization once devices are connected.

        Initializes vCMP, publishes dynamic agent configuration, wires
        the optional VLAN/L3 binding helpers, runs the network builder
        post-init, and loads enhanced service definitions (ESD) from
        the config directory.  Invalid ESD files leave the agent
        marked non-operational.
        """
        # After we have a connection to the BIG-IPs, initialize vCMP
        # on all connected BIG-IPs
        if self.network_builder:
            self.network_builder.initialize_vcmp()
        self.agent_configurations['network_segment_physical_network'] = \
            self.conf.f5_network_segment_physical_network
        LOG.info('iControlDriver initialized to %d bigips with username:%s'
                 % (len(self.get_active_bigips()),
                    self.conf.icontrol_username))
        LOG.info('iControlDriver dynamic agent configurations:%s'
                 % self.agent_configurations)
        if self.vlan_binding:
            LOG.debug(
                'getting BIG-IP device interface for VLAN Binding')
            self.vlan_binding.register_bigip_interfaces()
        if self.l3_binding:
            LOG.debug('getting BIG-IP MAC Address for L3 Binding')
            self.l3_binding.register_bigip_mac_addresses()
        if self.network_builder:
            self.network_builder.post_init()
        # read enhanced services definitions
        esd_dir = os.path.join(self.get_config_dir(), 'esd')
        esd = EsdTagProcessor(esd_dir)
        try:
            esd.process_esd(self.get_all_bigips())
            self.lbaas_builder.init_esd(esd)
        except f5ex.esdJSONFileInvalidException as err:
            # bad ESD definitions are fatal for provisioning readiness
            LOG.error("unable to initialize ESD. Error: %s.", err.message)
            self._set_agent_status(False)
def _validate_ha(self, bigip):
# if there was only one address supplied and
# this is not a standalone device, get the
# devices trusted by this device. """
device_group_name = None
if self.conf.f5_ha_type == 'standalone':
if len(self.hostnames) != 1:
raise f5ex.BigIPClusterInvalidHA(
'HA mode is standalone and %d hosts found.'
% len(self.hostnames))
device_group_name = 'standalone'
elif self.conf.f5_ha_type == 'pair':
device_group_name = self.cluster_manager.\
get_device_group(bigip)
if len(self.hostnames) != 2:
mgmt_addrs = []
devices = self.cluster_manager.devices(bigip,
device_group_name)
for device in devices:
mgmt_addrs.append(
self.cluster_manager.get_mgmt_addr_by_device(device))
self.hostnames = mgmt_addrs
if len(self.hostnames) != 2:
raise f5ex.BigIPClusterInvalidHA(
'HA mode is pair and %d hosts found.'
% len(self.hostnames))
elif self.conf.f5_ha_type == 'scalen':
device_group_name = self.cluster_manager.\
get_device_group(bigip)
if len(self.hostnames) < 2:
mgmt_addrs = []
devices = self.cluster_manager.devices(bigip,
device_group_name)
for device in devices:
mgmt_addrs.append(
self.cluster_manager.get_mgmt_addr_by_device(
bigip, device))
self.hostnames = mgmt_addrs
return device_group_name
def _validate_ha_operational(self, bigip):
if self.conf.f5_ha_type == 'standalone':
return True
else:
# how many active BIG-IPs are there?
active_bigips = self.get_active_bigips()
if active_bigips:
sync_status = self.cluster_manager.get_sync_status(bigip)
if sync_status in ['Disconnected', 'Sync Failure']:
if len(active_bigips) > 1:
# the device should not be in the disconnected state
return False
else:
# it should be in the same sync-failover group
# as the rest of the active bigips
device_group_name = \
self.cluster_manager.get_device_group(bigip)
for ab in active_bigips:
adgn = self.cluster_manager.get_device_group(ab)
if not adgn == device_group_name:
return False
return True
else:
return True
def _init_agent_config(self, bigip):
# Init agent config
ic_host = {}
ic_host['version'] = self.system_helper.get_version(bigip)
ic_host['device_name'] = bigip.device_name
ic_host['platform'] = self.system_helper.get_platform(bigip)
ic_host['serial_number'] = self.system_helper.get_serial_number(bigip)
ic_host['status'] = bigip.status
ic_host['status_message'] = bigip.status_message
ic_host['failover_state'] = self.get_failover_state(bigip)
ic_host['local_ip'] = bigip.local_ip
self.agent_configurations['icontrol_endpoints'][bigip.hostname] = \
ic_host
if self.network_builder:
self.agent_configurations['bridge_mappings'] = \
self.network_builder.interface_mapping
def _set_agent_status(self, force_resync=False):
for hostname in self.__bigips:
bigip = self.__bigips[hostname]
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'status'] = bigip.status
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'status_message'] = bigip.status_message
# Policy - if any BIG-IP are active we're operational
if self.get_active_bigips():
self.operational = True
else:
self.operational = False
if self.agent_report_state:
self.agent_report_state(force_resync=force_resync)
def get_failover_state(self, bigip):
try:
if hasattr(bigip, 'tm'):
fs = bigip.tm.sys.dbs.db.load(name='failover.state')
bigip.failover_state = fs.value
return bigip.failover_state
else:
return 'error'
except Exception as exc:
LOG.exception('Error getting %s failover state' % bigip.hostname)
bigip.status = 'error'
bigip.status_message = str(exc)[:80]
self._set_agent_status(False)
return 'error'
def get_agent_configurations(self):
for hostname in self.__bigips:
bigip = self.__bigips[hostname]
if bigip.status == 'active':
failover_state = self.get_failover_state(bigip)
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'failover_state'] = failover_state
else:
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'failover_state'] = 'unknown'
self.agent_configurations['icontrol_endpoints'][
bigip.hostname]['status'] = bigip.status
self.agent_configurations['icontrol_endpoints'][
bigip.hostname]['status_message'] = bigip.status_message
self.agent_configurations['operational'] = \
self.operational
LOG.debug('agent configurations are: %s' % self.agent_configurations)
return dict(self.agent_configurations)
def recover_errored_devices(self):
# trigger a retry on errored BIG-IPs
try:
self._init_errored_bigips()
except Exception as exc:
LOG.error('Could not recover devices: %s' % exc.message)
def backend_integrity(self):
if self.operational:
return True
return False
    def generate_capacity_score(self, capacity_policy=None):
        """Generate the capacity score of connected devices.

        For each metric named in *capacity_policy* this calls the
        driver's matching ``get_<metric>`` method on every active
        device, keeps the highest per-device value, and divides it by
        the policy's configured maximum for that metric.  The returned
        score is the largest such ratio across all metrics, or 0 when
        no policy is supplied.
        """
        if capacity_policy:
            highest_metric = 0.0
            highest_metric_name = None
            my_methods = dir(self)
            bigips = self.get_all_bigips()
            for metric in capacity_policy:
                func_name = 'get_' + metric
                if func_name in my_methods:
                    max_capacity = int(capacity_policy[metric])
                    metric_func = getattr(self, func_name)
                    metric_value = 0
                    for bigip in bigips:
                        if bigip.status == 'active':
                            global_stats = \
                                self.stat_helper.get_global_statistics(bigip)
                            value = int(
                                metric_func(bigip=bigip,
                                            global_statistics=global_stats)
                            )
                            LOG.debug('calling capacity %s on %s returned: %s'
                                      % (func_name, bigip.hostname, value))
                        else:
                            # inactive devices contribute nothing
                            value = 0
                        # track the busiest device for this metric
                        if value > metric_value:
                            metric_value = value
                    metric_capacity = float(metric_value) / float(max_capacity)
                    if metric_capacity > highest_metric:
                        highest_metric = metric_capacity
                        highest_metric_name = metric
                else:
                    LOG.warn('capacity policy has method '
                             '%s which is not implemented in this driver'
                             % metric)
            LOG.debug('capacity score: %s based on %s'
                      % (highest_metric, highest_metric_name))
            return highest_metric
        return 0
def set_context(self, context):
# Context to keep for database access
if self.network_builder:
self.network_builder.set_context(context)
def set_plugin_rpc(self, plugin_rpc):
# Provide Plugin RPC access
self.plugin_rpc = plugin_rpc
def set_tunnel_rpc(self, tunnel_rpc):
# Provide FDB Connector with ML2 RPC access
if self.network_builder:
self.network_builder.set_tunnel_rpc(tunnel_rpc)
def set_l2pop_rpc(self, l2pop_rpc):
# Provide FDB Connector with ML2 RPC access
if self.network_builder:
self.network_builder.set_l2pop_rpc(l2pop_rpc)
def set_agent_report_state(self, report_state_callback):
"""Set Agent Report State"""
self.agent_report_state = report_state_callback
def service_exists(self, service):
return self._service_exists(service)
def flush_cache(self):
# Remove cached objects so they can be created if necessary
for bigip in self.get_all_bigips():
bigip.assured_networks = {}
bigip.assured_tenant_snat_subnets = {}
bigip.assured_gateway_subnets = []
@serialized('get_all_deployed_pools')
@is_operational
def get_all_deployed_pools(self):
LOG.debug('getting all deployed pools on BIG-IPs')
deployed_pool_dict = {}
for bigip in self.get_all_bigips():
folders = self.system_helper.get_folders(bigip)
for folder in folders:
tenant_id = folder[len(self.service_adapter.prefix):]
if str(folder).startswith(self.service_adapter.prefix):
resource = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.pool)
deployed_pools = resource.get_resources(bigip, folder)
if deployed_pools:
for pool in deployed_pools:
pool_id = \
pool.name[len(self.service_adapter.prefix):]
if pool_id in deployed_pool_dict:
deployed_pool_dict[pool_id][
'hostnames'].append(bigip.hostname)
else:
deployed_pool_dict[pool_id] = {
'id': pool_id,
'tenant_id': tenant_id,
'hostnames': [bigip.hostname]
}
return deployed_pool_dict
    @serialized('get_all_deployed_loadbalancers')
    @is_operational
    def get_all_deployed_loadbalancers(self, purge_orphaned_folders=False):
        """Inventory deployed loadbalancers (virtual addresses) per device.

        Returns ``{lb_id: {'id', 'tenant_id', 'hostnames'}}``.  Folders
        with no virtual addresses are re-checked after a delay (tenant
        creation may still be in flight); persistently empty folders
        are treated as orphans and optionally purged.
        """
        LOG.debug('getting all deployed loadbalancers on BIG-IPs')
        deployed_lb_dict = {}
        for bigip in self.get_all_bigips():
            folders = self.system_helper.get_folders(bigip)
            for folder in folders:
                # folder name is the prefixed tenant id
                tenant_id = folder[len(self.service_adapter.prefix):]
                if str(folder).startswith(self.service_adapter.prefix):
                    resource = resource_helper.BigIPResourceHelper(
                        resource_helper.ResourceType.virtual_address)
                    deployed_lbs = resource.get_resources(bigip, folder)
                    if deployed_lbs:
                        for lb in deployed_lbs:
                            lb_id = lb.name[len(self.service_adapter.prefix):]
                            if lb_id in deployed_lb_dict:
                                deployed_lb_dict[lb_id][
                                    'hostnames'].append(bigip.hostname)
                            else:
                                deployed_lb_dict[lb_id] = {
                                    'id': lb_id,
                                    'tenant_id': tenant_id,
                                    'hostnames': [bigip.hostname]
                                }
                    else:
                        # delay to assure we are not in the tenant creation
                        # process before a virtual address is created.
                        greenthread.sleep(10)
                        deployed_lbs = resource.get_resources(bigip, folder)
                        if deployed_lbs:
                            for lb in deployed_lbs:
                                lb_id = lb.name[
                                    len(self.service_adapter.prefix):]
                                # NOTE(review): unlike the branch above,
                                # this entry carries no 'hostnames' key --
                                # confirm callers tolerate the difference
                                deployed_lb_dict[lb_id] = \
                                    {'id': lb_id, 'tenant_id': tenant_id}
                        else:
                            # Orphaned folder!
                            if purge_orphaned_folders:
                                try:
                                    self.system_helper.purge_folder_contents(
                                        bigip, folder)
                                    self.system_helper.purge_folder(
                                        bigip, folder)
                                    LOG.error('orphaned folder %s on %s' %
                                              (folder, bigip.hostname))
                                except Exception as exc:
                                    LOG.error('error purging folder %s: %s' %
                                              (folder, str(exc)))
        return deployed_lb_dict
@serialized('purge_orphaned_pool')
@is_operational
def purge_orphaned_pool(self, tenant_id=None, pool_id=None, hostnames=[]):
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
try:
pool_name = self.service_adapter.prefix + pool_id
partition = self.service_adapter.prefix + tenant_id
pool = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.pool).load(
bigip, pool_name, partition)
pool.delete()
except HTTPError as err:
if err.response.status_code == 404:
LOG.debug('pool %s not on BIG-IP %s.'
% (pool_id, bigip.hostname))
except Exception as exc:
LOG.exception('Exception purging pool %s' % str(exc))
@serialized('purge_orphaned_loadbalancer')
@is_operational
def purge_orphaned_loadbalancer(self, tenant_id=None,
loadbalancer_id=None, hostnames=[]):
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
try:
va_name = self.service_adapter.prefix + loadbalancer_id
partition = self.service_adapter.prefix + tenant_id
va = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual_address).load(
bigip, va_name, partition)
# get virtual services (listeners)
# referencing this virtual address
vses = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual).get_resources(
bigip, partition)
vs_dest_compare = '/' + partition + '/' + va.name
for vs in vses:
if str(vs.destination).startswith(vs_dest_compare):
LOG.debug('BAM!!! matched %s startswith %s' % (vs.destination, vs_dest_compare))
if vs.pool:
pool = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.pool).load(
bigip, os.path.basename(vs.pool),
partition)
vs.delete()
pool.delete()
else:
vs.delete()
resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual_address).delete(
bigip, va_name, partition)
except HTTPError as err:
if err.response.status_code == 404:
LOG.debug('loadbalancer %s not on BIG-IP %s.'
% (loadbalancer_id, bigip.hostname))
except Exception as exc:
LOG.exception('Exception purging loadbalancer %s'
% str(exc))
@serialized('create_loadbalancer')
@is_operational
def create_loadbalancer(self, loadbalancer, service):
"""Create virtual server"""
return self._common_service_handler(service)
@serialized('update_loadbalancer')
@is_operational
def update_loadbalancer(self, old_loadbalancer, loadbalancer, service):
"""Update virtual server"""
# anti-pattern three args unused.
return self._common_service_handler(service)
@serialized('delete_loadbalancer')
@is_operational
def delete_loadbalancer(self, loadbalancer, service):
"""Delete loadbalancer"""
LOG.debug("Deleting loadbalancer")
return self._common_service_handler(
service,
delete_partition=True,
delete_event=True)
@serialized('create_listener')
@is_operational
def create_listener(self, listener, service):
"""Create virtual server"""
LOG.debug("Creating listener")
return self._common_service_handler(service)
@serialized('update_listener')
@is_operational
def update_listener(self, old_listener, listener, service):
"""Update virtual server"""
LOG.debug("Updating listener")
service['old_listener'] = old_listener
return self._common_service_handler(service)
@serialized('delete_listener')
@is_operational
def delete_listener(self, listener, service):
"""Delete virtual server"""
LOG.debug("Deleting listener")
return self._common_service_handler(service)
@serialized('create_pool')
@is_operational
def create_pool(self, pool, service):
"""Create lb pool"""
LOG.debug("Creating pool")
return self._common_service_handler(service)
@serialized('update_pool')
@is_operational
def update_pool(self, old_pool, pool, service):
"""Update lb pool"""
LOG.debug("Updating pool")
return self._common_service_handler(service)
@serialized('delete_pool')
@is_operational
def delete_pool(self, pool, service):
"""Delete lb pool"""
LOG.debug("Deleting pool")
return self._common_service_handler(service)
@serialized('create_member')
@is_operational
def create_member(self, member, service):
"""Create pool member"""
LOG.debug("Creating member")
return self._common_service_handler(service)
@serialized('update_member')
@is_operational
def update_member(self, old_member, member, service):
"""Update pool member"""
LOG.debug("Updating member")
return self._common_service_handler(service)
@serialized('delete_member')
@is_operational
def delete_member(self, member, service):
"""Delete pool member"""
LOG.debug("Deleting member")
return self._common_service_handler(service, delete_event=True)
@serialized('create_health_monitor')
@is_operational
def create_health_monitor(self, health_monitor, service):
"""Create pool health monitor"""
LOG.debug("Creating health monitor")
return self._common_service_handler(service)
@serialized('update_health_monitor')
@is_operational
def update_health_monitor(self, old_health_monitor,
health_monitor, service):
"""Update pool health monitor"""
LOG.debug("Updating health monitor")
return self._common_service_handler(service)
@serialized('delete_health_monitor')
@is_operational
def delete_health_monitor(self, health_monitor, service):
"""Delete pool health monitor"""
LOG.debug("Deleting health monitor")
return self._common_service_handler(service)
@is_operational
def get_stats(self, service):
lb_stats = {}
stats = ['clientside.bitsIn',
'clientside.bitsOut',
'clientside.curConns',
'clientside.totConns']
loadbalancer = service['loadbalancer']
try:
# sum virtual server stats for all BIG-IPs
vs_stats = self.lbaas_builder.get_listener_stats(service, stats)
# convert to bytes
lb_stats[lb_const.STATS_IN_BYTES] = \
vs_stats['clientside.bitsIn']/8
lb_stats[lb_const.STATS_OUT_BYTES] = \
vs_stats['clientside.bitsOut']/8
lb_stats[lb_const.STATS_ACTIVE_CONNECTIONS] = \
vs_stats['clientside.curConns']
lb_stats[lb_const.STATS_TOTAL_CONNECTIONS] = \
vs_stats['clientside.totConns']
# update Neutron
self.plugin_rpc.update_loadbalancer_stats(
loadbalancer['id'], lb_stats)
except Exception as e:
LOG.error("Error getting loadbalancer stats: %s", e.message)
finally:
return lb_stats
def fdb_add(self, fdb):
# Add (L2toL3) forwarding database entries
self.remove_ips_from_fdb_update(fdb)
for bigip in self.get_all_bigips():
self.network_builder.add_bigip_fdb(bigip, fdb)
def fdb_remove(self, fdb):
# Remove (L2toL3) forwarding database entries
self.remove_ips_from_fdb_update(fdb)
for bigip in self.get_all_bigips():
self.network_builder.remove_bigip_fdb(bigip, fdb)
def fdb_update(self, fdb):
# Update (L2toL3) forwarding database entries
self.remove_ips_from_fdb_update(fdb)
for bigip in self.get_all_bigips():
self.network_builder.update_bigip_fdb(bigip, fdb)
# remove ips from fdb update so we do not try to
# add static arps for them because we do not have
# enough information to determine the route domain
def remove_ips_from_fdb_update(self, fdb):
for network_id in fdb:
network = fdb[network_id]
mac_ips_by_vtep = network['ports']
for vtep in mac_ips_by_vtep:
mac_ips = mac_ips_by_vtep[vtep]
for mac_ip in mac_ips:
mac_ip[1] = None
def tunnel_update(self, **kwargs):
# Tunnel Update from Neutron Core RPC
pass
def tunnel_sync(self):
# Only sync when supported types are present
if not [i for i in self.agent_configurations['tunnel_types']
if i in ['gre', 'vxlan']]:
return False
tunnel_ips = []
for bigip in self.get_all_bigips():
if bigip.local_ip:
tunnel_ips.append(bigip.local_ip)
self.network_builder.tunnel_sync(tunnel_ips)
# Tunnel sync sent.
return False
@serialized('sync')
@is_operational
def sync(self, service):
"""Sync service defintion to device"""
# plugin_rpc may not be set when unit testing
if self.plugin_rpc:
# Get the latest service. It may have changed.
service = self.plugin_rpc.get_service_by_loadbalancer_id(
service['loadbalancer']['id']
)
if service['loadbalancer']:
return self._common_service_handler(service)
else:
LOG.debug("Attempted sync of deleted pool")
@serialized('backup_configuration')
@is_operational
def backup_configuration(self):
# Save Configuration on Devices
for bigip in self.get_all_bigips():
LOG.debug('_backup_configuration: saving device %s.'
% bigip.hostname)
self.cluster_manager.save_config(bigip)
def _get_monitor_endpoint(self, bigip, service):
monitor_type = self.service_adapter.get_monitor_type(service)
if not monitor_type:
monitor_type = ""
if monitor_type == "HTTPS":
hm = bigip.tm.ltm.monitor.https_s.https
elif monitor_type == "TCP":
hm = bigip.tm.ltm.monitor.tcps.tcp
elif monitor_type == "PING":
hm = bigip.tm.ltm.monitor.gateway_icmps.gateway_icmp
else:
hm = bigip.tm.ltm.monitor.https.http
return hm
def service_rename_required(self, service):
rename_required = False
# Returns whether the bigip has a pool for the service
if not service['loadbalancer']:
return False
bigips = self.get_config_bigips()
loadbalancer = service['loadbalancer']
# Does the correctly named virtual address exist?
for bigip in bigips:
virtual_address = VirtualAddress(self.service_adapter,
loadbalancer)
if not virtual_address.exists(bigip):
rename_required = True
break
return rename_required
    def service_object_teardown(self, service):
        """Force-delete the service's BIG-IP objects on every device.

        Removes the listeners' virtual servers, the pools and the
        health monitors named after the OpenStack objects in *service*
        from the tenant partition.  Returns False when the service
        carries no loadbalancer; otherwise returns None.
        """
        # Returns whether the bigip has a pool for the service
        if not service['loadbalancer']:
            return False
        bigips = self.get_config_bigips()
        loadbalancer = service['loadbalancer']
        folder_name = self.service_adapter.get_folder_name(
            loadbalancer['tenant_id']
        )
        # Change to bigips
        for bigip in bigips:
            # Delete all virtuals
            v = bigip.tm.ltm.virtuals.virtual
            for listener in service['listeners']:
                l_name = listener.get("name", "")
                if not l_name:
                    # fall back to the adapter-generated (prefixed) name
                    svc = {"loadbalancer": loadbalancer,
                           "listener": listener}
                    vip = self.service_adapter.get_virtual(svc)
                    l_name = vip['name']
                if v.exists(name=l_name, partition=folder_name):
                    # Found a virtual that is named by the OS object,
                    # delete it.
                    l_obj = v.load(name=l_name, partition=folder_name)
                    LOG.warn("Deleting listener: /%s/%s" %
                             (folder_name, l_name))
                    l_obj.delete(name=l_name, partition=folder_name)
            # Delete all pools
            p = bigip.tm.ltm.pools.pool
            for os_pool in service['pools']:
                p_name = os_pool.get('name', "")
                if not p_name:
                    svc = {"loadbalancer": loadbalancer,
                           "pool": os_pool}
                    pool = self.service_adapter.get_pool(svc)
                    p_name = pool['name']
                if p.exists(name=p_name, partition=folder_name):
                    p_obj = p.load(name=p_name, partition=folder_name)
                    LOG.warn("Deleting pool: /%s/%s" % (folder_name, p_name))
                    p_obj.delete(name=p_name, partition=folder_name)
            # Delete all healthmonitors
            for healthmonitor in service['healthmonitors']:
                svc = {'loadbalancer': loadbalancer,
                       'healthmonitor': healthmonitor}
                # endpoint depends on monitor type (HTTP/HTTPS/TCP/PING)
                monitor_ep = self._get_monitor_endpoint(bigip, svc)
                m_name = healthmonitor.get('name', "")
                if not m_name:
                    hm = self.service_adapter.get_healthmonitor(svc)
                    m_name = hm['name']
                if monitor_ep.exists(name=m_name, partition=folder_name):
                    m_obj = monitor_ep.load(name=m_name, partition=folder_name)
                    LOG.warn("Deleting monitor: /%s/%s" % (
                        folder_name, m_name))
                    m_obj.delete()
    def _service_exists(self, service):
        """Verify every object in *service* exists on all config BIG-IPs.

        Checks the tenant folder, the virtual address, each listener's
        virtual server, each pool (and its members), and each health
        monitor.  Logs and returns False on the first missing object;
        returns True only when everything exists everywhere.
        """
        # Returns whether the bigip has the service defined
        if not service['loadbalancer']:
            return False
        loadbalancer = service['loadbalancer']
        folder_name = self.service_adapter.get_folder_name(
            loadbalancer['tenant_id']
        )
        # Foreach bigip in the cluster:
        for bigip in self.get_config_bigips():
            # Does the tenant folder exist?
            if not self.system_helper.folder_exists(bigip, folder_name):
                LOG.error("Folder %s does not exists on bigip: %s" %
                          (folder_name, bigip.hostname))
                return False
            # Get the virtual address
            virtual_address = VirtualAddress(self.service_adapter,
                                             loadbalancer)
            if not virtual_address.exists(bigip):
                LOG.error("Virtual address %s(%s) does not "
                          "exists on bigip: %s" % (virtual_address.name,
                                                   virtual_address.address,
                                                   bigip.hostname))
                return False
            # Ensure that each virtual service exists.
            for listener in service['listeners']:
                svc = {"loadbalancer": loadbalancer,
                       "listener": listener}
                virtual_server = self.service_adapter.get_virtual_name(svc)
                if not self.vs_manager.exists(bigip,
                                              name=virtual_server['name'],
                                              partition=folder_name):
                    LOG.error("Virtual /%s/%s not found on bigip: %s" %
                              (virtual_server['name'], folder_name,
                               bigip.hostname))
                    return False
            # Ensure that each pool exists.
            for pool in service['pools']:
                svc = {"loadbalancer": loadbalancer,
                       "pool": pool}
                bigip_pool = self.service_adapter.get_pool(svc)
                if not self.pool_manager.exists(
                        bigip,
                        name=bigip_pool['name'],
                        partition=folder_name):
                    LOG.error("Pool /%s/%s not found on bigip: %s" %
                              (bigip_pool['name'], folder_name,
                               bigip.hostname))
                    return False
                else:
                    # Ensure each pool member exists
                    for member in service['members']:
                        if member['pool_id'] == pool['id']:
                            lb = self.lbaas_builder
                            # NOTE(review): rebinds the outer loop variable
                            # 'pool' to the service-model pool here --
                            # intentional but fragile; later iterations of
                            # the inner loop use the rebound value
                            pool = lb.get_pool_by_id(
                                service, member["pool_id"])
                            svc = {"loadbalancer": loadbalancer,
                                   "member": member,
                                   "pool": pool}
                            if not lb.pool_builder.member_exists(svc, bigip):
                                return False
            # Ensure that each health monitor exists.
            for healthmonitor in service['healthmonitors']:
                svc = {"loadbalancer": loadbalancer,
                       "healthmonitor": healthmonitor}
                monitor = self.service_adapter.get_healthmonitor(svc)
                monitor_ep = self._get_monitor_endpoint(bigip, svc)
                if not monitor_ep.exists(name=monitor['name'],
                                         partition=folder_name):
                    LOG.error("Monitor /%s/%s not found on bigip: %s" %
                              (monitor['name'], folder_name, bigip.hostname))
                    return False
        return True
def get_loadbalancers_in_tenant(self, tenant_id):
loadbalancers = self.plugin_rpc.get_all_loadbalancers()
return [lb['lb_id'] for lb in loadbalancers
if lb['tenant_id'] == tenant_id]
    def _common_service_handler(self, service,
                                delete_partition=False,
                                delete_event=False):
        """Apply a full service definition to the configured BIG-IPs.

        Drives tenant partition creation, pre-service networking, the
        LBaaS object builders, post-service networking and partition
        cleanup, then reports the resulting statuses to Neutron.

        :param service: service model dict; must contain 'loadbalancer'.
        :param delete_partition: remove the tenant partition when the
            loadbalancer is pending delete.
        :param delete_event: this call services a delete operation, so
            network-not-ready errors are not re-raised.
        :returns: True while the loadbalancer remains pending
            create/update, False once settled.
        """
        # Assure that the service is configured on bigip(s)
        start_time = time()
        lb_pending = True
        do_service_update = True
        if self.conf.trace_service_requests:
            self.trace_service_requests(service)
        loadbalancer = service.get("loadbalancer", None)
        if not loadbalancer:
            LOG.error("_common_service_handler: Service loadbalancer is None")
            return lb_pending
        lb_provisioning_status = loadbalancer.get("provisioning_status",
                                                  plugin_const.ERROR)
        try:
            try:
                self.tenant_manager.assure_tenant_created(service)
            except Exception as e:
                LOG.error("Tenant folder creation exception: %s",
                          e.message)
                # pending deletes keep their status so cleanup still runs
                if lb_provisioning_status != plugin_const.PENDING_DELETE:
                    loadbalancer['provisioning_status'] = \
                        plugin_const.ERROR
                raise e
            LOG.debug(" _assure_tenant_created took %.5f secs" %
                      (time() - start_time))
            traffic_group = self.service_to_traffic_group(service)
            loadbalancer['traffic_group'] = traffic_group
            if self.network_builder:
                start_time = time()
                try:
                    self.network_builder.prep_service_networking(
                        service, traffic_group)
                except f5ex.NetworkNotReady as error:
                    # defer: the agent retries once the network exists
                    LOG.debug("Network creation deferred until network "
                              "definition is completed: %s",
                              error.message)
                    if not delete_event:
                        do_service_update = False
                        raise error
                except Exception as error:
                    LOG.error("Prep-network exception: icontrol_driver: %s",
                              error.message)
                    if lb_provisioning_status != plugin_const.PENDING_DELETE:
                        loadbalancer['provisioning_status'] = \
                            plugin_const.ERROR
                    if not delete_event:
                        raise error
                finally:
                    if time() - start_time > .001:
                        LOG.debug(" _prep_service_networking "
                                  "took %.5f secs" % (time() - start_time))
            all_subnet_hints = {}
            for bigip in self.get_config_bigips():
                # check_for_delete_subnets:
                #     keep track of which subnets we should check to delete
                #     for a deleted vip or member
                # do_not_delete_subnets:
                #     If we add an IP to a subnet we must not delete the subnet
                all_subnet_hints[bigip.device_name] = \
                    {'check_for_delete_subnets': {},
                     'do_not_delete_subnets': []}
            LOG.debug("XXXXXXXXX: Pre assure service")
            # pdb.set_trace()
            self.lbaas_builder.assure_service(service,
                                              traffic_group,
                                              all_subnet_hints)
            LOG.debug("XXXXXXXXX: Post assure service")
            if self.network_builder:
                start_time = time()
                try:
                    self.network_builder.post_service_networking(
                        service, all_subnet_hints)
                except Exception as error:
                    LOG.error("Post-network exception: icontrol_driver: %s",
                              error.message)
                    if lb_provisioning_status != plugin_const.PENDING_DELETE:
                        loadbalancer['provisioning_status'] = \
                            plugin_const.ERROR
                    raise error
                if time() - start_time > .001:
                    LOG.debug(" _post_service_networking "
                              "took %.5f secs" % (time() - start_time))
        except f5ex.NetworkNotReady as error:
            # deferred above; swallow so status update logic still runs
            pass
        except Exception as err:
            LOG.exception(err)
        finally:
            # only delete partition if loadbalancer is being deleted
            # NOTE(review): if an exception fires before all_subnet_hints
            # is assigned, this reference raises NameError for pending
            # deletes -- confirm whether that path is reachable
            if lb_provisioning_status == plugin_const.PENDING_DELETE:
                self.tenant_manager.assure_tenant_cleanup(service,
                                                          all_subnet_hints)
            if do_service_update:
                self.update_service_status(service)
            # re-read: the builders may have changed the status above
            lb_provisioning_status = loadbalancer.get("provisioning_status",
                                                      plugin_const.ERROR)
            lb_pending = \
                (lb_provisioning_status == plugin_const.PENDING_CREATE or
                 lb_provisioning_status == plugin_const.PENDING_UPDATE)
        return lb_pending
def update_service_status(self, service, timed_out=False):
"""Update status of objects in controller."""
LOG.debug("_update_service_status")
if not self.plugin_rpc:
LOG.error("Cannot update status in Neutron without "
"RPC handler.")
return
if 'members' in service:
# Call update_members_status
self._update_member_status(service['members'], timed_out)
if 'healthmonitors' in service:
# Call update_monitor_status
self._update_health_monitor_status(
service['healthmonitors']
)
if 'pools' in service:
# Call update_pool_status
self._update_pool_status(
service['pools']
)
if 'listeners' in service:
# Call update_listener_status
self._update_listener_status(service)
if 'l7policy_rules' in service:
self._update_l7rule_status(service['l7policy_rules'])
if 'l7policies' in service:
self._update_l7policy_status(service['l7policies'])
self._update_loadbalancer_status(service, timed_out)
def _update_member_status(self, members, timed_out):
"""Update member status in OpenStack """
for member in members:
if 'provisioning_status' in member:
provisioning_status = member['provisioning_status']
if (provisioning_status == plugin_const.PENDING_CREATE or
provisioning_status == plugin_const.PENDING_UPDATE):
if timed_out:
member['provisioning_status'] = plugin_const.ERROR
operating_status = lb_const.OFFLINE
else:
member['provisioning_status'] = plugin_const.ACTIVE
operating_status = lb_const.ONLINE
self.plugin_rpc.update_member_status(
member['id'],
member['provisioning_status'],
operating_status
)
elif provisioning_status == plugin_const.PENDING_DELETE:
self.plugin_rpc.member_destroyed(
member['id'])
elif provisioning_status == plugin_const.ERROR:
self.plugin_rpc.update_member_status(
member['id'],
plugin_const.ERROR,
lb_const.OFFLINE)
def _update_health_monitor_status(self, health_monitors):
"""Update pool monitor status in OpenStack """
for health_monitor in health_monitors:
if 'provisioning_status' in health_monitor:
provisioning_status = health_monitor['provisioning_status']
if (provisioning_status == plugin_const.PENDING_CREATE or
provisioning_status == plugin_const.PENDING_UPDATE):
self.plugin_rpc.update_health_monitor_status(
health_monitor['id'],
plugin_const.ACTIVE,
lb_const.ONLINE
)
health_monitor['provisioning_status'] = \
plugin_const.ACTIVE
elif provisioning_status == plugin_const.PENDING_DELETE:
self.plugin_rpc.health_monitor_destroyed(
health_monitor['id'])
elif provisioning_status == plugin_const.ERROR:
self.plugin_rpc.update_health_monitor_status(
health_monitor['id'])
@log_helpers.log_method_call
def _update_pool_status(self, pools):
"""Update pool status in OpenStack """
for pool in pools:
if 'provisioning_status' in pool:
provisioning_status = pool['provisioning_status']
if (provisioning_status == plugin_const.PENDING_CREATE or
provisioning_status == plugin_const.PENDING_UPDATE):
self.plugin_rpc.update_pool_status(
pool['id'],
plugin_const.ACTIVE,
lb_const.ONLINE
)
pool['provisioning_status'] = plugin_const.ACTIVE
elif provisioning_status == plugin_const.PENDING_DELETE:
self.plugin_rpc.pool_destroyed(
pool['id'])
elif provisioning_status == plugin_const.ERROR:
self.plugin_rpc.update_pool_status(pool['id'])
@log_helpers.log_method_call
def _update_listener_status(self, service):
"""Update listener status in OpenStack """
listeners = service['listeners']
for listener in listeners:
if 'provisioning_status' in listener:
provisioning_status = listener['provisioning_status']
if (provisioning_status == plugin_const.PENDING_CREATE or
provisioning_status == plugin_const.PENDING_UPDATE):
self.plugin_rpc.update_listener_status(
listener['id'],
plugin_const.ACTIVE,
listener['operating_status']
)
listener['provisioning_status'] = \
plugin_const.ACTIVE
elif provisioning_status == plugin_const.PENDING_DELETE:
self.plugin_rpc.listener_destroyed(
listener['id'])
elif provisioning_status == plugin_const.ERROR:
self.plugin_rpc.update_listener_status(
listener['id'],
provisioning_status,
lb_const.OFFLINE)
@log_helpers.log_method_call
def _update_l7rule_status(self, l7rules):
"""Update l7rule status in OpenStack """
for l7rule in l7rules:
if 'provisioning_status' in l7rule:
provisioning_status = l7rule['provisioning_status']
if (provisioning_status == plugin_const.PENDING_CREATE or
provisioning_status == plugin_const.PENDING_UPDATE):
self.plugin_rpc.update_l7rule_status(
l7rule['id'],
l7rule['policy_id'],
plugin_const.ACTIVE,
lb_const.ONLINE
)
elif provisioning_status == plugin_const.PENDING_DELETE:
self.plugin_rpc.l7rule_destroyed(
l7rule['id'])
elif provisioning_status == plugin_const.ERROR:
self.plugin_rpc.update_l7rule_status(
l7rule['id'], l7rule['policy_id'])
@log_helpers.log_method_call
def _update_l7policy_status(self, l7policies):
LOG.debug("_update_l7policy_status")
"""Update l7policy status in OpenStack """
for l7policy in l7policies:
if 'provisioning_status' in l7policy:
provisioning_status = l7policy['provisioning_status']
if (provisioning_status == plugin_const.PENDING_CREATE or
provisioning_status == plugin_const.PENDING_UPDATE):
self.plugin_rpc.update_l7policy_status(
l7policy['id'],
plugin_const.ACTIVE,
lb_const.ONLINE
)
elif provisioning_status == plugin_const.PENDING_DELETE:
LOG.debug("calling l7policy_destroyed")
self.plugin_rpc.l7policy_destroyed(
l7policy['id'])
elif provisioning_status == plugin_const.ERROR:
self.plugin_rpc.update_l7policy_status(l7policy['id'])
    @log_helpers.log_method_call
    def _update_loadbalancer_status(self, service, timed_out=False):
        """Update loadbalancer status in OpenStack.

        :param service: service dict; only the 'loadbalancer' entry is read.
        :param timed_out: True when provisioning exceeded its deadline.
        """
        loadbalancer = service.get('loadbalancer', {})
        # missing status is treated as ERROR
        provisioning_status = loadbalancer.get('provisioning_status',
                                               plugin_const.ERROR)
        if (provisioning_status == plugin_const.PENDING_CREATE or
                provisioning_status == plugin_const.PENDING_UPDATE):
            if timed_out:
                # on timeout: a create that never finished is ERROR, but a
                # timed-out update leaves the LB provisioned, hence ACTIVE;
                # operating status is OFFLINE in both cases
                operating_status = (lb_const.OFFLINE)
                if provisioning_status == plugin_const.PENDING_CREATE:
                    loadbalancer['provisioning_status'] = \
                        plugin_const.ERROR
                else:
                    loadbalancer['provisioning_status'] = \
                        plugin_const.ACTIVE
            else:
                operating_status = (lb_const.ONLINE)
                loadbalancer['provisioning_status'] = \
                    plugin_const.ACTIVE
            self.plugin_rpc.update_loadbalancer_status(
                loadbalancer['id'],
                loadbalancer['provisioning_status'],
                operating_status)
        elif provisioning_status == plugin_const.PENDING_DELETE:
            self.plugin_rpc.loadbalancer_destroyed(
                loadbalancer['id'])
        elif provisioning_status == plugin_const.ERROR:
            self.plugin_rpc.update_loadbalancer_status(
                loadbalancer['id'],
                provisioning_status,
                lb_const.OFFLINE)
        elif provisioning_status == plugin_const.ACTIVE:
            # nothing to report; already settled
            LOG.debug('Loadbalancer provisioning status is active')
        else:
            LOG.error('Loadbalancer provisioning status is invalid')
    @is_operational
    def update_operating_status(self, service):
        """Refresh member operating status from the BIG-IP into Neutron.

        Only members whose provisioning status is already ACTIVE are
        reported; provisioning_status is passed as None so only the
        operating status is updated on the Neutron side.
        """
        if 'members' in service:
            if self.network_builder:
                # append route domain to member address
                self.network_builder._annotate_service_route_domains(service)
            # get current member status from the device
            self.lbaas_builder.update_operating_status(service)
            # update Neutron
            for member in service['members']:
                if member['provisioning_status'] == plugin_const.ACTIVE:
                    operating_status = member.get('operating_status', None)
                    self.plugin_rpc.update_member_status(
                        member['id'],
                        provisioning_status=None,
                        operating_status=operating_status)
def get_active_bigip(self):
bigips = self.get_all_bigips()
if len(bigips) == 1:
return bigips[0]
for bigip in bigips:
if hasattr(bigip, 'failover_state'):
if bigip.failover_state == 'active':
return bigip
# if can't determine active, default to first one
return bigips[0]
def service_to_traffic_group(self, service):
# Hash service tenant id to index of traffic group
# return which iControlDriver.__traffic_group that tenant is "in?"
return self.tenant_to_traffic_group(
service['loadbalancer']['tenant_id'])
def tenant_to_traffic_group(self, tenant_id):
# Hash tenant id to index of traffic group
hexhash = hashlib.md5(tenant_id).hexdigest()
tg_index = int(hexhash, 16) % len(self.__traffic_groups)
return self.__traffic_groups[tg_index]
# these functions should return only active BIG-IP
# not errored BIG-IPs.
def get_bigip(self):
hostnames = sorted(list(self.__bigips))
for host in hostnames:
if hasattr(self.__bigips[host], 'status') and \
self.__bigips[host].status == 'active':
return self.__bigips[host]
def get_bigip_hosts(self):
return_hosts = []
for host in list(self.__bigips):
if hasattr(self.__bigips[host], 'status') and \
self.__bigips[host].status == 'active':
return_hosts.append(host)
return sorted(return_hosts)
def get_all_bigips(self):
return_bigips = []
for host in list(self.__bigips):
if hasattr(self.__bigips[host], 'status') and \
self.__bigips[host].status == 'active':
return_bigips.append(self.__bigips[host])
return return_bigips
    def get_config_bigips(self):
        """Return the BIG-IPs to configure (currently all active ones)."""
        return self.get_all_bigips()
# these are the refactored methods
    def get_active_bigips(self):
        """Refactored alias for get_all_bigips()."""
        return self.get_all_bigips()
def get_errored_bigips_hostnames(self):
return_hostnames = []
for host in list(self.__bigips):
bigip = self.__bigips[host]
if hasattr(bigip, 'status') and bigip.status == 'error':
return_hostnames.append(host)
return return_hostnames
    def get_inbound_throughput(self, bigip, global_statistics=None):
        """Return inbound throughput for ``bigip`` (delegates to stat_helper)."""
        return self.stat_helper.get_inbound_throughput(
            bigip, global_stats=global_statistics)
    def get_outbound_throughput(self, bigip, global_statistics=None):
        """Return outbound throughput for ``bigip`` (delegates to stat_helper)."""
        return self.stat_helper.get_outbound_throughput(
            bigip, global_stats=global_statistics)
    def get_throughput(self, bigip=None, global_statistics=None):
        """Return total throughput for ``bigip`` (delegates to stat_helper)."""
        return self.stat_helper.get_throughput(
            bigip, global_stats=global_statistics)
    def get_active_connections(self, bigip=None, global_statistics=None):
        """Return active connection count (delegates to stat_helper)."""
        return self.stat_helper.get_active_connection_count(
            bigip, global_stats=global_statistics)
    def get_ssltps(self, bigip=None, global_statistics=None):
        """Return active SSL transactions/sec (delegates to stat_helper)."""
        return self.stat_helper.get_active_SSL_TPS(
            bigip, global_stats=global_statistics)
def get_node_count(self, bigip=None, global_statistics=None):
return len(bigip.tm.ltm.nodes.get_collection())
    def get_clientssl_profile_count(self, bigip=None, global_statistics=None):
        """Return client SSL profile count (delegates to ssl_profile helper)."""
        return ssl_profile.SSLProfileHelper.get_client_ssl_profile_count(bigip)
    def get_tenant_count(self, bigip=None, global_statistics=None):
        """Return tenant folder count (delegates to system_helper)."""
        return self.system_helper.get_tenant_folder_count(bigip)
    def get_tunnel_count(self, bigip=None, global_statistics=None):
        """Return tunnel count (delegates to network_helper)."""
        return self.network_helper.get_tunnel_count(bigip)
    def get_vlan_count(self, bigip=None, global_statistics=None):
        """Return VLAN count (delegates to network_helper)."""
        return self.network_helper.get_vlan_count(bigip)
    def get_route_domain_count(self, bigip=None, global_statistics=None):
        """Return route domain count (delegates to network_helper)."""
        return self.network_helper.get_route_domain_count(bigip)
def _init_traffic_groups(self, bigip):
self.__traffic_groups = self.cluster_manager.get_traffic_groups(bigip)
if 'traffic-group-local-only' in self.__traffic_groups:
self.__traffic_groups.remove('traffic-group-local-only')
self.__traffic_groups.sort()
def _validate_bigip_version(self, bigip, hostname):
# Ensure the BIG-IP® has sufficient version
major_version = self.system_helper.get_major_version(bigip)
if major_version < f5const.MIN_TMOS_MAJOR_VERSION:
raise f5ex.MajorVersionValidateFailed(
'Device %s must be at least TMOS %s.%s'
% (hostname, f5const.MIN_TMOS_MAJOR_VERSION,
f5const.MIN_TMOS_MINOR_VERSION))
minor_version = self.system_helper.get_minor_version(bigip)
if minor_version < f5const.MIN_TMOS_MINOR_VERSION:
raise f5ex.MinorVersionValidateFailed(
'Device %s must be at least TMOS %s.%s'
% (hostname, f5const.MIN_TMOS_MAJOR_VERSION,
f5const.MIN_TMOS_MINOR_VERSION))
return major_version, minor_version
    @serialized('create_l7policy')
    @is_operational
    def create_l7policy(self, l7policy, service):
        """Create lb l7policy.

        Provisioning is driven entirely by the ``service`` payload via the
        common service handler; ``l7policy`` is not read directly here.
        """
        LOG.debug("Creating l7policy")
        self._common_service_handler(service)
    @serialized('update_l7policy')
    @is_operational
    def update_l7policy(self, old_l7policy, l7policy, service):
        """Update lb l7policy.

        Old/new policy arguments are not read directly; the full
        ``service`` payload drives provisioning.
        """
        LOG.debug("Updating l7policy")
        self._common_service_handler(service)
    @serialized('delete_l7policy')
    @is_operational
    def delete_l7policy(self, l7policy, service):
        """Delete lb l7policy via the common service handler."""
        LOG.debug("Deleting l7policy")
        self._common_service_handler(service)
    @serialized('create_l7rule')
    @is_operational
    def create_l7rule(self, pool, service):
        """Create lb l7rule via the common service handler.

        NOTE(review): the first parameter is named ``pool`` but by analogy
        with the sibling methods it presumably carries the l7rule --
        confirm against the caller.
        """
        LOG.debug("Creating l7rule")
        self._common_service_handler(service)
    @serialized('update_l7rule')
    @is_operational
    def update_l7rule(self, old_l7rule, l7rule, service):
        """Update lb l7rule via the common service handler."""
        LOG.debug("Updating l7rule")
        self._common_service_handler(service)
    @serialized('delete_l7rule')
    @is_operational
    def delete_l7rule(self, l7rule, service):
        """Delete lb l7rule via the common service handler."""
        LOG.debug("Deleting l7rule")
        self._common_service_handler(service)
def trace_service_requests(self, service):
with open(self.file_name, 'r+') as fp:
fp.seek(-1, 2)
fp.write(',')
json.dump(service, fp, sort_keys=True, indent=2)
fp.write(']')
def get_config_dir(self):
"""Determines F5 agent configuration directory.
Oslo cfg has a config_dir option, but F5 agent is not currently
started with this option. To be complete, the code will check if
config_dir is defined, and use that value as long as it is a single
string (no idea what to do if it is not a str). If not defined,
get the full dir path of the INI file, which is currently used when
starting F5 agent. If neither option is available,
use /etc/neutron/services/f5.
:return: str defining configuration directory.
"""
if self.conf.config_dir and isinstance(self.conf.config_dir, str):
# use config_dir parameter if defined, and is a string
return self.conf.config_dir
elif self.conf.config_file:
# multiple config files (neutron and agent) are usually defined
if isinstance(self.conf.config_file, list):
# find agent config (f5-openstack-agent.ini)
config_files = self.conf.config_file
for file_name in config_files:
if 'f5-openstack-agent.ini' in file_name:
return os.path.dirname(file_name)
elif isinstance(self.conf.config_file, str):
# not a list, just a single string
return os.path.dirname(self.conf.config_file)
# if all else fails
return '/etc/neutron/services/f5'
# removed debug comment causing flake8 issue
# coding=utf-8
#
# Copyright 2014-2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import functools
import hashlib
import json
import logging as std_logging
import os

from time import strftime
from time import time

from eventlet import greenthread
from requests import HTTPError

from neutron.plugins.common import constants as plugin_const
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lib.exceptions import InvalidConfigurationOption
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import importutils

from f5.bigip import ManagementRoot
from f5_openstack_agent.lbaasv2.drivers.bigip.cluster_manager import \
    ClusterManager
from f5_openstack_agent.lbaasv2.drivers.bigip import constants_v2 as f5const
from f5_openstack_agent.lbaasv2.drivers.bigip.esd_filehandler import \
    EsdTagProcessor
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5ex
from f5_openstack_agent.lbaasv2.drivers.bigip.lbaas_builder import \
    LBaaSBuilder
from f5_openstack_agent.lbaasv2.drivers.bigip.lbaas_driver import \
    LBaaSBaseDriver
from f5_openstack_agent.lbaasv2.drivers.bigip import network_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.network_service import \
    NetworkServiceBuilder
from f5_openstack_agent.lbaasv2.drivers.bigip import resource_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.service_adapter import \
    ServiceModelAdapter
from f5_openstack_agent.lbaasv2.drivers.bigip import ssl_profile
from f5_openstack_agent.lbaasv2.drivers.bigip import stat_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.system_helper import \
    SystemHelper
from f5_openstack_agent.lbaasv2.drivers.bigip.tenants import \
    BigipTenantManager
from f5_openstack_agent.lbaasv2.drivers.bigip.utils import serialized
from f5_openstack_agent.lbaasv2.drivers.bigip.virtual_address import \
    VirtualAddress
LOG = logging.getLogger(__name__)  # module logger (oslo_log)
NS_PREFIX = 'qlbaas-'  # prefix used for LBaaS network namespaces
__VERSION__ = '0.1.1'  # driver version string
# configuration objects specific to iControl® driver
# XXX see /etc/neutron/services/f5/f5-openstack-agent.ini
# Fixes: corrected copy-pasted help text on 'openstack_keystone_uri',
# 'mangager' typo, and missing spaces in two multi-line help strings.
OPTS = [  # XXX maybe we should make this a dictionary
    cfg.StrOpt(
        'bigiq_hostname',
        help='The hostname (name or IP address) to use for the BIG-IQ host'
    ),
    cfg.StrOpt(
        'bigiq_admin_username',
        default='admin',
        help='The admin username to use for BIG-IQ authentication',
    ),
    cfg.StrOpt(
        'bigiq_admin_password',
        default='[Provide password in config file]',
        secret=True,
        help='The admin password to use for BIG-IQ authentication'
    ),
    cfg.StrOpt(
        'openstack_keystone_uri',
        default='http://192.0.2.248:5000/',
        help='The Keystone authentication URI'
    ),
    cfg.StrOpt(
        'openstack_admin_username',
        default='admin',
        help='The admin username to use for authentication '
             'with the Keystone service'
    ),
    cfg.StrOpt(
        'openstack_admin_password',
        default='[Provide password in config file]',
        secret=True,
        help='The admin password to use for authentication'
             ' with the Keystone service'
    ),
    cfg.StrOpt(
        'bigip_management_username',
        default='admin',
        help='The admin username that the BIG-IQ will use to manage '
             'discovered BIG-IPs'
    ),
    cfg.StrOpt(
        'bigip_management_password',
        default='[Provide password in config file]',
        secret=True,
        help='The admin password that the BIG-IQ will use to manage '
             'discovered BIG-IPs'
    ),
    cfg.StrOpt(
        'f5_device_type', default='external',
        help='What type of device onboarding'
    ),
    cfg.StrOpt(
        'f5_ha_type', default='pair',
        help='Are we standalone, pair(active/standby), or scalen'
    ),
    cfg.ListOpt(
        'f5_external_physical_mappings', default=['default:1.1:True'],
        help='Mapping between Neutron physical_network to interfaces'
    ),
    cfg.StrOpt(
        'f5_vtep_folder', default='Common',
        help='Folder for the VTEP SelfIP'
    ),
    cfg.StrOpt(
        'f5_vtep_selfip_name', default=None,
        help='Name of the VTEP SelfIP'
    ),
    cfg.ListOpt(
        'advertised_tunnel_types', default=['gre', 'vxlan'],
        help='tunnel types which are advertised to other VTEPs'
    ),
    cfg.BoolOpt(
        'f5_populate_static_arp', default=False,
        help='create static arp entries based on service entries'
    ),
    cfg.StrOpt(
        'vlan_binding_driver',
        default=None,
        help='driver class for binding vlans to device ports'
    ),
    cfg.StrOpt(
        'interface_port_static_mappings',
        default=None,
        help='JSON encoded static mapping of '
             'devices to list of '
             'interface and port_id'
    ),
    cfg.StrOpt(
        'l3_binding_driver',
        default=None,
        help='driver class for binding l3 address to l2 ports'
    ),
    cfg.StrOpt(
        'l3_binding_static_mappings', default=None,
        help='JSON encoded static mapping of '
             'subnet_id to list of '
             'port_id, device_id list.'
    ),
    cfg.BoolOpt(
        'f5_route_domain_strictness', default=False,
        help='Strict route domain isolation'
    ),
    cfg.BoolOpt(
        'f5_common_external_networks', default=True,
        help='Treat external networks as common'
    ),
    cfg.StrOpt(
        'icontrol_vcmp_hostname',
        help='The hostname (name or IP address) to use for vCMP Host '
             'iControl access'
    ),
    cfg.StrOpt(
        'icontrol_hostname',
        default="10.190.5.7",
        help='The hostname (name or IP address) to use for iControl access'
    ),
    cfg.StrOpt(
        'icontrol_username', default='admin',
        help='The username to use for iControl access'
    ),
    cfg.StrOpt(
        'icontrol_password', default='admin', secret=True,
        help='The password to use for iControl access'
    ),
    cfg.IntOpt(
        'icontrol_connection_timeout', default=30,
        help='How many seconds to timeout a connection to BIG-IP'
    ),
    cfg.IntOpt(
        'icontrol_connection_retry_interval', default=10,
        help='How many seconds to wait between retry connection attempts'
    ),
    cfg.DictOpt(
        'common_network_ids', default={},
        help='network uuid to existing Common networks mapping'
    ),
    cfg.StrOpt(
        'icontrol_config_mode', default='objects',
        help='Whether to use iapp or objects for bigip configuration'
    ),
    cfg.IntOpt(
        'max_namespaces_per_tenant', default=1,
        help='How many routing tables the BIG-IP will allocate per tenant'
             ' in order to accommodate overlapping IP subnets'
    ),
    cfg.StrOpt(
        'cert_manager',
        default=None,
        help='Class name of the certificate manager used for retrieving '
             'certificates and keys.'
    ),
    cfg.StrOpt(
        'auth_version',
        default=None,
        help='Keystone authentication version (v2 or v3) for Barbican client.'
    ),
    cfg.StrOpt(
        'os_project_id',
        default='service',
        help='OpenStack project ID.'
    ),
    cfg.StrOpt(
        'os_auth_url',
        default=None,
        help='OpenStack authentication URL.'
    ),
    cfg.StrOpt(
        'os_username',
        default=None,
        help='OpenStack user name for Keystone authentication.'
    ),
    cfg.StrOpt(
        'os_user_domain_name',
        default=None,
        help='OpenStack user domain name for Keystone authentication.'
    ),
    cfg.StrOpt(
        'os_project_name',
        default=None,
        help='OpenStack project name for Keystone authentication.'
    ),
    cfg.StrOpt(
        'os_project_domain_name',
        default=None,
        help='OpenStack domain name for Keystone authentication.'
    ),
    cfg.StrOpt(
        'os_password',
        default=None,
        help='OpenStack user password for Keystone authentication.'
    ),
    cfg.StrOpt(
        'f5_network_segment_physical_network', default=None,
        help='Name of physical network to use for discovery of segment ID'
    ),
    cfg.IntOpt(
        'f5_network_segment_polling_interval', default=10,
        help='Seconds between periodic scans for disconnected virtual servers'
    ),
    cfg.IntOpt(
        'f5_network_segment_gross_timeout', default=300,
        help='Seconds to wait for a virtual server to become connected'
    ),
    cfg.StrOpt(
        'f5_parent_ssl_profile',
        default='clientssl',
        help='Parent profile used when creating client SSL profiles '
             'for listeners with TERMINATED_HTTPS protocols.'
    ),
    cfg.StrOpt(
        'os_tenant_name',
        default=None,
        help='OpenStack tenant name for Keystone authentication (v2 only).'
    ),
    cfg.BoolOpt(
        'trace_service_requests',
        default=False,
        help='Log service object.'
    )
]
def is_operational(method):
    """Decorator ensuring the driver is operational before provisioning.

    When the instance is operational the wrapped method runs normally
    (IOErrors are logged and re-raised).  Otherwise the call is skipped,
    the condition is logged, a re-initialization of the BIG-IP
    connections is attempted, and ``None`` is returned.
    Fix: apply functools.wraps so the wrapped method keeps its name and
    docstring (the original wrapper masked them).
    """
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        instance = args[0]
        if instance.operational:
            try:
                return method(*args, **kwargs)
            except IOError as ioe:
                LOG.error('IO Error detected: %s' % method.__name__)
                LOG.error(str(ioe))
                # bare raise preserves the original traceback
                raise
        else:
            LOG.error('Cannot execute %s. Not operational. Re-initializing.'
                      % method.__name__)
            instance._init_bigips()
    return wrapper
class iControlDriver(LBaaSBaseDriver):
'''gets rpc plugin from manager (which instantiates, via importutils'''
    def __init__(self, conf, registerOpts=True):
        """Initialize the iControl driver from agent configuration.

        :param conf: oslo.config object carrying agent/driver options.
        :param registerOpts: register OPTS on ``conf`` when True; tests
            pass False so they can set options manually instead.
        """
        super(iControlDriver, self).__init__(conf)
        self.conf = conf
        if registerOpts:
            self.conf.register_opts(OPTS)
        self.initialized = False
        self.hostnames = None
        self.device_type = conf.f5_device_type
        self.plugin_rpc = None # overrides base, same value
        self.agent_report_state = None # overrides base, same value
        self.operational = False # overrides base, same value
        self.driver_name = 'f5-lbaasv2-icontrol'
        #
        # BIG-IP® containers
        #
        # BIG-IPs which are currently active
        self.__bigips = {}
        self.__last_connect_attempt = None
        # HA and traffic group validation
        self.ha_validated = False
        self.tg_initialized = False
        # traffic groups discovered from BIG-IPs for service placement
        self.__traffic_groups = []
        # base configurations to report to Neutron agent state reports
        self.agent_configurations = {} # overrides base, same value
        self.agent_configurations['device_drivers'] = [self.driver_name]
        self.agent_configurations['icontrol_endpoints'] = {}
        # service component managers (populated by _init_bigip_managers)
        self.tenant_manager = None
        self.cluster_manager = None
        self.system_helper = None
        self.lbaas_builder = None
        self.service_adapter = None
        self.vlan_binding = None
        self.l3_binding = None
        self.cert_manager = None # overrides register_OPTS
        # server helpers
        self.stat_helper = stat_helper.StatHelper()
        self.network_helper = network_helper.NetworkHelper()
        # f5-sdk helpers
        self.vs_manager = resource_helper.BigIPResourceHelper(
            resource_helper.ResourceType.virtual)
        self.pool_manager = resource_helper.BigIPResourceHelper(
            resource_helper.ResourceType.pool)
        try:
            # debug logging of service requests received by driver
            if self.conf.trace_service_requests:
                path = '/var/log/neutron/service/'
                if not os.path.exists(path):
                    os.makedirs(path)
                self.file_name = path + strftime("%H%M%S-%m%d%Y") + '.json'
                # seed the trace file with a JSON array placeholder
                with open(self.file_name, 'w') as fp:
                    fp.write('[{}] ')
            # driver mode settings - GRM vs L2 adjacent
            if self.conf.f5_global_routed_mode:
                LOG.info('WARNING - f5_global_routed_mode enabled.'
                         ' There will be no L2 or L3 orchestration'
                         ' or tenant isolation provisioned. All vips'
                         ' and pool members must be routable through'
                         ' pre-provisioned SelfIPs.')
                self.conf.use_namespaces = False
                self.conf.f5_snat_mode = True
                self.conf.f5_snat_addresses_per_subnet = 0
                self.agent_configurations['tunnel_types'] = []
                self.agent_configurations['bridge_mappings'] = {}
            else:
                self.agent_configurations['tunnel_types'] = \
                    self.conf.advertised_tunnel_types
                for net_id in self.conf.common_network_ids:
                    LOG.debug('network %s will be mapped to /Common/%s'
                              % (net_id, self.conf.common_network_ids[net_id]))
                self.agent_configurations['common_networks'] = \
                    self.conf.common_network_ids
                LOG.debug('Setting static ARP population to %s'
                          % self.conf.f5_populate_static_arp)
                self.agent_configurations['f5_common_external_networks'] = \
                    self.conf.f5_common_external_networks
                f5const.FDB_POPULATE_STATIC_ARP = \
                    self.conf.f5_populate_static_arp
            # parse the icontrol_hostname setting
            self._init_bigip_hostnames()
            # instantiate the managers
            self._init_bigip_managers()
            self.initialized = True
            LOG.debug('iControlDriver loaded successfully')
        except Exception as exc:
            LOG.error("exception in intializing driver %s" % str(exc))
            self._set_agent_status(False)
def connect(self):
# initialize communications wiht BIG-IP via iControl
try:
self._init_bigips()
except Exception as exc:
LOG.error("exception in intializing communicatoins to BIG-IPs %s"
% str(exc))
self._set_agent_status(False)
def _init_bigip_managers(self):
if self.conf.vlan_binding_driver:
try:
self.vlan_binding = importutils.import_object(
self.conf.vlan_binding_driver, self.conf, self)
except ImportError:
LOG.error('Failed to import VLAN binding driver: %s'
% self.conf.vlan_binding_driver)
if self.conf.l3_binding_driver:
print('self.conf.l3_binding_driver')
try:
self.l3_binding = importutils.import_object(
self.conf.l3_binding_driver, self.conf, self)
except ImportError:
LOG.error('Failed to import L3 binding driver: %s'
% self.conf.l3_binding_driver)
else:
LOG.debug('No L3 binding driver configured.'
' No L3 binding will be done.')
if self.conf.cert_manager:
try:
self.cert_manager = importutils.import_object(
self.conf.cert_manager, self.conf)
except ImportError as import_err:
LOG.error('Failed to import CertManager: %s.' %
import_err.message)
raise
except Exception as err:
LOG.error('Failed to initialize CertManager. %s' % err.message)
# re-raise as ImportError to cause agent exit
raise ImportError(err.message)
self.service_adapter = ServiceModelAdapter(self.conf)
self.tenant_manager = BigipTenantManager(self.conf, self)
self.cluster_manager = ClusterManager()
self.system_helper = SystemHelper()
self.lbaas_builder = LBaaSBuilder(self.conf, self)
if self.conf.f5_global_routed_mode:
self.network_builder = None
else:
self.network_builder = NetworkServiceBuilder(
self.conf.f5_global_routed_mode,
self.conf,
self,
self.l3_binding)
def _init_bigip_hostnames(self):
# Validate and parse bigip credentials
if not self.conf.icontrol_hostname:
raise InvalidConfigurationOption(
opt_name='icontrol_hostname',
opt_value='valid hostname or IP address'
)
if not self.conf.icontrol_username:
raise InvalidConfigurationOption(
opt_name='icontrol_username',
opt_value='valid username'
)
if not self.conf.icontrol_password:
raise InvalidConfigurationOption(
opt_name='icontrol_password',
opt_value='valid password'
)
self.hostnames = self.conf.icontrol_hostname.split(',')
self.hostnames = [item.strip() for item in self.hostnames]
self.hostnames = sorted(self.hostnames)
# initialize per host agent_configurations
for hostname in self.hostnames:
self.__bigips[hostname] = bigip = type('', (), {})()
bigip.hostname = hostname
bigip.status = 'creating'
bigip.status_message = 'creating BIG-IP from iControl hostnames'
self.agent_configurations[
'icontrol_endpoints'][hostname] = {}
self.agent_configurations[
'icontrol_endpoints'][hostname]['failover_state'] = \
'undiscovered'
self.agent_configurations[
'icontrol_endpoints'][hostname]['status'] = 'unknown'
self.agent_configurations[
'icontrol_endpoints'][hostname]['status_message'] = ''
def _init_bigips(self):
# Connect to all BIG-IP®s
if self.operational:
LOG.debug('iControl driver reports connection is operational')
return
LOG.debug('initializing communications to BIG-IPs')
try:
# setup logging options
if not self.conf.debug:
# sudslog = std_logging.getLogger('suds.client')
# sudslog.setLevel(std_logging.FATAL)
requests_log = std_logging.getLogger(
"requests.packages.urllib3")
requests_log.setLevel(std_logging.ERROR)
requests_log.propagate = False
else:
requests_log = std_logging.getLogger(
"requests.packages.urllib3")
requests_log.setLevel(std_logging.DEBUG)
requests_log.propagate = True
self.__last_connect_attempt = datetime.datetime.now()
for hostname in self.hostnames:
# connect to each BIG-IP and set it status
bigip = self._open_bigip(hostname)
if bigip.status == 'active':
# set the status down until we assure initialized
bigip.status = 'initializing'
bigip.status_message = 'initializing HA viability'
LOG.debug('initializing HA viability %s' % hostname)
device_group_name = None
if not self.ha_validated:
device_group_name = self._validate_ha(bigip)
LOG.debug('HA validated from %s with DSG %s' %
(hostname, device_group_name))
self.ha_validated = True
if not self.tg_initialized:
self._init_traffic_groups(bigip)
LOG.debug('known traffic groups initialized',
' from %s as %s' %
(hostname, self.__traffic_groups))
self.tg_initialized = True
LOG.debug('initializing bigip %s' % hostname)
self._init_bigip(bigip, hostname, device_group_name)
LOG.debug('initializing agent configurations %s'
% hostname)
self._init_agent_config(bigip)
# Assure basic BIG-IP HA is operational
LOG.debug('validating HA state for %s' % hostname)
bigip.status = 'validating_HA'
bigip.status_message = 'validating the current HA state'
if self._validate_ha_operational(bigip):
LOG.debug('setting status to active for %s' % hostname)
bigip.status = 'active'
bigip.status_message = 'BIG-IP ready for provisioning'
self._post_init()
else:
LOG.debug('setting status to error for %s' % hostname)
bigip.status = 'error'
bigip.status_message = 'BIG-IP is not operational'
self._set_agent_status(False)
else:
LOG.error('error opening BIG-IP %s - %s:%s'
% (hostname, bigip.status, bigip.status_message))
self._set_agent_status(False)
except Exception as exc:
LOG.error('Invalid agent configuration: %s' % exc.message)
raise
self._set_agent_status(force_resync=True)
def _init_errored_bigips(self):
try:
errored_bigips = self.get_errored_bigips_hostnames()
if errored_bigips:
LOG.debug('attempting to recover %s BIG-IPs' %
len(errored_bigips))
for hostname in errored_bigips:
# try to connect and set status
bigip = self._open_bigip(hostname)
if bigip.status == 'active':
# set the status down until we assure initialized
bigip.status = 'initializing'
bigip.status_message = 'initializing HA viability'
LOG.debug('initializing HA viability %s' % hostname)
LOG.debug('proceeding to initialize %s' % hostname)
device_group_name = None
if not self.ha_validated:
device_group_name = self._validate_ha(bigip)
LOG.debug('HA validated from %s with DSG %s' %
(hostname, device_group_name))
self.ha_validated = True
if not self.tg_initialized:
self._init_traffic_groups(bigip)
LOG.debug('known traffic groups initialized',
' from %s as %s' %
(hostname, self.__traffic_groups))
self.tg_initialized = True
LOG.debug('initializing bigip %s' % hostname)
self._init_bigip(bigip, hostname, device_group_name)
LOG.debug('initializing agent configurations %s'
% hostname)
self._init_agent_config(bigip)
# Assure basic BIG-IP HA is operational
LOG.debug('validating HA state for %s' % hostname)
bigip.status = 'validating_HA'
bigip.status_message = \
'validating the current HA state'
if self._validate_ha_operational(bigip):
LOG.debug('setting status to active for %s'
% hostname)
bigip.status = 'active'
bigip.status_message = \
'BIG-IP ready for provisioning'
self._post_init()
self._set_agent_status(True)
else:
LOG.debug('setting status to error for %s'
% hostname)
bigip.status = 'error'
bigip.status_message = 'BIG-IP is not operational'
self._set_agent_status(False)
else:
LOG.debug('there are no BIG-IPs with error status')
except Exception as exc:
LOG.error('Invalid agent configuration: %s' % exc.message)
raise
    def _open_bigip(self, hostname):
        """Open an iControl connection to ``hostname`` and track its status.

        On success the placeholder entry in self.__bigips is replaced by a
        connected ManagementRoot tagged 'active'; on failure by a stub
        object tagged 'error' so _init_errored_bigips() can retry later.

        :param hostname: BIG-IP hostname/IP address to connect to.
        :return: object carrying ``status``/``status_message`` attributes.
        """
        try:
            bigip = self.__bigips[hostname]
            # only (re)connect endpoints still pending or previously errored
            if bigip.status not in ['creating', 'error']:
                LOG.debug('BIG-IP %s status invalid %s to open a connection'
                          % (hostname, bigip.status))
                return bigip
            bigip.status = 'connecting'
            bigip.status_message = 'requesting iControl endpoint'
            LOG.info('opening iControl connection to %s @ %s' %
                     (self.conf.icontrol_username, hostname))
            # rebinds to the real connection object on success
            bigip = ManagementRoot(hostname,
                                   self.conf.icontrol_username,
                                   self.conf.icontrol_password,
                                   timeout=f5const.DEVICE_CONNECTION_TIMEOUT)
            bigip.status = 'active'
            bigip.status_message = 'connected to BIG-IP'
            self.__bigips[hostname] = bigip
            return bigip
        except Exception as exc:
            LOG.exception('could not communicate with ' +
                          'iControl device: %s' % hostname)
            # record the failure; message truncated to keep reports short
            errbigip = type('', (), {})()
            errbigip.hostname = hostname
            errbigip.status = 'error'
            errbigip.status_message = str(exc)[:80]
            self.__bigips[hostname] = errbigip
            return errbigip
    def _init_bigip(self, bigip, hostname, check_group_name=None):
        """Prepare a connected BIG-IP for use by the agent.

        Validates software version, provisioned management memory, HA /
        sync-failover configuration and presence of the common networks,
        then caches device identity (name, MACs, interfaces) and
        per-device assurance state on the bigip object.

        :param bigip: connected ManagementRoot for the device
        :param hostname: management address (used in error messages)
        :param check_group_name: if given, the device's sync-failover
            device group must match it
        :returns: the same bigip object, annotated
        :raises Exception: re-raised after setting bigip.status='error'
        """
        try:
            major_version, minor_version = self._validate_bigip_version(
                bigip, hostname)
            device_group_name = None
            # the agent requires extra management memory to be provisioned
            extramb = self.system_helper.get_provision_extramb(bigip)
            if int(extramb) < f5const.MIN_EXTRA_MB:
                raise f5ex.ProvisioningExtraMBValidateFailed(
                    'Device %s BIG-IP not provisioned for '
                    'management LARGE.' % hostname)
            # clustered HA modes are incompatible with Standalone sync status
            if self.conf.f5_ha_type == 'pair' and \
                    self.cluster_manager.get_sync_status(bigip) == \
                    'Standalone':
                raise f5ex.BigIPClusterInvalidHA(
                    'HA mode is pair and bigip %s in standalone mode'
                    % hostname)
            if self.conf.f5_ha_type == 'scalen' and \
                    self.cluster_manager.get_sync_status(bigip) == \
                    'Standalone':
                raise f5ex.BigIPClusterInvalidHA(
                    'HA mode is scalen and bigip %s in standalone mode'
                    % hostname)
            if self.conf.f5_ha_type != 'standalone':
                device_group_name = \
                    self.cluster_manager.get_device_group(bigip)
                if not device_group_name:
                    raise f5ex.BigIPClusterInvalidHA(
                        'HA mode is %s and no sync failover '
                        'device group found for device %s.'
                        % (self.conf.f5_ha_type, hostname))
                if check_group_name and device_group_name != check_group_name:
                    raise f5ex.BigIPClusterInvalidHA(
                        'Invalid HA. Device %s is in device group'
                        ' %s but should be in %s.'
                        % (hostname, device_group_name, check_group_name))
                bigip.device_group_name = device_group_name
            if self.network_builder:
                # every configured common network must already exist
                for network in self.conf.common_network_ids.values():
                    if not self.network_builder.vlan_exists(bigip,
                                                            network,
                                                            folder='Common'):
                        raise f5ex.MissingNetwork(
                            'Common network %s on %s does not exist'
                            % (network, bigip.hostname))
            bigip.device_name = self.cluster_manager.get_device_name(bigip)
            bigip.mac_addresses = self.system_helper.get_mac_addresses(bigip)
            LOG.debug("Initialized BIG-IP %s with MAC addresses %s" %
                      (bigip.device_name, ', '.join(bigip.mac_addresses)))
            bigip.device_interfaces = \
                self.system_helper.get_interface_macaddresses_dict(bigip)
            # per-device assurance caches (reset again by flush_cache)
            bigip.assured_networks = {}
            bigip.assured_tenant_snat_subnets = {}
            bigip.assured_gateway_subnets = []
            if self.conf.f5_ha_type != 'standalone':
                self.cluster_manager.disable_auto_sync(
                    device_group_name, bigip)
            # validate VTEP SelfIPs
            if not self.conf.f5_global_routed_mode:
                self.network_builder.initialize_tunneling(bigip)
            # Turn off tunnel syncing between BIG-IP
            # as our VTEPs properly use only local SelfIPs
            if self.system_helper.get_tunnel_sync(bigip) == 'enable':
                self.system_helper.set_tunnel_sync(bigip, enabled=False)
            LOG.debug('connected to iControl %s @ %s ver %s.%s'
                      % (self.conf.icontrol_username, hostname,
                         major_version, minor_version))
        except Exception as exc:
            bigip.status = 'error'
            bigip.status_message = str(exc)[:80]
            raise
        return bigip
    def _post_init(self):
        """Finish driver initialization after BIG-IP connections exist.

        Initializes vCMP, publishes dynamic agent configuration,
        registers VLAN/L3 bindings and loads enhanced service
        definitions (ESD) from the configuration directory.
        """
        # After we have a connection to the BIG-IPs, initialize vCMP
        # on all connected BIG-IPs
        if self.network_builder:
            self.network_builder.initialize_vcmp()
        self.agent_configurations['network_segment_physical_network'] = \
            self.conf.f5_network_segment_physical_network
        LOG.info('iControlDriver initialized to %d bigips with username:%s'
                 % (len(self.get_active_bigips()),
                    self.conf.icontrol_username))
        LOG.info('iControlDriver dynamic agent configurations:%s'
                 % self.agent_configurations)
        if self.vlan_binding:
            LOG.debug(
                'getting BIG-IP device interface for VLAN Binding')
            self.vlan_binding.register_bigip_interfaces()
        if self.l3_binding:
            LOG.debug('getting BIG-IP MAC Address for L3 Binding')
            self.l3_binding.register_bigip_mac_addresses()
        if self.network_builder:
            self.network_builder.post_init()
        # read enhanced services definitions
        esd_dir = os.path.join(self.get_config_dir(), 'esd')
        esd = EsdTagProcessor(esd_dir)
        try:
            esd.process_esd(self.get_all_bigips())
            self.lbaas_builder.init_esd(esd)
        except f5ex.esdJSONFileInvalidException as err:
            # NOTE(review): err.message is Python-2-only; under Python 3
            # this line itself would raise AttributeError -- confirm.
            LOG.error("unable to initialize ESD. Error: %s.", err.message)
            self._set_agent_status(False)
def _validate_ha(self, bigip):
# if there was only one address supplied and
# this is not a standalone device, get the
# devices trusted by this device. """
device_group_name = None
if self.conf.f5_ha_type == 'standalone':
if len(self.hostnames) != 1:
raise f5ex.BigIPClusterInvalidHA(
'HA mode is standalone and %d hosts found.'
% len(self.hostnames))
device_group_name = 'standalone'
elif self.conf.f5_ha_type == 'pair':
device_group_name = self.cluster_manager.\
get_device_group(bigip)
if len(self.hostnames) != 2:
mgmt_addrs = []
devices = self.cluster_manager.devices(bigip,
device_group_name)
for device in devices:
mgmt_addrs.append(
self.cluster_manager.get_mgmt_addr_by_device(device))
self.hostnames = mgmt_addrs
if len(self.hostnames) != 2:
raise f5ex.BigIPClusterInvalidHA(
'HA mode is pair and %d hosts found.'
% len(self.hostnames))
elif self.conf.f5_ha_type == 'scalen':
device_group_name = self.cluster_manager.\
get_device_group(bigip)
if len(self.hostnames) < 2:
mgmt_addrs = []
devices = self.cluster_manager.devices(bigip,
device_group_name)
for device in devices:
mgmt_addrs.append(
self.cluster_manager.get_mgmt_addr_by_device(
bigip, device))
self.hostnames = mgmt_addrs
return device_group_name
def _validate_ha_operational(self, bigip):
if self.conf.f5_ha_type == 'standalone':
return True
else:
# how many active BIG-IPs are there?
active_bigips = self.get_active_bigips()
if active_bigips:
sync_status = self.cluster_manager.get_sync_status(bigip)
if sync_status in ['Disconnected', 'Sync Failure']:
if len(active_bigips) > 1:
# the device should not be in the disconnected state
return False
else:
# it should be in the same sync-failover group
# as the rest of the active bigips
device_group_name = \
self.cluster_manager.get_device_group(bigip)
for ab in active_bigips:
adgn = self.cluster_manager.get_device_group(ab)
if not adgn == device_group_name:
return False
return True
else:
return True
def _init_agent_config(self, bigip):
# Init agent config
ic_host = {}
ic_host['version'] = self.system_helper.get_version(bigip)
ic_host['device_name'] = bigip.device_name
ic_host['platform'] = self.system_helper.get_platform(bigip)
ic_host['serial_number'] = self.system_helper.get_serial_number(bigip)
ic_host['status'] = bigip.status
ic_host['status_message'] = bigip.status_message
ic_host['failover_state'] = self.get_failover_state(bigip)
ic_host['local_ip'] = bigip.local_ip
self.agent_configurations['icontrol_endpoints'][bigip.hostname] = \
ic_host
if self.network_builder:
self.agent_configurations['bridge_mappings'] = \
self.network_builder.interface_mapping
def _set_agent_status(self, force_resync=False):
for hostname in self.__bigips:
bigip = self.__bigips[hostname]
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'status'] = bigip.status
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'status_message'] = bigip.status_message
# Policy - if any BIG-IP are active we're operational
if self.get_active_bigips():
self.operational = True
else:
self.operational = False
if self.agent_report_state:
self.agent_report_state(force_resync=force_resync)
def get_failover_state(self, bigip):
try:
if hasattr(bigip, 'tm'):
fs = bigip.tm.sys.dbs.db.load(name='failover.state')
bigip.failover_state = fs.value
return bigip.failover_state
else:
return 'error'
except Exception as exc:
LOG.exception('Error getting %s failover state' % bigip.hostname)
bigip.status = 'error'
bigip.status_message = str(exc)[:80]
self._set_agent_status(False)
return 'error'
def get_agent_configurations(self):
for hostname in self.__bigips:
bigip = self.__bigips[hostname]
if bigip.status == 'active':
failover_state = self.get_failover_state(bigip)
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'failover_state'] = failover_state
else:
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'failover_state'] = 'unknown'
self.agent_configurations['icontrol_endpoints'][
bigip.hostname]['status'] = bigip.status
self.agent_configurations['icontrol_endpoints'][
bigip.hostname]['status_message'] = bigip.status_message
self.agent_configurations['operational'] = \
self.operational
LOG.debug('agent configurations are: %s' % self.agent_configurations)
return dict(self.agent_configurations)
def recover_errored_devices(self):
# trigger a retry on errored BIG-IPs
try:
self._init_errored_bigips()
except Exception as exc:
LOG.error('Could not recover devices: %s' % exc.message)
def backend_integrity(self):
if self.operational:
return True
return False
    def generate_capacity_score(self, capacity_policy=None):
        """Generate the capacity score of connected devices.

        For each metric named in *capacity_policy* the driver looks up a
        method 'get_<metric>' on itself, evaluates it on every active
        device, and normalizes the largest observed value against the
        policy's configured maximum.  The score returned is the highest
        normalized metric across all metrics.

        :param capacity_policy: dict mapping metric name -> max capacity
        :returns: float score (0 when no policy is given)
        """
        if capacity_policy:
            highest_metric = 0.0
            highest_metric_name = None
            my_methods = dir(self)
            bigips = self.get_all_bigips()
            for metric in capacity_policy:
                func_name = 'get_' + metric
                if func_name in my_methods:
                    max_capacity = int(capacity_policy[metric])
                    metric_func = getattr(self, func_name)
                    metric_value = 0
                    for bigip in bigips:
                        if bigip.status == 'active':
                            global_stats = \
                                self.stat_helper.get_global_statistics(bigip)
                            value = int(
                                metric_func(bigip=bigip,
                                            global_statistics=global_stats)
                            )
                            LOG.debug('calling capacity %s on %s returned: %s'
                                      % (func_name, bigip.hostname, value))
                        else:
                            # inactive devices contribute nothing
                            value = 0
                        # track the busiest device for this metric
                        if value > metric_value:
                            metric_value = value
                    metric_capacity = float(metric_value) / float(max_capacity)
                    if metric_capacity > highest_metric:
                        highest_metric = metric_capacity
                        highest_metric_name = metric
                else:
                    LOG.warn('capacity policy has method '
                             '%s which is not implemented in this driver'
                             % metric)
            LOG.debug('capacity score: %s based on %s'
                      % (highest_metric, highest_metric_name))
            return highest_metric
        return 0
def set_context(self, context):
# Context to keep for database access
if self.network_builder:
self.network_builder.set_context(context)
def set_plugin_rpc(self, plugin_rpc):
# Provide Plugin RPC access
self.plugin_rpc = plugin_rpc
def set_tunnel_rpc(self, tunnel_rpc):
# Provide FDB Connector with ML2 RPC access
if self.network_builder:
self.network_builder.set_tunnel_rpc(tunnel_rpc)
def set_l2pop_rpc(self, l2pop_rpc):
# Provide FDB Connector with ML2 RPC access
if self.network_builder:
self.network_builder.set_l2pop_rpc(l2pop_rpc)
def set_agent_report_state(self, report_state_callback):
"""Set Agent Report State"""
self.agent_report_state = report_state_callback
def service_exists(self, service):
return self._service_exists(service)
def flush_cache(self):
# Remove cached objects so they can be created if necessary
for bigip in self.get_all_bigips():
bigip.assured_networks = {}
bigip.assured_tenant_snat_subnets = {}
bigip.assured_gateway_subnets = []
    @serialized('get_all_deployed_pools')
    @is_operational
    def get_all_deployed_pools(self):
        """Discover every pool deployed on the BIG-IPs.

        :returns: dict keyed by pool id with 'id', 'tenant_id' and the
            list of 'hostnames' the pool was found on
        """
        LOG.debug('getting all deployed pools on BIG-IPs')
        deployed_pool_dict = {}
        for bigip in self.get_all_bigips():
            folders = self.system_helper.get_folders(bigip)
            for folder in folders:
                # strip the OpenStack prefix to recover the tenant id
                tenant_id = folder[len(self.service_adapter.prefix):]
                if str(folder).startswith(self.service_adapter.prefix):
                    resource = resource_helper.BigIPResourceHelper(
                        resource_helper.ResourceType.pool)
                    deployed_pools = resource.get_resources(bigip, folder)
                    if deployed_pools:
                        for pool in deployed_pools:
                            pool_id = \
                                pool.name[len(self.service_adapter.prefix):]
                            if pool_id in deployed_pool_dict:
                                # seen on another device already
                                deployed_pool_dict[pool_id][
                                    'hostnames'].append(bigip.hostname)
                            else:
                                deployed_pool_dict[pool_id] = {
                                    'id': pool_id,
                                    'tenant_id': tenant_id,
                                    'hostnames': [bigip.hostname]
                                }
        return deployed_pool_dict
    @serialized('get_all_deployed_loadbalancers')
    @is_operational
    def get_all_deployed_loadbalancers(self, purge_orphaned_folders=False):
        """Discover every loadbalancer (virtual address) on the BIG-IPs.

        :param purge_orphaned_folders: when True, tenant folders that
            still contain no virtual addresses after a retry are purged
        :returns: dict keyed by loadbalancer id with 'id', 'tenant_id'
            and (on the first-pass path) the list of 'hostnames'
        """
        LOG.debug('getting all deployed loadbalancers on BIG-IPs')
        deployed_lb_dict = {}
        for bigip in self.get_all_bigips():
            folders = self.system_helper.get_folders(bigip)
            for folder in folders:
                # strip the OpenStack prefix to recover the tenant id
                tenant_id = folder[len(self.service_adapter.prefix):]
                if str(folder).startswith(self.service_adapter.prefix):
                    resource = resource_helper.BigIPResourceHelper(
                        resource_helper.ResourceType.virtual_address)
                    deployed_lbs = resource.get_resources(bigip, folder)
                    if deployed_lbs:
                        for lb in deployed_lbs:
                            lb_id = lb.name[len(self.service_adapter.prefix):]
                            if lb_id in deployed_lb_dict:
                                deployed_lb_dict[lb_id][
                                    'hostnames'].append(bigip.hostname)
                            else:
                                deployed_lb_dict[lb_id] = {
                                    'id': lb_id,
                                    'tenant_id': tenant_id,
                                    'hostnames': [bigip.hostname]
                                }
                    else:
                        # delay to assure we are not in the tenant creation
                        # process before a virtual address is created.
                        greenthread.sleep(10)
                        deployed_lbs = resource.get_resources(bigip, folder)
                        if deployed_lbs:
                            for lb in deployed_lbs:
                                lb_id = lb.name[
                                    len(self.service_adapter.prefix):]
                                # NOTE(review): this retry path does not
                                # record 'hostnames' -- confirm intended.
                                deployed_lb_dict[lb_id] = \
                                    {'id': lb_id, 'tenant_id': tenant_id}
                        else:
                            # Orphaned folder!
                            if purge_orphaned_folders:
                                try:
                                    self.system_helper.purge_folder_contents(
                                        bigip, folder)
                                    self.system_helper.purge_folder(
                                        bigip, folder)
                                    LOG.error('orphaned folder %s on %s' %
                                              (folder, bigip.hostname))
                                except Exception as exc:
                                    LOG.error('error purging folder %s: %s' %
                                              (folder, str(exc)))
        return deployed_lb_dict
@serialized('purge_orphaned_pool')
@is_operational
def purge_orphaned_pool(self, tenant_id=None, pool_id=None, hostnames=[]):
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
try:
pool_name = self.service_adapter.prefix + pool_id
partition = self.service_adapter.prefix + tenant_id
pool = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.pool).load(
bigip, pool_name, partition)
pool.delete()
except HTTPError as err:
if err.response.status_code == 404:
LOG.debug('pool %s not on BIG-IP %s.'
% (pool_id, bigip.hostname))
except Exception as exc:
LOG.exception('Exception purging pool %s' % str(exc))
@serialized('purge_orphaned_loadbalancer')
@is_operational
def purge_orphaned_loadbalancer(self, tenant_id=None,
loadbalancer_id=None, hostnames=[]):
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
try:
va_name = self.service_adapter.prefix + loadbalancer_id
partition = self.service_adapter.prefix + tenant_id
va = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual_address).load(
bigip, va_name, partition)
# get virtual services (listeners)
# referencing this virtual address
vses = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual).get_resources(
bigip, partition)
vs_dest_compare = '/' + partition + '/' + va.name
for vs in vses:
if str(vs.destination).startswith(vs_dest_compare):
if vs.pool:
pool = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.pool).load(
bigip, os.path.basename(vs.pool),
partition)
vs.delete()
pool.delete()
else:
vs.delete()
resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual_address).delete(
bigip, va_name, partition)
except HTTPError as err:
if err.response.status_code == 404:
LOG.debug('loadbalancer %s not on BIG-IP %s.'
% (loadbalancer_id, bigip.hostname))
except Exception as exc:
LOG.exception('Exception purging loadbalancer %s'
% str(exc))
@serialized('create_loadbalancer')
@is_operational
def create_loadbalancer(self, loadbalancer, service):
"""Create virtual server"""
return self._common_service_handler(service)
@serialized('update_loadbalancer')
@is_operational
def update_loadbalancer(self, old_loadbalancer, loadbalancer, service):
"""Update virtual server"""
# anti-pattern three args unused.
return self._common_service_handler(service)
@serialized('delete_loadbalancer')
@is_operational
def delete_loadbalancer(self, loadbalancer, service):
"""Delete loadbalancer"""
LOG.debug("Deleting loadbalancer")
return self._common_service_handler(
service,
delete_partition=True,
delete_event=True)
@serialized('create_listener')
@is_operational
def create_listener(self, listener, service):
"""Create virtual server"""
LOG.debug("Creating listener")
return self._common_service_handler(service)
@serialized('update_listener')
@is_operational
def update_listener(self, old_listener, listener, service):
"""Update virtual server"""
LOG.debug("Updating listener")
service['old_listener'] = old_listener
return self._common_service_handler(service)
@serialized('delete_listener')
@is_operational
def delete_listener(self, listener, service):
"""Delete virtual server"""
LOG.debug("Deleting listener")
return self._common_service_handler(service)
@serialized('create_pool')
@is_operational
def create_pool(self, pool, service):
"""Create lb pool"""
LOG.debug("Creating pool")
return self._common_service_handler(service)
@serialized('update_pool')
@is_operational
def update_pool(self, old_pool, pool, service):
"""Update lb pool"""
LOG.debug("Updating pool")
return self._common_service_handler(service)
@serialized('delete_pool')
@is_operational
def delete_pool(self, pool, service):
"""Delete lb pool"""
LOG.debug("Deleting pool")
return self._common_service_handler(service)
@serialized('create_member')
@is_operational
def create_member(self, member, service):
"""Create pool member"""
LOG.debug("Creating member")
return self._common_service_handler(service)
@serialized('update_member')
@is_operational
def update_member(self, old_member, member, service):
"""Update pool member"""
LOG.debug("Updating member")
return self._common_service_handler(service)
@serialized('delete_member')
@is_operational
def delete_member(self, member, service):
"""Delete pool member"""
LOG.debug("Deleting member")
return self._common_service_handler(service, delete_event=True)
@serialized('create_health_monitor')
@is_operational
def create_health_monitor(self, health_monitor, service):
"""Create pool health monitor"""
LOG.debug("Creating health monitor")
return self._common_service_handler(service)
@serialized('update_health_monitor')
@is_operational
def update_health_monitor(self, old_health_monitor,
health_monitor, service):
"""Update pool health monitor"""
LOG.debug("Updating health monitor")
return self._common_service_handler(service)
@serialized('delete_health_monitor')
@is_operational
def delete_health_monitor(self, health_monitor, service):
"""Delete pool health monitor"""
LOG.debug("Deleting health monitor")
return self._common_service_handler(service)
@is_operational
def get_stats(self, service):
lb_stats = {}
stats = ['clientside.bitsIn',
'clientside.bitsOut',
'clientside.curConns',
'clientside.totConns']
loadbalancer = service['loadbalancer']
try:
# sum virtual server stats for all BIG-IPs
vs_stats = self.lbaas_builder.get_listener_stats(service, stats)
# convert to bytes
lb_stats[lb_const.STATS_IN_BYTES] = \
vs_stats['clientside.bitsIn']/8
lb_stats[lb_const.STATS_OUT_BYTES] = \
vs_stats['clientside.bitsOut']/8
lb_stats[lb_const.STATS_ACTIVE_CONNECTIONS] = \
vs_stats['clientside.curConns']
lb_stats[lb_const.STATS_TOTAL_CONNECTIONS] = \
vs_stats['clientside.totConns']
# update Neutron
self.plugin_rpc.update_loadbalancer_stats(
loadbalancer['id'], lb_stats)
except Exception as e:
LOG.error("Error getting loadbalancer stats: %s", e.message)
finally:
return lb_stats
def fdb_add(self, fdb):
# Add (L2toL3) forwarding database entries
self.remove_ips_from_fdb_update(fdb)
for bigip in self.get_all_bigips():
self.network_builder.add_bigip_fdb(bigip, fdb)
def fdb_remove(self, fdb):
# Remove (L2toL3) forwarding database entries
self.remove_ips_from_fdb_update(fdb)
for bigip in self.get_all_bigips():
self.network_builder.remove_bigip_fdb(bigip, fdb)
def fdb_update(self, fdb):
# Update (L2toL3) forwarding database entries
self.remove_ips_from_fdb_update(fdb)
for bigip in self.get_all_bigips():
self.network_builder.update_bigip_fdb(bigip, fdb)
# remove ips from fdb update so we do not try to
# add static arps for them because we do not have
# enough information to determine the route domain
def remove_ips_from_fdb_update(self, fdb):
for network_id in fdb:
network = fdb[network_id]
mac_ips_by_vtep = network['ports']
for vtep in mac_ips_by_vtep:
mac_ips = mac_ips_by_vtep[vtep]
for mac_ip in mac_ips:
mac_ip[1] = None
    def tunnel_update(self, **kwargs):
        """Tunnel update notification from Neutron core RPC (no-op)."""
        pass
def tunnel_sync(self):
# Only sync when supported types are present
if not [i for i in self.agent_configurations['tunnel_types']
if i in ['gre', 'vxlan']]:
return False
tunnel_ips = []
for bigip in self.get_all_bigips():
if bigip.local_ip:
tunnel_ips.append(bigip.local_ip)
self.network_builder.tunnel_sync(tunnel_ips)
# Tunnel sync sent.
return False
@serialized('sync')
@is_operational
def sync(self, service):
"""Sync service defintion to device"""
# plugin_rpc may not be set when unit testing
if self.plugin_rpc:
# Get the latest service. It may have changed.
service = self.plugin_rpc.get_service_by_loadbalancer_id(
service['loadbalancer']['id']
)
if service['loadbalancer']:
return self._common_service_handler(service)
else:
LOG.debug("Attempted sync of deleted pool")
@serialized('backup_configuration')
@is_operational
def backup_configuration(self):
# Save Configuration on Devices
for bigip in self.get_all_bigips():
LOG.debug('_backup_configuration: saving device %s.'
% bigip.hostname)
self.cluster_manager.save_config(bigip)
def _get_monitor_endpoint(self, bigip, service):
monitor_type = self.service_adapter.get_monitor_type(service)
if not monitor_type:
monitor_type = ""
if monitor_type == "HTTPS":
hm = bigip.tm.ltm.monitor.https_s.https
elif monitor_type == "TCP":
hm = bigip.tm.ltm.monitor.tcps.tcp
elif monitor_type == "PING":
hm = bigip.tm.ltm.monitor.gateway_icmps.gateway_icmp
else:
hm = bigip.tm.ltm.monitor.https.http
return hm
def service_rename_required(self, service):
rename_required = False
# Returns whether the bigip has a pool for the service
if not service['loadbalancer']:
return False
bigips = self.get_config_bigips()
loadbalancer = service['loadbalancer']
# Does the correctly named virtual address exist?
for bigip in bigips:
virtual_address = VirtualAddress(self.service_adapter,
loadbalancer)
if not virtual_address.exists(bigip):
rename_required = True
break
return rename_required
    def service_object_teardown(self, service):
        """Delete every BIG-IP object belonging to *service*.

        Removes listeners (virtual servers), pools and health monitors
        from each configured device, resolving missing OpenStack names
        through the service adapter.  Returns False when the service
        carries no loadbalancer.
        """
        if not service['loadbalancer']:
            return False
        bigips = self.get_config_bigips()
        loadbalancer = service['loadbalancer']
        folder_name = self.service_adapter.get_folder_name(
            loadbalancer['tenant_id']
        )
        # Change to bigips
        for bigip in bigips:
            # Delete all virtuals
            v = bigip.tm.ltm.virtuals.virtual
            for listener in service['listeners']:
                l_name = listener.get("name", "")
                if not l_name:
                    # fall back to the adapter-generated name
                    svc = {"loadbalancer": loadbalancer,
                           "listener": listener}
                    vip = self.service_adapter.get_virtual(svc)
                    l_name = vip['name']
                if v.exists(name=l_name, partition=folder_name):
                    # Found a virtual that is named by the OS object,
                    # delete it.
                    l_obj = v.load(name=l_name, partition=folder_name)
                    LOG.warn("Deleting listener: /%s/%s" %
                             (folder_name, l_name))
                    l_obj.delete(name=l_name, partition=folder_name)
            # Delete all pools
            p = bigip.tm.ltm.pools.pool
            for os_pool in service['pools']:
                p_name = os_pool.get('name', "")
                if not p_name:
                    # fall back to the adapter-generated name
                    svc = {"loadbalancer": loadbalancer,
                           "pool": os_pool}
                    pool = self.service_adapter.get_pool(svc)
                    p_name = pool['name']
                if p.exists(name=p_name, partition=folder_name):
                    p_obj = p.load(name=p_name, partition=folder_name)
                    LOG.warn("Deleting pool: /%s/%s" % (folder_name, p_name))
                    p_obj.delete(name=p_name, partition=folder_name)
            # Delete all healthmonitors
            for healthmonitor in service['healthmonitors']:
                svc = {'loadbalancer': loadbalancer,
                       'healthmonitor': healthmonitor}
                monitor_ep = self._get_monitor_endpoint(bigip, svc)
                m_name = healthmonitor.get('name', "")
                if not m_name:
                    # fall back to the adapter-generated name
                    hm = self.service_adapter.get_healthmonitor(svc)
                    m_name = hm['name']
                if monitor_ep.exists(name=m_name, partition=folder_name):
                    m_obj = monitor_ep.load(name=m_name, partition=folder_name)
                    LOG.warn("Deleting monitor: /%s/%s" % (
                        folder_name, m_name))
                    m_obj.delete()
    def _service_exists(self, service):
        """Return True when every object in *service* exists on all
        configured BIG-IPs.

        Checks, per device: tenant folder, virtual address, each
        listener's virtual server, each pool (and its members), and
        each health monitor.  Logs and returns False on the first
        missing object.
        """
        if not service['loadbalancer']:
            return False
        loadbalancer = service['loadbalancer']
        folder_name = self.service_adapter.get_folder_name(
            loadbalancer['tenant_id']
        )
        # Foreach bigip in the cluster:
        for bigip in self.get_config_bigips():
            # Does the tenant folder exist?
            if not self.system_helper.folder_exists(bigip, folder_name):
                LOG.error("Folder %s does not exists on bigip: %s" %
                          (folder_name, bigip.hostname))
                return False
            # Get the virtual address
            virtual_address = VirtualAddress(self.service_adapter,
                                             loadbalancer)
            if not virtual_address.exists(bigip):
                LOG.error("Virtual address %s(%s) does not "
                          "exists on bigip: %s" % (virtual_address.name,
                                                   virtual_address.address,
                                                   bigip.hostname))
                return False
            # Ensure that each virtual service exists.
            for listener in service['listeners']:
                svc = {"loadbalancer": loadbalancer,
                       "listener": listener}
                virtual_server = self.service_adapter.get_virtual_name(svc)
                if not self.vs_manager.exists(bigip,
                                              name=virtual_server['name'],
                                              partition=folder_name):
                    LOG.error("Virtual /%s/%s not found on bigip: %s" %
                              (virtual_server['name'], folder_name,
                               bigip.hostname))
                    return False
            # Ensure that each pool exists.
            for pool in service['pools']:
                svc = {"loadbalancer": loadbalancer,
                       "pool": pool}
                bigip_pool = self.service_adapter.get_pool(svc)
                if not self.pool_manager.exists(
                        bigip,
                        name=bigip_pool['name'],
                        partition=folder_name):
                    LOG.error("Pool /%s/%s not found on bigip: %s" %
                              (bigip_pool['name'], folder_name,
                               bigip.hostname))
                    return False
                else:
                    # Ensure each pool member exists
                    for member in service['members']:
                        if member['pool_id'] == pool['id']:
                            lb = self.lbaas_builder
                            # NOTE(review): this rebinds the outer loop
                            # variable 'pool' -- presumably intentional
                            # (same id), but verify.
                            pool = lb.get_pool_by_id(
                                service, member["pool_id"])
                            svc = {"loadbalancer": loadbalancer,
                                   "member": member,
                                   "pool": pool}
                            if not lb.pool_builder.member_exists(svc, bigip):
                                return False
            # Ensure that each health monitor exists.
            for healthmonitor in service['healthmonitors']:
                svc = {"loadbalancer": loadbalancer,
                       "healthmonitor": healthmonitor}
                monitor = self.service_adapter.get_healthmonitor(svc)
                monitor_ep = self._get_monitor_endpoint(bigip, svc)
                if not monitor_ep.exists(name=monitor['name'],
                                         partition=folder_name):
                    LOG.error("Monitor /%s/%s not found on bigip: %s" %
                              (monitor['name'], folder_name, bigip.hostname))
                    return False
        return True
def get_loadbalancers_in_tenant(self, tenant_id):
loadbalancers = self.plugin_rpc.get_all_loadbalancers()
return [lb['lb_id'] for lb in loadbalancers
if lb['tenant_id'] == tenant_id]
def _common_service_handler(self, service,
delete_partition=False,
delete_event=False):
# Assure that the service is configured on bigip(s)
start_time = time()
lb_pending = True
do_service_update = True
if self.conf.trace_service_requests:
self.trace_service_requests(service)
loadbalancer = service.get("loadbalancer", None)
if not loadbalancer:
LOG.error("_common_service_handler: Service loadbalancer is None")
return lb_pending
lb_provisioning_status = loadbalancer.get("provisioning_status",
plugin_const.ERROR)
try:
try:
self.tenant_manager.assure_tenant_created(service)
except Exception as e:
LOG.error("Tenant folder creation exception: %s",
e.message)
if lb_provisioning_status != plugin_const.PENDING_DELETE:
loadbalancer['provisioning_status'] = \
plugin_const.ERROR
raise e
LOG.debug(" _assure_tenant_created took %.5f secs" %
(time() - start_time))
traffic_group = self.service_to_traffic_group(service)
loadbalancer['traffic_group'] = traffic_group
if self.network_builder:
start_time = time()
try:
self.network_builder.prep_service_networking(
service, traffic_group)
except f5ex.NetworkNotReady as error:
LOG.debug("Network creation deferred until network "
"definition is completed: %s",
error.message)
if not delete_event:
do_service_update = False
raise error
except Exception as error:
LOG.error("Prep-network exception: icontrol_driver: %s",
error.message)
if lb_provisioning_status != plugin_const.PENDING_DELETE:
loadbalancer['provisioning_status'] = \
plugin_const.ERROR
if not delete_event:
raise error
finally:
if time() - start_time > .001:
LOG.debug(" _prep_service_networking "
"took %.5f secs" % (time() - start_time))
all_subnet_hints = {}
for bigip in self.get_config_bigips():
# check_for_delete_subnets:
# keep track of which subnets we should check to delete
# for a deleted vip or member
# do_not_delete_subnets:
# If we add an IP to a subnet we must not delete the subnet
all_subnet_hints[bigip.device_name] = \
{'check_for_delete_subnets': {},
'do_not_delete_subnets': []}
LOG.debug("XXXXXXXXX: Pre assure service")
# pdb.set_trace()
self.lbaas_builder.assure_service(service,
traffic_group,
all_subnet_hints)
LOG.debug("XXXXXXXXX: Post assure service")
if self.network_builder:
start_time = time()
try:
self.network_builder.post_service_networking(
service, all_subnet_hints)
except Exception as error:
LOG.error("Post-network exception: icontrol_driver: %s",
error.message)
if lb_provisioning_status != plugin_const.PENDING_DELETE:
loadbalancer['provisioning_status'] = \
plugin_const.ERROR
raise error
if time() - start_time > .001:
LOG.debug(" _post_service_networking "
"took %.5f secs" % (time() - start_time))
except f5ex.NetworkNotReady as error:
pass
except Exception as err:
LOG.exception(err)
finally:
# only delete partition if loadbalancer is being deleted
if lb_provisioning_status == plugin_const.PENDING_DELETE:
self.tenant_manager.assure_tenant_cleanup(service,
all_subnet_hints)
if do_service_update:
self.update_service_status(service)
lb_provisioning_status = loadbalancer.get("provisioning_status",
plugin_const.ERROR)
lb_pending = \
(lb_provisioning_status == plugin_const.PENDING_CREATE or
lb_provisioning_status == plugin_const.PENDING_UPDATE)
return lb_pending
def update_service_status(self, service, timed_out=False):
"""Update status of objects in controller."""
LOG.debug("_update_service_status")
if not self.plugin_rpc:
LOG.error("Cannot update status in Neutron without "
"RPC handler.")
return
if 'members' in service:
# Call update_members_status
self._update_member_status(service['members'], timed_out)
if 'healthmonitors' in service:
# Call update_monitor_status
self._update_health_monitor_status(
service['healthmonitors']
)
if 'pools' in service:
# Call update_pool_status
self._update_pool_status(
service['pools']
)
if 'listeners' in service:
# Call update_listener_status
self._update_listener_status(service)
if 'l7policy_rules' in service:
self._update_l7rule_status(service['l7policy_rules'])
if 'l7policies' in service:
self._update_l7policy_status(service['l7policies'])
self._update_loadbalancer_status(service, timed_out)
def _update_member_status(self, members, timed_out):
"""Update member status in OpenStack """
for member in members:
if 'provisioning_status' in member:
provisioning_status = member['provisioning_status']
if (provisioning_status == plugin_const.PENDING_CREATE or
provisioning_status == plugin_const.PENDING_UPDATE):
if timed_out:
member['provisioning_status'] = plugin_const.ERROR
operating_status = lb_const.OFFLINE
else:
member['provisioning_status'] = plugin_const.ACTIVE
operating_status = lb_const.ONLINE
self.plugin_rpc.update_member_status(
member['id'],
member['provisioning_status'],
operating_status
)
elif provisioning_status == plugin_const.PENDING_DELETE:
self.plugin_rpc.member_destroyed(
member['id'])
elif provisioning_status == plugin_const.ERROR:
self.plugin_rpc.update_member_status(
member['id'],
plugin_const.ERROR,
lb_const.OFFLINE)
def _update_health_monitor_status(self, health_monitors):
"""Update pool monitor status in OpenStack """
for health_monitor in health_monitors:
if 'provisioning_status' in health_monitor:
provisioning_status = health_monitor['provisioning_status']
if (provisioning_status == plugin_const.PENDING_CREATE or
provisioning_status == plugin_const.PENDING_UPDATE):
self.plugin_rpc.update_health_monitor_status(
health_monitor['id'],
plugin_const.ACTIVE,
lb_const.ONLINE
)
health_monitor['provisioning_status'] = \
plugin_const.ACTIVE
elif provisioning_status == plugin_const.PENDING_DELETE:
self.plugin_rpc.health_monitor_destroyed(
health_monitor['id'])
elif provisioning_status == plugin_const.ERROR:
self.plugin_rpc.update_health_monitor_status(
health_monitor['id'])
@log_helpers.log_method_call
def _update_pool_status(self, pools):
    """Report pool provisioning results back to OpenStack."""
    pending = (plugin_const.PENDING_CREATE, plugin_const.PENDING_UPDATE)
    for pool in pools:
        if 'provisioning_status' not in pool:
            continue
        status = pool['provisioning_status']
        if status in pending:
            self.plugin_rpc.update_pool_status(
                pool['id'],
                plugin_const.ACTIVE,
                lb_const.ONLINE
            )
            pool['provisioning_status'] = plugin_const.ACTIVE
        elif status == plugin_const.PENDING_DELETE:
            self.plugin_rpc.pool_destroyed(pool['id'])
        elif status == plugin_const.ERROR:
            # Relies on the RPC's default error status arguments.
            self.plugin_rpc.update_pool_status(pool['id'])
@log_helpers.log_method_call
def _update_listener_status(self, service):
    """Report listener provisioning results back to OpenStack."""
    pending = (plugin_const.PENDING_CREATE, plugin_const.PENDING_UPDATE)
    for listener in service['listeners']:
        if 'provisioning_status' not in listener:
            continue
        status = listener['provisioning_status']
        if status in pending:
            self.plugin_rpc.update_listener_status(
                listener['id'],
                plugin_const.ACTIVE,
                listener['operating_status']
            )
            listener['provisioning_status'] = plugin_const.ACTIVE
        elif status == plugin_const.PENDING_DELETE:
            self.plugin_rpc.listener_destroyed(listener['id'])
        elif status == plugin_const.ERROR:
            self.plugin_rpc.update_listener_status(
                listener['id'],
                status,
                lb_const.OFFLINE)
@log_helpers.log_method_call
def _update_l7rule_status(self, l7rules):
    """Report l7rule provisioning results back to OpenStack."""
    pending = (plugin_const.PENDING_CREATE, plugin_const.PENDING_UPDATE)
    for rule in l7rules:
        if 'provisioning_status' not in rule:
            continue
        status = rule['provisioning_status']
        if status in pending:
            self.plugin_rpc.update_l7rule_status(
                rule['id'],
                rule['policy_id'],
                plugin_const.ACTIVE,
                lb_const.ONLINE
            )
        elif status == plugin_const.PENDING_DELETE:
            self.plugin_rpc.l7rule_destroyed(rule['id'])
        elif status == plugin_const.ERROR:
            # Relies on the RPC's default error status arguments.
            self.plugin_rpc.update_l7rule_status(
                rule['id'], rule['policy_id'])
@log_helpers.log_method_call
def _update_l7policy_status(self, l7policies):
    """Update l7policy status in OpenStack.

    Pending create/update policies are reported ACTIVE/ONLINE,
    pending-delete policies are destroyed, and errored policies are
    reported via the RPC's default error arguments.

    Note: previously the descriptive string sat *after* the LOG.debug
    call, making it a no-op expression statement instead of a
    docstring; it is now a real docstring.
    """
    LOG.debug("_update_l7policy_status")
    for l7policy in l7policies:
        if 'provisioning_status' in l7policy:
            provisioning_status = l7policy['provisioning_status']
            if (provisioning_status == plugin_const.PENDING_CREATE or
                    provisioning_status == plugin_const.PENDING_UPDATE):
                self.plugin_rpc.update_l7policy_status(
                    l7policy['id'],
                    plugin_const.ACTIVE,
                    lb_const.ONLINE
                )
            elif provisioning_status == plugin_const.PENDING_DELETE:
                LOG.debug("calling l7policy_destroyed")
                self.plugin_rpc.l7policy_destroyed(
                    l7policy['id'])
            elif provisioning_status == plugin_const.ERROR:
                self.plugin_rpc.update_l7policy_status(l7policy['id'])
@log_helpers.log_method_call
def _update_loadbalancer_status(self, service, timed_out=False):
    """Update loadbalancer status in OpenStack.

    Missing 'provisioning_status' defaults to ERROR. On a timeout,
    a pending CREATE becomes ERROR while a pending UPDATE falls back
    to ACTIVE (the previous configuration is assumed to still stand).
    """
    loadbalancer = service.get('loadbalancer', {})
    provisioning_status = loadbalancer.get('provisioning_status',
                                           plugin_const.ERROR)
    if (provisioning_status == plugin_const.PENDING_CREATE or
            provisioning_status == plugin_const.PENDING_UPDATE):
        if timed_out:
            operating_status = (lb_const.OFFLINE)
            if provisioning_status == plugin_const.PENDING_CREATE:
                loadbalancer['provisioning_status'] = \
                    plugin_const.ERROR
            else:
                loadbalancer['provisioning_status'] = \
                    plugin_const.ACTIVE
        else:
            operating_status = (lb_const.ONLINE)
            loadbalancer['provisioning_status'] = \
                plugin_const.ACTIVE
        self.plugin_rpc.update_loadbalancer_status(
            loadbalancer['id'],
            loadbalancer['provisioning_status'],
            operating_status)
    elif provisioning_status == plugin_const.PENDING_DELETE:
        self.plugin_rpc.loadbalancer_destroyed(
            loadbalancer['id'])
    elif provisioning_status == plugin_const.ERROR:
        self.plugin_rpc.update_loadbalancer_status(
            loadbalancer['id'],
            provisioning_status,
            lb_const.OFFLINE)
    elif provisioning_status == plugin_const.ACTIVE:
        LOG.debug('Loadbalancer provisioning status is active')
    else:
        LOG.error('Loadbalancer provisioning status is invalid')
@is_operational
def update_operating_status(self, service):
    """Refresh member operating status from the device and push to Neutron."""
    if 'members' in service:
        if self.network_builder:
            # append route domain to member address
            self.network_builder._annotate_service_route_domains(service)
        # get current member status
        self.lbaas_builder.update_operating_status(service)
        # update Neutron (only members already ACTIVE are refreshed;
        # provisioning_status=None leaves it unchanged on the plugin side)
        for member in service['members']:
            if member['provisioning_status'] == plugin_const.ACTIVE:
                operating_status = member.get('operating_status', None)
                self.plugin_rpc.update_member_status(
                    member['id'],
                    provisioning_status=None,
                    operating_status=operating_status)
def get_active_bigip(self):
    """Return the active BIG-IP, falling back to the first known device."""
    bigips = self.get_all_bigips()
    if len(bigips) == 1:
        return bigips[0]
    for candidate in bigips:
        if getattr(candidate, 'failover_state', None) == 'active':
            return candidate
    # Could not determine the active unit; default to the first one.
    return bigips[0]
def service_to_traffic_group(self, service):
    """Map a service to a traffic group by hashing its tenant id."""
    tenant_id = service['loadbalancer']['tenant_id']
    return self.tenant_to_traffic_group(tenant_id)
def tenant_to_traffic_group(self, tenant_id):
    """Deterministically map a tenant id onto one of the traffic groups.

    md5 is used only for uniform distribution, not for security.
    hashlib.md5 requires bytes on Python 3, so encode text input first
    (a no-op change on Python 2 for the ASCII tenant ids in use).
    """
    if isinstance(tenant_id, str):
        tenant_id = tenant_id.encode('utf-8')
    hexhash = hashlib.md5(tenant_id).hexdigest()
    tg_index = int(hexhash, 16) % len(self.__traffic_groups)
    return self.__traffic_groups[tg_index]
# these functions should return only active BIG-IPs,
# not errored BIG-IPs.
def get_bigip(self):
    """Return the first active BIG-IP by sorted hostname (None if none)."""
    for host in sorted(self.__bigips):
        device = self.__bigips[host]
        if getattr(device, 'status', None) == 'active':
            return device
def get_bigip_hosts(self):
    """Return sorted hostnames of BIG-IPs whose status is 'active'."""
    active = [host for host in self.__bigips
              if getattr(self.__bigips[host], 'status', None) == 'active']
    return sorted(active)
def get_all_bigips(self):
    """Return every BIG-IP connection whose status is 'active'."""
    return [self.__bigips[host] for host in self.__bigips
            if getattr(self.__bigips[host], 'status', None) == 'active']
def get_config_bigips(self):
    # Configuration targets are currently the same set as the active BIG-IPs.
    return self.get_all_bigips()
# these are the refactored methods
def get_active_bigips(self):
    # Refactored alias: "active" set equals the all-bigips set.
    return self.get_all_bigips()
def get_errored_bigips_hostnames(self):
    """Return hostnames of BIG-IPs currently in the 'error' state."""
    return [host for host, device in self.__bigips.items()
            if getattr(device, 'status', None) == 'error']
def get_inbound_throughput(self, bigip, global_statistics=None):
    # Delegates to stat_helper; global_statistics is passed through as
    # the optional global_stats argument.
    return self.stat_helper.get_inbound_throughput(
        bigip, global_stats=global_statistics)
def get_outbound_throughput(self, bigip, global_statistics=None):
    # Delegates to stat_helper; global_statistics is passed through as
    # the optional global_stats argument.
    return self.stat_helper.get_outbound_throughput(
        bigip, global_stats=global_statistics)
def get_throughput(self, bigip=None, global_statistics=None):
    # Delegates to stat_helper; note bigip defaults to None here,
    # unlike the inbound/outbound variants.
    return self.stat_helper.get_throughput(
        bigip, global_stats=global_statistics)
def get_active_connections(self, bigip=None, global_statistics=None):
    # Delegates to stat_helper's active connection counter.
    return self.stat_helper.get_active_connection_count(
        bigip, global_stats=global_statistics)
def get_ssltps(self, bigip=None, global_statistics=None):
    # Delegates to stat_helper's SSL transactions-per-second counter.
    return self.stat_helper.get_active_SSL_TPS(
        bigip, global_stats=global_statistics)
def get_node_count(self, bigip=None, global_statistics=None):
    # Counts LTM node objects directly on the device (global_statistics
    # is accepted for signature symmetry but unused here).
    return len(bigip.tm.ltm.nodes.get_collection())
def get_clientssl_profile_count(self, bigip=None, global_statistics=None):
    # Delegates to the SSL profile helper (global_statistics unused).
    return ssl_profile.SSLProfileHelper.get_client_ssl_profile_count(bigip)
def get_tenant_count(self, bigip=None, global_statistics=None):
    # Counts tenant folders via the system helper (global_statistics unused).
    return self.system_helper.get_tenant_folder_count(bigip)
def get_tunnel_count(self, bigip=None, global_statistics=None):
    # Counts tunnels via the network helper (global_statistics unused).
    return self.network_helper.get_tunnel_count(bigip)
def get_vlan_count(self, bigip=None, global_statistics=None):
    # Counts VLANs via the network helper (global_statistics unused).
    return self.network_helper.get_vlan_count(bigip)
def get_route_domain_count(self, bigip=None, global_statistics=None):
    # Counts route domains via the network helper (global_statistics unused).
    return self.network_helper.get_route_domain_count(bigip)
def _init_traffic_groups(self, bigip):
    """Discover and cache the device traffic groups (sorted), excluding
    the local-only group."""
    groups = self.cluster_manager.get_traffic_groups(bigip)
    if 'traffic-group-local-only' in groups:
        groups.remove('traffic-group-local-only')
    self.__traffic_groups = sorted(groups)
def _validate_bigip_version(self, bigip, hostname):
    """Ensure the BIG-IP runs at least the minimum supported TMOS version.

    :return: (major_version, minor_version) on success.
    :raises MajorVersionValidateFailed / MinorVersionValidateFailed:
        when the device is older than MIN_TMOS_MAJOR.MIN_TMOS_MINOR.
    """
    major_version = self.system_helper.get_major_version(bigip)
    if major_version < f5const.MIN_TMOS_MAJOR_VERSION:
        raise f5ex.MajorVersionValidateFailed(
            'Device %s must be at least TMOS %s.%s'
            % (hostname, f5const.MIN_TMOS_MAJOR_VERSION,
               f5const.MIN_TMOS_MINOR_VERSION))
    minor_version = self.system_helper.get_minor_version(bigip)
    # Only compare minors when the major equals the minimum; previously
    # a newer major (e.g. 12.0 against a 11.5 minimum) was incorrectly
    # rejected because its minor (0) was below the minimum minor (5).
    if (major_version == f5const.MIN_TMOS_MAJOR_VERSION and
            minor_version < f5const.MIN_TMOS_MINOR_VERSION):
        raise f5ex.MinorVersionValidateFailed(
            'Device %s must be at least TMOS %s.%s'
            % (hostname, f5const.MIN_TMOS_MAJOR_VERSION,
               f5const.MIN_TMOS_MINOR_VERSION))
    return major_version, minor_version
@serialized('create_l7policy')
@is_operational
def create_l7policy(self, l7policy, service):
    """Create lb l7policy.

    The l7policy argument itself is unused; _common_service_handler
    applies the whole service description.
    """
    LOG.debug("Creating l7policy")
    self._common_service_handler(service)
@serialized('update_l7policy')
@is_operational
def update_l7policy(self, old_l7policy, l7policy, service):
    """Update lb l7policy.

    Both l7policy arguments are unused; _common_service_handler
    applies the whole service description.
    """
    LOG.debug("Updating l7policy")
    self._common_service_handler(service)
@serialized('delete_l7policy')
@is_operational
def delete_l7policy(self, l7policy, service):
    """Delete lb l7policy.

    The l7policy argument itself is unused; _common_service_handler
    applies the whole service description.
    """
    LOG.debug("Deleting l7policy")
    self._common_service_handler(service)
@serialized('create_l7rule')
@is_operational
def create_l7rule(self, pool, service):
    """Create lb l7rule.

    The pool argument is unused; _common_service_handler applies the
    whole service description.
    """
    LOG.debug("Creating l7rule")
    self._common_service_handler(service)
@serialized('update_l7rule')
@is_operational
def update_l7rule(self, old_l7rule, l7rule, service):
    """Update lb l7rule.

    Both l7rule arguments are unused; _common_service_handler applies
    the whole service description.
    """
    LOG.debug("Updating l7rule")
    self._common_service_handler(service)
@serialized('delete_l7rule')
@is_operational
def delete_l7rule(self, l7rule, service):
    """Delete lb l7rule.

    The l7rule argument is unused; _common_service_handler applies the
    whole service description.
    """
    LOG.debug("Deleting l7rule")
    self._common_service_handler(service)
def trace_service_requests(self, service):
    """Append a service request to the JSON trace file.

    NOTE(review): assumes self.file_name already exists, is non-empty
    and ends with ']' (a JSON array); the final byte is overwritten
    with ',' and the array re-terminated after dumping the new entry.
    Fails on an empty or missing file -- confirm the trace file is
    seeded elsewhere before first use.
    """
    with open(self.file_name, 'r+') as fp:
        # Seek to the last byte of the file (the closing ']').
        fp.seek(-1, 2)
        fp.write(',')
        json.dump(service, fp, sort_keys=True, indent=2)
        fp.write(']')
def get_config_dir(self):
    """Determine the F5 agent configuration directory.

    Order of precedence: the oslo ``config_dir`` option (when it is a
    plain string); then the directory of the f5-openstack-agent.ini
    entry in ``config_file`` (which is usually a list of neutron and
    agent files); then the directory of a single-string
    ``config_file``; and finally /etc/neutron/services/f5.

    :return: str defining configuration directory.
    """
    conf_dir = self.conf.config_dir
    if conf_dir and isinstance(conf_dir, str):
        # use config_dir parameter if defined, and is a string
        return conf_dir
    conf_file = self.conf.config_file
    if conf_file:
        if isinstance(conf_file, list):
            # locate the agent's own INI file among the config files
            for candidate in conf_file:
                if 'f5-openstack-agent.ini' in candidate:
                    return os.path.dirname(candidate)
        elif isinstance(conf_file, str):
            # not a list, just a single string
            return os.path.dirname(conf_file)
    # if all else fails
    return '/etc/neutron/services/f5'
|
from os import listdir
from os.path import isfile
from sqlalchemy import join
__author__ = 'kiro'
trend_base = "/home/kiro/ntnu/master/code/twitter/trend-data/"
def filename_separation(folder):
    """Collect trend files from *folder*.

    Scans the folder, skipping special ("_"-prefixed) files and
    ".meta" metadata files, and gathers the already-sorted trend files.

    @param folder: the folder containing tweet files.
    @return: list of trend file names, sorted in descending order.
    """
    # The module-level ``join`` is mistakenly imported from sqlalchemy;
    # shadow it with the intended os.path.join here.
    from os.path import join
    files = [f for f in listdir(folder) if isfile(join(folder, f))]
    # list.sort() returns None, so the original ``files.sort().reverse()``
    # raised AttributeError; sort descending in a single call instead.
    files.sort(reverse=True)
    trend_files = []
    for filename in files:
        # disregard special files.
        if filename[0] == "_":
            continue
        # skipping the metadata files.
        if ".meta" in filename:
            continue
        # trend files already contain sorted tweets; collect them as-is.
        if "trend" in filename:
            trend_files.append(filename)
    return trend_files
# Script entry point: separate trend files from raw tweet files in the
# default trend data folder.
if __name__ == "__main__":
    filename_separation(trend_base)
Skeleton of trend compilation in place; need to add actual classification and weighting
import ast
import codecs
from os import listdir
from os.path import isfile, join
import random
import matplotlib.pyplot as plt
__author__ = 'kiro'
trend_base = "/home/kiro/ntnu/master/code/twitter/trend-data/"
def calculate_trend_contribution(classifier, tweet):
    # Stub: will compute the polarity contribution of a single tweet
    # using the trained classifier. Currently returns None.
    return
def calculcate_polarity_by_day(intra_day):
    # Stub: will aggregate per-tweet polarities into a daily value.
    # NOTE(review): name has a typo ("calculcate"); the commented-out
    # caller in compile_trend uses the same spelling -- rename both together.
    pass
def compile_trend(trend_files):
    """Prototype: build and plot a trend from per-day trend files.

    Classification is not wired up yet: each tweet currently contributes
    a random value and the resulting series is plotted with matplotlib.
    """
    classifier = []  # todo import trained classifier from classification code.
    trend = []
    # get all previously observed tweets.
    # for all trend files
    for filename in trend_files:
        lines = codecs.open(trend_base + filename, 'r', "utf-8")
        # for all lines in file
        intra_day = []
        for line in lines.readlines():
            # calculate tweet trend contribution. aka Polarity of a tweet.
            #intra_day.append(calculate_trend_contribution(classifier, ast.literal_eval(line)))
            intra_day.append(random.random()*10)
        # calculate the polarity of given day based on input tweets.
        #trend.append(["-".join(filename.split("-")[1:]), calculcate_polarity_by_day(intra_day)])
        # NOTE(review): 'trend' appears to be reassigned on every file
        # iteration, so only the last file's values survive -- confirm
        # this placeholder is intended until the append above is enabled.
        trend = [i for i in intra_day]
    x = [i for i in range(0, len(trend))]
    plt.plot(x, trend)
    plt.show()
    return
def filename_separation(folder):
    """
    Run trend file compilation with all wanted files in the folder.

    Collects the pre-sorted trend files (skipping "_"-prefixed and
    ".meta" files) and hands them to compile_trend.
    @param folder: the folder containing tweet files.
    """
    files = [f for f in listdir(folder) if isfile(join(folder, f))]
    trend_files = []
    files.sort()
    for filename in files:
        # disregard special files.
        if filename[0] == "_":
            continue
        # skipping the metadata files.
        if ".meta" in filename:
            continue
        # don't aggregate the trend files, the trend files contain
        # already sorted tweets.
        if "trend" in filename:
            trend_files.append(filename)
            continue
        # (raw tweet files are currently ignored here.)
    compile_trend(trend_files)
# Script entry point: compile and plot trends from the default data folder.
if __name__ == "__main__":
    filename_separation(trend_base)
|
# _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Contact: ops@keepersecurity.com
#
import os
import random
import string
def randomSample(sampleLength=0, sampleString=''):
    """Return sampleLength characters drawn uniformly from sampleString.

    Uses the OS CSPRNG via random.SystemRandom. The previous
    ``int.from_bytes(os.urandom(2)) % len(...)`` scheme had a slight
    modulo bias toward the start of sampleString; SystemRandom.choice
    is unbiased. (An empty sampleString with sampleLength > 0 now
    raises IndexError instead of ZeroDivisionError.)
    """
    rng = random.SystemRandom()
    return ''.join(rng.choice(sampleString) for _ in range(sampleLength))
def rules(uppercase=0, lowercase=0, digits=0, special_characters=0):
    """Generate a password containing exactly the requested number of
    uppercase, lowercase, digit and punctuation characters, shuffled.

    (The original had two adjacent docstring-like strings; only the
    first was a docstring, the second a no-op statement -- merged.)
    """
    password = ''
    if uppercase:
        password += randomSample(uppercase, string.ascii_uppercase)
    if lowercase:
        password += randomSample(lowercase, string.ascii_lowercase)
    if digits:
        password += randomSample(digits, string.digits)
    if special_characters:
        password += randomSample(special_characters, string.punctuation)
    # Shuffle with the OS CSPRNG so character positions are not
    # predictable from the default Mersenne Twister state.
    chars = list(password)
    random.SystemRandom().shuffle(chars)
    return ''.join(chars)
def generateFromRules(rulestring):
    """Generate a password from a rule string similar to "4,5,2,5".

    The four comma-separated integers are the uppercase, lowercase,
    digit and special-character counts; missing fields default to 0 and
    extras are ignored. The original ``filter(str.isdigit, rulestring)``
    walked the string one character at a time, silently mis-parsing
    multi-digit counts such as "10,5,2,5"; fields are now split on
    commas and parsed as whole integers.
    """
    counts = [0, 0, 0, 0]
    for index, field in enumerate(rulestring.split(',')[:4]):
        field = field.strip()
        if field.isdigit():
            counts[index] = int(field)
    return rules(*counts)
def generate(length=64):
    """Generate a password of the specified length, split evenly across
    the four character classes (remainder goes to special characters)."""
    quarter, remainder = divmod(length, 4)
    return rules(quarter, quarter, quarter, quarter + remainder)
Improve password generation - KC-207 (#383)
# _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Contact: ops@keepersecurity.com
#
import logging
import os
import random
import string
def randomSample(sampleLength=0, sampleString=''):
    """Return sampleLength characters drawn from sampleString.

    Prefers the ``secrets`` module (Python 3.6+); otherwise falls back
    to reading the OS CSPRNG through os.urandom.
    """
    sample = ''
    use_secrets = False
    try:
        # Older version of Python (before 3.6) don't have this module.
        # If not installed, fall back to the original version of the code
        import secrets
        logging.debug("module 'secrets' is installed")
        use_secrets = True
    except ImportError:
        # Catch ImportError, not ModuleNotFoundError: the latter only
        # exists on Python 3.6+, so naming it here raised NameError on
        # exactly the interpreters that need the fallback.
        logging.warning("module 'secrets' is not installed")
    for i in range(sampleLength):
        if use_secrets:
            sample += secrets.choice(sampleString)
        else:
            # NOTE: this fallback has a slight modulo bias.
            pos = int.from_bytes(os.urandom(2), 'big') % len(sampleString)
            sample += sampleString[pos]
    return sample
def rules(uppercase=0, lowercase=0, digits=0, special_characters=0):
    """Generate a password containing exactly the requested number of
    uppercase, lowercase, digit and punctuation characters, shuffled.

    (The original had two adjacent docstring-like strings; only the
    first was a docstring, the second a no-op statement -- merged.)
    """
    password = ''
    if uppercase:
        password += randomSample(uppercase, string.ascii_uppercase)
    if lowercase:
        password += randomSample(lowercase, string.ascii_lowercase)
    if digits:
        password += randomSample(digits, string.digits)
    if special_characters:
        password += randomSample(special_characters, string.punctuation)
    # Shuffle with the OS CSPRNG so character positions are not
    # predictable from the default Mersenne Twister state.
    chars = list(password)
    random.SystemRandom().shuffle(chars)
    return ''.join(chars)
def generateFromRules(rulestring):
    """Generate a password from a rule string similar to "4,5,2,5".

    The four comma-separated integers are the uppercase, lowercase,
    digit and special-character counts; missing fields default to 0 and
    extras are ignored. The original ``filter(str.isdigit, rulestring)``
    walked the string one character at a time, silently mis-parsing
    multi-digit counts such as "10,5,2,5"; fields are now split on
    commas and parsed as whole integers.
    """
    counts = [0, 0, 0, 0]
    for index, field in enumerate(rulestring.split(',')[:4]):
        field = field.strip()
        if field.isdigit():
            counts[index] = int(field)
    return rules(*counts)
def generate(length=64):
    """Generate a password of the specified length, split evenly across
    the four character classes (remainder goes to special characters)."""
    quarter, remainder = divmod(length, 4)
    return rules(quarter, quarter, quarter, quarter + remainder)
|
# coding: utf-8
import hashlib
import json
import uuid
from datetime import datetime, timedelta as td
from croniter import croniter
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils import timezone
from hc.accounts.models import Project
from hc.api import transports
from hc.lib import emails
import pytz
# Displayable check states.
STATUSES = (("up", "Up"), ("down", "Down"), ("new", "New"), ("paused", "Paused"))
# Default simple-check period and grace window.
DEFAULT_TIMEOUT = td(days=1)
DEFAULT_GRACE = td(hours=1)
# Far-future sentinel used by Check.get_grace_start() instead of None.
NEVER = datetime(3000, 1, 1, tzinfo=pytz.UTC)
CHECK_KINDS = (("simple", "Simple"), ("cron", "Cron"))
# Supported notification channel types (must match Channel.transport).
CHANNEL_KINDS = (
    ("email", "Email"),
    ("webhook", "Webhook"),
    ("hipchat", "HipChat"),
    ("slack", "Slack"),
    ("pd", "PagerDuty"),
    ("pagertree", "PagerTree"),
    ("pagerteam", "Pager Team"),
    ("po", "Pushover"),
    ("pushbullet", "Pushbullet"),
    ("opsgenie", "OpsGenie"),
    ("victorops", "VictorOps"),
    ("discord", "Discord"),
    ("telegram", "Telegram"),
    ("sms", "SMS"),
    ("zendesk", "Zendesk"),
    ("trello", "Trello"),
    ("matrix", "Matrix"),
    ("whatsapp", "WhatsApp"),
)
# Pushover priority names keyed by numeric priority.
PO_PRIORITIES = {-2: "lowest", -1: "low", 0: "normal", 1: "high", 2: "emergency"}
def isostring(dt):
    """Convert the datetime to ISO 8601 format with no microseconds.

    Falsy input (None) passes through as None.
    """
    if not dt:
        return None
    return dt.replace(microsecond=0).isoformat()
class Check(models.Model):
    """A monitored job: expects periodic pings and tracks up/down state."""

    name = models.CharField(max_length=100, blank=True)
    # Space-separated tag words; see tags_list().
    tags = models.CharField(max_length=500, blank=True)
    # Public identifier used in ping URLs and emails.
    code = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
    desc = models.TextField(blank=True)
    project = models.ForeignKey(Project, models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    # "simple" uses `timeout`; "cron" uses `schedule` + `tz`.
    kind = models.CharField(max_length=10, default="simple", choices=CHECK_KINDS)
    timeout = models.DurationField(default=DEFAULT_TIMEOUT)
    grace = models.DurationField(default=DEFAULT_GRACE)
    schedule = models.CharField(max_length=100, default="* * * * *")
    tz = models.CharField(max_length=36, default="UTC")
    subject = models.CharField(max_length=100, blank=True)
    # Ping bookkeeping, updated by ping().
    n_pings = models.IntegerField(default=0)
    last_ping = models.DateTimeField(null=True, blank=True)
    last_start = models.DateTimeField(null=True, blank=True)
    # NOTE(review): NullBooleanField is deprecated in newer Django
    # releases in favor of BooleanField(null=True) -- confirm target version.
    last_ping_was_fail = models.NullBooleanField(default=False)
    has_confirmation_link = models.BooleanField(default=False)
    alert_after = models.DateTimeField(null=True, blank=True, editable=False)
    status = models.CharField(max_length=6, choices=STATUSES, default="new")

    def __str__(self):
        return "%s (%d)" % (self.name or self.code, self.id)

    def name_then_code(self):
        # Display helper: prefer the human name, fall back to the UUID.
        if self.name:
            return self.name

        return str(self.code)

    def url(self):
        # Public ping URL for this check.
        return settings.PING_ENDPOINT + str(self.code)

    def details_url(self):
        return settings.SITE_ROOT + reverse("hc-details", args=[self.code])

    def email(self):
        # Ping-by-email address for this check.
        return "%s@%s" % (self.code, settings.PING_EMAIL_DOMAIN)

    def get_grace_start(self):
        """ Return the datetime when the grace period starts.

        If the check is currently new, paused or down, return None.

        """

        # NEVER is a constant sentinel value (year 3000).
        # Using None instead would make the logic clunky.
        result = NEVER

        if self.kind == "simple" and self.status == "up":
            result = self.last_ping + self.timeout
        elif self.kind == "cron" and self.status == "up":
            # The complex case, next ping is expected based on cron schedule.
            # Don't convert to naive datetimes (and so avoid ambiguities around
            # DST transitions). Croniter will handle the timezone-aware datetimes.
            zone = pytz.timezone(self.tz)
            last_local = timezone.localtime(self.last_ping, zone)
            it = croniter(self.schedule, last_local)
            result = it.next(datetime)

        # A started-but-unfinished run moves the grace start earlier.
        if self.last_start and self.status != "down":
            result = min(result, self.last_start)

        if result != NEVER:
            return result

    def going_down_after(self):
        """ Return the datetime when the check goes down.

        If the check is new or paused, and not currently running, return None.
        If the check is already down, also return None.

        """

        grace_start = self.get_grace_start()
        if grace_start is not None:
            return grace_start + self.grace

    def get_status(self, now=None, with_started=True):
        """ Return current status for display. """

        if now is None:
            now = timezone.now()

        # A run in progress overrides the stored status.
        if self.last_start:
            if now >= self.last_start + self.grace:
                return "down"
            elif with_started:
                return "started"

        if self.status in ("new", "paused", "down"):
            return self.status

        grace_start = self.get_grace_start()
        grace_end = grace_start + self.grace
        if now >= grace_end:
            return "down"

        if now >= grace_start:
            return "grace"

        return "up"

    def assign_all_channels(self):
        channels = Channel.objects.filter(project=self.project)
        self.channel_set.set(channels)

    def tags_list(self):
        # Split the space-separated tags field, dropping empty entries.
        return [t.strip() for t in self.tags.split(" ") if t.strip()]

    def matches_tag_set(self, tag_set):
        return tag_set.issubset(self.tags_list())

    def channels_str(self):
        """ Return a comma-separated string of assigned channel codes. """
        codes = self.channel_set.order_by("code").values_list("code", flat=True)
        return ",".join(map(str, codes))

    def to_dict(self, readonly=False):
        """Serialize for the API; readonly omits mutating URLs/details."""

        result = {
            "name": self.name,
            "tags": self.tags,
            "grace": int(self.grace.total_seconds()),
            "n_pings": self.n_pings,
            "status": self.get_status(),
            "last_ping": isostring(self.last_ping),
            "next_ping": isostring(self.get_grace_start()),
        }

        if not readonly:
            update_rel_url = reverse("hc-api-update", args=[self.code])
            pause_rel_url = reverse("hc-api-pause", args=[self.code])

            result["ping_url"] = self.url()
            result["update_url"] = settings.SITE_ROOT + update_rel_url
            result["pause_url"] = settings.SITE_ROOT + pause_rel_url
            result["channels"] = self.channels_str()
            result["desc"] = self.desc

        if self.kind == "simple":
            result["timeout"] = int(self.timeout.total_seconds())
        elif self.kind == "cron":
            result["schedule"] = self.schedule
            result["tz"] = self.tz

        return result

    def ping(self, remote_addr, scheme, method, ua, body, action):
        """Record an incoming ping, update status, and log a Ping row.

        action is one of "start", "fail", "ign" or a plain success ping.
        """
        if action == "start":
            self.last_start = timezone.now()
            # Don't update "last_ping" field.
        elif action == "ign":
            pass
        else:
            self.last_start = None
            self.last_ping = timezone.now()

            new_status = "down" if action == "fail" else "up"
            if self.status != new_status:
                # Record the status transition for alerting.
                flip = Flip(owner=self)
                flip.created = self.last_ping
                flip.old_status = self.status
                flip.new_status = new_status
                flip.save()

                self.status = new_status

        self.alert_after = self.going_down_after()
        self.n_pings = models.F("n_pings") + 1
        self.has_confirmation_link = "confirm" in str(body).lower()
        self.save()
        self.refresh_from_db()

        ping = Ping(owner=self)
        ping.n = self.n_pings
        if action in ("start", "fail", "ign"):
            ping.kind = action

        ping.remote_addr = remote_addr
        ping.scheme = scheme
        ping.method = method
        # If User-Agent is longer than 200 characters, truncate it:
        ping.ua = ua[:200]
        ping.body = body[:10000]
        ping.save()
class Ping(models.Model):
    """A single received ping, recorded per Check."""

    id = models.BigAutoField(primary_key=True)
    # Sequence number within the owning check (copied from Check.n_pings).
    n = models.IntegerField(null=True)
    owner = models.ForeignKey(Check, models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    # "start", "fail" or "ign"; blank/None for a plain success ping.
    kind = models.CharField(max_length=6, blank=True, null=True)
    scheme = models.CharField(max_length=10, default="http")
    remote_addr = models.GenericIPAddressField(blank=True, null=True)
    method = models.CharField(max_length=10, blank=True)
    # User-Agent, truncated to 200 characters by Check.ping().
    ua = models.CharField(max_length=200, blank=True)
    body = models.CharField(max_length=10000, blank=True, null=True)
class Channel(models.Model):
name = models.CharField(max_length=100, blank=True)
code = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
project = models.ForeignKey(Project, models.CASCADE)
created = models.DateTimeField(auto_now_add=True)
kind = models.CharField(max_length=20, choices=CHANNEL_KINDS)
value = models.TextField(blank=True)
email_verified = models.BooleanField(default=False)
checks = models.ManyToManyField(Check)
def __str__(self):
if self.name:
return self.name
if self.kind == "email":
return "Email to %s" % self.email_value
elif self.kind == "sms":
return "SMS to %s" % self.sms_number
elif self.kind == "slack":
return "Slack %s" % self.slack_channel
elif self.kind == "telegram":
return "Telegram %s" % self.telegram_name
return self.get_kind_display()
def to_dict(self):
return {"id": str(self.code), "name": self.name, "kind": self.kind}
def assign_all_checks(self):
checks = Check.objects.filter(project=self.project)
self.checks.add(*checks)
def make_token(self):
seed = "%s%s" % (self.code, settings.SECRET_KEY)
seed = seed.encode()
return hashlib.sha1(seed).hexdigest()
def send_verify_link(self):
args = [self.code, self.make_token()]
verify_link = reverse("hc-verify-email", args=args)
verify_link = settings.SITE_ROOT + verify_link
emails.verify_email(self.email_value, {"verify_link": verify_link})
def get_unsub_link(self):
args = [self.code, self.make_token()]
verify_link = reverse("hc-unsubscribe-alerts", args=args)
return settings.SITE_ROOT + verify_link
@property
def transport(self):
if self.kind == "email":
return transports.Email(self)
elif self.kind == "webhook":
return transports.Webhook(self)
elif self.kind == "slack":
return transports.Slack(self)
elif self.kind == "hipchat":
return transports.HipChat(self)
elif self.kind == "pd":
return transports.PagerDuty(self)
elif self.kind == "pagertree":
return transports.PagerTree(self)
elif self.kind == "pagerteam":
return transports.PagerTeam(self)
elif self.kind == "victorops":
return transports.VictorOps(self)
elif self.kind == "pushbullet":
return transports.Pushbullet(self)
elif self.kind == "po":
return transports.Pushover(self)
elif self.kind == "opsgenie":
return transports.OpsGenie(self)
elif self.kind == "discord":
return transports.Discord(self)
elif self.kind == "telegram":
return transports.Telegram(self)
elif self.kind == "sms":
return transports.Sms(self)
elif self.kind == "trello":
return transports.Trello(self)
elif self.kind == "matrix":
return transports.Matrix(self)
elif self.kind == "whatsapp":
return transports.WhatsApp(self)
else:
raise NotImplementedError("Unknown channel kind: %s" % self.kind)
def notify(self, check):
if self.transport.is_noop(check):
return "no-op"
n = Notification(owner=check, channel=self)
n.check_status = check.status
n.error = "Sending"
n.save()
if self.kind == "email":
error = self.transport.notify(check, n.bounce_url()) or ""
else:
error = self.transport.notify(check) or ""
n.error = error
n.save()
return error
def icon_path(self):
return "img/integrations/%s.png" % self.kind
@property
def po_priority(self):
assert self.kind == "po"
parts = self.value.split("|")
prio = int(parts[1])
return PO_PRIORITIES[prio]
def webhook_spec(self, status):
assert self.kind == "webhook"
if not self.value.startswith("{"):
parts = self.value.split("\n")
url_down = parts[0]
url_up = parts[1] if len(parts) > 1 else ""
post_data = parts[2] if len(parts) > 2 else ""
return {
"method": "POST" if post_data else "GET",
"url": url_down if status == "down" else url_up,
"body": post_data,
"headers": {},
}
doc = json.loads(self.value)
if "post_data" in doc:
# Legacy "post_data" in doc -- use the legacy fields
return {
"method": "POST" if doc["post_data"] else "GET",
"url": doc["url_down"] if status == "down" else doc["url_up"],
"body": doc["post_data"],
"headers": doc["headers"],
}
if status == "down" and "method_down" in doc:
return {
"method": doc["method_down"],
"url": doc["url_down"],
"body": doc["body_down"],
"headers": doc["headers_down"],
}
elif status == "up" and "method_up" in doc:
return {
"method": doc["method_up"],
"url": doc["url_up"],
"body": doc["body_up"],
"headers": doc["headers_up"],
}
@property
def down_webhook_spec(self):
return self.webhook_spec("down")
@property
def up_webhook_spec(self):
return self.webhook_spec("up")
@property
def url_down(self):
return self.down_webhook_spec["url"]
@property
def url_up(self):
return self.up_webhook_spec["url"]
@property
def slack_team(self):
assert self.kind == "slack"
if not self.value.startswith("{"):
return None
doc = json.loads(self.value)
return doc["team_name"]
@property
def slack_channel(self):
assert self.kind == "slack"
if not self.value.startswith("{"):
return None
doc = json.loads(self.value)
return doc["incoming_webhook"]["channel"]
@property
def slack_webhook_url(self):
assert self.kind == "slack"
if not self.value.startswith("{"):
return self.value
doc = json.loads(self.value)
return doc["incoming_webhook"]["url"]
@property
def discord_webhook_url(self):
assert self.kind == "discord"
doc = json.loads(self.value)
return doc["webhook"]["url"]
@property
def discord_webhook_id(self):
assert self.kind == "discord"
doc = json.loads(self.value)
return doc["webhook"]["id"]
@property
def telegram_id(self):
assert self.kind == "telegram"
doc = json.loads(self.value)
return doc.get("id")
@property
def telegram_type(self):
assert self.kind == "telegram"
doc = json.loads(self.value)
return doc.get("type")
@property
def telegram_name(self):
assert self.kind == "telegram"
doc = json.loads(self.value)
return doc.get("name")
@property
def pd_service_key(self):
assert self.kind == "pd"
if not self.value.startswith("{"):
return self.value
doc = json.loads(self.value)
return doc["service_key"]
@property
def pd_account(self):
assert self.kind == "pd"
if self.value.startswith("{"):
doc = json.loads(self.value)
return doc["account"]
def latest_notification(self):
return Notification.objects.filter(channel=self).latest()
@property
def sms_number(self):
assert self.kind in ("sms", "whatsapp")
if self.value.startswith("{"):
doc = json.loads(self.value)
return doc["value"]
return self.value
@property
def sms_label(self):
assert self.kind == "sms"
if self.value.startswith("{"):
doc = json.loads(self.value)
return doc["label"]
@property
def trello_token(self):
assert self.kind == "trello"
if self.value.startswith("{"):
doc = json.loads(self.value)
return doc["token"]
@property
def trello_board_list(self):
assert self.kind == "trello"
if self.value.startswith("{"):
doc = json.loads(self.value)
return doc["board_name"], doc["list_name"]
@property
def trello_list_id(self):
assert self.kind == "trello"
if self.value.startswith("{"):
doc = json.loads(self.value)
return doc["list_id"]
@property
def email_value(self):
assert self.kind == "email"
if not self.value.startswith("{"):
return self.value
doc = json.loads(self.value)
return doc.get("value")
@property
def email_notify_up(self):
assert self.kind == "email"
if not self.value.startswith("{"):
return True
doc = json.loads(self.value)
return doc.get("up")
@property
def email_notify_down(self):
    """Whether "down" notifications are enabled (legacy plain values mean yes)."""
    assert self.kind == "email"
    if self.value.startswith("{"):
        return json.loads(self.value).get("down")
    return True
@property
def whatsapp_notify_up(self):
    """Whether "up" notifications are enabled for this WhatsApp channel."""
    assert self.kind == "whatsapp"
    return json.loads(self.value)["up"]
@property
def whatsapp_notify_down(self):
    """Whether "down" notifications are enabled for this WhatsApp channel."""
    assert self.kind == "whatsapp"
    return json.loads(self.value)["down"]
class Notification(models.Model):
    """A record of a single alert delivery attempt through a channel."""

    class Meta:
        # Makes Notification.objects.latest() pick the newest by "created".
        get_latest_by = "created"

    code = models.UUIDField(default=uuid.uuid4, null=True, editable=False)
    owner = models.ForeignKey(Check, models.CASCADE)
    check_status = models.CharField(max_length=6)
    channel = models.ForeignKey(Channel, models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    error = models.CharField(max_length=200, blank=True)

    def bounce_url(self):
        # Absolute URL the mail system can hit to report a bounced e-mail.
        return settings.SITE_ROOT + reverse("hc-api-bounce", args=[self.code])
class Flip(models.Model):
    """Records a single status transition of a Check, for alerting."""

    owner = models.ForeignKey(Check, models.CASCADE)
    created = models.DateTimeField()
    processed = models.DateTimeField(null=True, blank=True, db_index=True)
    old_status = models.CharField(max_length=8, choices=STATUSES)
    new_status = models.CharField(max_length=8, choices=STATUSES)

    def send_alerts(self):
        """Notify all of the owner check's channels about this transition.

        Returns a list of (channel, error) tuples for failed deliveries.
        """
        if self.new_status == "up" and self.old_status in ("new", "paused"):
            # Don't send alerts on new->up and paused->up transitions
            return []

        if self.new_status not in ("up", "down"):
            # Bug fix: Flip has no "status" attribute (only old_status and
            # new_status), so the original "self.status" raised
            # AttributeError instead of the intended NotImplementedError.
            raise NotImplementedError("Unexpected status: %s" % self.new_status)

        errors = []
        for channel in self.owner.channel_set.all():
            error = channel.notify(self.owner)
            if error not in ("", "no-op"):
                errors.append((channel, error))

        return errors
class TokenBucket(models.Model):
    """A simple token-bucket rate limiter persisted in the database."""

    value = models.CharField(max_length=80, unique=True)
    tokens = models.FloatField(default=1.0)
    updated = models.DateTimeField(default=timezone.now)

    @staticmethod
    def authorize(value, capacity, refill_time_secs):
        """Spend one of *capacity* tokens from the bucket keyed by *value*.

        The bucket refills from empty to full over *refill_time_secs*
        seconds. Returns True if the action is allowed, False otherwise.
        """
        now = timezone.now()
        obj, created = TokenBucket.objects.get_or_create(value=value)

        if not created:
            # Top up the bucket:
            delta_secs = (now - obj.updated).total_seconds()
            obj.tokens = min(1.0, obj.tokens + delta_secs / refill_time_secs)

        obj.tokens -= 1.0 / capacity
        if obj.tokens < 0:
            # Not enough tokens
            return False

        # Race condition: two concurrent authorize calls can overwrite each
        # other's changes. It's OK to be a little inexact here for the sake
        # of simplicity.
        obj.updated = now
        obj.save()
        return True

    @staticmethod
    def authorize_login_email(email):
        # Normalize the mailbox part (remove dots and the "+alias" suffix)
        # so variants of the same address share one bucket:
        mailbox, domain = email.split("@")
        mailbox = mailbox.replace(".", "")
        mailbox = mailbox.split("+")[0]
        email = mailbox + "@" + domain

        # Key on a salted hash so raw addresses are not stored in the table.
        salted_encoded = (email + settings.SECRET_KEY).encode()
        value = "em-%s" % hashlib.sha1(salted_encoded).hexdigest()

        # 20 login attempts for a single email per hour:
        return TokenBucket.authorize(value, 20, 3600)

    @staticmethod
    def authorize_invite(user):
        value = "invite-%d" % user.id
        # 20 invites per day
        return TokenBucket.authorize(value, 20, 3600 * 24)

    @staticmethod
    def authorize_login_password(email):
        salted_encoded = (email + settings.SECRET_KEY).encode()
        value = "pw-%s" % hashlib.sha1(salted_encoded).hexdigest()

        # 20 password attempts per day
        return TokenBucket.authorize(value, 20, 3600 * 24)
Add "desc" back in the readonly API responses, and add "unique_key" field, derived from code.
# coding: utf-8
import hashlib
import json
import uuid
from datetime import datetime, timedelta as td
from croniter import croniter
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils import timezone
from hc.accounts.models import Project
from hc.api import transports
from hc.lib import emails
import pytz
# (value, human label) status choices shared by Check and Flip.
STATUSES = (("up", "Up"), ("down", "Down"), ("new", "New"), ("paused", "Paused"))
DEFAULT_TIMEOUT = td(days=1)  # default expected period between pings
DEFAULT_GRACE = td(hours=1)  # default extra slack after a missed ping
# Far-future sentinel used instead of None in grace-period arithmetic.
NEVER = datetime(3000, 1, 1, tzinfo=pytz.UTC)
CHECK_KINDS = (("simple", "Simple"), ("cron", "Cron"))

# All supported notification integrations (Channel.kind choices).
CHANNEL_KINDS = (
    ("email", "Email"),
    ("webhook", "Webhook"),
    ("hipchat", "HipChat"),
    ("slack", "Slack"),
    ("pd", "PagerDuty"),
    ("pagertree", "PagerTree"),
    ("pagerteam", "Pager Team"),
    ("po", "Pushover"),
    ("pushbullet", "Pushbullet"),
    ("opsgenie", "OpsGenie"),
    ("victorops", "VictorOps"),
    ("discord", "Discord"),
    ("telegram", "Telegram"),
    ("sms", "SMS"),
    ("zendesk", "Zendesk"),
    ("trello", "Trello"),
    ("matrix", "Matrix"),
    ("whatsapp", "WhatsApp"),
)

# Pushover numeric priority -> display slug.
PO_PRIORITIES = {-2: "lowest", -1: "low", 0: "normal", 1: "high", 2: "emergency"}
def isostring(dt):
    """Convert the datetime to ISO 8601 format with no microseconds.

    Returns None when *dt* is None.
    """
    return dt.replace(microsecond=0).isoformat() if dt else None
class Check(models.Model):
    """A monitored job: its expected ping cadence, grace period and status."""

    name = models.CharField(max_length=100, blank=True)
    tags = models.CharField(max_length=500, blank=True)
    code = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
    desc = models.TextField(blank=True)
    project = models.ForeignKey(Project, models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    kind = models.CharField(max_length=10, default="simple", choices=CHECK_KINDS)
    # "simple" checks use timeout; "cron" checks use schedule + tz.
    timeout = models.DurationField(default=DEFAULT_TIMEOUT)
    grace = models.DurationField(default=DEFAULT_GRACE)
    schedule = models.CharField(max_length=100, default="* * * * *")
    tz = models.CharField(max_length=36, default="UTC")
    subject = models.CharField(max_length=100, blank=True)
    n_pings = models.IntegerField(default=0)
    last_ping = models.DateTimeField(null=True, blank=True)
    last_start = models.DateTimeField(null=True, blank=True)
    last_ping_was_fail = models.NullBooleanField(default=False)
    has_confirmation_link = models.BooleanField(default=False)
    alert_after = models.DateTimeField(null=True, blank=True, editable=False)
    status = models.CharField(max_length=6, choices=STATUSES, default="new")

    def __str__(self):
        return "%s (%d)" % (self.name or self.code, self.id)

    def name_then_code(self):
        # Display helper: prefer the human-assigned name, fall back to code.
        if self.name:
            return self.name

        return str(self.code)

    def url(self):
        # The public ping URL clients hit to signal liveness.
        return settings.PING_ENDPOINT + str(self.code)

    def details_url(self):
        return settings.SITE_ROOT + reverse("hc-details", args=[self.code])

    def email(self):
        # The ping-by-email address for this check.
        return "%s@%s" % (self.code, settings.PING_EMAIL_DOMAIN)

    def get_grace_start(self):
        """ Return the datetime when the grace period starts.

        If the check is currently new, paused or down, return None.
        """

        # NEVER is a constant sentinel value (year 3000).
        # Using None instead would make the logic clunky.
        result = NEVER

        if self.kind == "simple" and self.status == "up":
            result = self.last_ping + self.timeout
        elif self.kind == "cron" and self.status == "up":
            # The complex case, next ping is expected based on cron schedule.
            # Don't convert to naive datetimes (and so avoid ambiguities around
            # DST transitions). Croniter will handle the timezone-aware datetimes.
            zone = pytz.timezone(self.tz)
            last_local = timezone.localtime(self.last_ping, zone)
            it = croniter(self.schedule, last_local)
            result = it.next(datetime)

        # A running check ("start" received) may flip down sooner.
        if self.last_start and self.status != "down":
            result = min(result, self.last_start)

        if result != NEVER:
            return result

    def going_down_after(self):
        """ Return the datetime when the check goes down.

        If the check is new or paused, and not currently running, return None.
        If the check is already down, also return None.
        """

        grace_start = self.get_grace_start()
        if grace_start is not None:
            return grace_start + self.grace

    def get_status(self, now=None, with_started=True):
        """ Return current status for display. """

        if now is None:
            now = timezone.now()

        if self.last_start:
            # The check is "running": down if the run exceeded grace.
            if now >= self.last_start + self.grace:
                return "down"
            elif with_started:
                return "started"

        if self.status in ("new", "paused", "down"):
            return self.status

        # status == "up": refine into up / grace / down by elapsed time.
        grace_start = self.get_grace_start()
        grace_end = grace_start + self.grace
        if now >= grace_end:
            return "down"

        if now >= grace_start:
            return "grace"

        return "up"

    def assign_all_channels(self):
        # Subscribe this check to every channel in its project.
        channels = Channel.objects.filter(project=self.project)
        self.channel_set.set(channels)

    def tags_list(self):
        # Tags are stored as one space-separated string.
        return [t.strip() for t in self.tags.split(" ") if t.strip()]

    def matches_tag_set(self, tag_set):
        return tag_set.issubset(self.tags_list())

    def channels_str(self):
        """ Return a comma-separated string of assigned channel codes. """
        codes = self.channel_set.order_by("code").values_list("code", flat=True)
        return ",".join(map(str, codes))

    def to_dict(self, readonly=False):
        """ Serialize for the API; readonly clients get no write URLs. """
        result = {
            "name": self.name,
            "tags": self.tags,
            "desc": self.desc,
            "grace": int(self.grace.total_seconds()),
            "n_pings": self.n_pings,
            "status": self.get_status(),
            "last_ping": isostring(self.last_ping),
            "next_ping": isostring(self.get_grace_start()),
        }

        if readonly:
            # A stable, pseudonymous identifier derived from the code, so
            # read-only clients never see the secret ping code itself.
            code_half = self.code.hex[:16]
            result["unique_key"] = hashlib.sha1(code_half.encode()).hexdigest()
        else:
            update_rel_url = reverse("hc-api-update", args=[self.code])
            pause_rel_url = reverse("hc-api-pause", args=[self.code])

            result["ping_url"] = self.url()
            result["update_url"] = settings.SITE_ROOT + update_rel_url
            result["pause_url"] = settings.SITE_ROOT + pause_rel_url
            result["channels"] = self.channels_str()

        if self.kind == "simple":
            result["timeout"] = int(self.timeout.total_seconds())
        elif self.kind == "cron":
            result["schedule"] = self.schedule
            result["tz"] = self.tz

        return result

    def ping(self, remote_addr, scheme, method, ua, body, action):
        """ Record an incoming ping event and update this check's status. """
        if action == "start":
            self.last_start = timezone.now()
            # Don't update "last_ping" field.
        elif action == "ign":
            pass
        else:
            self.last_start = None
            self.last_ping = timezone.now()

            new_status = "down" if action == "fail" else "up"
            if self.status != new_status:
                # Record the status transition for the alerting pipeline.
                flip = Flip(owner=self)
                flip.created = self.last_ping
                flip.old_status = self.status
                flip.new_status = new_status
                flip.save()

                self.status = new_status

        self.alert_after = self.going_down_after()
        # F() expression: increment atomically in the database.
        self.n_pings = models.F("n_pings") + 1
        self.has_confirmation_link = "confirm" in str(body).lower()
        self.save()
        # Reload so n_pings holds the concrete value, not the F() object.
        self.refresh_from_db()

        ping = Ping(owner=self)
        ping.n = self.n_pings
        if action in ("start", "fail", "ign"):
            ping.kind = action

        ping.remote_addr = remote_addr
        ping.scheme = scheme
        ping.method = method
        # If User-Agent is longer than 200 characters, truncate it:
        ping.ua = ua[:200]
        ping.body = body[:10000]
        ping.save()
class Ping(models.Model):
    """A single received ping (or start/fail/ign event) for a check."""

    id = models.BigAutoField(primary_key=True)
    n = models.IntegerField(null=True)
    owner = models.ForeignKey(Check, models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    # "start", "fail" or "ign"; blank for a regular success ping.
    kind = models.CharField(max_length=6, blank=True, null=True)
    scheme = models.CharField(max_length=10, default="http")
    remote_addr = models.GenericIPAddressField(blank=True, null=True)
    method = models.CharField(max_length=10, blank=True)
    ua = models.CharField(max_length=200, blank=True)
    body = models.CharField(max_length=10000, blank=True, null=True)
class Channel(models.Model):
    """A notification integration (email, Slack, webhook, ...).

    The "value" field holds either a plain string (legacy entries) or a
    JSON document, depending on the kind; the properties below normalize
    access to both forms.
    """

    name = models.CharField(max_length=100, blank=True)
    code = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
    project = models.ForeignKey(Project, models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    kind = models.CharField(max_length=20, choices=CHANNEL_KINDS)
    value = models.TextField(blank=True)
    email_verified = models.BooleanField(default=False)
    checks = models.ManyToManyField(Check)

    def __str__(self):
        if self.name:
            return self.name
        if self.kind == "email":
            return "Email to %s" % self.email_value
        elif self.kind == "sms":
            return "SMS to %s" % self.sms_number
        elif self.kind == "slack":
            return "Slack %s" % self.slack_channel
        elif self.kind == "telegram":
            return "Telegram %s" % self.telegram_name

        return self.get_kind_display()

    def to_dict(self):
        # Minimal API representation.
        return {"id": str(self.code), "name": self.name, "kind": self.kind}

    def assign_all_checks(self):
        # Subscribe every check in the project to this channel.
        checks = Check.objects.filter(project=self.project)
        self.checks.add(*checks)

    def make_token(self):
        # Deterministic, unguessable token for verify/unsubscribe links.
        seed = "%s%s" % (self.code, settings.SECRET_KEY)
        seed = seed.encode()
        return hashlib.sha1(seed).hexdigest()

    def send_verify_link(self):
        args = [self.code, self.make_token()]
        verify_link = reverse("hc-verify-email", args=args)
        verify_link = settings.SITE_ROOT + verify_link
        emails.verify_email(self.email_value, {"verify_link": verify_link})

    def get_unsub_link(self):
        args = [self.code, self.make_token()]
        verify_link = reverse("hc-unsubscribe-alerts", args=args)
        return settings.SITE_ROOT + verify_link

    @property
    def transport(self):
        # Map the channel kind to its transport implementation.
        if self.kind == "email":
            return transports.Email(self)
        elif self.kind == "webhook":
            return transports.Webhook(self)
        elif self.kind == "slack":
            return transports.Slack(self)
        elif self.kind == "hipchat":
            return transports.HipChat(self)
        elif self.kind == "pd":
            return transports.PagerDuty(self)
        elif self.kind == "pagertree":
            return transports.PagerTree(self)
        elif self.kind == "pagerteam":
            return transports.PagerTeam(self)
        elif self.kind == "victorops":
            return transports.VictorOps(self)
        elif self.kind == "pushbullet":
            return transports.Pushbullet(self)
        elif self.kind == "po":
            return transports.Pushover(self)
        elif self.kind == "opsgenie":
            return transports.OpsGenie(self)
        elif self.kind == "discord":
            return transports.Discord(self)
        elif self.kind == "telegram":
            return transports.Telegram(self)
        elif self.kind == "sms":
            return transports.Sms(self)
        elif self.kind == "trello":
            return transports.Trello(self)
        elif self.kind == "matrix":
            return transports.Matrix(self)
        elif self.kind == "whatsapp":
            return transports.WhatsApp(self)
        else:
            raise NotImplementedError("Unknown channel kind: %s" % self.kind)

    def notify(self, check):
        """ Deliver a notification about *check*.

        Returns the error string from the transport, "" on success, or
        "no-op" when the transport decides nothing needs to be sent.
        """
        if self.transport.is_noop(check):
            return "no-op"

        n = Notification(owner=check, channel=self)
        n.check_status = check.status
        # Save with a provisional error value so an in-flight delivery is
        # visible in the log; overwritten with the real outcome below.
        n.error = "Sending"
        n.save()

        if self.kind == "email":
            # Email transport also needs the bounce-reporting URL.
            error = self.transport.notify(check, n.bounce_url()) or ""
        else:
            error = self.transport.notify(check) or ""

        n.error = error
        n.save()

        return error

    def icon_path(self):
        return "img/integrations/%s.png" % self.kind

    @property
    def po_priority(self):
        # Pushover value format: "<user_key>|<priority>".
        assert self.kind == "po"
        parts = self.value.split("|")
        prio = int(parts[1])
        return PO_PRIORITIES[prio]

    def webhook_spec(self, status):
        """ Return a {method, url, body, headers} dict for the webhook call.

        NOTE(review): falls through to an implicit None when the JSON
        document has no entry for the requested status -- callers appear
        to rely on the matching keys being present.
        """
        assert self.kind == "webhook"

        if not self.value.startswith("{"):
            # Legacy plain-text format: "url_down\nurl_up\npost_data".
            parts = self.value.split("\n")
            url_down = parts[0]
            url_up = parts[1] if len(parts) > 1 else ""
            post_data = parts[2] if len(parts) > 2 else ""

            return {
                "method": "POST" if post_data else "GET",
                "url": url_down if status == "down" else url_up,
                "body": post_data,
                "headers": {},
            }

        doc = json.loads(self.value)
        if "post_data" in doc:
            # Legacy "post_data" in doc -- use the legacy fields
            return {
                "method": "POST" if doc["post_data"] else "GET",
                "url": doc["url_down"] if status == "down" else doc["url_up"],
                "body": doc["post_data"],
                "headers": doc["headers"],
            }

        if status == "down" and "method_down" in doc:
            return {
                "method": doc["method_down"],
                "url": doc["url_down"],
                "body": doc["body_down"],
                "headers": doc["headers_down"],
            }
        elif status == "up" and "method_up" in doc:
            return {
                "method": doc["method_up"],
                "url": doc["url_up"],
                "body": doc["body_up"],
                "headers": doc["headers_up"],
            }

    @property
    def down_webhook_spec(self):
        return self.webhook_spec("down")

    @property
    def up_webhook_spec(self):
        return self.webhook_spec("up")

    @property
    def url_down(self):
        return self.down_webhook_spec["url"]

    @property
    def url_up(self):
        return self.up_webhook_spec["url"]

    @property
    def slack_team(self):
        # Returns None for legacy plain-string values.
        assert self.kind == "slack"
        if not self.value.startswith("{"):
            return None

        doc = json.loads(self.value)
        return doc["team_name"]

    @property
    def slack_channel(self):
        # Returns None for legacy plain-string values.
        assert self.kind == "slack"
        if not self.value.startswith("{"):
            return None

        doc = json.loads(self.value)
        return doc["incoming_webhook"]["channel"]

    @property
    def slack_webhook_url(self):
        # Legacy entries store the bare webhook URL as plain text.
        assert self.kind == "slack"
        if not self.value.startswith("{"):
            return self.value

        doc = json.loads(self.value)
        return doc["incoming_webhook"]["url"]

    @property
    def discord_webhook_url(self):
        assert self.kind == "discord"
        doc = json.loads(self.value)
        return doc["webhook"]["url"]

    @property
    def discord_webhook_id(self):
        assert self.kind == "discord"
        doc = json.loads(self.value)
        return doc["webhook"]["id"]

    @property
    def telegram_id(self):
        assert self.kind == "telegram"
        doc = json.loads(self.value)
        return doc.get("id")

    @property
    def telegram_type(self):
        assert self.kind == "telegram"
        doc = json.loads(self.value)
        return doc.get("type")

    @property
    def telegram_name(self):
        assert self.kind == "telegram"
        doc = json.loads(self.value)
        return doc.get("name")

    @property
    def pd_service_key(self):
        # Legacy entries store the bare service key as plain text.
        assert self.kind == "pd"
        if not self.value.startswith("{"):
            return self.value

        doc = json.loads(self.value)
        return doc["service_key"]

    @property
    def pd_account(self):
        # Returns None implicitly for legacy plain-string values.
        assert self.kind == "pd"
        if self.value.startswith("{"):
            doc = json.loads(self.value)
            return doc["account"]

    def latest_notification(self):
        # "latest" resolves via Notification.Meta.get_latest_by = "created".
        return Notification.objects.filter(channel=self).latest()

    @property
    def sms_number(self):
        assert self.kind in ("sms", "whatsapp")
        if self.value.startswith("{"):
            doc = json.loads(self.value)
            return doc["value"]
        return self.value

    @property
    def sms_label(self):
        # Returns None implicitly for legacy plain-string values.
        assert self.kind == "sms"
        if self.value.startswith("{"):
            doc = json.loads(self.value)
            return doc["label"]

    @property
    def trello_token(self):
        # Returns None implicitly for non-JSON values.
        assert self.kind == "trello"
        if self.value.startswith("{"):
            doc = json.loads(self.value)
            return doc["token"]

    @property
    def trello_board_list(self):
        # Returns a (board_name, list_name) tuple.
        assert self.kind == "trello"
        if self.value.startswith("{"):
            doc = json.loads(self.value)
            return doc["board_name"], doc["list_name"]

    @property
    def trello_list_id(self):
        assert self.kind == "trello"
        if self.value.startswith("{"):
            doc = json.loads(self.value)
            return doc["list_id"]

    @property
    def email_value(self):
        # Legacy entries store the bare address as plain text.
        assert self.kind == "email"
        if not self.value.startswith("{"):
            return self.value

        doc = json.loads(self.value)
        return doc.get("value")

    @property
    def email_notify_up(self):
        # Legacy plain-string values imply notifications on both edges.
        assert self.kind == "email"
        if not self.value.startswith("{"):
            return True

        doc = json.loads(self.value)
        return doc.get("up")

    @property
    def email_notify_down(self):
        # Legacy plain-string values imply notifications on both edges.
        assert self.kind == "email"
        if not self.value.startswith("{"):
            return True

        doc = json.loads(self.value)
        return doc.get("down")

    @property
    def whatsapp_notify_up(self):
        assert self.kind == "whatsapp"
        doc = json.loads(self.value)
        return doc["up"]

    @property
    def whatsapp_notify_down(self):
        assert self.kind == "whatsapp"
        doc = json.loads(self.value)
        return doc["down"]
class Notification(models.Model):
    """A record of a single alert delivery attempt through a channel."""

    class Meta:
        # Makes Notification.objects.latest() pick the newest by "created".
        get_latest_by = "created"

    code = models.UUIDField(default=uuid.uuid4, null=True, editable=False)
    owner = models.ForeignKey(Check, models.CASCADE)
    check_status = models.CharField(max_length=6)
    channel = models.ForeignKey(Channel, models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    error = models.CharField(max_length=200, blank=True)

    def bounce_url(self):
        # Absolute URL the mail system can hit to report a bounced e-mail.
        return settings.SITE_ROOT + reverse("hc-api-bounce", args=[self.code])
class Flip(models.Model):
    """Records a single status transition of a Check, for alerting."""

    owner = models.ForeignKey(Check, models.CASCADE)
    created = models.DateTimeField()
    processed = models.DateTimeField(null=True, blank=True, db_index=True)
    old_status = models.CharField(max_length=8, choices=STATUSES)
    new_status = models.CharField(max_length=8, choices=STATUSES)

    def send_alerts(self):
        """Notify all of the owner check's channels about this transition.

        Returns a list of (channel, error) tuples for failed deliveries.
        """
        if self.new_status == "up" and self.old_status in ("new", "paused"):
            # Don't send alerts on new->up and paused->up transitions
            return []

        if self.new_status not in ("up", "down"):
            # Bug fix: Flip has no "status" attribute (only old_status and
            # new_status), so the original "self.status" raised
            # AttributeError instead of the intended NotImplementedError.
            raise NotImplementedError("Unexpected status: %s" % self.new_status)

        errors = []
        for channel in self.owner.channel_set.all():
            error = channel.notify(self.owner)
            if error not in ("", "no-op"):
                errors.append((channel, error))

        return errors
class TokenBucket(models.Model):
    """A simple token-bucket rate limiter persisted in the database."""

    value = models.CharField(max_length=80, unique=True)
    tokens = models.FloatField(default=1.0)
    updated = models.DateTimeField(default=timezone.now)

    @staticmethod
    def authorize(value, capacity, refill_time_secs):
        """Spend one of *capacity* tokens from the bucket keyed by *value*.

        The bucket refills from empty to full over *refill_time_secs*
        seconds. Returns True if the action is allowed, False otherwise.
        """
        now = timezone.now()
        obj, created = TokenBucket.objects.get_or_create(value=value)

        if not created:
            # Top up the bucket:
            delta_secs = (now - obj.updated).total_seconds()
            obj.tokens = min(1.0, obj.tokens + delta_secs / refill_time_secs)

        obj.tokens -= 1.0 / capacity
        if obj.tokens < 0:
            # Not enough tokens
            return False

        # Race condition: two concurrent authorize calls can overwrite each
        # other's changes. It's OK to be a little inexact here for the sake
        # of simplicity.
        obj.updated = now
        obj.save()
        return True

    @staticmethod
    def authorize_login_email(email):
        # Normalize the mailbox part (remove dots and the "+alias" suffix)
        # so variants of the same address share one bucket:
        mailbox, domain = email.split("@")
        mailbox = mailbox.replace(".", "")
        mailbox = mailbox.split("+")[0]
        email = mailbox + "@" + domain

        # Key on a salted hash so raw addresses are not stored in the table.
        salted_encoded = (email + settings.SECRET_KEY).encode()
        value = "em-%s" % hashlib.sha1(salted_encoded).hexdigest()

        # 20 login attempts for a single email per hour:
        return TokenBucket.authorize(value, 20, 3600)

    @staticmethod
    def authorize_invite(user):
        value = "invite-%d" % user.id
        # 20 invites per day
        return TokenBucket.authorize(value, 20, 3600 * 24)

    @staticmethod
    def authorize_login_password(email):
        salted_encoded = (email + settings.SECRET_KEY).encode()
        value = "pw-%s" % hashlib.sha1(salted_encoded).hexdigest()

        # 20 password attempts per day
        return TokenBucket.authorize(value, 20, 3600 * 24)
|
'''
The ssh client wrapper system contains the routines that are used to alter
how executions are run in the salt-ssh system. This allows state routines
to be easily rewritten to execute in a way that makes them do the same tasks
as ZeroMQ salt, but via ssh.
'''
# Import salt libs
import salt.utils
import salt.client.ssh
class FunctionWrapper(dict):
    '''
    Mimics the salt function dict, but dispatches every call over the
    salt-ssh shell system instead of the local loader.
    '''
    def __init__(self, opts, id_, host, **kwargs):
        super(FunctionWrapper, self).__init__()
        self.opts = opts
        # Arguments forwarded to every salt.client.ssh.Single invocation.
        self.kwargs = {'id_': id_, 'host': host}
        self.kwargs.update(kwargs)

    def __getitem__(self, cmd):
        '''
        Look up a "module.function" name and return a callable that runs
        it remotely, simulating the local salt function lookup.
        '''
        def caller(*args, **kwargs):
            '''
            Execute the wrapped function on the remote host and return the
            decoded JSON result.
            '''
            # Build the command line: "<fun> <arg>... <key>=<val>... ".
            chunks = [cmd]
            chunks.extend('{0}'.format(arg) for arg in args)
            chunks.extend('{0}={1}'.format(key, val)
                          for key, val in kwargs.items())
            arg_str = ' '.join(chunks) + ' '
            single = salt.client.ssh.Single(self.opts, arg_str, **self.kwargs)
            ret = single.cmd_block()
            if ret.startswith('deploy'):
                # Remote side lacks the salt thin agent: deploy and retry.
                single.deploy()
                ret = single.cmd_block()
            return json.loads(ret, object_hook=salt.utils.decode_dict)
        return caller
import json
'''
The ssh client wrapper system contains the routines that are used to alter
how executions are run in the salt-ssh system. This allows state routines
to be easily rewritten to execute in a way that makes them do the same tasks
as ZeroMQ salt, but via ssh.
'''
# Import python libs
import json
# Import salt libs
import salt.utils
import salt.client.ssh
class FunctionWrapper(dict):
    '''
    Create an object that acts like the salt function dict and makes function
    calls remotely via the SSH shell system
    '''
    def __init__(
            self,
            opts,
            id_,
            host,
            **kwargs):
        super(FunctionWrapper, self).__init__()
        self.opts = opts
        # Arguments forwarded to every salt.client.ssh.Single invocation.
        self.kwargs = {'id_': id_,
                       'host': host}
        self.kwargs.update(kwargs)

    def __getitem__(self, cmd):
        '''
        Return the function call to simulate the salt local lookup system
        '''
        def caller(*args, **kwargs):
            '''
            The remote execution function
            '''
            # Build the command line: "<fun> <arg>... <key>=<val>... ".
            arg_str = '{0} '.format(cmd)
            for arg in args:
                arg_str += '{0} '.format(arg)
            for key, val in kwargs.items():
                arg_str += '{0}={1} '.format(key, val)
            single = salt.client.ssh.Single(self.opts, arg_str, **self.kwargs)
            ret = single.cmd_block()
            if ret.startswith('deploy'):
                # Remote side lacks the salt thin agent: deploy it and
                # retry the command once.
                single.deploy()
                ret = single.cmd_block()
            return json.loads(ret, object_hook=salt.utils.decode_dict)
        return caller
|
# -*- coding: utf-8 -*-
import json
import urlparse
from django.http import QueryDict
from jingo.helpers import datetime as datetime_filter
from nose import SkipTest
from nose.tools import eq_
from pyquery import PyQuery as pq
from test_utils import RequestFactory
from tower import strip_whitespace
import amo
import amo.tests
from amo.helpers import locale_url, numberfmt, urlparams
from amo.urlresolvers import reverse
from addons.models import Addon, AddonCategory, AddonUser, Category, Persona
from search import views
from search.utils import floor_version
from search.views import DEFAULT_NUM_PERSONAS, version_sidebar
from tags.models import AddonTag, Tag
from users.models import UserProfile
from versions.compare import num as vnum, version_int as vint, MAXVERSION
class TestSearchboxTarget(amo.tests.ESTestCase):
    # Verifies the header search box posts to the right endpoint, category
    # and placeholder on each browse page.

    @classmethod
    def setUpClass(cls):
        super(TestSearchboxTarget, cls).setUpClass()
        cls.setUpIndex()

    def check(self, url, placeholder, cat=None, action=None, q=None):
        # Checks that we search within addons, personas, collections, etc.
        form = pq(self.client.get(url).content)('.header-search form')
        eq_(form.attr('action'), action or reverse('search.search'))
        if cat:
            eq_(form('input[name=cat]').val(), cat)
        q_field = form('input[name=q]')
        eq_(q_field.attr('placeholder'), placeholder)
        if q:
            eq_(q_field.val(), q)

    def test_addons_is_default(self):
        self.check(reverse('home'), 'search for add-ons')

    def test_themes(self):
        # Themes (complete themes) search within the add-ons category.
        self.check(reverse('browse.themes'), 'search for add-ons',
                   '%s,0' % amo.ADDON_THEME)

    def test_collections(self):
        self.check(reverse('collections.list'), 'search for collections',
                   'collections')

    def test_personas(self):
        self.check(reverse('browse.personas'), 'search for themes',
                   'themes')

    def test_addons_search(self):
        self.check(reverse('search.search'), 'search for add-ons')

    def test_addons_search_term(self):
        # A submitted query is echoed back into the search box.
        self.check(reverse('search.search') + '?q=ballin',
                   'search for add-ons', q='ballin')
class SearchBase(amo.tests.ESTestCase):
    """Shared helpers for search results-page tests."""

    def get_results(self, r, sort=True):
        """Return pks of add-ons shown on search results page."""
        results = [a.id for a in r.context['pager'].object_list]
        if sort:
            results = sorted(results)
        return results

    def check_sort_links(self, key, title=None, sort_by=None, reverse=True,
                         params=None):
        # Assert the sort menu selection and (optionally) the result order
        # for ?sort=<key>.
        # Bug fix: the default was a mutable dict (params={}), which is
        # shared between calls and can leak mutations; use None instead.
        if params is None:
            params = {}
        r = self.client.get(urlparams(self.url, sort=key, **params))
        eq_(r.status_code, 200)
        doc = pq(r.content)
        if title:
            if hasattr(self, 'MOBILE'):
                menu = doc('#sort-menu')
                eq_(menu.find('span').text(), title)
                eq_(menu.find('.selected').text(), title)
            else:
                eq_(doc('#sorter .selected').text(), title)
        if sort_by:
            results = r.context['pager'].object_list
            if sort_by == 'name':
                expected = sorted(results, key=lambda x: unicode(x.name))
            else:
                expected = sorted(results, key=lambda x: getattr(x, sort_by),
                                  reverse=reverse)
            eq_(list(results), expected)

    def check_name_results(self, params, expected):
        # Assert the exact set of result pks for the given query params.
        r = self.client.get(urlparams(self.url, **params), follow=True)
        eq_(r.status_code, 200)
        got = self.get_results(r)
        eq_(got, expected,
            'Got: %s. Expected: %s. Parameters: %s' % (got, expected, params))

    def check_appver_platform_ignored(self, expected):
        # Collection results should not filter on `appver` nor `platform`.
        permutations = [
            {},
            {'appver': amo.FIREFOX.id},
            {'appver': amo.THUNDERBIRD.id},
            {'platform': amo.PLATFORM_MAC.id},
            {'appver': amo.SEAMONKEY.id, 'platform': amo.PLATFORM_WIN.id},
        ]
        for p in permutations:
            self.check_name_results(p, expected)

    def check_heading(self):
        # The results heading shows the query term only when one is given.
        r = self.client.get(self.url)
        eq_(r.status_code, 200)
        eq_(pq(r.content)('.results-count strong').text(), None)

        r = self.client.get(self.url + '&q=ballin')
        eq_(r.status_code, 200)
        eq_(pq(r.content)('.results-count strong').text(), 'ballin')
class TestESSearch(SearchBase):
fixtures = ['base/apps', 'base/category', 'tags/tags']
@classmethod
def setUpClass(cls):
    # Build the shared Elasticsearch index once for all tests in the class.
    super(TestESSearch, cls).setUpClass()
    cls.setUpIndex()
def setUp(self):
    # Put every public, enabled add-on into category 1, re-save to trigger
    # reindexing, then refresh the index.
    self.url = reverse('search.search')
    self.addons = Addon.objects.filter(status=amo.STATUS_PUBLIC,
                                       disabled_by_user=False)
    for addon in self.addons:
        AddonCategory.objects.create(addon=addon, category_id=1)
        addon.save()
    self.refresh()
def refresh_addons(self):
    # Re-save every add-on (triggers reindexing), then refresh the index.
    # Fix: use a plain loop instead of a list comprehension executed purely
    # for its side effects (the built list was discarded).
    for addon in Addon.objects.all():
        addon.save()
    self.refresh()
def test_get(self):
    # The results page renders and advertises PJAX support via Vary.
    r = self.client.get(self.url)
    eq_(r.status_code, 200)
    assert 'X-PJAX' in r['vary'].split(','), 'Expected "Vary: X-PJAX"'
    self.assertTemplateUsed(r, 'search/results.html')
@amo.tests.mobile_test
def test_get_mobile(self):
    # Mobile requests get the mobile results template.
    r = self.client.get(self.url)
    eq_(r.status_code, 200)
    self.assertTemplateUsed(r, 'search/mobile/results.html')
@amo.tests.mobile_test
def test_mobile_results_downloads(self):
    # Sorting by downloads shows the weekly-downloads "vital" stat.
    r = self.client.get(urlparams(self.url, sort='downloads'))
    assert pq(r.content)('#content .item .vital.downloads'), (
        'Expected weekly downloads')
def test_search_tools_omit_users(self):
    # The search-tools category must not offer the "Most Users" sort.
    r = self.client.get(self.url, dict(cat='%s,5' % amo.ADDON_SEARCH))
    eq_(r.status_code, 200)
    sorter = pq(r.content)('#sorter')
    eq_(sorter.length, 1)
    assert 'sort=users' not in sorter.text(), (
        'Sort by "Most Users" should not appear for search tools.')
def test_results_sort_default(self):
    # No sort key falls back to relevance (ordered by weekly downloads).
    self.check_sort_links(None, 'Relevance', 'weekly_downloads')
def test_results_sort_unknown(self):
    # Unknown sort keys fall back to relevance.
    self.check_sort_links('xxx', 'Relevance')
def test_results_sort_users(self):
    # sort=users orders by average daily users.
    self.check_sort_links('users', 'Most Users', 'average_daily_users')
def test_results_sort_rating(self):
    # sort=rating orders by bayesian rating.
    self.check_sort_links('rating', 'Top Rated', 'bayesian_rating')
def test_results_sort_newest(self):
    # sort=created orders by creation date.
    self.check_sort_links('created', 'Newest', 'created')
def test_results_sort_updated(self):
    # Only the menu label is checked here; result order is not asserted.
    self.check_sort_links('updated', 'Recently Updated')
def test_results_sort_downloads(self):
    # sort=downloads orders by weekly downloads.
    self.check_sort_links('downloads', 'Weekly Downloads',
                          'weekly_downloads')
def test_mobile_results_sort_name(self):
    # Name sorts ascending, unlike the other (descending) sort keys.
    # NOTE(review): named "mobile" but lacks the @amo.tests.mobile_test
    # decorator the sibling mobile tests carry -- confirm intent.
    self.check_sort_links('name', 'Name', 'name', reverse=False)
@amo.tests.mobile_test
def test_mobile_results_sort_default(self):
    # Mobile: no sort key falls back to relevance (weekly downloads).
    self.check_sort_links(None, 'Relevance', 'weekly_downloads')
@amo.tests.mobile_test
def test_mobile_results_sort_unknown(self):
    # Mobile: unknown sort keys fall back to relevance.
    self.check_sort_links('xxx', 'Relevance')
@amo.tests.mobile_test
def test_mobile_results_sort_users(self):
    # Mobile: sort=users orders by average daily users.
    self.check_sort_links('users', 'Most Users', 'average_daily_users')
@amo.tests.mobile_test
def test_mobile_results_sort_rating(self):
    # Mobile: sort=rating orders by bayesian rating.
    self.check_sort_links('rating', 'Top Rated', 'bayesian_rating')
@amo.tests.mobile_test
def test_mobile_results_sort_newest(self):
    # Mobile: sort=created orders by creation date.
    self.check_sort_links('created', 'Newest', 'created')
def test_legacy_redirects(self):
    # Old sort names permanently redirect to their modern equivalents.
    r = self.client.get(self.url + '?sort=averagerating')
    self.assertRedirects(r, self.url + '?sort=rating', status_code=301)
def test_legacy_redirects_to_non_ascii(self):
    # Regression test: the redirect must survive percent-encoded
    # non-ASCII path components.
    # see http://sentry.dmz.phx1.mozilla.com/addons/group/2186/
    url = '/ga-IE/seamonkey/tag/%E5%95%86%E5%93%81%E6%90%9C%E7%B4%A2'
    from_ = ('?sort=updated&lver=1.0&advancedsearch=1'
             '&tag=dearbhair&cat=4%2C84')
    to = ('?sort=updated&advancedsearch=1&appver=1.0'
          '&tag=dearbhair&cat=4%2C84')
    r = self.client.get(url + from_)
    self.assertRedirects(r, url + to, status_code=301)
def check_platform_filters(self, platform, expected=None):
    # Assert the platform sidebar labels and selection state for
    # ?platform=<value>. *expected* is a list of (label, selected) pairs.
    # NOTE(review): `expected` defaults to None but is indexed
    # unconditionally below -- callers must always pass it.
    r = self.client.get('%s?platform=%s' % (self.url, platform),
                        follow=True)
    plats = r.context['platforms']
    for idx, plat in enumerate(plats):
        name, selected = expected[idx]
        label = unicode(plat.text)
        eq_(label, name,
            '%r platform had the wrong label: %s' % (platform, label))
        eq_(plat.selected, selected,
            '%r platform should have been selected' % platform)
def test_platform_default(self):
    # Empty or unrecognized platform values select "All Systems".
    expected = [
        ('All Systems', True),
        ('Linux', False),
        ('Mac OS X', False),
        ('Windows', False),
    ]
    self.check_platform_filters('', expected)
    self.check_platform_filters('all', expected)
    self.check_platform_filters('any', expected)
    self.check_platform_filters('amiga', expected)
def test_platform_listed(self):
    # Each listed platform value selects exactly its own sidebar entry.
    expected = [
        ('All Systems', False),
        ('Linux', True),
        ('Mac OS X', False),
        ('Windows', False),
    ]
    self.check_platform_filters('linux', expected)

    expected = [
        ('All Systems', False),
        ('Linux', False),
        ('Mac OS X', False),
        ('Windows', True),
    ]
    self.check_platform_filters('windows', expected)

    expected = [
        ('All Systems', False),
        ('Linux', False),
        ('Mac OS X', True),
        ('Windows', False),
    ]
    self.check_platform_filters('mac', expected)
def test_platform_incompatible(self):
    # "any" behaves like All Systems; a non-listed platform (Maemo) is
    # appended to the sidebar as an extra selected entry.
    expected = [
        ('All Systems', True),
        ('Linux', False),
        ('Mac OS X', False),
        ('Windows', False),
    ]
    self.check_platform_filters('any', expected)

    expected = [
        ('All Systems', False),
        ('Linux', False),
        ('Mac OS X', False),
        ('Windows', False),
        ('Maemo', True),
    ]
    self.check_platform_filters('maemo', expected)
    def test_platform_legacy_params(self):
        """Numeric legacy platform ids map onto the modern platform facet;
        platforms that are not in the listed set are appended and selected.
        """
        ALL = (amo.PLATFORM_ALL, amo.PLATFORM_ANY, amo.PLATFORM_ALL_MOBILE)
        listed = ALL + (amo.PLATFORM_LINUX, amo.PLATFORM_MAC, amo.PLATFORM_WIN)
        for idx, platform in amo.PLATFORMS.iteritems():
            expected = [
                ('All Systems', platform in ALL),
                ('Linux', platform == amo.PLATFORM_LINUX),
                ('Mac OS X', platform == amo.PLATFORM_MAC),
                ('Windows', platform == amo.PLATFORM_WIN),
            ]
            if platform not in listed:
                expected.append((platform.name, True))
            self.check_platform_filters(str(idx), expected)
    def check_appver_filters(self, appver, expected):
        """Build the version sidebar for `appver` against canned ES facets
        and return the remaining version filters as dicts.

        The leading 'Any Firefox' entry is popped and checked here: it must
        be selected exactly when `expected` is falsy (no specific version).
        """
        request = RequestFactory()
        request.APP = amo.FIREFOX
        # Canned facet data: one platform, one appversion (presumably the
        # version-int for 5.0 -- confirm against versions.compare), one
        # category, no tags.
        facets = {
            u'platforms': [{u'count': 58, u'term': 1}],
            u'appversions': [{u'count': 58, u'term': 5000000200100}],
            u'categories': [{u'count': 55, u'term': 1}],
            u'tags': []
        }
        versions = version_sidebar(request,
                                   {'appver': floor_version(appver)}, facets)
        all_ = versions.pop(0)
        eq_(all_.text, 'Any %s' % unicode(request.APP.pretty))
        eq_(all_.selected, not expected)
        return [v.__dict__ for v in versions]
def test_appver_default(self):
eq_(self.check_appver_filters('', ''),
[{'text': u'Firefox 5.0',
'selected': False,
'urlparams': {'appver': '5.0'},
'children': []}])
def test_appver_known(self):
eq_(self.check_appver_filters('5.0', '5.0'),
[{'text': u'Firefox 5.0',
'selected': True,
'urlparams': {'appver': '5.0'},
'children': []}])
def test_appver_oddballs(self):
eq_(self.check_appver_filters('3.6.22', '3.6'),
[{'text': u'Firefox 5.0',
'selected': False,
'urlparams': {'appver': '5.0'},
'children': []},
{'text': u'Firefox 3.6',
'selected': True,
'urlparams': {'appver': '3.6'},
'children': []}])
    def test_appver_long(self):
        """Absurdly long version numbers must not crash version filtering,
        and large-but-valid versions get their own selected filter entry.
        """
        too_big = vnum(vint(MAXVERSION + 1))
        just_right = vnum(vint(MAXVERSION))
        assert self.check_appver_filters(too_big, floor_version(just_right)), (
            'All I ask is do not crash')
        eq_(self.check_appver_filters('9999999', '9999999.0'),
            [{'text': u'Firefox 9999999.0',
              'selected': True,
              'urlparams': {'appver': '9999999.0'},
              'children': []},
             {'text': u'Firefox 5.0',
              'selected': False,
              'urlparams': {'appver': '5.0'},
              'children': []}])
        eq_(self.check_appver_filters('99999999', '99999999.0'),
            [{'text': u'Firefox 99999999.0',
              'selected': True,
              'urlparams': {'appver': '99999999.0'},
              'children': []},
             {'text': u'Firefox 5.0',
              'selected': False,
              'urlparams': {'appver': '5.0'},
              'children': []}])
def test_appver_bad(self):
assert self.check_appver_filters('.', '.')
assert self.check_appver_filters('_', '_')
assert self.check_appver_filters('y.y', 'y.y')
assert self.check_appver_filters('*', '*')
    def test_non_pjax_results(self):
        """Plain (non-PJAX) requests render the full page with facets."""
        r = self.client.get(self.url)
        eq_(r.status_code, 200)
        eq_(r.context['is_pjax'], None)
        # These context variables should exist for normal requests.
        for var in ('categories', 'platforms', 'versions', 'tags'):
            assert var in r.context, '%r missing context var in view' % var
        doc = pq(r.content)
        eq_(doc('html').length, 1)
        eq_(doc('#pjax-results').length, 1)
        eq_(doc('#search-facets .facets.pjax-trigger').length, 1)
        eq_(doc('#sorter.pjax-trigger').length, 1)
def test_pjax_results(self):
r = self.client.get(self.url, HTTP_X_PJAX=True)
eq_(r.status_code, 200)
eq_(r.context['is_pjax'], True)
doc = pq(r.content)
eq_(doc('html').length, 0)
eq_(doc('#pjax-results').length, 0)
eq_(doc('#search-facets .facets.pjax-trigger').length, 0)
eq_(doc('#sorter.pjax-trigger').length, 1)
def test_facet_data_params_default(self):
r = self.client.get(self.url)
a = pq(r.content)('#search-facets a[data-params]:first')
eq_(json.loads(a.attr('data-params')),
dict(atype=None, cat=None, page=None))
def test_facet_data_params_filtered(self):
r = self.client.get(self.url + '?appver=3.6&platform=mac&page=3')
a = pq(r.content)('#search-facets a[data-params]:first')
eq_(json.loads(a.attr('data-params')),
dict(atype=None, cat=None, page=None))
    def check_cat_filters(self, params=None, selected='All Add-ons'):
        """Request the search page with `params` and verify the category
        facet links and which one is `selected`, plus that all add-ons
        are still listed in the pager.
        """
        if not params:
            params = {}
        r = self.client.get(urlparams(self.url, **params))
        eq_(sorted(a.id for a in self.addons),
            sorted(a.id for a in r.context['pager'].object_list))
        cat = self.addons[0].all_categories[0]
        links = pq(r.content)('#category-facets li a')
        expected = [
            ('All Add-ons', self.url),
            ('Extensions', urlparams(self.url, atype=amo.ADDON_EXTENSION)),
            (unicode(cat.name), urlparams(self.url, atype=amo.ADDON_EXTENSION,
                                          cat=cat.id)),
        ]
        amo.tests.check_links(expected, links, selected, verify=False)
def test_defaults_atype_no_cat(self):
self.check_cat_filters(dict(atype=1))
def test_defaults_atype_unknown_cat(self):
self.check_cat_filters(dict(atype=amo.ADDON_EXTENSION, cat=999))
def test_defaults_no_atype_unknown_cat(self):
self.check_cat_filters(dict(cat=999))
def test_defaults_atype_foreign_cat(self):
cat = Category.objects.create(application_id=amo.THUNDERBIRD.id,
type=amo.ADDON_EXTENSION)
self.check_cat_filters(dict(atype=amo.ADDON_EXTENSION, cat=cat.id))
def test_listed_cat(self):
cat = self.addons[0].all_categories[0]
self.check_cat_filters(dict(atype=amo.ADDON_EXTENSION, cat=cat.id),
selected=unicode(cat.name))
    def test_cat_facet_stale(self):
        """Category facets come from the index: deleting AddonCategory rows
        without reindexing still shows the old category facet links.
        """
        AddonCategory.objects.all().delete()
        r = self.client.get(self.url)
        expected = [
            ('All Add-ons', self.url),
            ('Extensions', urlparams(self.url, atype=amo.ADDON_EXTENSION)),
        ]
        amo.tests.check_links(expected, pq(r.content)('#category-facets li a'),
                              verify=False)
    def test_cat_facet_fresh(self):
        """After reindexing, removed categories drop out of the facets."""
        AddonCategory.objects.all().delete()
        # Save to reindex with new categories.
        self.refresh_addons()
        r = self.client.get(self.url)
        amo.tests.check_links([('All Add-ons', self.url)],
                              pq(r.content)('#category-facets li a'),
                              verify=False)
def test_unknown_tag_filter(self):
r = self.client.get(urlparams(self.url, tag='xxx'))
a = pq(r.content)('#tag-facets li.selected a')
eq_(a.length, 1)
eq_(a.text(), 'xxx')
eq_(list(r.context['pager'].object_list), [])
def test_tag_filters_on_search_page(self):
r = self.client.get(self.url, dict(tag='sky'))
a = pq(r.content)('#tag-facets li.selected a[data-params]')
eq_(json.loads(a.attr('data-params')), dict(tag='sky', page=None))
def test_no_tag_filters_on_tags_page(self):
r = self.client.get(reverse('tags.detail', args=['sky']))
eq_(r.status_code, 200)
eq_(pq(r.content)('#tag-facets').length, 0)
def get_results(self, r):
"""Return pks of add-ons shown on search results page."""
pks = pq(r.content)('#pjax-results div[data-addon]')
return sorted(int(pq(a).attr('data-addon')) for a in pks)
    def test_results_filtered_atype(self):
        """The atype filter partitions results into themes vs extensions."""
        theme = self.addons[0]
        theme.type = amo.ADDON_THEME
        theme.save()
        self.refresh_addons()
        themes = sorted(self.addons.filter(type=amo.ADDON_THEME)
                        .values_list('id', flat=True))
        eq_(themes, [theme.id])
        extensions = sorted(self.addons.filter(type=amo.ADDON_EXTENSION)
                            .values_list('id', flat=True))
        eq_(extensions, sorted(a.id for a in self.addons[1:]))
        # Extensions should show only extensions.
        r = self.client.get(self.url, dict(atype=amo.ADDON_EXTENSION))
        eq_(r.status_code, 200)
        eq_(self.get_results(r), extensions)
        # Themes should show only themes.
        r = self.client.get(self.url, dict(atype=amo.ADDON_THEME))
        eq_(r.status_code, 200)
        eq_(self.get_results(r), themes)
def test_results_respect_appver_filtering(self):
r = self.client.get(self.url, dict(appver='9.00'))
eq_(self.get_results(r), [])
def test_results_skip_appver_filtering_for_d2c(self):
r = self.client.get(self.url, dict(appver='10.0a1'))
eq_(self.get_results(r),
sorted(self.addons.values_list('id', flat=True)))
    def test_results_respect_appver_filtering_for_non_extensions(self):
        # NOTE(review): despite the name, this expects *all* themes back for
        # a d2c-style appver -- i.e. non-extensions appear to skip appver
        # filtering here; confirm the intended behavior against the view.
        self.addons.update(type=amo.ADDON_THEME)
        r = self.client.get(self.url, dict(appver='10.0a1',
                                           type=amo.ADDON_THEME))
        eq_(self.get_results(r),
            sorted(self.addons.values_list('id', flat=True)))
def test_results_platform_filter_all(self):
for platform in ('', 'all'):
r = self.client.get(self.url, dict(platform=platform))
eq_(self.get_results(r),
sorted(self.addons.values_list('id', flat=True)))
def test_slug_indexed(self):
a = self.addons[0]
r = self.client.get(self.url, dict(q='omgyes'))
eq_(self.get_results(r), [])
a.update(slug='omgyes')
self.refresh()
r = self.client.get(self.url, dict(q='omgyes'))
eq_(self.get_results(r), [a.id])
    def test_authors_indexed(self):
        """Author usernames are indexed and matched, including by prefix
        ('pony' matches author 'ponypet')."""
        a = self.addons[0]
        r = self.client.get(self.url, dict(q='boop'))
        eq_(self.get_results(r), [])
        AddonUser.objects.create(addon=a,
            user=UserProfile.objects.create(username='boop'))
        AddonUser.objects.create(addon=a,
            user=UserProfile.objects.create(username='ponypet'))
        a.save()
        self.refresh()
        r = self.client.get(self.url, dict(q='garbage'))
        eq_(self.get_results(r), [])
        r = self.client.get(self.url, dict(q='boop'))
        eq_(self.get_results(r), [a.id])
        r = self.client.get(self.url, dict(q='pony'))
        eq_(self.get_results(r), [a.id])
    def test_tag_search(self):
        """Tag text is indexed and searchable after a reindex."""
        a = self.addons[0]
        tag_name = 'tagretpractice'
        r = self.client.get(self.url, dict(q=tag_name))
        eq_(self.get_results(r), [])
        AddonTag.objects.create(
            addon=a, tag=Tag.objects.create(tag_text=tag_name))
        a.save()
        self.refresh(timesleep=1)
        r = self.client.get(self.url, dict(q=tag_name))
        eq_(self.get_results(r), [a.id])
class TestPersonaSearch(SearchBase):
    """Search result tests for personas (lightweight themes)."""
    fixtures = ['base/apps']
    @classmethod
    def setUpClass(cls):
        super(TestPersonaSearch, cls).setUpClass()
        cls.setUpIndex()
    def setUp(self):
        # All requests in this class are scoped to the persona add-on type.
        self.url = urlparams(reverse('search.search'), atype=amo.ADDON_PERSONA)
    def _generate_personas(self):
        """Index a mix of reviewed, unreviewed, and disabled personas; only
        the reviewed ones (kept in self.personas) should show in results."""
        # Add some public personas.
        self.personas = []
        for status in amo.REVIEWED_STATUSES:
            self.personas.append(
                amo.tests.addon_factory(type=amo.ADDON_PERSONA, status=status))
        # Add some unreviewed personas.
        for status in set(amo.STATUS_CHOICES) - set(amo.REVIEWED_STATUSES):
            amo.tests.addon_factory(type=amo.ADDON_PERSONA, status=status)
        # Add a disabled persona.
        amo.tests.addon_factory(type=amo.ADDON_PERSONA, disabled_by_user=True)
        # NOTE: There are also some add-ons in `setUpIndex` for good measure.
        self.refresh()
    def test_sort_order_default(self):
        self._generate_personas()
        self.check_sort_links(None, sort_by='weekly_downloads')
    def test_sort_order_unknown(self):
        self._generate_personas()
        self.check_sort_links('xxx')
    def test_sort_order_users(self):
        self._generate_personas()
        self.check_sort_links('users', sort_by='average_daily_users')
    def test_sort_order_rating(self):
        self._generate_personas()
        self.check_sort_links('rating', sort_by='bayesian_rating')
    def test_sort_order_newest(self):
        self._generate_personas()
        self.check_sort_links('created', sort_by='created')
    def test_heading(self):
        self.check_heading()
    def test_results_blank_query(self):
        """A blank query lists every reviewed persona, without a footer."""
        self._generate_personas()
        personas_ids = sorted(p.id for p in self.personas)  # Not PersonaID ;)
        r = self.client.get(self.url, follow=True)
        eq_(r.status_code, 200)
        eq_(self.get_results(r), personas_ids)
        doc = pq(r.content)
        eq_(doc('.personas-grid li').length, len(personas_ids))
        eq_(doc('.listing-footer').length, 0)
    def test_results_name_query(self):
        # Deliberately skipped: everything below the raise is unreachable
        # until the SkipTest is removed.
        raise SkipTest
        self._generate_personas()
        p1 = self.personas[0]
        p1.name = 'Harry Potter'
        p1.save()
        p2 = self.personas[1]
        p2.name = 'The Life Aquatic with SeaVan'
        p2.save()
        self.refresh()
        # Empty search term should return everything.
        self.check_name_results({'q': ''}, sorted(p.id for p in self.personas))
        # Garbage search terms should return nothing.
        for term in ('xxx', 'garbage', '£'):
            self.check_name_results({'q': term}, [])
        # Try to match 'Harry Potter'.
        for term in ('harry', 'potter', 'har', 'pot', 'harry pooper'):
            self.check_name_results({'q': term}, [p1.pk])
        # Try to match 'The Life Aquatic with SeaVan'.
        for term in ('life', 'aquatic', 'seavan', 'sea van'):
            self.check_name_results({'q': term}, [p2.pk])
    def test_results_popularity(self):
        """Relevance-ranking: the closest-matching popular persona is first
        for every download/user based sort."""
        personas = [
            ('Harry Potter', 2000),
            ('Japanese Koi Tattoo', 67),
            ('Japanese Tattoo', 250),
            ('Japanese Tattoo boop', 50),
            ('Japanese Tattoo ballin', 200),
            ('The Japanese Tattooed Girl', 242),
        ]
        for name, popularity in personas:
            amo.tests.addon_factory(name=name, type=amo.ADDON_PERSONA,
                                    popularity=popularity)
        self.refresh()
        # Japanese Tattoo should be the #1 most relevant result. Obviously.
        expected_name, expected_popularity = personas[2]
        for sort in ('downloads', 'popularity', 'users'):
            r = self.client.get(urlparams(self.url, q='japanese tattoo',
                                          sort=sort), follow=True)
            eq_(r.status_code, 200)
            results = list(r.context['pager'].object_list)
            first = results[0]
            eq_(unicode(first.name), expected_name,
                'Was not first result for %r. Results: %s' % (sort, results))
            eq_(first.persona.popularity, expected_popularity,
                'Incorrect popularity for %r. Got %r. Expected %r.' % (
                    sort, first.persona.popularity, results))
            eq_(first.average_daily_users, expected_popularity,
                'Incorrect users for %r. Got %r. Expected %r.' % (
                    sort, first.average_daily_users, results))
            eq_(first.weekly_downloads, expected_popularity,
                'Incorrect weekly_downloads for %r. Got %r. Expected %r.' % (
                    sort, first.weekly_downloads, results))
    def test_results_appver_platform(self):
        self._generate_personas()
        self.check_appver_platform_ignored(sorted(p.id for p in self.personas))
    def test_results_other_applications(self):
        self._generate_personas()
        # Now ensure we get the same results for Firefox as for Thunderbird.
        self.url = self.url.replace('firefox', 'thunderbird')
        self.check_name_results({}, sorted(p.id for p in self.personas))
    def test_pagination(self):
        # TODO: Figure out why ES wonks out when we index a plethora of junk.
        # Deliberately skipped: everything below the raise is unreachable
        # until the SkipTest is removed.
        raise SkipTest
        # Generate some (22) personas to get us to two pages.
        left_to_add = DEFAULT_NUM_PERSONAS - len(self.personas) + 1
        for x in xrange(left_to_add):
            self.personas.append(
                amo.tests.addon_factory(type=amo.ADDON_PERSONA))
        self.refresh()
        # Page one should show 21 personas.
        r = self.client.get(self.url, follow=True)
        eq_(r.status_code, 200)
        eq_(pq(r.content)('.personas-grid li').length, DEFAULT_NUM_PERSONAS)
        # Page two should show 1 persona.
        r = self.client.get(self.url + '&page=2', follow=True)
        eq_(r.status_code, 200)
        eq_(pq(r.content)('.personas-grid li').length, 1)
class TestCollectionSearch(SearchBase):
    """Search result tests for the collections category of search."""
    fixtures = ['base/apps']
    @classmethod
    def setUpClass(cls):
        # Set up the mapping.
        super(TestCollectionSearch, cls).setUpClass()
    def setUp(self):
        # All requests in this class are scoped to collections.
        self.url = urlparams(reverse('search.search'), cat='collections')
    def _generate(self):
        """Index three public collections (kept in self.collections) plus
        synchronized/favorites/unlisted ones that must be excluded."""
        # Add some public collections.
        self.collections = []
        for x in xrange(3):
            self.collections.append(
                amo.tests.collection_factory(name='Collection %s' % x))
        # Synchronized, favorites, and unlisted collections should be excluded.
        for type_ in (amo.COLLECTION_SYNCHRONIZED, amo.COLLECTION_FAVORITES):
            amo.tests.collection_factory(type=type_)
        amo.tests.collection_factory(listed=False)
        self.refresh()
    def test_legacy_redirect(self):
        # Ensure `sort=newest` redirects to `sort=created`.
        r = self.client.get(urlparams(self.url, sort='newest'))
        self.assertRedirects(r, urlparams(self.url, sort='created'), 301)
    def test_sort_order_unknown(self):
        self._generate()
        self.check_sort_links('xxx')
    def test_sort_order_default(self):
        self._generate()
        self.check_sort_links(None, sort_by='weekly_subscribers')
    def test_sort_order_weekly(self):
        self._generate()
        self.check_sort_links('weekly', sort_by='weekly_subscribers')
    def test_sort_order_default_with_term(self):
        self._generate()
        self.check_sort_links(None, sort_by='weekly_subscribers',
                              params={'q': 'collection'})
    def test_sort_order_weekly_with_term(self):
        self._generate()
        self.check_sort_links('weekly', sort_by='weekly_subscribers',
                              params={'q': 'collection'})
    def test_sort_order_monthly(self):
        self._generate()
        self.check_sort_links('monthly', sort_by='monthly_subscribers')
    def test_sort_order_all(self):
        self._generate()
        self.check_sort_links('all', sort_by='subscribers')
    def test_sort_order_rating(self):
        self._generate()
        self.check_sort_links('rating', sort_by='rating')
    def test_sort_order_name(self):
        self._generate()
        self.check_sort_links('name', sort_by='name', reverse=False)
    def test_sort_order_created(self):
        self._generate()
        self.check_sort_links('created', sort_by='created')
    def test_sort_order_updated(self):
        self._generate()
        self.check_sort_links('updated', sort_by='modified')
    def test_created_timestamp(self):
        """Items sorted by creation date show an 'Added <date>' stamp."""
        self._generate()
        r = self.client.get(urlparams(self.url, sort='created'))
        items = pq(r.content)('.primary .item')
        for idx, c in enumerate(r.context['pager'].object_list):
            eq_(strip_whitespace(items.eq(idx).find('.modified').text()),
                'Added %s' % strip_whitespace(datetime_filter(c.created)))
    def test_updated_timestamp(self):
        """Items sorted by modified date show an 'Updated <date>' stamp."""
        self._generate()
        r = self.client.get(urlparams(self.url, sort='updated'))
        items = pq(r.content)('.primary .item')
        for idx, c in enumerate(r.context['pager'].object_list):
            eq_(strip_whitespace(items.eq(idx).find('.modified').text()),
                'Updated %s' % strip_whitespace(datetime_filter(c.modified)))
    def check_followers_count(self, sort, column):
        # Checks that we show the correct type/number of followers.
        r = self.client.get(urlparams(self.url, sort=sort))
        items = pq(r.content)('.primary .item')
        for idx, c in enumerate(r.context['pager'].object_list):
            eq_(items.eq(idx).find('.followers').text().split()[0],
                numberfmt(getattr(c, column)))
    def test_followers_all(self):
        self._generate()
        for sort in ('', 'all', 'rating', 'created', 'modified', 'name'):
            self.check_followers_count(sort, column='subscribers')
    def test_followers_monthly(self):
        self._generate()
        self.check_followers_count('monthly', column='monthly_subscribers')
    def test_followers_weekly(self):
        self._generate()
        self.check_followers_count('weekly', column='weekly_subscribers')
    def test_heading(self):
        # One is a lonely number. But that's all we need.
        amo.tests.collection_factory()
        self.check_heading()
    def test_results_blank_query(self):
        """A blank query lists every public collection, without a footer."""
        self._generate()
        collection_ids = sorted(p.id for p in self.collections)
        r = self.client.get(self.url, follow=True)
        eq_(r.status_code, 200)
        eq_(self.get_results(r), collection_ids)
        doc = pq(r.content)
        eq_(doc('.primary .item').length, len(collection_ids))
        eq_(doc('.listing-footer').length, 0)
    def test_results_name_query(self):
        """Name search matches individual terms, not only full names."""
        self._generate()
        c1 = self.collections[0]
        c1.name = 'SeaVans: A Collection of Cars at the Beach'
        c1.save()
        c2 = self.collections[1]
        c2.name = 'The Life Aquatic with SeaVan: An Underwater Collection'
        c2.save()
        self.refresh(timesleep=1)
        # These contain terms that are in every result - so return everything.
        for term in ('collection',
                     'seavan: a collection of cars at the beach'):
            self.check_name_results({'q': term},
                                    sorted(p.id for p in self.collections))
        # Garbage search terms should return nothing.
        for term in ('xxx', 'garbage', '£'):
            self.check_name_results({'q': term}, [])
        # Try to match 'SeaVans: A Collection of Cars at the Beach'.
        for term in ('cars', 'beach'):
            self.check_name_results({'q': term}, [c1.pk])
        # Match 'The Life Aquatic with SeaVan: An Underwater Collection'.
        for term in ('life aquatic', 'life', 'aquatic', 'underwater', 'under'):
            self.check_name_results({'q': term}, [c2.pk])
        # Match both results above.
        for term in ('seavan', 'seavans'):
            self.check_name_results({'q': term}, sorted([c1.pk, c2.pk]))
    def test_results_popularity(self):
        """Relevance-ranking: the closest-matching popular collection is
        first for the subscriber-based sorts."""
        collections = [
            ('Traveler Pack', 2000),
            ('Tools for Developer', 67),
            ('Web Developer', 250),
            ('Web Developer Necessities', 50),
            ('Web Pro', 200),
            ('Web Developer Pack', 242),
        ]
        for name, subscribers in collections:
            amo.tests.collection_factory(name=name, subscribers=subscribers,
                                         weekly_subscribers=subscribers)
        self.refresh()
        # "Web Developer Collection" should be the #1 most relevant result.
        expected_name, expected_subscribers = collections[2]
        for sort in ('', 'all'):
            r = self.client.get(urlparams(self.url, q='web developer',
                                          sort=sort), follow=True)
            eq_(r.status_code, 200)
            results = list(r.context['pager'].object_list)
            first = results[0]
            eq_(unicode(first.name), expected_name,
                'Was not first result for %r. Results: %s' % (sort, results))
            eq_(first.subscribers, expected_subscribers,
                'Incorrect subscribers for %r. Got %r. Expected %r.' % (
                    sort, first.subscribers, results))
    def test_results_appver_platform(self):
        self._generate()
        self.check_appver_platform_ignored(
            sorted(c.id for c in self.collections))
    def test_results_other_applications(self):
        """Collections are scoped to the requesting application."""
        tb_collection = amo.tests.collection_factory(
            application_id=amo.THUNDERBIRD.id)
        sm_collection = amo.tests.collection_factory(
            application_id=amo.SEAMONKEY.id)
        self.refresh()
        r = self.client.get(self.url)
        eq_(self.get_results(r), [])
        r = self.client.get(self.url.replace('firefox', 'thunderbird'))
        eq_(self.get_results(r), [tb_collection.id])
        r = self.client.get(self.url.replace('firefox', 'seamonkey'))
        eq_(self.get_results(r), [sm_collection.id])
def test_search_redirects():
    """Nose generator test: `views.fix_search_query` rewrites legacy search
    query strings (lver -> appver, pid -> platform, old sort names) and
    returns already-modern query strings untouched (identity check below
    proves no rewrite happened).
    """
    changes = (
        ('q=yeah&sort=newest', 'q=yeah&sort=updated'),
        ('sort=weeklydownloads', 'sort=users'),
        ('sort=averagerating', 'sort=rating'),
        ('lver=5.*', 'appver=5.*'),
        ('q=woo&sort=averagerating&lver=6.0', 'q=woo&sort=rating&appver=6.0'),
        ('pid=2', 'platform=linux'),
        ('q=woo&lver=6.0&sort=users&pid=5',
         'q=woo&appver=6.0&sort=users&platform=windows'),
    )
    def check(before, after):
        # A rewritten query comes back as a plain dict of the new params.
        eq_(views.fix_search_query(QueryDict(before)),
            dict(urlparse.parse_qsl(after)))
    for before, after in changes:
        yield check, before, after
    queries = (
        'q=yeah',
        'q=yeah&sort=users',
        'sort=users',
        'q=yeah&appver=6.0',
        'q=yeah&appver=6.0&platform=mac',
    )
    def same(qs):
        # Identity (`is`) proves the QueryDict passed through unmodified.
        q = QueryDict(qs)
        assert views.fix_search_query(q) is q
    for qs in queries:
        yield same, qs
class TestAjaxSearch(amo.tests.ESTestCase):
    """Base class for the JSON search endpoints: `search_addons` asserts
    the serialized payload matches the expected add-on objects."""
    @classmethod
    def setUpClass(cls):
        super(TestAjaxSearch, cls).setUpClass()
        cls.setUpIndex()
    def search_addons(self, url, params, addons=None, types=amo.ADDON_TYPES,
                      src=None):
        """GET `url` with query string `params` and assert the JSON response
        describes exactly `addons` (id, name, url, icon), each of which must
        be reviewed, enabled, and of a type allowed by `types`.

        `addons` defaults to an empty list; a None sentinel replaces the
        previous mutable default argument.
        """
        if addons is None:
            addons = []
        r = self.client.get(url + '?' + params)
        eq_(r.status_code, 200)
        data = json.loads(r.content)
        # Compare payload and expectation in a stable (id) order.
        data = sorted(data, key=lambda x: x['id'])
        addons = sorted(addons, key=lambda x: x.id)
        eq_(len(data), len(addons))
        for got, expected in zip(data, addons):
            eq_(int(got['id']), expected.id)
            eq_(got['name'], unicode(expected.name))
            expected_url = expected.get_url_path()
            if src:
                expected_url += '?src=ss'
            eq_(got['url'], expected_url)
            eq_(got['icon'], expected.icon_url)
            assert expected.status in amo.REVIEWED_STATUSES, (
                'Unreviewed add-ons should not appear in search results.')
            eq_(expected.is_disabled, False)
            assert expected.type in types, (
                'Add-on type %s should not be searchable.' % expected.type)
class TestGenericAjaxSearch(TestAjaxSearch):
    """Tests for the generic `search.ajax` endpoint (lookups by id/name)."""
    def search_addons(self, params, addons=None):
        """Re-save all add-ons so the index is fresh, then delegate to the
        base assertions against the `search.ajax` URL.

        `addons` uses a None sentinel instead of a mutable default list.
        """
        if addons is None:
            addons = []
        [a.save() for a in Addon.objects.all()]
        self.refresh(timesleep=1)
        super(TestGenericAjaxSearch, self).search_addons(
            reverse('search.ajax'), params, addons)
    def test_ajax_search_by_id(self):
        addon = Addon.objects.reviewed().all()[0]
        self.search_addons('q=%s' % addon.id, [addon])
    def test_ajax_search_by_bad_id(self):
        self.search_addons('q=999', [])
    def test_ajax_search_unreviewed_by_id(self):
        # NOTE(review): this queries the literal id 999 rather than
        # `addon.id`, so the unreviewed add-on is never actually searched
        # for -- looks like a copy/paste slip from test_ajax_search_by_bad_id.
        # Left as-is pending confirmation of the intended assertion.
        addon = Addon.objects.all()[3]
        addon.update(status=amo.STATUS_UNREVIEWED)
        self.search_addons('q=999', [])
    def test_ajax_search_lite_reviewed_by_id(self):
        addon = Addon.objects.all()[3]
        addon.update(status=amo.STATUS_LITE)
        q = 'q=%s' % addon.id
        self.search_addons(q, [addon])
        addon.update(status=amo.STATUS_LITE_AND_NOMINATED)
        self.search_addons(q, [addon])
    def test_ajax_search_user_disabled_by_id(self):
        addon = Addon.objects.filter(disabled_by_user=True)[0]
        self.search_addons('q=%s' % addon.id, [])
    def test_ajax_search_admin_disabled_by_id(self):
        addon = Addon.objects.filter(status=amo.STATUS_DISABLED)[0]
        self.search_addons('q=%s' % addon.id, [])
    def test_ajax_search_admin_deleted_by_id(self):
        amo.tests.addon_factory(status=amo.STATUS_DELETED)
        self.refresh()
        addon = Addon.with_deleted.filter(status=amo.STATUS_DELETED)[0]
        self.search_addons('q=%s' % addon.id, [])
    def test_ajax_search_personas_by_id(self):
        addon = Addon.objects.all()[3]
        addon.update(type=amo.ADDON_PERSONA)
        addon.update(status=amo.STATUS_LITE)
        Persona.objects.create(persona_id=addon.id, addon_id=addon.id)
        self.search_addons('q=%s' % addon.id, [addon])
    def test_ajax_search_webapp_by_id(self):
        """Webapps should not appear in ajax search results."""
        addon = Addon.objects.all()[3]
        addon.update(type=amo.ADDON_WEBAPP)
        self.search_addons('q=%s' % addon.id, [])
    def test_ajax_search_by_name(self):
        addon = amo.tests.addon_factory(
            name='uniqueaddon',
            status=amo.STATUS_LITE,
            type=amo.ADDON_EXTENSION,
        )
        self.refresh(timesleep=1)
        self.search_addons('q=' + unicode(addon.name), [addon])
    def test_ajax_search_by_bad_name(self):
        self.search_addons('q=some+filthy+bad+word', [])
class TestSearchSuggestions(TestAjaxSearch):
    """Tests for the search-suggestions JSON endpoint (add-ons and apps)."""
    def setUp(self):
        self.url = reverse('search.suggestions')
        amo.tests.addon_factory(name='addon webapp', type=amo.ADDON_WEBAPP)
        amo.tests.addon_factory(name='addon persona', type=amo.ADDON_PERSONA)
        amo.tests.addon_factory(name='addon persona', type=amo.ADDON_PERSONA,
                                disabled_by_user=True, status=amo.STATUS_NULL)
        self.refresh(timesleep=1)
    def search_addons(self, params, addons=None,
                      types=views.AddonSuggestionsAjax.types):
        """Delegate to the base assertions against the suggestions URL.

        `addons` uses a None sentinel instead of a mutable default list.
        """
        if addons is None:
            addons = []
        super(TestSearchSuggestions, self).search_addons(
            self.url, params, addons, types, src='ss')
    def search_applications(self, params, apps=None):
        """Assert the suggestions payload describes exactly `apps`
        (id, pretty-name label, locale URL, css class).

        `apps` uses a None sentinel instead of a mutable default list.
        """
        if apps is None:
            apps = []
        r = self.client.get(self.url + '?' + params)
        eq_(r.status_code, 200)
        data = json.loads(r.content)
        data = sorted(data, key=lambda x: x['id'])
        apps = sorted(apps, key=lambda x: x.id)
        eq_(len(data), len(apps))
        for got, expected in zip(data, apps):
            eq_(int(got['id']), expected.id)
            eq_(got['name'], '%s Add-ons' % unicode(expected.pretty))
            eq_(got['url'], locale_url(expected.short))
            eq_(got['cls'], 'app ' + expected.short)
    def test_get(self):
        r = self.client.get(self.url)
        eq_(r.status_code, 200)
    def test_addons(self):
        addons = (Addon.objects.reviewed()
                  .filter(disabled_by_user=False,
                          type__in=views.AddonSuggestionsAjax.types))
        self.search_addons('q=add', list(addons))
        self.search_addons('q=add&cat=all', list(addons))
    def test_unicode(self):
        self.search_addons('q=%C2%B2%C2%B2', [])
    def test_personas(self):
        personas = (Addon.objects.reviewed()
                    .filter(type=amo.ADDON_PERSONA, disabled_by_user=False))
        personas, types = list(personas), [amo.ADDON_PERSONA]
        self.search_addons('q=add&cat=themes', personas, types)
        self.search_addons('q=persona&cat=themes', personas, types)
        self.search_addons('q=PERSONA&cat=themes', personas, types)
        self.search_addons('q=persona&cat=all', [])
    def test_applications(self):
        self.search_applications('', [])
        self.search_applications('q=FIREFOX', [amo.FIREFOX])
        self.search_applications('q=firefox', [amo.FIREFOX])
        self.search_applications('q=bird', [amo.THUNDERBIRD])
        self.search_applications('q=mobile', [amo.MOBILE])
        self.search_applications('q=mozilla', [])
Add test for searching over multiple tags for add-ons.
# -*- coding: utf-8 -*-
import json
import urlparse
from django.http import QueryDict
from jingo.helpers import datetime as datetime_filter
from nose import SkipTest
from nose.tools import eq_
from pyquery import PyQuery as pq
from test_utils import RequestFactory
from tower import strip_whitespace
import amo
import amo.tests
from amo.helpers import locale_url, numberfmt, urlparams
from amo.urlresolvers import reverse
from addons.models import Addon, AddonCategory, AddonUser, Category, Persona
from search import views
from search.utils import floor_version
from search.views import DEFAULT_NUM_PERSONAS, version_sidebar
from tags.models import AddonTag, Tag
from users.models import UserProfile
from versions.compare import num as vnum, version_int as vint, MAXVERSION
class TestSearchboxTarget(amo.tests.ESTestCase):
    """Verifies the header search box targets the right category, action
    and placeholder on each landing page."""
    @classmethod
    def setUpClass(cls):
        super(TestSearchboxTarget, cls).setUpClass()
        cls.setUpIndex()
    def check(self, url, placeholder, cat=None, action=None, q=None):
        # Checks that we search within addons, personas, collections, etc.
        form = pq(self.client.get(url).content)('.header-search form')
        eq_(form.attr('action'), action or reverse('search.search'))
        if cat:
            eq_(form('input[name=cat]').val(), cat)
        q_field = form('input[name=q]')
        eq_(q_field.attr('placeholder'), placeholder)
        if q:
            eq_(q_field.val(), q)
    def test_addons_is_default(self):
        self.check(reverse('home'), 'search for add-ons')
    def test_themes(self):
        self.check(reverse('browse.themes'), 'search for add-ons',
                   '%s,0' % amo.ADDON_THEME)
    def test_collections(self):
        self.check(reverse('collections.list'), 'search for collections',
                   'collections')
    def test_personas(self):
        self.check(reverse('browse.personas'), 'search for themes',
                   'themes')
    def test_addons_search(self):
        self.check(reverse('search.search'), 'search for add-ons')
    def test_addons_search_term(self):
        self.check(reverse('search.search') + '?q=ballin',
                   'search for add-ons', q='ballin')
class SearchBase(amo.tests.ESTestCase):
    """Shared assertions for search result pages (add-ons, personas,
    collections)."""
    def get_results(self, r, sort=True):
        """Return pks of add-ons shown on search results page."""
        results = [a.id for a in r.context['pager'].object_list]
        if sort:
            results = sorted(results)
        return results
    def check_sort_links(self, key, title=None, sort_by=None, reverse=True,
                         params=None):
        """Request the page sorted by `key`; check the selected sort label
        (`title`) and, when `sort_by` is given, that results come back
        ordered by that attribute (descending unless `reverse` is False).

        `params` uses a None sentinel instead of a mutable default dict.
        """
        if params is None:
            params = {}
        r = self.client.get(urlparams(self.url, sort=key, **params))
        eq_(r.status_code, 200)
        doc = pq(r.content)
        if title:
            if hasattr(self, 'MOBILE'):
                menu = doc('#sort-menu')
                eq_(menu.find('span').text(), title)
                eq_(menu.find('.selected').text(), title)
            else:
                eq_(doc('#sorter .selected').text(), title)
        if sort_by:
            results = r.context['pager'].object_list
            if sort_by == 'name':
                # Names sort ascending, on the unicode of the name.
                expected = sorted(results, key=lambda x: unicode(x.name))
            else:
                expected = sorted(results, key=lambda x: getattr(x, sort_by),
                                  reverse=reverse)
            eq_(list(results), expected)
    def check_name_results(self, params, expected):
        """Search with `params` and assert result pks equal `expected`."""
        r = self.client.get(urlparams(self.url, **params), follow=True)
        eq_(r.status_code, 200)
        got = self.get_results(r)
        eq_(got, expected,
            'Got: %s. Expected: %s. Parameters: %s' % (got, expected, params))
    def check_appver_platform_ignored(self, expected):
        # Collection results should not filter on `appver` nor `platform`.
        permutations = [
            {},
            {'appver': amo.FIREFOX.id},
            {'appver': amo.THUNDERBIRD.id},
            {'platform': amo.PLATFORM_MAC.id},
            {'appver': amo.SEAMONKEY.id, 'platform': amo.PLATFORM_WIN.id},
        ]
        for p in permutations:
            self.check_name_results(p, expected)
    def check_heading(self):
        """The results heading shows no term by default, then the query."""
        r = self.client.get(self.url)
        eq_(r.status_code, 200)
        eq_(pq(r.content)('.results-count strong').text(), None)
        r = self.client.get(self.url + '&q=ballin')
        eq_(r.status_code, 200)
        eq_(pq(r.content)('.results-count strong').text(), 'ballin')
class TestESSearch(SearchBase):
fixtures = ['base/apps', 'base/category', 'tags/tags']
@classmethod
def setUpClass(cls):
super(TestESSearch, cls).setUpClass()
cls.setUpIndex()
def setUp(self):
self.url = reverse('search.search')
self.addons = Addon.objects.filter(status=amo.STATUS_PUBLIC,
disabled_by_user=False)
for addon in self.addons:
AddonCategory.objects.create(addon=addon, category_id=1)
addon.save()
self.refresh()
def refresh_addons(self):
[a.save() for a in Addon.objects.all()]
self.refresh()
def test_get(self):
    r = self.client.get(self.url)
    eq_(r.status_code, 200)
    # Responses vary on X-PJAX so caches keep pjax and full-page
    # renderings separate.
    assert 'X-PJAX' in r['vary'].split(','), 'Expected "Vary: X-PJAX"'
    self.assertTemplateUsed(r, 'search/results.html')

@amo.tests.mobile_test
def test_get_mobile(self):
    r = self.client.get(self.url)
    eq_(r.status_code, 200)
    self.assertTemplateUsed(r, 'search/mobile/results.html')

@amo.tests.mobile_test
def test_mobile_results_downloads(self):
    # Mobile result items should surface the weekly-downloads figure.
    r = self.client.get(urlparams(self.url, sort='downloads'))
    assert pq(r.content)('#content .item .vital.downloads'), (
        'Expected weekly downloads')

def test_search_tools_omit_users(self):
    # Search tools must not offer a "Most Users" sort option.
    r = self.client.get(self.url, dict(cat='%s,5' % amo.ADDON_SEARCH))
    eq_(r.status_code, 200)
    sorter = pq(r.content)('#sorter')
    eq_(sorter.length, 1)
    assert 'sort=users' not in sorter.text(), (
        'Sort by "Most Users" should not appear for search tools.')

# Sort-link checks: check_sort_links(sort_param, link_text, es_sort_field)
# is inherited from the base class defined outside this chunk.
def test_results_sort_default(self):
    self.check_sort_links(None, 'Relevance', 'weekly_downloads')

def test_results_sort_unknown(self):
    self.check_sort_links('xxx', 'Relevance')

def test_results_sort_users(self):
    self.check_sort_links('users', 'Most Users', 'average_daily_users')

def test_results_sort_rating(self):
    self.check_sort_links('rating', 'Top Rated', 'bayesian_rating')

def test_results_sort_newest(self):
    self.check_sort_links('created', 'Newest', 'created')

def test_results_sort_updated(self):
    self.check_sort_links('updated', 'Recently Updated')

def test_results_sort_downloads(self):
    self.check_sort_links('downloads', 'Weekly Downloads',
                          'weekly_downloads')

def test_mobile_results_sort_name(self):
    # NOTE(review): despite the "mobile" name this test lacks the
    # @amo.tests.mobile_test decorator all its mobile siblings below
    # carry — confirm whether that omission is intentional.
    self.check_sort_links('name', 'Name', 'name', reverse=False)

@amo.tests.mobile_test
def test_mobile_results_sort_default(self):
    self.check_sort_links(None, 'Relevance', 'weekly_downloads')

@amo.tests.mobile_test
def test_mobile_results_sort_unknown(self):
    self.check_sort_links('xxx', 'Relevance')

@amo.tests.mobile_test
def test_mobile_results_sort_users(self):
    self.check_sort_links('users', 'Most Users', 'average_daily_users')

@amo.tests.mobile_test
def test_mobile_results_sort_rating(self):
    self.check_sort_links('rating', 'Top Rated', 'bayesian_rating')

@amo.tests.mobile_test
def test_mobile_results_sort_newest(self):
    self.check_sort_links('created', 'Newest', 'created')

def test_legacy_redirects(self):
    # Old-style sort params 301-redirect to their modern equivalents.
    r = self.client.get(self.url + '?sort=averagerating')
    self.assertRedirects(r, self.url + '?sort=rating', status_code=301)

def test_legacy_redirects_to_non_ascii(self):
    # Redirects must survive percent-encoded non-ASCII path segments.
    # see http://sentry.dmz.phx1.mozilla.com/addons/group/2186/
    url = '/ga-IE/seamonkey/tag/%E5%95%86%E5%93%81%E6%90%9C%E7%B4%A2'
    from_ = ('?sort=updated&lver=1.0&advancedsearch=1'
             '&tag=dearbhair&cat=4%2C84')
    to = ('?sort=updated&advancedsearch=1&appver=1.0'
          '&tag=dearbhair&cat=4%2C84')
    r = self.client.get(url + from_)
    self.assertRedirects(r, url + to, status_code=301)
def check_platform_filters(self, platform, expected=None):
    # `expected` is a list of (label, selected) pairs in the order the
    # platform facet renders its entries.
    r = self.client.get('%s?platform=%s' % (self.url, platform),
                        follow=True)
    plats = r.context['platforms']
    for idx, plat in enumerate(plats):
        name, selected = expected[idx]
        label = unicode(plat.text)
        eq_(label, name,
            '%r platform had the wrong label: %s' % (platform, label))
        eq_(plat.selected, selected,
            '%r platform should have been selected' % platform)

def test_platform_default(self):
    # Empty/all/any and unknown platforms all fall back to "All Systems".
    expected = [
        ('All Systems', True),
        ('Linux', False),
        ('Mac OS X', False),
        ('Windows', False),
    ]
    self.check_platform_filters('', expected)
    self.check_platform_filters('all', expected)
    self.check_platform_filters('any', expected)
    self.check_platform_filters('amiga', expected)

def test_platform_listed(self):
    # Each named platform selects exactly its own facet entry.
    expected = [
        ('All Systems', False),
        ('Linux', True),
        ('Mac OS X', False),
        ('Windows', False),
    ]
    self.check_platform_filters('linux', expected)
    expected = [
        ('All Systems', False),
        ('Linux', False),
        ('Mac OS X', False),
        ('Windows', True),
    ]
    self.check_platform_filters('windows', expected)
    expected = [
        ('All Systems', False),
        ('Linux', False),
        ('Mac OS X', True),
        ('Windows', False),
    ]
    self.check_platform_filters('mac', expected)

def test_platform_incompatible(self):
    expected = [
        ('All Systems', True),
        ('Linux', False),
        ('Mac OS X', False),
        ('Windows', False),
    ]
    self.check_platform_filters('any', expected)
    # A platform outside the listed set is appended as an extra,
    # selected facet entry.
    expected = [
        ('All Systems', False),
        ('Linux', False),
        ('Mac OS X', False),
        ('Windows', False),
        ('Maemo', True),
    ]
    self.check_platform_filters('maemo', expected)

def test_platform_legacy_params(self):
    # Legacy numeric platform ids (?platform=<pid>) must map onto the
    # same named filters as their symbolic equivalents.
    ALL = (amo.PLATFORM_ALL, amo.PLATFORM_ANY, amo.PLATFORM_ALL_MOBILE)
    listed = ALL + (amo.PLATFORM_LINUX, amo.PLATFORM_MAC, amo.PLATFORM_WIN)
    for idx, platform in amo.PLATFORMS.iteritems():
        expected = [
            ('All Systems', platform in ALL),
            ('Linux', platform == amo.PLATFORM_LINUX),
            ('Mac OS X', platform == amo.PLATFORM_MAC),
            ('Windows', platform == amo.PLATFORM_WIN),
        ]
        if platform not in listed:
            # Unlisted platforms show up as an extra selected entry.
            expected.append((platform.name, True))
        self.check_platform_filters(str(idx), expected)
def check_appver_filters(self, appver, expected):
    # Exercise version_sidebar directly with a canned facet payload.
    # `expected` truthiness drives whether the "Any <app>" entry should
    # be deselected; returns the remaining version entries as dicts.
    request = RequestFactory()
    request.APP = amo.FIREFOX
    facets = {
        u'platforms': [{u'count': 58, u'term': 1}],
        u'appversions': [{u'count': 58, u'term': 5000000200100}],
        u'categories': [{u'count': 55, u'term': 1}],
        u'tags': []
    }
    versions = version_sidebar(request,
                               {'appver': floor_version(appver)}, facets)
    all_ = versions.pop(0)
    eq_(all_.text, 'Any %s' % unicode(request.APP.pretty))
    eq_(all_.selected, not expected)
    return [v.__dict__ for v in versions]

def test_appver_default(self):
    # No filter: only the faceted 5.0 entry, nothing selected.
    eq_(self.check_appver_filters('', ''),
        [{'text': u'Firefox 5.0',
          'selected': False,
          'urlparams': {'appver': '5.0'},
          'children': []}])

def test_appver_known(self):
    eq_(self.check_appver_filters('5.0', '5.0'),
        [{'text': u'Firefox 5.0',
          'selected': True,
          'urlparams': {'appver': '5.0'},
          'children': []}])

def test_appver_oddballs(self):
    # A point release (3.6.22) floors to its major.minor and is added
    # as a selected entry alongside the faceted version.
    eq_(self.check_appver_filters('3.6.22', '3.6'),
        [{'text': u'Firefox 5.0',
          'selected': False,
          'urlparams': {'appver': '5.0'},
          'children': []},
         {'text': u'Firefox 3.6',
          'selected': True,
          'urlparams': {'appver': '3.6'},
          'children': []}])

def test_appver_long(self):
    # Version numbers beyond the MAXVERSION encoding limit must not
    # crash the sidebar.
    too_big = vnum(vint(MAXVERSION + 1))
    just_right = vnum(vint(MAXVERSION))
    assert self.check_appver_filters(too_big, floor_version(just_right)), (
        'All I ask is do not crash')
    eq_(self.check_appver_filters('9999999', '9999999.0'),
        [{'text': u'Firefox 9999999.0',
          'selected': True,
          'urlparams': {'appver': '9999999.0'},
          'children': []},
         {'text': u'Firefox 5.0',
          'selected': False,
          'urlparams': {'appver': '5.0'},
          'children': []}])
    eq_(self.check_appver_filters('99999999', '99999999.0'),
        [{'text': u'Firefox 99999999.0',
          'selected': True,
          'urlparams': {'appver': '99999999.0'},
          'children': []},
         {'text': u'Firefox 5.0',
          'selected': False,
          'urlparams': {'appver': '5.0'},
          'children': []}])

def test_appver_bad(self):
    # Garbage version strings must still return entries, not blow up.
    assert self.check_appver_filters('.', '.')
    assert self.check_appver_filters('_', '_')
    assert self.check_appver_filters('y.y', 'y.y')
    assert self.check_appver_filters('*', '*')
def test_non_pjax_results(self):
    # A normal request renders the full page: html wrapper, facet
    # sidebar context vars, and pjax trigger hooks all present.
    r = self.client.get(self.url)
    eq_(r.status_code, 200)
    eq_(r.context['is_pjax'], None)
    # These context variables should exist for normal requests.
    for var in ('categories', 'platforms', 'versions', 'tags'):
        assert var in r.context, '%r missing context var in view' % var
    doc = pq(r.content)
    eq_(doc('html').length, 1)
    eq_(doc('#pjax-results').length, 1)
    eq_(doc('#search-facets .facets.pjax-trigger').length, 1)
    eq_(doc('#sorter.pjax-trigger').length, 1)

def test_pjax_results(self):
    # With the X-PJAX header only the results fragment is rendered:
    # no html wrapper, no facet sidebar, but the sorter survives.
    r = self.client.get(self.url, HTTP_X_PJAX=True)
    eq_(r.status_code, 200)
    eq_(r.context['is_pjax'], True)
    doc = pq(r.content)
    eq_(doc('html').length, 0)
    eq_(doc('#pjax-results').length, 0)
    eq_(doc('#search-facets .facets.pjax-trigger').length, 0)
    eq_(doc('#sorter.pjax-trigger').length, 1)

def test_facet_data_params_default(self):
    r = self.client.get(self.url)
    a = pq(r.content)('#search-facets a[data-params]:first')
    eq_(json.loads(a.attr('data-params')),
        dict(atype=None, cat=None, page=None))

def test_facet_data_params_filtered(self):
    # Facet links reset atype/cat/page even when other filters are set.
    r = self.client.get(self.url + '?appver=3.6&platform=mac&page=3')
    a = pq(r.content)('#search-facets a[data-params]:first')
    eq_(json.loads(a.attr('data-params')),
        dict(atype=None, cat=None, page=None))
def check_cat_filters(self, params=None, selected='All Add-ons'):
    # Assert that the category facet renders the All/Extensions/<cat>
    # links and that `selected` is the highlighted one; result set must
    # always be the full public add-on list (bad filters are ignored).
    if not params:
        params = {}
    r = self.client.get(urlparams(self.url, **params))
    eq_(sorted(a.id for a in self.addons),
        sorted(a.id for a in r.context['pager'].object_list))
    cat = self.addons[0].all_categories[0]
    links = pq(r.content)('#category-facets li a')
    expected = [
        ('All Add-ons', self.url),
        ('Extensions', urlparams(self.url, atype=amo.ADDON_EXTENSION)),
        (unicode(cat.name), urlparams(self.url, atype=amo.ADDON_EXTENSION,
                                      cat=cat.id)),
    ]
    amo.tests.check_links(expected, links, selected, verify=False)

def test_defaults_atype_no_cat(self):
    self.check_cat_filters(dict(atype=1))

def test_defaults_atype_unknown_cat(self):
    self.check_cat_filters(dict(atype=amo.ADDON_EXTENSION, cat=999))

def test_defaults_no_atype_unknown_cat(self):
    self.check_cat_filters(dict(cat=999))

def test_defaults_atype_foreign_cat(self):
    # A category belonging to another application must not filter.
    cat = Category.objects.create(application_id=amo.THUNDERBIRD.id,
                                  type=amo.ADDON_EXTENSION)
    self.check_cat_filters(dict(atype=amo.ADDON_EXTENSION, cat=cat.id))

def test_listed_cat(self):
    cat = self.addons[0].all_categories[0]
    self.check_cat_filters(dict(atype=amo.ADDON_EXTENSION, cat=cat.id),
                           selected=unicode(cat.name))

def test_cat_facet_stale(self):
    # Category deleted but index not refreshed: facet still shows the
    # generic entries, minus the removed category link.
    AddonCategory.objects.all().delete()
    r = self.client.get(self.url)
    expected = [
        ('All Add-ons', self.url),
        ('Extensions', urlparams(self.url, atype=amo.ADDON_EXTENSION)),
    ]
    amo.tests.check_links(expected, pq(r.content)('#category-facets li a'),
                          verify=False)

def test_cat_facet_fresh(self):
    AddonCategory.objects.all().delete()
    # Save to reindex with new categories.
    self.refresh_addons()
    r = self.client.get(self.url)
    amo.tests.check_links([('All Add-ons', self.url)],
                          pq(r.content)('#category-facets li a'),
                          verify=False)

def test_unknown_tag_filter(self):
    # An unknown tag is still shown as the selected facet but yields
    # zero results.
    r = self.client.get(urlparams(self.url, tag='xxx'))
    a = pq(r.content)('#tag-facets li.selected a')
    eq_(a.length, 1)
    eq_(a.text(), 'xxx')
    eq_(list(r.context['pager'].object_list), [])

def test_tag_filters_on_search_page(self):
    r = self.client.get(self.url, dict(tag='sky'))
    a = pq(r.content)('#tag-facets li.selected a[data-params]')
    eq_(json.loads(a.attr('data-params')), dict(tag='sky', page=None))

def test_no_tag_filters_on_tags_page(self):
    # The tag detail page hides the tag facet entirely.
    r = self.client.get(reverse('tags.detail', args=['sky']))
    eq_(r.status_code, 200)
    eq_(pq(r.content)('#tag-facets').length, 0)

def get_results(self, r):
    """Return pks of add-ons shown on search results page."""
    pks = pq(r.content)('#pjax-results div[data-addon]')
    return sorted(int(pq(a).attr('data-addon')) for a in pks)
def test_results_filtered_atype(self):
    # Turn one add-on into a theme, reindex, then check each atype
    # filter returns only its own type.
    theme = self.addons[0]
    theme.type = amo.ADDON_THEME
    theme.save()
    self.refresh_addons()
    themes = sorted(self.addons.filter(type=amo.ADDON_THEME)
                    .values_list('id', flat=True))
    eq_(themes, [theme.id])
    extensions = sorted(self.addons.filter(type=amo.ADDON_EXTENSION)
                        .values_list('id', flat=True))
    eq_(extensions, sorted(a.id for a in self.addons[1:]))
    # Extensions should show only extensions.
    r = self.client.get(self.url, dict(atype=amo.ADDON_EXTENSION))
    eq_(r.status_code, 200)
    eq_(self.get_results(r), extensions)
    # Themes should show only themes.
    r = self.client.get(self.url, dict(atype=amo.ADDON_THEME))
    eq_(r.status_code, 200)
    eq_(self.get_results(r), themes)

def test_results_respect_appver_filtering(self):
    # No add-on is compatible with 9.00, so nothing comes back.
    r = self.client.get(self.url, dict(appver='9.00'))
    eq_(self.get_results(r), [])

def test_results_skip_appver_filtering_for_d2c(self):
    # Default-to-compatible versions skip the appver filter entirely.
    r = self.client.get(self.url, dict(appver='10.0a1'))
    eq_(self.get_results(r),
        sorted(self.addons.values_list('id', flat=True)))

def test_results_respect_appver_filtering_for_non_extensions(self):
    self.addons.update(type=amo.ADDON_THEME)
    r = self.client.get(self.url, dict(appver='10.0a1',
                                       type=amo.ADDON_THEME))
    eq_(self.get_results(r),
        sorted(self.addons.values_list('id', flat=True)))

def test_results_platform_filter_all(self):
    # '' and 'all' are equivalent: no platform restriction.
    for platform in ('', 'all'):
        r = self.client.get(self.url, dict(platform=platform))
        eq_(self.get_results(r),
            sorted(self.addons.values_list('id', flat=True)))

def test_slug_indexed(self):
    # The slug field is searchable once the add-on is reindexed.
    a = self.addons[0]
    r = self.client.get(self.url, dict(q='omgyes'))
    eq_(self.get_results(r), [])
    a.update(slug='omgyes')
    self.refresh()
    r = self.client.get(self.url, dict(q='omgyes'))
    eq_(self.get_results(r), [a.id])

def test_authors_indexed(self):
    # Author usernames are indexed and matched, including prefixes
    # ('pony' matches 'ponypet').
    a = self.addons[0]
    r = self.client.get(self.url, dict(q='boop'))
    eq_(self.get_results(r), [])
    AddonUser.objects.create(addon=a,
        user=UserProfile.objects.create(username='boop'))
    AddonUser.objects.create(addon=a,
        user=UserProfile.objects.create(username='ponypet'))
    a.save()
    self.refresh()
    r = self.client.get(self.url, dict(q='garbage'))
    eq_(self.get_results(r), [])
    r = self.client.get(self.url, dict(q='boop'))
    eq_(self.get_results(r), [a.id])
    r = self.client.get(self.url, dict(q='pony'))
    eq_(self.get_results(r), [a.id])

def test_tag_search(self):
    # Tags become searchable after tagging + reindex; multiple tags in
    # one query still match the same add-on.
    a = self.addons[0]
    tag_name = 'tagretpractice'
    r = self.client.get(self.url, dict(q=tag_name))
    eq_(self.get_results(r), [])
    AddonTag.objects.create(
        addon=a, tag=Tag.objects.create(tag_text=tag_name))
    a.save()
    self.refresh(timesleep=1)
    r = self.client.get(self.url, dict(q=tag_name))
    eq_(self.get_results(r), [a.id])
    # Multiple tags.
    tag_name_2 = 'bagemtagem'
    AddonTag.objects.create(
        addon=a, tag=Tag.objects.create(tag_text=tag_name_2))
    a.save()
    self.refresh(timesleep=1)
    r = self.client.get(self.url, dict(q='%s %s' % (tag_name, tag_name_2)))
    eq_(self.get_results(r), [a.id])
class TestPersonaSearch(SearchBase):
    """Search-page tests restricted to Personas (lightweight themes)."""

    fixtures = ['base/apps']

    @classmethod
    def setUpClass(cls):
        super(TestPersonaSearch, cls).setUpClass()
        cls.setUpIndex()

    def setUp(self):
        # All requests in this class are pre-filtered to personas.
        self.url = urlparams(reverse('search.search'), atype=amo.ADDON_PERSONA)

    def _generate_personas(self):
        # Add some public personas.
        self.personas = []
        for status in amo.REVIEWED_STATUSES:
            self.personas.append(
                amo.tests.addon_factory(type=amo.ADDON_PERSONA, status=status))
        # Add some unreviewed personas.
        for status in set(amo.STATUS_CHOICES) - set(amo.REVIEWED_STATUSES):
            amo.tests.addon_factory(type=amo.ADDON_PERSONA, status=status)
        # Add a disabled persona.
        amo.tests.addon_factory(type=amo.ADDON_PERSONA, disabled_by_user=True)
        # NOTE: There are also some add-ons in `setUpIndex` for good measure.
        self.refresh()

    def test_sort_order_default(self):
        self._generate_personas()
        self.check_sort_links(None, sort_by='weekly_downloads')

    def test_sort_order_unknown(self):
        self._generate_personas()
        self.check_sort_links('xxx')

    def test_sort_order_users(self):
        self._generate_personas()
        self.check_sort_links('users', sort_by='average_daily_users')

    def test_sort_order_rating(self):
        self._generate_personas()
        self.check_sort_links('rating', sort_by='bayesian_rating')

    def test_sort_order_newest(self):
        self._generate_personas()
        self.check_sort_links('created', sort_by='created')

    def test_heading(self):
        self.check_heading()

    def test_results_blank_query(self):
        # A blank query returns only the reviewed, enabled personas.
        self._generate_personas()
        personas_ids = sorted(p.id for p in self.personas)  # Not PersonaID ;)
        r = self.client.get(self.url, follow=True)
        eq_(r.status_code, 200)
        eq_(self.get_results(r), personas_ids)
        doc = pq(r.content)
        eq_(doc('.personas-grid li').length, len(personas_ids))
        # Few enough results that no pagination footer is rendered.
        eq_(doc('.listing-footer').length, 0)

    def test_results_name_query(self):
        # NOTE(review): permanently skipped — the assertions below are
        # dead code kept for when the skip is lifted.
        raise SkipTest
        self._generate_personas()
        p1 = self.personas[0]
        p1.name = 'Harry Potter'
        p1.save()
        p2 = self.personas[1]
        p2.name = 'The Life Aquatic with SeaVan'
        p2.save()
        self.refresh()
        # Empty search term should return everything.
        self.check_name_results({'q': ''}, sorted(p.id for p in self.personas))
        # Garbage search terms should return nothing.
        for term in ('xxx', 'garbage', '£'):
            self.check_name_results({'q': term}, [])
        # Try to match 'Harry Potter'.
        for term in ('harry', 'potter', 'har', 'pot', 'harry pooper'):
            self.check_name_results({'q': term}, [p1.pk])
        # Try to match 'The Life Aquatic with SeaVan'.
        for term in ('life', 'aquatic', 'seavan', 'sea van'):
            self.check_name_results({'q': term}, [p2.pk])

    def test_results_popularity(self):
        # All popularity-style sorts should put the most relevant,
        # most popular persona first and expose the same popularity
        # figure through its user/download fields.
        personas = [
            ('Harry Potter', 2000),
            ('Japanese Koi Tattoo', 67),
            ('Japanese Tattoo', 250),
            ('Japanese Tattoo boop', 50),
            ('Japanese Tattoo ballin', 200),
            ('The Japanese Tattooed Girl', 242),
        ]
        for name, popularity in personas:
            amo.tests.addon_factory(name=name, type=amo.ADDON_PERSONA,
                                    popularity=popularity)
        self.refresh()
        # Japanese Tattoo should be the #1 most relevant result. Obviously.
        expected_name, expected_popularity = personas[2]
        for sort in ('downloads', 'popularity', 'users'):
            r = self.client.get(urlparams(self.url, q='japanese tattoo',
                                          sort=sort), follow=True)
            eq_(r.status_code, 200)
            results = list(r.context['pager'].object_list)
            first = results[0]
            eq_(unicode(first.name), expected_name,
                'Was not first result for %r. Results: %s' % (sort, results))
            eq_(first.persona.popularity, expected_popularity,
                'Incorrect popularity for %r. Got %r. Expected %r.' % (
                    sort, first.persona.popularity, results))
            eq_(first.average_daily_users, expected_popularity,
                'Incorrect users for %r. Got %r. Expected %r.' % (
                    sort, first.average_daily_users, results))
            eq_(first.weekly_downloads, expected_popularity,
                'Incorrect weekly_downloads for %r. Got %r. Expected %r.' % (
                    sort, first.weekly_downloads, results))

    def test_results_appver_platform(self):
        # appver/platform filters do not apply to personas.
        self._generate_personas()
        self.check_appver_platform_ignored(sorted(p.id for p in self.personas))

    def test_results_other_applications(self):
        self._generate_personas()
        # Now ensure we get the same results for Firefox as for Thunderbird.
        self.url = self.url.replace('firefox', 'thunderbird')
        self.check_name_results({}, sorted(p.id for p in self.personas))

    def test_pagination(self):
        # TODO: Figure out why ES wonks out when we index a plethora of junk.
        raise SkipTest
        # Generate some (22) personas to get us to two pages.
        left_to_add = DEFAULT_NUM_PERSONAS - len(self.personas) + 1
        for x in xrange(left_to_add):
            self.personas.append(
                amo.tests.addon_factory(type=amo.ADDON_PERSONA))
        self.refresh()
        # Page one should show 21 personas.
        r = self.client.get(self.url, follow=True)
        eq_(r.status_code, 200)
        eq_(pq(r.content)('.personas-grid li').length, DEFAULT_NUM_PERSONAS)
        # Page two should show 1 persona.
        r = self.client.get(self.url + '&page=2', follow=True)
        eq_(r.status_code, 200)
        eq_(pq(r.content)('.personas-grid li').length, 1)
class TestCollectionSearch(SearchBase):
    """Search-page tests for the collections category."""

    fixtures = ['base/apps']

    @classmethod
    def setUpClass(cls):
        # Set up the mapping.
        super(TestCollectionSearch, cls).setUpClass()

    def setUp(self):
        self.url = urlparams(reverse('search.search'), cat='collections')

    def _generate(self):
        # Add some public collections.
        self.collections = []
        for x in xrange(3):
            self.collections.append(
                amo.tests.collection_factory(name='Collection %s' % x))
        # Synchronized, favorites, and unlisted collections should be excluded.
        for type_ in (amo.COLLECTION_SYNCHRONIZED, amo.COLLECTION_FAVORITES):
            amo.tests.collection_factory(type=type_)
        amo.tests.collection_factory(listed=False)
        self.refresh()

    def test_legacy_redirect(self):
        # Ensure `sort=newest` redirects to `sort=created`.
        r = self.client.get(urlparams(self.url, sort='newest'))
        self.assertRedirects(r, urlparams(self.url, sort='created'), 301)

    def test_sort_order_unknown(self):
        self._generate()
        self.check_sort_links('xxx')

    def test_sort_order_default(self):
        self._generate()
        self.check_sort_links(None, sort_by='weekly_subscribers')

    def test_sort_order_weekly(self):
        self._generate()
        self.check_sort_links('weekly', sort_by='weekly_subscribers')

    def test_sort_order_default_with_term(self):
        # A search term must not change the default sort field.
        self._generate()
        self.check_sort_links(None, sort_by='weekly_subscribers',
                              params={'q': 'collection'})

    def test_sort_order_weekly_with_term(self):
        self._generate()
        self.check_sort_links('weekly', sort_by='weekly_subscribers',
                              params={'q': 'collection'})

    def test_sort_order_monthly(self):
        self._generate()
        self.check_sort_links('monthly', sort_by='monthly_subscribers')

    def test_sort_order_all(self):
        self._generate()
        self.check_sort_links('all', sort_by='subscribers')

    def test_sort_order_rating(self):
        self._generate()
        self.check_sort_links('rating', sort_by='rating')

    def test_sort_order_name(self):
        self._generate()
        self.check_sort_links('name', sort_by='name', reverse=False)

    def test_sort_order_created(self):
        self._generate()
        self.check_sort_links('created', sort_by='created')

    def test_sort_order_updated(self):
        self._generate()
        self.check_sort_links('updated', sort_by='modified')

    def test_created_timestamp(self):
        # "Added <date>" is shown when sorting by creation date.
        self._generate()
        r = self.client.get(urlparams(self.url, sort='created'))
        items = pq(r.content)('.primary .item')
        for idx, c in enumerate(r.context['pager'].object_list):
            eq_(strip_whitespace(items.eq(idx).find('.modified').text()),
                'Added %s' % strip_whitespace(datetime_filter(c.created)))

    def test_updated_timestamp(self):
        # "Updated <date>" is shown when sorting by modification date.
        self._generate()
        r = self.client.get(urlparams(self.url, sort='updated'))
        items = pq(r.content)('.primary .item')
        for idx, c in enumerate(r.context['pager'].object_list):
            eq_(strip_whitespace(items.eq(idx).find('.modified').text()),
                'Updated %s' % strip_whitespace(datetime_filter(c.modified)))

    def check_followers_count(self, sort, column):
        # Checks that we show the correct type/number of followers.
        r = self.client.get(urlparams(self.url, sort=sort))
        items = pq(r.content)('.primary .item')
        for idx, c in enumerate(r.context['pager'].object_list):
            eq_(items.eq(idx).find('.followers').text().split()[0],
                numberfmt(getattr(c, column)))

    def test_followers_all(self):
        self._generate()
        for sort in ('', 'all', 'rating', 'created', 'modified', 'name'):
            self.check_followers_count(sort, column='subscribers')

    def test_followers_monthly(self):
        self._generate()
        self.check_followers_count('monthly', column='monthly_subscribers')

    def test_followers_weekly(self):
        self._generate()
        self.check_followers_count('weekly', column='weekly_subscribers')

    def test_heading(self):
        # One is a lonely number. But that's all we need.
        amo.tests.collection_factory()
        self.check_heading()

    def test_results_blank_query(self):
        # Blank query returns only the listed, non-special collections.
        self._generate()
        collection_ids = sorted(p.id for p in self.collections)
        r = self.client.get(self.url, follow=True)
        eq_(r.status_code, 200)
        eq_(self.get_results(r), collection_ids)
        doc = pq(r.content)
        eq_(doc('.primary .item').length, len(collection_ids))
        eq_(doc('.listing-footer').length, 0)

    def test_results_name_query(self):
        self._generate()
        c1 = self.collections[0]
        c1.name = 'SeaVans: A Collection of Cars at the Beach'
        c1.save()
        c2 = self.collections[1]
        c2.name = 'The Life Aquatic with SeaVan: An Underwater Collection'
        c2.save()
        self.refresh(timesleep=1)
        # These contain terms that are in every result - so return everything.
        for term in ('collection',
                     'seavan: a collection of cars at the beach'):
            self.check_name_results({'q': term},
                                    sorted(p.id for p in self.collections))
        # Garbage search terms should return nothing.
        for term in ('xxx', 'garbage', '£'):
            self.check_name_results({'q': term}, [])
        # Try to match 'SeaVans: A Collection of Cars at the Beach'.
        for term in ('cars', 'beach'):
            self.check_name_results({'q': term}, [c1.pk])
        # Match 'The Life Aquatic with SeaVan: An Underwater Collection'.
        for term in ('life aquatic', 'life', 'aquatic', 'underwater', 'under'):
            self.check_name_results({'q': term}, [c2.pk])
        # Match both results above.
        for term in ('seavan', 'seavans'):
            self.check_name_results({'q': term}, sorted([c1.pk, c2.pk]))

    def test_results_popularity(self):
        collections = [
            ('Traveler Pack', 2000),
            ('Tools for Developer', 67),
            ('Web Developer', 250),
            ('Web Developer Necessities', 50),
            ('Web Pro', 200),
            ('Web Developer Pack', 242),
        ]
        for name, subscribers in collections:
            amo.tests.collection_factory(name=name, subscribers=subscribers,
                                         weekly_subscribers=subscribers)
        self.refresh()
        # "Web Developer Collection" should be the #1 most relevant result.
        expected_name, expected_subscribers = collections[2]
        for sort in ('', 'all'):
            r = self.client.get(urlparams(self.url, q='web developer',
                                          sort=sort), follow=True)
            eq_(r.status_code, 200)
            results = list(r.context['pager'].object_list)
            first = results[0]
            eq_(unicode(first.name), expected_name,
                'Was not first result for %r. Results: %s' % (sort, results))
            eq_(first.subscribers, expected_subscribers,
                'Incorrect subscribers for %r. Got %r. Expected %r.' % (
                    sort, first.subscribers, results))

    def test_results_appver_platform(self):
        # appver/platform filters do not apply to collections.
        self._generate()
        self.check_appver_platform_ignored(
            sorted(c.id for c in self.collections))

    def test_results_other_applications(self):
        # Collections are scoped per application in the URL prefix.
        tb_collection = amo.tests.collection_factory(
            application_id=amo.THUNDERBIRD.id)
        sm_collection = amo.tests.collection_factory(
            application_id=amo.SEAMONKEY.id)
        self.refresh()
        r = self.client.get(self.url)
        eq_(self.get_results(r), [])
        r = self.client.get(self.url.replace('firefox', 'thunderbird'))
        eq_(self.get_results(r), [tb_collection.id])
        r = self.client.get(self.url.replace('firefox', 'seamonkey'))
        eq_(self.get_results(r), [sm_collection.id])
def test_search_redirects():
    # Nose generator test: fix_search_query must rewrite legacy query
    # params (old sort names, lver->appver, pid->platform) and return
    # the SAME QueryDict object untouched when nothing needs rewriting.
    changes = (
        ('q=yeah&sort=newest', 'q=yeah&sort=updated'),
        ('sort=weeklydownloads', 'sort=users'),
        ('sort=averagerating', 'sort=rating'),
        ('lver=5.*', 'appver=5.*'),
        ('q=woo&sort=averagerating&lver=6.0', 'q=woo&sort=rating&appver=6.0'),
        ('pid=2', 'platform=linux'),
        ('q=woo&lver=6.0&sort=users&pid=5',
         'q=woo&appver=6.0&sort=users&platform=windows'),
    )

    def check(before, after):
        eq_(views.fix_search_query(QueryDict(before)),
            dict(urlparse.parse_qsl(after)))
    for before, after in changes:
        yield check, before, after

    # Already-modern query strings must pass through by identity.
    queries = (
        'q=yeah',
        'q=yeah&sort=users',
        'sort=users',
        'q=yeah&appver=6.0',
        'q=yeah&appver=6.0&platform=mac',
    )

    def same(qs):
        q = QueryDict(qs)
        assert views.fix_search_query(q) is q
    for qs in queries:
        yield same, qs
class TestAjaxSearch(amo.tests.ESTestCase):
    """Shared machinery for the JSON ajax-search endpoints."""

    @classmethod
    def setUpClass(cls):
        super(TestAjaxSearch, cls).setUpClass()
        cls.setUpIndex()

    def search_addons(self, url, params, addons=[], types=amo.ADDON_TYPES,
                      src=None):
        # Fetch `url?params` and assert the JSON payload matches the
        # expected `addons` exactly (by id, name, url, icon), and that
        # every result is reviewed, enabled, and of an allowed type.
        r = self.client.get(url + '?' + params)
        eq_(r.status_code, 200)
        data = json.loads(r.content)
        # Compare position-by-position after sorting both sides by id.
        data = sorted(data, key=lambda x: x['id'])
        addons = sorted(addons, key=lambda x: x.id)
        eq_(len(data), len(addons))
        for got, expected in zip(data, addons):
            eq_(int(got['id']), expected.id)
            eq_(got['name'], unicode(expected.name))
            expected_url = expected.get_url_path()
            if src:
                # Suggestion results carry a ?src=ss tracking param.
                expected_url += '?src=ss'
            eq_(got['url'], expected_url)
            eq_(got['icon'], expected.icon_url)
            assert expected.status in amo.REVIEWED_STATUSES, (
                'Unreviewed add-ons should not appear in search results.')
            eq_(expected.is_disabled, False)
            assert expected.type in types, (
                'Add-on type %s should not be searchable.' % expected.type)
class TestGenericAjaxSearch(TestAjaxSearch):
    """Tests for the generic `search.ajax` JSON endpoint."""

    def search_addons(self, params, addons=[]):
        # Re-save everything so ES reflects DB changes made by the
        # individual tests, then delegate to the shared checker.
        [a.save() for a in Addon.objects.all()]
        self.refresh(timesleep=1)
        super(TestGenericAjaxSearch, self).search_addons(
            reverse('search.ajax'), params, addons)

    def test_ajax_search_by_id(self):
        addon = Addon.objects.reviewed().all()[0]
        self.search_addons('q=%s' % addon.id, [addon])

    def test_ajax_search_by_bad_id(self):
        self.search_addons('q=999', [])

    def test_ajax_search_unreviewed_by_id(self):
        addon = Addon.objects.all()[3]
        addon.update(status=amo.STATUS_UNREVIEWED)
        # BUGFIX: search for the add-on we just made unreviewed, not the
        # literal id 999 — the old query passed vacuously (999 never
        # exists, see test_ajax_search_by_bad_id) and never exercised
        # the unreviewed-status filter.
        self.search_addons('q=%s' % addon.id, [])

    def test_ajax_search_lite_reviewed_by_id(self):
        # Lite and lite-and-nominated statuses count as reviewed.
        addon = Addon.objects.all()[3]
        addon.update(status=amo.STATUS_LITE)
        q = 'q=%s' % addon.id
        self.search_addons(q, [addon])
        addon.update(status=amo.STATUS_LITE_AND_NOMINATED)
        self.search_addons(q, [addon])

    def test_ajax_search_user_disabled_by_id(self):
        addon = Addon.objects.filter(disabled_by_user=True)[0]
        self.search_addons('q=%s' % addon.id, [])

    def test_ajax_search_admin_disabled_by_id(self):
        addon = Addon.objects.filter(status=amo.STATUS_DISABLED)[0]
        self.search_addons('q=%s' % addon.id, [])

    def test_ajax_search_admin_deleted_by_id(self):
        amo.tests.addon_factory(status=amo.STATUS_DELETED)
        self.refresh()
        addon = Addon.with_deleted.filter(status=amo.STATUS_DELETED)[0]
        self.search_addons('q=%s' % addon.id, [])

    def test_ajax_search_personas_by_id(self):
        # Personas are searchable by id once they have a Persona row.
        addon = Addon.objects.all()[3]
        addon.update(type=amo.ADDON_PERSONA)
        addon.update(status=amo.STATUS_LITE)
        Persona.objects.create(persona_id=addon.id, addon_id=addon.id)
        self.search_addons('q=%s' % addon.id, [addon])

    def test_ajax_search_webapp_by_id(self):
        """Webapps should not appear in ajax search results."""
        addon = Addon.objects.all()[3]
        addon.update(type=amo.ADDON_WEBAPP)
        self.search_addons('q=%s' % addon.id, [])

    def test_ajax_search_by_name(self):
        addon = amo.tests.addon_factory(
            name='uniqueaddon',
            status=amo.STATUS_LITE,
            type=amo.ADDON_EXTENSION,
        )
        self.refresh(timesleep=1)
        self.search_addons('q=' + unicode(addon.name), [addon])

    def test_ajax_search_by_bad_name(self):
        self.search_addons('q=some+filthy+bad+word', [])
class TestSearchSuggestions(TestAjaxSearch):
    """Tests for the `search.suggestions` autocomplete endpoint."""

    def setUp(self):
        self.url = reverse('search.suggestions')
        amo.tests.addon_factory(name='addon webapp', type=amo.ADDON_WEBAPP)
        amo.tests.addon_factory(name='addon persona', type=amo.ADDON_PERSONA)
        # A disabled, incomplete persona that should never be suggested.
        amo.tests.addon_factory(name='addon persona', type=amo.ADDON_PERSONA,
                                disabled_by_user=True, status=amo.STATUS_NULL)
        self.refresh(timesleep=1)

    def search_addons(self, params, addons=[],
                      types=views.AddonSuggestionsAjax.types):
        # Suggestions carry a src=ss tracking param on every result URL.
        super(TestSearchSuggestions, self).search_addons(
            self.url, params, addons, types, src='ss')

    def search_applications(self, params, apps=[]):
        # Application (Firefox/Thunderbird/...) suggestions are returned
        # in the same JSON list; check their name/url/css-class shape.
        r = self.client.get(self.url + '?' + params)
        eq_(r.status_code, 200)
        data = json.loads(r.content)
        data = sorted(data, key=lambda x: x['id'])
        apps = sorted(apps, key=lambda x: x.id)
        eq_(len(data), len(apps))
        for got, expected in zip(data, apps):
            eq_(int(got['id']), expected.id)
            eq_(got['name'], '%s Add-ons' % unicode(expected.pretty))
            eq_(got['url'], locale_url(expected.short))
            eq_(got['cls'], 'app ' + expected.short)

    def test_get(self):
        r = self.client.get(self.url)
        eq_(r.status_code, 200)

    def test_addons(self):
        addons = (Addon.objects.reviewed()
                  .filter(disabled_by_user=False,
                          type__in=views.AddonSuggestionsAjax.types))
        self.search_addons('q=add', list(addons))
        self.search_addons('q=add&cat=all', list(addons))

    def test_unicode(self):
        # Non-ASCII queries must not error out.
        self.search_addons('q=%C2%B2%C2%B2', [])

    def test_personas(self):
        # cat=themes restricts suggestions to personas, case-insensitively;
        # cat=all excludes them.
        personas = (Addon.objects.reviewed()
                    .filter(type=amo.ADDON_PERSONA, disabled_by_user=False))
        personas, types = list(personas), [amo.ADDON_PERSONA]
        self.search_addons('q=add&cat=themes', personas, types)
        self.search_addons('q=persona&cat=themes', personas, types)
        self.search_addons('q=PERSONA&cat=themes', personas, types)
        self.search_addons('q=persona&cat=all', [])

    def test_applications(self):
        self.search_applications('', [])
        self.search_applications('q=FIREFOX', [amo.FIREFOX])
        self.search_applications('q=firefox', [amo.FIREFOX])
        self.search_applications('q=bird', [amo.THUNDERBIRD])
        self.search_applications('q=mobile', [amo.MOBILE])
        self.search_applications('q=mozilla', [])
|
# This file is part of EbookLib.
# Copyright (c) 2013 Aleksandar Erkalovic <aerkalov@gmail.com>
#
# EbookLib is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EbookLib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with EbookLib. If not, see <http://www.gnu.org/licenses/>.
import zipfile
import six
import logging
import uuid
import posixpath as zip_path
import os.path
from collections import OrderedDict
try:
from urllib.parse import unquote
except ImportError:
from urllib import unquote
from lxml import etree
import ebooklib
from ebooklib.utils import parse_string, parse_html_string, guess_type, get_pages_for_items
# Version of EPUB library
VERSION = (0, 17, 0)

# XML namespace prefixes used throughout when building/parsing OPF, NCX
# and XHTML documents with lxml.
NAMESPACES = {'XML': 'http://www.w3.org/XML/1998/namespace',
'EPUB': 'http://www.idpf.org/2007/ops',
'DAISY': 'http://www.daisy.org/z3986/2005/ncx/',
'OPF': 'http://www.idpf.org/2007/opf',
'CONTAINERNS': 'urn:oasis:names:tc:opendocument:xmlns:container',
'DC': 'http://purl.org/dc/elements/1.1/',
'XHTML': 'http://www.w3.org/1999/xhtml'}

# XML Templates
# OCF container pointer: tells readers where the OPF package lives.
# %(folder_name)s is interpolated with the book's content folder.
CONTAINER_PATH = 'META-INF/container.xml'
CONTAINER_XML = '''<?xml version='1.0' encoding='utf-8'?>
<container xmlns="urn:oasis:names:tc:opendocument:xmlns:container" version="1.0">
<rootfiles>
<rootfile media-type="application/oebps-package+xml" full-path="%(folder_name)s/content.opf"/>
</rootfiles>
</container>
'''

# Skeleton documents (as bytes) that get filled in when writing a book.
NCX_XML = six.b('''<!DOCTYPE ncx PUBLIC "-//NISO//DTD ncx 2005-1//EN" "http://www.daisy.org/z3986/2005/ncx-2005-1.dtd">
<ncx xmlns="http://www.daisy.org/z3986/2005/ncx/" version="2005-1" />''')

NAV_XML = six.b('''<?xml version="1.0" encoding="utf-8"?><!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops"/>''')

CHAPTER_XML = six.b('''<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops" epub:prefix="z3998: http://www.daisy.org/z3998/2012/vocab/structure/#"></html>''')

COVER_XML = six.b('''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops" lang="en" xml:lang="en">
<head>
<style>
body { margin: 0em; padding: 0em; }
img { max-width: 100%; max-height: 100%; }
</style>
</head>
<body>
<img src="" alt="" />
</body>
</html>''')

# Media types treated as images (e.g. for cover detection).
IMAGE_MEDIA_TYPES = ['image/jpeg', 'image/jpg', 'image/png', 'image/svg+xml']
# TOC and navigation elements
class Section(object):
    """
    A titled grouping of TOC entries; may optionally point at a document
    via ``href``.
    """

    def __init__(self, title, href=''):
        """
        :Args:
          - title: display title of the section
          - href: optional target the section itself links to
        """
        self.href = href
        self.title = title
class Link(object):
    """
    A single TOC entry pointing at a document, with an optional unique id.
    """

    def __init__(self, href, title, uid=None):
        """
        :Args:
          - href: target of the link
          - title: display title of the link
          - uid: optional unique identifier for the link
        """
        self.uid = uid
        self.href = href
        self.title = title
# Exceptions
class EpubException(Exception):
    """
    Raised for EPUB processing errors; carries a numeric ``code`` and a
    human-readable ``msg``.
    """

    def __init__(self, code, msg):
        self.msg = msg
        self.code = code

    def __str__(self):
        return repr(self.msg)
# Items
class EpubItem(object):
    """
    Base class for the items in a book.
    """

    def __init__(self, uid=None, file_name='', media_type='', content=six.b(''), manifest=True):
        """
        :Args:
          - uid: Unique identifier for this item (optional)
          - file_name: File name for this item (optional)
          - media_type: Media type for this item (optional)
          - content: Content for this item (optional)
          - manifest: Manifest for this item (optional)
        """
        self.book = None
        self.id = uid
        self.file_name = file_name
        self.media_type = media_type
        self.content = content
        self.manifest = manifest
        # linear spine placement; may be switched off by subclasses
        self.is_linear = True

    def get_id(self):
        """
        Returns unique identifier for this item.

        :Returns:
          Returns uid number as string.
        """
        return self.id

    def get_name(self):
        """
        Returns name for this item. By default it is always file name but it does not have to be.

        :Returns:
          Returns file name for this item.
        """
        return self.file_name

    def get_type(self):
        """
        Guess type according to the file extension. Might not be the best way how to do it, but it works for now.

        Items can be of type:
          - ITEM_UNKNOWN = 0
          - ITEM_IMAGE = 1
          - ITEM_STYLE = 2
          - ITEM_SCRIPT = 3
          - ITEM_NAVIGATION = 4
          - ITEM_VECTOR = 5
          - ITEM_FONT = 6
          - ITEM_VIDEO = 7
          - ITEM_AUDIO = 8
          - ITEM_DOCUMENT = 9
          - ITEM_COVER = 10

        We map type according to the extensions which are defined in ebooklib.EXTENSIONS.

        :Returns:
          Returns type of the item as number.
        """
        extension = zip_path.splitext(self.get_name())[1].lower()

        for item_type, known_extensions in six.iteritems(ebooklib.EXTENSIONS):
            if extension in known_extensions:
                return item_type

        return ebooklib.ITEM_UNKNOWN

    def get_content(self, default=six.b('')):
        """
        Returns content of the item. Content should be of type 'str' (Python 2) or 'bytes' (Python 3)

        :Args:
          - default: Default value for the content if it is not already defined.

        :Returns:
          Returns content of the item.
        """
        if self.content:
            return self.content
        return default

    def set_content(self, content):
        """
        Sets content value for this item.

        :Args:
          - content: Content value
        """
        self.content = content

    def __str__(self):
        return '<EpubItem:%s>' % self.id
class EpubNcx(EpubItem):
    """Represents the EPUB 2 Navigation Control File (NCX) in the EPUB."""

    def __init__(self, uid='ncx', file_name='toc.ncx'):
        super(EpubNcx, self).__init__(uid=uid,
                                      file_name=file_name,
                                      media_type='application/x-dtbncx+xml')

    def __str__(self):
        return '<EpubNcx:%s>' % self.id
class EpubCover(EpubItem):
    """
    Represents the cover image in the EPUB file.
    """

    def __init__(self, uid='cover-img', file_name=''):
        super(EpubCover, self).__init__(uid=uid, file_name=file_name)

    def get_type(self):
        # Covers are always reported as ITEM_COVER, never guessed by extension.
        return ebooklib.ITEM_COVER

    def __str__(self):
        return '<EpubCover:%s:%s>' % (self.id, self.file_name)
class EpubHtml(EpubItem):
    """
    Represents HTML document in the EPUB file.
    """
    _template_name = 'chapter'

    def __init__(self, uid=None, file_name='', media_type='', content=None, title='',
                 lang=None, direction=None, media_overlay=None, media_duration=None):
        """
        :Args:
          - uid: Unique identifier (optional)
          - file_name: File name inside the EPUB (optional)
          - media_type: Media type (optional)
          - content: Raw HTML content (optional)
          - title: Document title, placed in the generated <head> (optional)
          - lang: Language code overriding the book language (optional)
          - direction: Text direction for the <body> element (optional)
          - media_overlay: id of the SMIL media overlay for this document (optional)
          - media_duration: duration of the media overlay (optional)
        """
        super(EpubHtml, self).__init__(uid, file_name, media_type, content)

        self.title = title
        self.lang = lang
        self.direction = direction

        self.media_overlay = media_overlay
        self.media_duration = media_duration

        self.links = []        # extra <link>/<script> elements for the generated <head>
        self.properties = []   # OPF manifest "properties" values
        self.pages = []

    def is_chapter(self):
        """
        Returns if this document is chapter or not.

        :Returns:
          Returns book value.
        """
        return True

    def get_type(self):
        """
        Always returns ebooklib.ITEM_DOCUMENT as type of this document.

        :Returns:
          Always returns ebooklib.ITEM_DOCUMENT
        """
        return ebooklib.ITEM_DOCUMENT

    def set_language(self, lang):
        """
        Sets language for this book item. By default it will use language of the book but it
        can be overwritten with this call.
        """
        self.lang = lang

    def get_language(self):
        """
        Get language code for this book item. Language of the book item can be different from
        the language settings defined globaly for book.

        :Returns:
          As string returns language code.
        """
        return self.lang

    def add_link(self, **kwgs):
        """
        Add additional link to the document. Links will be embeded only inside of this document.

        >>> add_link(href='styles.css', rel='stylesheet', type='text/css')
        """
        self.links.append(kwgs)

    def get_links(self):
        """
        Returns list of additional links defined for this document.

        :Returns:
          As tuple return list of links.
        """
        return (link for link in self.links)

    def get_links_of_type(self, link_type):
        """
        Returns list of additional links of specific type.

        :Returns:
          As tuple returns list of links.
        """
        return (link for link in self.links if link.get('type', '') == link_type)

    def add_item(self, item):
        """
        Add other item to this document. It will create additional links according to the item type.

        :Args:
          - item: item we want to add defined as instance of EpubItem
        """
        if item.get_type() == ebooklib.ITEM_STYLE:
            self.add_link(href=item.get_name(), rel='stylesheet', type='text/css')

        if item.get_type() == ebooklib.ITEM_SCRIPT:
            self.add_link(src=item.get_name(), type='text/javascript')

    def get_body_content(self):
        """
        Returns content of BODY element for this HTML document. Content will be of type 'str' (Python 2)
        or 'bytes' (Python 3).

        :Returns:
          Returns content of this document, or an empty string when the
          content cannot be parsed or has no <body> element.
        """
        try:
            html_tree = parse_html_string(self.content)
        except Exception:
            return ''

        body = html_tree.find('body')
        if body is None:
            # No <body> at all -- nothing to extract.
            # (The previous check, len(find('body')) != 0, raised TypeError
            # here and also dropped bodies containing only text, because
            # len() on an element counts child *elements*.)
            return ''

        tree_str = etree.tostring(body, pretty_print=True, encoding='utf-8', xml_declaration=False)

        # Strip the serialised <body>...</body> wrapper so only the inner
        # markup is returned.
        if tree_str.startswith(six.b('<body>')):
            n = tree_str.rindex(six.b('</body>'))
            return tree_str[6:n]

        return tree_str

    def get_content(self, default=None):
        """
        Returns content for this document as HTML string. Content will be of type 'str' (Python 2)
        or 'bytes' (Python 3).

        :Args:
          - default: Default value for the content if it is not defined (kept for
            interface compatibility; not used by this implementation).

        :Returns:
          Returns content of this document.
        """
        tree = parse_string(self.book.get_template(self._template_name))
        tree_root = tree.getroot()

        tree_root.set('lang', self.lang or self.book.language)
        tree_root.attrib['{%s}lang' % NAMESPACES['XML']] = self.lang or self.book.language

        # add to the head also
        #  <meta charset="utf-8" />

        try:
            html_tree = parse_html_string(self.content)
        except Exception:
            return ''

        # create and populate head

        _head = etree.SubElement(tree_root, 'head')

        if self.title != '':
            _title = etree.SubElement(_head, 'title')
            _title.text = self.title

        for lnk in self.links:
            if lnk.get('type') == 'text/javascript':
                _lnk = etree.SubElement(_head, 'script', lnk)
                # force <script></script>
                _lnk.text = ''
            else:
                _lnk = etree.SubElement(_head, 'link', lnk)

        # create and populate body

        _body = etree.SubElement(tree_root, 'body')
        if self.direction:
            _body.set('dir', self.direction)

        # move the children of the source document's <body> into the
        # freshly generated document
        body = html_tree.find('body')
        if body is not None:
            for i in body.getchildren():
                _body.append(i)

        tree_str = etree.tostring(tree, pretty_print=True, encoding='utf-8', xml_declaration=True)

        return tree_str

    def __str__(self):
        return '<EpubHtml:%s:%s>' % (self.id, self.file_name)
class EpubCoverHtml(EpubHtml):
    """
    Represents the generated cover page (an XHTML document that displays
    the cover image) in the EPUB file.
    """

    def __init__(self, uid='cover', file_name='cover.xhtml', image_name='', title='Cover'):
        super(EpubCoverHtml, self).__init__(uid=uid, file_name=file_name, title=title)

        self.image_name = image_name
        # the cover page is excluded from the linear reading order
        self.is_linear = False

    def is_chapter(self):
        """
        Returns if this document is chapter or not.

        :Returns:
          Returns book value.
        """
        return False

    def get_content(self):
        """
        Returns content for cover page as HTML string. Content will be of type 'str' (Python 2) or 'bytes' (Python 3).

        :Returns:
          Returns content of this document.
        """
        # render the 'cover' template, then point its <img> at our image
        self.content = self.book.get_template('cover')

        document = parse_string(super(EpubCoverHtml, self).get_content())
        root = document.getroot()

        img = root.xpath('//xhtml:img', namespaces={'xhtml': NAMESPACES['XHTML']})[0]
        img.set('src', self.image_name)
        img.set('alt', self.title)

        return etree.tostring(document, pretty_print=True, encoding='utf-8', xml_declaration=True)

    def __str__(self):
        return '<EpubCoverHtml:%s:%s>' % (self.id, self.file_name)
class EpubNav(EpubHtml):
    """
    Represents the EPUB 3 Navigation Document in the EPUB file.
    """

    def __init__(self, uid='nav', file_name='nav.xhtml', media_type='application/xhtml+xml'):
        super(EpubNav, self).__init__(uid=uid,
                                      file_name=file_name,
                                      media_type=media_type)

    def is_chapter(self):
        """
        Returns if this document is chapter or not.

        :Returns:
          Returns book value.
        """
        return False

    def __str__(self):
        return '<EpubNav:%s:%s>' % (self.id, self.file_name)
class EpubImage(EpubItem):
    """
    Represents an image in the EPUB file.
    """

    def __init__(self, uid=None, file_name='', media_type='', content=six.b('')):
        """
        :Args:
          - uid: Unique identifier for this image (optional)
          - file_name: File name inside the EPUB (optional)
          - media_type: Media type for this image (optional)
          - content: Raw image bytes (optional)

        All arguments are optional with the same defaults as EpubItem, so
        the historical no-argument ``EpubImage()`` construction (used by
        EpubReader) keeps working unchanged.
        """
        super(EpubImage, self).__init__(uid=uid, file_name=file_name,
                                        media_type=media_type, content=content)

    def get_type(self):
        # Images are always reported as ITEM_IMAGE, never guessed by extension.
        return ebooklib.ITEM_IMAGE

    def __str__(self):
        return '<EpubImage:%s:%s>' % (self.id, self.file_name)
class EpubSMIL(EpubItem):
    """Represents a SMIL media-overlay document in the EPUB file."""

    def __init__(self, uid=None, file_name='', content=None):
        super(EpubSMIL, self).__init__(uid=uid,
                                       file_name=file_name,
                                       media_type='application/smil+xml',
                                       content=content)

    def get_type(self):
        # SMIL documents are always reported as ITEM_SMIL.
        return ebooklib.ITEM_SMIL

    def __str__(self):
        return '<EpubSMIL:%s:%s>' % (self.id, self.file_name)
# EpubBook
class EpubBook(object):
    """
    In-memory representation of an EPUB publication: metadata, items
    (documents, images, styles, ...), spine, guide, TOC and bindings.
    Serialised by EpubWriter and populated by EpubReader.
    """

    def __init__(self):
        self.EPUB_VERSION = None

        self.reset()

        # we should have options here

    def reset(self):
        "Initialises all needed variables to default values"

        self.metadata = {}
        self.items = []
        self.spine = []
        self.guide = []
        self.pages = []
        self.toc = []
        self.bindings = []

        self.IDENTIFIER_ID = 'id'
        self.FOLDER_NAME = 'EPUB'

        # counters for auto-assigned item ids (chapter_N / image_N / static_N)
        self._id_html = 0
        self._id_image = 0
        self._id_static = 0

        self.title = ''
        self.language = 'en'
        self.direction = None

        self.templates = {
            'ncx': NCX_XML,
            'nav': NAV_XML,
            'chapter': CHAPTER_XML,
            'cover': COVER_XML
        }

        self.add_metadata('OPF', 'generator', '', {
            'name': 'generator', 'content': 'Ebook-lib %s' % '.'.join([str(s) for s in VERSION])
        })

        # default to using a randomly-unique identifier if one is not specified manually
        self.set_identifier(str(uuid.uuid4()))

        # custom prefixes and namespaces to be set to the content.opf doc
        self.prefixes = []
        self.namespaces = {}

    def set_identifier(self, uid):
        """
        Sets unique id for this epub

        :Args:
          - uid: Value of unique identifier for this book
        """
        self.uid = uid

        self.set_unique_metadata('DC', 'identifier', self.uid, {'id': self.IDENTIFIER_ID})

    def set_title(self, title):
        """
        Set title. You can set multiple titles.

        :Args:
          - title: Title value
        """
        self.title = title

        self.add_metadata('DC', 'title', self.title)

    def set_language(self, lang):
        """
        Set language for this epub. You can set multiple languages. Specific items in the book can have
        different language settings.

        :Args:
          - lang: Language code
        """
        self.language = lang

        self.add_metadata('DC', 'language', lang)

    def set_direction(self, direction):
        """
        :Args:
          - direction: Options are "ltr", "rtl" and "default"
        """
        self.direction = direction

    def set_cover(self, file_name, content, create_page=True):
        """
        Set cover and create cover document if needed.

        :Args:
          - file_name: file name of the cover page
          - content: Content for the cover image
          - create_page: Should cover page be defined. Defined as bool value (optional). Default value is True.
        """
        # as it is now, it can only be called once
        c0 = EpubCover(file_name=file_name)
        c0.content = content
        self.add_item(c0)

        if create_page:
            c1 = EpubCoverHtml(image_name=file_name)
            self.add_item(c1)

        self.add_metadata(None, 'meta', '', OrderedDict([('name', 'cover'), ('content', 'cover-img')]))

    def add_author(self, author, file_as=None, role=None, uid='creator'):
        "Add author for this document"

        self.add_metadata('DC', 'creator', author, {'id': uid})

        if file_as:
            self.add_metadata(None, 'meta', file_as, {'refines': '#' + uid,
                                                      'property': 'file-as',
                                                      'scheme': 'marc:relators'})
        if role:
            self.add_metadata(None, 'meta', role, {'refines': '#' + uid,
                                                   'property': 'role',
                                                   'scheme': 'marc:relators'})

    def add_metadata(self, namespace, name, value, others=None):
        """
        Add a metadata entry.

        :Args:
          - namespace: key of NAMESPACES (e.g. 'DC', 'OPF'), a raw namespace
            URI, or None for un-namespaced <meta> entries
          - name: element/property name
          - value: text value
          - others: optional dict of extra attributes
        """
        if namespace in NAMESPACES:
            namespace = NAMESPACES[namespace]

        if namespace not in self.metadata:
            self.metadata[namespace] = {}

        if name not in self.metadata[namespace]:
            self.metadata[namespace][name] = []

        self.metadata[namespace][name].append((value, others))

    def get_metadata(self, namespace, name):
        """
        Retrieve metadata entries as a list of (value, others) tuples.

        NOTE(review): raises KeyError when no entry for *namespace* was ever
        added; callers may rely on this, so it is kept as-is.
        """
        if namespace in NAMESPACES:
            namespace = NAMESPACES[namespace]

        return self.metadata[namespace].get(name, [])

    def set_unique_metadata(self, namespace, name, value, others=None):
        "Add metadata if metadata with this identifier does not already exist, otherwise update existing metadata."
        if namespace in NAMESPACES:
            namespace = NAMESPACES[namespace]

        if namespace in self.metadata and name in self.metadata[namespace]:
            self.metadata[namespace][name] = [(value, others)]
        else:
            self.add_metadata(namespace, name, value, others)

    def add_item(self, item):
        """
        Add additional item to the book. If not defined, media type and chapter id will be defined
        for the item.

        :Args:
          - item: Item instance

        :Returns:
          Returns the item that was added.
        """
        if item.media_type == '':
            (has_guessed, media_type) = guess_type(item.get_name().lower())

            # NOTE(review): guess_type appears to return a (type, encoding)
            # pair a la mimetypes.guess_type, which would make this fallback
            # ordering look inverted -- confirm against ebooklib.utils before
            # changing; behaviour kept as-is.
            if has_guessed:
                if media_type is not None:
                    item.media_type = media_type
                else:
                    item.media_type = has_guessed
            else:
                item.media_type = 'application/octet-stream'

        if not item.get_id():
            # make chapter_, image_ and static_ configurable
            if isinstance(item, EpubHtml):
                item.id = 'chapter_%d' % self._id_html
                self._id_html += 1
                # If there's a page list, append it to the book's page list
                self.pages += item.pages
            elif isinstance(item, EpubImage):
                item.id = 'image_%d' % self._id_image
                self._id_image += 1
            else:
                # use the dedicated static counter (previously this branch
                # reused _id_image, leaving _id_static forever unused)
                item.id = 'static_%d' % self._id_static
                self._id_static += 1

        item.book = self
        self.items.append(item)

        return item

    def get_item_with_id(self, uid):
        """
        Returns item for defined UID.

        >>> book.get_item_with_id('image_001')

        :Args:
          - uid: UID for the item

        :Returns:
          Returns item object. Returns None if nothing was found.
        """
        for item in self.get_items():
            if item.id == uid:
                return item

        return None

    def get_item_with_href(self, href):
        """
        Returns item for defined HREF.

        >>> book.get_item_with_href('EPUB/document.xhtml')

        :Args:
          - href: HREF for the item we are searching for

        :Returns:
          Returns item object. Returns None if nothing was found.
        """
        for item in self.get_items():
            if item.get_name() == href:
                return item

        return None

    def get_items(self):
        """
        Returns all items attached to this book.

        :Returns:
          Returns all items as tuple.
        """
        return (item for item in self.items)

    def get_items_of_type(self, item_type):
        """
        Returns all items of specified type.

        >>> book.get_items_of_type(epub.ITEM_IMAGE)

        :Args:
          - item_type: Type for items we are searching for

        :Returns:
          Returns found items as tuple.
        """
        return (item for item in self.items if item.get_type() == item_type)

    def get_items_of_media_type(self, media_type):
        """
        Returns all items of specified media type.

        :Args:
          - media_type: Media type for items we are searching for

        :Returns:
          Returns found items as tuple.
        """
        return (item for item in self.items if item.media_type == media_type)

    def set_template(self, name, value):
        """
        Defines templates which are used to generate certain types of pages. When defining new value for the template
        we have to use content of type 'str' (Python 2) or 'bytes' (Python 3).

        At the moment we use these templates:
          - ncx
          - nav
          - chapter
          - cover

        :Args:
          - name: Name for the template
          - value: Content for the template
        """
        self.templates[name] = value

    def get_template(self, name):
        """
        Returns value for the template.

        :Args:
          - name: template name

        :Returns:
          Value of the template.
        """
        return self.templates.get(name)

    def add_prefix(self, name, uri):
        """
        Appends custom prefix to be added to the content.opf document

        >>> epub_book.add_prefix('bkterms', 'http://booktype.org/')

        :Args:
          - name: namespave name
          - uri: URI for the namespace
        """
        self.prefixes.append('%s: %s' % (name, uri))
class EpubWriter(object):
    """
    Serialises an EpubBook into an EPUB (zip) archive: mimetype,
    META-INF/container.xml, content.opf (metadata/manifest/spine/guide/
    bindings), the NCX and nav documents, and every item's content.
    """

    # Option defaults; callers may override any entry via the ``options``
    # constructor argument.
    DEFAULT_OPTIONS = {
        'epub2_guide': True,
        'epub3_landmark': True,
        'epub3_pages': True,
        'landmark_title': 'Guide',
        'pages_title': 'Pages',
        'spine_direction': True,
        'package_direction': False,
        'play_order': {
            'enabled': False,
            'start_from': 1
        }
    }

    def __init__(self, name, book, options=None):
        # name: output file name; book: EpubBook to serialise;
        # options: optional dict overriding DEFAULT_OPTIONS entries.
        self.file_name = name
        self.book = book
        self.options = dict(self.DEFAULT_OPTIONS)
        if options:
            self.options.update(options)
        self._init_play_order()

    def _init_play_order(self):
        # Copy play-order settings from options into mutable private state;
        # a missing 'play_order' key silently leaves the defaults in place.
        self._play_order = {
            'enabled': False,
            'start_from': 1
        }
        try:
            self._play_order['enabled'] = self.options['play_order']['enabled']
            self._play_order['start_from'] = self.options['play_order']['start_from']
        except KeyError:
            pass

    def process(self):
        """Run the registered plugins' before-write hooks on the book and
        on every EpubHtml item."""
        # should cache this html parsing so we don't do it for every plugin
        for plg in self.options.get('plugins', []):
            if hasattr(plg, 'before_write'):
                plg.before_write(self.book)
        for item in self.book.get_items():
            if isinstance(item, EpubHtml):
                for plg in self.options.get('plugins', []):
                    if hasattr(plg, 'html_before_write'):
                        plg.html_before_write(self.book, item)

    def _write_container(self):
        # Write META-INF/container.xml pointing at <FOLDER_NAME>/content.opf.
        container_xml = CONTAINER_XML % {'folder_name': self.book.FOLDER_NAME}
        self.out.writestr(CONTAINER_PATH, container_xml)

    def _write_opf_metadata(self, root):
        """Append the <metadata> element (with dcterms:modified and all book
        metadata) to the OPF package *root*."""
        # This is really not needed
        # problem is uppercase/lowercase
        # for ns_name, values in six.iteritems(self.book.metadata):
        #     if ns_name:
        #         for n_id, ns_url in six.iteritems(NAMESPACES):
        #             if ns_name == ns_url:
        #                 nsmap[n_id.lower()] = NAMESPACES[n_id]
        nsmap = {'dc': NAMESPACES['DC'], 'opf': NAMESPACES['OPF']}
        nsmap.update(self.book.namespaces)
        metadata = etree.SubElement(root, 'metadata', nsmap=nsmap)
        # dcterms:modified is mandatory in EPUB 3; use 'mtime' option when
        # supplied, otherwise the current time.
        el = etree.SubElement(metadata, 'meta', {'property': 'dcterms:modified'})
        if 'mtime' in self.options:
            mtime = self.options['mtime']
        else:
            import datetime
            mtime = datetime.datetime.now()
        el.text = mtime.strftime('%Y-%m-%dT%H:%M:%SZ')
        # NOTE(review): the loop variable `values` is deliberately reused
        # (shadowed) at each nesting level below; behaviour depends on it.
        for ns_name, values in six.iteritems(self.book.metadata):
            if ns_name == NAMESPACES['OPF']:
                for values in values.values():
                    for v in values:
                        if 'property' in v[1] and v[1]['property'] == 'dcterms:modified':
                            continue
                        try:
                            el = etree.SubElement(metadata, 'meta', v[1])
                            if v[0]:
                                el.text = v[0]
                        except ValueError:
                            logging.error('Could not create metadata.')
            else:
                for name, values in six.iteritems(values):
                    for v in values:
                        try:
                            if ns_name:
                                el = etree.SubElement(metadata, '{%s}%s' % (ns_name, name), v[1])
                            else:
                                el = etree.SubElement(metadata, '%s' % name, v[1])
                            el.text = v[0]
                        except ValueError:
                            logging.error('Could not create metadata "{}".'.format(name))

    def _write_opf_manifest(self, root):
        """Append the <manifest> element to *root* and return the id of the
        NCX item (or None if the book has none)."""
        manifest = etree.SubElement(root, 'manifest')
        _ncx_id = None
        # mathml, scripted, svg, remote-resources, and switch
        # nav
        # cover-image
        for item in self.book.get_items():
            if not item.manifest:
                continue
            if isinstance(item, EpubNav):
                etree.SubElement(manifest, 'item', {'href': item.get_name(),
                                                    'id': item.id,
                                                    'media-type': item.media_type,
                                                    'properties': 'nav'})
            elif isinstance(item, EpubNcx):
                _ncx_id = item.id
                etree.SubElement(manifest, 'item', {'href': item.file_name,
                                                    'id': item.id,
                                                    'media-type': item.media_type})
            elif isinstance(item, EpubCover):
                etree.SubElement(manifest, 'item', {'href': item.file_name,
                                                    'id': item.id,
                                                    'media-type': item.media_type,
                                                    'properties': 'cover-image'})
            else:
                opts = {'href': item.file_name,
                        'id': item.id,
                        'media-type': item.media_type}
                if hasattr(item, 'properties') and len(item.properties) > 0:
                    opts['properties'] = ' '.join(item.properties)
                if hasattr(item, 'media_overlay') and item.media_overlay is not None:
                    opts['media-overlay'] = item.media_overlay
                if hasattr(item, 'media_duration') and item.media_duration is not None:
                    opts['duration'] = item.media_duration
                etree.SubElement(manifest, 'item', opts)
        return _ncx_id

    def _write_opf_spine(self, root, ncx_id):
        """Append the <spine> element to *root*; spine entries may be items,
        (item, 'no') tuples, or plain id strings."""
        spine_attributes = {'toc': ncx_id or 'ncx'}
        if self.book.direction and self.options['spine_direction']:
            spine_attributes['page-progression-direction'] = self.book.direction
        spine = etree.SubElement(root, 'spine', spine_attributes)
        for _item in self.book.spine:
            # this is for now
            # later we should be able to fetch things from tuple
            is_linear = True
            if isinstance(_item, tuple):
                item = _item[0]
                if len(_item) > 1:
                    if _item[1] == 'no':
                        is_linear = False
            else:
                item = _item
            if isinstance(item, EpubHtml):
                opts = {'idref': item.get_id()}
                if not item.is_linear or not is_linear:
                    opts['linear'] = 'no'
            elif isinstance(item, EpubItem):
                opts = {'idref': item.get_id()}
                if not item.is_linear or not is_linear:
                    opts['linear'] = 'no'
            else:
                # plain string id: try to resolve it to honour is_linear
                opts = {'idref': item}
                try:
                    itm = self.book.get_item_with_id(item)
                    if not itm.is_linear or not is_linear:
                        opts['linear'] = 'no'
                except:
                    pass
            etree.SubElement(spine, 'itemref', opts)

    def _write_opf_guide(self, root):
        """Append the legacy EPUB 2 <guide> element when enabled and the book
        defines guide entries."""
        # - http://www.idpf.org/epub/20/spec/OPF_2.0.1_draft.htm#Section2.6
        if len(self.book.guide) > 0 and self.options.get('epub2_guide'):
            guide = etree.SubElement(root, 'guide', {})
            for item in self.book.guide:
                if 'item' in item:
                    chap = item.get('item')
                    if chap:
                        _href = chap.file_name
                        _title = chap.title
                else:
                    _href = item.get('href', '')
                    _title = item.get('title', '')
                if _title is None:
                    _title = ''
                ref = etree.SubElement(guide, 'reference', {'type': item.get('type', ''),
                                                            'title': _title,
                                                            'href': _href})

    def _write_opf_bindings(self, root):
        # Append <bindings> (custom media-type handlers) when defined.
        if len(self.book.bindings) > 0:
            bindings = etree.SubElement(root, 'bindings', {})
            for item in self.book.bindings:
                etree.SubElement(bindings, 'mediaType', item)

    def _write_opf_file(self, root):
        # Serialise the assembled package tree into <FOLDER_NAME>/content.opf.
        tree_str = etree.tostring(root, pretty_print=True, encoding='utf-8', xml_declaration=True)
        self.out.writestr('%s/content.opf' % self.book.FOLDER_NAME, tree_str)

    def _write_opf(self):
        """Build and write the complete OPF package document."""
        package_attributes = {'xmlns': NAMESPACES['OPF'],
                              'unique-identifier': self.book.IDENTIFIER_ID,
                              'version': '3.0'}
        if self.book.direction and self.options['package_direction']:
            package_attributes['dir'] = self.book.direction
        root = etree.Element('package', package_attributes)
        prefixes = ['rendition: http://www.idpf.org/vocab/rendition/#'] + self.book.prefixes
        root.attrib['prefix'] = ' '.join(prefixes)
        # METADATA
        self._write_opf_metadata(root)
        # MANIFEST
        _ncx_id = self._write_opf_manifest(root)
        # SPINE
        self._write_opf_spine(root, _ncx_id)
        # GUIDE
        self._write_opf_guide(root)
        # BINDINGS
        self._write_opf_bindings(root)
        # WRITE FILE
        self._write_opf_file(root)

    def _get_nav(self, item):
        """Render the EPUB 3 navigation document for *item* (an EpubNav) and
        return it as serialised XML bytes."""
        # just a basic navigation for now
        nav_xml = parse_string(self.book.get_template('nav'))
        root = nav_xml.getroot()
        root.set('lang', self.book.language)
        root.attrib['{%s}lang' % NAMESPACES['XML']] = self.book.language
        # hrefs in the nav are made relative to the nav document's directory
        nav_dir_name = os.path.dirname(item.file_name)
        head = etree.SubElement(root, 'head')
        title = etree.SubElement(head, 'title')
        title.text = self.book.title
        # for now this just handles css files and ignores others
        for _link in item.links:
            _lnk = etree.SubElement(head, 'link', {
                'href': _link.get('href', ''), 'rel': 'stylesheet', 'type': 'text/css'
            })
        body = etree.SubElement(root, 'body')
        nav = etree.SubElement(body, 'nav', {
            '{%s}type' % NAMESPACES['EPUB']: 'toc',
            'id': 'id',
            'role': 'doc-toc',
        })
        content_title = etree.SubElement(nav, 'h2')
        content_title.text = self.book.title

        def _create_section(itm, items):
            # Recursively render TOC entries: tuples/lists become nested
            # sections, Link/EpubHtml become plain anchors.
            ol = etree.SubElement(itm, 'ol')
            for item in items:
                if isinstance(item, tuple) or isinstance(item, list):
                    li = etree.SubElement(ol, 'li')
                    if isinstance(item[0], EpubHtml):
                        a = etree.SubElement(li, 'a', {'href': os.path.relpath(item[0].file_name, nav_dir_name)})
                    elif isinstance(item[0], Section) and item[0].href != '':
                        a = etree.SubElement(li, 'a', {'href': os.path.relpath(item[0].href, nav_dir_name)})
                    elif isinstance(item[0], Link):
                        a = etree.SubElement(li, 'a', {'href': os.path.relpath(item[0].href, nav_dir_name)})
                    else:
                        a = etree.SubElement(li, 'span')
                    a.text = item[0].title
                    _create_section(li, item[1])
                elif isinstance(item, Link):
                    li = etree.SubElement(ol, 'li')
                    a = etree.SubElement(li, 'a', {'href': os.path.relpath(item.href, nav_dir_name)})
                    a.text = item.title
                elif isinstance(item, EpubHtml):
                    li = etree.SubElement(ol, 'li')
                    a = etree.SubElement(li, 'a', {'href': os.path.relpath(item.file_name, nav_dir_name)})
                    a.text = item.title

        _create_section(nav, self.book.toc)
        # LANDMARKS / GUIDE
        # - http://www.idpf.org/epub/30/spec/epub30-contentdocs.html#sec-xhtml-nav-def-types-landmarks
        if len(self.book.guide) > 0 and self.options.get('epub3_landmark'):
            # Epub2 guide types do not map completely to epub3 landmark types.
            guide_to_landscape_map = {
                'notes': 'rearnotes',
                'text': 'bodymatter'
            }
            guide_nav = etree.SubElement(body, 'nav', {'{%s}type' % NAMESPACES['EPUB']: 'landmarks'})
            guide_content_title = etree.SubElement(guide_nav, 'h2')
            guide_content_title.text = self.options.get('landmark_title', 'Guide')
            guild_ol = etree.SubElement(guide_nav, 'ol')
            for elem in self.book.guide:
                li_item = etree.SubElement(guild_ol, 'li')
                if 'item' in elem:
                    chap = elem.get('item', None)
                    if chap:
                        _href = chap.file_name
                        _title = chap.title
                else:
                    _href = elem.get('href', '')
                    _title = elem.get('title', '')
                guide_type = elem.get('type', '')
                a_item = etree.SubElement(li_item, 'a', {
                    '{%s}type' % NAMESPACES['EPUB']: guide_to_landscape_map.get(guide_type, guide_type),
                    'href': os.path.relpath(_href, nav_dir_name)
                })
                a_item.text = _title
        # PAGE-LIST
        if self.options.get('epub3_pages'):
            inserted_pages = get_pages_for_items([item for item in self.book.get_items_of_type(ebooklib.ITEM_DOCUMENT) \
                                                  if not isinstance(item, EpubNav)])
            if len(inserted_pages) > 0:
                pagelist_nav = etree.SubElement(
                    body,
                    'nav',
                    {
                        '{%s}type' % NAMESPACES['EPUB']: 'page-list',
                        'id': 'pages',
                        'hidden': 'hidden',
                    }
                )
                pagelist_content_title = etree.SubElement(pagelist_nav, 'h2')
                pagelist_content_title.text = self.options.get(
                    'pages_title', 'Pages'
                )
                pages_ol = etree.SubElement(pagelist_nav, 'ol')
                for filename, pageref, label in inserted_pages:
                    li_item = etree.SubElement(pages_ol, 'li')
                    _href = u'{}#{}'.format(filename, pageref)
                    _title = label
                    a_item = etree.SubElement(li_item, 'a', {
                        'href': os.path.relpath(_href, nav_dir_name),
                    })
                    a_item.text = _title
        tree_str = etree.tostring(nav_xml, pretty_print=True, encoding='utf-8', xml_declaration=True)
        return tree_str

    def _get_ncx(self):
        """Render the EPUB 2 NCX table of contents and return it as
        serialised XML bytes."""
        # we should be able to setup language for NCX as also
        ncx = parse_string(self.book.get_template('ncx'))
        root = ncx.getroot()
        head = etree.SubElement(root, 'head')
        # get this id
        uid = etree.SubElement(head, 'meta', {'content': self.book.uid, 'name': 'dtb:uid'})
        uid = etree.SubElement(head, 'meta', {'content': '0', 'name': 'dtb:depth'})
        uid = etree.SubElement(head, 'meta', {'content': '0', 'name': 'dtb:totalPageCount'})
        uid = etree.SubElement(head, 'meta', {'content': '0', 'name': 'dtb:maxPageNumber'})
        doc_title = etree.SubElement(root, 'docTitle')
        title = etree.SubElement(doc_title, 'text')
        title.text = self.book.title
        # doc_author = etree.SubElement(root, 'docAuthor')
        # author = etree.SubElement(doc_author, 'text')
        # author.text = 'Name of the person'
        # For now just make a very simple navMap
        nav_map = etree.SubElement(root, 'navMap')

        def _add_play_order(nav_point):
            # Stamp an increasing playOrder attribute (opt-in via options).
            nav_point.set('playOrder', str(self._play_order['start_from']))
            self._play_order['start_from'] += 1

        def _create_section(itm, items, uid):
            # Recursively build navPoint elements; *uid* numbers the
            # synthetic 'sep_N' ids for sections without a document.
            for item in items:
                if isinstance(item, tuple) or isinstance(item, list):
                    section, subsection = item[0], item[1]
                    np = etree.SubElement(itm, 'navPoint', {
                        'id': section.get_id() if isinstance(section, EpubHtml) else 'sep_%d' % uid
                    })
                    if self._play_order['enabled']:
                        _add_play_order(np)
                    nl = etree.SubElement(np, 'navLabel')
                    nt = etree.SubElement(nl, 'text')
                    nt.text = section.title
                    # CAN NOT HAVE EMPTY SRC HERE
                    href = ''
                    if isinstance(section, EpubHtml):
                        href = section.file_name
                    elif isinstance(section, Section) and section.href != '':
                        href = section.href
                    elif isinstance(section, Link):
                        href = section.href
                    nc = etree.SubElement(np, 'content', {'src': href})
                    uid = _create_section(np, subsection, uid + 1)
                elif isinstance(item, Link):
                    # backfill an empty parent src with this item's href
                    _parent = itm
                    _content = _parent.find('content')
                    if _content is not None:
                        if _content.get('src') == '':
                            _content.set('src', item.href)
                    np = etree.SubElement(itm, 'navPoint', {'id': item.uid})
                    if self._play_order['enabled']:
                        _add_play_order(np)
                    nl = etree.SubElement(np, 'navLabel')
                    nt = etree.SubElement(nl, 'text')
                    nt.text = item.title
                    nc = etree.SubElement(np, 'content', {'src': item.href})
                elif isinstance(item, EpubHtml):
                    _parent = itm
                    _content = _parent.find('content')
                    if _content is not None:
                        if _content.get('src') == '':
                            _content.set('src', item.file_name)
                    np = etree.SubElement(itm, 'navPoint', {'id': item.get_id()})
                    if self._play_order['enabled']:
                        _add_play_order(np)
                    nl = etree.SubElement(np, 'navLabel')
                    nt = etree.SubElement(nl, 'text')
                    nt.text = item.title
                    nc = etree.SubElement(np, 'content', {'src': item.file_name})
            return uid

        _create_section(nav_map, self.book.toc, 0)
        tree_str = etree.tostring(root, pretty_print=True, encoding='utf-8', xml_declaration=True)
        return tree_str

    def _write_items(self):
        """Write every book item into the archive; NCX and nav items are
        generated, others are written verbatim."""
        for item in self.book.get_items():
            if isinstance(item, EpubNcx):
                self.out.writestr('%s/%s' % (self.book.FOLDER_NAME, item.file_name), self._get_ncx())
            elif isinstance(item, EpubNav):
                self.out.writestr('%s/%s' % (self.book.FOLDER_NAME, item.file_name), self._get_nav(item))
            elif item.manifest:
                self.out.writestr('%s/%s' % (self.book.FOLDER_NAME, item.file_name), item.get_content())
            else:
                # non-manifest items go to the archive root, unprefixed
                self.out.writestr('%s' % item.file_name, item.get_content())

    def write(self):
        """Create the EPUB archive on disk. The 'mimetype' entry must be
        first and stored uncompressed per the OCF specification."""
        # check for the option allowZip64
        self.out = zipfile.ZipFile(self.file_name, 'w', zipfile.ZIP_DEFLATED)
        self.out.writestr('mimetype', 'application/epub+zip', compress_type=zipfile.ZIP_STORED)
        self._write_container()
        self._write_opf()
        self._write_items()
        self.out.close()
class EpubReader(object):
DEFAULT_OPTIONS = {}
def __init__(self, epub_file_name, options=None):
self.file_name = epub_file_name
self.book = EpubBook()
self.zf = None
self.opf_file = ''
self.opf_dir = ''
self.options = dict(self.DEFAULT_OPTIONS)
if options:
self.options.update(options)
def process(self):
# should cache this html parsing so we don't do it for every plugin
for plg in self.options.get('plugins', []):
if hasattr(plg, 'after_read'):
plg.after_read(self.book)
for item in self.book.get_items():
if isinstance(item, EpubHtml):
for plg in self.options.get('plugins', []):
if hasattr(plg, 'html_after_read'):
plg.html_after_read(self.book, item)
    def load(self):
        """Parse the EPUB archive and return the populated EpubBook."""
        self._load()

        return self.book
def read_file(self, name):
# Raises KeyError
name = os.path.normpath(name)
return self.zf.read(name)
    def _load_container(self):
        """Locate the OPF package document by parsing META-INF/container.xml
        and remember its path (opf_file) and directory (opf_dir)."""
        meta_inf = self.read_file('META-INF/container.xml')
        tree = parse_string(meta_inf)

        # The container may list several rootfiles; only the OEBPS package
        # document is of interest here.
        for root_file in tree.findall('//xmlns:rootfile[@media-type]', namespaces={'xmlns': NAMESPACES['CONTAINERNS']}):
            if root_file.get('media-type') == 'application/oebps-package+xml':
                self.opf_file = root_file.get('full-path')
                self.opf_dir = zip_path.dirname(self.opf_file)
    def _load_metadata(self):
        """Parse the OPF <metadata> element into book.metadata, keyed by
        namespace URI then tag name, and pick out version, identifier id,
        title and uid."""
        container_root = self.container.getroot()

        # get epub version
        self.book.version = container_root.get('version', None)

        # get unique-identifier
        if container_root.get('unique-identifier', None):
            self.book.IDENTIFIER_ID = container_root.get('unique-identifier')

        # get xml:lang
        # get metadata
        metadata = self.container.find('{%s}%s' % (NAMESPACES['OPF'], 'metadata'))

        nsmap = metadata.nsmap
        # map prefix -> '{uri}' clark-notation prefix for tag comparison
        nstags = dict((k, '{%s}' % v) for k, v in six.iteritems(nsmap))
        default_ns = nstags.get(None, '')

        # one bucket per namespace URI declared on the metadata element
        nsdict = dict((v, {}) for v in nsmap.values())

        def add_item(ns, tag, value, extra):
            # Accumulate (value, attributes) pairs under nsdict[ns][tag].
            if ns not in nsdict:
                nsdict[ns] = {}

            values = nsdict[ns].setdefault(tag, [])
            values.append((value, extra))

        for t in metadata:
            # skip comments and other non-element nodes
            if not etree.iselement(t) or t.tag is etree.Comment:
                continue
            if t.tag == default_ns + 'meta':
                # <meta name="prefix:name" .../>: split the prefix off and
                # file the entry under that prefix's namespace URI
                name = t.get('name')
                others = dict((k, v) for k, v in t.items())

                if name and ':' in name:
                    prefix, name = name.split(':', 1)
                else:
                    prefix = None

                add_item(t.nsmap.get(prefix, prefix), name, t.text, others)
            else:
                # strip the clark-notation namespace to get the local tag
                tag = t.tag[t.tag.rfind('}') + 1:]

                # a dc:identifier with an id attribute overrides the book's
                # IDENTIFIER_ID
                if (t.prefix and t.prefix.lower() == 'dc') and tag == 'identifier':
                    _id = t.get('id', None)

                    if _id:
                        self.book.IDENTIFIER_ID = _id

                others = dict((k, v) for k, v in t.items())
                add_item(t.nsmap[t.prefix], tag, t.text, others)

        self.book.metadata = nsdict

        titles = self.book.get_metadata('DC', 'title')
        if len(titles) > 0:
            self.book.title = titles[0][0]

        # resolve the book uid from the identifier matching IDENTIFIER_ID
        for value, others in self.book.get_metadata('DC', 'identifier'):
            if others.get('id') == self.book.IDENTIFIER_ID:
                self.book.uid = value
def _load_manifest(self):
    """Create the appropriate EpubItem subclass for every <item> in the
    OPF <manifest> and add it to the book.

    Unknown media types fall back to a plain EpubItem.
    """
    for r in self.container.find('{%s}%s' % (NAMESPACES['OPF'], 'manifest')):
        if r is not None and r.tag != '{%s}item' % NAMESPACES['OPF']:
            continue

        media_type = r.get('media-type')
        _properties = r.get('properties', '')

        if _properties:
            properties = _properties.split(' ')
        else:
            properties = []

        # people use wrong content types
        if media_type == 'image/jpg':
            media_type = 'image/jpeg'

        if media_type == 'application/x-dtbncx+xml':
            ei = EpubNcx(uid=r.get('id'), file_name=unquote(r.get('href')))

            ei.content = self.read_file(zip_path.join(self.opf_dir, ei.file_name))
        # BUG FIX: this branch used to be a separate `if`, which let the
        # final `else` clobber a freshly created EpubNcx with a generic
        # EpubItem. It must be part of the same if/elif chain.
        elif media_type == 'application/smil+xml':
            ei = EpubSMIL(uid=r.get('id'), file_name=unquote(r.get('href')))

            ei.content = self.read_file(zip_path.join(self.opf_dir, ei.file_name))
        elif media_type == 'application/xhtml+xml':
            if 'nav' in properties:
                ei = EpubNav(uid=r.get('id'), file_name=unquote(r.get('href')))

                ei.content = self.read_file(zip_path.join(self.opf_dir, r.get('href')))
            elif 'cover' in properties:
                ei = EpubCoverHtml()

                ei.content = self.read_file(zip_path.join(self.opf_dir, unquote(r.get('href'))))
            else:
                ei = EpubHtml()

                ei.id = r.get('id')
                ei.file_name = unquote(r.get('href'))
                ei.media_type = media_type
                ei.media_overlay = r.get('media-overlay', None)
                ei.media_duration = r.get('duration', None)
                ei.content = self.read_file(zip_path.join(self.opf_dir, ei.get_name()))
                ei.properties = properties
        elif media_type in IMAGE_MEDIA_TYPES:
            if 'cover-image' in properties:
                ei = EpubCover(uid=r.get('id'), file_name=unquote(r.get('href')))

                ei.media_type = media_type
                ei.content = self.read_file(zip_path.join(self.opf_dir, ei.get_name()))
            else:
                ei = EpubImage()

                ei.id = r.get('id')
                ei.file_name = unquote(r.get('href'))
                ei.media_type = media_type
                ei.content = self.read_file(zip_path.join(self.opf_dir, ei.get_name()))
        else:
            # different types
            ei = EpubItem()

            ei.id = r.get('id')
            ei.file_name = unquote(r.get('href'))
            ei.media_type = media_type

            ei.content = self.read_file(zip_path.join(self.opf_dir, ei.get_name()))

        self.book.add_item(ei)
def _parse_ncx(self, data):
    # Parse an NCX (EPUB 2 navigation) document and build self.book.toc
    # as a nested structure of Link leaves and (Section, children) pairs.
    tree = parse_string(data)
    tree_root = tree.getroot()

    nav_map = tree_root.find('{%s}navMap' % NAMESPACES['DAISY'])

    def _get_children(elems, n, nid):
        # Recursively convert a navPoint (or the navMap itself at depth
        # n == 0) into either a Link leaf or a (Section, children) pair.
        label, content = '', ''
        children = []

        for a in elems.getchildren():
            if a.tag == '{%s}navLabel' % NAMESPACES['DAISY']:
                label = a.getchildren()[0].text
            if a.tag == '{%s}content' % NAMESPACES['DAISY']:
                content = a.get('src', '')
            if a.tag == '{%s}navPoint' % NAMESPACES['DAISY']:
                children.append(_get_children(a, n + 1, a.get('id', '')))

        if len(children) > 0:
            if n == 0:
                # the top-level navMap contributes only its children
                return children

            return (Section(label, href=content),
                    children)
        else:
            return Link(content, label, nid)

    self.book.toc = _get_children(nav_map, 0, '')
def _parse_nav(self, data, base_path, navtype='toc'):
    # Parse an EPUB 3 navigation document: either the table of contents
    # (navtype='toc' -> book.toc) or the page list
    # (navtype='pages' -> book.pages).
    html_node = parse_html_string(data)

    if navtype == 'toc':
        # parsing the table of contents
        nav_node = html_node.xpath("//nav[@*='toc']")[0]
    else:
        # parsing the list of pages
        nav_node = html_node.xpath("//nav[@*='page-list']")[0]

    def parse_list(list_node):
        # Convert an <ol> element into a list of Link items and
        # (Section, children) pairs, resolving hrefs against base_path.
        items = []

        for item_node in list_node.findall('li'):

            sublist_node = item_node.find('ol')
            link_node = item_node.find('a')

            if sublist_node is not None:
                # nested list: this entry becomes a Section
                title = item_node[0].text
                children = parse_list(sublist_node)

                if link_node is not None:
                    href = zip_path.normpath(zip_path.join(base_path, link_node.get('href')))
                    items.append((Section(title, href=href), children))
                else:
                    items.append((Section(title), children))
            elif link_node is not None:
                title = link_node.text
                href = zip_path.normpath(zip_path.join(base_path, link_node.get('href')))

                items.append(Link(href, title))

        return items

    if navtype == 'toc':
        self.book.toc = parse_list(nav_node.find('ol'))
    elif nav_node is not None:
        # generate the pages list if there is one
        self.book.pages = parse_list(nav_node.find('ol'))

        # generate the per-file pages lists
        # because of the order of parsing the files, this can't be done
        # when building the EpubHtml objects
        htmlfiles = dict()
        for htmlfile in self.book.items:
            if isinstance(htmlfile, EpubHtml):
                htmlfiles[htmlfile.file_name] = htmlfile
        for page in self.book.pages:
            try:
                (filename, idref) = page.href.split('#')
            except ValueError:
                # href without a fragment part
                filename = page.href
            if filename in htmlfiles:
                htmlfiles[filename].pages.append(page)
def _load_spine(self):
    # Read the <spine> element: the linear reading order, the page
    # progression direction, and the id of the NCX item to parse.
    spine = self.container.find('{%s}%s' % (NAMESPACES['OPF'], 'spine'))

    # each entry is (idref, linear-flag); linear defaults to 'yes'
    self.book.spine = [(t.get('idref'), t.get('linear', 'yes')) for t in spine]

    toc = spine.get('toc', '')
    self.book.set_direction(spine.get('page-progression-direction', None))

    # should read ncx or nav file
    if toc:
        try:
            ncxFile = self.read_file(zip_path.join(self.opf_dir, self.book.get_item_with_id(toc).get_name()))
        except KeyError:
            raise EpubException(-1, 'Can not find ncx file.')

        self._parse_ncx(ncxFile)
def _load_guide(self):
    """Read the optional OPF <guide> element into book.guide as a list
    of {'href', 'title', 'type'} dicts."""
    guide = self.container.find('{%s}%s' % (NAMESPACES['OPF'], 'guide'))
    if guide is None:
        return
    entries = []
    for ref in guide:
        entries.append({
            'href': ref.get('href'),
            'title': ref.get('title'),
            'type': ref.get('type'),
        })
    self.book.guide = entries
def _load_opf_file(self):
    # Parse the OPF package document and populate the book: metadata,
    # manifest, spine and guide, then the EPUB 3 nav document if any.
    try:
        s = self.read_file(self.opf_file)
    except KeyError:
        raise EpubException(-1, 'Can not find container file')

    self.container = parse_string(s)

    self._load_metadata()
    self._load_manifest()
    self._load_spine()
    self._load_guide()

    # read nav file if found
    #
    nav_item = next((item for item in self.book.items if isinstance(item, EpubNav)), None)
    if nav_item:
        # the NCX (parsed in _load_spine) wins for the toc; the nav
        # document only fills book.toc when it is still empty
        if not self.book.toc:
            self._parse_nav(
                nav_item.content,
                zip_path.dirname(nav_item.file_name),
                navtype='toc'
            )
        self._parse_nav(
            nav_item.content,
            zip_path.dirname(nav_item.file_name),
            navtype='pages'
        )
def _load(self):
    """Open the epub archive and parse its contents into self.book.

    :Raises:
      EpubException when the file is not a readable zip archive.
    """
    try:
        self.zf = zipfile.ZipFile(self.file_name, 'r', compression=zipfile.ZIP_DEFLATED, allowZip64=True)
    except zipfile.BadZipfile:
        raise EpubException(0, 'Bad Zip file')
    except zipfile.LargeZipFile:
        raise EpubException(1, 'Large Zip file')

    try:
        # 1st check metadata
        self._load_container()
        self._load_opf_file()
    finally:
        # close the archive even when parsing fails, so the file handle
        # is not leaked
        self.zf.close()
# WRITE
def write_epub(name, book, options=None):
    """
    Creates epub file with the content defined in EpubBook.

    >>> ebooklib.write_epub('book.epub', book)

    :Args:
      - name: file name for the output file
      - book: instance of EpubBook
      - options: extra options as dictionary (optional)
    """
    epub = EpubWriter(name, book, options)

    epub.process()

    try:
        epub.write()
    except IOError:
        # Writing remains best-effort, but record the failure instead of
        # silently swallowing it.
        logging.error('Could not write epub file "%s".', name)
# READ
def read_epub(name, options=None):
    """
    Creates new instance of EpubBook with the content defined in the input file.

    >>> book = ebooklib.read_epub('book.epub')

    :Args:
      - name: full path to the input file
      - options: extra options as dictionary (optional)

    :Returns:
      Instance of EpubBook.
    """
    reader = EpubReader(name, options)

    loaded_book = reader.load()
    reader.process()

    return loaded_book
# Fixed #164 - Parsing files fails because of page-list
# This file is part of EbookLib.
# Copyright (c) 2013 Aleksandar Erkalovic <aerkalov@gmail.com>
#
# EbookLib is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EbookLib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with EbookLib. If not, see <http://www.gnu.org/licenses/>.
import zipfile
import six
import logging
import uuid
import posixpath as zip_path
import os.path
from collections import OrderedDict
try:
from urllib.parse import unquote
except ImportError:
from urllib import unquote
from lxml import etree
import ebooklib
from ebooklib.utils import parse_string, parse_html_string, guess_type, get_pages_for_items
# Version of EPUB library
VERSION = (0, 17, 0)
NAMESPACES = {'XML': 'http://www.w3.org/XML/1998/namespace',
'EPUB': 'http://www.idpf.org/2007/ops',
'DAISY': 'http://www.daisy.org/z3986/2005/ncx/',
'OPF': 'http://www.idpf.org/2007/opf',
'CONTAINERNS': 'urn:oasis:names:tc:opendocument:xmlns:container',
'DC': 'http://purl.org/dc/elements/1.1/',
'XHTML': 'http://www.w3.org/1999/xhtml'}
# XML Templates
CONTAINER_PATH = 'META-INF/container.xml'
CONTAINER_XML = '''<?xml version='1.0' encoding='utf-8'?>
<container xmlns="urn:oasis:names:tc:opendocument:xmlns:container" version="1.0">
<rootfiles>
<rootfile media-type="application/oebps-package+xml" full-path="%(folder_name)s/content.opf"/>
</rootfiles>
</container>
'''
NCX_XML = six.b('''<!DOCTYPE ncx PUBLIC "-//NISO//DTD ncx 2005-1//EN" "http://www.daisy.org/z3986/2005/ncx-2005-1.dtd">
<ncx xmlns="http://www.daisy.org/z3986/2005/ncx/" version="2005-1" />''')
NAV_XML = six.b('''<?xml version="1.0" encoding="utf-8"?><!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops"/>''')
CHAPTER_XML = six.b('''<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops" epub:prefix="z3998: http://www.daisy.org/z3998/2012/vocab/structure/#"></html>''')
COVER_XML = six.b('''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops" lang="en" xml:lang="en">
<head>
<style>
body { margin: 0em; padding: 0em; }
img { max-width: 100%; max-height: 100%; }
</style>
</head>
<body>
<img src="" alt="" />
</body>
</html>''')
IMAGE_MEDIA_TYPES = ['image/jpeg', 'image/jpg', 'image/png', 'image/svg+xml']
# TOC and navigation elements
class Section(object):
    """A named table-of-contents entry, optionally linking to a document."""

    def __init__(self, title, href=''):
        self.title = title
        self.href = href
class Link(object):
    """A leaf table-of-contents entry pointing at a document."""

    def __init__(self, href, title, uid=None):
        self.href = href
        self.title = title
        self.uid = uid
# Exceptions
class EpubException(Exception):
    """Error raised while reading or writing an epub archive."""

    def __init__(self, code, msg):
        self.code = code
        self.msg = msg

    def __str__(self):
        return repr(self.msg)
# Items
class EpubItem(object):
    """
    Base class for the items in a book.
    """

    def __init__(self, uid=None, file_name='', media_type='', content=six.b(''), manifest=True):
        """
        :Args:
          - uid: Unique identifier for this item (optional)
          - file_name: File name for this item (optional)
          - media_type: Media type for this item (optional)
          - content: Content for this item (optional)
          - manifest: Manifest for this item (optional)
        """
        self.id = uid
        self.file_name = file_name
        self.media_type = media_type
        self.content = content
        self.is_linear = True
        self.manifest = manifest

        # back-reference, filled in by EpubBook.add_item
        self.book = None

    def get_id(self):
        """
        Returns unique identifier for this item.

        :Returns:
          Returns uid number as string.
        """
        return self.id

    def get_name(self):
        """
        Returns name for this item. By default it is always file name but it does not have to be.

        :Returns:
          Returns file name for this item.
        """
        return self.file_name

    def get_type(self):
        """
        Guess type according to the file extension. Might not be the best way how to do it, but it works for now.

        Items can be of type:
          - ITEM_UNKNOWN = 0
          - ITEM_IMAGE = 1
          - ITEM_STYLE = 2
          - ITEM_SCRIPT = 3
          - ITEM_NAVIGATION = 4
          - ITEM_VECTOR = 5
          - ITEM_FONT = 6
          - ITEM_VIDEO = 7
          - ITEM_AUDIO = 8
          - ITEM_DOCUMENT = 9
          - ITEM_COVER = 10

        We map type according to the extensions which are defined in ebooklib.EXTENSIONS.

        :Returns:
          Returns type of the item as number.
        """
        _, extension = zip_path.splitext(self.get_name())
        extension = extension.lower()

        for item_type, extensions in six.iteritems(ebooklib.EXTENSIONS):
            if extension in extensions:
                return item_type

        return ebooklib.ITEM_UNKNOWN

    def get_content(self, default=six.b('')):
        """
        Returns content of the item. Content should be of type 'str' (Python 2) or 'bytes' (Python 3)

        :Args:
          - default: Default value for the content if it is not already defined.

        :Returns:
          Returns content of the item.
        """
        if self.content:
            return self.content
        return default

    def set_content(self, content):
        """
        Sets content value for this item.

        :Args:
          - content: Content value
        """
        self.content = content

    def __str__(self):
        return '<EpubItem:%s>' % self.id
class EpubNcx(EpubItem):
    """Navigation Control File (NCX), the EPUB 2 style navigation document."""

    def __init__(self, uid='ncx', file_name='toc.ncx'):
        super(EpubNcx, self).__init__(uid=uid, file_name=file_name, media_type='application/x-dtbncx+xml')

    def __str__(self):
        return '<EpubNcx:%s>' % self.id
class EpubCover(EpubItem):
    """Cover image item of the EPUB file."""

    def __init__(self, uid='cover-img', file_name=''):
        super(EpubCover, self).__init__(uid=uid, file_name=file_name)

    def get_type(self):
        """Always classified as ebooklib.ITEM_COVER."""
        return ebooklib.ITEM_COVER

    def __str__(self):
        return '<EpubCover:%s:%s>' % (self.id, self.file_name)
class EpubHtml(EpubItem):
    """
    Represents HTML document in the EPUB file.
    """
    # name of the template used by get_content to render this document
    _template_name = 'chapter'

    def __init__(self, uid=None, file_name='', media_type='', content=None, title='',
                 lang=None, direction=None, media_overlay=None, media_duration=None):
        super(EpubHtml, self).__init__(uid, file_name, media_type, content)

        self.title = title
        self.lang = lang
        self.direction = direction

        # media overlay (SMIL) association, if any
        self.media_overlay = media_overlay
        self.media_duration = media_duration

        self.links = []
        self.properties = []
        # Link objects for pages contained in this document
        self.pages = []

    def is_chapter(self):
        """
        Returns if this document is chapter or not.

        :Returns:
          Returns book value.
        """
        return True

    def get_type(self):
        """
        Always returns ebooklib.ITEM_DOCUMENT as type of this document.

        :Returns:
          Always returns ebooklib.ITEM_DOCUMENT
        """
        return ebooklib.ITEM_DOCUMENT

    def set_language(self, lang):
        """
        Sets language for this book item. By default it will use language of the book but it
        can be overwritten with this call.
        """
        self.lang = lang

    def get_language(self):
        """
        Get language code for this book item. Language of the book item can be different from
        the language settings defined globaly for book.

        :Returns:
          As string returns language code.
        """
        return self.lang

    def add_link(self, **kwgs):
        """
        Add additional link to the document. Links will be embeded only inside of this document.

        >>> add_link(href='styles.css', rel='stylesheet', type='text/css')
        """
        self.links.append(kwgs)

    def get_links(self):
        """
        Returns list of additional links defined for this document.

        :Returns:
          As tuple return list of links.
        """
        return (link for link in self.links)

    def get_links_of_type(self, link_type):
        """
        Returns list of additional links of specific type.

        :Returns:
          As tuple returns list of links.
        """
        return (link for link in self.links if link.get('type', '') == link_type)

    def add_item(self, item):
        """
        Add other item to this document. It will create additional links according to the item type.

        :Args:
          - item: item we want to add defined as instance of EpubItem
        """
        if item.get_type() == ebooklib.ITEM_STYLE:
            self.add_link(href=item.get_name(), rel='stylesheet', type='text/css')

        if item.get_type() == ebooklib.ITEM_SCRIPT:
            self.add_link(src=item.get_name(), type='text/javascript')

    def get_body_content(self):
        """
        Returns content of BODY element for this HTML document. Content will be of type 'str' (Python 2)
        or 'bytes' (Python 3).

        :Returns:
          Returns content of this document.
        """
        try:
            html_tree = parse_html_string(self.content)
        except:
            # broad on purpose: any parse failure yields empty content
            return ''

        html_root = html_tree.getroottree()

        if len(html_root.find('body')) != 0:
            body = html_tree.find('body')

            tree_str = etree.tostring(body, pretty_print=True, encoding='utf-8', xml_declaration=False)

            # this is so stupid
            # strip the wrapping <body>...</body> tags from the output
            if tree_str.startswith(six.b('<body>')):
                n = tree_str.rindex(six.b('</body>'))

                return tree_str[6:n]

            return tree_str

        return ''

    def get_content(self, default=None):
        """
        Returns content for this document as HTML string. Content will be of type 'str' (Python 2)
        or 'bytes' (Python 3).

        :Args:
          - default: Default value for the content if it is not defined.

        :Returns:
          Returns content of this document.
        """
        # render the chapter template and transplant this document's body
        # and links into it
        tree = parse_string(self.book.get_template(self._template_name))
        tree_root = tree.getroot()

        tree_root.set('lang', self.lang or self.book.language)
        tree_root.attrib['{%s}lang' % NAMESPACES['XML']] = self.lang or self.book.language

        # add to the head also
        #  <meta charset="utf-8" />

        try:
            html_tree = parse_html_string(self.content)
        except:
            # broad on purpose: any parse failure yields empty content
            return ''

        html_root = html_tree.getroottree()

        # create and populate head

        _head = etree.SubElement(tree_root, 'head')

        if self.title != '':
            _title = etree.SubElement(_head, 'title')
            _title.text = self.title

        for lnk in self.links:
            if lnk.get('type') == 'text/javascript':
                _lnk = etree.SubElement(_head, 'script', lnk)
                # force <script></script>
                _lnk.text = ''
            else:
                _lnk = etree.SubElement(_head, 'link', lnk)

        # this should not be like this
        # head = html_root.find('head')

        # if head is not None:
        #     for i in head.getchildren():
        #         if i.tag == 'title' and self.title != '':
        #             continue

        #         _head.append(i)

        # create and populate body

        _body = etree.SubElement(tree_root, 'body')

        if self.direction:
            _body.set('dir', self.direction)

        body = html_tree.find('body')

        if body is not None:
            for i in body.getchildren():
                _body.append(i)

        tree_str = etree.tostring(tree, pretty_print=True, encoding='utf-8', xml_declaration=True)

        return tree_str

    def __str__(self):
        return '<EpubHtml:%s:%s>' % (self.id, self.file_name)
class EpubCoverHtml(EpubHtml):
    """
    Represents Cover page in the EPUB file.
    """

    def __init__(self, uid='cover', file_name='cover.xhtml', image_name='', title='Cover'):
        super(EpubCoverHtml, self).__init__(uid=uid, file_name=file_name, title=title)

        self.image_name = image_name
        self.is_linear = False

    def is_chapter(self):
        """
        Returns if this document is chapter or not.

        :Returns:
          Returns book value.
        """
        return False

    def get_content(self):
        """
        Returns content for cover page as HTML string. Content will be of type 'str' (Python 2) or 'bytes' (Python 3).

        :Returns:
          Returns content of this document.
        """
        # render from the 'cover' template, then point the single <img>
        # at the configured cover image
        self.content = self.book.get_template('cover')

        tree = parse_string(super(EpubCoverHtml, self).get_content())
        tree_root = tree.getroot()

        cover_images = tree_root.xpath('//xhtml:img', namespaces={'xhtml': NAMESPACES['XHTML']})
        img = cover_images[0]
        img.set('src', self.image_name)
        img.set('alt', self.title)

        return etree.tostring(tree, pretty_print=True, encoding='utf-8', xml_declaration=True)

    def __str__(self):
        return '<EpubCoverHtml:%s:%s>' % (self.id, self.file_name)
class EpubNav(EpubHtml):
    """
    Represents the EPUB 3 Navigation Document.
    """

    def __init__(self, uid='nav', file_name='nav.xhtml', media_type='application/xhtml+xml'):
        super(EpubNav, self).__init__(uid=uid, file_name=file_name, media_type=media_type)

    def is_chapter(self):
        """
        Returns if this document is chapter or not.

        :Returns:
          Returns book value.
        """
        return False

    def __str__(self):
        return '<EpubNav:%s:%s>' % (self.id, self.file_name)
class EpubImage(EpubItem):
    """
    Represents Image in the EPUB file.
    """

    def __init__(self, uid=None, file_name=''):
        # Generalized: accept the same uid/file_name keywords as the other
        # item classes (e.g. EpubCover), so callers no longer have to
        # mutate the attributes after construction. Calling with no
        # arguments behaves exactly as before.
        super(EpubImage, self).__init__(uid=uid, file_name=file_name)

    def get_type(self):
        """Always classified as ebooklib.ITEM_IMAGE."""
        return ebooklib.ITEM_IMAGE

    def __str__(self):
        return '<EpubImage:%s:%s>' % (self.id, self.file_name)
class EpubSMIL(EpubItem):
    """Media-overlay (SMIL) document in the EPUB file."""

    def __init__(self, uid=None, file_name='', content=None):
        super(EpubSMIL, self).__init__(uid=uid, file_name=file_name, media_type='application/smil+xml', content=content)

    def get_type(self):
        """Always classified as ebooklib.ITEM_SMIL."""
        return ebooklib.ITEM_SMIL

    def __str__(self):
        return '<EpubSMIL:%s:%s>' % (self.id, self.file_name)
# EpubBook
class EpubBook(object):
    """In-memory model of a book: metadata, items, spine, guide, pages,
    table of contents and the XML templates used to render it."""

    def __init__(self):
        self.EPUB_VERSION = None

        self.reset()

        # we should have options here

    def reset(self):
        "Initialises all needed variables to default values"

        self.metadata = {}
        self.items = []
        self.spine = []
        self.guide = []
        self.pages = []
        self.toc = []
        self.bindings = []

        self.IDENTIFIER_ID = 'id'
        self.FOLDER_NAME = 'EPUB'

        # counters used to generate ids in add_item
        self._id_html = 0
        self._id_image = 0
        self._id_static = 0

        self.title = ''
        self.language = 'en'
        self.direction = None

        self.templates = {
            'ncx': NCX_XML,
            'nav': NAV_XML,
            'chapter': CHAPTER_XML,
            'cover': COVER_XML
        }

        self.add_metadata('OPF', 'generator', '', {
            'name': 'generator', 'content': 'Ebook-lib %s' % '.'.join([str(s) for s in VERSION])
        })

        # default to using a randomly-unique identifier if one is not specified manually
        self.set_identifier(str(uuid.uuid4()))

        # custom prefixes and namespaces to be set to the content.opf doc
        self.prefixes = []
        self.namespaces = {}

    def set_identifier(self, uid):
        """
        Sets unique id for this epub

        :Args:
          - uid: Value of unique identifier for this book
        """
        self.uid = uid

        self.set_unique_metadata('DC', 'identifier', self.uid, {'id': self.IDENTIFIER_ID})

    def set_title(self, title):
        """
        Set title. You can set multiple titles.

        :Args:
          - title: Title value
        """
        self.title = title

        self.add_metadata('DC', 'title', self.title)

    def set_language(self, lang):
        """
        Set language for this epub. You can set multiple languages. Specific items in the book can have
        different language settings.

        :Args:
          - lang: Language code
        """
        self.language = lang

        self.add_metadata('DC', 'language', lang)

    def set_direction(self, direction):
        """
        :Args:
          - direction: Options are "ltr", "rtl" and "default"
        """
        self.direction = direction

    def set_cover(self, file_name, content, create_page=True):
        """
        Set cover and create cover document if needed.

        :Args:
          - file_name: file name of the cover page
          - content: Content for the cover image
          - create_page: Should cover page be defined. Defined as bool value (optional). Default value is True.
        """
        # as it is now, it can only be called once
        c0 = EpubCover(file_name=file_name)
        c0.content = content
        self.add_item(c0)

        if create_page:
            c1 = EpubCoverHtml(image_name=file_name)
            self.add_item(c1)

        self.add_metadata(None, 'meta', '', OrderedDict([('name', 'cover'), ('content', 'cover-img')]))

    def add_author(self, author, file_as=None, role=None, uid='creator'):
        "Add author for this document"

        self.add_metadata('DC', 'creator', author, {'id': uid})

        if file_as:
            self.add_metadata(None, 'meta', file_as, {'refines': '#' + uid,
                                                      'property': 'file-as',
                                                      'scheme': 'marc:relators'})
        if role:
            self.add_metadata(None, 'meta', role, {'refines': '#' + uid,
                                                   'property': 'role',
                                                   'scheme': 'marc:relators'})

    def add_metadata(self, namespace, name, value, others=None):
        "Add metadata"

        # short namespace names ('DC', 'OPF', ...) are expanded to URIs
        if namespace in NAMESPACES:
            namespace = NAMESPACES[namespace]

        if namespace not in self.metadata:
            self.metadata[namespace] = {}

        if name not in self.metadata[namespace]:
            self.metadata[namespace][name] = []

        self.metadata[namespace][name].append((value, others))

    def get_metadata(self, namespace, name):
        "Retrieve metadata"

        if namespace in NAMESPACES:
            namespace = NAMESPACES[namespace]

        return self.metadata[namespace].get(name, [])

    def set_unique_metadata(self, namespace, name, value, others=None):
        "Add metadata if metadata with this identifier does not already exist, otherwise update existing metadata."

        if namespace in NAMESPACES:
            namespace = NAMESPACES[namespace]

        if namespace in self.metadata and name in self.metadata[namespace]:
            self.metadata[namespace][name] = [(value, others)]
        else:
            self.add_metadata(namespace, name, value, others)

    def add_item(self, item):
        """
        Add additional item to the book. If not defined, media type and chapter id will be defined
        for the item.

        :Args:
          - item: Item instance
        """
        if item.media_type == '':
            (has_guessed, media_type) = guess_type(item.get_name().lower())

            if has_guessed:
                if media_type is not None:
                    item.media_type = media_type
                else:
                    item.media_type = has_guessed
            else:
                item.media_type = 'application/octet-stream'

        if not item.get_id():
            # make chapter_, image_ and static_ configurable
            if isinstance(item, EpubHtml):
                item.id = 'chapter_%d' % self._id_html
                self._id_html += 1
                # If there's a page list, append it to the book's page list
                self.pages += item.pages
            elif isinstance(item, EpubImage):
                item.id = 'image_%d' % self._id_image
                self._id_image += 1
            else:
                item.id = 'static_%d' % self._id_image
                self._id_image += 1

        item.book = self
        self.items.append(item)

        return item

    def get_item_with_id(self, uid):
        """
        Returns item for defined UID.

        >>> book.get_item_with_id('image_001')

        :Args:
          - uid: UID for the item

        :Returns:
          Returns item object. Returns None if nothing was found.
        """
        for item in self.get_items():
            if item.id == uid:
                return item

        return None

    def get_item_with_href(self, href):
        """
        Returns item for defined HREF.

        >>> book.get_item_with_href('EPUB/document.xhtml')

        :Args:
          - href: HREF for the item we are searching for

        :Returns:
          Returns item object. Returns None if nothing was found.
        """
        for item in self.get_items():
            if item.get_name() == href:
                return item

        return None

    def get_items(self):
        """
        Returns all items attached to this book.

        :Returns:
          Returns all items as tuple.
        """
        return (item for item in self.items)

    def get_items_of_type(self, item_type):
        """
        Returns all items of specified type.

        >>> book.get_items_of_type(epub.ITEM_IMAGE)

        :Args:
          - item_type: Type for items we are searching for

        :Returns:
          Returns found items as tuple.
        """
        return (item for item in self.items if item.get_type() == item_type)

    def get_items_of_media_type(self, media_type):
        """
        Returns all items of specified media type.

        :Args:
          - media_type: Media type for items we are searching for

        :Returns:
          Returns found items as tuple.
        """
        return (item for item in self.items if item.media_type == media_type)

    def set_template(self, name, value):
        """
        Defines templates which are used to generate certain types of pages. When defining new value for the template
        we have to use content of type 'str' (Python 2) or 'bytes' (Python 3).

        At the moment we use these templates:
          - ncx
          - nav
          - chapter
          - cover

        :Args:
          - name: Name for the template
          - value: Content for the template
        """
        self.templates[name] = value

    def get_template(self, name):
        """
        Returns value for the template.

        :Args:
          - name: template name

        :Returns:
          Value of the template.
        """
        return self.templates.get(name)

    def add_prefix(self, name, uri):
        """
        Appends custom prefix to be added to the content.opf document

        >>> epub_book.add_prefix('bkterms', 'http://booktype.org/')

        :Args:
          - name: namespace name
          - uri: URI for the namespace
        """
        self.prefixes.append('%s: %s' % (name, uri))
class EpubWriter(object):
# Default writer behaviour; overridable per-writer via the options dict.
DEFAULT_OPTIONS = {
    'epub2_guide': True,        # emit the legacy EPUB 2 <guide>
    'epub3_landmark': True,     # emit the landmarks nav section
    'epub3_pages': True,        # emit the page-list nav section
    'landmark_title': 'Guide',
    'pages_title': 'Pages',
    'spine_direction': True,    # copy book direction onto <spine>
    'package_direction': False, # copy book direction onto <package>
    'play_order': {
        'enabled': False,
        'start_from': 1
    }
}
def __init__(self, name, book, options=None):
    """Prepare a writer for *book*, targeting the output file *name*.

    :Args:
      - name: output file name
      - book: EpubBook instance to serialize
      - options: overrides merged over DEFAULT_OPTIONS (optional)
    """
    self.file_name = name
    self.book = book

    merged = dict(self.DEFAULT_OPTIONS)
    if options:
        merged.update(options)
    self.options = merged

    self._init_play_order()
def _init_play_order(self):
    """Copy NCX playOrder numbering settings out of self.options."""
    settings = {
        'enabled': False,
        'start_from': 1
    }
    self._play_order = settings

    try:
        play_order = self.options['play_order']
        settings['enabled'] = play_order['enabled']
        settings['start_from'] = play_order['start_from']
    except KeyError:
        # missing keys leave whatever defaults were not yet overwritten
        pass
def process(self):
    """Run registered plugin hooks over the book before writing.

    NOTE: should cache the html parsing so it is not redone per plugin.
    """
    for plugin in self.options.get('plugins', []):
        if hasattr(plugin, 'before_write'):
            plugin.before_write(self.book)

    for item in self.book.get_items():
        if not isinstance(item, EpubHtml):
            continue
        for plugin in self.options.get('plugins', []):
            if hasattr(plugin, 'html_before_write'):
                plugin.html_before_write(self.book, item)
def _write_container(self):
    """Write META-INF/container.xml pointing at the package document."""
    xml_text = CONTAINER_XML % {'folder_name': self.book.FOLDER_NAME}
    self.out.writestr(CONTAINER_PATH, xml_text)
def _write_opf_metadata(self, root):
    # Build the <metadata> element of the package document from
    # self.book.metadata.
    # This is really not needed
    # problem is uppercase/lowercase
    # for ns_name, values in six.iteritems(self.book.metadata):
    #     if ns_name:
    #         for n_id, ns_url in six.iteritems(NAMESPACES):
    #             if ns_name == ns_url:
    #                 nsmap[n_id.lower()] = NAMESPACES[n_id]

    nsmap = {'dc': NAMESPACES['DC'], 'opf': NAMESPACES['OPF']}
    nsmap.update(self.book.namespaces)

    metadata = etree.SubElement(root, 'metadata', nsmap=nsmap)

    # dcterms:modified is mandatory in EPUB 3; use the configured mtime
    # or the current time
    el = etree.SubElement(metadata, 'meta', {'property': 'dcterms:modified'})
    if 'mtime' in self.options:
        mtime = self.options['mtime']
    else:
        import datetime
        mtime = datetime.datetime.now()
    el.text = mtime.strftime('%Y-%m-%dT%H:%M:%SZ')

    for ns_name, values in six.iteritems(self.book.metadata):
        if ns_name == NAMESPACES['OPF']:
            # OPF-namespaced entries become plain <meta> elements; any
            # stored dcterms:modified is skipped (written above already)
            for values in values.values():
                for v in values:
                    if 'property' in v[1] and v[1]['property'] == 'dcterms:modified':
                        continue
                    try:
                        el = etree.SubElement(metadata, 'meta', v[1])
                        if v[0]:
                            el.text = v[0]
                    except ValueError:
                        logging.error('Could not create metadata.')
        else:
            # everything else becomes a namespaced element, e.g. dc:title
            for name, values in six.iteritems(values):
                for v in values:
                    try:
                        if ns_name:
                            el = etree.SubElement(metadata, '{%s}%s' % (ns_name, name), v[1])
                        else:
                            el = etree.SubElement(metadata, '%s' % name, v[1])
                        el.text = v[0]
                    except ValueError:
                        logging.error('Could not create metadata "{}".'.format(name))
def _write_opf_manifest(self, root):
    # Emit the <manifest> element listing every book item.  Returns the
    # manifest id of the NCX item (or None) for use by the <spine>.
    manifest = etree.SubElement(root, 'manifest')
    _ncx_id = None

    # 'properties' values of interest:
    # mathml, scripted, svg, remote-resources, and switch
    # nav
    # cover-image

    for item in self.book.get_items():
        if not item.manifest:
            continue

        if isinstance(item, EpubNav):
            etree.SubElement(manifest, 'item', {'href': item.get_name(),
                                                'id': item.id,
                                                'media-type': item.media_type,
                                                'properties': 'nav'})
        elif isinstance(item, EpubNcx):
            _ncx_id = item.id
            etree.SubElement(manifest, 'item', {'href': item.file_name,
                                                'id': item.id,
                                                'media-type': item.media_type})

        elif isinstance(item, EpubCover):
            etree.SubElement(manifest, 'item', {'href': item.file_name,
                                                'id': item.id,
                                                'media-type': item.media_type,
                                                'properties': 'cover-image'})
        else:
            opts = {'href': item.file_name,
                    'id': item.id,
                    'media-type': item.media_type}

            if hasattr(item, 'properties') and len(item.properties) > 0:
                opts['properties'] = ' '.join(item.properties)

            if hasattr(item, 'media_overlay') and item.media_overlay is not None:
                opts['media-overlay'] = item.media_overlay

            if hasattr(item, 'media_duration') and item.media_duration is not None:
                opts['duration'] = item.media_duration

            etree.SubElement(manifest, 'item', opts)

    return _ncx_id
def _write_opf_spine(self, root, ncx_id):
    """Emit the <spine> element describing the reading order.

    :Args:
      - root: the <package> element being built
      - ncx_id: manifest id of the NCX item, or None (falls back to 'ncx')
    """
    spine_attributes = {'toc': ncx_id or 'ncx'}
    if self.book.direction and self.options['spine_direction']:
        spine_attributes['page-progression-direction'] = self.book.direction

    spine = etree.SubElement(root, 'spine', spine_attributes)

    for _item in self.book.spine:
        # this is for now
        # later we should be able to fetch things from tuple

        is_linear = True

        if isinstance(_item, tuple):
            item = _item[0]

            if len(_item) > 1:
                if _item[1] == 'no':
                    is_linear = False
        else:
            item = _item

        # EpubHtml and every other EpubItem used to have two identical
        # branches here; EpubHtml is an EpubItem, so one check suffices.
        if isinstance(item, EpubItem):
            opts = {'idref': item.get_id()}

            if not item.is_linear or not is_linear:
                opts['linear'] = 'no'
        else:
            # plain idref string: look the item up in the manifest.
            # get_item_with_id returns None for unknown ids, in which
            # case the entry stays linear. (This used to be a bare
            # `except: pass`, which hid the AttributeError on None.)
            opts = {'idref': item}

            itm = self.book.get_item_with_id(item)
            if itm is not None and (not itm.is_linear or not is_linear):
                opts['linear'] = 'no'

        etree.SubElement(spine, 'itemref', opts)
def _write_opf_guide(self, root):
    """Emit the legacy EPUB 2 <guide> element when enabled.

    - http://www.idpf.org/epub/20/spec/OPF_2.0.1_draft.htm#Section2.6
    """
    if len(self.book.guide) > 0 and self.options.get('epub2_guide'):
        guide = etree.SubElement(root, 'guide', {})

        for item in self.book.guide:
            chap = item.get('item') if 'item' in item else None

            if chap:
                _href = chap.file_name
                _title = chap.title
            else:
                # BUG FIX: previously, a present-but-falsy 'item' entry
                # left _href/_title unbound (or stale from the previous
                # iteration); now it falls back to the dict values.
                _href = item.get('href', '')
                _title = item.get('title', '')

            if _title is None:
                _title = ''

            ref = etree.SubElement(guide, 'reference', {'type': item.get('type', ''),
                                                        'title': _title,
                                                        'href': _href})
def _write_opf_bindings(self, root):
    """Emit the optional <bindings> element of the package document."""
    if not self.book.bindings:
        return
    bindings = etree.SubElement(root, 'bindings', {})
    for binding in self.book.bindings:
        etree.SubElement(bindings, 'mediaType', binding)
def _write_opf_file(self, root):
    """Serialize the package tree and store it as content.opf."""
    opf_bytes = etree.tostring(root, pretty_print=True, encoding='utf-8', xml_declaration=True)
    self.out.writestr('%s/content.opf' % self.book.FOLDER_NAME, opf_bytes)
def _write_opf(self):
    """Build the complete OPF package document and write it to the archive."""
    package_attributes = {'xmlns': NAMESPACES['OPF'],
                          'unique-identifier': self.book.IDENTIFIER_ID,
                          'version': '3.0'}
    if self.book.direction and self.options['package_direction']:
        package_attributes['dir'] = self.book.direction

    root = etree.Element('package', package_attributes)

    # rendition vocabulary plus any user-registered prefixes
    all_prefixes = ['rendition: http://www.idpf.org/vocab/rendition/#'] + self.book.prefixes
    root.attrib['prefix'] = ' '.join(all_prefixes)

    # METADATA
    self._write_opf_metadata(root)
    # MANIFEST
    ncx_id = self._write_opf_manifest(root)
    # SPINE
    self._write_opf_spine(root, ncx_id)
    # GUIDE
    self._write_opf_guide(root)
    # BINDINGS
    self._write_opf_bindings(root)
    # WRITE FILE
    self._write_opf_file(root)
def _get_nav(self, item):
"""Build and serialise the EPUB3 navigation document for *item*.

Emits the TOC <nav>, and optionally the landmarks (guide) and
page-list sections depending on writer options. Returns the XML
as a UTF-8 byte string.
"""
# just a basic navigation for now
nav_xml = parse_string(self.book.get_template('nav'))
root = nav_xml.getroot()
root.set('lang', self.book.language)
root.attrib['{%s}lang' % NAMESPACES['XML']] = self.book.language
# All hrefs in the nav are written relative to the nav file's own directory.
nav_dir_name = os.path.dirname(item.file_name)
head = etree.SubElement(root, 'head')
title = etree.SubElement(head, 'title')
title.text = self.book.title
# for now this just handles css files and ignores others
for _link in item.links:
_lnk = etree.SubElement(head, 'link', {
'href': _link.get('href', ''), 'rel': 'stylesheet', 'type': 'text/css'
})
body = etree.SubElement(root, 'body')
nav = etree.SubElement(body, 'nav', {
'{%s}type' % NAMESPACES['EPUB']: 'toc',
'id': 'id',
'role': 'doc-toc',
})
content_title = etree.SubElement(nav, 'h2')
content_title.text = self.book.title
# Recursively emit nested <ol>/<li> lists mirroring the TOC structure.
def _create_section(itm, items):
ol = etree.SubElement(itm, 'ol')
for item in items:
if isinstance(item, tuple) or isinstance(item, list):
li = etree.SubElement(ol, 'li')
if isinstance(item[0], EpubHtml):
a = etree.SubElement(li, 'a', {'href': os.path.relpath(item[0].file_name, nav_dir_name)})
elif isinstance(item[0], Section) and item[0].href != '':
a = etree.SubElement(li, 'a', {'href': os.path.relpath(item[0].href, nav_dir_name)})
elif isinstance(item[0], Link):
a = etree.SubElement(li, 'a', {'href': os.path.relpath(item[0].href, nav_dir_name)})
else:
# Sections without a target become plain text labels.
a = etree.SubElement(li, 'span')
a.text = item[0].title
_create_section(li, item[1])
elif isinstance(item, Link):
li = etree.SubElement(ol, 'li')
a = etree.SubElement(li, 'a', {'href': os.path.relpath(item.href, nav_dir_name)})
a.text = item.title
elif isinstance(item, EpubHtml):
li = etree.SubElement(ol, 'li')
a = etree.SubElement(li, 'a', {'href': os.path.relpath(item.file_name, nav_dir_name)})
a.text = item.title
_create_section(nav, self.book.toc)
# LANDMARKS / GUIDE
# - http://www.idpf.org/epub/30/spec/epub30-contentdocs.html#sec-xhtml-nav-def-types-landmarks
if len(self.book.guide) > 0 and self.options.get('epub3_landmark'):
# Epub2 guide types do not map completely to epub3 landmark types.
guide_to_landscape_map = {
'notes': 'rearnotes',
'text': 'bodymatter'
}
guide_nav = etree.SubElement(body, 'nav', {'{%s}type' % NAMESPACES['EPUB']: 'landmarks'})
guide_content_title = etree.SubElement(guide_nav, 'h2')
guide_content_title.text = self.options.get('landmark_title', 'Guide')
guild_ol = etree.SubElement(guide_nav, 'ol')
for elem in self.book.guide:
li_item = etree.SubElement(guild_ol, 'li')
if 'item' in elem:
chap = elem.get('item', None)
if chap:
_href = chap.file_name
_title = chap.title
else:
_href = elem.get('href', '')
_title = elem.get('title', '')
# Unmapped guide types are passed through unchanged.
guide_type = elem.get('type', '')
a_item = etree.SubElement(li_item, 'a', {
'{%s}type' % NAMESPACES['EPUB']: guide_to_landscape_map.get(guide_type, guide_type),
'href': os.path.relpath(_href, nav_dir_name)
})
a_item.text = _title
# PAGE-LIST
if self.options.get('epub3_pages'):
inserted_pages = get_pages_for_items([item for item in self.book.get_items_of_type(ebooklib.ITEM_DOCUMENT) \
if not isinstance(item, EpubNav)])
if len(inserted_pages) > 0:
pagelist_nav = etree.SubElement(
body,
'nav',
{
'{%s}type' % NAMESPACES['EPUB']: 'page-list',
'id': 'pages',
'hidden': 'hidden',
}
)
pagelist_content_title = etree.SubElement(pagelist_nav, 'h2')
pagelist_content_title.text = self.options.get(
'pages_title', 'Pages'
)
pages_ol = etree.SubElement(pagelist_nav, 'ol')
for filename, pageref, label in inserted_pages:
li_item = etree.SubElement(pages_ol, 'li')
_href = u'{}#{}'.format(filename, pageref)
_title = label
a_item = etree.SubElement(li_item, 'a', {
'href': os.path.relpath(_href, nav_dir_name),
})
a_item.text = _title
tree_str = etree.tostring(nav_xml, pretty_print=True, encoding='utf-8', xml_declaration=True)
return tree_str
def _get_ncx(self):
"""Build and serialise the EPUB2 NCX table of contents.

Returns the NCX document as a UTF-8 byte string.
"""
# we should be able to setup language for NCX as also
ncx = parse_string(self.book.get_template('ncx'))
root = ncx.getroot()
head = etree.SubElement(root, 'head')
# get this id
# 'uid' is deliberately reused below; only the created elements matter.
uid = etree.SubElement(head, 'meta', {'content': self.book.uid, 'name': 'dtb:uid'})
uid = etree.SubElement(head, 'meta', {'content': '0', 'name': 'dtb:depth'})
uid = etree.SubElement(head, 'meta', {'content': '0', 'name': 'dtb:totalPageCount'})
uid = etree.SubElement(head, 'meta', {'content': '0', 'name': 'dtb:maxPageNumber'})
doc_title = etree.SubElement(root, 'docTitle')
title = etree.SubElement(doc_title, 'text')
title.text = self.book.title
# doc_author = etree.SubElement(root, 'docAuthor')
# author = etree.SubElement(doc_author, 'text')
# author.text = 'Name of the person'
# For now just make a very simple navMap
nav_map = etree.SubElement(root, 'navMap')
def _add_play_order(nav_point):
# Stamp the next sequential playOrder number onto this navPoint.
nav_point.set('playOrder', str(self._play_order['start_from']))
self._play_order['start_from'] += 1
def _create_section(itm, items, uid):
# Recursively emit navPoint elements; 'uid' numbers anonymous sections.
for item in items:
if isinstance(item, tuple) or isinstance(item, list):
section, subsection = item[0], item[1]
np = etree.SubElement(itm, 'navPoint', {
'id': section.get_id() if isinstance(section, EpubHtml) else 'sep_%d' % uid
})
if self._play_order['enabled']:
_add_play_order(np)
nl = etree.SubElement(np, 'navLabel')
nt = etree.SubElement(nl, 'text')
nt.text = section.title
# CAN NOT HAVE EMPTY SRC HERE
href = ''
if isinstance(section, EpubHtml):
href = section.file_name
elif isinstance(section, Section) and section.href != '':
href = section.href
elif isinstance(section, Link):
href = section.href
nc = etree.SubElement(np, 'content', {'src': href})
uid = _create_section(np, subsection, uid + 1)
elif isinstance(item, Link):
# Backfill an empty parent src with the first child's target.
_parent = itm
_content = _parent.find('content')
if _content is not None:
if _content.get('src') == '':
_content.set('src', item.href)
np = etree.SubElement(itm, 'navPoint', {'id': item.uid})
if self._play_order['enabled']:
_add_play_order(np)
nl = etree.SubElement(np, 'navLabel')
nt = etree.SubElement(nl, 'text')
nt.text = item.title
nc = etree.SubElement(np, 'content', {'src': item.href})
elif isinstance(item, EpubHtml):
_parent = itm
_content = _parent.find('content')
if _content is not None:
if _content.get('src') == '':
_content.set('src', item.file_name)
np = etree.SubElement(itm, 'navPoint', {'id': item.get_id()})
if self._play_order['enabled']:
_add_play_order(np)
nl = etree.SubElement(np, 'navLabel')
nt = etree.SubElement(nl, 'text')
nt.text = item.title
nc = etree.SubElement(np, 'content', {'src': item.file_name})
return uid
_create_section(nav_map, self.book.toc, 0)
tree_str = etree.tostring(root, pretty_print=True, encoding='utf-8', xml_declaration=True)
return tree_str
def _write_items(self):
    """Write every book item into the archive, rendering NCX/nav on the fly."""
    folder = self.book.FOLDER_NAME
    for item in self.book.get_items():
        if isinstance(item, EpubNcx):
            self.out.writestr('%s/%s' % (folder, item.file_name), self._get_ncx())
        elif isinstance(item, EpubNav):
            self.out.writestr('%s/%s' % (folder, item.file_name), self._get_nav(item))
        elif item.manifest:
            self.out.writestr('%s/%s' % (folder, item.file_name), item.get_content())
        else:
            # Non-manifest items are written at their literal path.
            self.out.writestr('%s' % item.file_name, item.get_content())
def write(self):
    """Create the EPUB archive on disk.

    Per the OCF specification the uncompressed 'mimetype' entry must be
    the first file in the archive, so it is written before anything else.
    """
    # allowZip64=True lets books larger than 2 GiB be written, resolving
    # the old "check for the option allowZip64" TODO.
    self.out = zipfile.ZipFile(self.file_name, 'w', zipfile.ZIP_DEFLATED,
                               allowZip64=True)
    try:
        self.out.writestr('mimetype', 'application/epub+zip',
                          compress_type=zipfile.ZIP_STORED)
        self._write_container()
        self._write_opf()
        self._write_items()
    finally:
        # Always close the archive, even if writing an entry fails.
        self.out.close()
class EpubReader(object):
"""Parse an EPUB file from disk into an EpubBook instance.

Typical usage: construct with a file name, call load() to get the
book, then process() to run configured plugins.
"""
DEFAULT_OPTIONS = {}
def __init__(self, epub_file_name, options=None):
# Path of the .epub archive to read.
self.file_name = epub_file_name
self.book = EpubBook()
self.zf = None
self.opf_file = ''
self.opf_dir = ''
self.options = dict(self.DEFAULT_OPTIONS)
if options:
self.options.update(options)
def process(self):
"""Run 'after_read' / 'html_after_read' hooks of configured plugins."""
# should cache this html parsing so we don't do it for every plugin
for plg in self.options.get('plugins', []):
if hasattr(plg, 'after_read'):
plg.after_read(self.book)
for item in self.book.get_items():
if isinstance(item, EpubHtml):
for plg in self.options.get('plugins', []):
if hasattr(plg, 'html_after_read'):
plg.html_after_read(self.book, item)
def load(self):
"""Parse the archive and return the populated EpubBook."""
self._load()
return self.book
def read_file(self, name):
"""Read one entry from the zip archive by name."""
# Raises KeyError
# NOTE(review): os.path.normpath uses the OS separator; on Windows this
# turns '/' into '\\', which would not match zip entry names - confirm.
name = os.path.normpath(name)
return self.zf.read(name)
def _load_container(self):
"""Locate the OPF package file via META-INF/container.xml."""
meta_inf = self.read_file('META-INF/container.xml')
tree = parse_string(meta_inf)
for root_file in tree.findall('//xmlns:rootfile[@media-type]', namespaces={'xmlns': NAMESPACES['CONTAINERNS']}):
if root_file.get('media-type') == 'application/oebps-package+xml':
self.opf_file = root_file.get('full-path')
self.opf_dir = zip_path.dirname(self.opf_file)
def _load_metadata(self):
"""Read version, unique identifier and all metadata into the book."""
container_root = self.container.getroot()
# get epub version
self.book.version = container_root.get('version', None)
# get unique-identifier
if container_root.get('unique-identifier', None):
self.book.IDENTIFIER_ID = container_root.get('unique-identifier')
# get xml:lang
# get metadata
metadata = self.container.find('{%s}%s' % (NAMESPACES['OPF'], 'metadata'))
nsmap = metadata.nsmap
nstags = dict((k, '{%s}' % v) for k, v in six.iteritems(nsmap))
default_ns = nstags.get(None, '')
nsdict = dict((v, {}) for v in nsmap.values())
def add_item(ns, tag, value, extra):
# Accumulate (value, attributes) pairs under nsdict[namespace][tag].
if ns not in nsdict:
nsdict[ns] = {}
values = nsdict[ns].setdefault(tag, [])
values.append((value, extra))
for t in metadata:
if not etree.iselement(t) or t.tag is etree.Comment:
continue
if t.tag == default_ns + 'meta':
# <meta name="prefix:name"> entries are filed under the prefix's namespace.
name = t.get('name')
others = dict((k, v) for k, v in t.items())
if name and ':' in name:
prefix, name = name.split(':', 1)
else:
prefix = None
add_item(t.nsmap.get(prefix, prefix), name, t.text, others)
else:
tag = t.tag[t.tag.rfind('}') + 1:]
if (t.prefix and t.prefix.lower() == 'dc') and tag == 'identifier':
_id = t.get('id', None)
if _id:
self.book.IDENTIFIER_ID = _id
others = dict((k, v) for k, v in t.items())
add_item(t.nsmap[t.prefix], tag, t.text, others)
self.book.metadata = nsdict
titles = self.book.get_metadata('DC', 'title')
if len(titles) > 0:
self.book.title = titles[0][0]
for value, others in self.book.get_metadata('DC', 'identifier'):
if others.get('id') == self.book.IDENTIFIER_ID:
self.book.uid = value
def _load_manifest(self):
"""Create an EpubItem subclass instance for every manifest entry."""
for r in self.container.find('{%s}%s' % (NAMESPACES['OPF'], 'manifest')):
if r is not None and r.tag != '{%s}item' % NAMESPACES['OPF']:
continue
media_type = r.get('media-type')
_properties = r.get('properties', '')
if _properties:
properties = _properties.split(' ')
else:
properties = []
# people use wrong content types
if media_type == 'image/jpg':
media_type = 'image/jpeg'
if media_type == 'application/x-dtbncx+xml':
ei = EpubNcx(uid=r.get('id'), file_name=unquote(r.get('href')))
ei.content = self.read_file(zip_path.join(self.opf_dir, ei.file_name))
# NOTE(review): this reads as 'if', so an NCX item created above falls
# through to the final 'else' and is replaced by a plain EpubItem;
# upstream ebooklib uses 'elif' here - confirm which is intended.
if media_type == 'application/smil+xml':
ei = EpubSMIL(uid=r.get('id'), file_name=unquote(r.get('href')))
ei.content = self.read_file(zip_path.join(self.opf_dir, ei.file_name))
elif media_type == 'application/xhtml+xml':
if 'nav' in properties:
ei = EpubNav(uid=r.get('id'), file_name=unquote(r.get('href')))
ei.content = self.read_file(zip_path.join(self.opf_dir, r.get('href')))
elif 'cover' in properties:
ei = EpubCoverHtml()
ei.content = self.read_file(zip_path.join(self.opf_dir, unquote(r.get('href'))))
else:
ei = EpubHtml()
ei.id = r.get('id')
ei.file_name = unquote(r.get('href'))
ei.media_type = media_type
ei.media_overlay = r.get('media-overlay', None)
ei.media_duration = r.get('duration', None)
ei.content = self.read_file(zip_path.join(self.opf_dir, ei.get_name()))
ei.properties = properties
elif media_type in IMAGE_MEDIA_TYPES:
if 'cover-image' in properties:
ei = EpubCover(uid=r.get('id'), file_name=unquote(r.get('href')))
ei.media_type = media_type
ei.content = self.read_file(zip_path.join(self.opf_dir, ei.get_name()))
else:
ei = EpubImage()
ei.id = r.get('id')
ei.file_name = unquote(r.get('href'))
ei.media_type = media_type
ei.content = self.read_file(zip_path.join(self.opf_dir, ei.get_name()))
else:
# different types
ei = EpubItem()
ei.id = r.get('id')
ei.file_name = unquote(r.get('href'))
ei.media_type = media_type
ei.content = self.read_file(zip_path.join(self.opf_dir, ei.get_name()))
self.book.add_item(ei)
def _parse_ncx(self, data):
"""Parse an NCX document into the book's table of contents."""
tree = parse_string(data)
tree_root = tree.getroot()
nav_map = tree_root.find('{%s}navMap' % NAMESPACES['DAISY'])
def _get_children(elems, n, nid):
# Recursively convert navPoint elements into Link / (Section, children).
label, content = '', ''
children = []
for a in elems.getchildren():
if a.tag == '{%s}navLabel' % NAMESPACES['DAISY']:
label = a.getchildren()[0].text
if a.tag == '{%s}content' % NAMESPACES['DAISY']:
content = a.get('src', '')
if a.tag == '{%s}navPoint' % NAMESPACES['DAISY']:
children.append(_get_children(a, n + 1, a.get('id', '')))
if len(children) > 0:
if n == 0:
return children
return (Section(label, href=content),
children)
else:
return Link(content, label, nid)
self.book.toc = _get_children(nav_map, 0, '')
def _parse_nav(self, data, base_path, navtype='toc'):
"""Parse an EPUB3 nav document; fills the TOC or the pages list."""
html_node = parse_html_string(data)
if navtype == 'toc':
# parsing the table of contents
nav_node = html_node.xpath("//nav[@*='toc']")[0]
else:
# parsing the list of pages
_page_list = html_node.xpath("//nav[@*='page-list']")
if len(_page_list) == 0:
return
nav_node = _page_list[0]
def parse_list(list_node):
# Turn the nested <ol>/<li> structure into Sections and Links.
items = []
for item_node in list_node.findall('li'):
sublist_node = item_node.find('ol')
link_node = item_node.find('a')
if sublist_node is not None:
title = item_node[0].text
children = parse_list(sublist_node)
if link_node is not None:
href = zip_path.normpath(zip_path.join(base_path, link_node.get('href')))
items.append((Section(title, href=href), children))
else:
items.append((Section(title), children))
elif link_node is not None:
title = link_node.text
href = zip_path.normpath(zip_path.join(base_path, link_node.get('href')))
items.append(Link(href, title))
return items
if navtype == 'toc':
self.book.toc = parse_list(nav_node.find('ol'))
elif nav_node is not None:
# generate the pages list if there is one
self.book.pages = parse_list(nav_node.find('ol'))
# generate the per-file pages lists
# because of the order of parsing the files, this can't be done
# when building the EpubHtml objects
htmlfiles = dict()
for htmlfile in self.book.items:
if isinstance(htmlfile, EpubHtml):
htmlfiles[htmlfile.file_name] = htmlfile
for page in self.book.pages:
try:
(filename, idref) = page.href.split('#')
except ValueError:
filename = page.href
if filename in htmlfiles:
htmlfiles[filename].pages.append(page)
def _load_spine(self):
"""Read the spine order, direction and (optional) NCX reference."""
spine = self.container.find('{%s}%s' % (NAMESPACES['OPF'], 'spine'))
self.book.spine = [(t.get('idref'), t.get('linear', 'yes')) for t in spine]
toc = spine.get('toc', '')
self.book.set_direction(spine.get('page-progression-direction', None))
# should read ncx or nav file
if toc:
try:
ncxFile = self.read_file(zip_path.join(self.opf_dir, self.book.get_item_with_id(toc).get_name()))
except KeyError:
raise EpubException(-1, 'Can not find ncx file.')
self._parse_ncx(ncxFile)
def _load_guide(self):
"""Read the optional EPUB2 <guide> into a list of dicts."""
guide = self.container.find('{%s}%s' % (NAMESPACES['OPF'], 'guide'))
if guide is not None:
self.book.guide = [{'href': t.get('href'), 'title': t.get('title'), 'type': t.get('type')} for t in guide]
def _load_opf_file(self):
"""Parse the OPF package file and populate the whole book."""
try:
s = self.read_file(self.opf_file)
except KeyError:
raise EpubException(-1, 'Can not find container file')
self.container = parse_string(s)
self._load_metadata()
self._load_manifest()
self._load_spine()
self._load_guide()
# read nav file if found
#
nav_item = next((item for item in self.book.items if isinstance(item, EpubNav)), None)
if nav_item:
if not self.book.toc:
# The NCX (if any) takes precedence; only fill the TOC from nav
# when it is still empty.
self._parse_nav(
nav_item.content,
zip_path.dirname(nav_item.file_name),
navtype='toc'
)
self._parse_nav(
nav_item.content,
zip_path.dirname(nav_item.file_name),
navtype='pages'
)
def _load(self):
"""Open the zip archive and drive the whole parse."""
try:
self.zf = zipfile.ZipFile(self.file_name, 'r', compression=zipfile.ZIP_DEFLATED, allowZip64=True)
except zipfile.BadZipfile as bz:
raise EpubException(0, 'Bad Zip file')
except zipfile.LargeZipFile as bz:
raise EpubException(1, 'Large Zip file')
# 1st check metadata
self._load_container()
self._load_opf_file()
self.zf.close()
# WRITE
def write_epub(name, book, options=None):
"""
Creates epub file with the content defined in EpubBook.
>>> ebooklib.write_epub('book.epub', book)
:Args:
- name: file name for the output file
- book: instance of EpubBook
- options: extra options as dictionary (optional)
"""
epub = EpubWriter(name, book, options)
epub.process()
try:
epub.write()
except IOError:
# NOTE(review): a failed write is silently swallowed here, leaving no
# file and no error - confirm this best-effort behaviour is intended.
pass
# READ
def read_epub(name, options=None):
    """
    Creates new instance of EpubBook with the content defined in the input file.
    >>> book = ebooklib.read_epub('book.epub')
    :Args:
    - name: full path to the input file
    - options: extra options as dictionary (optional)
    :Returns:
    Instance of EpubBook.
    """
    epub_reader = EpubReader(name, options)
    parsed_book = epub_reader.load()
    epub_reader.process()
    return parsed_book
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer.testing import condition
class TestHuffmanTree(unittest.TestCase):
    """Tests for functions.create_huffman_tree."""

    def test_empty(self):
        # An empty frequency dict cannot form a tree.
        self.assertRaises(ValueError, functions.create_huffman_tree, {})

    def test_simple(self):
        frequencies = {'x': 8, 'y': 6, 'z': 5, 'w': 4, 'v': 3}
        tree = functions.create_huffman_tree(frequencies)
        self.assertEqual((('z', 'y'), (('v', 'w'), 'x')), tree)
class TestBinaryHierarchicalSoftmax(unittest.TestCase):
    """Tests for functions.BinaryHierarchicalSoftmax on a fixed 5-leaf tree."""

    def setUp(self):
        tree = ((0, 1), ((2, 3), 4))
        self.func = functions.BinaryHierarchicalSoftmax(3, tree)
        self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
        self.t = numpy.array([0, 2])
        self.gy = numpy.random.uniform(-1, 1, (1, 1)).astype(numpy.float32)
        self.W = self.func.W.copy()

    @condition.retry(3)
    def test_sum(self):
        # The probabilities of all five leaves must sum to one.
        x = numpy.array([[1.0, 2.0, 3.0]], numpy.float32)
        total = 0
        for i in range(5):
            t = numpy.array([i])
            loss, = self.func.forward_cpu((x, t))
            self.assertEqual(loss.dtype, numpy.float32)
            self.assertEqual(loss.shape, ())
            total += numpy.exp(-loss)
        # Explicit tolerance (plus retry) because float32 rounding makes the
        # default exact-places comparison flaky.
        self.assertAlmostEqual(1.0, float(total), delta=1.0e-5)

    def check_backward(self, x_data, t_data, y_grad, use_cudnn=True):
        # Compare analytic gradients against numerical differentiation.
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        y = self.func(x, t)
        y.grad = y_grad
        y.backward()
        func = y.creator
        f = lambda: func.forward((x.data, t.data))
        gx, _, gW = gradient_check.numerical_grad(
            f, (x.data, t.data, func.W), (y.grad,), eps=1e-2)
        gradient_check.assert_allclose(cuda.to_cpu(gx), cuda.to_cpu(x.grad))
        gradient_check.assert_allclose(cuda.to_cpu(gW), cuda.to_cpu(func.gW))

    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.x, self.t, self.gy)
Fix test for HSM
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer.testing import condition
class TestHuffmanTree(unittest.TestCase):
"""Tests for functions.create_huffman_tree."""
def test_empty(self):
# An empty frequency dict cannot form a tree.
with self.assertRaises(ValueError):
functions.create_huffman_tree({})
def test_simple(self):
tree = functions.create_huffman_tree(
{'x': 8, 'y': 6, 'z': 5, 'w': 4, 'v': 3})
expect = (('z', 'y'), (('v', 'w'), 'x'))
self.assertEqual(expect, tree)
class TestBinaryHierarchicalSoftmax(unittest.TestCase):
"""Tests for functions.BinaryHierarchicalSoftmax on a fixed 5-leaf tree."""
def setUp(self):
tree = ((0, 1), ((2, 3), 4))
self.func = functions.BinaryHierarchicalSoftmax(3, tree)
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
self.t = numpy.array([0, 2])
self.gy = numpy.random.uniform(-1, 1, (1, 1)).astype(numpy.float32)
self.W = self.func.W.copy()
@condition.retry(3)
def test_sum(self):
# The probabilities of all five leaves must sum to (almost) one.
x = numpy.array([[1.0, 2.0, 3.0]], numpy.float32)
total = 0
for i in range(5):
t = numpy.array([i])
loss, = self.func.forward_cpu((x, t))
self.assertEqual(loss.dtype, numpy.float32)
self.assertEqual(loss.shape, ())
total += numpy.exp(-loss)
# delta tolerance because float32 rounding makes exact equality flaky.
self.assertAlmostEqual(1.0, float(total), delta=1.0e-5)
def check_backward(self, x_data, t_data, y_grad, use_cudnn=True):
# Compare analytic gradients against numerical differentiation.
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
y = self.func(x, t)
y.grad = y_grad
y.backward()
func = y.creator
f = lambda: func.forward((x.data, t.data))
gx, _, gW = gradient_check.numerical_grad(
f, (x.data, t.data, func.W), (y.grad,), eps=1e-2)
gradient_check.assert_allclose(cuda.to_cpu(gx), cuda.to_cpu(x.grad))
gradient_check.assert_allclose(cuda.to_cpu(gW), cuda.to_cpu(func.gW))
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.t, self.gy)
|
"""
Downloads and parses the variables table from the documentation
author: chris @ sihrc
"""
#Python Modules
import urllib2, unicodedata
from bs4 import BeautifulSoup as Soup
#Local Modules
import config
from wrappers import debug
@debug
def download(datafile):
    """
    From get_features.py
    Downloads the documentation as text file from HTML and caches it on disk.
    """
    url = config.tables.format(datafile.lower())
    page = urllib2.urlopen(url).read()
    destination = config.path("..", "data", datafile, "data", "tables.txt")
    with open(destination, 'wb') as f:
        f.write(page)
    return page
@debug
def read_tables(datafile):
    """
    From get_features.py
    Parses the HTML as plain text
    Returns dictionary of {titles:variables}
    """
    path = config.path("..", "data", datafile, "data", "tables.txt")
    if not config.os.path.exists(path):
        page = download(datafile)
    else:
        with open(path, 'rb') as f:
            page = f.read()
    # Grab relevant section
    start = page.find("<a name=\"DVariable\">D. Variable-Source Crosswalk</a>")
    end = page.rfind("<a name=\"Appendix1\">")
    # NOTE(review): -abs(end) treats the found index as a negative offset from
    # the end of the page; page[start:end] looks like the intended window -
    # confirm before changing (behaviour kept as-is).
    soup = Soup(page[start:-abs(end)])
    # Find tables and titles
    tables = [[str(tag.text) for tag in line.find_all("th")][3:] for line in soup.find_all("table", {"class": "contentStyle"})]
    titles = [str(title.text) for title in soup.find_all("p", {"class": "contentStyle"})][2:]
    if len(titles) == 0:
        # NOTE(review): {"class", "dtCaption"} is a set, not an attribute
        # dict - probably meant {"class": "dtCaption"}; kept as-is.
        titles = [str(title.text) for title in soup.find_all("caption", {"class", "dtCaption"})]
    # Debug print statements removed: they polluted stdout for library
    # callers (matches the cleaned-up copy of this module).
    # Create dictionary
    variables = dict(zip(titles, tables))
    return variables
# CLI entry point: parse the tables for the datafile named on the command line.
if __name__ == "__main__":
import sys
read_tables(sys.argv[1])
Removed print statements in get_features
"""
Downloads and parses the variables table from the documentation
author: chris @ sihrc
"""
#Python Modules
import urllib2, unicodedata
from bs4 import BeautifulSoup as Soup
#Local Modules
import config
from wrappers import debug
@debug
def download(datafile):
"""
From get_features.py\n
Downloads the documentation as text file from HTML
"""
# Fetch the documentation page for this datafile (name is lower-cased
# to build the URL) and cache the raw HTML next to the dataset.
page = urllib2.urlopen(config.tables.format(datafile.lower())).read()
with open(config.path("..","data",datafile,"data", "tables.txt"), 'wb') as f:
f.write(page)
return page
@debug
def read_tables(datafile):
"""
From get_features.py
Parses the HTML as plain text
Returns dictionary of {titles:variables}
"""
# Use the cached copy when present; otherwise download it first.
path = config.path("..","data",datafile,"data", "tables.txt")
if not config.os.path.exists(path):
page = download(datafile)
else:
with open(path, 'rb') as f:
page = f.read()
#Grab relevant section
start = page.find("<a name=\"DVariable\">D. Variable-Source Crosswalk</a>")
end = page.rfind("<a name=\"Appendix1\">")
# NOTE(review): -abs(end) treats the found index as a negative offset from
# the end of the page; page[start:end] looks like the intended window - confirm.
soup = Soup(page[start:-abs(end)])
#Find tables and titles
tables = [[str(tag.text) for tag in line.find_all("th")][3:] for line in soup.find_all("table",{"class":"contentStyle"})]
titles = [str(title.text) for title in soup.find_all("p",{"class":"contentStyle"})][2:]
# NOTE(review): {"class","dtCaption"} is a set, not an attribute dict -
# probably meant {"class": "dtCaption"}; confirm.
if len(titles) == 0: titles = [str(title.text) for title in soup.find_all("caption",{"class","dtCaption"})]
#Create dictionary
variables = dict(zip(titles,tables))
return variables
# CLI entry point: parse the tables for the datafile named on the command line.
if __name__ == "__main__":
import sys
read_tables(sys.argv[1])
'''
Created on 14.06.2011
@author: michi
'''
import datetime
from PyQt4.QtCore import QAbstractTableModel, QModelIndex, Qt, QVariant, QString
from sqlalchemy.orm import object_mapper, ColumnProperty, RelationshipProperty
from sqlalchemy.util import NamedTuple
from ems import qt4
from ems.thirdparty.odict import OrderedDict
class SAOrmSearchModel(QAbstractTableModel):
"""Read-only Qt table model backed by a SQLAlchemy ORM query.

Rows are ORM objects fetched via the query builder; columns are
(possibly dotted) property paths. Results, objects and header names
are cached; the model re-queries lazily whenever marked dirty.
"""
def __init__(self,session, queriedObject, querybuilder, filter=None,
columns = [],
dataListener=None,
appendOptions = None):
super(SAOrmSearchModel, self).__init__()
self._session = session
self.__dataListener = dataListener
self._queriedObject = queriedObject
# Per-row caches: rendered QVariants and the underlying ORM objects.
self._resultCache = {}
self._objectCache = {}
self._headerCache = {}
self._columns = columns
if not len(self._columns):
self._columns = self.possibleColumns
self._appendOptions = appendOptions
self._mapper = None
self._ormProperties = None
self._flagsCache = {}
self._queryBuilder = querybuilder
self._filter = filter
self._askCount = 0
try:
self._queryBuilder.propertyNames
except KeyError, e:
print e
print "Mein Objekt: %s" % self._queriedObject
raise e
self._query = None
self._headerNameCache = {}
self._defaultColumns = []
self._columnName2Index = self._buildReversedColumnLookup(columns)
# Dirty flag: a query is (re-)executed on next access via perform().
self._dirty = True
@property
def queryBuilder(self):
return self._queryBuilder
def getQuery(self):
return self._query
def setQuery(self, query):
# TODO: dirty fix because of eagerload, which is not executed when the
# columns are set. The code below the raise is intentionally dead.
raise NotImplementedError("This feature has been throwed out")
self._query = query
self._dirty = True
self.perform()
query = property(getQuery, setQuery)
def getFilter(self):
return self._filter
def setFilter(self, filter):
# Changing the filter invalidates all caches and re-runs the query.
self._filter = filter
self._dirty = True
self.perform()
filter = property(getFilter, setFilter)
@property
def mapper(self):
# Lazily resolved SQLAlchemy mapper of the queried class.
if self._mapper is None:
self._mapper = object_mapper(self._queriedObject())
return self._mapper
@property
def ormProperties(self):
# Ordered mapping of property name -> ORM property, built once.
if self._ormProperties is None:
self._ormProperties = OrderedDict()
for propertyName in self._queryBuilder.propertyNamesDecorated:
self._ormProperties[propertyName] = \
self._queryBuilder.properties[propertyName]
#self._ormProperties.append(property)
return self._ormProperties
@property
def possibleColumns(self):
if not len(self._defaultColumns):
self._defaultColumns = self.__buildDefaultColumns()
return self._defaultColumns
def __buildDefaultColumns(self):
# Default column set: every decorated ORM property, in order.
columns = []
for property in self.ormProperties.keys():
columns.append(property)
return columns
def _buildReversedColumnLookup(self, columns):
# Map column name -> column index for getIndexByPropertyName().
i = 0
reversed = {}
for column in columns:
reversed[str(column)] = i
i += 1
return reversed
def getColumns(self):
return self._columns
def setColumns(self, cols):
self._columns = cols
self._dirty = True
self.perform()
columns = property(getColumns, setColumns)
@property
def session(self):
return self._session
def rowCount(self, index=QModelIndex()):
self.perform()
return len(self._objectCache)
def columnCount(self, index=QModelIndex()):
self.perform()
return len(self._columns)
def getPropertyNameByIndex(self, index):
return self._columns[index]
def getIndexByPropertyName(self, name):
return self._columnName2Index[name]
def extractValue(self, index, propertyName):
# Resolve a (possibly dotted) property path against the row's object.
currentObj = self._objectCache[index.row()]
if hasattr(currentObj, propertyName):
return currentObj.__getattribute__(propertyName)
else:
# NOTE(review): str.find returns -1 when '.' is absent, which is
# truthy - this branch runs for almost any name; confirm intended.
if propertyName.find('.'):
stack = propertyName.split('.')
value = self._extractValue(currentObj, stack)
if value:
return value
return "*Nichts*"
def _extractValue(self, obj, pathStack):
# Walk one path segment at a time; returns None when a segment is missing.
if(hasattr(obj, pathStack[0])):
if len(pathStack) < 2:
return obj.__getattribute__(pathStack[0])
nextObj = obj.__getattribute__(pathStack[0])
pathStack.pop(0)
return self._extractValue(nextObj, pathStack)
def getDataListener(self):
return self.__dataListener
def setDataListener(self, dataListener):
self.__dataListener = dataListener
def delDataListener(self):
self.__dataListener = None
dataListener = property(getDataListener,setDataListener,delDataListener)
def data(self, index, role=Qt.DisplayRole):
#return QVariant()
self._askCount += 1
if self.__dataListener is not None:
self.__dataListener.data(index, role)
self.perform()
if not index.isValid() or \
not (0 <= index.row() < self.rowCount()):
return QVariant()
if role in (Qt.DisplayRole, Qt.EditRole):
# Serve from the per-cell cache when possible.
if self._resultCache[index.row()].has_key(index.column()):
# print "cacheHit %s" % self._askCount
return self._resultCache[index.row()][index.column()]
columnName = self.getPropertyNameByIndex(index.column())
value = self.extractValue(index, columnName)
if isinstance(value, basestring):
self._resultCache[index.row()][index.column()] = QVariant(unicode(value))
return self._resultCache[index.row()][index.column()]
elif hasattr(value.__class__,'__ormDecorator__'):
# Objects with an ORM decorator render via its display string.
self._resultCache[index.row()][index.column()] = \
QVariant(value.__class__.__ormDecorator__().getReprasentiveString(value))
return self._resultCache[index.row()][index.column()]
elif isinstance(value, datetime.datetime):
self._resultCache[index.row()][index.column()] = \
QVariant(value.strftime("%c"))
return self._resultCache[index.row()][index.column()]
self._resultCache[index.row()][index.column()] = QVariant(value)
return self._resultCache[index.row()][index.column()]
if role == qt4.ColumnNameRole:
# print "columnNameRole"
return QVariant(unicode(self._queryBuilder.currentColumnList[index.column()]))
return QVariant()
def getObject(self, row):
# Returns the ORM object behind a row, or None when out of range.
if self._objectCache.has_key(row):
return self._objectCache[row]
def headerData(self, section, orientation, role=Qt.DisplayRole):
#print "headerData"
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return QVariant(int(Qt.AlignLeft|Qt.AlignVCenter))
return QVariant(int(Qt.AlignRight|Qt.AlignVCenter))
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
columnName = unicode(self.getPropertyNameByIndex(section))
name = self.getPropertyFriendlyName(columnName)
return QVariant(name)
# Vertical header: 1-based row numbers.
return QVariant(int(section + 1))
def getPropertyFriendlyName(self, propertyPath):
# Human-readable header label, resolved via the ORM decorator and cached.
if not self._headerNameCache.has_key(propertyPath):
fieldName = propertyPath.split('.')[-1:][0]
#print "%s %s" % (columnName, columnName.split('.')[-1:][0])
try:
dec = self._queryBuilder.propertyName2Class[propertyPath].__ormDecorator__()
name = dec.getPropertyFriendlyName(fieldName)
except KeyError:
name = fieldName
self._headerNameCache[propertyPath] = QString.fromUtf8(name)
return self._headerNameCache[propertyPath]
def isPrimaryKey(self, index):
# NOTE(review): unfinished stub - references the cache without using it
# and executes a bare print; see the commented-out flags() below.
self._flagsCache
print
# def flags(self, index):
#
# propertyName = self.getPropertyNameByIndex(index.column())
# if not self._flagsCache.has_key(propertyName):
# isPk = False
# if isinstance(self.ormProperties[propertyName], ColumnProperty):
# for col in self.ormProperties[propertyName].columns:
# if col.primary_key:
# isPk = True
# if not isPk:
# self._flagsCache[propertyName] = Qt.ItemIsEditable | Qt.ItemIsSelectable | Qt.ItemIsEnabled
# else:
# self._flagsCache[propertyName] = Qt.ItemIsSelectable | Qt.ItemIsEnabled
# else:
# self._flagsCache[propertyName] = Qt.ItemIsSelectable | Qt.ItemIsEnabled
#
# return self._flagsCache[propertyName]
def isDataChanged(self):
# True when the session holds unflushed modifications.
return self._session.dirty
def perform(self):
"""Re-run the query and rebuild all caches when the model is dirty."""
if not self._dirty:
return
#self.beginResetModel()
#print "%s : I actually perform" % self._queriedObject
#print self._session.get_bind(self._queriedObject)
i = 0
self.beginResetModel()
self._resultCache.clear()
self._objectCache.clear()
self._headerCache.clear()
self._askCount = 0
query = self._queryBuilder.getQuery(self._session,
propertySelection=self._columns,
filter=self._filter,
appendOptions=self._appendOptions)
for obj in query.all():
# Rows may come back as NamedTuples; the entity is the first member.
if isinstance(obj, NamedTuple):
self._objectCache[i] = obj[0]
else:
self._objectCache[i] = obj
#Create ResultCache Structure
self._resultCache[i] = {}
i += 1
self._dirty = False
self.endResetModel()
forceReset() added
'''
Created on 14.06.2011
@author: michi
'''
import datetime
from PyQt4.QtCore import QAbstractTableModel, QModelIndex, Qt, QVariant, QString
from sqlalchemy.orm import object_mapper, ColumnProperty, RelationshipProperty
from sqlalchemy.util import NamedTuple
from ems import qt4
from ems.thirdparty.odict import OrderedDict
class SAOrmSearchModel(QAbstractTableModel):
def __init__(self,session, queriedObject, querybuilder, filter=None,
columns = [],
dataListener=None,
appendOptions = None):
super(SAOrmSearchModel, self).__init__()
self._session = session
self.__dataListener = dataListener
self._queriedObject = queriedObject
self._resultCache = {}
self._objectCache = {}
self._headerCache = {}
self._columns = columns
if not len(self._columns):
self._columns = self.possibleColumns
self._appendOptions = appendOptions
self._mapper = None
self._ormProperties = None
self._flagsCache = {}
self._queryBuilder = querybuilder
self._filter = filter
self._askCount = 0
try:
self._queryBuilder.propertyNames
except KeyError, e:
print e
print "Mein Objekt: %s" % self._queriedObject
raise e
self._query = None
self._headerNameCache = {}
self._defaultColumns = []
self._columnName2Index = self._buildReversedColumnLookup(columns)
self._dirty = True
@property
def queryBuilder(self):
return self._queryBuilder
def getQuery(self):
return self._query
def setQuery(self, query):
#TODO: Dirty Fix wegen eagerload, welches nicht beim Setzen der Columns ausgefuehrt wird
raise NotImplementedError("This feature has been throwed out")
self._query = query
self._dirty = True
self.perform()
query = property(getQuery, setQuery)
def getFilter(self):
return self._filter
def setFilter(self, filter):
self._filter = filter
self._dirty = True
self.perform()
filter = property(getFilter, setFilter)
@property
def mapper(self):
if self._mapper is None:
self._mapper = object_mapper(self._queriedObject())
return self._mapper
@property
def ormProperties(self):
if self._ormProperties is None:
self._ormProperties = OrderedDict()
for propertyName in self._queryBuilder.propertyNamesDecorated:
self._ormProperties[propertyName] = \
self._queryBuilder.properties[propertyName]
#self._ormProperties.append(property)
return self._ormProperties
@property
def possibleColumns(self):
if not len(self._defaultColumns):
self._defaultColumns = self.__buildDefaultColumns()
return self._defaultColumns
def __buildDefaultColumns(self):
columns = []
for property in self.ormProperties.keys():
columns.append(property)
return columns
def _buildReversedColumnLookup(self, columns):
i = 0
reversed = {}
for column in columns:
reversed[str(column)] = i
i += 1
return reversed
def getColumns(self):
return self._columns
def setColumns(self, cols):
self._columns = cols
self._dirty = True
self.perform()
columns = property(getColumns, setColumns)
@property
def session(self):
return self._session
def rowCount(self, index=QModelIndex()):
self.perform()
return len(self._objectCache)
def columnCount(self, index=QModelIndex()):
self.perform()
return len(self._columns)
def getPropertyNameByIndex(self, index):
return self._columns[index]
def getIndexByPropertyName(self, name):
return self._columnName2Index[name]
def extractValue(self, index, propertyName):
currentObj = self._objectCache[index.row()]
if hasattr(currentObj, propertyName):
return currentObj.__getattribute__(propertyName)
else:
if propertyName.find('.'):
stack = propertyName.split('.')
value = self._extractValue(currentObj, stack)
if value:
return value
return "*Nichts*"
def _extractValue(self, obj, pathStack):
if(hasattr(obj, pathStack[0])):
if len(pathStack) < 2:
return obj.__getattribute__(pathStack[0])
nextObj = obj.__getattribute__(pathStack[0])
pathStack.pop(0)
return self._extractValue(nextObj, pathStack)
def getDataListener(self):
return self.__dataListener
def setDataListener(self, dataListener):
self.__dataListener = dataListener
def delDataListener(self):
self.__dataListener = None
dataListener = property(getDataListener,setDataListener,delDataListener)
def data(self, index, role=Qt.DisplayRole):
#return QVariant()
self._askCount += 1
if self.__dataListener is not None:
self.__dataListener.data(index, role)
self.perform()
if not index.isValid() or \
not (0 <= index.row() < self.rowCount()):
return QVariant()
if role in (Qt.DisplayRole, Qt.EditRole):
if self._resultCache[index.row()].has_key(index.column()):
# print "cacheHit %s" % self._askCount
return self._resultCache[index.row()][index.column()]
columnName = self.getPropertyNameByIndex(index.column())
value = self.extractValue(index, columnName)
if isinstance(value, basestring):
self._resultCache[index.row()][index.column()] = QVariant(unicode(value))
return self._resultCache[index.row()][index.column()]
elif hasattr(value.__class__,'__ormDecorator__'):
self._resultCache[index.row()][index.column()] = \
QVariant(value.__class__.__ormDecorator__().getReprasentiveString(value))
return self._resultCache[index.row()][index.column()]
elif isinstance(value, datetime.datetime):
self._resultCache[index.row()][index.column()] = \
QVariant(value.strftime("%c"))
return self._resultCache[index.row()][index.column()]
self._resultCache[index.row()][index.column()] = QVariant(value)
return self._resultCache[index.row()][index.column()]
if role == qt4.ColumnNameRole:
# print "columnNameRole"
return QVariant(unicode(self._queryBuilder.currentColumnList[index.column()]))
return QVariant()
def getObject(self, row):
if self._objectCache.has_key(row):
return self._objectCache[row]
def headerData(self, section, orientation, role=Qt.DisplayRole):
#print "headerData"
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return QVariant(int(Qt.AlignLeft|Qt.AlignVCenter))
return QVariant(int(Qt.AlignRight|Qt.AlignVCenter))
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
columnName = unicode(self.getPropertyNameByIndex(section))
name = self.getPropertyFriendlyName(columnName)
return QVariant(name)
return QVariant(int(section + 1))
def getPropertyFriendlyName(self, propertyPath):
if not self._headerNameCache.has_key(propertyPath):
fieldName = propertyPath.split('.')[-1:][0]
#print "%s %s" % (columnName, columnName.split('.')[-1:][0])
try:
dec = self._queryBuilder.propertyName2Class[propertyPath].__ormDecorator__()
name = dec.getPropertyFriendlyName(fieldName)
except KeyError:
name = fieldName
self._headerNameCache[propertyPath] = QString.fromUtf8(name)
return self._headerNameCache[propertyPath]
def isPrimaryKey(self, index):
self._flagsCache
print
# def flags(self, index):
#
# propertyName = self.getPropertyNameByIndex(index.column())
# if not self._flagsCache.has_key(propertyName):
# isPk = False
# if isinstance(self.ormProperties[propertyName], ColumnProperty):
# for col in self.ormProperties[propertyName].columns:
# if col.primary_key:
# isPk = True
# if not isPk:
# self._flagsCache[propertyName] = Qt.ItemIsEditable | Qt.ItemIsSelectable | Qt.ItemIsEnabled
# else:
# self._flagsCache[propertyName] = Qt.ItemIsSelectable | Qt.ItemIsEnabled
# else:
# self._flagsCache[propertyName] = Qt.ItemIsSelectable | Qt.ItemIsEnabled
#
# return self._flagsCache[propertyName]
def isDataChanged(self):
return self._session.dirty
def forceReset(self):
self._dirty = True
self.perform()
def perform(self):
if not self._dirty:
return
#self.beginResetModel()
#print "%s : I actually perform" % self._queriedObject
#print self._session.get_bind(self._queriedObject)
i = 0
self.beginResetModel()
self._resultCache.clear()
self._objectCache.clear()
self._headerCache.clear()
self._askCount = 0
query = self._queryBuilder.getQuery(self._session,
propertySelection=self._columns,
filter=self._filter,
appendOptions=self._appendOptions)
for obj in query.all():
if isinstance(obj, NamedTuple):
self._objectCache[i] = obj[0]
else:
self._objectCache[i] = obj
#Create ResultCache Structure
self._resultCache[i] = {}
i += 1
self._dirty = False
self.endResetModel() |
#!/usr/bin/env python
# Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Script to automatically roll dependencies in the WebRTC DEPS file."""
import argparse
import base64
import collections
import logging
import os
import re
import subprocess
import sys
import urllib
# Skip these dependencies (list without solution name prefix).
DONT_AUTOROLL_THESE = [
  'src/third_party/gflags/src',
  'src/third_party/winsdk_samples',
  'src/webrtc/examples/androidtests/third_party/gradle',
]
# Run these CQ trybots in addition to the default ones in infra/config/cq.cfg.
# BUGFIX: no trailing ';' -- the CQ rejects CQ_INCLUDE_TRYBOTS values that
# end with a separator.
EXTRA_TRYBOTS = (
  'master.internal.tryserver.corp.webrtc:linux_internal'
)
WEBRTC_URL = 'https://chromium.googlesource.com/external/webrtc'
CHROMIUM_SRC_URL = 'https://chromium.googlesource.com/chromium/src'
# Gitiles URL templates: single commit, commit log range, file at a revision.
CHROMIUM_COMMIT_TEMPLATE = CHROMIUM_SRC_URL + '/+/%s'
CHROMIUM_LOG_TEMPLATE = CHROMIUM_SRC_URL + '/+log/%s'
CHROMIUM_FILE_TEMPLATE = CHROMIUM_SRC_URL + '/+/%s/%s'
# Matches the 'Cr-Commit-Position: refs/...#NNN' commit footer line.
COMMIT_POSITION_RE = re.compile('^Cr-Commit-Position: .*#([0-9]+).*$')
# Matches "CLANG_REVISION = 'NNN'" in tools/clang/scripts/update.py.
CLANG_REVISION_RE = re.compile(r'^CLANG_REVISION = \'(\d+)\'$')
ROLL_BRANCH_NAME = 'roll_chromium_revision'
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
CHECKOUT_SRC_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, os.pardir,
                                                 os.pardir))
CHECKOUT_ROOT_DIR = os.path.realpath(os.path.join(CHECKOUT_SRC_DIR, os.pardir))
sys.path.append(os.path.join(CHECKOUT_SRC_DIR, 'build'))
import find_depot_tools
find_depot_tools.add_depot_tools_to_path()
from gclient import GClientKeywords
# Path of the clang update script, relative to the Chromium src root.
CLANG_UPDATE_SCRIPT_URL_PATH = 'tools/clang/scripts/update.py'
# The same script inside the local checkout.
CLANG_UPDATE_SCRIPT_LOCAL_PATH = os.path.join(CHECKOUT_SRC_DIR, 'tools',
                                              'clang', 'scripts', 'update.py')
# One DEPS entry: checkout path, repo URL and pinned revision.
DepsEntry = collections.namedtuple('DepsEntry', 'path url revision')
# A dependency whose pinned revision rolls from current_rev to new_rev.
ChangedDep = collections.namedtuple('ChangedDep',
                                    'path url current_rev new_rev')
class RollError(Exception):
  """Raised when the roll cannot proceed (e.g. unparsable input)."""
def ParseDepsDict(deps_content):
  """Evaluate DEPS file content and return its local scope as a dict.

  GClientKeywords.VarImpl lets Var() lookups inside the DEPS file resolve
  against the file's own variables.
  """
  local_scope = {}
  var = GClientKeywords.VarImpl({}, local_scope)
  global_scope = {
    'Var': var.Lookup,
    'deps_os': {},
  }
  # NOTE: exec() of DEPS content is only fed from the local checkout or a
  # gitiles fetch here; never pass untrusted data through this path.
  exec(deps_content, global_scope, local_scope)
  return local_scope
def ParseLocalDepsFile(filename):
  """Parse a DEPS file on local disk and return its dict of definitions."""
  with open(filename, 'rb') as deps_file:
    raw_content = deps_file.read()
  return ParseDepsDict(raw_content)
def ParseRemoteCrDepsFile(revision):
  """Fetch Chromium's DEPS file at *revision* and parse it into a dict."""
  return ParseDepsDict(ReadRemoteCrFile('DEPS', revision))
def ParseCommitPosition(commit_message):
  """Extract the numeric commit position from a Chromium commit message.

  Scans the message bottom-up (the footer is at the end) for a
  Cr-Commit-Position line and returns the '#NNN' number as a string.
  Exits the process when no such line exists.
  """
  for line in reversed(commit_message.splitlines()):
    m = COMMIT_POSITION_RE.match(line.strip())
    if m:
      return m.group(1)
  logging.error('Failed to parse commit position id from:\n%s\n',
                commit_message)
  sys.exit(-1)
def _RunCommand(command, working_dir=None, ignore_exit_code=False,
extra_env=None):
"""Runs a command and returns the output from that command.
If the command fails (exit code != 0), the function will exit the process.
Returns:
A tuple containing the stdout and stderr outputs as strings.
"""
working_dir = working_dir or CHECKOUT_SRC_DIR
logging.debug('CMD: %s CWD: %s', ' '.join(command), working_dir)
env = os.environ.copy()
if extra_env:
assert all(type(value) == str for value in extra_env.values())
logging.debug('extra env: %s', extra_env)
env.update(extra_env)
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env,
cwd=working_dir, universal_newlines=True)
std_output = p.stdout.read()
err_output = p.stderr.read()
p.wait()
p.stdout.close()
p.stderr.close()
if not ignore_exit_code and p.returncode != 0:
logging.error('Command failed: %s\n'
'stdout:\n%s\n'
'stderr:\n%s\n', ' '.join(command), std_output, err_output)
sys.exit(p.returncode)
return std_output, err_output
def _GetBranches():
  """Returns a tuple of active,branches.

  The 'active' is the name of the currently active branch and 'branches' is a
  list of all branches.
  """
  raw_lines = _RunCommand(['git', 'branch'])[0].split('\n')
  all_branches = []
  current = ''
  for raw_line in raw_lines:
    if '*' in raw_line:
      # Git marks the checked-out branch with a leading '*'.
      current = raw_line[1:].strip()
      all_branches.append(current)
    else:
      name = raw_line.strip()
      if name:
        all_branches.append(name)
  return current, all_branches
def _ReadGitilesContent(url):
  """Fetch *url* from gitiles and return its BASE64-decoded payload."""
  # Gitiles only serves raw files base64-encoded (?format=TEXT) until
  # https://code.google.com/p/gitiles/issues/detail?id=7 is fixed.
  encoded = ReadUrlContent(url + '?format=TEXT')
  return base64.b64decode(encoded[0])
def ReadRemoteCrFile(path_below_src, revision):
  """Reads a remote Chromium file of a specific revision. Returns a string."""
  file_url = CHROMIUM_FILE_TEMPLATE % (revision, path_below_src)
  return _ReadGitilesContent(file_url)
def ReadRemoteCrCommit(revision):
  """Fetch the Chromium commit message for *revision* as a string."""
  commit_url = CHROMIUM_COMMIT_TEMPLATE % revision
  return _ReadGitilesContent(commit_url)
def ReadUrlContent(url):
  """Connect to a remote host and read the contents. Returns a list of lines."""
  # NOTE: urllib.urlopen is the Python 2 API (urllib.request in Python 3).
  conn = urllib.urlopen(url)
  try:
    return conn.readlines()
  except IOError as e:
    logging.exception('Error connecting to %s. Error: %s', url, e)
    raise
  finally:
    # Always release the connection, whether the read succeeded or not.
    conn.close()
def GetMatchingDepsEntries(depsentry_dict, dir_path):
  """Gets all deps entries matching the provided path.

  This list may contain more than one DepsEntry object.
  Example: dir_path='src/testing' would give results containing both
  'src/testing/gtest' and 'src/testing/gmock' deps entries for Chromium's DEPS.
  Example 2: dir_path='src/build' should return 'src/build' but not
  'src/buildtools'.

  Returns:
    A list of DepsEntry objects.
  """
  result = []
  dir_path_parts = dir_path.split('/')
  for path, depsentry in depsentry_dict.items():
    if path == dir_path:
      result.append(depsentry)
    else:
      parts = path.split('/')
      # BUGFIX: a path shorter than dir_path (e.g. 'src' vs 'src/testing')
      # used to raise IndexError in the component comparison below.
      if len(parts) < len(dir_path_parts):
        continue
      # Component-wise prefix match, so 'src/build' does not match
      # 'src/buildtools'.
      if all(part == parts[i]
             for i, part in enumerate(dir_path_parts)):
        result.append(depsentry)
  return result
def BuildDepsentryDict(deps_dict):
  """Builds a dict of paths to DepsEntry objects from a raw parsed deps dict."""
  result = {}
  def AddDepsEntries(deps_subdict):
    # First definition of a path wins; later deps_os entries never override.
    for path, deps_url in deps_subdict.items():
      if path not in result:
        url, revision = deps_url.split('@') if deps_url else (None, None)
        result[path] = DepsEntry(path, url, revision)
  AddDepsEntries(deps_dict['deps'])
  # BUGFIX: 'unix' was listed twice in the original deps_os list.
  for deps_os in ['win', 'mac', 'unix', 'android', 'ios']:
    AddDepsEntries(deps_dict.get('deps_os', {}).get(deps_os, {}))
  return result
def CalculateChangedDeps(webrtc_deps, new_cr_deps):
  """
  Calculate changed deps entries based on entries defined in the WebRTC DEPS
  file:
  - If a shared dependency with the Chromium DEPS file: roll it to the same
    revision as Chromium (i.e. entry in the new_cr_deps dict)
  - If it's a Chromium sub-directory, roll it to the HEAD revision (notice
    this means it may be ahead of the chromium_revision, but generally these
    should be close).
  - If it's another DEPS entry (not shared with Chromium), roll it to HEAD
    unless it's configured to be skipped.

  Returns:
    A list of ChangedDep objects representing the changed deps.
  """
  result = []
  webrtc_entries = BuildDepsentryDict(webrtc_deps)
  new_cr_entries = BuildDepsentryDict(new_cr_deps)
  for path, webrtc_deps_entry in webrtc_entries.iteritems():
    if path in DONT_AUTOROLL_THESE:
      continue
    cr_deps_entry = new_cr_entries.get(path)
    if cr_deps_entry:
      # Use the revision from Chromium's DEPS file.
      new_rev = cr_deps_entry.revision
      assert webrtc_deps_entry.url == cr_deps_entry.url, (
        'WebRTC DEPS entry %s has a different URL (%s) than Chromium (%s).' %
        (path, webrtc_deps_entry.url, cr_deps_entry.url))
    else:
      # Use the HEAD of the deps repo.
      stdout, _ = _RunCommand(['git', 'ls-remote', webrtc_deps_entry.url,
                               'HEAD'])
      # ls-remote output is '<sha>\tHEAD'; keep only the sha.
      new_rev = stdout.strip().split('\t')[0]
    # Check if an update is necessary.
    if webrtc_deps_entry.revision != new_rev:
      logging.debug('Roll dependency %s to %s', path, new_rev)
      result.append(ChangedDep(path, webrtc_deps_entry.url,
                               webrtc_deps_entry.revision, new_rev))
  # Sorted for a stable, readable commit message.
  return sorted(result)
def CalculateChangedClang(new_cr_rev):
  """Return a ChangedDep describing the Clang revision change.

  Compares CLANG_REVISION in the local clang update script against the one
  in Chromium revision *new_cr_rev* (fetched remotely).
  """
  def GetClangRev(lines):
    # First CLANG_REVISION assignment wins; raises when none is found.
    for line in lines:
      match = CLANG_REVISION_RE.match(line)
      if match:
        return match.group(1)
    raise RollError('Could not parse Clang revision!')
  with open(CLANG_UPDATE_SCRIPT_LOCAL_PATH, 'rb') as f:
    current_lines = f.readlines()
  current_rev = GetClangRev(current_lines)
  new_clang_update_py = ReadRemoteCrFile(CLANG_UPDATE_SCRIPT_URL_PATH,
                                         new_cr_rev).splitlines()
  new_rev = GetClangRev(new_clang_update_py)
  return ChangedDep(CLANG_UPDATE_SCRIPT_LOCAL_PATH, None, current_rev, new_rev)
def GenerateCommitMessage(current_cr_rev, new_cr_rev, current_commit_pos,
                          new_commit_pos, changed_deps_list, clang_change):
  """Build the roll CL commit message as a single string.

  Includes the Chromium revision/commit-position interval, per-dependency
  log links, the Clang change (if any) and the TBR/BUG/CQ footers.
  """
  # Abbreviate revisions to 10 chars for readability.
  current_cr_rev = current_cr_rev[0:10]
  new_cr_rev = new_cr_rev[0:10]
  rev_interval = '%s..%s' % (current_cr_rev, new_cr_rev)
  git_number_interval = '%s:%s' % (current_commit_pos, new_commit_pos)
  commit_msg = ['Roll chromium_revision %s (%s)\n' % (rev_interval,
                                                      git_number_interval)]
  commit_msg.append('Change log: %s' % (CHROMIUM_LOG_TEMPLATE % rev_interval))
  commit_msg.append('Full diff: %s\n' % (CHROMIUM_COMMIT_TEMPLATE %
                                         rev_interval))
  # TBR field will be empty unless in some custom cases, where some engineers
  # are added.
  tbr_authors = ''
  if changed_deps_list:
    commit_msg.append('Changed dependencies:')
    for c in changed_deps_list:
      commit_msg.append('* %s: %s/+log/%s..%s' % (c.path, c.url,
                                                  c.current_rev[0:10],
                                                  c.new_rev[0:10]))
      if 'libvpx' in c.path:
        tbr_authors += 'marpan@webrtc.org, '
    change_url = CHROMIUM_FILE_TEMPLATE % (rev_interval, 'DEPS')
    commit_msg.append('DEPS diff: %s\n' % change_url)
  else:
    commit_msg.append('No dependencies changed.')
  if clang_change.current_rev != clang_change.new_rev:
    commit_msg.append('Clang version changed %s:%s' %
                      (clang_change.current_rev, clang_change.new_rev))
    change_url = CHROMIUM_FILE_TEMPLATE % (rev_interval,
                                           CLANG_UPDATE_SCRIPT_URL_PATH)
    commit_msg.append('Details: %s\n' % change_url)
  else:
    commit_msg.append('No update to Clang.\n')
  commit_msg.append('TBR=%s' % tbr_authors)
  commit_msg.append('BUG=None')
  commit_msg.append('CQ_INCLUDE_TRYBOTS=%s' % EXTRA_TRYBOTS)
  return '\n'.join(commit_msg)
def UpdateDepsFile(deps_filename, old_cr_revision, new_cr_revision,
                   changed_deps):
  """Update the DEPS file with the new revision."""
  # Update the chromium_revision variable.
  with open(deps_filename, 'rb') as deps_file:
    deps_content = deps_file.read()
  deps_content = deps_content.replace(old_cr_revision, new_cr_revision)
  with open(deps_filename, 'wb') as deps_file:
    deps_file.write(deps_content)
  # Update each individual DEPS entry.
  for dep in changed_deps:
    local_dep_dir = os.path.join(CHECKOUT_ROOT_DIR, dep.path)
    if not os.path.isdir(local_dep_dir):
      raise RollError(
          'Cannot find local directory %s. Either run\n'
          'gclient sync --deps=all\n'
          'or make sure the .gclient file for your solution contains all '
          'platforms in the target_os list, i.e.\n'
          'target_os = ["android", "unix", "mac", "ios", "win"];\n'
          'Then run "gclient sync" again.' % local_dep_dir)
    # Failures are tolerated (ignore_exit_code) and stderr is surfaced as a
    # warning instead of aborting the roll.
    _, stderr = _RunCommand(
        ['roll-dep-svn', '--no-verify-revision', dep.path, dep.new_rev],
        working_dir=CHECKOUT_SRC_DIR, ignore_exit_code=True)
    if stderr:
      logging.warning('roll-dep-svn: %s', stderr)
def _IsTreeClean():
  """Return True when `git status --porcelain` reports nothing."""
  status_output, _ = _RunCommand(['git', 'status', '--porcelain'])
  if status_output:
    logging.error('Dirty/unversioned files:\n%s', status_output)
    return False
  return True
def _EnsureUpdatedMasterBranch(dry_run):
  """Verify we are on the master branch and pull the latest changes.

  Exits the process when another branch is checked out (unless dry_run).
  """
  current_branch = _RunCommand(
      ['git', 'rev-parse', '--abbrev-ref', 'HEAD'])[0].splitlines()[0]
  if current_branch != 'master':
    logging.error('Please checkout the master branch and re-run this script.')
    if not dry_run:
      sys.exit(-1)
  logging.info('Updating master branch...')
  _RunCommand(['git', 'pull'])
def _CreateRollBranch(dry_run):
  """Create the local roll branch (skipped in dry-run mode)."""
  logging.info('Creating roll branch: %s', ROLL_BRANCH_NAME)
  if dry_run:
    return
  _RunCommand(['git', 'checkout', '-b', ROLL_BRANCH_NAME])
def _RemovePreviousRollBranch(dry_run):
  """Delete a leftover roll branch, checking out another branch first."""
  active_branch, branches = _GetBranches()
  if active_branch == ROLL_BRANCH_NAME:
    # Cannot delete the branch we are standing on; move to master first.
    active_branch = 'master'
  if ROLL_BRANCH_NAME not in branches:
    return
  logging.info('Removing previous roll branch (%s)', ROLL_BRANCH_NAME)
  if not dry_run:
    _RunCommand(['git', 'checkout', active_branch])
    _RunCommand(['git', 'branch', '-D', ROLL_BRANCH_NAME])
def _LocalCommit(commit_msg, dry_run):
  """Stage all tracked changes and commit them locally."""
  logging.info('Committing changes locally.')
  if dry_run:
    return
  _RunCommand(['git', 'add', '--update', '.'])
  _RunCommand(['git', 'commit', '-m', commit_msg])
def _UploadCL(dry_run, rietveld_email=None):
  """Upload the CL via `git cl upload` (no-op in dry-run mode)."""
  logging.info('Uploading CL...')
  if dry_run:
    return
  upload_cmd = ['git', 'cl', 'upload', '-f']
  if rietveld_email:
    upload_cmd.append('--email=%s' % rietveld_email)
  # EDITOR=true keeps the upload non-interactive.
  _RunCommand(upload_cmd, extra_env={'EDITOR': 'true'})
def _SendToCQ(dry_run, skip_cq):
  """Flip the commit bit so the CQ lands the CL (unless skipped)."""
  logging.info('Sending the CL to the CQ...')
  if dry_run or skip_cq:
    return
  _RunCommand(['git', 'cl', 'set_commit'])
  logging.info('Sent the CL to the CQ.')
def main():
  """Command-line entry point for the roll; returns a process exit code."""
  p = argparse.ArgumentParser()
  p.add_argument('--clean', action='store_true', default=False,
                 help='Removes any previous local roll branch.')
  p.add_argument('-r', '--revision',
                 help=('Chromium Git revision to roll to. Defaults to the '
                       'Chromium HEAD revision if omitted.'))
  p.add_argument('-u', '--rietveld-email',
                 help=('E-mail address to use for creating the CL at Rietveld'
                       'If omitted a previously cached one will be used or an '
                       'error will be thrown during upload.'))
  p.add_argument('--dry-run', action='store_true', default=False,
                 help=('Calculate changes and modify DEPS, but don\'t create '
                       'any local branch, commit, upload CL or send any '
                       'tryjobs.'))
  p.add_argument('-i', '--ignore-unclean-workdir', action='store_true',
                 default=False,
                 help=('Ignore if the current branch is not master or if there '
                       'are uncommitted changes (default: %(default)s).'))
  p.add_argument('--skip-cq', action='store_true', default=False,
                 help='Skip sending the CL to the CQ (default: %(default)s)')
  p.add_argument('-v', '--verbose', action='store_true', default=False,
                 help='Be extra verbose in printing of log messages.')
  opts = p.parse_args()
  if opts.verbose:
    logging.basicConfig(level=logging.DEBUG)
  else:
    logging.basicConfig(level=logging.INFO)
  if not opts.ignore_unclean_workdir and not _IsTreeClean():
    logging.error('Please clean your local checkout first.')
    return 1
  if opts.clean:
    _RemovePreviousRollBranch(opts.dry_run)
  if not opts.ignore_unclean_workdir:
    _EnsureUpdatedMasterBranch(opts.dry_run)
  # Default to Chromium's remote HEAD when no revision was given.
  new_cr_rev = opts.revision
  if not new_cr_rev:
    stdout, _ = _RunCommand(['git', 'ls-remote', CHROMIUM_SRC_URL, 'HEAD'])
    head_rev = stdout.strip().split('\t')[0]
    logging.info('No revision specified. Using HEAD: %s', head_rev)
    new_cr_rev = head_rev
  deps_filename = os.path.join(CHECKOUT_SRC_DIR, 'DEPS')
  webrtc_deps = ParseLocalDepsFile(deps_filename)
  current_cr_rev = webrtc_deps['vars']['chromium_revision']
  # Commit positions for both ends of the interval, for the CL message.
  current_commit_pos = ParseCommitPosition(ReadRemoteCrCommit(current_cr_rev))
  new_commit_pos = ParseCommitPosition(ReadRemoteCrCommit(new_cr_rev))
  new_cr_deps = ParseRemoteCrDepsFile(new_cr_rev)
  changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps)
  clang_change = CalculateChangedClang(new_cr_rev)
  commit_msg = GenerateCommitMessage(current_cr_rev, new_cr_rev,
                                     current_commit_pos, new_commit_pos,
                                     changed_deps, clang_change)
  logging.debug('Commit message:\n%s', commit_msg)
  _CreateRollBranch(opts.dry_run)
  UpdateDepsFile(deps_filename, current_cr_rev, new_cr_rev, changed_deps)
  # Only create/upload a CL when UpdateDepsFile actually modified something.
  if _IsTreeClean():
    logging.info("No DEPS changes detected, skipping CL creation.")
  else:
    _LocalCommit(commit_msg, opts.dry_run)
    _UploadCL(opts.dry_run, opts.rietveld_email)
    _SendToCQ(opts.dry_run, opts.skip_cq)
  return 0
# Script entry point: propagate main()'s return value as the exit code.
if __name__ == '__main__':
  sys.exit(main())
Fix CQ_INCLUDE_TRYBOTS CL value generated by roll_deps.py
As seen in https://codereview.webrtc.org/2956153004/ the CQ
is picky about the trailing ';', so this CL removes it.
BUG=None
TBR=d05609f027dc667cc37492b1dc1bdea5d60fa82a@webrtc.org
NOTRY=True
Change-Id: I5e6359f0966f171c98225a982da042cc7147f765
Reviewed-on: https://chromium-review.googlesource.com/552138
Reviewed-by: Henrik Kjellander <60c11d3e646624f25a02b88f51d51ca6b7ff7c72@webrtc.org>
Reviewed-by: Edward Lesmes <d05609f027dc667cc37492b1dc1bdea5d60fa82a@chromium.org>
Commit-Queue: Edward Lemur <d05609f027dc667cc37492b1dc1bdea5d60fa82a@webrtc.org>
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#18807}
#!/usr/bin/env python
# Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Script to automatically roll dependencies in the WebRTC DEPS file."""
import argparse
import base64
import collections
import logging
import os
import re
import subprocess
import sys
import urllib
# Skip these dependencies (list without solution name prefix).
DONT_AUTOROLL_THESE = [
  'src/third_party/gflags/src',
  'src/third_party/winsdk_samples',
  'src/webrtc/examples/androidtests/third_party/gradle',
]
# Run these CQ trybots in addition to the default ones in infra/config/cq.cfg.
# Deliberately no trailing separator: the CQ rejects values ending in ';'.
EXTRA_TRYBOTS = (
  'master.internal.tryserver.corp.webrtc:linux_internal'
)
WEBRTC_URL = 'https://chromium.googlesource.com/external/webrtc'
CHROMIUM_SRC_URL = 'https://chromium.googlesource.com/chromium/src'
# Gitiles URL templates: single commit, commit log range, file at a revision.
CHROMIUM_COMMIT_TEMPLATE = CHROMIUM_SRC_URL + '/+/%s'
CHROMIUM_LOG_TEMPLATE = CHROMIUM_SRC_URL + '/+log/%s'
CHROMIUM_FILE_TEMPLATE = CHROMIUM_SRC_URL + '/+/%s/%s'
# Matches the 'Cr-Commit-Position: refs/...#NNN' commit footer line.
COMMIT_POSITION_RE = re.compile('^Cr-Commit-Position: .*#([0-9]+).*$')
# Matches "CLANG_REVISION = 'NNN'" in tools/clang/scripts/update.py.
CLANG_REVISION_RE = re.compile(r'^CLANG_REVISION = \'(\d+)\'$')
ROLL_BRANCH_NAME = 'roll_chromium_revision'
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
CHECKOUT_SRC_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, os.pardir,
                                                 os.pardir))
CHECKOUT_ROOT_DIR = os.path.realpath(os.path.join(CHECKOUT_SRC_DIR, os.pardir))
sys.path.append(os.path.join(CHECKOUT_SRC_DIR, 'build'))
import find_depot_tools
find_depot_tools.add_depot_tools_to_path()
from gclient import GClientKeywords
# Path of the clang update script, relative to the Chromium src root.
CLANG_UPDATE_SCRIPT_URL_PATH = 'tools/clang/scripts/update.py'
# The same script inside the local checkout.
CLANG_UPDATE_SCRIPT_LOCAL_PATH = os.path.join(CHECKOUT_SRC_DIR, 'tools',
                                              'clang', 'scripts', 'update.py')
# One DEPS entry: checkout path, repo URL and pinned revision.
DepsEntry = collections.namedtuple('DepsEntry', 'path url revision')
# A dependency whose pinned revision rolls from current_rev to new_rev.
ChangedDep = collections.namedtuple('ChangedDep',
                                    'path url current_rev new_rev')
class RollError(Exception):
  """Raised when the roll cannot proceed (e.g. unparsable input)."""
def ParseDepsDict(deps_content):
  """Evaluate DEPS file content and return its local scope as a dict.

  GClientKeywords.VarImpl lets Var() lookups inside the DEPS file resolve
  against the file's own variables.
  """
  local_scope = {}
  var = GClientKeywords.VarImpl({}, local_scope)
  global_scope = {
    'Var': var.Lookup,
    'deps_os': {},
  }
  # NOTE: exec() of DEPS content is only fed from the local checkout or a
  # gitiles fetch here; never pass untrusted data through this path.
  exec(deps_content, global_scope, local_scope)
  return local_scope
def ParseLocalDepsFile(filename):
  """Parse a DEPS file on local disk and return its dict of definitions."""
  with open(filename, 'rb') as deps_file:
    raw_content = deps_file.read()
  return ParseDepsDict(raw_content)
def ParseRemoteCrDepsFile(revision):
  """Fetch Chromium's DEPS file at *revision* and parse it into a dict."""
  return ParseDepsDict(ReadRemoteCrFile('DEPS', revision))
def ParseCommitPosition(commit_message):
  """Extract the numeric commit position from a Chromium commit message.

  Scans the message bottom-up (the footer is at the end) for a
  Cr-Commit-Position line and returns the '#NNN' number as a string.
  Exits the process when no such line exists.
  """
  for line in reversed(commit_message.splitlines()):
    m = COMMIT_POSITION_RE.match(line.strip())
    if m:
      return m.group(1)
  logging.error('Failed to parse commit position id from:\n%s\n',
                commit_message)
  sys.exit(-1)
def _RunCommand(command, working_dir=None, ignore_exit_code=False,
extra_env=None):
"""Runs a command and returns the output from that command.
If the command fails (exit code != 0), the function will exit the process.
Returns:
A tuple containing the stdout and stderr outputs as strings.
"""
working_dir = working_dir or CHECKOUT_SRC_DIR
logging.debug('CMD: %s CWD: %s', ' '.join(command), working_dir)
env = os.environ.copy()
if extra_env:
assert all(type(value) == str for value in extra_env.values())
logging.debug('extra env: %s', extra_env)
env.update(extra_env)
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env,
cwd=working_dir, universal_newlines=True)
std_output = p.stdout.read()
err_output = p.stderr.read()
p.wait()
p.stdout.close()
p.stderr.close()
if not ignore_exit_code and p.returncode != 0:
logging.error('Command failed: %s\n'
'stdout:\n%s\n'
'stderr:\n%s\n', ' '.join(command), std_output, err_output)
sys.exit(p.returncode)
return std_output, err_output
def _GetBranches():
  """Returns a tuple of active,branches.

  The 'active' is the name of the currently active branch and 'branches' is a
  list of all branches.
  """
  raw_lines = _RunCommand(['git', 'branch'])[0].split('\n')
  all_branches = []
  current = ''
  for raw_line in raw_lines:
    if '*' in raw_line:
      # Git marks the checked-out branch with a leading '*'.
      current = raw_line[1:].strip()
      all_branches.append(current)
    else:
      name = raw_line.strip()
      if name:
        all_branches.append(name)
  return current, all_branches
def _ReadGitilesContent(url):
  """Fetch *url* from gitiles and return its BASE64-decoded payload."""
  # Gitiles only serves raw files base64-encoded (?format=TEXT) until
  # https://code.google.com/p/gitiles/issues/detail?id=7 is fixed.
  encoded = ReadUrlContent(url + '?format=TEXT')
  return base64.b64decode(encoded[0])
def ReadRemoteCrFile(path_below_src, revision):
  """Reads a remote Chromium file of a specific revision. Returns a string."""
  file_url = CHROMIUM_FILE_TEMPLATE % (revision, path_below_src)
  return _ReadGitilesContent(file_url)
def ReadRemoteCrCommit(revision):
  """Fetch the Chromium commit message for *revision* as a string."""
  commit_url = CHROMIUM_COMMIT_TEMPLATE % revision
  return _ReadGitilesContent(commit_url)
def ReadUrlContent(url):
  """Connect to a remote host and read the contents. Returns a list of lines."""
  # NOTE: urllib.urlopen is the Python 2 API (urllib.request in Python 3).
  conn = urllib.urlopen(url)
  try:
    return conn.readlines()
  except IOError as e:
    logging.exception('Error connecting to %s. Error: %s', url, e)
    raise
  finally:
    # Always release the connection, whether the read succeeded or not.
    conn.close()
def GetMatchingDepsEntries(depsentry_dict, dir_path):
  """Gets all deps entries matching the provided path.

  This list may contain more than one DepsEntry object.
  Example: dir_path='src/testing' would give results containing both
  'src/testing/gtest' and 'src/testing/gmock' deps entries for Chromium's DEPS.
  Example 2: dir_path='src/build' should return 'src/build' but not
  'src/buildtools'.

  Returns:
    A list of DepsEntry objects.
  """
  result = []
  dir_path_parts = dir_path.split('/')
  for path, depsentry in depsentry_dict.items():
    if path == dir_path:
      result.append(depsentry)
    else:
      parts = path.split('/')
      # BUGFIX: a path shorter than dir_path (e.g. 'src' vs 'src/testing')
      # used to raise IndexError in the component comparison below.
      if len(parts) < len(dir_path_parts):
        continue
      # Component-wise prefix match, so 'src/build' does not match
      # 'src/buildtools'.
      if all(part == parts[i]
             for i, part in enumerate(dir_path_parts)):
        result.append(depsentry)
  return result
def BuildDepsentryDict(deps_dict):
  """Builds a dict of paths to DepsEntry objects from a raw parsed deps dict."""
  result = {}
  def AddDepsEntries(deps_subdict):
    # First definition of a path wins; later deps_os entries never override.
    for path, deps_url in deps_subdict.items():
      if path not in result:
        url, revision = deps_url.split('@') if deps_url else (None, None)
        result[path] = DepsEntry(path, url, revision)
  AddDepsEntries(deps_dict['deps'])
  # BUGFIX: 'unix' was listed twice in the original deps_os list.
  for deps_os in ['win', 'mac', 'unix', 'android', 'ios']:
    AddDepsEntries(deps_dict.get('deps_os', {}).get(deps_os, {}))
  return result
def CalculateChangedDeps(webrtc_deps, new_cr_deps):
  """
  Calculate changed deps entries based on entries defined in the WebRTC DEPS
  file:
  - If a shared dependency with the Chromium DEPS file: roll it to the same
    revision as Chromium (i.e. entry in the new_cr_deps dict)
  - If it's a Chromium sub-directory, roll it to the HEAD revision (notice
    this means it may be ahead of the chromium_revision, but generally these
    should be close).
  - If it's another DEPS entry (not shared with Chromium), roll it to HEAD
    unless it's configured to be skipped.

  Returns:
    A list of ChangedDep objects representing the changed deps.
  """
  result = []
  webrtc_entries = BuildDepsentryDict(webrtc_deps)
  new_cr_entries = BuildDepsentryDict(new_cr_deps)
  for path, webrtc_deps_entry in webrtc_entries.iteritems():
    if path in DONT_AUTOROLL_THESE:
      continue
    cr_deps_entry = new_cr_entries.get(path)
    if cr_deps_entry:
      # Use the revision from Chromium's DEPS file.
      new_rev = cr_deps_entry.revision
      assert webrtc_deps_entry.url == cr_deps_entry.url, (
        'WebRTC DEPS entry %s has a different URL (%s) than Chromium (%s).' %
        (path, webrtc_deps_entry.url, cr_deps_entry.url))
    else:
      # Use the HEAD of the deps repo.
      stdout, _ = _RunCommand(['git', 'ls-remote', webrtc_deps_entry.url,
                               'HEAD'])
      # ls-remote output is '<sha>\tHEAD'; keep only the sha.
      new_rev = stdout.strip().split('\t')[0]
    # Check if an update is necessary.
    if webrtc_deps_entry.revision != new_rev:
      logging.debug('Roll dependency %s to %s', path, new_rev)
      result.append(ChangedDep(path, webrtc_deps_entry.url,
                               webrtc_deps_entry.revision, new_rev))
  # Sorted for a stable, readable commit message.
  return sorted(result)
def CalculateChangedClang(new_cr_rev):
  """Return a ChangedDep describing the Clang revision change.

  Compares CLANG_REVISION in the local clang update script against the one
  in Chromium revision *new_cr_rev* (fetched remotely).
  """
  def GetClangRev(lines):
    # First CLANG_REVISION assignment wins; raises when none is found.
    for line in lines:
      match = CLANG_REVISION_RE.match(line)
      if match:
        return match.group(1)
    raise RollError('Could not parse Clang revision!')
  with open(CLANG_UPDATE_SCRIPT_LOCAL_PATH, 'rb') as f:
    current_lines = f.readlines()
  current_rev = GetClangRev(current_lines)
  new_clang_update_py = ReadRemoteCrFile(CLANG_UPDATE_SCRIPT_URL_PATH,
                                         new_cr_rev).splitlines()
  new_rev = GetClangRev(new_clang_update_py)
  return ChangedDep(CLANG_UPDATE_SCRIPT_LOCAL_PATH, None, current_rev, new_rev)
def GenerateCommitMessage(current_cr_rev, new_cr_rev, current_commit_pos,
                          new_commit_pos, changed_deps_list, clang_change):
  """Builds the roll CL commit message.

  Lists the Chromium revision interval, every changed dependency, and any
  Clang version change, plus the TBR/BUG/CQ trailer lines.
  """
  short_current = current_cr_rev[0:10]
  short_new = new_cr_rev[0:10]
  rev_interval = '%s..%s' % (short_current, short_new)
  git_number_interval = '%s:%s' % (current_commit_pos, new_commit_pos)
  lines = ['Roll chromium_revision %s (%s)\n' % (rev_interval,
                                                 git_number_interval)]
  lines.append('Change log: %s' % (CHROMIUM_LOG_TEMPLATE % rev_interval))
  lines.append('Full diff: %s\n' % (CHROMIUM_COMMIT_TEMPLATE % rev_interval))
  # TBR field will be empty unless in some custom cases, where some engineers
  # are added.
  tbr_authors = ''
  if not changed_deps_list:
    lines.append('No dependencies changed.')
  else:
    lines.append('Changed dependencies:')
    for dep in changed_deps_list:
      lines.append('* %s: %s/+log/%s..%s' % (dep.path, dep.url,
                                             dep.current_rev[0:10],
                                             dep.new_rev[0:10]))
      if 'libvpx' in dep.path:
        tbr_authors += 'marpan@webrtc.org, '
    lines.append('DEPS diff: %s\n' % (CHROMIUM_FILE_TEMPLATE %
                                      (rev_interval, 'DEPS')))
  if clang_change.current_rev == clang_change.new_rev:
    lines.append('No update to Clang.\n')
  else:
    lines.append('Clang version changed %s:%s' %
                 (clang_change.current_rev, clang_change.new_rev))
    lines.append('Details: %s\n' % (CHROMIUM_FILE_TEMPLATE %
                                    (rev_interval,
                                     CLANG_UPDATE_SCRIPT_URL_PATH)))
  lines.append('TBR=%s' % tbr_authors)
  lines.append('BUG=None')
  lines.append('CQ_INCLUDE_TRYBOTS=%s' % EXTRA_TRYBOTS)
  return '\n'.join(lines)
def UpdateDepsFile(deps_filename, old_cr_revision, new_cr_revision,
                   changed_deps):
  """Update the DEPS file with the new revision."""
  # Rewrite the chromium_revision variable in place.
  with open(deps_filename, 'rb') as deps_file:
    contents = deps_file.read()
  with open(deps_filename, 'wb') as deps_file:
    deps_file.write(contents.replace(old_cr_revision, new_cr_revision))
  # Roll each changed dependency individually via roll-dep-svn.
  for dep in changed_deps:
    local_dep_dir = os.path.join(CHECKOUT_ROOT_DIR, dep.path)
    if not os.path.isdir(local_dep_dir):
      raise RollError(
          'Cannot find local directory %s. Either run\n'
          'gclient sync --deps=all\n'
          'or make sure the .gclient file for your solution contains all '
          'platforms in the target_os list, i.e.\n'
          'target_os = ["android", "unix", "mac", "ios", "win"];\n'
          'Then run "gclient sync" again.' % local_dep_dir)
    _, stderr = _RunCommand(
        ['roll-dep-svn', '--no-verify-revision', dep.path, dep.new_rev],
        working_dir=CHECKOUT_SRC_DIR, ignore_exit_code=True)
    if stderr:
      logging.warning('roll-dep-svn: %s', stderr)
def _IsTreeClean():
  """Returns True iff `git status --porcelain` reports nothing.

  Logs the dirty/unversioned files otherwise.
  """
  stdout, _ = _RunCommand(['git', 'status', '--porcelain'])
  # Idiomatic truthiness check instead of len(stdout) == 0.
  if not stdout:
    return True
  logging.error('Dirty/unversioned files:\n%s', stdout)
  return False
def _EnsureUpdatedMasterBranch(dry_run):
  """Exits (unless dry_run) when not on master, then pulls latest master."""
  head_output = _RunCommand(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])[0]
  branch = head_output.splitlines()[0]
  if branch != 'master':
    logging.error('Please checkout the master branch and re-run this script.')
    if not dry_run:
      sys.exit(-1)
  logging.info('Updating master branch...')
  _RunCommand(['git', 'pull'])
def _CreateRollBranch(dry_run):
  """Creates the local roll branch (skipped when dry_run)."""
  logging.info('Creating roll branch: %s', ROLL_BRANCH_NAME)
  if dry_run:
    return
  _RunCommand(['git', 'checkout', '-b', ROLL_BRANCH_NAME])
def _RemovePreviousRollBranch(dry_run):
  """Deletes a leftover roll branch from an earlier run, if one exists."""
  active_branch, branches = _GetBranches()
  if active_branch == ROLL_BRANCH_NAME:
    # Cannot delete the branch we are standing on; move to master first.
    active_branch = 'master'
  if ROLL_BRANCH_NAME not in branches:
    return
  logging.info('Removing previous roll branch (%s)', ROLL_BRANCH_NAME)
  if not dry_run:
    _RunCommand(['git', 'checkout', active_branch])
    _RunCommand(['git', 'branch', '-D', ROLL_BRANCH_NAME])
def _LocalCommit(commit_msg, dry_run):
  """Stages tracked changes and commits them locally (skipped when dry_run)."""
  logging.info('Committing changes locally.')
  if dry_run:
    return
  _RunCommand(['git', 'add', '--update', '.'])
  _RunCommand(['git', 'commit', '-m', commit_msg])
def _UploadCL(dry_run, rietveld_email=None):
  """Uploads the CL to Rietveld (skipped when dry_run)."""
  logging.info('Uploading CL...')
  if dry_run:
    return
  upload_cmd = ['git', 'cl', 'upload', '-f']
  if rietveld_email:
    upload_cmd.append('--email=%s' % rietveld_email)
  # EDITOR=true keeps the upload non-interactive.
  _RunCommand(upload_cmd, extra_env={'EDITOR': 'true'})
def _SendToCQ(dry_run, skip_cq):
  """Sends the uploaded CL to the commit queue unless dry-running or skipping."""
  logging.info('Sending the CL to the CQ...')
  if dry_run or skip_cq:
    return
  _RunCommand(['git', 'cl', 'set_commit'])
  logging.info('Sent the CL to the CQ.')
def main():
  """Entry point: rolls chromium_revision in the WebRTC DEPS file.

  Returns 0 on success, 1 when the working directory is not clean.
  """
  p = argparse.ArgumentParser()
  p.add_argument('--clean', action='store_true', default=False,
                 help='Removes any previous local roll branch.')
  p.add_argument('-r', '--revision',
                 help=('Chromium Git revision to roll to. Defaults to the '
                       'Chromium HEAD revision if omitted.'))
  # Fixed help text: the original adjacent string literals ran together
  # ("...at RietveldIf omitted...").
  p.add_argument('-u', '--rietveld-email',
                 help=('E-mail address to use for creating the CL at '
                       'Rietveld. If omitted a previously cached one will be '
                       'used or an error will be thrown during upload.'))
  p.add_argument('--dry-run', action='store_true', default=False,
                 help=('Calculate changes and modify DEPS, but don\'t create '
                       'any local branch, commit, upload CL or send any '
                       'tryjobs.'))
  p.add_argument('-i', '--ignore-unclean-workdir', action='store_true',
                 default=False,
                 help=('Ignore if the current branch is not master or if there '
                       'are uncommitted changes (default: %(default)s).'))
  p.add_argument('--skip-cq', action='store_true', default=False,
                 help='Skip sending the CL to the CQ (default: %(default)s)')
  p.add_argument('-v', '--verbose', action='store_true', default=False,
                 help='Be extra verbose in printing of log messages.')
  opts = p.parse_args()
  if opts.verbose:
    logging.basicConfig(level=logging.DEBUG)
  else:
    logging.basicConfig(level=logging.INFO)
  if not opts.ignore_unclean_workdir and not _IsTreeClean():
    logging.error('Please clean your local checkout first.')
    return 1
  if opts.clean:
    _RemovePreviousRollBranch(opts.dry_run)
  if not opts.ignore_unclean_workdir:
    _EnsureUpdatedMasterBranch(opts.dry_run)
  new_cr_rev = opts.revision
  if not new_cr_rev:
    # No revision given: resolve Chromium's current HEAD.
    stdout, _ = _RunCommand(['git', 'ls-remote', CHROMIUM_SRC_URL, 'HEAD'])
    head_rev = stdout.strip().split('\t')[0]
    logging.info('No revision specified. Using HEAD: %s', head_rev)
    new_cr_rev = head_rev
  deps_filename = os.path.join(CHECKOUT_SRC_DIR, 'DEPS')
  webrtc_deps = ParseLocalDepsFile(deps_filename)
  current_cr_rev = webrtc_deps['vars']['chromium_revision']
  current_commit_pos = ParseCommitPosition(ReadRemoteCrCommit(current_cr_rev))
  new_commit_pos = ParseCommitPosition(ReadRemoteCrCommit(new_cr_rev))
  new_cr_deps = ParseRemoteCrDepsFile(new_cr_rev)
  changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps)
  clang_change = CalculateChangedClang(new_cr_rev)
  commit_msg = GenerateCommitMessage(current_cr_rev, new_cr_rev,
                                     current_commit_pos, new_commit_pos,
                                     changed_deps, clang_change)
  logging.debug('Commit message:\n%s', commit_msg)
  _CreateRollBranch(opts.dry_run)
  UpdateDepsFile(deps_filename, current_cr_rev, new_cr_rev, changed_deps)
  if _IsTreeClean():
    logging.info("No DEPS changes detected, skipping CL creation.")
  else:
    _LocalCommit(commit_msg, opts.dry_run)
    _UploadCL(opts.dry_run, opts.rietveld_email)
    _SendToCQ(opts.dry_run, opts.skip_cq)
  return 0
if __name__ == '__main__':
  sys.exit(main())
|
from sympy.core import Basic
import random
class Partition(Basic):
    """
    This class represents an abstract set partition: a collection of
    disjoint lists (blocks) whose union equals a given superset.

    Concrete subclasses are expected to implement ``next``, ``previous``,
    ``_compare`` and ``rank``; all rich comparisons defined here are
    expressed in terms of ``_compare``.
    """
    def next(self):
        """
        Generates the next partition.

        Abstract here; subclasses must override.
        """
        raise NotImplementedError()

    def previous(self):
        """
        Generates the previous partition.

        Abstract here; subclasses must override.
        """
        raise NotImplementedError()

    @property
    def size(self):
        """
        Gets the size of the partition (the number of blocks).

        Examples:
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1,2],[3]],[1,2,3])
        >>> a.size
        2
        """
        # args[0] is the list of blocks given to __new__.
        return len(self.args[0])

    @property
    def partition(self):
        """
        Gets the partition itself (the list of blocks).

        Examples:
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1,2],[3]],[1,2,3])
        >>> a.partition
        [[1, 2], [3]]
        """
        return self.args[0]

    @property
    def partition_set(self):
        """
        Gets the underlying superset of the partition.

        Examples:
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1,2],[3]],[1,2,3])
        >>> a.partition_set
        [1, 2, 3]
        """
        return self.args[1]

    @property
    def partition_set_size(self):
        """
        Gets the size of the underlying superset of the partition.

        Examples:
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1,2],[3]],[1,2,3])
        >>> a.partition_set_size
        3
        """
        return len(self.args[1])

    def __str__(self):
        # Display only the blocks, not the superset.
        return str(self.partition)

    def __repr__(self):
        return str(self)

    def __new__(cls, *args, **kw_args):
        """
        Generates a new partition object.

        It also verifies that the arguments passed are valid (args[0] is
        a list of blocks, args[1] the superset) and raises ValueError
        otherwise.

        Examples:
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1,2],[3]],[1,2,3])
        >>> str(a)
        '[[1, 2], [3]]'
        """
        partition = args[0]
        super_set = args[1][:]  # copy, so sorting below does not mutate the caller's list
        check = []
        for part in partition:
            if not isinstance(part, list):
                raise ValueError("The input has been provided incorrectly")
            check.extend(part)
        # The blocks must contain exactly the superset's elements
        # (disjoint and covering); sorted-equality verifies both at once.
        check.sort()
        super_set.sort()
        if check != super_set:
            raise ValueError("The partition provided is not valid.")
        obj = Basic.__new__(cls, *args, **kw_args)
        return obj

    def _compare(self, other):
        """
        Compares two partitions.

        The basis for comparison of two partitions is rank.
        A partition with a lesser rank is greater than a
        partition with a greater rank.

        Abstract here; subclasses must override.
        """
        raise NotImplementedError()

    def __eq__(self, other):
        """
        Checks for equality of two partitions (via ``_compare``).
        """
        return self._compare(other) == 0

    def __ne__(self, other):
        """
        Checks for inequality of two partitions (via ``_compare``).
        """
        return self._compare(other) != 0

    def __gt__(self, other):
        """
        Checks if a partition is greater than the other.
        """
        return self._compare(other) > 0

    def __lt__(self, other):
        """
        Checks if a partition is less than the other.
        """
        return self._compare(other) < 0

    def __ge__(self, other):
        """
        Checks if a partition is greater than or equal to
        the other partition.
        """
        # NOTE: may invoke _compare twice (once per operand check).
        return self == other or self > other

    def __le__(self, other):
        """
        Checks if a partition is less than or equal to
        the other partition.
        """
        return self == other or self < other

    @property
    def rank(self):
        """
        Gets the rank of a partition.

        Abstract here; subclasses must override.
        """
        raise NotImplementedError()

    @property
    def RGS(self):
        """
        Returns the restricted growth string of the partition: for each
        element of the superset (in superset order), the index of the
        block that contains it. Blocks are numbered in iteration order.

        Examples:
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1,2],[3],[4,5]], [1,2,3,4,5])
        >>> a.RGS
        [0, 0, 1, 2, 2]
        >>> a = Partition([[1,4],[2],[3,5]], [1,2,3,4,5])
        >>> a.RGS
        [0, 1, 2, 0, 2]
        """
        rgs = [0] * self.partition_set_size
        a = 0
        for part in self.partition:
            for i in part:
                # list.index is a linear scan; fine for small supersets.
                rgs[self.partition_set.index(i)] = a
            a += 1
        return rgs
def from_RGS(rgs, superset):
    """
    Creates a set partition from a restricted growth string.

    The j-th symbol of ``rgs`` is the index of the block that the j-th
    element of ``superset`` belongs to.

    Examples:
    >>> from sympy.combinatorics.partitions import *
    >>> from_RGS([0,1,2,0,1],['a','b','c','d','e'])
    Partition([['a', 'd'], ['b', 'e'], ['c']], \
    ['a', 'b', 'c', 'd', 'e'])
    >>> a = Partition([[1,4],[2],[3,5]], [1,2,3,4,5])
    >>> from_RGS(a.RGS, a.partition_set)
    Partition([[1, 4], [2], [3, 5]], [1, 2, 3, 4, 5])
    """
    # One block per distinct symbol; symbols run 0..max(rgs).
    max_elem = max(rgs) + 1
    partition = [[] for i in xrange(max_elem)]
    # enumerate replaces the original's manual index counter.
    for j, block_index in enumerate(rgs):
        partition[block_index].append(superset[j])
    return Partition(partition, superset)
class IntegerPartition(Partition):
    """
    This class represents a partition of a positive integer:
    args[0] is the list of parts (kept sorted in descending order by
    ``__new__``) and args[1] is the integer being partitioned.
    """
    def next(self):
        """
        Generates the next partition.

        Abstract; not implemented here.
        """
        raise NotImplementedError()

    def previous(self):
        """
        Generates the previous partition.

        Abstract; not implemented here.
        """
        raise NotImplementedError()

    @property
    def size(self):
        """
        Gets the size of the partition (the number of parts).

        Examples:
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([1,3,4], 8)
        >>> a.size
        3
        """
        return len(self.args[0])

    @property
    def partition_set(self):
        """
        Gets the integer being partitioned. Note that unlike the base
        class this is a number, not a set.

        Examples:
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([1,3,4], 8)
        >>> a.partition_set
        8
        """
        return self.args[1]

    @property
    def partition_array(self):
        """
        Gets the array of parts from the partition object, in the
        descending order established by ``__new__``.

        Examples:
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([1,3,4], 8)
        >>> a.partition_array
        [4, 3, 1]
        """
        return self.args[0]

    @property
    def conjugate(self):
        """
        Find the conjugate of a partition.
        This is the vector that satisfies
        len(p) = max(conjugate(p)) and vice versa.

        Examples:
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([1,3,4], 8)
        >>> a.conjugate
        [3, 2, 2, 1]
        """
        result = []
        j = len(self.partition_array)
        if j <= 0:
            return result
        while True:
            # The k-th conjugate part counts how many parts are >= k;
            # j tracks that count as k (== len(result)+1) grows.
            result.append(j)
            while len(result) >= self.partition_array[j-1]:
                j -= 1
                if j == 0:
                    return result

    @property
    def is_SelfConjugate(self):
        """
        Checks if the conjugate of a partition equals itself.

        Examples:
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([6,3,3,2,1], 15)
        >>> a.is_SelfConjugate
        False
        >>> a = IntegerPartition([3,2,1], 6)
        >>> a.is_SelfConjugate
        True
        """
        return self.conjugate == self.partition_array

    @property
    def conjugate_partition(self):
        """
        Computes the conjugate partition of itself.

        Examples:
        >>> from sympy.combinatorics.partitions import \
        IntegerPartition
        >>> a = IntegerPartition([6,3,3,2,1], 15)
        >>> a.conjugate_partition
        [5, 4, 3, 1, 1, 1]
        >>> a = IntegerPartition([5,4,3,1,1,1], 15)
        >>> a.conjugate_partition
        [6, 3, 3, 2, 1]
        """
        # b[k-1] is set to the number of parts that are >= k; the
        # sentinel 0 appended to the parts terminates the scan.
        j = 1
        temp_arr = self.partition_array[:] + [0]
        k = temp_arr[0]
        b = [0] * (k)
        while k > 0:
            while k > temp_arr[j]:
                b[k - 1] = j
                k -= 1
            j += 1
        return b

    def __new__(cls, *args, **kw_args):
        """
        Generates a new partition object.

        It also verifies that args[0] is a list of parts summing to the
        integer args[1], and raises ValueError otherwise.
        """
        partition = args[0]
        integer_rep = args[1]
        if not isinstance(partition, list) or sum(partition) != integer_rep:
            raise ValueError("The partition is not valid")
        # NOTE(review): sorts the caller's list in place (descending);
        # callers that reuse the list they passed will see it reordered.
        list.sort(args[0], key = lambda x: -x)
        obj = Basic.__new__(cls, *args, **kw_args)
        return obj

    def _compare(self, other):
        """
        Compares two partitions by comparing prefix sums of the parts
        (majorization order over the first min(size, size) parts).

        NOTE(review): the doctest below calls ``a.compare(b)`` but the
        method is named ``_compare``; also, equal prefix sums fall
        through to ``return 1``, so ``__eq__`` can never be True —
        confirm whether that is the intended majorization semantics.

        Examples:
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([1,1,1,1,1], 5)
        >>> b = IntegerPartition([1,1,1,2], 5)
        >>> a.compare(b)
        -1
        >>> a < b
        True
        """
        k = min(self.size, other.size)
        val_self = 0
        val_other = 0
        for i in xrange(k):
            val_self += self.partition_array[i]
            val_other += other.partition_array[i]
            if val_self > val_other:
                return 1
            elif val_self < val_other:
                return -1
        return 1

    @property
    def rank(self):
        """
        Gets the rank of a partition.

        Abstract; not implemented here.
        """
        raise NotImplementedError()

    def ferrers_representation(self):
        """
        Returns the Ferrers diagram of a partition as a string,
        one row of '#' per part.

        Examples:
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([3,2,1], 6)
        >>> b = IntegerPartition([5,1,1], 7)
        >>> print a.ferrers_representation()
        ###
        ##
        #
        >>> print b.ferrers_representation()
        #####
        #
        #
        """
        return "\n".join(['#'*i for i in self.partition_array])
def random_integer_partition(n):
    """
    Generates a random integer partition of ``n``.

    Repeatedly draws a uniform random part between 1 and the remaining
    total until it is exhausted; the parts are returned sorted in
    ascending order.
    """
    parts = []
    remaining = n
    while remaining > 0:
        drawn = random.randint(1, remaining)
        parts.append(drawn)
        remaining -= drawn
    parts.sort()
    return parts
combinatorics/partitions: Added routine to compute the total
number of RGS possible for a given set size.
Signed-off-by: Saptarshi Mandal <13ec3c7bab52575f3a0cda6406c76269cc13b041@gmail.com>
from sympy.core import Basic, C
import random
class Partition(Basic):
    """
    This class represents an abstract set partition: a collection of
    disjoint lists (blocks) whose union equals a given superset.

    Concrete subclasses are expected to implement ``next``, ``previous``,
    ``_compare`` and ``rank``; all rich comparisons defined here are
    expressed in terms of ``_compare``.
    """
    def next(self):
        """
        Generates the next partition.

        Abstract here; subclasses must override.
        """
        raise NotImplementedError()

    def previous(self):
        """
        Generates the previous partition.

        Abstract here; subclasses must override.
        """
        raise NotImplementedError()

    @property
    def size(self):
        """
        Gets the size of the partition (the number of blocks).

        Examples:
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1,2],[3]],[1,2,3])
        >>> a.size
        2
        """
        # args[0] is the list of blocks given to __new__.
        return len(self.args[0])

    @property
    def partition(self):
        """
        Gets the partition itself (the list of blocks).

        Examples:
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1,2],[3]],[1,2,3])
        >>> a.partition
        [[1, 2], [3]]
        """
        return self.args[0]

    @property
    def partition_set(self):
        """
        Gets the underlying superset of the partition.

        Examples:
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1,2],[3]],[1,2,3])
        >>> a.partition_set
        [1, 2, 3]
        """
        return self.args[1]

    @property
    def partition_set_size(self):
        """
        Gets the size of the underlying superset of the partition.

        Examples:
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1,2],[3]],[1,2,3])
        >>> a.partition_set_size
        3
        """
        return len(self.args[1])

    def __str__(self):
        # Display only the blocks, not the superset.
        return str(self.partition)

    def __repr__(self):
        return str(self)

    def __new__(cls, *args, **kw_args):
        """
        Generates a new partition object.

        It also verifies that the arguments passed are valid (args[0] is
        a list of blocks, args[1] the superset) and raises ValueError
        otherwise.

        Examples:
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1,2],[3]],[1,2,3])
        >>> str(a)
        '[[1, 2], [3]]'
        """
        partition = args[0]
        super_set = args[1][:]  # copy, so sorting below does not mutate the caller's list
        check = []
        for part in partition:
            if not isinstance(part, list):
                raise ValueError("The input has been provided incorrectly")
            check.extend(part)
        # The blocks must contain exactly the superset's elements
        # (disjoint and covering); sorted-equality verifies both at once.
        check.sort()
        super_set.sort()
        if check != super_set:
            raise ValueError("The partition provided is not valid.")
        obj = Basic.__new__(cls, *args, **kw_args)
        return obj

    def _compare(self, other):
        """
        Compares two partitions.

        The basis for comparison of two partitions is rank.
        A partition with a lesser rank is greater than a
        partition with a greater rank.

        Abstract here; subclasses must override.
        """
        raise NotImplementedError()

    def __eq__(self, other):
        """
        Checks for equality of two partitions (via ``_compare``).
        """
        return self._compare(other) == 0

    def __ne__(self, other):
        """
        Checks for inequality of two partitions (via ``_compare``).
        """
        return self._compare(other) != 0

    def __gt__(self, other):
        """
        Checks if a partition is greater than the other.
        """
        return self._compare(other) > 0

    def __lt__(self, other):
        """
        Checks if a partition is less than the other.
        """
        return self._compare(other) < 0

    def __ge__(self, other):
        """
        Checks if a partition is greater than or equal to
        the other partition.
        """
        # NOTE: may invoke _compare twice (once per operand check).
        return self == other or self > other

    def __le__(self, other):
        """
        Checks if a partition is less than or equal to
        the other partition.
        """
        return self == other or self < other

    @property
    def rank(self):
        """
        Gets the rank of a partition.

        Abstract here; subclasses must override.
        """
        raise NotImplementedError()

    @property
    def RGS(self):
        """
        Returns the restricted growth string of the partition: for each
        element of the superset (in superset order), the index of the
        block that contains it. Blocks are numbered in iteration order.

        Examples:
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1,2],[3],[4,5]], [1,2,3,4,5])
        >>> a.RGS
        [0, 0, 1, 2, 2]
        >>> a = Partition([[1,4],[2],[3,5]], [1,2,3,4,5])
        >>> a.RGS
        [0, 1, 2, 0, 2]
        """
        rgs = [0] * self.partition_set_size
        a = 0
        for part in self.partition:
            for i in part:
                # list.index is a linear scan; fine for small supersets.
                rgs[self.partition_set.index(i)] = a
            a += 1
        return rgs
def from_RGS(rgs, superset):
    """
    Creates a set partition from a restricted growth string.

    The j-th symbol of ``rgs`` is the index of the block that the j-th
    element of ``superset`` belongs to.

    Examples:
    >>> from sympy.combinatorics.partitions import *
    >>> from_RGS([0,1,2,0,1],['a','b','c','d','e'])
    Partition([['a', 'd'], ['b', 'e'], ['c']], \
    ['a', 'b', 'c', 'd', 'e'])
    >>> a = Partition([[1,4],[2],[3,5]], [1,2,3,4,5])
    >>> from_RGS(a.RGS, a.partition_set)
    Partition([[1, 4], [2], [3, 5]], [1, 2, 3, 4, 5])
    """
    # One block per distinct symbol; symbols run 0..max(rgs).
    max_elem = max(rgs) + 1
    partition = [[] for i in xrange(max_elem)]
    # enumerate replaces the original's manual index counter.
    for j, block_index in enumerate(rgs):
        partition[block_index].append(superset[j])
    return Partition(partition, superset)
class IntegerPartition(Partition):
    """
    This class represents a partition of a positive integer:
    args[0] is the list of parts (kept sorted in descending order by
    ``__new__``) and args[1] is the integer being partitioned.
    """
    def next(self):
        """
        Generates the next partition.

        Abstract; not implemented here.
        """
        raise NotImplementedError()

    def previous(self):
        """
        Generates the previous partition.

        Abstract; not implemented here.
        """
        raise NotImplementedError()

    @property
    def size(self):
        """
        Gets the size of the partition (the number of parts).

        Examples:
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([1,3,4], 8)
        >>> a.size
        3
        """
        return len(self.args[0])

    @property
    def partition_set(self):
        """
        Gets the integer being partitioned. Note that unlike the base
        class this is a number, not a set.

        Examples:
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([1,3,4], 8)
        >>> a.partition_set
        8
        """
        return self.args[1]

    @property
    def partition_array(self):
        """
        Gets the array of parts from the partition object, in the
        descending order established by ``__new__``.

        Examples:
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([1,3,4], 8)
        >>> a.partition_array
        [4, 3, 1]
        """
        return self.args[0]

    @property
    def conjugate(self):
        """
        Find the conjugate of a partition.
        This is the vector that satisfies
        len(p) = max(conjugate(p)) and vice versa.

        Examples:
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([1,3,4], 8)
        >>> a.conjugate
        [3, 2, 2, 1]
        """
        result = []
        j = len(self.partition_array)
        if j <= 0:
            return result
        while True:
            # The k-th conjugate part counts how many parts are >= k;
            # j tracks that count as k (== len(result)+1) grows.
            result.append(j)
            while len(result) >= self.partition_array[j-1]:
                j -= 1
                if j == 0:
                    return result

    @property
    def is_SelfConjugate(self):
        """
        Checks if the conjugate of a partition equals itself.

        Examples:
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([6,3,3,2,1], 15)
        >>> a.is_SelfConjugate
        False
        >>> a = IntegerPartition([3,2,1], 6)
        >>> a.is_SelfConjugate
        True
        """
        return self.conjugate == self.partition_array

    @property
    def conjugate_partition(self):
        """
        Computes the conjugate partition of itself.

        Examples:
        >>> from sympy.combinatorics.partitions import \
        IntegerPartition
        >>> a = IntegerPartition([6,3,3,2,1], 15)
        >>> a.conjugate_partition
        [5, 4, 3, 1, 1, 1]
        >>> a = IntegerPartition([5,4,3,1,1,1], 15)
        >>> a.conjugate_partition
        [6, 3, 3, 2, 1]
        """
        # b[k-1] is set to the number of parts that are >= k; the
        # sentinel 0 appended to the parts terminates the scan.
        j = 1
        temp_arr = self.partition_array[:] + [0]
        k = temp_arr[0]
        b = [0] * (k)
        while k > 0:
            while k > temp_arr[j]:
                b[k - 1] = j
                k -= 1
            j += 1
        return b

    def __new__(cls, *args, **kw_args):
        """
        Generates a new partition object.

        It also verifies that args[0] is a list of parts summing to the
        integer args[1], and raises ValueError otherwise.
        """
        partition = args[0]
        integer_rep = args[1]
        if not isinstance(partition, list) or sum(partition) != integer_rep:
            raise ValueError("The partition is not valid")
        # NOTE(review): sorts the caller's list in place (descending);
        # callers that reuse the list they passed will see it reordered.
        list.sort(args[0], key = lambda x: -x)
        obj = Basic.__new__(cls, *args, **kw_args)
        return obj

    def _compare(self, other):
        """
        Compares two partitions by comparing prefix sums of the parts
        (majorization order over the first min(size, size) parts).

        NOTE(review): the doctest below calls ``a.compare(b)`` but the
        method is named ``_compare``; also, equal prefix sums fall
        through to ``return 1``, so ``__eq__`` can never be True —
        confirm whether that is the intended majorization semantics.

        Examples:
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([1,1,1,1,1], 5)
        >>> b = IntegerPartition([1,1,1,2], 5)
        >>> a.compare(b)
        -1
        >>> a < b
        True
        """
        k = min(self.size, other.size)
        val_self = 0
        val_other = 0
        for i in xrange(k):
            val_self += self.partition_array[i]
            val_other += other.partition_array[i]
            if val_self > val_other:
                return 1
            elif val_self < val_other:
                return -1
        return 1

    @property
    def rank(self):
        """
        Gets the rank of a partition.

        Abstract; not implemented here.
        """
        raise NotImplementedError()

    def ferrers_representation(self):
        """
        Returns the Ferrers diagram of a partition as a string,
        one row of '#' per part.

        Examples:
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([3,2,1], 6)
        >>> b = IntegerPartition([5,1,1], 7)
        >>> print a.ferrers_representation()
        ###
        ##
        #
        >>> print b.ferrers_representation()
        #####
        #
        #
        """
        return "\n".join(['#'*i for i in self.partition_array])
def random_integer_partition(n):
    """
    Generates a random integer partition of ``n``.

    Repeatedly draws a uniform random part between 1 and the remaining
    total until it is exhausted; the parts are returned sorted in
    ascending order.
    """
    parts = []
    remaining = n
    while remaining > 0:
        drawn = random.randint(1, remaining)
        parts.append(drawn)
        remaining -= drawn
    parts.sort()
    return parts
def RGS_enum(m):
    """
    RGS_enum computes the total number of restricted growth strings
    possible for a superset of size m (the m-th Bell number).

    Examples:
    >>> from sympy.combinatorics.partitions import RGS_enum
    >>> RGS_enum(4)
    15
    >>> RGS_enum(5)
    52
    >>> RGS_enum(6)
    203
    """
    # The original rebound m (m += 1) before its guards, which made the
    # "m < 0" branch unreachable and "m == 0" actually mean input -1.
    # These equivalent guards say what they mean.
    if m < -1:
        return 0
    elif m == -1:
        return 1
    # b[j] accumulates Bell(j) via the recurrence
    # Bell(j) = sum_{i=0}^{j-1} binomial(j-1, i) * Bell(i);
    # the initial 1 in each slot accounts for the i == 0 term.
    b = [1] * (m + 1)
    for j in xrange(1, m + 1):
        for i in xrange(1, j):
            b[j] += C.binomial(j - 1, i) * b[i]
    return b[m]
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Eager mode tests for the experimental `replicate` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class LocalReplicateTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests `tf.data` experimental `replicate` across local CPU devices."""

  def __init__(self, methodName="runTest"):  # pylint: disable=invalid-name
    super(LocalReplicateTest, self).__init__(methodName)
    # device0 hosts the source dataset; device1/device2 receive replicas.
    self._device0 = "/device:CPU:0"
    self._device1 = "/device:CPU:1"
    self._device2 = "/device:CPU:2"

  @combinations.generate(
      combinations.combine(tf_api_version=[1], mode=["graph", "eager"]))
  def testBasic(self):
    """Replicating a stateless dataset yields identical elements everywhere."""
    with ops.device(self._device0):
      dataset0 = dataset_ops.Dataset.range(100)
    replicated_ds = distribute.replicate(dataset0,
                                         [self._device1, self._device2])
    dataset1 = replicated_ds[self._device1]
    dataset2 = replicated_ds[self._device2]
    with ops.device(self._device0):
      self.assertDatasetProduces(dataset0, range(100))
    with ops.device(self._device1):
      self.assertDatasetProduces(dataset1, range(100))
    with ops.device(self._device2):
      self.assertDatasetProduces(dataset2, range(100))

  @combinations.generate(
      combinations.combine(tf_api_version=[1], mode=["graph", "eager"]))
  def testVariableInput(self):
    """Replicas sharing a variable see its state advance across replicas:
    the asserted ranges (1-100, 101-200, 201-300) show the counter is
    shared, not copied."""
    with ops.device(self._device0):
      counter_var = variable_scope.get_variable(
          "counter", (), dtypes.int32, use_resource=True)
      dataset0 = dataset_ops.Dataset.range(100).map(
          lambda _: counter_var.assign_add(1))
    replicated_ds = distribute.replicate(dataset0,
                                         [self._device1, self._device2])
    dataset1 = replicated_ds[self._device1]
    dataset2 = replicated_ds[self._device2]
    self.evaluate(counter_var.initializer)
    with ops.device(self._device0):
      self.assertDatasetProduces(
          dataset0, range(1, 101), requires_initialization=True)
    with ops.device(self._device1):
      self.assertDatasetProduces(
          dataset1, range(101, 201), requires_initialization=True)
    with ops.device(self._device2):
      self.assertDatasetProduces(
          dataset2, range(201, 301), requires_initialization=True)

  @combinations.generate(
      combinations.combine(tf_api_version=[1], mode=["graph", "eager"]))
  def testExternalStatePolicyIgnore(self):
    """With policy IGNORE, a stateful (random) dataset replicates without
    error; only successful iteration is checked, not element values."""
    with ops.device(self._device0):
      dataset0 = dataset_ops.Dataset.range(100).map(
          lambda _: random_ops.random_uniform(  # pylint:disable=g-long-lambda
              [],
              minval=1,
              maxval=10,
              dtype=dtypes.float32))
      opt = dataset_ops.Options()
      opt.experimental_external_state_policy = (
          distribute_options.ExternalStatePolicy.IGNORE)
      dataset0 = dataset0.with_options(opt)
    replicated_ds = distribute.replicate(dataset0,
                                         [self._device1, self._device2])
    dataset1 = replicated_ds[self._device1]
    dataset2 = replicated_ds[self._device2]
    with ops.device(self._device0):
      get_next0 = self.getNext(dataset0)
    with ops.device(self._device1):
      get_next1 = self.getNext(dataset1)
    with ops.device(self._device2):
      get_next2 = self.getNext(dataset2)
    for _ in range(100):
      self.evaluate(get_next0())
      self.evaluate(get_next1())
      self.evaluate(get_next2())

  @combinations.generate(
      combinations.combine(tf_api_version=[1], mode=["graph", "eager"]))
  def testExternalStatePolicyWarn(self):
    """With policy WARN, a stateful (random) dataset still replicates and
    iterates; same structure as the IGNORE case."""
    with ops.device(self._device0):
      dataset0 = dataset_ops.Dataset.range(100).map(
          lambda _: random_ops.random_uniform(  # pylint:disable=g-long-lambda
              [],
              minval=1,
              maxval=10,
              dtype=dtypes.float32))
      opt = dataset_ops.Options()
      opt.experimental_external_state_policy = (
          distribute_options.ExternalStatePolicy.WARN)
      dataset0 = dataset0.with_options(opt)
    replicated_ds = distribute.replicate(dataset0,
                                         [self._device1, self._device2])
    dataset1 = replicated_ds[self._device1]
    dataset2 = replicated_ds[self._device2]
    with ops.device(self._device0):
      get_next0 = self.getNext(dataset0)
    with ops.device(self._device1):
      get_next1 = self.getNext(dataset1)
    with ops.device(self._device2):
      get_next2 = self.getNext(dataset2)
    for _ in range(100):
      self.evaluate(get_next0())
      self.evaluate(get_next1())
      self.evaluate(get_next2())

  @combinations.generate(
      combinations.combine(tf_api_version=[1], mode=["graph", "eager"]))
  def testExternalStatePolicyFail(self):
    """With policy FAIL, replicating a stateful dataset must raise
    FailedPreconditionError."""
    with ops.device(self._device0):
      dataset0 = dataset_ops.Dataset.range(100).map(
          lambda _: random_ops.random_uniform(  # pylint:disable=g-long-lambda
              [],
              minval=1,
              maxval=10,
              dtype=dtypes.float32))
      opt = dataset_ops.Options()
      opt.experimental_external_state_policy = (
          distribute_options.ExternalStatePolicy.FAIL)
      dataset0 = dataset0.with_options(opt)
    # NOTE(review): replicate() is expected to raise, so every statement
    # below it inside this assertRaises block never executes — presumably
    # written for symmetry with the IGNORE/WARN tests; confirm intent.
    with self.assertRaises(errors.FailedPreconditionError):
      replicated_ds = distribute.replicate(dataset0,
                                           [self._device1, self._device2])
      dataset1 = replicated_ds[self._device1]
      dataset2 = replicated_ds[self._device2]
      with ops.device(self._device0):
        get_next0 = self.getNext(dataset0)
      with ops.device(self._device1):
        get_next1 = self.getNext(dataset1)
      with ops.device(self._device2):
        get_next2 = self.getNext(dataset2)
      for _ in range(100):
        self.evaluate(get_next0())
        self.evaluate(get_next1())
        self.evaluate(get_next2())
JOB_NAME = "remote_device"
def _get_server_def(job_name, local_server_port, remote_server_addresses,
                    task_index):
  """Returns a server def with a single job + multiple tasks."""
  cluster_def = cluster_pb2.ClusterDef()
  job_def = cluster_def.job.add()
  job_def.name = job_name
  # Task 0 is the local server; remote tasks follow, numbered from 1.
  job_def.tasks[0] = "localhost:%d" % local_server_port
  for task_id, address in enumerate(remote_server_addresses, start=1):
    job_def.tasks[task_id] = address
  return tensorflow_server_pb2.ServerDef(
      cluster=cluster_def,
      job_name=job_name,
      task_index=task_index,
      protocol="grpc")
# Pure eager mode test that sets up a cluster of processes.
class RemoteReplicateTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests `distribute.replicate` across tasks connected over gRPC."""

  def __init__(self, methodName="runTest"):  # pylint: disable=invalid-name
    super(RemoteReplicateTest, self).__init__(methodName)
    # Two in-process gRPC servers act as the remote tasks; the local task
    # (task 0) is registered with the cluster in setUp().
    self._cached_server1 = server_lib.Server.create_local_server()
    self._cached_server2 = server_lib.Server.create_local_server()
    # Strip the scheme: ServerDef task addresses are plain host:port.
    self._cached_server1_target = self._cached_server1.target[len("grpc://"):]
    self._cached_server2_target = self._cached_server2.target[len("grpc://"):]
    self._device0 = "/job:%s/replica:0/task:0/device:CPU:0" % JOB_NAME
    self._device1 = "/job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME
    self._device2 = "/job:%s/replica:0/task:2/device:CPU:0" % JOB_NAME

  def setUp(self):
    super(RemoteReplicateTest, self).setUp()
    # Start the local server and attach it to the cluster as task 0.
    local_port = pywrap_tfe.TF_PickUnusedPortOrDie()
    context.set_server_def(
        server_def=_get_server_def(
            JOB_NAME,
            local_server_port=local_port,
            remote_server_addresses=[
                self._cached_server1_target, self._cached_server2_target
            ],
            task_index=0))

  @combinations.generate(
      combinations.combine(tf_api_version=[2], mode=["eager"]))
  def testBasic(self):
    """Remote replicas yield the same elements as the source dataset."""
    with ops.device(self._device0):
      dataset0 = dataset_ops.Dataset.range(100)
    replicated_ds = distribute.replicate(dataset0,
                                         [self._device1, self._device2])
    dataset1 = replicated_ds[self._device1]
    dataset2 = replicated_ds[self._device2]
    with ops.device(self._device0):
      self.assertDatasetProduces(dataset0, range(100))
    with ops.device(self._device1):
      self.assertDatasetProduces(dataset1, range(100))
    with ops.device(self._device2):
      self.assertDatasetProduces(dataset2, range(100))

  @combinations.generate(
      combinations.combine(tf_api_version=[2], mode=["eager"]))
  def testMap(self):
    """Transformations applied before replicate() are preserved remotely."""
    with ops.device(self._device0):
      dataset0 = dataset_ops.Dataset.range(100).map(lambda x: x * 2)
    replicated_ds = distribute.replicate(dataset0,
                                         [self._device1, self._device2])
    dataset1 = replicated_ds[self._device1]
    dataset2 = replicated_ds[self._device2]
    with ops.device(self._device0):
      self.assertDatasetProduces(dataset0, range(0, 200, 2))
    with ops.device(self._device1):
      self.assertDatasetProduces(dataset1, range(0, 200, 2))
    with ops.device(self._device2):
      self.assertDatasetProduces(dataset2, range(0, 200, 2))

  @combinations.generate(
      combinations.combine(tf_api_version=[2], mode=["eager"]))
  def testVariableInput(self):
    """Replicating a variable-capturing dataset to remote devices fails."""
    with ops.device(self._device0):
      counter_var = variable_scope.get_variable(
          "counter", (), dtypes.int32, use_resource=True)
      dataset0 = dataset_ops.Dataset.range(100).map(
          lambda _: counter_var.assign_add(1))
    # Everything below runs under assertRaises: the error may surface at
    # replicate() time or only once iteration starts.
    with self.assertRaises(errors.InvalidArgumentError):
      replicated_ds = distribute.replicate(dataset0,
                                           [self._device1, self._device2])
      dataset1 = replicated_ds[self._device1]
      dataset2 = replicated_ds[self._device2]
      with ops.device(self._device0):
        get_next0 = self.getNext(dataset0)
      with ops.device(self._device1):
        get_next1 = self.getNext(dataset1)
      with ops.device(self._device2):
        get_next2 = self.getNext(dataset2)
      for _ in range(100):
        self.evaluate(get_next0())
        self.evaluate(get_next1())
        self.evaluate(get_next2())
if __name__ == "__main__":
  # Three virtual CPUs back the three logical devices the tests address.
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(device_count={"CPU": 3}))
  test.main()
[tf.data] Enabling a newly passing test.
PiperOrigin-RevId: 305730251
Change-Id: I273537bcf89023d263485e0aac951dd14ad21044
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Eager mode tests for the experimental `replicate` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class LocalReplicateTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests `distribute.replicate` across local CPU devices."""

  def __init__(self, methodName="runTest"):  # pylint: disable=invalid-name
    super(LocalReplicateTest, self).__init__(methodName)
    # Three logical CPU devices; see the ConfigProto in __main__.
    self._device0 = "/device:CPU:0"
    self._device1 = "/device:CPU:1"
    self._device2 = "/device:CPU:2"

  @combinations.generate(
      combinations.combine(tf_api_version=[1], mode=["graph", "eager"]))
  def testBasic(self):
    """Replicas yield the same elements as the source dataset."""
    with ops.device(self._device0):
      dataset0 = dataset_ops.Dataset.range(100)
    replicated_ds = distribute.replicate(dataset0,
                                         [self._device1, self._device2])
    dataset1 = replicated_ds[self._device1]
    dataset2 = replicated_ds[self._device2]
    with ops.device(self._device0):
      self.assertDatasetProduces(dataset0, range(100))
    with ops.device(self._device1):
      self.assertDatasetProduces(dataset1, range(100))
    with ops.device(self._device2):
      self.assertDatasetProduces(dataset2, range(100))

  @combinations.generate(
      combinations.combine(tf_api_version=[1], mode=["graph", "eager"]))
  def testVariableInput(self):
    """Replicas share the counter variable, so the ranges continue across them."""
    with ops.device(self._device0):
      counter_var = variable_scope.get_variable(
          "counter", (), dtypes.int32, use_resource=True)
      dataset0 = dataset_ops.Dataset.range(100).map(
          lambda _: counter_var.assign_add(1))
    replicated_ds = distribute.replicate(dataset0,
                                         [self._device1, self._device2])
    dataset1 = replicated_ds[self._device1]
    dataset2 = replicated_ds[self._device2]
    self.evaluate(counter_var.initializer)
    with ops.device(self._device0):
      self.assertDatasetProduces(
          dataset0, range(1, 101), requires_initialization=True)
    with ops.device(self._device1):
      self.assertDatasetProduces(
          dataset1, range(101, 201), requires_initialization=True)
    with ops.device(self._device2):
      self.assertDatasetProduces(
          dataset2, range(201, 301), requires_initialization=True)

  @combinations.generate(
      combinations.combine(tf_api_version=[1], mode=["graph", "eager"]))
  def testExternalStatePolicyIgnore(self):
    """IGNORE policy lets a stateful (random) dataset be replicated."""
    with ops.device(self._device0):
      dataset0 = dataset_ops.Dataset.range(100).map(
          lambda _: random_ops.random_uniform(  # pylint:disable=g-long-lambda
              [],
              minval=1,
              maxval=10,
              dtype=dtypes.float32))
      opt = dataset_ops.Options()
      opt.experimental_external_state_policy = (
          distribute_options.ExternalStatePolicy.IGNORE)
      dataset0 = dataset0.with_options(opt)
    replicated_ds = distribute.replicate(dataset0,
                                         [self._device1, self._device2])
    dataset1 = replicated_ds[self._device1]
    dataset2 = replicated_ds[self._device2]
    with ops.device(self._device0):
      get_next0 = self.getNext(dataset0)
    with ops.device(self._device1):
      get_next1 = self.getNext(dataset1)
    with ops.device(self._device2):
      get_next2 = self.getNext(dataset2)
    # Values are random, so only check that each replica produces 100 elements.
    for _ in range(100):
      self.evaluate(get_next0())
      self.evaluate(get_next1())
      self.evaluate(get_next2())

  @combinations.generate(
      combinations.combine(tf_api_version=[1], mode=["graph", "eager"]))
  def testExternalStatePolicyWarn(self):
    """WARN policy also allows replication of stateful datasets."""
    with ops.device(self._device0):
      dataset0 = dataset_ops.Dataset.range(100).map(
          lambda _: random_ops.random_uniform(  # pylint:disable=g-long-lambda
              [],
              minval=1,
              maxval=10,
              dtype=dtypes.float32))
      opt = dataset_ops.Options()
      opt.experimental_external_state_policy = (
          distribute_options.ExternalStatePolicy.WARN)
      dataset0 = dataset0.with_options(opt)
    replicated_ds = distribute.replicate(dataset0,
                                         [self._device1, self._device2])
    dataset1 = replicated_ds[self._device1]
    dataset2 = replicated_ds[self._device2]
    with ops.device(self._device0):
      get_next0 = self.getNext(dataset0)
    with ops.device(self._device1):
      get_next1 = self.getNext(dataset1)
    with ops.device(self._device2):
      get_next2 = self.getNext(dataset2)
    for _ in range(100):
      self.evaluate(get_next0())
      self.evaluate(get_next1())
      self.evaluate(get_next2())

  @combinations.generate(
      combinations.combine(tf_api_version=[1], mode=["graph", "eager"]))
  def testExternalStatePolicyFail(self):
    """FAIL policy rejects replication of stateful datasets."""
    with ops.device(self._device0):
      dataset0 = dataset_ops.Dataset.range(100).map(
          lambda _: random_ops.random_uniform(  # pylint:disable=g-long-lambda
              [],
              minval=1,
              maxval=10,
              dtype=dtypes.float32))
      opt = dataset_ops.Options()
      opt.experimental_external_state_policy = (
          distribute_options.ExternalStatePolicy.FAIL)
      dataset0 = dataset0.with_options(opt)
    # The error may surface at replicate() time or once iteration starts,
    # so the whole flow runs under assertRaises.
    with self.assertRaises(errors.FailedPreconditionError):
      replicated_ds = distribute.replicate(dataset0,
                                           [self._device1, self._device2])
      dataset1 = replicated_ds[self._device1]
      dataset2 = replicated_ds[self._device2]
      with ops.device(self._device0):
        get_next0 = self.getNext(dataset0)
      with ops.device(self._device1):
        get_next1 = self.getNext(dataset1)
      with ops.device(self._device2):
        get_next2 = self.getNext(dataset2)
      for _ in range(100):
        self.evaluate(get_next0())
        self.evaluate(get_next1())
        self.evaluate(get_next2())
JOB_NAME = "remote_device"


def _get_server_def(job_name, local_server_port, remote_server_addresses,
                    task_index):
  """Returns a server def with a single job + multiple tasks.

  Args:
    job_name: Name of the single job in the cluster.
    local_server_port: Port for task 0, which runs on localhost.
    remote_server_addresses: host:port strings for tasks 1..N, in order.
    task_index: Index of the task this ServerDef describes.
  """
  cluster_def = cluster_pb2.ClusterDef()
  job_def = cluster_def.job.add()
  job_def.name = job_name
  # Task 0 is always the local server.
  job_def.tasks[0] = "localhost:%d" % local_server_port
  for i, remote_server_address in enumerate(remote_server_addresses, start=1):
    job_def.tasks[i] = remote_server_address
  server_def = tensorflow_server_pb2.ServerDef(
      cluster=cluster_def,
      job_name=job_name,
      task_index=task_index,
      protocol="grpc")
  return server_def
# Pure eager mode test that sets up a cluster of processes.
class RemoteReplicateTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests `distribute.replicate` across tasks connected over gRPC."""

  def __init__(self, methodName="runTest"):  # pylint: disable=invalid-name
    super(RemoteReplicateTest, self).__init__(methodName)
    # Two in-process gRPC servers act as the remote tasks; the local task
    # (task 0) is registered with the cluster in setUp().
    self._cached_server1 = server_lib.Server.create_local_server()
    self._cached_server2 = server_lib.Server.create_local_server()
    # Strip the scheme: ServerDef task addresses are plain host:port.
    self._cached_server1_target = self._cached_server1.target[len("grpc://"):]
    self._cached_server2_target = self._cached_server2.target[len("grpc://"):]
    self._device0 = "/job:%s/replica:0/task:0/device:CPU:0" % JOB_NAME
    self._device1 = "/job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME
    self._device2 = "/job:%s/replica:0/task:2/device:CPU:0" % JOB_NAME

  def setUp(self):
    super(RemoteReplicateTest, self).setUp()
    # Start the local server and attach it to the cluster as task 0.
    local_port = pywrap_tfe.TF_PickUnusedPortOrDie()
    context.set_server_def(
        server_def=_get_server_def(
            JOB_NAME,
            local_server_port=local_port,
            remote_server_addresses=[
                self._cached_server1_target, self._cached_server2_target
            ],
            task_index=0))

  @combinations.generate(
      combinations.combine(tf_api_version=[2], mode=["eager"]))
  def testBasic(self):
    """Remote replicas yield the same elements as the source dataset."""
    with ops.device(self._device0):
      dataset0 = dataset_ops.Dataset.range(100)
    replicated_ds = distribute.replicate(dataset0,
                                         [self._device1, self._device2])
    dataset1 = replicated_ds[self._device1]
    dataset2 = replicated_ds[self._device2]
    with ops.device(self._device0):
      self.assertDatasetProduces(dataset0, range(100))
    with ops.device(self._device1):
      self.assertDatasetProduces(dataset1, range(100))
    with ops.device(self._device2):
      self.assertDatasetProduces(dataset2, range(100))

  @combinations.generate(
      combinations.combine(tf_api_version=[2], mode=["eager"]))
  def testMap(self):
    """Transformations applied before replicate() are preserved remotely."""
    with ops.device(self._device0):
      dataset0 = dataset_ops.Dataset.range(100).map(lambda x: x * 2)
    replicated_ds = distribute.replicate(dataset0,
                                         [self._device1, self._device2])
    dataset1 = replicated_ds[self._device1]
    dataset2 = replicated_ds[self._device2]
    with ops.device(self._device0):
      self.assertDatasetProduces(dataset0, range(0, 200, 2))
    with ops.device(self._device1):
      self.assertDatasetProduces(dataset1, range(0, 200, 2))
    with ops.device(self._device2):
      self.assertDatasetProduces(dataset2, range(0, 200, 2))

  @combinations.generate(
      combinations.combine(tf_api_version=[2], mode=["eager"]))
  def testVariableInput(self):
    """Replicas share the counter variable, so the ranges continue across them."""
    with ops.device(self._device0):
      counter_var = variable_scope.get_variable(
          "counter", (), dtypes.int32, use_resource=True)
      dataset0 = dataset_ops.Dataset.range(100).map(
          lambda _: counter_var.assign_add(1))
    replicated_ds = distribute.replicate(dataset0,
                                         [self._device1, self._device2])
    dataset1 = replicated_ds[self._device1]
    dataset2 = replicated_ds[self._device2]
    with ops.device(self._device0):
      self.assertDatasetProduces(
          dataset0, range(1, 101), requires_initialization=True)
    with ops.device(self._device1):
      self.assertDatasetProduces(
          dataset1, range(101, 201), requires_initialization=True)
    with ops.device(self._device2):
      self.assertDatasetProduces(
          dataset2, range(201, 301), requires_initialization=True)
if __name__ == "__main__":
  # Three virtual CPUs back the three logical devices the tests address.
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(device_count={"CPU": 3}))
  test.main()
|
#!/usr/bin/python
from logging import warn
import pytz
import pymysql
import os
config = ''
def connectToDb():
    """Open a MySQL connection and return a (connection, cursor) pair.

    Prefers the Bamboo-provided credentials when the bamboo_IFS_MYSQL_*
    environment variables are present; otherwise falls back to the local
    docker defaults.
    """
    try:
        # Probe for the Bamboo environment; a KeyError on this (or any of
        # the reads below) selects the local configuration instead.
        os.environ['bamboo_IFS_MYSQL_USER_NAME']
        print("Using server mysql config")
        settings = dict(
            user=os.environ['bamboo_IFS_MYSQL_USER_NAME'],
            passwd=os.environ['bamboo_IFS_MYSQL_PASSWORD'],
            host=os.environ['bamboo_IFS_MYSQL_HOSTNAME'],
            db=os.environ['bamboo_IFS_MYSQL_DB_NAME'],
            port=3306,
        )
    except KeyError:
        print("Using local mysql config")
        settings = dict(
            user='root',
            passwd='password',
            host='ifs-database',
            db='ifs',
            port=3306,
        )
    connection = pymysql.connect(**settings)
    return connection, connection.cursor()
db, cursor = connectToDb()
# execute SQL query using execute() method, to fetch the Competitions
cursor.execute("SELECT `id`,`name` FROM competition")
# Fetch all competition records into a name -> id map (ids kept as strings
# because the test framework compares them as text).
competition_ids = {}
for comp in cursor.fetchall():
    competitionId = comp[0]
    competitionName = comp[1]
    if comp[1] is None:
        # Unnamed competitions are filed under the literal key 'none'.
        competition_ids['none'] = str(competitionId)
    else:
        competition_ids[competitionName] = str(competitionId)
# Build an email -> user id map.
cursor.execute("SELECT `id`,`email` FROM user")
user_ids = {}
for user in cursor.fetchall():
    userId = user[0]
    userEmail = user[1]
    user_ids[userEmail] = str(userId)
# execute SQL query using execute() method, to fetch the Applications
cursor.execute("SELECT `id`,`name` FROM application")
# Fetch the application records into a name -> id map.
application_ids = {}
for app in cursor.fetchall():
    applicationId = app[0]
    applicationName = app[1]
    application_ids[applicationName] = str(applicationId)
# execute SQL query using execute() method, to fetch the Application Assessments
cursor.execute("select p.id, pa.email, a.name from application a join process p on p.target_id = a.id and p.process_type = 'Assessment' join process_role pr on pr.id = p.participant_id join user pa on pa.id = pr.user_id")
# Fetch the assessment records and store as a map of application names to maps of assessor email addresses and their assessment ids
assessment_ids = {}
for ass in cursor.fetchall():
    assessmentId = ass[0]
    assessorEmail = ass[1]
    applicationName = ass[2]
    if applicationName in assessment_ids:
        # Application already seen: add this assessor to its inner map.
        existing_record = assessment_ids[applicationName]
        existing_record[assessorEmail] = str(assessmentId)
    else:
        first_record = {}
        first_record[assessorEmail] = str(assessmentId)
        assessment_ids[applicationName] = first_record
# execute SQL query using execute() method, to fetch the Projects
cursor.execute("SELECT `id`,`name` FROM project")
# Fetch the project records into a name -> id map.
project_ids = {}
for proj in cursor.fetchall():
    projectId = proj[0]
    projectName = proj[1]
    project_ids[projectName] = str(projectId)
# execute SQL query using execute() method, to fetch the Organisations
cursor.execute("SELECT `id`,`name` FROM organisation")
# Fetch the Organisation records into a name -> id map.
organisation_ids = {}
for org in cursor.fetchall():
    organisationId = org[0]
    organisationName = org[1]
    organisation_ids[organisationName] = str(organisationId)
# execute SQL query using execute() method, to fetch the Competition milestones.
# Result shape: competition_milestones[competitionId][milestoneType] -> dict of
# pre-formatted date strings.
cursor.execute("SELECT c.id, c.name, m.type, m.date FROM competition c JOIN milestone m ON m.competition_id = c.id")
competition_milestones = {}
for comp in cursor.fetchall():
    competitionId = comp[0]
    competitionName = comp[1]
    milestoneType = comp[2]
    milestoneDateDb = comp[3]
    # NOTE(review): assumes milestone datetimes are stored as naive UTC and
    # should be displayed in UK local time — confirm against the schema.
    utc = pytz.utc
    bst = pytz.timezone('Europe/London')
    milestoneDate = utc.localize(milestoneDateDb).astimezone(bst) if milestoneDateDb is not None else None
    # Re-use any partially-built inner dicts so multiple rows merge correctly.
    milestones_for_competition = competition_milestones[competitionId] if competitionId in competition_milestones else {}
    dates_for_milestone = milestones_for_competition[milestoneType] if milestoneType in milestones_for_competition else {}
    # NOTE(review): the '%-d' / '%-I' (no-zero-padding) directives are glibc
    # extensions and will fail on Windows strftime.
    dates_for_milestone['rawDate'] = milestoneDate
    dates_for_milestone['simpleDate'] = milestoneDate.strftime('%Y-%m-%d') if milestoneDate is not None else None
    dates_for_milestone['prettyDayMonth'] = milestoneDate.strftime('%-d %B') if milestoneDate is not None else None
    dates_for_milestone['prettyDate'] = milestoneDate.strftime('%-d %B %Y') if milestoneDate is not None else None
    dates_for_milestone['prettyDateTime'] = milestoneDate.strftime('%-d %B %Y %-I:%M') + milestoneDate.strftime('%p').lower() if milestoneDate is not None else None
    dates_for_milestone['prettyLongDate'] = milestoneDate.strftime('%A %-d %B %Y') if milestoneDate is not None else None
    dates_for_milestone['prettyLongDateTime'] = milestoneDate.strftime('%A %-d %B %Y %-I:%M') + milestoneDate.strftime('%p').lower() if milestoneDate is not None else None
    dates_for_milestone['prettyLongTimeDate'] = milestoneDate.strftime('%-I:%M') + milestoneDate.strftime('%p').lower() + milestoneDate.strftime('%A %-d %B %Y') if milestoneDate is not None else None
    # NOTE(review): 'dateTimeDb' uses 12-hour %-I with no am/pm marker, which
    # looks wrong for a DB-style datetime (expected %H) — confirm intended.
    dates_for_milestone['dateTimeDb'] = milestoneDate.strftime('%Y-%m-%d %-I:%M:%S') if milestoneDate is not None else None
    dates_for_milestone['day'] = milestoneDate.strftime('%-d') if milestoneDate is not None else None
    dates_for_milestone['month'] = milestoneDate.strftime('%-m') if milestoneDate is not None else None
    dates_for_milestone['year'] = milestoneDate.strftime('%Y') if milestoneDate is not None else None
    competition_milestones[competitionId] = milestones_for_competition
    milestones_for_competition[milestoneType] = dates_for_milestone
def _milestone_field(competitionId, milestoneType, field):
    """Shared lookup for one pre-formatted milestone field.

    Raises KeyError when the competition, milestone type or field is unknown.
    """
    return competition_milestones[competitionId][milestoneType][field]


def getSimpleMilestoneDate(competitionId, milestoneType):
    """Return the milestone date as e.g. '2002-03-28'."""
    return _milestone_field(competitionId, milestoneType, 'simpleDate')


def getPrettyMilestoneDayMonth(competitionId, milestoneType):
    """Return the milestone date as e.g. '4 February'."""
    return _milestone_field(competitionId, milestoneType, 'prettyDayMonth')


def getPrettyMilestoneDate(competitionId, milestoneType):
    """Return the milestone date as e.g. '4 February 2002'."""
    return _milestone_field(competitionId, milestoneType, 'prettyDate')


def getPrettyMilestoneDateTime(competitionId, milestoneType):
    """Return the milestone date as e.g. '4 February 2002 2:04am'."""
    return _milestone_field(competitionId, milestoneType, 'prettyDateTime')


def getPrettyLongMilestoneDate(competitionId, milestoneType):
    """Return the milestone date as e.g. 'Monday 4 February 2002'."""
    return _milestone_field(competitionId, milestoneType, 'prettyLongDate')


def getPrettyLongMilestoneDateTime(competitionId, milestoneType):
    """Return the milestone date as e.g. 'Monday 4 February 2002 2:04am'."""
    return _milestone_field(competitionId, milestoneType, 'prettyLongDateTime')


def getPrettyLongMilestoneTimeDate(competitionId, milestoneType):
    """Return the milestone time followed by the long date."""
    return _milestone_field(competitionId, milestoneType, 'prettyLongTimeDate')


def getMilestoneDateTimeDb(competitionId, milestoneType):
    """Return the milestone date in the DB-style datetime format."""
    return _milestone_field(competitionId, milestoneType, 'dateTimeDb')


def getMilestoneDay(competitionId, milestoneType):
    """Return the day of month without zero padding, e.g. '4'."""
    return _milestone_field(competitionId, milestoneType, 'day')


def getMilestoneMonth(competitionId, milestoneType):
    """Return the month number without zero padding, e.g. '2'."""
    return _milestone_field(competitionId, milestoneType, 'month')


def getMilestoneYear(competitionId, milestoneType):
    """Return the four-digit year, e.g. '2002'."""
    return _milestone_field(competitionId, milestoneType, 'year')
# disconnect from server: all startup lookups are now cached in module-level
# dicts; helper functions below open fresh connections when they need one.
cursor.close()
db.close()
# different from the project_ids dictionary that we create during startup, this method can be used to look up
# new project ids that were not present during the start of the test runs
def getProjectId(name):
    """Return the current id of the project called `name` via a fresh DB query."""
    db, cursor = connectToDb()
    # Parameterised query: the previous string concatenation was open to SQL
    # injection and broke on project names containing a quote.
    cursor.execute("SELECT `id` FROM project where `name` = %s", (name,))
    id = cursor.fetchone()[0]
    # disconnect from server
    cursor.close()
    db.close()
    return id
# One can use this function in order to request the User Id of a user, by providing his email address.
def getUserId(email):
    """Return the id of the user with the given email via a fresh DB query."""
    db, cursor = connectToDb()
    # Parameterised query: the previous string concatenation was open to SQL
    # injection and broke on addresses containing a quote.
    cursor.execute("SELECT `id` FROM user where `email` = %s", (email,))
    id = cursor.fetchone()[0]
    # disconnect from server
    cursor.close()
    db.close()
    return id
IFS-2986 adding comments explaining the human-readable date formats
#!/usr/bin/python
from logging import warn
import pytz
import pymysql
import os
config = ''
def connectToDb():
    """Open a MySQL connection and return a (connection, cursor) pair.

    Uses the Bamboo-provided credentials when the bamboo_IFS_MYSQL_*
    environment variables are set, otherwise falls back to the local
    docker defaults.
    """
    try:
        # KeyError here (or on any env read below) selects the local config.
        os.environ['bamboo_IFS_MYSQL_USER_NAME']
        print("Using server mysql config")
        config = {
            'user': os.environ['bamboo_IFS_MYSQL_USER_NAME'],
            'passwd': os.environ['bamboo_IFS_MYSQL_PASSWORD'],
            'host': os.environ['bamboo_IFS_MYSQL_HOSTNAME'],
            'db': os.environ['bamboo_IFS_MYSQL_DB_NAME'],
            'port': 3306,
        }
    except KeyError:
        print("Using local mysql config")
        config = {
            'user': 'root',
            'passwd': 'password',
            'host': 'ifs-database',
            'db': 'ifs',
            'port': 3306,
        }
    # Open database connection
    db = pymysql.connect(**config)
    # prepare a cursor object using cursor() method
    cursor = db.cursor()
    return db, cursor
db, cursor = connectToDb()
# execute SQL query using execute() method, to fetch the Competitions
cursor.execute("SELECT `id`,`name` FROM competition")
# Fetch all competition records into a name -> id map (ids kept as strings
# because the test framework compares them as text).
competition_ids = {}
for comp in cursor.fetchall():
    competitionId = comp[0]
    competitionName = comp[1]
    if comp[1] is None:
        # Unnamed competitions are filed under the literal key 'none'.
        competition_ids['none'] = str(competitionId)
    else:
        competition_ids[competitionName] = str(competitionId)
# Build an email -> user id map.
cursor.execute("SELECT `id`,`email` FROM user")
user_ids = {}
for user in cursor.fetchall():
    userId = user[0]
    userEmail = user[1]
    user_ids[userEmail] = str(userId)
# execute SQL query using execute() method, to fetch the Applications
cursor.execute("SELECT `id`,`name` FROM application")
# Fetch the application records into a name -> id map.
application_ids = {}
for app in cursor.fetchall():
    applicationId = app[0]
    applicationName = app[1]
    application_ids[applicationName] = str(applicationId)
# execute SQL query using execute() method, to fetch the Application Assessments
cursor.execute("select p.id, pa.email, a.name from application a join process p on p.target_id = a.id and p.process_type = 'Assessment' join process_role pr on pr.id = p.participant_id join user pa on pa.id = pr.user_id")
# Fetch the assessment records and store as a map of application names to maps of assessor email addresses and their assessment ids
assessment_ids = {}
for ass in cursor.fetchall():
    assessmentId = ass[0]
    assessorEmail = ass[1]
    applicationName = ass[2]
    if applicationName in assessment_ids:
        # Application already seen: add this assessor to its inner map.
        existing_record = assessment_ids[applicationName]
        existing_record[assessorEmail] = str(assessmentId)
    else:
        first_record = {}
        first_record[assessorEmail] = str(assessmentId)
        assessment_ids[applicationName] = first_record
# execute SQL query using execute() method, to fetch the Projects
cursor.execute("SELECT `id`,`name` FROM project")
# Fetch the project records into a name -> id map.
project_ids = {}
for proj in cursor.fetchall():
    projectId = proj[0]
    projectName = proj[1]
    project_ids[projectName] = str(projectId)
# execute SQL query using execute() method, to fetch the Organisations
cursor.execute("SELECT `id`,`name` FROM organisation")
# Fetch the Organisation records into a name -> id map.
organisation_ids = {}
for org in cursor.fetchall():
    organisationId = org[0]
    organisationName = org[1]
    organisation_ids[organisationName] = str(organisationId)
# execute SQL query using execute() method, to fetch the Competition milestones.
# Result shape: competition_milestones[competitionId][milestoneType] -> dict of
# pre-formatted date strings (examples shown per line below).
cursor.execute("SELECT c.id, c.name, m.type, m.date FROM competition c JOIN milestone m ON m.competition_id = c.id")
competition_milestones = {}
for comp in cursor.fetchall():
    competitionId = comp[0]
    competitionName = comp[1]
    milestoneType = comp[2]
    milestoneDateDb = comp[3]
    # NOTE(review): assumes milestone datetimes are stored as naive UTC and
    # should be displayed in UK local time — confirm against the schema.
    utc = pytz.utc
    bst = pytz.timezone('Europe/London')
    milestoneDate = utc.localize(milestoneDateDb).astimezone(bst) if milestoneDateDb is not None else None
    # Re-use any partially-built inner dicts so multiple rows merge correctly.
    milestones_for_competition = competition_milestones[competitionId] if competitionId in competition_milestones else {}
    dates_for_milestone = milestones_for_competition[milestoneType] if milestoneType in milestones_for_competition else {}
    # The '-' in directives like %-d suppresses zero padding (glibc extension).
    dates_for_milestone['rawDate'] = milestoneDate
    dates_for_milestone['simpleDate'] = milestoneDate.strftime('%Y-%m-%d') if milestoneDate is not None else None  # 2002-03-28
    dates_for_milestone['prettyDayMonth'] = milestoneDate.strftime('%-d %B') if milestoneDate is not None else None  # 4 February
    dates_for_milestone['prettyDate'] = milestoneDate.strftime('%-d %B %Y') if milestoneDate is not None else None  # 4 February 2002
    dates_for_milestone['prettyDateTime'] = milestoneDate.strftime('%-d %B %Y %-I:%M') + milestoneDate.strftime('%p').lower() if milestoneDate is not None else None  # 4 February 2002 2:04am
    dates_for_milestone['prettyLongDate'] = milestoneDate.strftime('%A %-d %B %Y') if milestoneDate is not None else None  # Sunday 2 February 2002
    dates_for_milestone['prettyLongDateTime'] = milestoneDate.strftime('%A %-d %B %Y %-I:%M') + milestoneDate.strftime('%p').lower() if milestoneDate is not None else None  # Sunday 4 February 2002 2:04am
    dates_for_milestone['prettyLongTimeDate'] = milestoneDate.strftime('%-I:%M') + milestoneDate.strftime('%p').lower() + milestoneDate.strftime('%A %-d %B %Y') if milestoneDate is not None else None  # 2:05amSunday 4 February 2002
    # NOTE(review): 'dateTimeDb' uses 12-hour %-I with no am/pm marker, which
    # looks wrong for a DB-style datetime (expected %H) — confirm intended.
    dates_for_milestone['dateTimeDb'] = milestoneDate.strftime('%Y-%m-%d %-I:%M:%S') if milestoneDate is not None else None  # 2002-02-02 2:05:36
    dates_for_milestone['day'] = milestoneDate.strftime('%-d') if milestoneDate is not None else None  # 2 as day the - means that there is no 0 if date is 02
    dates_for_milestone['month'] = milestoneDate.strftime('%-m') if milestoneDate is not None else None  # 2 as month the - means that there is no 0 if date is 02
    dates_for_milestone['year'] = milestoneDate.strftime('%Y') if milestoneDate is not None else None  # 2002
    competition_milestones[competitionId] = milestones_for_competition
    milestones_for_competition[milestoneType] = dates_for_milestone
def _milestone_field(competitionId, milestoneType, field):
    """Shared lookup for one pre-formatted milestone field.

    Raises KeyError when the competition, milestone type or field is unknown.
    """
    return competition_milestones[competitionId][milestoneType][field]


def getSimpleMilestoneDate(competitionId, milestoneType):
    """Return the milestone date as e.g. '2002-03-28'."""
    return _milestone_field(competitionId, milestoneType, 'simpleDate')


def getPrettyMilestoneDayMonth(competitionId, milestoneType):
    """Return the milestone date as e.g. '4 February'."""
    return _milestone_field(competitionId, milestoneType, 'prettyDayMonth')


def getPrettyMilestoneDate(competitionId, milestoneType):
    """Return the milestone date as e.g. '4 February 2002'."""
    return _milestone_field(competitionId, milestoneType, 'prettyDate')


def getPrettyMilestoneDateTime(competitionId, milestoneType):
    """Return the milestone date as e.g. '4 February 2002 2:04am'."""
    return _milestone_field(competitionId, milestoneType, 'prettyDateTime')


def getPrettyLongMilestoneDate(competitionId, milestoneType):
    """Return the milestone date as e.g. 'Monday 4 February 2002'."""
    return _milestone_field(competitionId, milestoneType, 'prettyLongDate')


def getPrettyLongMilestoneDateTime(competitionId, milestoneType):
    """Return the milestone date as e.g. 'Monday 4 February 2002 2:04am'."""
    return _milestone_field(competitionId, milestoneType, 'prettyLongDateTime')


def getPrettyLongMilestoneTimeDate(competitionId, milestoneType):
    """Return the milestone time followed by the long date."""
    return _milestone_field(competitionId, milestoneType, 'prettyLongTimeDate')


def getMilestoneDateTimeDb(competitionId, milestoneType):
    """Return the milestone date in the DB-style datetime format."""
    return _milestone_field(competitionId, milestoneType, 'dateTimeDb')


def getMilestoneDay(competitionId, milestoneType):
    """Return the day of month without zero padding, e.g. '4'."""
    return _milestone_field(competitionId, milestoneType, 'day')


def getMilestoneMonth(competitionId, milestoneType):
    """Return the month number without zero padding, e.g. '2'."""
    return _milestone_field(competitionId, milestoneType, 'month')


def getMilestoneYear(competitionId, milestoneType):
    """Return the four-digit year, e.g. '2002'."""
    return _milestone_field(competitionId, milestoneType, 'year')
# disconnect from server: all startup lookups are now cached in module-level
# dicts; helper functions below open fresh connections when they need one.
cursor.close()
db.close()
# different from the project_ids dictionary that we create during startup, this method can be used to look up
# new project ids that were not present during the start of the test runs
def getProjectId(name):
    """Return the current id of the project called `name` via a fresh DB query."""
    db, cursor = connectToDb()
    # Parameterised query: the previous string concatenation was open to SQL
    # injection and broke on project names containing a quote.
    cursor.execute("SELECT `id` FROM project where `name` = %s", (name,))
    id = cursor.fetchone()[0]
    # disconnect from server
    cursor.close()
    db.close()
    return id
# One can use this function in order to request the User Id of a user, by providing his email address.
def getUserId(email):
    """Return the id of the user with the given email via a fresh DB query."""
    db, cursor = connectToDb()
    # Parameterised query: the previous string concatenation was open to SQL
    # injection and broke on addresses containing a quote.
    cursor.execute("SELECT `id` FROM user where `email` = %s", (email,))
    id = cursor.fetchone()[0]
    # disconnect from server
    cursor.close()
    db.close()
    return id
#!/usr/bin/env python3
# Adapted from a found script.
# Downloads all images on a given web page.
# To merely see/print the image URLs, see the `list_image_Urls.py` script.
import requests
import os
from tqdm import tqdm
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin, urlparse
def is_valid(url):
    """Return True when `url` has both a scheme and a network location."""
    parts = urlparse(url)
    return all((parts.scheme, parts.netloc))
def get_all_images(url):
    """Return the absolute URLs of every <img> tag found on `url`."""
    page = bs(requests.get(url).content, "html.parser")
    found = []
    for tag in tqdm(page.find_all("img"), "Extracting images"):
        src = tag.attrs.get("src")
        if not src:
            # <img> without a src attribute — nothing to download
            continue
        # Resolve relative references against the page URL.
        absolute = urljoin(url, src)
        # Drop any query string, e.g. '/hsts-pixel.gif?c=3.2.5'.
        absolute = absolute.split("?", 1)[0]
        # Keep only syntactically valid absolute URLs.
        if is_valid(absolute):
            found.append(absolute)
    return found
def download(url, pathname):
    """
    Downloads a file given an URL and puts it in the folder `pathname`.
    """
    # if path doesn't exist, make that path dir
    if not os.path.isdir(pathname):
        os.makedirs(pathname)
    # download the body of response by chunk, not immediately
    response = requests.get(url, stream=True)
    # total size in bytes; 0 when the server sends no Content-Length
    file_size = int(response.headers.get("Content-Length", 0))
    # derive the local file name from the last URL segment
    filename = os.path.join(pathname, url.split("/")[-1])
    # Progress bar in bytes. Iterate the raw chunk iterator and advance the
    # bar manually: iterating the tqdm wrapper as well would double-count
    # (one tick per chunk on top of len(data) bytes per chunk). Also show
    # the actual filename instead of the former "(unknown)" placeholder.
    progress = tqdm(desc=f"Downloading {filename}", total=file_size,
                    unit="B", unit_scale=True, unit_divisor=1024)
    with open(filename, "wb") as f:
        for data in response.iter_content(1024):
            # write data read to the file
            f.write(data)
            # update the progress bar by the number of bytes written
            progress.update(len(data))
    progress.close()
def download_images(url, path):
    """Download every image found on the page at `url` into directory `path`."""
    for image_url in get_all_images(url):
        download(image_url, path)
# Now onto the actual work
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="This script downloads all images from a web page")
    parser.add_argument("url", help="The URL of the web page you want to download images")
    parser.add_argument("-p", "--path", help="The Directory you want to store your images, default is the domain of URL passed")
    args = parser.parse_args()
    url = args.url
    path = args.path
    if not path:
        # if path isn't specified, use the domain name of that url as the folder name
        path = urlparse(url).netloc
    download_images(url, path)
adding some commentary
#!/usr/bin/env python3
# Adapted from a found script.
# Downloads all images on a given web page.
# To merely see/print the image URLs, see the `list_image_Urls.py` script.
# TODO: account for pages that lazy-load their images.
import requests
import os
from tqdm import tqdm
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin, urlparse
def is_valid(url):
    """
    Checks whether `url` is a valid URL (has both a scheme and a host).
    """
    pieces = urlparse(url)
    if not pieces.scheme:
        return False
    return bool(pieces.netloc)
def get_all_images(url):
    """
    Returns all image URLs on a single `url`.
    """
    soup = bs(requests.get(url).content, "html.parser")
    urls = []
    for img in tqdm(soup.find_all("img"), "Extracting images"):
        img_url = img.attrs.get("src")
        if not img_url:
            # if img does not contain src attribute, just skip
            continue
        # make the URL absolute by joining domain with the URL that is just extracted
        img_url = urljoin(url, img_url)
        # remove URLs like '/hsts-pixel.gif?c=3.2.5' by stripping the query string
        try:
            pos = img_url.index("?")
            img_url = img_url[:pos]
        except ValueError:
            # no query string present — keep the URL as-is
            pass
        # finally, keep only syntactically valid absolute URLs
        if is_valid(img_url):
            urls.append(img_url)
    return urls
def download(url, pathname):
    """
    Download the file at `url` into the directory `pathname`.

    :param url: direct URL of the file to fetch.
    :param pathname: destination directory; created if it does not exist.
    """
    # exist_ok avoids the check-then-create race of the old isdir() guard
    os.makedirs(pathname, exist_ok=True)
    # download the body of response by chunk, not immediately
    response = requests.get(url, stream=True)
    # total file size in bytes (0 when the server omits Content-Length)
    file_size = int(response.headers.get("Content-Length", 0))
    # local file name: last path segment of the URL
    filename = os.path.join(pathname, url.split("/")[-1])
    # Progress bar counting bytes. The original wrapped iter_content in tqdm
    # AND called progress.update(len(data)), advancing the bar len(data) + 1
    # per chunk; count bytes exactly once here. Also show the real filename
    # instead of the placeholder-less f-string "Downloading (unknown)".
    progress = tqdm(total=file_size, desc=f"Downloading {filename}", unit="B", unit_scale=True, unit_divisor=1024)
    with open(filename, "wb") as f:
        for data in response.iter_content(1024):
            # write data read to the file
            f.write(data)
            # advance the bar by the bytes actually received
            progress.update(len(data))
    # close the bar so partial lines don't linger on the terminal
    progress.close()
def download_images(url, path):
    """Fetch every image found on the page at `url` and save each one into `path`."""
    for image_url in get_all_images(url):
        download(image_url, path)
# Now onto the actual work
if __name__ == "__main__":
    import argparse
    # CLI: one required positional page URL plus an optional output directory.
    parser = argparse.ArgumentParser(description="This script downloads all images from a web page")
    parser.add_argument("url", help="The URL of the web page you want to download images")
    parser.add_argument("-p", "--path", help="The Directory you want to store your images, default is the domain of URL passed")
    args = parser.parse_args()
    url = args.url
    path = args.path
    if not path:
        # if path isn't specified, use the domain name of that url as the folder name
        path = urlparse(url).netloc
    # Scrape the page and download everything it references.
    download_images(url, path)
|
import urlparse
from cwpoliticl.extensions.base_parser import BaseParser
from cwpoliticl.items import Politicl, CacheItem, WebsiteTypes
class DnaIndiaParser(BaseParser):
    """Parser for dnaindia.com: queues article links and extracts article content."""

    def __init__(self):
        super(DnaIndiaParser, self).__init__()

    def parse_paginate(self, url, hxs, cache_db, history_db):
        """Walk the listing page and cache every article link not already seen.

        :param url: URL of the listing page (used to absolutize relative hrefs).
        :param hxs: selector/response wrapper supporting .xpath().
        :param cache_db: store receiving new CacheItem entries.
        :param history_db: store consulted to skip already-seen links.
        """
        selector = '//*[@class="media-list eventtracker"]'
        links = hxs.xpath(selector).extract()
        # BUGFIX: the old code incremented its index only after a *saved* link,
        # so one skipped (already-seen) link made every later iteration re-read
        # the wrong div[...] node. enumerate keeps the XPath index in lockstep
        # with the link position.
        for position, link in enumerate(links, start=1):
            href_selector = "{}/div[{}]/div[2]/a/@href".format(selector, position)
            detailed_href = self.get_value_with_urljoin(hxs, href_selector, url)
            # If the link already exists in the history database, ignore it.
            if history_db.check_history_exist(detailed_href):
                continue
            item = CacheItem.get_default(url=detailed_href, url_from=WebsiteTypes.dnaindia.value)
            # NOTE(review): save_cache now receives the 1-based div position
            # rather than a running count of saved items — confirm callers only
            # treat this argument as an ordering hint.
            cache_db.save_cache(item, position)

    def parse(self, url, hxs, item_db):
        """Extract the article image, body text, and tags.

        NOTE(review): results are not yet persisted to item_db — this method
        looks like work in progress.
        """
        image = self.get_value_response(hxs, '//*[@class="row article-img pos-lead"]/img/@src')
        content = self.get_all_value_response(hxs, '//*[@class="body-text"]/p', max_len=2, sperator='\n' + '\n')
        tags = hxs.xpath('//*[@data-event-sub-cat="ArticleTags"]/div/div/ul/li/a/text()').extract()
        pass

    def parse_tags(self, hxs):
        # Not implemented yet.
        pass
cwpoliticl.
import urlparse
from cwpoliticl.extensions.base_parser import BaseParser
from cwpoliticl.items import Politicl, CacheItem, WebsiteTypes
class DnaIndiaParser(BaseParser):
    """Parser for dnaindia.com: queues article links and extracts article content."""

    def __init__(self):
        super(DnaIndiaParser, self).__init__()

    def parse_paginate(self, url, hxs, cache_db, history_db):
        """Walk the listing page and cache every article link not already seen.

        :param url: URL of the listing page (used to absolutize relative hrefs).
        :param hxs: selector/response wrapper supporting .xpath().
        :param cache_db: store receiving new CacheItem entries.
        :param history_db: store consulted to skip already-seen links.
        """
        selector = '//*[@class="media-list eventtracker"]'
        links = hxs.xpath(selector).extract()
        # BUGFIX: the old code incremented its index only after a *saved* link,
        # so one skipped (already-seen) link made every later iteration re-read
        # the wrong div[...] node. enumerate keeps the XPath index in lockstep
        # with the link position.
        for position, link in enumerate(links, start=1):
            href_selector = "{}/div[{}]/div[2]/a/@href".format(selector, position)
            detailed_href = self.get_value_with_urljoin(hxs, href_selector, url)
            # If the link already exists in the history database, ignore it.
            if history_db.check_history_exist(detailed_href):
                continue
            item = CacheItem.get_default(url=detailed_href, url_from=WebsiteTypes.dnaindia.value)
            # NOTE(review): save_cache now receives the 1-based div position
            # rather than a running count of saved items — confirm callers only
            # treat this argument as an ordering hint.
            cache_db.save_cache(item, position)

    def parse(self, url, hxs, item_db):
        """Extract the article image, body text, and tags.

        NOTE(review): results are not yet persisted to item_db — this method
        looks like work in progress.
        """
        image = self.get_value_response(hxs, '//*[@class="row article-img pos-lead"]/img/@src')
        content = self.get_all_value_response(hxs, '//*[@class="body-text"]/p', max_len=2, sperator='\n' + '\n')
        tags = hxs.xpath('//*[@data-event-sub-cat="ArticleTags"]/div/div/ul/li/a/text()').extract()
        pass
|
"""Define a connection to the SimpliSafe websocket."""
from __future__ import annotations
import asyncio
from dataclasses import InitVar, dataclass, field
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Any, Callable, Dict, Final, cast
from aiohttp import ClientWebSocketResponse, WSMsgType
from aiohttp.client_exceptions import (
ClientError,
ServerDisconnectedError,
WSServerHandshakeError,
)
from simplipy.const import DEFAULT_USER_AGENT, LOGGER
from simplipy.device import DeviceTypes
from simplipy.errors import (
CannotConnectError,
ConnectionClosedError,
ConnectionFailedError,
InvalidMessageError,
NotConnectedError,
)
from simplipy.util import schedule_callback
from simplipy.util.dt import utc_from_timestamp
if TYPE_CHECKING:
from simplipy import API
WEBSOCKET_SERVER_URL = "wss://socketlink.prd.aser.simplisafe.com"
DEFAULT_WATCHDOG_TIMEOUT = timedelta(minutes=5)
EVENT_ALARM_CANCELED: Final = "alarm_canceled"
EVENT_ALARM_TRIGGERED: Final = "alarm_triggered"
EVENT_ARMED_AWAY: Final = "armed_away"
EVENT_ARMED_AWAY_BY_KEYPAD: Final = "armed_away_by_keypad"
EVENT_ARMED_AWAY_BY_REMOTE: Final = "armed_away_by_remote"
EVENT_ARMED_HOME: Final = "armed_home"
EVENT_AUTOMATIC_TEST: Final = "automatic_test"
EVENT_AWAY_EXIT_DELAY_BY_KEYPAD: Final = "away_exit_delay_by_keypad"
EVENT_AWAY_EXIT_DELAY_BY_REMOTE: Final = "away_exit_delay_by_remote"
EVENT_CAMERA_MOTION_DETECTED: Final = "camera_motion_detected"
EVENT_CONNECTION_LOST: Final = "connection_lost"
EVENT_CONNECTION_RESTORED: Final = "connection_restored"
EVENT_DISARMED_BY_MASTER_PIN: Final = "disarmed_by_master_pin"
EVENT_DISARMED_BY_REMOTE: Final = "disarmed_by_remote"
EVENT_DOORBELL_DETECTED: Final = "doorbell_detected"
EVENT_DEVICE_TEST: Final = "device_test"
EVENT_ENTRY_DELAY: Final = "entry_delay"
EVENT_HOME_EXIT_DELAY: Final = "home_exit_delay"
EVENT_LOCK_ERROR: Final = "lock_error"
EVENT_LOCK_LOCKED: Final = "lock_locked"
EVENT_LOCK_UNLOCKED: Final = "lock_unlocked"
EVENT_POWER_OUTAGE: Final = "power_outage"
EVENT_POWER_RESTORED: Final = "power_restored"
EVENT_SECRET_ALERT_TRIGGERED: Final = "secret_alert_triggered"
EVENT_SENSOR_NOT_RESPONDING: Final = "sensor_not_responding"
EVENT_SENSOR_PAIRED_AND_NAMED: Final = "sensor_paired_and_named"
EVENT_SENSOR_RESTORED: Final = "sensor_restored"
EVENT_USER_INITIATED_TEST: Final = "user_initiated_test"
EVENT_MAPPING = {
1110: EVENT_ALARM_TRIGGERED,
1120: EVENT_ALARM_TRIGGERED,
1132: EVENT_ALARM_TRIGGERED,
1134: EVENT_ALARM_TRIGGERED,
1154: EVENT_ALARM_TRIGGERED,
1159: EVENT_ALARM_TRIGGERED,
1162: EVENT_ALARM_TRIGGERED,
1170: EVENT_CAMERA_MOTION_DETECTED,
1301: EVENT_POWER_OUTAGE,
1350: EVENT_CONNECTION_LOST,
1381: EVENT_SENSOR_NOT_RESPONDING,
1400: EVENT_DISARMED_BY_MASTER_PIN,
1406: EVENT_ALARM_CANCELED,
1407: EVENT_DISARMED_BY_REMOTE,
1409: EVENT_SECRET_ALERT_TRIGGERED,
1429: EVENT_ENTRY_DELAY,
1458: EVENT_DOORBELL_DETECTED,
1531: EVENT_SENSOR_PAIRED_AND_NAMED,
1601: EVENT_USER_INITIATED_TEST,
1602: EVENT_AUTOMATIC_TEST,
1604: EVENT_DEVICE_TEST,
3301: EVENT_POWER_RESTORED,
3350: EVENT_CONNECTION_RESTORED,
3381: EVENT_SENSOR_RESTORED,
3401: EVENT_ARMED_AWAY_BY_KEYPAD,
3407: EVENT_ARMED_AWAY_BY_REMOTE,
3441: EVENT_ARMED_HOME,
3481: EVENT_ARMED_AWAY,
3487: EVENT_ARMED_AWAY,
3491: EVENT_ARMED_HOME,
9401: EVENT_AWAY_EXIT_DELAY_BY_KEYPAD,
9407: EVENT_AWAY_EXIT_DELAY_BY_REMOTE,
9441: EVENT_HOME_EXIT_DELAY,
9700: EVENT_LOCK_UNLOCKED,
9701: EVENT_LOCK_LOCKED,
9703: EVENT_LOCK_ERROR,
}
class Watchdog:
    """Kick the websocket connection if no traffic arrives within the timeout."""

    def __init__(
        self, action: Callable[..., Any], timeout: timedelta = DEFAULT_WATCHDOG_TIMEOUT
    ):
        """Store the expiry action and convert the timeout to seconds."""
        self._action = action
        self._action_task: asyncio.Task | None = None
        self._loop = asyncio.get_running_loop()
        self._timeout_seconds = timeout.total_seconds()
        self._timer_task: asyncio.TimerHandle | None = None

    def _on_expire(self) -> None:
        """Run the expiry action unless the timer was already cancelled."""
        timer = self._timer_task
        if timer is None or timer.cancelled():
            return
        LOGGER.info("Websocket watchdog expired")
        schedule_callback(self._action)

    def cancel(self) -> None:
        """Stop any pending expiry timer."""
        if self._timer_task is not None:
            self._timer_task.cancel()
            self._timer_task = None

    def trigger(self) -> None:
        """(Re)start the countdown toward the expiry action."""
        LOGGER.info(
            "Websocket watchdog triggered – sleeping for %s seconds",
            self._timeout_seconds,
        )
        pending = self._timer_task
        if pending is not None:
            pending.cancel()
        self._timer_task = self._loop.call_later(self._timeout_seconds, self._on_expire)
@dataclass(frozen=True)
class WebsocketEvent:  # pylint: disable=too-many-instance-attributes
    """Define a representation of a websocket event message.

    ``event_cid`` (the raw SimpliSafe event code) is init-only; __post_init__
    maps it to the human-readable ``event_type``.
    """

    event_cid: InitVar[int]
    info: str
    system_id: int
    timestamp: float  # raw epoch seconds at init; rewritten to a UTC datetime below
    event_type: str | None = field(init=False)
    changed_by: str | None = None
    sensor_name: str | None = None
    sensor_serial: str | None = None
    sensor_type: DeviceTypes | None = None

    def __post_init__(self, event_cid: int) -> None:
        """Run post-init initialization."""
        # Translate the numeric event code into a friendly event-type string:
        if event_cid in EVENT_MAPPING:
            object.__setattr__(self, "event_type", EVENT_MAPPING[event_cid])
        else:
            LOGGER.warning(
                'Encountered unknown websocket event type: %s ("%s"). Please report it '
                "at https://github.com/bachya/simplisafe-python/issues.",
                event_cid,
                self.info,
            )
            object.__setattr__(self, "event_type", None)
        # Frozen dataclass, so field rewrites must go through object.__setattr__:
        object.__setattr__(self, "timestamp", utc_from_timestamp(self.timestamp))
        if self.sensor_type is not None:
            try:
                object.__setattr__(self, "sensor_type", DeviceTypes(self.sensor_type))
            except ValueError:
                # BUGFIX: the adjacent string literals previously concatenated as
                # "...report it athttps://..." — the trailing space was missing.
                # NOTE(review): this points at the home-assistant repo; confirm it
                # shouldn't be the simplisafe-python issue tracker instead.
                LOGGER.warning(
                    'Encountered unknown device type: %s ("%s"). Please report it at '
                    "https://github.com/home-assistant/home-assistant/issues.",
                    self.sensor_type,
                    self.info,
                )
                object.__setattr__(self, "sensor_type", None)
def websocket_event_from_payload(payload: dict[str, Any]) -> WebsocketEvent:
    """Build a WebsocketEvent from a raw websocket event payload."""
    data = payload["data"]
    return WebsocketEvent(
        data["eventCid"],
        data["info"],
        data["sid"],
        data["eventTimestamp"],
        changed_by=data["pinName"],
        sensor_name=data["sensorName"],
        sensor_serial=data["sensorSerial"],
        sensor_type=data["sensorType"],
    )
class WebsocketClient:
    """A websocket connection to the SimpliSafe cloud.

    Note that this class shouldn't be instantiated directly; it will be instantiated as
    appropriate via :meth:`simplipy.API.async_from_auth` or
    :meth:`simplipy.API.async_from_refresh_token`.

    :param api: A :meth:`simplipy.API` object
    :type api: :meth:`simplipy.API`
    """

    def __init__(self, api: API) -> None:
        """Initialize."""
        self._api = api
        self._connect_callbacks: list[Callable[..., None]] = []
        self._disconnect_callbacks: list[Callable[..., None]] = []
        self._event_callbacks: list[Callable[..., None]] = []
        self._loop = asyncio.get_running_loop()
        # The watchdog reconnects when no message has arrived within its timeout:
        self._watchdog = Watchdog(self.async_reconnect)
        # These will get filled in after initial authentication:
        self._client: ClientWebSocketResponse | None = None

    @property
    def connected(self) -> bool:
        """Return if currently connected to the websocket."""
        return self._client is not None and not self._client.closed

    @staticmethod
    def _add_callback(
        callback_list: list, callback: Callable[..., Any]
    ) -> Callable[..., None]:
        """Add a callback to a particular list.

        Returns a zero-argument function that unregisters the callback.
        """
        callback_list.append(callback)

        def remove() -> None:
            """Remove the callback."""
            callback_list.remove(callback)

        return remove

    async def _async_receive_json(self) -> dict[str, Any]:
        """Receive a JSON response from the websocket server.

        :raises ConnectionClosedError: when the server closed the connection.
        :raises ConnectionFailedError: on a websocket-level error frame.
        :raises InvalidMessageError: on a non-text or non-JSON payload.
        """
        assert self._client
        msg = await self._client.receive()
        if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSED, WSMsgType.CLOSING):
            raise ConnectionClosedError("Connection was closed.")
        if msg.type == WSMsgType.ERROR:
            raise ConnectionFailedError
        if msg.type != WSMsgType.TEXT:
            raise InvalidMessageError(f"Received non-text message: {msg.type}")
        try:
            data = msg.json()
        except ValueError as err:
            raise InvalidMessageError("Received invalid JSON") from err
        LOGGER.debug("Received data from websocket server: %s", data)
        # Every well-formed message counts as traffic, so push the watchdog out:
        self._watchdog.trigger()
        return cast(Dict[str, Any], data)

    async def _async_send_json(self, payload: dict[str, Any]) -> None:
        """Send a JSON message to the websocket server.

        Raises NotConnectedError if client is not connected.
        """
        if not self.connected:
            raise NotConnectedError
        assert self._client
        LOGGER.debug("Sending data to websocket server: %s", payload)
        await self._client.send_json(payload)

    def _parse_message(self, message: dict[str, Any]) -> None:
        """Parse an incoming message, fanning standard events out to callbacks."""
        if message["type"] == "com.simplisafe.event.standard":
            event = websocket_event_from_payload(message)
            for callback in self._event_callbacks:
                schedule_callback(callback, event)

    def add_connect_callback(self, callback: Callable[..., Any]) -> Callable[..., None]:
        """Add a callback to be called after connecting.

        :param callback: The method to call after connecting
        :type callback: ``Callable[..., None]``
        """
        return self._add_callback(self._connect_callbacks, callback)

    def add_disconnect_callback(
        self, callback: Callable[..., Any]
    ) -> Callable[..., None]:
        """Add a callback to be called after disconnecting.

        :param callback: The method to call after disconnecting
        :type callback: ``Callable[..., None]``
        """
        return self._add_callback(self._disconnect_callbacks, callback)

    def add_event_callback(self, callback: Callable[..., Any]) -> Callable[..., None]:
        """Add a callback to be called upon receiving an event.

        Note that callbacks should expect to receive a WebsocketEvent object as a
        parameter.

        :param callback: The method to call after receiving an event.
        :type callback: ``Callable[..., None]``
        """
        return self._add_callback(self._event_callbacks, callback)

    async def async_connect(self) -> None:
        """Connect to the websocket server."""
        if self.connected:
            return
        try:
            self._client = await self._api.session.ws_connect(
                WEBSOCKET_SERVER_URL, heartbeat=55
            )
        except (ClientError, ServerDisconnectedError, WSServerHandshakeError) as err:
            raise CannotConnectError(err) from err
        LOGGER.info("Connected to websocket server")
        # Arm the watchdog before user callbacks run, so a raising callback
        # can't leave a live connection unmonitored:
        self._watchdog.trigger()
        for callback in self._connect_callbacks:
            schedule_callback(callback)

    async def async_disconnect(self) -> None:
        """Disconnect from the websocket server."""
        if not self.connected:
            return
        assert self._client
        await self._client.close()
        LOGGER.info("Disconnected from websocket server")

    async def async_listen(self) -> None:
        """Start listening to the websocket server."""
        assert self._client
        now = datetime.utcnow()
        now_ts = round(now.timestamp() * 1000)
        now_utc_iso = f"{now.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]}Z"
        try:
            await self._async_send_json(
                {
                    "datacontenttype": "application/json",
                    "type": "com.simplisafe.connection.identify",
                    "time": now_utc_iso,
                    "id": f"ts:{now_ts}",
                    "specversion": "1.0",
                    "source": DEFAULT_USER_AGENT,
                    "data": {
                        "auth": {
                            "schema": "bearer",
                            "token": self._api.access_token,
                        },
                        "join": [f"uid:{self._api.user_id}"],
                    },
                }
            )
            while not self._client.closed:
                message = await self._async_receive_json()
                self._parse_message(message)
        except ConnectionClosedError:
            pass
        finally:
            LOGGER.debug("Listen completed; cleaning up")
            # BUGFIX: cancel the watchdog here; otherwise its pending timer
            # fires after a disconnect and keeps re-triggering async_reconnect
            # even though the consumer deliberately tore the connection down.
            self._watchdog.cancel()
            for callback in self._disconnect_callbacks:
                schedule_callback(callback)

    async def async_reconnect(self) -> None:
        """Reconnect (and re-listen, if appropriate) to the websocket."""
        await self.async_disconnect()
        # Brief pause so we don't hammer the server in a tight reconnect loop:
        await asyncio.sleep(1)
        await self.async_connect()
Fix websocket watchdog not canceling when websocket disconnects (#293)
"""Define a connection to the SimpliSafe websocket."""
from __future__ import annotations
import asyncio
from dataclasses import InitVar, dataclass, field
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Any, Callable, Dict, Final, cast
from aiohttp import ClientWebSocketResponse, WSMsgType
from aiohttp.client_exceptions import (
ClientError,
ServerDisconnectedError,
WSServerHandshakeError,
)
from simplipy.const import DEFAULT_USER_AGENT, LOGGER
from simplipy.device import DeviceTypes
from simplipy.errors import (
CannotConnectError,
ConnectionClosedError,
ConnectionFailedError,
InvalidMessageError,
NotConnectedError,
)
from simplipy.util import schedule_callback
from simplipy.util.dt import utc_from_timestamp
if TYPE_CHECKING:
from simplipy import API
WEBSOCKET_SERVER_URL = "wss://socketlink.prd.aser.simplisafe.com"
DEFAULT_WATCHDOG_TIMEOUT = timedelta(minutes=5)
EVENT_ALARM_CANCELED: Final = "alarm_canceled"
EVENT_ALARM_TRIGGERED: Final = "alarm_triggered"
EVENT_ARMED_AWAY: Final = "armed_away"
EVENT_ARMED_AWAY_BY_KEYPAD: Final = "armed_away_by_keypad"
EVENT_ARMED_AWAY_BY_REMOTE: Final = "armed_away_by_remote"
EVENT_ARMED_HOME: Final = "armed_home"
EVENT_AUTOMATIC_TEST: Final = "automatic_test"
EVENT_AWAY_EXIT_DELAY_BY_KEYPAD: Final = "away_exit_delay_by_keypad"
EVENT_AWAY_EXIT_DELAY_BY_REMOTE: Final = "away_exit_delay_by_remote"
EVENT_CAMERA_MOTION_DETECTED: Final = "camera_motion_detected"
EVENT_CONNECTION_LOST: Final = "connection_lost"
EVENT_CONNECTION_RESTORED: Final = "connection_restored"
EVENT_DISARMED_BY_MASTER_PIN: Final = "disarmed_by_master_pin"
EVENT_DISARMED_BY_REMOTE: Final = "disarmed_by_remote"
EVENT_DOORBELL_DETECTED: Final = "doorbell_detected"
EVENT_DEVICE_TEST: Final = "device_test"
EVENT_ENTRY_DELAY: Final = "entry_delay"
EVENT_HOME_EXIT_DELAY: Final = "home_exit_delay"
EVENT_LOCK_ERROR: Final = "lock_error"
EVENT_LOCK_LOCKED: Final = "lock_locked"
EVENT_LOCK_UNLOCKED: Final = "lock_unlocked"
EVENT_POWER_OUTAGE: Final = "power_outage"
EVENT_POWER_RESTORED: Final = "power_restored"
EVENT_SECRET_ALERT_TRIGGERED: Final = "secret_alert_triggered"
EVENT_SENSOR_NOT_RESPONDING: Final = "sensor_not_responding"
EVENT_SENSOR_PAIRED_AND_NAMED: Final = "sensor_paired_and_named"
EVENT_SENSOR_RESTORED: Final = "sensor_restored"
EVENT_USER_INITIATED_TEST: Final = "user_initiated_test"
EVENT_MAPPING = {
1110: EVENT_ALARM_TRIGGERED,
1120: EVENT_ALARM_TRIGGERED,
1132: EVENT_ALARM_TRIGGERED,
1134: EVENT_ALARM_TRIGGERED,
1154: EVENT_ALARM_TRIGGERED,
1159: EVENT_ALARM_TRIGGERED,
1162: EVENT_ALARM_TRIGGERED,
1170: EVENT_CAMERA_MOTION_DETECTED,
1301: EVENT_POWER_OUTAGE,
1350: EVENT_CONNECTION_LOST,
1381: EVENT_SENSOR_NOT_RESPONDING,
1400: EVENT_DISARMED_BY_MASTER_PIN,
1406: EVENT_ALARM_CANCELED,
1407: EVENT_DISARMED_BY_REMOTE,
1409: EVENT_SECRET_ALERT_TRIGGERED,
1429: EVENT_ENTRY_DELAY,
1458: EVENT_DOORBELL_DETECTED,
1531: EVENT_SENSOR_PAIRED_AND_NAMED,
1601: EVENT_USER_INITIATED_TEST,
1602: EVENT_AUTOMATIC_TEST,
1604: EVENT_DEVICE_TEST,
3301: EVENT_POWER_RESTORED,
3350: EVENT_CONNECTION_RESTORED,
3381: EVENT_SENSOR_RESTORED,
3401: EVENT_ARMED_AWAY_BY_KEYPAD,
3407: EVENT_ARMED_AWAY_BY_REMOTE,
3441: EVENT_ARMED_HOME,
3481: EVENT_ARMED_AWAY,
3487: EVENT_ARMED_AWAY,
3491: EVENT_ARMED_HOME,
9401: EVENT_AWAY_EXIT_DELAY_BY_KEYPAD,
9407: EVENT_AWAY_EXIT_DELAY_BY_REMOTE,
9441: EVENT_HOME_EXIT_DELAY,
9700: EVENT_LOCK_UNLOCKED,
9701: EVENT_LOCK_LOCKED,
9703: EVENT_LOCK_ERROR,
}
class Watchdog:
    """Kick the websocket connection if no traffic arrives within the timeout."""

    def __init__(
        self, action: Callable[..., Any], timeout: timedelta = DEFAULT_WATCHDOG_TIMEOUT
    ):
        """Store the expiry action and convert the timeout to seconds."""
        self._action = action
        self._action_task: asyncio.Task | None = None
        self._loop = asyncio.get_running_loop()
        self._timeout_seconds = timeout.total_seconds()
        self._timer_task: asyncio.TimerHandle | None = None

    def _on_expire(self) -> None:
        """Log and run the expiry action."""
        LOGGER.info("Websocket watchdog expired")
        schedule_callback(self._action)

    def cancel(self) -> None:
        """Stop any pending expiry timer."""
        if self._timer_task is not None:
            self._timer_task.cancel()
            self._timer_task = None

    def trigger(self) -> None:
        """(Re)start the countdown toward the expiry action."""
        LOGGER.info(
            "Websocket watchdog triggered – sleeping for %s seconds",
            self._timeout_seconds,
        )
        pending = self._timer_task
        if pending is not None:
            pending.cancel()
        self._timer_task = self._loop.call_later(self._timeout_seconds, self._on_expire)
@dataclass(frozen=True)
class WebsocketEvent:  # pylint: disable=too-many-instance-attributes
    """Define a representation of a websocket event message.

    ``event_cid`` (the raw SimpliSafe event code) is init-only; __post_init__
    maps it to the human-readable ``event_type``.
    """

    event_cid: InitVar[int]
    info: str
    system_id: int
    timestamp: float  # raw epoch seconds at init; rewritten to a UTC datetime below
    event_type: str | None = field(init=False)
    changed_by: str | None = None
    sensor_name: str | None = None
    sensor_serial: str | None = None
    sensor_type: DeviceTypes | None = None

    def __post_init__(self, event_cid: int) -> None:
        """Run post-init initialization."""
        # Translate the numeric event code into a friendly event-type string:
        if event_cid in EVENT_MAPPING:
            object.__setattr__(self, "event_type", EVENT_MAPPING[event_cid])
        else:
            LOGGER.warning(
                'Encountered unknown websocket event type: %s ("%s"). Please report it '
                "at https://github.com/bachya/simplisafe-python/issues.",
                event_cid,
                self.info,
            )
            object.__setattr__(self, "event_type", None)
        # Frozen dataclass, so field rewrites must go through object.__setattr__:
        object.__setattr__(self, "timestamp", utc_from_timestamp(self.timestamp))
        if self.sensor_type is not None:
            try:
                object.__setattr__(self, "sensor_type", DeviceTypes(self.sensor_type))
            except ValueError:
                # BUGFIX: the adjacent string literals previously concatenated as
                # "...report it athttps://..." — the trailing space was missing.
                # NOTE(review): this points at the home-assistant repo; confirm it
                # shouldn't be the simplisafe-python issue tracker instead.
                LOGGER.warning(
                    'Encountered unknown device type: %s ("%s"). Please report it at '
                    "https://github.com/home-assistant/home-assistant/issues.",
                    self.sensor_type,
                    self.info,
                )
                object.__setattr__(self, "sensor_type", None)
def websocket_event_from_payload(payload: dict[str, Any]) -> WebsocketEvent:
    """Build a WebsocketEvent from a raw websocket event payload."""
    data = payload["data"]
    return WebsocketEvent(
        data["eventCid"],
        data["info"],
        data["sid"],
        data["eventTimestamp"],
        changed_by=data["pinName"],
        sensor_name=data["sensorName"],
        sensor_serial=data["sensorSerial"],
        sensor_type=data["sensorType"],
    )
class WebsocketClient:
    """A websocket connection to the SimpliSafe cloud.

    Note that this class shouldn't be instantiated directly; it will be instantiated as
    appropriate via :meth:`simplipy.API.async_from_auth` or
    :meth:`simplipy.API.async_from_refresh_token`.

    :param api: A :meth:`simplipy.API` object
    :type api: :meth:`simplipy.API`
    """
    def __init__(self, api: API) -> None:
        """Initialize."""
        self._api = api
        self._connect_callbacks: list[Callable[..., None]] = []
        self._disconnect_callbacks: list[Callable[..., None]] = []
        self._event_callbacks: list[Callable[..., None]] = []
        self._loop = asyncio.get_running_loop()
        # The watchdog reconnects when no message has arrived within its timeout:
        self._watchdog = Watchdog(self.async_reconnect)
        # These will get filled in after initial authentication:
        self._client: ClientWebSocketResponse | None = None
    @property
    def connected(self) -> bool:
        """Return if currently connected to the websocket."""
        return self._client is not None and not self._client.closed
    @staticmethod
    def _add_callback(
        callback_list: list, callback: Callable[..., Any]
    ) -> Callable[..., None]:
        """Add a callback to a particular list; return a remover for it."""
        callback_list.append(callback)
        def remove() -> None:
            """Remove the callback."""
            callback_list.remove(callback)
        return remove
    async def _async_receive_json(self) -> dict[str, Any]:
        """Receive a JSON response from the websocket server.

        Raises ConnectionClosedError / ConnectionFailedError /
        InvalidMessageError depending on the frame received.
        """
        assert self._client
        msg = await self._client.receive()
        if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSED, WSMsgType.CLOSING):
            raise ConnectionClosedError("Connection was closed.")
        if msg.type == WSMsgType.ERROR:
            raise ConnectionFailedError
        if msg.type != WSMsgType.TEXT:
            raise InvalidMessageError(f"Received non-text message: {msg.type}")
        try:
            data = msg.json()
        except ValueError as err:
            raise InvalidMessageError("Received invalid JSON") from err
        LOGGER.debug("Received data from websocket server: %s", data)
        # Every well-formed message counts as traffic, so push the watchdog out:
        self._watchdog.trigger()
        return cast(Dict[str, Any], data)
    async def _async_send_json(self, payload: dict[str, Any]) -> None:
        """Send a JSON message to the websocket server.

        Raises NotConnectedError if client is not connected.
        """
        if not self.connected:
            raise NotConnectedError
        assert self._client
        LOGGER.debug("Sending data to websocket server: %s", payload)
        await self._client.send_json(payload)
    def _parse_message(self, message: dict[str, Any]) -> None:
        """Parse an incoming message, fanning standard events out to callbacks."""
        if message["type"] == "com.simplisafe.event.standard":
            event = websocket_event_from_payload(message)
            for callback in self._event_callbacks:
                schedule_callback(callback, event)
    def add_connect_callback(self, callback: Callable[..., Any]) -> Callable[..., None]:
        """Add a callback to be called after connecting.

        :param callback: The method to call after connecting
        :type callback: ``Callable[..., None]``
        """
        return self._add_callback(self._connect_callbacks, callback)
    def add_disconnect_callback(
        self, callback: Callable[..., Any]
    ) -> Callable[..., None]:
        """Add a callback to be called after disconnecting.

        :param callback: The method to call after disconnecting
        :type callback: ``Callable[..., None]``
        """
        return self._add_callback(self._disconnect_callbacks, callback)
    def add_event_callback(self, callback: Callable[..., Any]) -> Callable[..., None]:
        """Add a callback to be called upon receiving an event.

        Note that callbacks should expect to receive a WebsocketEvent object as a
        parameter.

        :param callback: The method to call after receiving an event.
        :type callback: ``Callable[..., None]``
        """
        return self._add_callback(self._event_callbacks, callback)
    async def async_connect(self) -> None:
        """Connect to the websocket server."""
        if self.connected:
            return
        try:
            self._client = await self._api.session.ws_connect(
                WEBSOCKET_SERVER_URL, heartbeat=55
            )
        except (ClientError, ServerDisconnectedError, WSServerHandshakeError) as err:
            raise CannotConnectError(err) from err
        LOGGER.info("Connected to websocket server")
        # Arm the watchdog before user callbacks run, so a raising callback
        # can't leave a live connection unmonitored:
        self._watchdog.trigger()
        for callback in self._connect_callbacks:
            schedule_callback(callback)
    async def async_disconnect(self) -> None:
        """Disconnect from the websocket server."""
        if not self.connected:
            return
        assert self._client
        await self._client.close()
        LOGGER.info("Disconnected from websocket server")
    async def async_listen(self) -> None:
        """Start listening to the websocket server."""
        assert self._client
        now = datetime.utcnow()
        now_ts = round(now.timestamp() * 1000)
        now_utc_iso = f"{now.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]}Z"
        try:
            # Identify/authenticate this connection before consuming events:
            await self._async_send_json(
                {
                    "datacontenttype": "application/json",
                    "type": "com.simplisafe.connection.identify",
                    "time": now_utc_iso,
                    "id": f"ts:{now_ts}",
                    "specversion": "1.0",
                    "source": DEFAULT_USER_AGENT,
                    "data": {
                        "auth": {
                            "schema": "bearer",
                            "token": self._api.access_token,
                        },
                        "join": [f"uid:{self._api.user_id}"],
                    },
                }
            )
            while not self._client.closed:
                message = await self._async_receive_json()
                self._parse_message(message)
        except ConnectionClosedError:
            pass
        finally:
            LOGGER.debug("Listen completed; cleaning up")
            # Cancel the watchdog so a deliberate disconnect doesn't later
            # fire a stale timer and reconnect behind the consumer's back:
            self._watchdog.cancel()
            for callback in self._disconnect_callbacks:
                schedule_callback(callback)
    async def async_reconnect(self) -> None:
        """Reconnect (and re-listen, if appropriate) to the websocket."""
        await self.async_disconnect()
        # Brief pause so we don't hammer the server in a tight reconnect loop:
        await asyncio.sleep(1)
        await self.async_connect()
|
"""
Search (Chapters 3-4)
The way to use this code is to subclass Problem to create a class of problems,
then create problem instances and solve them with calls to the various search
functions.
"""
import sys
from collections import deque
from utils import *
class Problem:
    """Abstract base class for a formal problem.

    Subclass this and implement `actions` and `result` (and possibly
    `__init__`, `goal_test`, and `path_cost`); then hand instances of the
    subclass to the various search functions.
    """

    def __init__(self, initial, goal=None):
        """Record the initial state and, when there is a unique one, the goal
        state. Subclass constructors may add further arguments."""
        self.initial = initial
        self.goal = goal

    def actions(self, state):
        """Return the actions executable in `state` — typically a list, or an
        iterator when the branching factor is large."""
        raise NotImplementedError

    def result(self, state, action):
        """Return the state produced by applying `action` (which must come
        from self.actions(state)) in `state`."""
        raise NotImplementedError

    def goal_test(self, state):
        """Return True if `state` is a goal: membership when self.goal is a
        list, equality otherwise. Override when a single comparison is not
        enough."""
        if isinstance(self.goal, list):
            return is_in(state, self.goal)
        return state == self.goal

    def path_cost(self, c, state1, action, state2):
        """Return the cost of a path reaching `state2` from `state1` via
        `action`, given cost `c` up to `state1`. Defaults to 1 per step."""
        return c + 1

    def value(self, state):
        """For optimization problems: the value of `state`, which hill
        climbing and related algorithms try to maximize."""
        raise NotImplementedError
# ______________________________________________________________________________
class Node:
    """A node in a search tree: a state plus bookkeeping.

    Holds a pointer to the parent node, the action that produced this state,
    and the total path cost (g) from the root. Two distinct paths to the same
    state yield two distinct Node objects. Other code may attach f and h
    values; see best_first_graph_search and astar_search. You will not need
    to subclass this class.
    """

    def __init__(self, state, parent=None, action=None, path_cost=0):
        """Create a search tree Node, derived from a parent by an action."""
        self.state = state
        self.parent = parent
        self.action = action
        self.path_cost = path_cost
        self.depth = parent.depth + 1 if parent else 0

    def __repr__(self):
        return "<Node {}>".format(self.state)

    def __lt__(self, node):
        return self.state < node.state

    def expand(self, problem):
        """List the nodes reachable in one step from this node."""
        return [self.child_node(problem, action)
                for action in problem.actions(self.state)]

    def child_node(self, problem, action):
        """[Figure 3.10]"""
        successor = problem.result(self.state, action)
        cost = problem.path_cost(self.path_cost, self.state, action, successor)
        return Node(successor, self, action, cost)

    def solution(self):
        """Return the sequence of actions to go from the root to this node."""
        return [node.action for node in self.path()[1:]]

    def path(self):
        """Return a list of nodes forming the path from the root to this node."""
        chain = []
        node = self
        while node is not None:
            chain.append(node)
            node = node.parent
        chain.reverse()
        return chain

    # Queues used by breadth_first_graph_search / astar_search must not hold
    # duplicated states, so nodes with equal states compare equal. [Problem:
    # this may not be what you want in other contexts.]
    def __eq__(self, other):
        return isinstance(other, Node) and self.state == other.state

    def __hash__(self):
        # Hash by state, not identity, so a node with the same state can be
        # found quickly in a hash table.
        return hash(self.state)
# ______________________________________________________________________________
class SimpleProblemSolvingAgentProgram:
    """
    [Figure 3.1]
    Abstract framework for a problem-solving agent.
    """

    def __init__(self, initial_state=None):
        """`state` abstractly represents the world; `seq` is the list of
        actions still to perform to reach a chosen state from the root."""
        self.state = initial_state
        self.seq = []

    def __call__(self, percept):
        """[Figure 3.1] Formulate a goal and problem, then search for a
        sequence of actions to solve it; return the next action (or None)."""
        self.state = self.update_state(self.state, percept)
        if not self.seq:
            goal = self.formulate_goal(self.state)
            problem = self.formulate_problem(self.state, goal)
            self.seq = self.search(problem)
            if not self.seq:
                return None
        return self.seq.pop(0)

    def update_state(self, state, percept):
        raise NotImplementedError

    def formulate_goal(self, state):
        raise NotImplementedError

    def formulate_problem(self, state, goal):
        raise NotImplementedError

    def search(self, problem):
        raise NotImplementedError
# ______________________________________________________________________________
# Uninformed Search algorithms


def breadth_first_tree_search(problem):
    """
    [Figure 3.7]
    Expand the shallowest unexpanded node first (FIFO frontier).
    Visited states are not remembered, so this can repeat forever
    on state spaces that contain loops.
    """
    fringe = deque([Node(problem.initial)])  # FIFO queue of nodes
    while fringe:
        current = fringe.popleft()
        if problem.goal_test(current.state):
            return current
        for successor in current.expand(problem):
            fringe.append(successor)
    return None
def depth_first_tree_search(problem):
    """
    [Figure 3.7]
    Expand the deepest unexpanded node first (LIFO frontier).
    Visited states are not remembered, so this can repeat forever
    on state spaces that contain loops.
    """
    stack = [Node(problem.initial)]
    while stack:
        current = stack.pop()
        if problem.goal_test(current.state):
            return current
        stack += current.expand(problem)
    return None
def depth_first_graph_search(problem):
    """
    [Figure 3.7]
    Depth-first search that remembers expanded states, so it cannot be
    trapped by loops; only the first path reaching a state is kept.
    """
    stack = [Node(problem.initial)]
    visited = set()
    while stack:
        current = stack.pop()
        if problem.goal_test(current.state):
            return current
        visited.add(current.state)
        for successor in current.expand(problem):
            # Skip states already expanded or already waiting on the stack
            # (Node equality compares states).
            if successor.state not in visited and successor not in stack:
                stack.append(successor)
    return None
def breadth_first_graph_search(problem):
    """[Figure 3.11]
    Breadth-first search on a graph: goal-tests nodes when they are
    generated and never revisits an expanded or already-queued state.
    """
    root = Node(problem.initial)
    if problem.goal_test(root.state):
        return root
    queue = deque([root])
    visited = set()
    while queue:
        current = queue.popleft()
        visited.add(current.state)
        for successor in current.expand(problem):
            if successor.state in visited or successor in queue:
                continue
            if problem.goal_test(successor.state):
                return successor
            queue.append(successor)
    return None
def best_first_graph_search(problem, f, display=False):
    """Expand the node with the smallest f score first.
    `f` is memoized onto the nodes (attribute 'f'), so the scores along
    the returned path can be inspected afterwards. With f = a heuristic
    estimate this is greedy best-first search; with f = node.depth it
    behaves like breadth-first search."""
    f = memoize(f, 'f')
    start = Node(problem.initial)
    frontier = PriorityQueue('min', f)
    frontier.append(start)
    explored = set()
    while frontier:
        current = frontier.pop()
        if problem.goal_test(current.state):
            if display:
                print(len(explored), "paths have been expanded and", len(frontier), "paths remain in the frontier")
            return current
        explored.add(current.state)
        for successor in current.expand(problem):
            if successor.state not in explored and successor not in frontier:
                frontier.append(successor)
            elif successor in frontier and f(successor) < frontier[successor]:
                # Found a cheaper route to a queued state: replace it.
                del frontier[successor]
                frontier.append(successor)
    return None
def uniform_cost_search(problem, display=False):
    """[Figure 3.14] Best-first graph search ordered by path cost g(n)."""
    def g(node):
        return node.path_cost
    return best_first_graph_search(problem, g, display)
def depth_limited_search(problem, limit=50):
    """[Figure 3.17] Depth-first search cut off at depth `limit`.
    Returns a goal node, None on definite failure, or the string
    'cutoff' when the depth limit was reached somewhere."""

    def dls(node, remaining):
        if problem.goal_test(node.state):
            return node
        if remaining == 0:
            return 'cutoff'
        hit_cutoff = False
        for child in node.expand(problem):
            outcome = dls(child, remaining - 1)
            if outcome == 'cutoff':
                hit_cutoff = True
            elif outcome is not None:
                return outcome
        return 'cutoff' if hit_cutoff else None

    return dls(Node(problem.initial), limit)
def iterative_deepening_search(problem):
    """[Figure 3.18] Run depth-limited search with increasing depth limits
    until the outcome is something other than 'cutoff'."""
    depth = 0
    while depth < sys.maxsize:
        outcome = depth_limited_search(problem, depth)
        if outcome != 'cutoff':
            return outcome
        depth += 1
# ______________________________________________________________________________
# Bidirectional Search
# Pseudocode from https://webdocs.cs.ualberta.ca/%7Eholte/Publications/MM-AAAI2016.pdf


def bidirectional_search(problem):
    """Meet-in-the-middle (MM) bidirectional search: a forward search from
    the initial state and a backward search from the goal, always extending
    the direction with the smaller priority, until the best known solution
    cost U provably cannot be improved. Returns the solution COST (a
    number), or np.inf on failure — not a Node."""
    e = 0
    if isinstance(problem, GraphProblem):
        # Smallest edge weight; tightens the g_min_f + g_min_b + e bound below.
        e = problem.find_min_edge()
    # Per-direction cost-so-far, keyed by Node (nodes hash/compare by state).
    gF, gB = {Node(problem.initial): 0}, {Node(problem.goal): 0}
    openF, openB = [Node(problem.initial)], [Node(problem.goal)]
    closedF, closedB = [], []
    U = np.inf  # cost of the best solution found so far

    def extend(U, open_dir, open_other, g_dir, g_other, closed_dir):
        """Extend search in given direction"""
        # NOTE(review): `C` here is the closure variable assigned in the main
        # loop just before extend() is called (the current minimum priority).
        n = find_key(C, open_dir, g_dir)
        open_dir.remove(n)
        closed_dir.append(n)
        for c in n.expand(problem):
            if c in open_dir or c in closed_dir:
                # Already known; skip unless the new path is strictly cheaper.
                if g_dir[c] <= problem.path_cost(g_dir[n], n.state, None, c.state):
                    continue
                # NOTE(review): if c is in closed_dir but NOT in open_dir this
                # remove() raises ValueError — confirm reopening is intended.
                open_dir.remove(c)
            g_dir[c] = problem.path_cost(g_dir[n], n.state, None, c.state)
            open_dir.append(c)
            if c in open_other:
                # Frontiers meet: try to improve the best known solution cost.
                U = min(U, g_dir[c] + g_other[c])
        return U, open_dir, closed_dir, g_dir

    def find_min(open_dir, g):
        """Finds minimum priority, g and f values in open_dir"""
        # pr_min_f isn't forward pr_min instead it's the f-value
        # of node with priority pr_min.
        pr_min, pr_min_f = np.inf, np.inf
        for n in open_dir:
            f = g[n] + problem.h(n)
            pr = max(f, 2 * g[n])
            pr_min = min(pr_min, pr)
            pr_min_f = min(pr_min_f, f)
        # NOTE(review): min(g.values()) ranges over ALL recorded g-values,
        # including closed nodes, not just the open list.
        return pr_min, pr_min_f, min(g.values())

    def find_key(pr_min, open_dir, g):
        """Finds key in open_dir with value equal to pr_min
        and minimum g value."""
        m = np.inf
        node = Node(-1)  # placeholder, replaced by the first match found
        for n in open_dir:
            pr = max(g[n] + problem.h(n), 2 * g[n])
            if pr == pr_min:
                if g[n] < m:
                    m = g[n]
                    node = n
        return node

    while openF and openB:
        pr_min_f, f_min_f, g_min_f = find_min(openF, gF)
        pr_min_b, f_min_b, g_min_b = find_min(openB, gB)
        C = min(pr_min_f, pr_min_b)
        if U <= max(C, f_min_f, f_min_b, g_min_f + g_min_b + e):
            # U cannot be improved any further: it is the optimal cost.
            return U
        if C == pr_min_f:
            # Extend forward
            U, openF, closedF, gF = extend(U, openF, openB, gF, gB, closedF)
        else:
            # Extend backward
            U, openB, closedB, gB = extend(U, openB, openF, gB, gF, closedB)
    return np.inf
# ______________________________________________________________________________
# Informed (Heuristic) Search

# Passing a heuristic as `f` makes best-first graph search behave as greedy
# best-first search, so the same function serves under both names.
greedy_best_first_graph_search = best_first_graph_search
# Greedy best-first search is accomplished by specifying f(n) = h(n).
def astar_search(problem, h=None, display=False):
    """A* search: best-first graph search ordered by f(n) = g(n) + h(n).
    Supply the heuristic `h` here, or define one on the Problem subclass;
    h-values are cached on the nodes (attribute 'h') via memoize."""
    heuristic = memoize(h or problem.h, 'h')

    def f(n):
        return n.path_cost + heuristic(n)

    return best_first_graph_search(problem, f, display)
# ______________________________________________________________________________
# A* heuristics


class EightPuzzle(Problem):
    """Sliding-tile puzzle: tiles 1..8 plus one blank on a 3x3 board.
    A state is a 9-tuple where the element at index i is the tile at that
    board position (0 marks the blank)."""

    def __init__(self, initial, goal=(1, 2, 3, 4, 5, 6, 7, 8, 0)):
        """Store the goal layout and delegate the rest to Problem."""
        super().__init__(initial, goal)

    def find_blank_square(self, state):
        """Index of the blank (0) within the state tuple."""
        return state.index(0)

    def actions(self, state):
        """Moves of the blank that stay on the board; a subset of
        ['UP', 'DOWN', 'LEFT', 'RIGHT'] in that order."""
        moves = ['UP', 'DOWN', 'LEFT', 'RIGHT']
        blank = self.find_blank_square(state)
        if blank % 3 == 0:  # left column
            moves.remove('LEFT')
        if blank < 3:  # top row
            moves.remove('UP')
        if blank % 3 == 2:  # right column
            moves.remove('RIGHT')
        if blank > 5:  # bottom row
            moves.remove('DOWN')
        return moves

    def result(self, state, action):
        """State reached by sliding the blank one step in `action`'s
        direction; `action` must be valid for `state`."""
        blank = self.find_blank_square(state)
        offset = {'UP': -3, 'DOWN': 3, 'LEFT': -1, 'RIGHT': 1}[action]
        target = blank + offset
        board = list(state)
        board[blank], board[target] = board[target], board[blank]
        return tuple(board)

    def goal_test(self, state):
        """True iff the board matches the goal layout exactly."""
        return state == self.goal

    def check_solvability(self, state):
        """A state is solvable iff its tile inversion count is even;
        the blank is ignored when counting inversions."""
        inversions = 0
        for i, earlier in enumerate(state):
            for later in state[i + 1:]:
                if earlier and later and earlier > later:
                    inversions += 1
        return inversions % 2 == 0

    def h(self, node):
        """Misplaced-tiles heuristic: count of positions that differ from
        the goal (a misplaced blank is counted as well)."""
        return sum(1 for tile, want in zip(node.state, self.goal) if tile != want)
# ______________________________________________________________________________


class PlanRoute(Problem):
    """ The problem of moving the Hybrid Wumpus Agent from one place to other """

    def __init__(self, initial, goal, allowed, dimrow):
        """ Define goal state and initialize a problem.

        allowed: squares ([x, y] lists) the agent may occupy.
        dimrow: board dimension (the board is dimrow x dimrow, 1-indexed).
        """
        super().__init__(initial, goal)
        self.dimrow = dimrow
        self.goal = goal
        self.allowed = allowed

    def actions(self, state):
        """ Return the actions that can be executed in the given state.
        Turning is always possible; 'Forward' is removed when it would
        bump into the edge of the board. """
        possible_actions = ['Forward', 'TurnLeft', 'TurnRight']
        x, y = state.get_location()
        orientation = state.get_orientation()

        # Prevent Bumps
        if x == 1 and orientation == 'LEFT':
            if 'Forward' in possible_actions:
                possible_actions.remove('Forward')
        if y == 1 and orientation == 'DOWN':
            if 'Forward' in possible_actions:
                possible_actions.remove('Forward')
        if x == self.dimrow and orientation == 'RIGHT':
            if 'Forward' in possible_actions:
                possible_actions.remove('Forward')
        if y == self.dimrow and orientation == 'UP':
            if 'Forward' in possible_actions:
                possible_actions.remove('Forward')

        return possible_actions

    def result(self, state, action):
        """ Given state and action, return a new state that is the result of the action.
        Action is assumed to be a valid action in the state.
        NOTE: turning mutates `state` in place; the (possibly updated)
        state object itself is returned. """
        x, y = state.get_location()
        proposed_loc = list()

        # Move Forward
        if action == 'Forward':
            if state.get_orientation() == 'UP':
                proposed_loc = [x, y + 1]
            elif state.get_orientation() == 'DOWN':
                proposed_loc = [x, y - 1]
            elif state.get_orientation() == 'LEFT':
                proposed_loc = [x - 1, y]
            elif state.get_orientation() == 'RIGHT':
                proposed_loc = [x + 1, y]
            else:
                raise Exception('InvalidOrientation')

        # Rotate counter-clockwise
        elif action == 'TurnLeft':
            if state.get_orientation() == 'UP':
                state.set_orientation('LEFT')
            elif state.get_orientation() == 'DOWN':
                state.set_orientation('RIGHT')
            elif state.get_orientation() == 'LEFT':
                state.set_orientation('DOWN')
            elif state.get_orientation() == 'RIGHT':
                state.set_orientation('UP')
            else:
                raise Exception('InvalidOrientation')

        # Rotate clockwise
        elif action == 'TurnRight':
            if state.get_orientation() == 'UP':
                state.set_orientation('RIGHT')
            elif state.get_orientation() == 'DOWN':
                state.set_orientation('LEFT')
            elif state.get_orientation() == 'LEFT':
                state.set_orientation('UP')
            elif state.get_orientation() == 'RIGHT':
                state.set_orientation('DOWN')
            else:
                raise Exception('InvalidOrientation')

        if proposed_loc in self.allowed:
            # Pass the y coordinate itself; previously it was wrapped in a
            # one-element list, corrupting the stored location.
            state.set_location(proposed_loc[0], proposed_loc[1])

        return state

    def goal_test(self, state):
        """ Given a state, return True if state is a goal state or False, otherwise """
        return state.get_location() == tuple(self.goal)

    def h(self, node):
        """ Return the heuristic value for a given state."""
        # Manhattan Heuristic Function
        x1, y1 = node.state.get_location()
        x2, y2 = self.goal
        return abs(x2 - x1) + abs(y2 - y1)
# ______________________________________________________________________________
# Other search algorithms


def recursive_best_first_search(problem, h=None):
    """[Figure 3.26]
    Recursive best-first search: best-first behavior in linear space.
    Each recursive call carries an f-limit — the f-value of the best
    alternative path available from any ancestor — and unwinds when the
    best child exceeds it, recording the backed-up f-value on that node.
    `h` defaults to problem.h and is cached on the nodes via memoize."""
    h = memoize(h or problem.h, 'h')

    def RBFS(problem, node, flimit):
        # Returns (solution-node-or-None, backed-up f-value).
        if problem.goal_test(node.state):
            return node, 0  # (The second value is immaterial)
        successors = node.expand(problem)
        if len(successors) == 0:
            return None, np.inf
        for s in successors:
            # A child's f is forced to be at least the parent's (pathmax).
            s.f = max(s.path_cost + h(s), node.f)
        while True:
            # Order by lowest f value
            successors.sort(key=lambda x: x.f)
            best = successors[0]
            if best.f > flimit:
                # Best option exceeds the limit: back up its f-value.
                return None, best.f
            if len(successors) > 1:
                alternative = successors[1].f
            else:
                alternative = np.inf
            # Recurse with the tighter of flimit and the best alternative;
            # on return, best.f holds the backed-up value for re-sorting.
            result, best.f = RBFS(problem, best, min(flimit, alternative))
            if result is not None:
                return result, best.f

    node = Node(problem.initial)
    node.f = h(node)
    result, bestf = RBFS(problem, node, np.inf)
    return result
def hill_climbing(problem):
    """
    [Figure 4.2]
    Greedy local search: repeatedly move to the highest-valued neighbor,
    stopping as soon as no neighbor improves on the current state.
    Returns the final STATE (not a Node).
    """
    current = Node(problem.initial)
    while True:
        candidates = current.expand(problem)
        if not candidates:
            return current.state
        best = argmax_random_tie(candidates, key=lambda n: problem.value(n.state))
        if problem.value(best.state) <= problem.value(current.state):
            return current.state
        current = best
def exp_schedule(k=20, lam=0.005, limit=100):
    """Exponential-decay temperature schedule for simulated annealing:
    T(t) = k * exp(-lam * t) while t < limit, and 0 afterwards."""
    def schedule(t):
        return k * np.exp(-lam * t) if t < limit else 0
    return schedule
def simulated_annealing(problem, schedule=exp_schedule()):
    """[Figure 4.5] Simulated annealing. CAUTION: unlike the book's
    pseudocode, this returns a STATE rather than a Node."""
    current = Node(problem.initial)
    for t in range(sys.maxsize):
        T = schedule(t)
        if T == 0:
            return current.state
        moves = current.expand(problem)
        if not moves:
            return current.state
        candidate = random.choice(moves)
        gain = problem.value(candidate.state) - problem.value(current.state)
        # Always accept improvements; accept downhill moves with
        # probability exp(gain / T), which shrinks as T cools.
        if gain > 0 or probability(np.exp(gain / T)):
            current = candidate
def simulated_annealing_full(problem, schedule=exp_schedule()):
    """Like simulated_annealing, but returns the list of every state
    visited on the way to the final state."""
    visited = []
    current = Node(problem.initial)
    for t in range(sys.maxsize):
        visited.append(current.state)
        T = schedule(t)
        if T == 0:
            return visited
        moves = current.expand(problem)
        if not moves:
            # NOTE: returns only the final state here (not the visited
            # list), mirroring the original behavior.
            return current.state
        candidate = random.choice(moves)
        gain = problem.value(candidate.state) - problem.value(current.state)
        if gain > 0 or probability(np.exp(gain / T)):
            current = candidate
def and_or_graph_search(problem):
    """[Figure 4.11] Search for a conditional plan in a nondeterministic,
    fully observable environment. OR nodes choose one action; AND nodes
    must cover every state that action may lead to. Returns a nested
    plan of the form [action, {state: subplan}] ([] at goal states), or
    None if no plan exists."""

    def or_search(state, problem, path):
        """Return a plan (list) from `state`, or None on failure/cycle."""
        if problem.goal_test(state):
            return []
        if state in path:
            return None  # cycle: give up on this branch
        for action in problem.actions(state):
            contingency = and_search(problem.result(state, action),
                                     problem, path + [state, ])
            if contingency is not None:
                return [action, contingency]

    def and_search(states, problem, path):
        """Return {state: plan} covering every state in `states`, or None
        if any of them is unsolvable."""
        per_state = {}
        for s in states:
            per_state[s] = or_search(s, problem, path)
            if per_state[s] is None:
                return None
        return per_state

    return or_search(problem.initial, problem, [])
# Pre-defined actions for PeakFindingProblem
# Four compass moves as (dx, dy) grid offsets.
directions4 = {'W': (-1, 0), 'N': (0, 1), 'E': (1, 0), 'S': (0, -1)}
# Eight-way movement: the four compass moves plus the four diagonals.
directions8 = dict(directions4)
directions8.update({'NW': (-1, 1), 'NE': (1, 1), 'SE': (1, -1), 'SW': (-1, -1)})
class PeakFindingProblem(Problem):
    """Find the highest peak on a bounded grid of heights."""

    def __init__(self, initial, grid, defined_actions=directions4):
        """`grid` is a 2-D array/list; states are index tuples into it."""
        super().__init__(initial)
        self.grid = grid
        self.defined_actions = defined_actions
        self.n = len(grid)
        assert self.n > 0
        self.m = len(grid[0])
        assert self.m > 0

    def actions(self, state):
        """Names of the defined actions whose destination stays on the grid."""
        legal = []
        for name, offset in self.defined_actions.items():
            r, c = vector_add(state, offset)
            if 0 <= r < self.n and 0 <= c < self.m:
                legal.append(name)
        return legal

    def result(self, state, action):
        """Cell reached by taking `action` from `state`."""
        return vector_add(state, self.defined_actions[action])

    def value(self, state):
        """Height stored at the grid cell the state indexes."""
        r, c = state
        assert 0 <= r < self.n
        assert 0 <= c < self.m
        return self.grid[r][c]
class OnlineDFSAgent:
    """
    [Figure 4.21]
    The abstract class for an OnlineDFSAgent. Override update_state
    method to convert percept to state. While initializing
    the subclass a problem needs to be provided which is an instance of
    a subclass of the Problem class.
    """

    def __init__(self, problem):
        self.problem = problem
        self.s = None  # previous state
        self.a = None  # previous action
        self.untried = dict()  # state -> actions not yet tried there
        self.unbacktracked = dict()  # state -> predecessors to back up to
        self.result = {}  # (state, action) -> observed next state

    def __call__(self, percept):
        """Return the next action for the observed percept (None at a goal
        or when exploration is exhausted)."""
        s1 = self.update_state(percept)
        if self.problem.goal_test(s1):
            self.a = None
        else:
            if s1 not in self.untried:
                self.untried[s1] = self.problem.actions(s1)
            if self.s is not None:
                # Record the observed transition. Use .get so a not-yet-seen
                # (s, a) pair does not raise KeyError.
                if s1 != self.result.get((self.s, self.a)):
                    self.result[(self.s, self.a)] = s1
                    self.unbacktracked.setdefault(s1, []).insert(0, self.s)
            if len(self.untried[s1]) == 0:
                if len(self.unbacktracked.get(s1, [])) == 0:
                    self.a = None
                else:
                    # a <- an action b such that result[(s1, b)] == POP(unbacktracked[s1])
                    backtrack_target = self.unbacktracked[s1].pop(0)
                    for (s, b) in self.result.keys():
                        if s == s1 and self.result[(s, b)] == backtrack_target:
                            self.a = b
                            break
            else:
                # Take the next untried action at s1.
                self.a = self.untried[s1].pop(0)
        self.s = s1
        return self.a

    def update_state(self, percept):
        """To be overridden in most cases. The default case
        assumes the percept to be of type state."""
        return percept
# ______________________________________________________________________________


class OnlineSearchProblem(Problem):
    """
    A problem solved by an agent executing actions, rather than by pure
    computation. Assumes a deterministic, fully observable environment."""

    def __init__(self, initial, goal, graph):
        super().__init__(initial, goal)
        self.graph = graph

    def actions(self, state):
        """Actions available at `state`: the keys of its adjacency dict."""
        return self.graph.graph_dict[state].keys()

    def output(self, state, action):
        """State that `action` leads to from `state`."""
        return self.graph.graph_dict[state][action]

    def h(self, state):
        """Least possible cost from `state` to a goal (precomputed table
        on the graph)."""
        return self.graph.least_costs[state]

    def c(self, s, a, s1):
        """Cost estimate for moving from `s` to `s1`; constant 1 here."""
        return 1

    def update_state(self, percept):
        raise NotImplementedError

    def goal_test(self, state):
        """True iff `state` is the goal state."""
        return state == self.goal
class LRTAStarAgent:
    """ [Figure 4.24]
    Abstract class for LRTA*-Agent. A problem needs to be
    provided which is an instance of a subclass of Problem Class.
    Takes a OnlineSearchProblem [Figure 4.23] as a problem.
    """

    def __init__(self, problem):
        self.problem = problem
        # self.result = {}      # no need as we are using problem.result
        self.H = {}  # learned cost-to-goal estimate per state
        self.s = None  # previous state
        self.a = None  # previous action

    def __call__(self, s1):  # as of now s1 is a state rather than a percept
        """Return the next action from state s1 (None once at a goal)."""
        if self.problem.goal_test(s1):
            self.a = None
            return self.a
        else:
            if s1 not in self.H:
                self.H[s1] = self.problem.h(s1)
            if self.s is not None:
                # self.result[(self.s, self.a)] = s1    # no need as we are using problem.output
                # Update the previous state's estimate with the best
                # one-step lookahead cost over all its actions.
                self.H[self.s] = min(self.LRTA_cost(self.s, b, self.problem.output(self.s, b),
                                                    self.H) for b in self.problem.actions(self.s))
            # Choose the action from s1 with the lowest estimated cost.
            self.a = min(self.problem.actions(s1),
                         key=lambda b: self.LRTA_cost(s1, b, self.problem.output(s1, b), self.H))
            self.s = s1
            return self.a

    def LRTA_cost(self, s, a, s1, H):
        """Returns cost to move from state 's' to state 's1' plus
        estimated cost to get to goal from s1."""
        if s1 is None:
            return self.problem.h(s)
        else:
            # H may not yet contain s1; fall back to the problem heuristic.
            # (Catch only KeyError — a bare except would mask real errors.)
            try:
                return self.problem.c(s, a, s1) + self.H[s1]
            except KeyError:
                return self.problem.c(s, a, s1) + self.problem.h(s1)
# ______________________________________________________________________________
# Genetic Algorithm


def genetic_search(problem, ngen=1000, pmut=0.1, n=20):
    """Call genetic_algorithm on the appropriate parts of a problem.
    This requires the problem to have states that can mate and mutate,
    plus a value method that scores states."""
    # NOTE: This is not tested and might not work.
    # TODO: Use this function to make Problems work with genetic_algorithm.
    s = problem.initial_state
    states = [problem.result(s, a) for a in problem.actions(s)]
    random.shuffle(states)
    # Pass ngen/pmut by keyword: passed positionally they would land in
    # genetic_algorithm's gene_pool and f_thres parameters instead.
    return genetic_algorithm(states[:n], problem.value, ngen=ngen, pmut=pmut)
def genetic_algorithm(population, fitness_fn, gene_pool=[0, 1], f_thres=None, ngen=1000, pmut=0.1):
    """[Figure 4.8] Evolve `population` for up to `ngen` generations,
    returning early if some individual reaches fitness `f_thres`;
    otherwise return the fittest individual of the final generation."""
    for _ in range(ngen):
        next_gen = []
        for _ in range(len(population)):
            parent_a, parent_b = select(2, population, fitness_fn)
            next_gen.append(mutate(recombine(parent_a, parent_b), gene_pool, pmut))
        population = next_gen
        champion = fitness_threshold(fitness_fn, f_thres, population)
        if champion:
            return champion
    return max(population, key=fitness_fn)
def fitness_threshold(fitness_fn, f_thres, population):
    """Return the fittest individual if its fitness reaches `f_thres`,
    else None. A falsy `f_thres` disables the check entirely."""
    if not f_thres:
        return None
    champion = max(population, key=fitness_fn)
    return champion if fitness_fn(champion) >= f_thres else None
def init_population(pop_number, gene_pool, state_length):
    """Initializes population for genetic algorithm
    pop_number : Number of individuals in population
    gene_pool : List of possible values for individuals
    state_length: The length of each individual"""
    pool_size = len(gene_pool)
    return [[gene_pool[random.randrange(0, pool_size)] for _ in range(state_length)]
            for _ in range(pop_number)]
def select(r, population, fitness_fn):
    """Draw `r` individuals, each sampled with probability proportional
    to its fitness."""
    draw = weighted_sampler(population, map(fitness_fn, population))
    return [draw() for _ in range(r)]
def recombine(x, y):
    """Single-point crossover: a random-length prefix of x joined to the
    matching suffix of y."""
    cut = random.randrange(0, len(x))
    return x[:cut] + y[cut:]
def recombine_uniform(x, y):
    """Uniform crossover: roughly half the positions (chosen at random)
    take their gene from x, the rest from y; the genes are returned
    joined into a single string."""
    n = len(x)
    child = [0] * n
    shuffled = random.sample(range(n), n)
    for rank, pos in enumerate(shuffled):
        child[pos] = x[pos] if rank < n / 2 else y[pos]
    return ''.join(str(gene) for gene in child)
def mutate(x, gene_pool, pmut):
    """With probability `pmut`, replace one random position of x with a
    random gene from the pool; otherwise return x unchanged."""
    if random.uniform(0, 1) >= pmut:
        return x
    pos = random.randrange(0, len(x))
    gene = gene_pool[random.randrange(0, len(gene_pool))]
    return x[:pos] + [gene] + x[pos + 1:]
# _____________________________________________________________________________
# The remainder of this file implements examples for the search algorithms.

# ______________________________________________________________________________
# Graphs and Graph Problems


class Graph:
    """A graph of nodes (vertices) joined by edges (links), each edge
    optionally carrying a length. Example:
        g = Graph({'A': {'B': 1, 'C': 2}})
    builds three nodes A, B, C with an edge of length 1 from A to B and
    one of length 2 from A to C. Passing directed=False makes the graph
    undirected: inverse links are added immediately and for every later
    g.connect(...) call. Use g.nodes() for the node list, g.get('A') for
    the outgoing-link dict of A, and g.get('A', 'B') for the A->B length.
    'Lengths' may be any object, and nodes any hashable object."""

    def __init__(self, graph_dict=None, directed=True):
        self.graph_dict = graph_dict or {}
        self.directed = directed
        if not directed:
            self.make_undirected()

    def make_undirected(self):
        """Add the reverse of every edge currently in the graph."""
        for src in list(self.graph_dict.keys()):
            for dst, dist in self.graph_dict[src].items():
                self.connect1(dst, src, dist)

    def connect(self, A, B, distance=1):
        """Add an edge A->B of the given distance; for an undirected
        graph the inverse edge is added as well."""
        self.connect1(A, B, distance)
        if not self.directed:
            self.connect1(B, A, distance)

    def connect1(self, A, B, distance):
        """Add a one-way link A->B of the given distance."""
        self.graph_dict.setdefault(A, {})[B] = distance

    def get(self, a, b=None):
        """With one argument, the {node: distance} dict out of `a`
        (created empty if absent); with two, the a->b distance or None."""
        links = self.graph_dict.setdefault(a, {})
        return links if b is None else links.get(b)

    def nodes(self):
        """Every node appearing as a source or a destination of some edge."""
        sources = set(self.graph_dict)
        targets = {dst for links in self.graph_dict.values() for dst in links}
        return list(sources | targets)
def UndirectedGraph(graph_dict=None):
    """Construct a Graph in which every edge — current and future — is
    two-way."""
    return Graph(graph_dict=graph_dict, directed=False)
def RandomGraph(nodes=list(range(10)), min_links=2, width=400, height=300,
                curvature=lambda: random.uniform(1.1, 1.5)):
    """Build a random undirected graph over `nodes`, laid out at random
    on a (width x height) rectangle, with each node linked to at least
    its `min_links` nearest neighbors. Inverse links are added too, so
    some nodes end up with more connections. Edge length is the straight
    line distance times curvature(), by default a random factor between
    1.1 and 1.5."""
    g = UndirectedGraph()
    g.locations = {}
    # Scatter the cities over the rectangle.
    for node in nodes:
        g.locations[node] = (random.randrange(width), random.randrange(height))
    # Link each city to at least min_links nearest neighbors.
    for _ in range(min_links):
        for node in nodes:
            if len(g.get(node)) >= min_links:
                continue
            here = g.locations[node]

            def distance_to_node(n):
                # Ignore the node itself and already-linked neighbors.
                if n is node or g.get(node, n):
                    return np.inf
                return distance(g.locations[n], here)

            neighbor = min(nodes, key=distance_to_node)
            d = distance(g.locations[neighbor], here) * curvature()
            g.connect(node, neighbor, int(d))
    return g
""" [Figure 3.2]
Simplified road map of Romania
"""
romania_map = UndirectedGraph(dict(
Arad=dict(Zerind=75, Sibiu=140, Timisoara=118),
Bucharest=dict(Urziceni=85, Pitesti=101, Giurgiu=90, Fagaras=211),
Craiova=dict(Drobeta=120, Rimnicu=146, Pitesti=138),
Drobeta=dict(Mehadia=75),
Eforie=dict(Hirsova=86),
Fagaras=dict(Sibiu=99),
Hirsova=dict(Urziceni=98),
Iasi=dict(Vaslui=92, Neamt=87),
Lugoj=dict(Timisoara=111, Mehadia=70),
Oradea=dict(Zerind=71, Sibiu=151),
Pitesti=dict(Rimnicu=97),
Rimnicu=dict(Sibiu=80),
Urziceni=dict(Vaslui=142)))
romania_map.locations = dict(
Arad=(91, 492), Bucharest=(400, 327), Craiova=(253, 288),
Drobeta=(165, 299), Eforie=(562, 293), Fagaras=(305, 449),
Giurgiu=(375, 270), Hirsova=(534, 350), Iasi=(473, 506),
Lugoj=(165, 379), Mehadia=(168, 339), Neamt=(406, 537),
Oradea=(131, 571), Pitesti=(320, 368), Rimnicu=(233, 410),
Sibiu=(207, 457), Timisoara=(94, 410), Urziceni=(456, 350),
Vaslui=(509, 444), Zerind=(108, 531))
""" [Figure 4.9]
Eight possible states of the vacuum world
Each state is represented as
* "State of the left room" "State of the right room" "Room in which the agent
is present"
1 - DDL Dirty Dirty Left
2 - DDR Dirty Dirty Right
3 - DCL Dirty Clean Left
4 - DCR Dirty Clean Right
5 - CDL Clean Dirty Left
6 - CDR Clean Dirty Right
7 - CCL Clean Clean Left
8 - CCR Clean Clean Right
"""
# Nondeterministic vacuum-world transition model: each state maps actions
# to the *list* of states that action may produce ('Suck' has two possible
# outcomes in some states).
vacuum_world = Graph(dict(
    State_1=dict(Suck=['State_7', 'State_5'], Right=['State_2']),
    # NOTE(review): Left from State_2 maps back to State_2 itself; the
    # state key 4.9 legend suggests ['State_1'] (DDR -> DDL) — verify.
    State_2=dict(Suck=['State_8', 'State_4'], Left=['State_2']),
    State_3=dict(Suck=['State_7'], Right=['State_4']),
    State_4=dict(Suck=['State_4', 'State_2'], Left=['State_3']),
    State_5=dict(Suck=['State_5', 'State_1'], Right=['State_6']),
    State_6=dict(Suck=['State_8'], Left=['State_5']),
    State_7=dict(Suck=['State_7', 'State_3'], Right=['State_8']),
    State_8=dict(Suck=['State_8', 'State_6'], Left=['State_7'])
))
""" [Figure 4.23]
One-dimensional state space Graph
"""
one_dim_state_space = Graph(dict(
State_1=dict(Right='State_2'),
State_2=dict(Right='State_3', Left='State_1'),
State_3=dict(Right='State_4', Left='State_2'),
State_4=dict(Right='State_5', Left='State_3'),
State_5=dict(Right='State_6', Left='State_4'),
State_6=dict(Left='State_5')
))
one_dim_state_space.least_costs = dict(
State_1=8,
State_2=9,
State_3=2,
State_4=2,
State_5=4,
State_6=3)
""" [Figure 6.1]
Principal states and territories of Australia
"""
australia_map = UndirectedGraph(dict(
T=dict(),
SA=dict(WA=1, NT=1, Q=1, NSW=1, V=1),
NT=dict(WA=1, Q=1),
NSW=dict(Q=1, V=1)))
australia_map.locations = dict(WA=(120, 24), NT=(135, 20), SA=(135, 30),
Q=(145, 20), NSW=(145, 32), T=(145, 42),
V=(145, 37))
class GraphProblem(Problem):
    """The problem of searching a graph from one node to another."""

    def __init__(self, initial, goal, graph):
        super().__init__(initial, goal)
        self.graph = graph

    def actions(self, A):
        """The neighbors of node A are the available actions."""
        return list(self.graph.get(A).keys())

    def result(self, state, action):
        """Moving to a neighbor simply lands on that neighbor."""
        return action

    def path_cost(self, cost_so_far, A, action, B):
        """Accumulated cost plus the A-B edge; a missing edge costs inf."""
        edge = self.graph.get(A, B)
        return cost_so_far + (edge if edge else np.inf)

    def find_min_edge(self):
        """Smallest edge weight anywhere in the graph."""
        smallest = np.inf
        for links in self.graph.graph_dict.values():
            smallest = min(smallest, min(links.values()))
        return smallest

    def h(self, node):
        """Straight-line distance from the node's state to the goal, read
        from the graph's `locations` table; inf when no locations exist."""
        locs = getattr(self.graph, 'locations', None)
        if not locs:
            return np.inf
        # Accept either a raw state string or a Node.
        here = node if type(node) is str else node.state
        return int(distance(locs[here], locs[self.goal]))
class GraphProblemStochastic(GraphProblem):
    """
    A GraphProblem variant where one action can lead to several possible
    states. Define the graph as
    dict(A=dict(Action=[[<Result 1>, <Result 2>, ...], <cost>], ...), ...)
    and, since the dictionary format differs, build it as a directed graph.
    """

    def result(self, state, action):
        """Return the list of states this action may lead to."""
        return self.graph.get(state, action)

    def path_cost(self):
        raise NotImplementedError
# ______________________________________________________________________________


class NQueensProblem(Problem):
    """The problem of placing N queens on an NxN board with none attacking
    each other. A state is represented as an N-element array, where
    a value of r in the c-th entry means there is a queen at column c,
    row r, and a value of -1 means that the c-th column has not been
    filled in yet. We fill in columns left to right.
    >>> depth_first_tree_search(NQueensProblem(8))
    <Node (7, 3, 0, 2, 5, 1, 6, 4)>
    """

    def __init__(self, N):
        super().__init__(tuple([-1] * N))
        self.N = N

    def actions(self, state):
        """In the leftmost empty column, try all non-conflicting rows."""
        # Compare with != rather than `is not`: identity tests on int
        # literals are implementation-dependent (SyntaxWarning on CPython).
        if state[-1] != -1:
            return []  # All columns filled; no successors
        else:
            col = state.index(-1)
            return [row for row in range(self.N)
                    if not self.conflicted(state, row, col)]

    def result(self, state, row):
        """Place the next queen at the given row."""
        col = state.index(-1)
        new = list(state[:])
        new[col] = row
        return tuple(new)

    def conflicted(self, state, row, col):
        """Would placing a queen at (row, col) conflict with anything?"""
        return any(self.conflict(row, col, state[c], c)
                   for c in range(col))

    def conflict(self, row1, col1, row2, col2):
        """Would putting two queens in (row1, col1) and (row2, col2) conflict?"""
        return (row1 == row2 or  # same row
                col1 == col2 or  # same column
                row1 - col1 == row2 - col2 or  # same \ diagonal
                row1 + col1 == row2 + col2)  # same / diagonal

    def goal_test(self, state):
        """Check if all columns filled, no conflicts."""
        if state[-1] == -1:
            return False
        return not any(self.conflicted(state, state[col], col)
                       for col in range(len(state)))

    def h(self, node):
        """Number of conflicting queen pairs (each pair counted twice,
        once per ordering). enumerate yields (column, row) pairs, which
        is fine because conflict() is symmetric under swapping the
        row/column roles."""
        num_conflicts = 0
        for (r1, c1) in enumerate(node.state):
            for (r2, c2) in enumerate(node.state):
                if (r1, c1) != (r2, c2):
                    num_conflicts += self.conflict(r1, c1, r2, c2)
        return num_conflicts
# ______________________________________________________________________________
# Inverse Boggle: Search for a high-scoring Boggle board. A good domain for
# iterative-repair and related search techniques, as suggested by Justin Boyan.
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

# The 16 letter cubes of a standard 4x4 Boggle set; each string lists the
# six faces of one cube.
cubes16 = ['FORIXB', 'MOQABJ', 'GURILW', 'SETUPL',
           'CMPDAE', 'ACITAO', 'SLCRAE', 'ROMASH',
           'NODESW', 'HEFIYE', 'ONUDTK', 'TEVIGN',
           'ANEDVZ', 'PINESH', 'ABILYT', 'GKYLEU']
def random_boggle(n=4):
    """Return a random Boggle board of size n x n.
    We represent a board as a linear list of letters."""
    dice = [cubes16[i % 16] for i in range(n * n)]
    random.shuffle(dice)
    return [random.choice(die) for die in dice]
# The best 5x5 board found by Boyan, with our word list this board scores
# 2274 words, for a score of 9837
boyan_best = list('RSTCSDEIAEGNLRPEATESMSSID')
def print_boggle(board):
    """Print the board in a 2-d array."""
    n2 = len(board)
    n = exact_sqrt(n2)
    for idx, letter in enumerate(board):
        if idx % n == 0 and idx > 0:
            # Start a new row of the square grid.
            print()
        if letter == 'Q':
            print('Qu', end=' ')
        else:
            print(str(letter) + ' ', end=' ')
    print()
def boggle_neighbors(n2, cache={}):
    """Return a list of lists, where the i-th element is the list of indexes
    for the neighbors of square i.

    Results are memoized in the (intentionally mutable) default `cache`."""
    if cache.get(n2):
        return cache.get(n2)
    n = exact_sqrt(n2)
    # (row, col) offsets, ordered to reproduce the historical output order:
    # the row above, the row below, then the left/right neighbors.
    offsets = ((-1, 0), (-1, -1), (-1, 1),
               (1, 0), (1, -1), (1, 1),
               (0, -1), (0, 1))
    neighbors = []
    for i in range(n2):
        row, col = divmod(i, n)
        adjacent = []
        for dr, dc in offsets:
            r, c = row + dr, col + dc
            if 0 <= r < n and 0 <= c < n:
                adjacent.append(r * n + c)
        neighbors.append(adjacent)
    cache[n2] = neighbors
    return neighbors
def exact_sqrt(n2):
    """If n2 is a perfect square, return its square root, else raise error.

    Uses exact integer arithmetic: the previous int(np.sqrt(n2)) could be off
    by one for very large perfect squares because of float rounding.
    """
    from math import isqrt  # local import keeps the fix self-contained
    n = isqrt(n2)
    assert n * n == n2
    return n
# _____________________________________________________________________________
class Wordlist:
    """Holds a sorted list of words. Use `word in wordlist` to test
    membership, or wordlist.lookup(prefix) to check whether any stored
    word starts with `prefix`."""

    def __init__(self, file, min_len=3):
        tokens = file.read().upper().split()
        self.words = sorted(w for w in tokens if len(w) >= min_len)
        # Per-letter (lo, hi) index bounds into the sorted word list, used to
        # narrow lookup ranges.
        self.bounds = {}
        for letter in ALPHABET:
            successor = chr(ord(letter) + 1)
            self.bounds[letter] = (bisect.bisect(self.words, letter),
                                   bisect.bisect(self.words, successor))

    def lookup(self, prefix, lo=0, hi=None):
        """See if prefix is in dictionary, as a full word or as a prefix.
        Return (i, is_word): i is the lowest index such that
        words[i].startswith(prefix), or None; is_word is True iff the
        prefix itself is a stored word."""
        if hi is None:
            hi = len(self.words)
        i = bisect.bisect_left(self.words, prefix, lo, hi)
        if i < len(self.words) and self.words[i].startswith(prefix):
            return i, self.words[i] == prefix
        return None, False

    def __contains__(self, word):
        return self.lookup(word)[1]

    def __len__(self):
        return len(self.words)
# _____________________________________________________________________________
class BoggleFinder:
    """A class that allows you to find all the words in a Boggle board."""

    wordlist = None  # A class variable, holding a wordlist
    # Score per word length; anything of length >= 8 scores 11.
    scores = [0, 0, 0, 0, 1, 2, 3, 5] + [11] * 100

    def __init__(self, board=None):
        if BoggleFinder.wordlist is None:
            BoggleFinder.wordlist = Wordlist(open_data("EN-text/wordlist.txt"))
        self.found = {}
        if board:
            self.set_board(board)

    def set_board(self, board=None):
        """Set the board, and find all the words in it."""
        if board is None:
            board = random_boggle()
        self.board = board
        self.neighbors = boggle_neighbors(len(board))
        self.found = {}
        for i, letter in enumerate(board):
            lo, hi = self.wordlist.bounds[letter]
            self.find(lo, hi, i, [], '')
        return self

    def find(self, lo, hi, i, visited, prefix):
        """Looking in square i, find the words that continue the prefix,
        considering the entries in self.wordlist.words[lo:hi], and not
        revisiting the squares in visited."""
        if i in visited:
            return
        wordpos, is_word = self.wordlist.lookup(prefix, lo, hi)
        if wordpos is None:
            # No stored word continues this prefix: prune the branch.
            return
        if is_word:
            self.found[prefix] = True
        visited.append(i)
        letter = self.board[i]
        # A 'Q' cube stands for the digraph 'QU'.
        prefix += 'QU' if letter == 'Q' else letter
        for j in self.neighbors[i]:
            self.find(wordpos, hi, j, visited, prefix)
        visited.pop()

    def words(self):
        """The words found."""
        return list(self.found)

    def score(self):
        """The total score for the words found, according to the rules."""
        return sum(self.scores[len(w)] for w in self.words())

    def __len__(self):
        """The number of words found."""
        return len(self.found)
# _____________________________________________________________________________
def boggle_hill_climbing(board=None, ntimes=100, verbose=True):
    """Solve inverse Boggle by hill-climbing: find a high-scoring board by
    starting with a random one and changing it."""
    finder = BoggleFinder()
    if board is None:
        board = random_boggle()
    best = len(finder.set_board(board))
    for step in range(ntimes):
        i, oldc = mutate_boggle(board)
        candidate = len(finder.set_board(board))
        if candidate > best:
            best = candidate
            if verbose:
                print(best, step, board)
        else:
            board[i] = oldc  # undo the mutation
    if verbose:
        print_boggle(board)
    return board, best
def mutate_boggle(board):
    """Replace one random square with a random face of a random cube;
    return (index, previous letter) so the change can be undone."""
    pos = random.randrange(len(board))
    previous = board[pos]
    # Could instead sample from boyan_best.
    board[pos] = random.choice(random.choice(cubes16))
    return pos, previous
# ______________________________________________________________________________
# Code to compare searchers on various problems.
class InstrumentedProblem(Problem):
    """Delegates to a problem, and keeps statistics."""

    def __init__(self, problem):
        self.problem = problem
        self.succs = 0       # calls to actions()
        self.goal_tests = 0  # calls to goal_test()
        self.states = 0      # calls to result()
        self.found = None    # first goal state seen, if any

    def actions(self, state):
        self.succs += 1
        return self.problem.actions(state)

    def result(self, state, action):
        self.states += 1
        return self.problem.result(state, action)

    def goal_test(self, state):
        self.goal_tests += 1
        outcome = self.problem.goal_test(state)
        if outcome:
            self.found = state
        return outcome

    def path_cost(self, c, state1, action, state2):
        return self.problem.path_cost(c, state1, action, state2)

    def value(self, state):
        return self.problem.value(state)

    def __getattr__(self, attr):
        # Anything not defined here falls through to the wrapped problem.
        return getattr(self.problem, attr)

    def __repr__(self):
        return '<{:4d}/{:4d}/{:4d}/{}>'.format(self.succs, self.goal_tests,
                                               self.states, str(self.found)[:4])
def compare_searchers(problems, header,
                      searchers=[breadth_first_tree_search,
                                 breadth_first_graph_search,
                                 depth_first_graph_search,
                                 iterative_deepening_search,
                                 depth_limited_search,
                                 recursive_best_first_search]):
    """Run each searcher on each problem and print a table of statistics."""
    def run_one(searcher, problem):
        instrumented = InstrumentedProblem(problem)
        searcher(instrumented)
        return instrumented

    rows = []
    for s in searchers:
        rows.append([name(s)] + [run_one(s, p) for p in problems])
    print_table(rows, header)
def compare_graph_searchers():
    """Prints a table of search results."""
    problems = [GraphProblem('Arad', 'Bucharest', romania_map),
                GraphProblem('Oradea', 'Neamt', romania_map),
                GraphProblem('Q', 'WA', australia_map)]
    header = ['Searcher', 'romania_map(Arad, Bucharest)',
              'romania_map(Oradea, Neamt)', 'australia_map']
    compare_searchers(problems=problems, header=header)
# Imported utils4e to resolve some dependency bugs (#1186)
"""
Search (Chapters 3-4)
The way to use this code is to subclass Problem to create a class of problems,
then create problem instances and solve them with calls to the various search
functions.
"""
import sys
from collections import deque
from utils import *
from utils4e import *
class Problem:
    """The abstract class for a formal problem. You should subclass
    this and implement the methods actions and result, and possibly
    __init__, goal_test, and path_cost. Then you will create instances
    of your subclass and solve them with the various search functions."""

    def __init__(self, initial, goal=None):
        """The constructor specifies the initial state, and possibly a goal
        state, if there is a unique goal. Your subclass's constructor can add
        other arguments."""
        self.initial = initial
        self.goal = goal

    def actions(self, state):
        """Return the actions that can be executed in the given state.
        Typically a list; for very large action sets, consider yielding
        actions one at a time from an iterator instead."""
        raise NotImplementedError

    def result(self, state, action):
        """Return the state that results from executing the given action in
        the given state. The action must be one of self.actions(state)."""
        raise NotImplementedError

    def goal_test(self, state):
        """Return True if the state is a goal. Compares against self.goal,
        or checks membership when self.goal is a list of goals. Override
        this when comparing against a single goal is not enough."""
        if isinstance(self.goal, list):
            return is_in(state, self.goal)
        return state == self.goal

    def path_cost(self, c, state1, action, state2):
        """Return the cost of a solution path that arrives at state2 from
        state1 via action, given cost c up to state1. The default charges 1
        per step and ignores the states and the action."""
        return c + 1

    def value(self, state):
        """For optimization problems, each state has a value. Hill Climbing
        and related algorithms try to maximize this value."""
        raise NotImplementedError
# ______________________________________________________________________________
class Node:
    """A node in a search tree. Holds the state, a pointer to the parent
    node, the action that produced this node, and the total path cost g.
    Two nodes compare equal when their states are equal, so frontier and
    explored-set bookkeeping can treat same-state nodes as duplicates —
    note this may not be what you want in other contexts. Search code may
    attach f and h attributes; see best_first_graph_search and
    astar_search."""

    def __init__(self, state, parent=None, action=None, path_cost=0):
        """Create a search tree Node, derived from a parent by an action."""
        self.state = state
        self.parent = parent
        self.action = action
        self.path_cost = path_cost
        self.depth = parent.depth + 1 if parent else 0

    def __repr__(self):
        return "<Node {}>".format(self.state)

    def __lt__(self, node):
        return self.state < node.state

    def expand(self, problem):
        """List the nodes reachable in one step from this node."""
        return [self.child_node(problem, action)
                for action in problem.actions(self.state)]

    def child_node(self, problem, action):
        """[Figure 3.10]"""
        successor = problem.result(self.state, action)
        cost = problem.path_cost(self.path_cost, self.state, action, successor)
        return Node(successor, self, action, cost)

    def solution(self):
        """Return the sequence of actions to go from the root to this node."""
        return [node.action for node in self.path()[1:]]

    def path(self):
        """Return a list of nodes forming the path from the root to this node."""
        chain = []
        node = self
        while node is not None:
            chain.append(node)
            node = node.parent
        chain.reverse()
        return chain

    def __eq__(self, other):
        return isinstance(other, Node) and self.state == other.state

    def __hash__(self):
        # Hash on the state so equal nodes hash alike and can be found
        # quickly in sets and dicts.
        return hash(self.state)
# ______________________________________________________________________________
class SimpleProblemSolvingAgentProgram:
    """
    [Figure 3.1]
    Abstract framework for a problem-solving agent.
    """

    def __init__(self, initial_state=None):
        """state is an abstract representation of the world; seq holds the
        actions still pending execution toward the current goal."""
        self.state = initial_state
        self.seq = []

    def __call__(self, percept):
        """[Figure 3.1] Formulate a goal and problem, then
        search for a sequence of actions to solve it."""
        self.state = self.update_state(self.state, percept)
        if self.seq:
            return self.seq.pop(0)
        # No pending plan: formulate a new goal/problem and search.
        goal = self.formulate_goal(self.state)
        problem = self.formulate_problem(self.state, goal)
        self.seq = self.search(problem)
        return self.seq.pop(0) if self.seq else None

    def update_state(self, state, percept):
        raise NotImplementedError

    def formulate_goal(self, state):
        raise NotImplementedError

    def formulate_problem(self, state, goal):
        raise NotImplementedError

    def search(self, problem):
        raise NotImplementedError
# ______________________________________________________________________________
# Uninformed Search algorithms
def breadth_first_tree_search(problem):
    """
    [Figure 3.7]
    Expand the shallowest unexpanded node first (FIFO frontier).
    No explored set is kept, so loops in the state space cause
    infinite repetition.
    """
    frontier = deque([Node(problem.initial)])
    while frontier:
        current = frontier.popleft()
        if problem.goal_test(current.state):
            return current
        for child in current.expand(problem):
            frontier.append(child)
    return None
def depth_first_tree_search(problem):
    """
    [Figure 3.7]
    Expand the deepest unexpanded node first (LIFO frontier).
    No explored set is kept, so loops in the state space cause
    infinite repetition.
    """
    frontier = [Node(problem.initial)]
    while frontier:
        current = frontier.pop()
        if problem.goal_test(current.state):
            return current
        for child in current.expand(problem):
            frontier.append(child)
    return None
def depth_first_graph_search(problem):
    """
    [Figure 3.7]
    Depth-first search with an explored set: expand the deepest node first,
    never re-expand a state, and keep at most one frontier entry per state
    (if two paths reach a state, only the first one is used).
    """
    frontier = [Node(problem.initial)]  # LIFO stack
    explored = set()
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
    return None
def breadth_first_graph_search(problem):
    """[Figure 3.11]
    Breadth-first search with an explored set. The goal test is applied when
    a node is generated rather than when it is expanded, which is safe for
    BFS and finds the goal sooner.
    """
    root = Node(problem.initial)
    if problem.goal_test(root.state):
        return root
    frontier = deque([root])
    explored = set()
    while frontier:
        node = frontier.popleft()
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state in explored or child in frontier:
                continue
            if problem.goal_test(child.state):
                return child
            frontier.append(child)
    return None
def best_first_graph_search(problem, f, display=False):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best
    first search; if f is node.depth then we have breadth-first search.
    The line "f = memoize(f, 'f')" caches computed f values on the nodes,
    so after a search you can inspect the f values along the returned path."""
    f = memoize(f, 'f')
    frontier = PriorityQueue('min', f)
    frontier.append(Node(problem.initial))
    explored = set()
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            if display:
                print(len(explored), "paths have been expanded and", len(frontier), "paths remain in the frontier")
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            queued = child in frontier
            if child.state not in explored and not queued:
                frontier.append(child)
            elif queued and f(child) < frontier[child]:
                # Found a cheaper path to a queued state: replace the entry.
                del frontier[child]
                frontier.append(child)
    return None
def uniform_cost_search(problem, display=False):
    """[Figure 3.14] Best-first graph search ordered by path cost g(n)."""
    def g(node):
        return node.path_cost

    return best_first_graph_search(problem, g, display)
def depth_limited_search(problem, limit=50):
    """[Figure 3.17] Depth-first search cut off at the given depth limit.
    Returns a goal node, None (no solution within the searched space), or
    the string 'cutoff' (the limit was hit, a solution may lie deeper)."""
    def dls(node, depth_left):
        if problem.goal_test(node.state):
            return node
        if depth_left == 0:
            return 'cutoff'
        cutoff_seen = False
        for child in node.expand(problem):
            outcome = dls(child, depth_left - 1)
            if outcome == 'cutoff':
                cutoff_seen = True
            elif outcome is not None:
                return outcome
        return 'cutoff' if cutoff_seen else None

    return dls(Node(problem.initial), limit)
def iterative_deepening_search(problem):
    """[Figure 3.18] Repeated depth-limited search with increasing limits,
    stopping as soon as a run finishes without hitting its cutoff."""
    depth = 0
    while depth < sys.maxsize:
        outcome = depth_limited_search(problem, depth)
        if outcome != 'cutoff':
            return outcome
        depth += 1
# ______________________________________________________________________________
# Bidirectional Search
# Pseudocode from https://webdocs.cs.ualberta.ca/%7Eholte/Publications/MM-AAAI2016.pdf
def bidirectional_search(problem):
    # Bidirectional "meet in the middle" (MM) search; returns the COST of the
    # cheapest solution found (np.inf when the frontiers never meet).
    # Pseudocode: https://webdocs.cs.ualberta.ca/%7Eholte/Publications/MM-AAAI2016.pdf
    e = 0
    if isinstance(problem, GraphProblem):
        e = problem.find_min_edge()  # smallest edge cost, used in the stop test
    # g-values (cost so far) plus open/closed lists for the Forward and
    # Backward searches; U is the cost of the best solution found so far.
    gF, gB = {Node(problem.initial): 0}, {Node(problem.goal): 0}
    openF, openB = [Node(problem.initial)], [Node(problem.goal)]
    closedF, closedB = [], []
    U = np.inf

    def extend(U, open_dir, open_other, g_dir, g_other, closed_dir):
        """Extend search in given direction"""
        # NOTE: reads C from the enclosing scope; C is rebound on every
        # iteration of the main loop below before extend() is called.
        n = find_key(C, open_dir, g_dir)

        open_dir.remove(n)
        closed_dir.append(n)

        for c in n.expand(problem):
            if c in open_dir or c in closed_dir:
                # Skip unless the new path to c is strictly cheaper.
                if g_dir[c] <= problem.path_cost(g_dir[n], n.state, None, c.state):
                    continue

                open_dir.remove(c)

            g_dir[c] = problem.path_cost(g_dir[n], n.state, None, c.state)
            open_dir.append(c)

            if c in open_other:
                # The two frontiers meet at c: candidate solution cost.
                U = min(U, g_dir[c] + g_other[c])

        return U, open_dir, closed_dir, g_dir

    def find_min(open_dir, g):
        """Finds minimum priority, g and f values in open_dir"""
        # pr_min_f isn't forward pr_min instead it's the f-value
        # of node with priority pr_min.
        pr_min, pr_min_f = np.inf, np.inf
        for n in open_dir:
            f = g[n] + problem.h(n)
            pr = max(f, 2 * g[n])
            pr_min = min(pr_min, pr)
            pr_min_f = min(pr_min_f, f)

        return pr_min, pr_min_f, min(g.values())

    def find_key(pr_min, open_dir, g):
        """Finds key in open_dir with value equal to pr_min
        and minimum g value."""
        m = np.inf
        node = Node(-1)  # sentinel; replaced by the first matching node
        for n in open_dir:
            pr = max(g[n] + problem.h(n), 2 * g[n])
            if pr == pr_min:
                if g[n] < m:
                    m = g[n]
                    node = n

        return node

    while openF and openB:
        pr_min_f, f_min_f, g_min_f = find_min(openF, gF)
        pr_min_b, f_min_b, g_min_b = find_min(openB, gB)
        C = min(pr_min_f, pr_min_b)

        # Terminate when the best-known solution U cannot be improved by
        # either direction (MM's stopping condition).
        if U <= max(C, f_min_f, f_min_b, g_min_f + g_min_b + e):
            return U

        if C == pr_min_f:
            # Extend forward
            U, openF, closedF, gF = extend(U, openF, openB, gF, gB, closedF)
        else:
            # Extend backward
            U, openB, closedB, gB = extend(U, openB, openF, gB, gF, closedB)

    return np.inf
# ______________________________________________________________________________
# Informed (Heuristic) Search
# Greedy best-first search is just best-first graph search where the caller
# supplies the heuristic h itself as the priority function f.
greedy_best_first_graph_search = best_first_graph_search
# Greedy best-first search is accomplished by specifying f(n) = h(n).
def astar_search(problem, h=None, display=False):
    """A* search is best-first graph search with f(n) = g(n)+h(n).
    You need to specify the h function when you call astar_search, or
    else in your Problem subclass."""
    heuristic = memoize(h or problem.h, 'h')

    def f(n):
        return n.path_cost + heuristic(n)

    return best_first_graph_search(problem, f, display)
# ______________________________________________________________________________
# A* heuristics
class EightPuzzle(Problem):
    """ The problem of sliding tiles numbered from 1 to 8 on a 3x3 board, where one of the
    squares is a blank. A state is represented as a tuple of length 9, where element at
    index i represents the tile number at index i (0 if it's an empty square) """

    def __init__(self, initial, goal=(1, 2, 3, 4, 5, 6, 7, 8, 0)):
        """ Define goal state and initialize a problem """
        super().__init__(initial, goal)

    def find_blank_square(self, state):
        """Return the index of the blank square in a given state"""
        return state.index(0)

    def actions(self, state):
        """Return the moves available to the blank square: a subset of
        UP/DOWN/LEFT/RIGHT, depending on which board edges the blank
        touches."""
        blank = self.find_blank_square(state)
        banned = set()
        if blank % 3 == 0:
            banned.add('LEFT')
        if blank < 3:
            banned.add('UP')
        if blank % 3 == 2:
            banned.add('RIGHT')
        if blank > 5:
            banned.add('DOWN')
        return [a for a in ('UP', 'DOWN', 'LEFT', 'RIGHT') if a not in banned]

    def result(self, state, action):
        """Return the state reached by sliding the blank in the direction
        given by action; the action is assumed valid for this state."""
        blank = self.find_blank_square(state)
        board = list(state)
        offset = {'UP': -3, 'DOWN': 3, 'LEFT': -1, 'RIGHT': 1}[action]
        target = blank + offset
        board[blank], board[target] = board[target], board[blank]
        return tuple(board)

    def goal_test(self, state):
        """ Given a state, return True if state is a goal state or False, otherwise """
        return state == self.goal

    def check_solvability(self, state):
        """A state is solvable iff its number of inversions (out-of-order
        tile pairs, ignoring the blank) is even."""
        inversions = 0
        for a in range(len(state)):
            for b in range(a + 1, len(state)):
                if state[a] and state[b] and state[a] > state[b]:
                    inversions += 1
        return inversions % 2 == 0

    def h(self, node):
        """Default heuristic: the number of misplaced tiles."""
        return sum(1 for s, g in zip(node.state, self.goal) if s != g)
# ______________________________________________________________________________
class PlanRoute(Problem):
    """ The problem of moving the Hybrid Wumpus Agent from one place to other """

    def __init__(self, initial, goal, allowed, dimrow):
        """ Define goal state and initialize a problem

        allowed: the [x, y] locations the agent may occupy.
        dimrow: size of the (square) cave grid.
        """
        super().__init__(initial, goal)
        self.dimrow = dimrow
        self.goal = goal
        self.allowed = allowed

    def actions(self, state):
        """ Return the actions that can be executed in the given state.
        The result would be a list, since there are only three possible actions
        in any given state of the environment """
        possible_actions = ['Forward', 'TurnLeft', 'TurnRight']
        x, y = state.get_location()
        orientation = state.get_orientation()

        # Prevent Bumps: drop 'Forward' when the agent faces a wall.
        if x == 1 and orientation == 'LEFT':
            if 'Forward' in possible_actions:
                possible_actions.remove('Forward')
        if y == 1 and orientation == 'DOWN':
            if 'Forward' in possible_actions:
                possible_actions.remove('Forward')
        if x == self.dimrow and orientation == 'RIGHT':
            if 'Forward' in possible_actions:
                possible_actions.remove('Forward')
        if y == self.dimrow and orientation == 'UP':
            if 'Forward' in possible_actions:
                possible_actions.remove('Forward')

        return possible_actions

    def result(self, state, action):
        """ Given state and action, return a new state that is the result of the action.
        Action is assumed to be a valid action in the state """
        x, y = state.get_location()
        proposed_loc = list()

        # Move Forward
        if action == 'Forward':
            if state.get_orientation() == 'UP':
                proposed_loc = [x, y + 1]
            elif state.get_orientation() == 'DOWN':
                proposed_loc = [x, y - 1]
            elif state.get_orientation() == 'LEFT':
                proposed_loc = [x - 1, y]
            elif state.get_orientation() == 'RIGHT':
                proposed_loc = [x + 1, y]
            else:
                raise Exception('InvalidOrientation')

        # Rotate counter-clockwise
        elif action == 'TurnLeft':
            if state.get_orientation() == 'UP':
                state.set_orientation('LEFT')
            elif state.get_orientation() == 'DOWN':
                state.set_orientation('RIGHT')
            elif state.get_orientation() == 'LEFT':
                state.set_orientation('DOWN')
            elif state.get_orientation() == 'RIGHT':
                state.set_orientation('UP')
            else:
                raise Exception('InvalidOrientation')

        # Rotate clockwise
        elif action == 'TurnRight':
            if state.get_orientation() == 'UP':
                state.set_orientation('RIGHT')
            elif state.get_orientation() == 'DOWN':
                state.set_orientation('LEFT')
            elif state.get_orientation() == 'LEFT':
                state.set_orientation('UP')
            elif state.get_orientation() == 'RIGHT':
                state.set_orientation('DOWN')
            else:
                raise Exception('InvalidOrientation')

        if proposed_loc in self.allowed:
            # Fix: pass the y coordinate itself. The original called
            # set_location(proposed_loc[0], [proposed_loc[1]]), wrapping y in
            # a list, which corrupts the stored location and breaks
            # goal_test/h (both expect plain coordinates).
            state.set_location(proposed_loc[0], proposed_loc[1])

        return state

    def goal_test(self, state):
        """ Given a state, return True if state is a goal state or False, otherwise """
        return state.get_location() == tuple(self.goal)

    def h(self, node):
        """ Return the heuristic value for a given state."""
        # Manhattan Heuristic Function
        x1, y1 = node.state.get_location()
        x2, y2 = self.goal
        return abs(x2 - x1) + abs(y2 - y1)
# ______________________________________________________________________________
# Other search algorithms
def recursive_best_first_search(problem, h=None):
    """[Figure 3.26] Recursive best-first search: best-first behavior in
    linear space, re-expanding subtrees as needed. h defaults to problem.h
    and is cached on the nodes via memoize."""
    h = memoize(h or problem.h, 'h')

    def RBFS(problem, node, flimit):
        if problem.goal_test(node.state):
            return node, 0  # (The second value is immaterial)
        successors = node.expand(problem)
        if len(successors) == 0:
            return None, np.inf
        for s in successors:
            # Inherit the parent's f when larger (pathmax), so f values are
            # nondecreasing along any path.
            s.f = max(s.path_cost + h(s), node.f)
        while True:
            # Order by lowest f value
            successors.sort(key=lambda x: x.f)
            best = successors[0]
            if best.f > flimit:
                return None, best.f
            if len(successors) > 1:
                alternative = successors[1].f
            else:
                alternative = np.inf
            # Recurse with the limit reduced to the best alternative; best.f
            # is replaced by the backed-up value so that a later re-expansion
            # knows this subtree's true cost.
            result, best.f = RBFS(problem, best, min(flimit, alternative))
            if result is not None:
                return result, best.f

    node = Node(problem.initial)
    node.f = h(node)
    result, bestf = RBFS(problem, node, np.inf)
    return result
def hill_climbing(problem):
    """
    [Figure 4.2]
    From the initial node, keep choosing the neighbor with highest value,
    stopping when no neighbor is better.
    """
    current = Node(problem.initial)
    while True:
        successors = current.expand(problem)
        if not successors:
            break
        best = argmax_random_tie(successors,
                                 key=lambda node: problem.value(node.state))
        if problem.value(best.state) <= problem.value(current.state):
            # Local maximum (or plateau): stop climbing.
            break
        current = best
    return current.state
def exp_schedule(k=20, lam=0.005, limit=100):
    """One possible schedule function for simulated annealing:
    exponentially decaying temperature k*e^(-lam*t), clamped to 0 at t >= limit."""
    def schedule(t):
        if t < limit:
            return k * np.exp(-lam * t)
        return 0

    return schedule
def simulated_annealing(problem, schedule=exp_schedule()):
    """[Figure 4.5] CAUTION: This differs from the pseudocode as it
    returns a state instead of a Node."""
    current = Node(problem.initial)
    for step in range(sys.maxsize):
        T = schedule(step)
        if T == 0:
            return current.state
        moves = current.expand(problem)
        if not moves:
            return current.state
        candidate = random.choice(moves)
        delta_e = problem.value(candidate.state) - problem.value(current.state)
        # Always accept an uphill move; accept a downhill move with
        # probability e^(delta_e / T).
        if delta_e > 0 or probability(np.exp(delta_e / T)):
            current = candidate
def simulated_annealing_full(problem, schedule=exp_schedule()):
    """Variant of simulated_annealing that records the search history.

    Args:
        problem: a Problem whose value(state) is to be maximized.
        schedule: callable mapping time step t to temperature T.

    Returns:
        The list of all states encountered on the way to the goal,
        including the final state.
    """
    states = []
    current = Node(problem.initial)
    for t in range(sys.maxsize):
        states.append(current.state)
        T = schedule(t)
        if T == 0:
            return states
        neighbors = current.expand(problem)
        if not neighbors:
            # Fix: return the accumulated history. The original returned a
            # bare state here, inconsistent with the documented list return.
            return states
        next_choice = random.choice(neighbors)
        delta_e = problem.value(next_choice.state) - problem.value(current.state)
        # Always accept uphill; accept downhill with probability e^(dE/T).
        if delta_e > 0 or probability(np.exp(delta_e / T)):
            current = next_choice
def and_or_graph_search(problem):
    """[Figure 4.11]Used when the environment is nondeterministic and completely observable.
    Contains OR nodes where the agent is free to choose any action.
    After every action there is an AND node which contains all possible states
    the agent may reach due to stochastic nature of environment.
    The agent must be able to handle all possible states of the AND node (as it
    may end up in any of them).
    Returns a conditional plan to reach goal state,
    or failure if the former is not possible."""

    # functions used by and_or_search
    def or_search(state, problem, path):
        """returns a plan as a list of actions"""
        if problem.goal_test(state):
            return []
        if state in path:
            # state already appears on the current path: a cycle, so fail.
            return None
        for action in problem.actions(state):
            plan = and_search(problem.result(state, action),
                              problem, path + [state, ])
            if plan is not None:
                return [action, plan]
        # Implicitly returns None (failure) when no action yields a plan.

    def and_search(states, problem, path):
        """Returns plan in form of dictionary where we take action plan[s] if we reach state s."""
        plan = {}
        for s in states:
            plan[s] = or_search(s, problem, path)
            if plan[s] is None:
                # One reachable outcome is unsolvable, so the AND node fails.
                return None
        return plan

    # body of and or search
    return or_search(problem.initial, problem, [])
# Pre-defined actions for PeakFindingProblem:
# (dx, dy) offsets for the four cardinal directions...
directions4 = {'W': (-1, 0), 'N': (0, 1), 'E': (1, 0), 'S': (0, -1)}
# ...and the eight-connected variant adding the four diagonals.
directions8 = dict(directions4)
directions8.update({'NW': (-1, 1), 'NE': (1, 1), 'SE': (1, -1), 'SW': (-1, -1)})
class PeakFindingProblem(Problem):
    """Problem of finding the highest peak in a limited grid"""

    def __init__(self, initial, grid, defined_actions=directions4):
        """The grid is a 2 dimensional array/list whose state is specified by tuple of indices"""
        super().__init__(initial)
        self.grid = grid
        self.defined_actions = defined_actions
        self.n = len(grid)
        assert self.n > 0
        self.m = len(grid[0])
        assert self.m > 0

    def actions(self, state):
        """Return the moves whose destination stays inside the grid."""
        legal = []
        for move, delta in self.defined_actions.items():
            r, c = vector_add(state, delta)
            if 0 <= r < self.n and 0 <= c < self.m:
                legal.append(move)
        return legal

    def result(self, state, action):
        """Moves in the direction specified by action"""
        return vector_add(state, self.defined_actions[action])

    def value(self, state):
        """Value of a state is the value it is the index to"""
        r, c = state
        assert 0 <= r < self.n
        assert 0 <= c < self.m
        return self.grid[r][c]
class OnlineDFSAgent:
    """
    [Figure 4.21]
    The abstract class for an OnlineDFSAgent. Override
    update_state method to convert percept to state. While initializing
    the subclass a problem needs to be provided which is an instance of
    a subclass of the Problem class.
    """

    def __init__(self, problem):
        self.problem = problem
        self.s = None                # previous state
        self.a = None                # previous action
        self.untried = dict()        # state -> actions not yet tried there
        self.unbacktracked = dict()  # state -> predecessors, most recent first
        self.result = {}             # (state, action) -> observed next state

    def __call__(self, percept):
        s1 = self.update_state(percept)
        if self.problem.goal_test(s1):
            self.a = None
        else:
            if s1 not in self.untried:
                # Copy so popping actions never mutates the problem's list.
                self.untried[s1] = list(self.problem.actions(s1))
            if self.s is not None:
                # Fix: use .get/.setdefault -- the original indexed
                # self.result and self.unbacktracked directly, raising
                # KeyError the first time a (state, action) pair or a new
                # state was encountered.
                if s1 != self.result.get((self.s, self.a)):
                    self.result[(self.s, self.a)] = s1
                    self.unbacktracked.setdefault(s1, []).insert(0, self.s)
            if len(self.untried[s1]) == 0:
                if len(self.unbacktracked.get(s1, [])) == 0:
                    self.a = None
                else:
                    # a <- an action b such that result[s', b] = POP(unbacktracked[s'])
                    # Fix: pop ONE predecessor state; the original popped the
                    # entire list out of the dict, so the comparison below
                    # could never match.
                    unbacktracked_pop = self.unbacktracked[s1].pop(0)
                    for (s, b) in self.result.keys():
                        if self.result[(s, b)] == unbacktracked_pop:
                            self.a = b
                            break
            else:
                # Fix: pop one untried ACTION; the original did
                # self.untried.pop(s1), assigning the whole list to self.a
                # and deleting the bookkeeping entry for s1.
                self.a = self.untried[s1].pop(0)
        self.s = s1
        return self.a

    def update_state(self, percept):
        """To be overridden in most cases. The default case
        assumes the percept to be of type state."""
        return percept
# ______________________________________________________________________________
class OnlineSearchProblem(Problem):
    """
    A problem which is solved by an agent executing
    actions, rather than by just computation.
    Carried in a deterministic and a fully observable environment."""

    def __init__(self, initial, goal, graph):
        super().__init__(initial, goal)
        self.graph = graph

    def actions(self, state):
        return self.graph.graph_dict[state].keys()

    def output(self, state, action):
        # Deterministic environment: one action -> one successor state.
        return self.graph.graph_dict[state][action]

    def h(self, state):
        """Returns least possible cost to reach a goal for the given state."""
        return self.graph.least_costs[state]

    def c(self, s, a, s1):
        """Returns a cost estimate for an agent to move from state 's' to state 's1'."""
        return 1

    def update_state(self, percept):
        raise NotImplementedError

    def goal_test(self, state):
        return state == self.goal
class LRTAStarAgent:
    """[Figure 4.24]
    LRTA*-Agent. A problem needs to be provided which is an instance of
    a subclass of Problem Class; takes an OnlineSearchProblem
    [Figure 4.23] and learns cost-to-goal estimates H while acting.
    """

    def __init__(self, problem):
        self.problem = problem
        # self.result = {}      # no need as we are using problem.result
        self.H = {}  # learned cost-to-goal estimate per visited state
        self.s = None  # previous state
        self.a = None  # previous action

    def __call__(self, s1):  # as of now s1 is a state rather than a percept
        """Observe state s1, update H for the previous state, and return
        the next action (None once s1 is a goal)."""
        if self.problem.goal_test(s1):
            self.a = None
            return self.a
        if s1 not in self.H:
            self.H[s1] = self.problem.h(s1)
        if self.s is not None:
            # Minimum cost over all actions b available in the previous state.
            self.H[self.s] = min(self.LRTA_cost(self.s, b, self.problem.output(self.s, b),
                                                self.H) for b in self.problem.actions(self.s))
        # An action b in problem.actions(s1) that minimizes costs.
        self.a = min(self.problem.actions(s1),
                     key=lambda b: self.LRTA_cost(s1, b, self.problem.output(s1, b), self.H))
        self.s = s1
        return self.a

    def LRTA_cost(self, s, a, s1, H):
        """Return cost to move from state 's' to state 's1' plus the
        estimated cost to reach a goal from s1.

        Uses problem.h(s1) when s1 has no learned estimate yet.
        (The stray debug print and the bare `except:` of the original
        are removed; only the missing-key case is handled.)
        """
        if s1 is None:
            return self.problem.h(s)
        try:
            return self.problem.c(s, a, s1) + self.H[s1]
        except KeyError:
            # s1 not learned yet; fall back to the heuristic estimate.
            return self.problem.c(s, a, s1) + self.problem.h(s1)
# ______________________________________________________________________________
# Genetic Algorithm
def genetic_search(problem, ngen=1000, pmut=0.1, n=20):
    """Call genetic_algorithm on the appropriate parts of a problem.
    This requires the problem to have states that can mate and mutate,
    plus a value method that scores states."""
    # NOTE: This is not tested and might not work.
    # TODO: Use this function to make Problems work with genetic_algorithm.
    start = problem.initial_state
    candidates = [problem.result(start, action) for action in problem.actions(start)]
    random.shuffle(candidates)
    return genetic_algorithm(candidates[:n], problem.value, ngen, pmut)
def genetic_algorithm(population, fitness_fn, gene_pool=[0, 1], f_thres=None, ngen=1000, pmut=0.1):
    """[Figure 4.8]
    Evolve `population` for up to `ngen` generations; each generation is
    built by selection, recombination and mutation.  Stops early when an
    individual reaches fitness f_thres."""
    for _ in range(ngen):
        population = [
            mutate(recombine(*select(2, population, fitness_fn)), gene_pool, pmut)
            for _ in range(len(population))
        ]
        best = fitness_threshold(fitness_fn, f_thres, population)
        if best:
            return best
    return max(population, key=fitness_fn)
def fitness_threshold(fitness_fn, f_thres, population):
    """Return the fittest individual if its fitness reaches f_thres,
    else None.  A falsy threshold disables the check entirely."""
    if f_thres:
        best = max(population, key=fitness_fn)
        return best if fitness_fn(best) >= f_thres else None
    return None
def init_population(pop_number, gene_pool, state_length):
    """Build a random population for the genetic algorithm.

    pop_number: number of individuals in the population
    gene_pool: possible values for each gene
    state_length: number of genes per individual
    """
    return [[random.choice(gene_pool) for _ in range(state_length)]
            for _ in range(pop_number)]
def select(r, population, fitness_fn):
    """Pick r individuals from the population, weighted by fitness."""
    sampler = weighted_sampler(population, map(fitness_fn, population))
    return [sampler() for _ in range(r)]
def recombine(x, y):
    """Single-point crossover: a random-length prefix of x joined with
    the matching suffix of y."""
    cut = random.randrange(len(x))
    return x[:cut] + y[cut:]
def recombine_uniform(x, y):
    """Uniform crossover: half of the positions (chosen at random) come
    from x, the rest from y.  The child is returned as a string."""
    n = len(x)
    child = [0] * n
    order = random.sample(range(n), n)
    for rank, pos in enumerate(order):
        child[pos] = x[pos] if rank < n / 2 else y[pos]
    return ''.join(str(gene) for gene in child)
def mutate(x, gene_pool, pmut):
    """With probability pmut, replace one random gene of x with a random
    gene drawn from gene_pool; otherwise return x unchanged."""
    if random.uniform(0, 1) >= pmut:
        return x
    pos = random.randrange(0, len(x))
    gene = gene_pool[random.randrange(0, len(gene_pool))]
    return x[:pos] + [gene] + x[pos + 1:]
# _____________________________________________________________________________
# The remainder of this file implements examples for the search algorithms.
# ______________________________________________________________________________
# Graphs and Graph Problems
class Graph:
    """A graph of nodes (vertices) connected by edges (links), each edge
    optionally carrying a length.  Typical construction:
        g = Graph({'A': {'B': 1, 'C': 2}})
    builds nodes A, B, C with an edge of length 1 from A to B and one of
    length 2 from A to C.  Passing directed=False builds an undirected
    graph: inverse links are added now and by every later connect() call.
    Use g.nodes() for the node list, g.get('A') for A's outgoing links,
    and g.get('A', 'B') for a single link length.  'Lengths' may be any
    object at all, and nodes any hashable object."""

    def __init__(self, graph_dict=None, directed=True):
        self.graph_dict = graph_dict or {}
        self.directed = directed
        if not directed:
            self.make_undirected()

    def make_undirected(self):
        """Add the symmetric counterpart of every existing edge."""
        for a in list(self.graph_dict.keys()):
            for b, dist in self.graph_dict[a].items():
                self.connect1(b, a, dist)

    def connect(self, A, B, distance=1):
        """Link A to B with the given distance; also add the inverse
        link when the graph is undirected."""
        self.connect1(A, B, distance)
        if not self.directed:
            self.connect1(B, A, distance)

    def connect1(self, A, B, distance):
        """Link A to B with the given distance, in one direction only."""
        self.graph_dict.setdefault(A, {})[B] = distance

    def get(self, a, b=None):
        """With one argument, return a's link dict (possibly empty);
        with two, return the distance from a to b, or None."""
        links = self.graph_dict.setdefault(a, {})
        return links if b is None else links.get(b)

    def nodes(self):
        """Return every node that appears as a source or a target."""
        found = set(self.graph_dict)
        for targets in self.graph_dict.values():
            found.update(targets)
        return list(found)
def UndirectedGraph(graph_dict=None):
    """Construct a Graph in which every edge, present or future, is two-way."""
    return Graph(directed=False, graph_dict=graph_dict)
def RandomGraph(nodes=None, min_links=2, width=400, height=300,
                curvature=lambda: random.uniform(1.1, 1.5)):
    """Construct a random graph, with the specified nodes, and random links.
    The nodes are laid out randomly on a (width x height) rectangle.
    Then each node is connected to the min_links nearest neighbors.
    Because inverse links are added, some nodes will have more connections.
    The distance between nodes is the hypotenuse times curvature(),
    where curvature() defaults to a random number between 1.1 and 1.5.

    `nodes` defaults to list(range(10)); the original used a mutable
    default argument, evaluated once at definition time."""
    if nodes is None:
        nodes = list(range(10))
    g = UndirectedGraph()
    g.locations = {}
    # Build the cities.
    for node in nodes:
        g.locations[node] = (random.randrange(width), random.randrange(height))
    # Build roads from each city to at least min_links nearest neighbors.
    for i in range(min_links):
        for node in nodes:
            if len(g.get(node)) < min_links:
                here = g.locations[node]

                def distance_to_node(n):
                    # Already-linked nodes and self are ineligible.
                    if n is node or g.get(node, n):
                        return np.inf
                    return distance(g.locations[n], here)

                neighbor = min(nodes, key=distance_to_node)
                d = distance(g.locations[neighbor], here) * curvature()
                g.connect(node, neighbor, int(d))
    return g
""" [Figure 3.2]
Simplified road map of Romania
"""
romania_map = UndirectedGraph(dict(
Arad=dict(Zerind=75, Sibiu=140, Timisoara=118),
Bucharest=dict(Urziceni=85, Pitesti=101, Giurgiu=90, Fagaras=211),
Craiova=dict(Drobeta=120, Rimnicu=146, Pitesti=138),
Drobeta=dict(Mehadia=75),
Eforie=dict(Hirsova=86),
Fagaras=dict(Sibiu=99),
Hirsova=dict(Urziceni=98),
Iasi=dict(Vaslui=92, Neamt=87),
Lugoj=dict(Timisoara=111, Mehadia=70),
Oradea=dict(Zerind=71, Sibiu=151),
Pitesti=dict(Rimnicu=97),
Rimnicu=dict(Sibiu=80),
Urziceni=dict(Vaslui=142)))
romania_map.locations = dict(
Arad=(91, 492), Bucharest=(400, 327), Craiova=(253, 288),
Drobeta=(165, 299), Eforie=(562, 293), Fagaras=(305, 449),
Giurgiu=(375, 270), Hirsova=(534, 350), Iasi=(473, 506),
Lugoj=(165, 379), Mehadia=(168, 339), Neamt=(406, 537),
Oradea=(131, 571), Pitesti=(320, 368), Rimnicu=(233, 410),
Sibiu=(207, 457), Timisoara=(94, 410), Urziceni=(456, 350),
Vaslui=(509, 444), Zerind=(108, 531))
""" [Figure 4.9]
Eight possible states of the vacumm world
Each state is represented as
* "State of the left room" "State of the right room" "Room in which the agent
is present"
1 - DDL Dirty Dirty Left
2 - DDR Dirty Dirty Right
3 - DCL Dirty Clean Left
4 - DCR Dirty Clean Right
5 - CDL Clean Dirty Left
6 - CDR Clean Dirty Right
7 - CCL Clean Clean Left
8 - CCR Clean Clean Right
"""
vacuum_world = Graph(dict(
State_1=dict(Suck=['State_7', 'State_5'], Right=['State_2']),
State_2=dict(Suck=['State_8', 'State_4'], Left=['State_2']),
State_3=dict(Suck=['State_7'], Right=['State_4']),
State_4=dict(Suck=['State_4', 'State_2'], Left=['State_3']),
State_5=dict(Suck=['State_5', 'State_1'], Right=['State_6']),
State_6=dict(Suck=['State_8'], Left=['State_5']),
State_7=dict(Suck=['State_7', 'State_3'], Right=['State_8']),
State_8=dict(Suck=['State_8', 'State_6'], Left=['State_7'])
))
""" [Figure 4.23]
One-dimensional state space Graph
"""
one_dim_state_space = Graph(dict(
State_1=dict(Right='State_2'),
State_2=dict(Right='State_3', Left='State_1'),
State_3=dict(Right='State_4', Left='State_2'),
State_4=dict(Right='State_5', Left='State_3'),
State_5=dict(Right='State_6', Left='State_4'),
State_6=dict(Left='State_5')
))
one_dim_state_space.least_costs = dict(
State_1=8,
State_2=9,
State_3=2,
State_4=2,
State_5=4,
State_6=3)
""" [Figure 6.1]
Principal states and territories of Australia
"""
australia_map = UndirectedGraph(dict(
T=dict(),
SA=dict(WA=1, NT=1, Q=1, NSW=1, V=1),
NT=dict(WA=1, Q=1),
NSW=dict(Q=1, V=1)))
australia_map.locations = dict(WA=(120, 24), NT=(135, 20), SA=(135, 30),
Q=(145, 20), NSW=(145, 32), T=(145, 42),
V=(145, 37))
class GraphProblem(Problem):
    """The problem of searching a graph from one node to another."""

    def __init__(self, initial, goal, graph):
        super().__init__(initial, goal)
        self.graph = graph

    def actions(self, A):
        """The actions at a graph node are just the labels of its neighbors."""
        return list(self.graph.get(A).keys())

    def result(self, state, action):
        """Moving toward a neighbor lands exactly on that neighbor."""
        return action

    def path_cost(self, cost_so_far, A, action, B):
        """Accumulated cost plus the A-B edge length (infinite if absent)."""
        edge = self.graph.get(A, B)
        return cost_so_far + (edge if edge else np.inf)

    def find_min_edge(self):
        """Smallest edge length present anywhere in the graph."""
        return min(
            (min(links.values()) for links in self.graph.graph_dict.values()),
            default=np.inf)

    def h(self, node):
        """Straight-line distance from a node's state to the goal
        (np.inf when the graph carries no location data)."""
        locs = getattr(self.graph, 'locations', None)
        if not locs:
            return np.inf
        state = node if type(node) is str else node.state
        return int(distance(locs[state], locs[self.goal]))
class GraphProblemStochastic(GraphProblem):
    """GraphProblem variant whose actions can have nondeterministic
    outcomes, i.e. several possible result states.

    Shape the graph as
    dict(A=dict(Action=[[<Result 1>, <Result 2>, ...], <cost>], ...), ...)
    and, because that dictionary format differs, make sure the graph is
    created as a directed graph.
    """

    def result(self, state, action):
        return self.graph.get(state, action)

    def path_cost(self):
        raise NotImplementedError
# ______________________________________________________________________________
class NQueensProblem(Problem):
    """The problem of placing N queens on an NxN board with none attacking
    each other. A state is represented as an N-element array, where
    a value of r in the c-th entry means there is a queen at column c,
    row r, and a value of -1 means that the c-th column has not been
    filled in yet. We fill in columns left to right.
    >>> depth_first_tree_search(NQueensProblem(8))
    <Node (7, 3, 0, 2, 5, 1, 6, 4)>
    """

    def __init__(self, N):
        super().__init__(tuple([-1] * N))
        self.N = N

    def actions(self, state):
        """In the leftmost empty column, try all non-conflicting rows."""
        # BUG FIX: the original tested `state[-1] is not -1`, an identity
        # comparison on ints that only works by CPython small-int caching.
        if state[-1] != -1:
            return []  # All columns filled; no successors
        col = state.index(-1)
        return [row for row in range(self.N)
                if not self.conflicted(state, row, col)]

    def result(self, state, row):
        """Place the next queen (leftmost empty column) at the given row."""
        col = state.index(-1)
        new = list(state[:])
        new[col] = row
        return tuple(new)

    def conflicted(self, state, row, col):
        """Would placing a queen at (row, col) conflict with anything?"""
        return any(self.conflict(row, col, state[c], c)
                   for c in range(col))

    def conflict(self, row1, col1, row2, col2):
        """Would putting two queens in (row1, col1) and (row2, col2) conflict?"""
        return (row1 == row2 or  # same row
                col1 == col2 or  # same column
                row1 - col1 == row2 - col2 or  # same \ diagonal
                row1 + col1 == row2 + col2)  # same / diagonal

    def goal_test(self, state):
        """Check if all columns filled, no conflicts."""
        if state[-1] == -1:  # was `is -1`: identity test on an int
            return False
        return not any(self.conflicted(state, state[col], col)
                       for col in range(len(state)))

    def h(self, node):
        """Return number of conflicting queens for a given node.

        NOTE(review): enumerate yields (column, row) pairs, so r1/c1 are
        swapped relative to conflict()'s parameter names; the check is
        symmetric under that swap, so the (ordered-pair) count is correct.
        """
        num_conflicts = 0
        for (r1, c1) in enumerate(node.state):
            for (r2, c2) in enumerate(node.state):
                if (r1, c1) != (r2, c2):
                    num_conflicts += self.conflict(r1, c1, r2, c2)
        return num_conflicts
# ______________________________________________________________________________
# Inverse Boggle: Search for a high-scoring Boggle board. A good domain for
# iterative-repair and related search techniques, as suggested by Justin Boyan.
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

# The sixteen letter cubes of the standard 4x4 Boggle set; one face is
# chosen at random from each cube when a board is rolled.
cubes16 = ['FORIXB', 'MOQABJ', 'GURILW', 'SETUPL',
           'CMPDAE', 'ACITAO', 'SLCRAE', 'ROMASH',
           'NODESW', 'HEFIYE', 'ONUDTK', 'TEVIGN',
           'ANEDVZ', 'PINESH', 'ABILYT', 'GKYLEU']
def random_boggle(n=4):
    """Return a random n x n Boggle board, represented as a flat list of
    letters (one face chosen per cube)."""
    cubes = [cubes16[i % 16] for i in range(n * n)]
    random.shuffle(cubes)
    return [random.choice(cube) for cube in cubes]
# The best 5x5 board found by Boyan, with our word list this board scores
# 2274 words, for a score of 9837
# (list() splits the string into its 25 single-letter squares).
boyan_best = list('RSTCSDEIAEGNLRPEATESMSSID')
def print_boggle(board):
    """Pretty-print a flat board as an n x n grid ('Q' is shown as 'Qu')."""
    n2 = len(board)
    n = exact_sqrt(n2)
    for i, letter in enumerate(board):
        if i % n == 0 and i > 0:
            print()
        print('Qu' if letter == 'Q' else str(letter) + ' ', end=' ')
    print()
def boggle_neighbors(n2, cache={}):
    """Return a list of lists, where the i-th element is the list of indexes
    for the neighbors of square i.

    n2 is the total number of squares (a perfect square).  The mutable
    default `cache` is intentional: it memoizes the neighbor table per
    board size across calls.
    """
    if cache.get(n2):
        return cache.get(n2)
    n = exact_sqrt(n2)
    neighbors = [None] * n2
    for i in range(n2):
        neighbors[i] = []
        # Border tests for square i in row-major order.
        on_top = i < n
        on_bottom = i >= n2 - n
        on_left = i % n == 0
        on_right = (i + 1) % n == 0
        if not on_top:
            neighbors[i].append(i - n)  # square directly above
            if not on_left:
                neighbors[i].append(i - n - 1)  # above-left diagonal
            if not on_right:
                neighbors[i].append(i - n + 1)  # above-right diagonal
        if not on_bottom:
            neighbors[i].append(i + n)  # square directly below
            if not on_left:
                neighbors[i].append(i + n - 1)  # below-left diagonal
            if not on_right:
                neighbors[i].append(i + n + 1)  # below-right diagonal
        if not on_left:
            neighbors[i].append(i - 1)  # left neighbor
        if not on_right:
            neighbors[i].append(i + 1)  # right neighbor
    cache[n2] = neighbors
    return neighbors
def exact_sqrt(n2):
    """If n2 is a perfect square, return its square root, else raise error.

    Uses math.isqrt, which is exact for arbitrarily large ints, instead
    of np.sqrt, whose float rounding can misreport huge perfect squares.
    """
    import math  # local import keeps the fix self-contained
    n = math.isqrt(n2)
    assert n * n == n2
    return n
# _____________________________________________________________________________
class Wordlist:
    """Holds a sorted list of words.  Supports `word in wordlist`
    membership tests and wordlist.lookup(prefix) to ask whether any
    stored word starts with the given prefix."""

    def __init__(self, file, min_len=3):
        tokens = file.read().upper().split()
        self.words = sorted(w for w in tokens if len(w) >= min_len)
        # Precompute, per initial letter, the (lo, hi) slice of
        # self.words starting with it, so lookups binary-search a
        # narrow range.
        self.bounds = {}
        for c in ALPHABET:
            c2 = chr(ord(c) + 1)
            self.bounds[c] = (bisect.bisect(self.words, c),
                              bisect.bisect(self.words, c2))

    def lookup(self, prefix, lo=0, hi=None):
        """Return (i, is_word): i is the lowest index in [lo, hi) with
        words[i].startswith(prefix) (or None if there is none), and
        is_word is True iff prefix itself is a stored word."""
        words = self.words
        hi = len(words) if hi is None else hi
        i = bisect.bisect_left(words, prefix, lo, hi)
        if i < len(words) and words[i].startswith(prefix):
            return i, words[i] == prefix
        return None, False

    def __contains__(self, word):
        return self.lookup(word)[1]

    def __len__(self):
        return len(self.words)
# _____________________________________________________________________________
class BoggleFinder:
    """A class that allows you to find all the words in a Boggle board."""

    wordlist = None  # A class variable, holding a wordlist

    def __init__(self, board=None):
        if BoggleFinder.wordlist is None:
            # Load the shared wordlist lazily, once per process.
            BoggleFinder.wordlist = Wordlist(open_data("EN-text/wordlist.txt"))
        self.found = {}
        if board:
            self.set_board(board)

    def set_board(self, board=None):
        """Set the board, and find all the words in it."""
        if board is None:
            board = random_boggle()
        self.board = board
        self.neighbors = boggle_neighbors(len(board))
        self.found = {}
        for i in range(len(board)):
            # Narrow the dictionary search to words whose first letter
            # is board[i].
            lo, hi = self.wordlist.bounds[board[i]]
            self.find(lo, hi, i, [], '')
        return self

    def find(self, lo, hi, i, visited, prefix):
        """Looking in square i, find the words that continue the prefix,
        considering the entries in self.wordlist.words[lo:hi], and not
        revisiting the squares in visited."""
        if i in visited:
            return
        wordpos, is_word = self.wordlist.lookup(prefix, lo, hi)
        if wordpos is not None:
            if is_word:
                self.found[prefix] = True
            visited.append(i)
            c = self.board[i]
            if c == 'Q':
                c = 'QU'  # the Q cube counts as 'Qu'
            prefix += c
            for j in self.neighbors[i]:
                self.find(wordpos, hi, j, visited, prefix)
            visited.pop()  # backtrack: free this square for other paths

    def words(self):
        """The words found."""
        return list(self.found.keys())

    # Scoring by word length: indexes 0-3 score 0, 4 -> 1, 5 -> 2,
    # 6 -> 3, 7 -> 5, 8 and longer -> 11.
    scores = [0, 0, 0, 0, 1, 2, 3, 5] + [11] * 100

    def score(self):
        """The total score for the words found, according to the rules."""
        return sum([self.scores[len(w)] for w in self.words()])

    def __len__(self):
        """The number of words found."""
        return len(self.found)
# _____________________________________________________________________________
def boggle_hill_climbing(board=None, ntimes=100, verbose=True):
    """Solve inverse Boggle by hill-climbing: start from a random (or
    given) board and keep single-letter mutations that find more words."""
    finder = BoggleFinder()
    if board is None:
        board = random_boggle()
    best = len(finder.set_board(board))
    for trial in range(ntimes):
        i, oldc = mutate_boggle(board)
        new = len(finder.set_board(board))
        if new > best:
            best = new
            if verbose:
                print(best, trial, board)
        else:
            board[i] = oldc  # revert the unhelpful mutation
    if verbose:
        print_boggle(board)
    return board, best
def mutate_boggle(board):
    """Overwrite one random board square with a random cube letter and
    return (index, previous letter) so the change can be undone."""
    i = random.randrange(len(board))
    oldc = board[i]
    board[i] = random.choice(random.choice(cubes16))
    return i, oldc
# ______________________________________________________________________________
# Code to compare searchers on various problems.
class InstrumentedProblem(Problem):
    """Wraps a problem, delegating every call while counting successor
    expansions, goal tests, and generated states."""

    def __init__(self, problem):
        self.problem = problem
        self.succs = self.goal_tests = self.states = 0
        self.found = None  # first goal state seen, if any

    def actions(self, state):
        self.succs += 1
        return self.problem.actions(state)

    def result(self, state, action):
        self.states += 1
        return self.problem.result(state, action)

    def goal_test(self, state):
        self.goal_tests += 1
        hit = self.problem.goal_test(state)
        if hit:
            self.found = state
        return hit

    def path_cost(self, c, state1, action, state2):
        return self.problem.path_cost(c, state1, action, state2)

    def value(self, state):
        return self.problem.value(state)

    def __getattr__(self, attr):
        # Anything not counted here falls through to the wrapped problem.
        return getattr(self.problem, attr)

    def __repr__(self):
        return f'<{self.succs:4d}/{self.goal_tests:4d}/{self.states:4d}/{str(self.found)[:4]}>'
def compare_searchers(problems, header,
                      searchers=[breadth_first_tree_search,
                                 breadth_first_graph_search,
                                 depth_first_graph_search,
                                 iterative_deepening_search,
                                 depth_limited_search,
                                 recursive_best_first_search]):
    """Run each searcher on each problem and print a table of the
    collected statistics (successors / goal tests / states / found)."""
    def run_one(searcher, problem):
        # One-line purpose: instrument the problem so the searcher's work
        # is counted, then return the instrumented wrapper.
        instrumented = InstrumentedProblem(problem)
        searcher(instrumented)
        return instrumented

    table = [[name(s)] + [run_one(s, p) for p in problems] for s in searchers]
    print_table(table, header)
def compare_graph_searchers():
    """Prints a table of search results."""
    problems = [GraphProblem('Arad', 'Bucharest', romania_map),
                GraphProblem('Oradea', 'Neamt', romania_map),
                GraphProblem('Q', 'WA', australia_map)]
    header = ['Searcher', 'romania_map(Arad, Bucharest)',
              'romania_map(Oradea, Neamt)', 'australia_map']
    compare_searchers(problems=problems, header=header)
|
import random
import asyncio
from functools import partial
from aioredis.errors import ProtocolError
from aioredis.commands import (
create_redis,
Redis,
create_redis_pool
)
from aioredis.util import decode, encode_str, cached_property
from aioredis.log import logger
from aioredis.errors import ReplyError, RedisClusterError
from .crc import crc16
from .base import RedisClusterBase
__all__ = (
'create_pool_cluster',
'RedisPoolCluster',
'create_cluster',
'RedisCluster',
)
def parse_moved_response_error(err):
    """Extract (host, port) from a redis '-MOVED <slot> <host>:<port>'
    reply error; return None when err is not a well-formed MOVED error."""
    if not err or not err.args or not err.args[0]:
        return
    data = err.args[0].strip()
    if not data.startswith('MOVED'):
        return
    try:
        host, port = data.split()[-1].split(':')
        return host, int(port)
    except (IndexError, ValueError):
        # ValueError covers a malformed address (the host:port unpack) or
        # a non-numeric port; the original caught only IndexError and let
        # those propagate as crashes.
        return
class ClusterNode:
    """One node of a redis cluster, as parsed from a CLUSTER NODES entry."""

    def __init__(
            self, number, id, host, port, flags, master, status, slots,
            **kwargs
    ):
        # `number` is the node's position in the parsed listing; extra
        # fields from the parser are accepted and ignored via **kwargs.
        self.id = id
        self.host = host
        self.port = port
        self.flags = flags
        self.master = master
        self.status = status
        self.slots = slots
        self.number = number

    def __repr__(self):
        return (f'Address: {self.address!r}. Master: {self.is_master!r}. '
                f'Slave: {self.is_slave!r}. Alive: {self.is_alive!r}')

    @cached_property
    def is_master(self):
        return 'master' in self.flags

    @cached_property
    def is_slave(self):
        return 'slave' in self.flags

    @cached_property
    def address(self):
        return self.host, self.port

    @cached_property
    def is_alive(self):
        # A node counts as alive unless it is flagged failed (or
        # suspected failed), and only while its link is connected.
        if 'fail' in self.flags or 'fail?' in self.flags:
            return False
        return self.status == 'connected'

    def in_range(self, value):
        """True when slot number `value` falls in one of this node's ranges."""
        if not self.slots:
            return False
        if value < self.slots[0][0] or value > self.slots[-1][-1]:
            return False
        return any(lo <= value <= hi for lo, hi in self.slots)
class ClusterNodesManager:
    """A parsed snapshot of cluster topology (CLUSTER NODES output)."""

    # Fixed slot count of the redis cluster keyspace.
    REDIS_CLUSTER_HASH_SLOTS = 16384

    def __init__(self, nodes):
        nodes = list(nodes)
        # Slaves report no slot ranges of their own; copy them over from
        # their master so slot lookups work against any node.
        masters_slots = {node.id: node.slots for node in nodes}
        for node in nodes:
            if node.is_slave:
                node.slots = masters_slots[node.master]
        self.nodes = nodes

    def __repr__(self):
        return r' == '.join(repr(node) for node in self.nodes)

    def __str__(self):
        return '\n'.join(repr(node) for node in self.nodes)

    @classmethod
    def parse_info(cls, info):
        # Lazily yield one ClusterNode per raw node entry, numbering them
        # by their position in the listing.
        for index, node_data in enumerate(info):
            yield ClusterNode(index, **node_data)

    @classmethod
    def create(cls, data):
        """Alternate constructor: build a manager from raw parsed data."""
        nodes = cls.parse_info(data)
        return cls(nodes)

    @staticmethod
    def key_slot(key, bucket=REDIS_CLUSTER_HASH_SLOTS):
        """Calculate key slot for a given key.
        :param key - str|bytes
        :param bucket - int
        """
        k = encode_str(key)
        # Honor redis "hash tags": when the key contains a non-empty
        # {...} section, only that substring is hashed, so related keys
        # can be forced onto one slot.
        start = k.find(b'{')
        if start > -1:
            end = k.find(b'}', start + 1)
            if end > -1 and end != start + 1:
                k = k[start + 1:end]
        return crc16(k) % bucket

    @cached_property
    def alive_nodes(self):
        return [node for node in self.nodes if node.is_alive]

    @cached_property
    def nodes_count(self):
        return len(self.alive_nodes)

    @cached_property
    def masters_count(self):
        return len(self.masters)

    @cached_property
    def slaves_count(self):
        return len(self.slaves)

    @cached_property
    def masters(self):
        return [node for node in self.alive_nodes if node.is_master]

    @cached_property
    def slaves(self):
        return [node for node in self.alive_nodes if node.is_slave]

    @cached_property
    def all_slots_covered(self):
        # Sum the sizes of every master's slot ranges and compare against
        # the full keyspace.
        covered_slots_number = sum(
            end - start + 1
            for master in self.masters for start, end in master.slots
        )
        return covered_slots_number >= self.REDIS_CLUSTER_HASH_SLOTS

    def get_node_by_slot(self, slot):
        """Return the master owning `slot`, or None."""
        for node in self.masters:
            if node.in_range(slot):
                return node
        else:
            # for/else: reached only when the loop found nothing.
            return None

    def get_node_by_id(self, node_id):
        """Return the node with the given cluster id, or None."""
        for node in self.nodes:
            if node_id == node.id:
                return node
        else:
            return None

    def get_node_by_address(self, address):
        """Return the node at the given (host, port), or None."""
        for node in self.nodes:
            if address == node.address:
                return node
        else:
            return None

    def get_random_node(self):
        return random.choice(self.alive_nodes)

    def get_random_master_node(self):
        return random.choice(self.masters)

    def get_random_slave_node(self):
        return random.choice(self.slaves)

    def determine_slot(self, *keys):
        """Return the single slot all `keys` map to; raise when they span
        more than one slot (multi-key commands require one slot)."""
        if any(key is None for key in keys):
            raise TypeError('key must not be None')
        if len(keys) == 1:
            return self.key_slot(keys[0])
        else:
            slots = {self.key_slot(key) for key in keys}
            if len(slots) != 1:
                raise RedisClusterError(
                    'all keys must map to the same key slot')
            return slots.pop()
async def create_pool_cluster(
        nodes, *, db=0, password=None, encoding=None,
        minsize=10, maxsize=10, commands_factory=Redis, loop=None):
    """
    Create Redis Pool Cluster.
    :param nodes = [(address1, port1), (address2, port2), ...]
    :param db - int
    :param password: str
    :param encoding: str
    :param minsize: int
    :param maxsize: int
    :param commands_factory: obj
    :param loop: obj
    :return RedisPoolCluster instance.
    """
    # The docstring above is interpolated into the error below, so it is
    # kept verbatim.
    if not isinstance(nodes, (tuple, list)) or not nodes:
        raise RedisClusterError(
            'Cluster nodes is not set properly. {0}'.
            format(create_pool_cluster.__doc__))

    cluster = RedisPoolCluster(
        nodes, db, password, encoding=encoding, minsize=minsize,
        maxsize=maxsize, commands_factory=commands_factory, loop=loop)
    await cluster.initialize()
    return cluster
async def create_cluster(
        nodes, *, db=0, password=None, encoding=None,
        commands_factory=Redis, loop=None):
    """
    Create Redis Pool Cluster.
    :param nodes = [(address1, port1), (address2, port2), ...]
    :param db - int
    :param password: str
    :param encoding: str
    :param commands_factory: obj
    :param loop: obj
    :return RedisPoolCluster instance.
    """
    # The docstring above is interpolated into the error below, so it is
    # kept verbatim.
    if not isinstance(nodes, (tuple, list)) or not nodes:
        raise RedisClusterError(
            'Cluster nodes is not set properly. {0}'.
            format(create_cluster.__doc__))

    cluster = RedisCluster(
        nodes, db, password, encoding=encoding,
        commands_factory=commands_factory, loop=loop)
    await cluster.initialize()
    return cluster
class RedisCluster(RedisClusterBase):
"""Redis cluster."""
MAX_MOVED_COUNT = 10
def __init__(self, nodes, db=0, password=None, encoding=None,
*, commands_factory, loop=None):
if loop is None:
loop = asyncio.get_event_loop()
self._nodes = nodes
self._db = db
self._password = password
self._encoding = encoding
self._factory = commands_factory
self._loop = loop
self._moved_count = 0
self._cluster_manager = None
def _is_eval_command(self, command):
if isinstance(command, bytes):
command = command.decode('utf-8')
return command.lower() in ['eval', 'evalsha']
def get_node(self, command, *args, **kwargs):
if self._is_eval_command(command):
keys = kwargs.get('keys', [])
if not isinstance(keys, (list, tuple)):
raise TypeError('keys must be given as list or tuple')
else:
keys = args[:1]
if len(keys) > 0:
slot = self._cluster_manager.determine_slot(*keys)
node = self._cluster_manager.get_node_by_slot(slot)
if node is not None:
return node
return self._cluster_manager.get_random_master_node()
def node_count(self):
return self._cluster_manager.nodes_count
def masters_count(self):
return self._cluster_manager.masters_count
def slave_count(self):
return self._cluster_manager.slaves_count
def _get_nodes_entities(self, slaves=False):
slave_nodes = []
if slaves:
slave_nodes = [node.address for node in self.slave_nodes]
return [node.address for node in self.master_nodes] + slave_nodes
@property
def master_nodes(self):
return self._cluster_manager.masters
@property
def slave_nodes(self):
return self._cluster_manager.slaves
async def _get_raw_cluster_info_from_node(self, node):
conn = await create_redis(
node,
db=self._db,
password=self._password,
encoding='utf-8',
commands_factory=self._factory,
loop=self._loop
)
try:
nodes_resp = await conn.cluster_nodes()
return nodes_resp
finally:
conn.close()
await conn.wait_closed()
async def fetch_cluster_info(self):
logger.info('Loading cluster info from {}...'.format(self._nodes))
tasks = [
asyncio.ensure_future(
self._get_raw_cluster_info_from_node(node), loop=self._loop
) for node in self._nodes
]
try:
for task in asyncio.as_completed(tasks, loop=self._loop):
try:
nodes_raw_response = await task
self._cluster_manager = ClusterNodesManager.create(
nodes_raw_response)
logger.info('Cluster info loaded successfully: %s',
list(nodes_raw_response))
return
except (ReplyError, ProtocolError, ConnectionError) as exc:
logger.warning(
"Loading cluster info from a node failed with {}"
.format(repr(exc))
)
finally:
for task in tasks:
task.cancel()
# Wait until all tasks have closed their connection
await asyncio.gather(
*tasks, loop=self._loop, return_exceptions=True)
raise RedisClusterError(
"No cluster info could be loaded from any host")
async def initialize(self):
logger.info('Initializing cluster...')
self._moved_count = 0
await self.fetch_cluster_info()
logger.info('Initialized cluster.\n{}'.format(self._cluster_manager))
async def clear(self):
pass # All connections are created on demand and destroyed afterwards.
@property
def all_slots_covered(self):
return self._cluster_manager.all_slots_covered
async def create_connection(self, address):
conn = await create_redis(
address,
db=self._db,
encoding=self._encoding,
password=self._password,
commands_factory=self._factory,
loop=self._loop,
)
return conn
async def _execute_node(self, address, command, *args, **kwargs):
"""Execute redis command and returns Future waiting for the answer.
:param command str
:param pool obj
Raises:
* TypeError if any of args can not be encoded as bytes.
* ReplyError on redis '-ERR' responses.
* ProtocolError when response can not be decoded meaning connection
is broken.
"""
cmd = decode(command, 'utf-8').lower()
to_close = []
try:
conn = await self.create_connection(address)
to_close.append(conn)
return await getattr(conn, cmd)(*args, **kwargs)
except ReplyError as err:
address = parse_moved_response_error(err)
if address is None:
raise
logger.debug('Got MOVED command: {}'.format(err))
self._moved_count += 1
if self._moved_count >= self.MAX_MOVED_COUNT:
await self.initialize()
node = self.get_node(command, *args, **kwargs)
address = node.address
conn = await self.create_connection(address)
to_close.append(conn)
return await getattr(conn, cmd)(*args, **kwargs)
finally:
for conn in to_close:
conn.close()
await conn.wait_closed()
async def _execute_nodes(self, command, *args, slaves=False, **kwargs):
"""
Execute redis command for all nodes and returns
Future waiting for the answer.
:param command str
:param slaves bool - Execute on all nodes masters + slaves
Raises:
* TypeError if any of args can not be encoded as bytes.
* ReplyError on redis '-ERR' responses.
* ProtocolError when response can not be decoded meaning connection
is broken.
"""
nodes = self._get_nodes_entities(slaves=slaves)
return await asyncio.gather(*[
self._execute_node(node, command, *args, **kwargs)
for node in nodes
], loop=self._loop)
async def execute(
        self, command, *args, address=None, many=False, slaves=False,
        **kwargs
):
    """Execute redis command and returns Future waiting for the answer.

    :param command str
    :param address tuple - Execute on node with specified address
        if many specified will be ignored
    :param many bool - invoke on all master nodes
    :param slaves bool - if many specified, execute even on slave nodes
    Raises:
        * TypeError if any of args can not be encoded as bytes.
        * ReplyError on redis '-ERR' responses.
        * ProtocolError when response can not be decoded meaning connection
          is broken.
    """
    # bad hack to prevent execution on many nodes
    # NOTE(review): keyless non-cluster commands fan out to all nodes;
    # the 'in' test assumes *command* is str here (bytes would raise
    # TypeError) -- confirm callers never pass bytes.
    if many or (not args and 'cluster_' not in command):
        return await self._execute_nodes(
            command, *args, slaves=slaves, **kwargs
        )
    if not address:
        # Route by key slot, or a random master for keyless commands.
        address = self.get_node(command, *args, **kwargs).address
    return await self._execute_node(address, command, *args, **kwargs)
def __getattr__(self, cmd):
    """Expose every redis command as a method, e.g. ``cluster.get(key)``."""
    bound = partial(self.execute, cmd)
    return bound
class RedisPoolCluster(RedisCluster):
    """
    Redis pool cluster.

    Keeps one connection pool per master node; commands are checked out of
    the pool that owns the key's hash slot.
    Do not use it for cluster management.
    Will not operate with slaves and target node
    """

    def __init__(self, nodes, db=0, password=None, encoding=None,
                 *, minsize, maxsize, commands_factory, loop=None):
        # minsize/maxsize bound each per-master pool created in
        # get_cluster_pool().
        if loop is None:
            loop = asyncio.get_event_loop()
        super().__init__(nodes, db=db, password=password, encoding=encoding,
                         commands_factory=commands_factory, loop=loop)
        self._minsize = minsize
        self._maxsize = maxsize
        self._cluster_pool = {}  # master node id -> connection pool

    def _get_nodes_entities(self, **kwargs):
        # Pools exist for masters only, so the `slaves` kwarg is ignored.
        return self._cluster_pool.values()

    async def get_cluster_pool(self):
        """Create one connection pool per master; returns {node.id: pool}."""
        cluster_pool = {}
        nodes = list(self._cluster_manager.masters)
        tasks = [
            create_redis_pool(
                node.address,
                db=self._db,
                password=self._password,
                encoding=self._encoding,
                minsize=self._minsize,
                maxsize=self._maxsize,
                commands_factory=self._factory,
                loop=self._loop
            )
            for node in nodes
        ]
        results = await asyncio.gather(*tasks, loop=self._loop)
        for node, connection in zip(nodes, results):
            cluster_pool[node.id] = connection
        return cluster_pool

    async def reload_cluster_pool(self):
        """Tear down all pools, re-fetch topology and rebuild the pools."""
        logger.info('Reloading cluster...')
        await self.clear()
        self._moved_count = 0
        await self.fetch_cluster_info()
        logger.info('Connecting to cluster...')
        self._cluster_pool = await self.get_cluster_pool()
        logger.info('Reloaded cluster')

    async def initialize(self):
        # Base class loads the slot map; then build the per-master pools.
        await super().initialize()
        self._cluster_pool = await self.get_cluster_pool()

    async def clear(self):
        """Clear pool connections. Close and remove all free connections."""
        for pool in self._get_nodes_entities():
            pool.close()
            await pool.wait_closed()

    def get_node(self, command, *args, **kwargs):
        # Returns the *pool* for the owning master, not the node object.
        node = super().get_node(command, *args, **kwargs)
        return self._cluster_pool[node.id]

    async def _execute_node(self, pool, command, *args, **kwargs):
        """Execute redis command and returns Future waiting for the answer.

        On a -MOVED redirect: after MAX_MOVED_COUNT redirects the pools are
        rebuilt and the command retried through the new owning pool;
        otherwise a one-shot connection to the redirect target is used
        (the pool map is presumed temporarily stale).

        :param command str
        :param pool obj
        Raises:
            * TypeError if any of args can not be encoded as bytes.
            * ReplyError on redis '-ERR' responses.
            * ProtocolError when response can not be decoded meaning
              connection is broken.
        """
        cmd = decode(command, 'utf-8').lower()
        try:
            with await pool as conn:
                return await getattr(conn, cmd)(*args, **kwargs)
        except ReplyError as err:
            address = parse_moved_response_error(err)
            if address is None:
                raise
            logger.debug('Got MOVED command: {}'.format(err))
            self._moved_count += 1
            if self._moved_count >= self.MAX_MOVED_COUNT:
                await self.initialize()
                pool = self.get_node(command, *args, **kwargs)
                with await pool as conn:
                    return await getattr(conn, cmd)(*args, **kwargs)
            else:
                conn = await self.create_connection(address)
                res = await getattr(conn, cmd)(*args, **kwargs)
                conn.close()
                await conn.wait_closed()
                return res

    async def execute(self, command, *args, many=False, **kwargs):
        """Execute redis command and returns Future waiting for the answer.

        :param command str
        :param many bool - invoke on all master nodes
        Raises:
            * TypeError if any of args can not be encoded as bytes.
            * ReplyError on redis '-ERR' responses.
            * ProtocolError when response can not be decoded meaning
              connection is broken.
        """
        # bad hack to prevent execution on many nodes
        if many or (not args and 'cluster_' not in command):
            return await self._execute_nodes(command, *args, **kwargs)
        pool = self.get_node(command, *args, **kwargs)
        return await self._execute_node(pool, command, *args, **kwargs)
Loading cluster info from a node may fail with an OSError; such a node is now skipped and the next node is tried instead of aborting.
import random
import asyncio
from functools import partial
from aioredis.errors import ProtocolError
from aioredis.commands import (
create_redis,
Redis,
create_redis_pool
)
from aioredis.util import decode, encode_str, cached_property
from aioredis.log import logger
from aioredis.errors import ReplyError, RedisClusterError
from .crc import crc16
from .base import RedisClusterBase
# Public API: the two factory coroutines and the two cluster client flavours.
__all__ = (
    'create_pool_cluster',
    'RedisPoolCluster',
    'create_cluster',
    'RedisCluster',
)
def parse_moved_response_error(err):
    """Extract the redirect target from a ``-MOVED`` reply error.

    :param err: exception whose ``args[0]`` is the raw redis error string,
        e.g. ``'MOVED 3999 127.0.0.1:6381'``.
    :return: ``(host, port)`` tuple, or ``None`` when *err* is not a
        well-formed MOVED redirect.
    """
    if not err or not err.args or not err.args[0]:
        return None
    data = err.args[0].strip()
    if not data.startswith('MOVED'):
        return None
    try:
        host, port = data.split()[-1].split(':')
        return host, int(port)
    except (IndexError, ValueError):
        # ValueError covers both a missing ':' (tuple-unpack failure, e.g.
        # bare 'MOVED') and a non-numeric port; the previous version only
        # caught IndexError and crashed on such malformed replies.
        return None
class ClusterNode:
    """One node parsed from ``CLUSTER NODES`` output."""

    def __init__(
            self, number, id, host, port, flags, master, status, slots,
            **kwargs
    ):
        # Parameter names mirror the parsed CLUSTER NODES fields and are
        # filled via **node_data, so they must not be renamed.
        self.number = number
        self.id = id
        self.host = host
        self.port = port
        self.flags = flags
        self.master = master
        self.status = status
        self.slots = slots

    def __repr__(self):
        return r'Address: {!r}. Master: {!r}. Slave: {!r}. Alive: {!r}'.format(
            self.address, self.is_master, self.is_slave, self.is_alive)

    @cached_property
    def is_master(self):
        return 'master' in self.flags

    @cached_property
    def is_slave(self):
        return 'slave' in self.flags

    @cached_property
    def address(self):
        return self.host, self.port

    @cached_property
    def is_alive(self):
        not_failing = 'fail' not in self.flags and 'fail?' not in self.flags
        return not_failing and self.status == 'connected'

    def in_range(self, value):
        """Whether hash slot *value* falls inside one of this node's ranges."""
        if not self.slots:
            return False
        # Cheap bounds check before scanning the individual ranges.
        if value < self.slots[0][0] or value > self.slots[-1][-1]:
            return False
        return any(rng[0] <= value <= rng[1] for rng in self.slots)
class ClusterNodesManager:
    """Immutable snapshot of cluster topology parsed from CLUSTER NODES."""

    REDIS_CLUSTER_HASH_SLOTS = 16384

    def __init__(self, nodes):
        nodes = list(nodes)
        # Slaves carry no slot ranges of their own; copy their master's
        # ranges over so in_range() works uniformly on any node.
        masters_slots = {node.id: node.slots for node in nodes}
        for node in nodes:
            if node.is_slave:
                node.slots = masters_slots[node.master]
        self.nodes = nodes

    def __repr__(self):
        return r' == '.join(repr(node) for node in self.nodes)

    def __str__(self):
        return '\n'.join(repr(node) for node in self.nodes)

    @classmethod
    def parse_info(cls, info):
        # *info* is an iterable of dicts, one per line of CLUSTER NODES.
        for index, node_data in enumerate(info):
            yield ClusterNode(index, **node_data)

    @classmethod
    def create(cls, data):
        """Build a manager from raw CLUSTER NODES response data."""
        nodes = cls.parse_info(data)
        return cls(nodes)

    @staticmethod
    def key_slot(key, bucket=REDIS_CLUSTER_HASH_SLOTS):
        """Calculate key slot for a given key.

        Honours redis hash tags: if the key contains a non-empty
        ``{...}`` section, only that section is hashed.

        :param key - str|bytes
        :param bucket - int
        """
        k = encode_str(key)
        start = k.find(b'{')
        if start > -1:
            end = k.find(b'}', start + 1)
            if end > -1 and end != start + 1:
                k = k[start + 1:end]
        return crc16(k) % bucket

    @cached_property
    def alive_nodes(self):
        return [node for node in self.nodes if node.is_alive]

    @cached_property
    def nodes_count(self):
        return len(self.alive_nodes)

    @cached_property
    def masters_count(self):
        return len(self.masters)

    @cached_property
    def slaves_count(self):
        return len(self.slaves)

    @cached_property
    def masters(self):
        return [node for node in self.alive_nodes if node.is_master]

    @cached_property
    def slaves(self):
        return [node for node in self.alive_nodes if node.is_slave]

    @cached_property
    def all_slots_covered(self):
        # Sum the sizes of every master's slot ranges (ranges inclusive).
        covered_slots_number = sum(
            end - start + 1
            for master in self.masters for start, end in master.slots
        )
        return covered_slots_number >= self.REDIS_CLUSTER_HASH_SLOTS

    def get_node_by_slot(self, slot):
        """Return the alive master owning *slot*, or None."""
        for node in self.masters:
            if node.in_range(slot):
                return node
        else:
            return None

    def get_node_by_id(self, node_id):
        for node in self.nodes:
            if node_id == node.id:
                return node
        else:
            return None

    def get_node_by_address(self, address):
        for node in self.nodes:
            if address == node.address:
                return node
        else:
            return None

    def get_random_node(self):
        return random.choice(self.alive_nodes)

    def get_random_master_node(self):
        return random.choice(self.masters)

    def get_random_slave_node(self):
        return random.choice(self.slaves)

    def determine_slot(self, *keys):
        """Map *keys* to their single shared slot.

        Raises RedisClusterError if the keys hash to different slots,
        TypeError if any key is None.
        """
        if any(key is None for key in keys):
            raise TypeError('key must not be None')
        if len(keys) == 1:
            return self.key_slot(keys[0])
        else:
            slots = {self.key_slot(key) for key in keys}
            if len(slots) != 1:
                raise RedisClusterError(
                    'all keys must map to the same key slot')
            return slots.pop()
async def create_pool_cluster(
        nodes, *, db=0, password=None, encoding=None,
        minsize=10, maxsize=10, commands_factory=Redis, loop=None):
    """
    Create Redis Pool Cluster.
    :param nodes = [(address1, port1), (address2, port2), ...]
    :param db - int
    :param password: str
    :param encoding: str
    :param minsize: int
    :param maxsize: int
    :param commands_factory: obj
    :param loop: obj
    :return RedisPoolCluster instance.
    """
    # Validate up front: the usage string (this docstring) is embedded in
    # the error to guide the caller.
    if not isinstance(nodes, (tuple, list)) or not nodes:
        raise RedisClusterError(
            'Cluster nodes is not set properly. {0}'.
            format(create_pool_cluster.__doc__))
    cluster = RedisPoolCluster(
        nodes, db=db, password=password, encoding=encoding, minsize=minsize,
        maxsize=maxsize, commands_factory=commands_factory, loop=loop)
    await cluster.initialize()
    return cluster
async def create_cluster(
        nodes, *, db=0, password=None, encoding=None,
        commands_factory=Redis, loop=None):
    """
    Create Redis Pool Cluster.
    :param nodes = [(address1, port1), (address2, port2), ...]
    :param db - int
    :param password: str
    :param encoding: str
    :param commands_factory: obj
    :param loop: obj
    :return RedisPoolCluster instance.
    """
    # Validate up front: the usage string (this docstring) is embedded in
    # the error to guide the caller.
    if not isinstance(nodes, (tuple, list)) or not nodes:
        raise RedisClusterError(
            'Cluster nodes is not set properly. {0}'.
            format(create_cluster.__doc__))
    cluster = RedisCluster(
        nodes, db=db, password=password, encoding=encoding,
        commands_factory=commands_factory, loop=loop)
    await cluster.initialize()
    return cluster
class RedisCluster(RedisClusterBase):
    """Redis cluster client: routes commands to nodes by key hash slot.

    Opens a fresh connection per command; see RedisPoolCluster for the
    pooled variant.
    """

    # After this many -MOVED redirects the cached slot map is considered
    # stale and rebuilt via initialize().
    MAX_MOVED_COUNT = 10

    def __init__(self, nodes, db=0, password=None, encoding=None,
                 *, commands_factory, loop=None):
        if loop is None:
            loop = asyncio.get_event_loop()
        self._nodes = nodes  # seed (host, port) addresses for discovery
        self._db = db
        self._password = password
        self._encoding = encoding
        self._factory = commands_factory
        self._loop = loop
        self._moved_count = 0  # running count of -MOVED redirects seen
        self._cluster_manager = None  # set by fetch_cluster_info()

    def _is_eval_command(self, command):
        # EVAL/EVALSHA carry their keys in the 'keys' kwarg, not in args.
        if isinstance(command, bytes):
            command = command.decode('utf-8')
        return command.lower() in ['eval', 'evalsha']

    def get_node(self, command, *args, **kwargs):
        """Pick the node owning the command's key slot (random master if keyless)."""
        if self._is_eval_command(command):
            keys = kwargs.get('keys', [])
            if not isinstance(keys, (list, tuple)):
                raise TypeError('keys must be given as list or tuple')
        else:
            # Routing considers only the first positional arg as the key.
            keys = args[:1]
        if len(keys) > 0:
            slot = self._cluster_manager.determine_slot(*keys)
            node = self._cluster_manager.get_node_by_slot(slot)
            if node is not None:
                return node
        return self._cluster_manager.get_random_master_node()

    def node_count(self):
        return self._cluster_manager.nodes_count

    def masters_count(self):
        return self._cluster_manager.masters_count

    def slave_count(self):
        return self._cluster_manager.slaves_count

    def _get_nodes_entities(self, slaves=False):
        # Returns plain (host, port) addresses; the pooled subclass
        # overrides this to return pools instead.
        slave_nodes = []
        if slaves:
            slave_nodes = [node.address for node in self.slave_nodes]
        return [node.address for node in self.master_nodes] + slave_nodes

    @property
    def master_nodes(self):
        return self._cluster_manager.masters

    @property
    def slave_nodes(self):
        return self._cluster_manager.slaves

    async def _get_raw_cluster_info_from_node(self, node):
        """Fetch raw CLUSTER NODES output from one seed node."""
        conn = await create_redis(
            node,
            db=self._db,
            password=self._password,
            encoding='utf-8',
            commands_factory=self._factory,
            loop=self._loop
        )
        try:
            nodes_resp = await conn.cluster_nodes()
            return nodes_resp
        finally:
            conn.close()
            await conn.wait_closed()

    async def fetch_cluster_info(self):
        """Query seed nodes concurrently; first successful reply wins.

        Raises RedisClusterError when no seed node could be reached.
        """
        logger.info('Loading cluster info from {}...'.format(self._nodes))
        tasks = [
            asyncio.ensure_future(
                self._get_raw_cluster_info_from_node(node), loop=self._loop
            ) for node in self._nodes
        ]
        try:
            for task in asyncio.as_completed(tasks, loop=self._loop):
                try:
                    nodes_raw_response = await task
                    self._cluster_manager = ClusterNodesManager.create(
                        nodes_raw_response)
                    logger.info('Cluster info loaded successfully: %s',
                                list(nodes_raw_response))
                    return
                except (ReplyError, ProtocolError, ConnectionError, OSError) as exc:
                    # A single unreachable/misbehaving seed node is not
                    # fatal; try the next completed task.
                    logger.warning(
                        "Loading cluster info from a node failed with {}"
                        .format(repr(exc))
                    )
        finally:
            for task in tasks:
                task.cancel()
            # Wait until all tasks have closed their connection
            await asyncio.gather(
                *tasks, loop=self._loop, return_exceptions=True)
        raise RedisClusterError(
            "No cluster info could be loaded from any host")

    async def initialize(self):
        """Reset redirect counter and (re)load the slot map."""
        logger.info('Initializing cluster...')
        self._moved_count = 0
        await self.fetch_cluster_info()
        logger.info('Initialized cluster.\n{}'.format(self._cluster_manager))

    async def clear(self):
        pass  # All connections are created on demand and destroyed afterwards.

    @property
    def all_slots_covered(self):
        return self._cluster_manager.all_slots_covered

    async def create_connection(self, address):
        """Open a fresh connection to *address* with this cluster's settings."""
        conn = await create_redis(
            address,
            db=self._db,
            encoding=self._encoding,
            password=self._password,
            commands_factory=self._factory,
            loop=self._loop,
        )
        return conn

    async def _execute_node(self, address, command, *args, **kwargs):
        """Execute redis command and returns Future waiting for the answer.

        On a -MOVED redirect the command is retried once against the
        redirect target (or re-routed after MAX_MOVED_COUNT redirects).

        :param address tuple - (host, port) of the target node
        :param command str
        Raises:
            * TypeError if any of args can not be encoded as bytes.
            * ReplyError on redis '-ERR' responses.
            * ProtocolError when response can not be decoded meaning
              connection is broken.
        """
        cmd = decode(command, 'utf-8').lower()
        to_close = []  # all connections opened here; closed in the finally
        try:
            conn = await self.create_connection(address)
            to_close.append(conn)
            return await getattr(conn, cmd)(*args, **kwargs)
        except ReplyError as err:
            address = parse_moved_response_error(err)
            if address is None:
                raise
            logger.debug('Got MOVED command: {}'.format(err))
            self._moved_count += 1
            if self._moved_count >= self.MAX_MOVED_COUNT:
                await self.initialize()
                node = self.get_node(command, *args, **kwargs)
                address = node.address
            conn = await self.create_connection(address)
            to_close.append(conn)
            return await getattr(conn, cmd)(*args, **kwargs)
        finally:
            for conn in to_close:
                conn.close()
                await conn.wait_closed()

    async def _execute_nodes(self, command, *args, slaves=False, **kwargs):
        """
        Execute redis command for all nodes and returns
        Future waiting for the answer.

        :param command str
        :param slaves bool - Execute on all nodes masters + slaves
        Raises:
            * TypeError if any of args can not be encoded as bytes.
            * ReplyError on redis '-ERR' responses.
            * ProtocolError when response can not be decoded meaning
              connection is broken.
        """
        nodes = self._get_nodes_entities(slaves=slaves)
        return await asyncio.gather(*[
            self._execute_node(node, command, *args, **kwargs)
            for node in nodes
        ], loop=self._loop)

    async def execute(
            self, command, *args, address=None, many=False, slaves=False,
            **kwargs
    ):
        """Execute redis command and returns Future waiting for the answer.

        :param command str
        :param address tuple - Execute on node with specified address
            if many specified will be ignored
        :param many bool - invoke on all master nodes
        :param slaves bool - if many specified, execute even on slave nodes
        Raises:
            * TypeError if any of args can not be encoded as bytes.
            * ReplyError on redis '-ERR' responses.
            * ProtocolError when response can not be decoded meaning
              connection is broken.
        """
        # bad hack to prevent execution on many nodes
        if many or (not args and 'cluster_' not in command):
            return await self._execute_nodes(
                command, *args, slaves=slaves, **kwargs
            )
        if not address:
            address = self.get_node(command, *args, **kwargs).address
        return await self._execute_node(address, command, *args, **kwargs)

    def __getattr__(self, cmd):
        # Unknown attributes become redis commands, e.g. cluster.get(key).
        return partial(self.execute, cmd)
class RedisPoolCluster(RedisCluster):
    """
    Redis pool cluster.

    Keeps one connection pool per master node; commands are checked out of
    the pool that owns the key's hash slot.
    Do not use it for cluster management.
    Will not operate with slaves and target node
    """

    def __init__(self, nodes, db=0, password=None, encoding=None,
                 *, minsize, maxsize, commands_factory, loop=None):
        # minsize/maxsize bound each per-master pool created in
        # get_cluster_pool().
        if loop is None:
            loop = asyncio.get_event_loop()
        super().__init__(nodes, db=db, password=password, encoding=encoding,
                         commands_factory=commands_factory, loop=loop)
        self._minsize = minsize
        self._maxsize = maxsize
        self._cluster_pool = {}  # master node id -> connection pool

    def _get_nodes_entities(self, **kwargs):
        # Pools exist for masters only, so the `slaves` kwarg is ignored.
        return self._cluster_pool.values()

    async def get_cluster_pool(self):
        """Create one connection pool per master; returns {node.id: pool}."""
        cluster_pool = {}
        nodes = list(self._cluster_manager.masters)
        tasks = [
            create_redis_pool(
                node.address,
                db=self._db,
                password=self._password,
                encoding=self._encoding,
                minsize=self._minsize,
                maxsize=self._maxsize,
                commands_factory=self._factory,
                loop=self._loop
            )
            for node in nodes
        ]
        results = await asyncio.gather(*tasks, loop=self._loop)
        for node, connection in zip(nodes, results):
            cluster_pool[node.id] = connection
        return cluster_pool

    async def reload_cluster_pool(self):
        """Tear down all pools, re-fetch topology and rebuild the pools."""
        logger.info('Reloading cluster...')
        await self.clear()
        self._moved_count = 0
        await self.fetch_cluster_info()
        logger.info('Connecting to cluster...')
        self._cluster_pool = await self.get_cluster_pool()
        logger.info('Reloaded cluster')

    async def initialize(self):
        # Base class loads the slot map; then build the per-master pools.
        await super().initialize()
        self._cluster_pool = await self.get_cluster_pool()

    async def clear(self):
        """Clear pool connections. Close and remove all free connections."""
        for pool in self._get_nodes_entities():
            pool.close()
            await pool.wait_closed()

    def get_node(self, command, *args, **kwargs):
        # Returns the *pool* for the owning master, not the node object.
        node = super().get_node(command, *args, **kwargs)
        return self._cluster_pool[node.id]

    async def _execute_node(self, pool, command, *args, **kwargs):
        """Execute redis command and returns Future waiting for the answer.

        On a -MOVED redirect: after MAX_MOVED_COUNT redirects the pools are
        rebuilt and the command retried through the new owning pool;
        otherwise a one-shot connection to the redirect target is used
        (the pool map is presumed temporarily stale).

        :param command str
        :param pool obj
        Raises:
            * TypeError if any of args can not be encoded as bytes.
            * ReplyError on redis '-ERR' responses.
            * ProtocolError when response can not be decoded meaning
              connection is broken.
        """
        cmd = decode(command, 'utf-8').lower()
        try:
            with await pool as conn:
                return await getattr(conn, cmd)(*args, **kwargs)
        except ReplyError as err:
            address = parse_moved_response_error(err)
            if address is None:
                raise
            logger.debug('Got MOVED command: {}'.format(err))
            self._moved_count += 1
            if self._moved_count >= self.MAX_MOVED_COUNT:
                await self.initialize()
                pool = self.get_node(command, *args, **kwargs)
                with await pool as conn:
                    return await getattr(conn, cmd)(*args, **kwargs)
            else:
                conn = await self.create_connection(address)
                res = await getattr(conn, cmd)(*args, **kwargs)
                conn.close()
                await conn.wait_closed()
                return res

    async def execute(self, command, *args, many=False, **kwargs):
        """Execute redis command and returns Future waiting for the answer.

        :param command str
        :param many bool - invoke on all master nodes
        Raises:
            * TypeError if any of args can not be encoded as bytes.
            * ReplyError on redis '-ERR' responses.
            * ProtocolError when response can not be decoded meaning
              connection is broken.
        """
        # bad hack to prevent execution on many nodes
        if many or (not args and 'cluster_' not in command):
            return await self._execute_nodes(command, *args, **kwargs)
        pool = self.get_node(command, *args, **kwargs)
        return await self._execute_node(pool, command, *args, **kwargs)
|
#tests: lint, mypy
import tensorflow as tf
import numpy as np
from neuralmonkey.nn.ortho_gru_cell import OrthoGRUCell
from neuralmonkey.vocabulary import START_TOKEN
from neuralmonkey.logging import log
from neuralmonkey.decoders.output_projection import no_deep_output
from neuralmonkey.nn.projection import linear
class Decoder(object):
    """A class that manages parts of the computation graph that are
    used for the decoding.
    """

    # pylint: disable=too-many-instance-attributes,too-many-locals,too-many-statements
    # Big decoder cannot be simpler. Not sure if refactoring
    # it into smaller units would be helpful
    # Some locals may be turned to attributes

    def __init__(self, encoders, vocabulary, data_id, name, **kwargs):
        """Creates a new instance of the decoder

        Arguments:
            encoders: List of encoders whose outputs will be decoded
            vocabulary: Output vocabulary
            data_id: Identifier of the data series fed to this decoder
            name: Name of the decoder (used in logging)

        Keyword arguments:
            embedding_size: Size of embedding vectors. Default 200
            max_output_len: Maximum length of the output. Default 20
            rnn_size: When projection is used or when no encoder is supplied,
                this is the size of the projected vector.
            dropout_keep_prob: Dropout keep probability. Default 1 (no dropout)
            use_attention: Boolean flag that indicates whether to use attention
                from encoders
            reuse_word_embeddings: Boolean flag specifying whether to
                reuse word embeddings. If True, word embeddings
                from the first encoder will be used
            project_encoder_outputs: Boolean flag whether to project output
                states of encoders
        """
        self.encoders = encoders
        self.vocabulary = vocabulary
        self.data_id = data_id
        self.name = name

        self.output_projection = kwargs.get("output_projection", no_deep_output)
        self.max_output = kwargs.get("max_output_len", 20)
        self.embedding_size = kwargs.get("embedding_size", 200)
        dropout_keep_prob = kwargs.get("dropout_keep_prob", 1.0)
        self.use_attention = kwargs.get("use_attention", False)
        self.reuse_word_embeddings = kwargs.get("reuse_word_embeddings", False)

        if self.reuse_word_embeddings:
            # Shared embeddings force the encoder's embedding size; an
            # explicit embedding_size kwarg is silently overridden (with a
            # warning in the log).
            self.embedding_size = self.encoders[0].embedding_size
            if "embedding_size" in kwargs:
                log("Warning: Overriding embedding_size parameter with reused"
                    " embeddings from the encoder.", color="red")

        self.project_encoder_outputs = kwargs.get("project_encoder_outputs",
                                                  False)

        log("Initializing decoder, name: '{}'".format(self.name))

        ### Learning step
        ### TODO was here only because of scheduled sampling.
        ### needs to be refactored out
        self.learning_step = tf.get_variable(
            "learning_step", [], initializer=tf.constant_initializer(0),
            trainable=False)

        if self.project_encoder_outputs or len(self.encoders) == 0:
            self.rnn_size = kwargs.get("rnn_size", 200)
        else:
            # Without projection the decoder state size must equal the
            # concatenation of all encoder output sizes.
            if "rnn_size" in kwargs:
                log("Warning: rnn_size attribute will not be used "
                    "without encoder projection!", color="red")
            self.rnn_size = sum(e.encoded.get_shape()[1].value
                                for e in self.encoders)

        ### Initialize model
        self.dropout_placeholder = tf.placeholder_with_default(
            tf.constant(dropout_keep_prob, tf.float32),
            shape=[], name="decoder_dropout_placeholder")

        state = self._initial_state()

        self.embedding_matrix = self._input_embeddings()

        self.train_inputs, self.train_weights = self._training_placeholders()
        # Targets are the inputs shifted by one (decoder predicts the next
        # word, never the leading <s>).
        train_targets = self.train_inputs[1:]

        self.go_symbols = tf.placeholder(tf.int32, shape=[None],
                                         name="decoder_go_symbols")

        ### Construct the computation part of the graph
        embedded_train_inputs = self._embed_inputs(self.train_inputs[:-1])

        self.train_rnn_outputs, _, train_logits = self._attention_decoder(
            embedded_train_inputs, state)

        # runtime methods and objects are used when no ground truth is provided
        # (such as during testing)
        runtime_inputs = self._runtime_inputs(self.go_symbols)

        ### Use the same variables for runtime decoding!
        tf.get_variable_scope().reuse_variables()

        self.runtime_rnn_outputs, self.runtime_rnn_states, runtime_logits = \
            self._attention_decoder(
                runtime_inputs, state, runtime_mode=True)

        self.train_logprobs = [tf.nn.log_softmax(l) for l in train_logits]

        # Never output the padding symbol at index 0: argmax over [:, 1:]
        # then shift indices back up by one.
        self.decoded = [tf.argmax(l[:, 1:], 1) + 1 for l in runtime_logits]

        # NOTE(review): losses are scaled by 100 -- presumably for log
        # readability; confirm downstream code does not assume raw values.
        self.train_loss = tf.nn.seq2seq.sequence_loss(
            train_logits, train_targets, self.train_weights,
            self.vocabulary_size) * 100

        # NOTE(review): runtime loss scores the decoder's own (runtime)
        # logits against the gold targets/weights -- presumably a
        # validation-time metric; confirm.
        self.runtime_loss = tf.nn.seq2seq.sequence_loss(
            runtime_logits, train_targets, self.train_weights,
            self.vocabulary_size) * 100

        self.cross_entropies = tf.nn.seq2seq.sequence_loss_by_example(
            train_logits, train_targets, self.train_weights,
            self.vocabulary_size)

        # TODO [refactor] put runtime logits to self from the beginning
        self.runtime_logits = runtime_logits
        self.runtime_logprobs = [tf.nn.log_softmax(l) for l in runtime_logits]

        ### Summaries
        self._init_summaries()

        log("Decoder initialized.")

    @property
    def vocabulary_size(self):
        return len(self.vocabulary)

    @property
    def cost(self):
        # The optimization objective is the (scaled) training loss.
        return self.train_loss

    def top_k_runtime_logprobs(self, k_best):
        """Return the top runtime log probabilities calculated from runtime
        logits.

        Arguments:
            k_best: How many output items to return
        """
        ## the array is of tuples ([values], [indices])
        return [tf.nn.top_k(p, k_best) for p in self.runtime_logprobs]

    def _initial_state(self):
        """Create the initial state of the decoder."""
        if len(self.encoders) == 0:
            # No encoders: start from a zero vector (broadcast to the batch
            # later in _attention_decoder).
            return tf.zeros([self.rnn_size])
        encoders_out = tf.concat(1, [e.encoded for e in self.encoders])
        if self.project_encoder_outputs:
            encoders_out = self._encoder_projection(encoders_out)
        return self._dropout(encoders_out)

    def _encoder_projection(self, encoded_states):
        """Creates a projection of concatenated encoder states
        and applies a tanh activation

        Arguments:
            encoded_states: Tensor of concatenated states of input encoders
                (batch x sum(states))
        """
        input_size = encoded_states.get_shape()[1].value
        output_size = self.rnn_size

        weights = tf.get_variable(
            "encoder_projection_W", [input_size, output_size],
            initializer=tf.random_normal_initializer(stddev=0.01))
        biases = tf.get_variable(
            "encoder_projection_b",
            initializer=tf.zeros_initializer([output_size]))

        dropped_input = self._dropout(encoded_states)
        return tf.tanh(tf.matmul(dropped_input, weights) + biases)

    def _dropout(self, var):
        """Perform dropout on a variable

        Arguments:
            var: The variable to perform the dropout on
        """
        return tf.nn.dropout(var, self.dropout_placeholder)

    def _input_embeddings(self):
        """Create variables and operations for embedding of input words

        If we are reusing word embeddings, this function takes
        them from the first encoder
        """
        if self.reuse_word_embeddings:
            return self.encoders[0].word_embeddings

        # NOTE In the Bahdanau paper, they say they initialized some weights
        # as orthogonal matrices, some by sampling from gauss distro with
        # stddev=0.001 and all other weight matrices as gaussian with
        # stddev=0.01. Embeddings were not among any of the special cases so
        # I assume that they initialized them as any other weight matrix.
        return tf.get_variable(
            "word_embeddings", [self.vocabulary_size, self.embedding_size],
            initializer=tf.random_normal_initializer(stddev=0.01))

    def _training_placeholders(self):
        """Defines data placeholders for training the decoder"""
        inputs = [tf.placeholder(tf.int64, [None], name="decoder_{}".format(i))
                  for i in range(self.max_output + 2)]

        # one less than inputs
        weights = [tf.placeholder(tf.float32, [None],
                                  name="decoder_padding_weights_{}".format(i))
                   for i in range(self.max_output + 1)]

        return inputs, weights

    def _get_rnn_cell(self):
        """Returns a RNNCell object for this decoder"""
        return OrthoGRUCell(self.rnn_size)

    def _collect_attention_objects(self, runtime_mode):
        """Collect attention objects from encoders.

        Returns an empty list when attention is disabled.
        """
        if not self.use_attention:
            return []
        return [e.get_attention_object(runtime_mode)
                for e in self.encoders]

    def _embed_inputs(self, inputs):
        """Embed inputs using the decoder"s word embedding matrix

        Arguments:
            inputs: List of (batched) input words to be embedded
        """
        embedded = [tf.nn.embedding_lookup(self.embedding_matrix, o)
                    for o in inputs]
        return [self._dropout(e) for e in embedded]

    def _runtime_inputs(self, go_symbols):
        """Defines data inputs for running trained decoder

        Arguments:
            go_symbols: Tensor of go symbols. (Shape [batch])
        """
        go_embeds = tf.nn.embedding_lookup(self.embedding_matrix, go_symbols)

        # Only the first input is real; the rest are produced step by step
        # by the loop function during runtime decoding.
        inputs = [go_embeds]
        inputs += [None for _ in range(self.max_output)]

        return inputs

    def _loop_function(self, rnn_output):
        """Basic loop function. Projects state to logits, take the
        argmax of the logits, embed the word and perform dropout on the
        embedding vector.

        Arguments:
            rnn_output: The output of the decoder RNN
        """
        output_activation = self._logit_function(rnn_output)
        previous_word = tf.argmax(output_activation, 1)
        input_embedding = tf.nn.embedding_lookup(self.embedding_matrix,
                                                 previous_word)
        return self._dropout(input_embedding)

    def _logit_function(self, rnn_output):
        """Compute logits on the vocabulary given the state

        This variant simply linearly project the vectors to fit
        the size of the vocabulary

        Arguments:
            rnn_output: the output of the decoder RNN
                (after output projection)

        Returns:
            A Tensor of shape batch_size x vocabulary_size
        """
        return linear(self._dropout(rnn_output), self.vocabulary_size)

    #pylint: disable=too-many-arguments
    # TODO reduce the number of arguments
    def _attention_decoder(self, inputs, initial_state, runtime_mode=False,
                           scope="attention_decoder"):
        """Run the decoder RNN.

        Arguments:
            inputs: The decoder inputs. If runtime_mode=True, only the first
                input is used.
            initial_state: The initial state of the decoder.
            runtime_mode: Boolean flag whether the decoder is running in
                runtime mode (with loop function).
            scope: The variable scope to use with this function.

        Returns:
            (rnn_outputs, rnn_states, output_logits) lists, one entry
            per decoding step (rnn_states has one extra leading entry,
            the initial state).
        """
        cell = self._get_rnn_cell()
        att_objects = self._collect_attention_objects(runtime_mode)

        ## Broadcast the initial state to the whole batch if needed
        if len(initial_state.get_shape()) == 1:
            assert initial_state.get_shape()[0].value == self.rnn_size
            initial_state = tf.reshape(
                tf.tile(initial_state, tf.shape(inputs[0])[:1]),
                [-1, self.rnn_size])

        with tf.variable_scope(scope):
            ## First decoding step
            contexts = [a.attention(initial_state) for a in att_objects]
            output = self.output_projection(inputs[0], initial_state, contexts)
            _, state = cell(tf.concat(1, [inputs[0]] + contexts), initial_state)

            logit = self._logit_function(output)

            output_logits = [logit]
            rnn_outputs = [output]
            rnn_states = [initial_state, state]

            for step in range(1, len(inputs)):
                tf.get_variable_scope().reuse_variables()

                if runtime_mode:
                    # Feed back the decoder's own previous prediction.
                    current_input = self._loop_function(output)
                else:
                    current_input = inputs[step]

                ## N-th decoding step
                contexts = [a.attention(state) for a in att_objects]
                output = self.output_projection(current_input, state, contexts)
                _, state = cell(tf.concat(1, [current_input] + contexts), state)

                logit = self._logit_function(output)

                output_logits.append(logit)
                rnn_outputs.append(output)
                rnn_states.append(state)

            if runtime_mode:
                # Emit attention-alignment images for validation plots.
                for i, a in enumerate(att_objects):
                    alignments = tf.expand_dims(tf.transpose(
                        tf.pack(a.attentions_in_time), perm=[1, 2, 0]), -1)

                    tf.image_summary("attention_{}".format(i), alignments,
                                     collections=["summary_val_plots"],
                                     max_images=256)

            return rnn_outputs, rnn_states, output_logits

    def _init_summaries(self):
        """Initialize the summaries of the decoder

        TensorBoard summaries are collected into the following
        collections:

        - summary_train: collects statistics from the train-time
        """
        tf.scalar_summary("train_loss_with_decoded_inputs", self.runtime_loss,
                          collections=["summary_train"])

        tf.scalar_summary("train_optimization_cost", self.train_loss,
                          collections=["summary_train"])

    def feed_dict(self, dataset, train=False):
        """Populate the feed dictionary for the decoder object

        Arguments:
            dataset: The dataset to feed from
            train: Boolean whether we are in training mode

        Decoder placeholders:

            ``decoder_{x} for x in range(max_output+2)``
                Training data placeholders. Starts with <s> and ends with </s>

            ``decoder_padding_weights{x} for x in range(max_output+1)``
                Weights used for padding. (Float) tensor of ones and zeros.
                This tensor is one-item shorter than the other one since the
                decoder does not produce the first <s>.

            ``dropout_placeholder``
                Scalar placeholder for dropout probability.
                Has value 'dropout_keep_prob' from the constructor or 1
                in case we are decoding at run-time
        """
        # pylint: disable=invalid-name
        # fd is the common name for feed dictionary
        fd = {}

        start_token_index = self.vocabulary.get_word_index(START_TOKEN)
        fd[self.go_symbols] = np.repeat(start_token_index, len(dataset))

        sentences = dataset.get_series(self.data_id, allow_none=True)

        if sentences is not None:
            inputs, weights = self.vocabulary.sentences_to_tensor(
                sentences, self.max_output)

            for placeholder, weight in zip(self.train_weights, weights):
                fd[placeholder] = weight

            for placeholder, tensor in zip(self.train_inputs, inputs):
                fd[placeholder] = tensor
        else:
            # No gold sentences (pure decoding): feed dummy <s> inputs and
            # all-ones weights so the graph can still be evaluated.
            fd[self.train_inputs[0]] = np.repeat(start_token_index,
                                                 len(dataset))
            for placeholder in self.train_weights:
                fd[placeholder] = np.ones(len(dataset))

        if not train:
            fd[self.dropout_placeholder] = 1.0

        return fd
Solves bug #192.
#tests: lint, mypy
import tensorflow as tf
import numpy as np
from neuralmonkey.nn.ortho_gru_cell import OrthoGRUCell
from neuralmonkey.vocabulary import START_TOKEN
from neuralmonkey.logging import log
from neuralmonkey.decoders.output_projection import no_deep_output
from neuralmonkey.nn.projection import linear
class Decoder(object):
    """A class that manages parts of the computation graph that are
    used for the decoding.
    """
    # pylint: disable=too-many-instance-attributes,too-many-locals,too-many-statements
    # Big decoder cannot be simpler. Not sure if refactoring
    # it into smaller units would be helpful
    # Some locals may be turned to attributes

    def __init__(self, encoders, vocabulary, data_id, name, **kwargs):
        """Creates a new instance of the decoder

        Arguments:
            encoders: List of encoders whose outputs will be decoded
            vocabulary: Output vocabulary
            data_id: Identifier of the data series fed to this decoder
            name: Name of this decoder instance (used in logging)

        Keyword arguments:
            embedding_size: Size of embedding vectors. Default 200
            max_output_len: Maximum length of the output. Default 20
            rnn_size: When projection is used or when no encoder is supplied,
                this is the size of the projected vector.
            dropout_keep_prob: Dropout keep probability. Default 1 (no dropout)
            use_attention: Boolean flag that indicates whether to use attention
                from encoders
            reuse_word_embeddings: Boolean flag specifying whether to
                reuse word embeddings. If True, word embeddings
                from the first encoder will be used
            project_encoder_outputs: Boolean flag whether to project output
                states of encoders
            output_projection: Function applied to the raw decoder output
                before the logit computation. Default no_deep_output
        """
        self.encoders = encoders
        self.vocabulary = vocabulary
        self.data_id = data_id
        self.name = name
        self.output_projection = kwargs.get("output_projection", no_deep_output)
        self.max_output = kwargs.get("max_output_len", 20)
        self.embedding_size = kwargs.get("embedding_size", 200)
        dropout_keep_prob = kwargs.get("dropout_keep_prob", 1.0)
        self.use_attention = kwargs.get("use_attention", False)
        self.reuse_word_embeddings = kwargs.get("reuse_word_embeddings", False)
        if self.reuse_word_embeddings:
            # Shared embeddings force the decoder to use the first encoder's
            # embedding dimension, overriding any user-supplied value.
            self.embedding_size = self.encoders[0].embedding_size
            if "embedding_size" in kwargs:
                log("Warning: Overriding embedding_size parameter with reused"
                    " embeddings from the encoder.", color="red")
        self.project_encoder_outputs = kwargs.get("project_encoder_outputs",
                                                  False)
        log("Initializing decoder, name: '{}'".format(self.name))
        ### Learning step
        ### TODO was here only because of scheduled sampling.
        ### needs to be refactored out
        self.learning_step = tf.get_variable(
            "learning_step", [], initializer=tf.constant_initializer(0),
            trainable=False)
        # Decoder state size: either the requested rnn_size (when projecting
        # encoder outputs, or with no encoders at all) or the sum of the
        # encoders' state sizes (when concatenating them directly).
        if self.project_encoder_outputs or len(self.encoders) == 0:
            self.rnn_size = kwargs.get("rnn_size", 200)
        else:
            if "rnn_size" in kwargs:
                log("Warning: rnn_size attribute will not be used "
                    "without encoder projection!", color="red")
            self.rnn_size = sum(e.encoded.get_shape()[1].value
                                for e in self.encoders)
        ### Initialize model
        # Defaults to the training keep probability; feed_dict() overrides
        # it with 1.0 at inference time.
        self.dropout_placeholder = tf.placeholder_with_default(
            tf.constant(dropout_keep_prob, tf.float32),
            shape=[], name="decoder_dropout_placeholder")
        state = self._initial_state()
        self.embedding_matrix = self._input_embeddings()
        self.train_inputs, self.train_weights = self._training_placeholders()
        # Gold targets are the training inputs shifted by one -- the decoder
        # never has to predict the leading <s>.
        train_targets = self.train_inputs[1:]
        self.go_symbols = tf.placeholder(tf.int32, shape=[None],
                                         name="decoder_go_symbols")
        ### Construct the computation part of the graph
        embedded_train_inputs = self._embed_inputs(self.train_inputs[:-1])
        self.train_rnn_outputs, _, train_logits = self._attention_decoder(
            embedded_train_inputs, state)
        # runtime methods and objects are used when no ground truth is provided
        # (such as during testing)
        runtime_inputs = self._runtime_inputs(self.go_symbols)
        ### Use the same variables for runtime decoding!
        tf.get_variable_scope().reuse_variables()
        self.runtime_rnn_outputs, self.runtime_rnn_states, runtime_logits = \
            self._attention_decoder(
                runtime_inputs, state, runtime_mode=True)
        self.train_logprobs = [tf.nn.log_softmax(l) for l in train_logits]
        # Index 0 is excluded from the argmax (presumably the padding
        # symbol -- TODO confirm); the +1 restores vocabulary indexing.
        self.decoded = [tf.argmax(l[:, 1:], 1) + 1 for l in runtime_logits]
        # NOTE(review): both losses are scaled by 100; this rescales
        # reported values (and gradients) uniformly.
        self.train_loss = tf.nn.seq2seq.sequence_loss(
            train_logits, train_targets, self.train_weights,
            self.vocabulary_size) * 100
        # The runtime loss still compares against the gold targets, but the
        # logits come from feeding back the decoder's own predictions.
        self.runtime_loss = tf.nn.seq2seq.sequence_loss(
            runtime_logits, train_targets, self.train_weights,
            self.vocabulary_size) * 100
        # Per-example (unaveraged) cross entropies over the batch.
        self.cross_entropies = tf.nn.seq2seq.sequence_loss_by_example(
            train_logits, train_targets, self.train_weights,
            self.vocabulary_size)
        # TODO [refactor] put runtime logits to self from the beginning
        self.runtime_logits = runtime_logits
        self.runtime_logprobs = [tf.nn.log_softmax(l) for l in runtime_logits]
        ### Summaries
        self._init_summaries()
        log("Decoder initialized.")

    @property
    def vocabulary_size(self):
        # Number of output classes for the logit layers.
        return len(self.vocabulary)

    @property
    def cost(self):
        # The optimized objective is the teacher-forced training loss.
        return self.train_loss

    def top_k_runtime_logprobs(self, k_best):
        """Return the top runtime log probabilities calculated from runtime
        logits.

        Arguments:
            k_best: How many output items to return
        """
        ## the array is of tuples ([values], [indices])
        return [tf.nn.top_k(p, k_best) for p in self.runtime_logprobs]

    def _initial_state(self):
        """Create the initial state of the decoder."""
        if len(self.encoders) == 0:
            # No encoders: start from a zero vector; it is broadcast to the
            # batch size inside _attention_decoder.
            return tf.zeros([self.rnn_size])
        encoders_out = tf.concat(1, [e.encoded for e in self.encoders])
        if self.project_encoder_outputs:
            encoders_out = self._encoder_projection(encoders_out)
        return self._dropout(encoders_out)

    def _encoder_projection(self, encoded_states):
        """Creates a projection of concatenated encoder states
        and applies a tanh activation

        Arguments:
            encoded_states: Tensor of concatenated states of input encoders
                (batch x sum(states))
        """
        input_size = encoded_states.get_shape()[1].value
        output_size = self.rnn_size
        weights = tf.get_variable(
            "encoder_projection_W", [input_size, output_size],
            initializer=tf.random_normal_initializer(stddev=0.01))
        biases = tf.get_variable(
            "encoder_projection_b",
            initializer=tf.zeros_initializer([output_size]))
        # Dropout is applied to the projection input here; _initial_state
        # applies another dropout to the projected result.
        dropped_input = self._dropout(encoded_states)
        return tf.tanh(tf.matmul(dropped_input, weights) + biases)

    def _dropout(self, var):
        """Perform dropout on a variable

        Arguments:
            var: The variable to perform the dropout on
        """
        # Keep probability comes from the dropout placeholder, so dropout is
        # effectively disabled (1.0) when feed_dict() runs with train=False.
        return tf.nn.dropout(var, self.dropout_placeholder)

    def _input_embeddings(self):
        """Create variables and operations for embedding of input words

        If we are reusing word embeddings, this function takes
        them from the first encoder
        """
        if self.reuse_word_embeddings:
            return self.encoders[0].embedding_matrix
        # NOTE In the Bahdanau paper, they say they initialized some weights
        # as orthogonal matrices, some by sampling from gauss distro with
        # stddev=0.001 and all other weight matrices as gaussian with
        # stddev=0.01. Embeddings were not among any of the special cases so
        # I assume that they initialized them as any other weight matrix.
        return tf.get_variable(
            "word_embeddings", [self.vocabulary_size, self.embedding_size],
            initializer=tf.random_normal_initializer(stddev=0.01))

    def _training_placeholders(self):
        """Defines data placeholders for training the decoder"""
        inputs = [tf.placeholder(tf.int64, [None], name="decoder_{}".format(i))
                  for i in range(self.max_output + 2)]
        # one less than inputs
        weights = [tf.placeholder(tf.float32, [None],
                                  name="decoder_padding_weights_{}".format(i))
                   for i in range(self.max_output + 1)]
        return inputs, weights

    def _get_rnn_cell(self):
        """Returns a RNNCell object for this decoder"""
        return OrthoGRUCell(self.rnn_size)

    def _collect_attention_objects(self, runtime_mode):
        """Collect attention objects from encoders."""
        if not self.use_attention:
            return []
        return [e.get_attention_object(runtime_mode)
                for e in self.encoders]

    def _embed_inputs(self, inputs):
        """Embed inputs using the decoder"s word embedding matrix

        Arguments:
            inputs: List of (batched) input words to be embedded
        """
        embedded = [tf.nn.embedding_lookup(self.embedding_matrix, o)
                    for o in inputs]
        return [self._dropout(e) for e in embedded]

    def _runtime_inputs(self, go_symbols):
        """Defines data inputs for running trained decoder

        Arguments:
            go_symbols: Tensor of go symbols. (Shape [batch])
        """
        go_embeds = tf.nn.embedding_lookup(self.embedding_matrix, go_symbols)
        # Only the first input is real; the rest are placeholders for the
        # loop function's feedback in _attention_decoder (runtime mode).
        inputs = [go_embeds]
        inputs += [None for _ in range(self.max_output)]
        return inputs

    def _loop_function(self, rnn_output):
        """Basic loop function. Projects state to logits, take the
        argmax of the logits, embed the word and perform dropout on the
        embedding vector.

        Arguments:
            rnn_output: The output of the decoder RNN
        """
        output_activation = self._logit_function(rnn_output)
        previous_word = tf.argmax(output_activation, 1)
        input_embedding = tf.nn.embedding_lookup(self.embedding_matrix,
                                                 previous_word)
        return self._dropout(input_embedding)

    def _logit_function(self, rnn_output):
        """Compute logits on the vocabulary given the state

        This variant simply linearly project the vectors to fit
        the size of the vocabulary

        Arguments:
            rnn_output: the output of the decoder RNN
                (after output projection)

        Returns:
            A Tensor of shape batch_size x vocabulary_size
        """
        return linear(self._dropout(rnn_output), self.vocabulary_size)

    #pylint: disable=too-many-arguments
    # TODO reduce the number of arguments
    def _attention_decoder(self, inputs, initial_state, runtime_mode=False,
                           scope="attention_decoder"):
        """Run the decoder RNN.

        Arguments:
            inputs: The decoder inputs. If runtime_mode=True, only the first
                input is used.
            initial_state: The initial state of the decoder.
            runtime_mode: Boolean flag whether the decoder is running in
                runtime mode (with loop function).
            scope: The variable scope to use with this function.

        Returns:
            A triple (rnn_outputs, rnn_states, output_logits).
        """
        cell = self._get_rnn_cell()
        att_objects = self._collect_attention_objects(runtime_mode)
        ## Broadcast the initial state to the whole batch if needed
        if len(initial_state.get_shape()) == 1:
            assert initial_state.get_shape()[0].value == self.rnn_size
            initial_state = tf.reshape(
                tf.tile(initial_state, tf.shape(inputs[0])[:1]),
                [-1, self.rnn_size])
        with tf.variable_scope(scope):
            ## First decoding step
            contexts = [a.attention(initial_state) for a in att_objects]
            output = self.output_projection(inputs[0], initial_state, contexts)
            _, state = cell(tf.concat(1, [inputs[0]] + contexts), initial_state)
            logit = self._logit_function(output)
            output_logits = [logit]
            rnn_outputs = [output]
            rnn_states = [initial_state, state]
            for step in range(1, len(inputs)):
                # Share the first step's variables with all later steps.
                tf.get_variable_scope().reuse_variables()
                if runtime_mode:
                    # Feed back the decoder's own previous prediction.
                    current_input = self._loop_function(output)
                else:
                    current_input = inputs[step]
                ## N-th decoding step
                contexts = [a.attention(state) for a in att_objects]
                output = self.output_projection(current_input, state, contexts)
                _, state = cell(tf.concat(1, [current_input] + contexts), state)
                logit = self._logit_function(output)
                output_logits.append(logit)
                rnn_outputs.append(output)
                rnn_states.append(state)
            if runtime_mode:
                # Emit attention alignment plots (time along the last axis)
                # for validation-time TensorBoard image summaries.
                for i, a in enumerate(att_objects):
                    alignments = tf.expand_dims(tf.transpose(
                        tf.pack(a.attentions_in_time), perm=[1, 2, 0]), -1)
                    tf.image_summary("attention_{}".format(i), alignments,
                                     collections=["summary_val_plots"],
                                     max_images=256)
        return rnn_outputs, rnn_states, output_logits

    def _init_summaries(self):
        """Initialize the summaries of the decoder

        TensorBoard summaries are collected into the following
        collections:
        - summary_train: collects statistics from the train-time
        """
        # NOTE(review): this summary reports the loss computed on the
        # decoder's own (runtime) outputs, despite the "train" prefix.
        tf.scalar_summary("train_loss_with_decoded_inputs", self.runtime_loss,
                          collections=["summary_train"])
        tf.scalar_summary("train_optimization_cost", self.train_loss,
                          collections=["summary_train"])

    def feed_dict(self, dataset, train=False):
        """Populate the feed dictionary for the decoder object

        Decoder placeholders:
            ``decoder_{x} for x in range(max_output+2)``
                Training data placeholders. Starts with <s> and ends with </s>
            ``decoder_padding_weights{x} for x in range(max_output+1)``
                Weights used for padding. (Float) tensor of ones and zeros.
                This tensor is one-item shorter than the other one since the
                decoder does not produce the first <s>.
            ``dropout_placeholder``
                Scalar placeholder for dropout probability.
                Has value 'dropout_keep_prob' from the constructor or 1
                in case we are decoding at run-time
        """
        # pylint: disable=invalid-name
        # fd is the common name for feed dictionary
        fd = {}
        start_token_index = self.vocabulary.get_word_index(START_TOKEN)
        # Every sentence in the batch starts decoding from the <s> symbol.
        fd[self.go_symbols] = np.repeat(start_token_index, len(dataset))
        sentences = dataset.get_series(self.data_id, allow_none=True)
        if sentences is not None:
            # Ground truth available: feed gold inputs and padding weights.
            inputs, weights = self.vocabulary.sentences_to_tensor(
                sentences, self.max_output)
            for placeholder, weight in zip(self.train_weights, weights):
                fd[placeholder] = weight
            for placeholder, tensor in zip(self.train_inputs, inputs):
                fd[placeholder] = tensor
        else:
            # No ground truth: feed only the first input (<s>) and all-ones
            # weights. The remaining train_inputs placeholders stay unfed --
            # presumably unused when only runtime ops are run; TODO confirm.
            fd[self.train_inputs[0]] = np.repeat(start_token_index,
                                                 len(dataset))
            for placeholder in self.train_weights:
                fd[placeholder] = np.ones(len(dataset))
        if not train:
            # Disable dropout outside training (keep probability 1).
            fd[self.dropout_placeholder] = 1.0
        return fd
|
import numpy
import math
from scipy import stats
from nab.detectors.base import AnomalyDetector
class GoodnessOfFitDetector(AnomalyDetector):
  """ This detector is an implementation of online anomaly detection using
  Relative Entropy statistic with multiple hypotheses as described in
  Figure 1 of Wang, Chengwei, et al. "Statistical Techniques for Online
  Anomaly Detection in Data Centers", Proceedings of the 8th ACM
  international conference on Autonomic computing. ACM, 2011.

  The algorithm is based on a hypothesis testing approach that compares
  observed data against multiple null hypotheses, representing frequencies of
  quantized data over a window. If the observed data is unseen and does not
  agree with any existing hypothesis, it is declared anomalous and a new
  hypothesis is created. Otherwise, it is declared non-anomalous, provided that
  the accepted hypothesis occurs frequently enough. Decision to accept/reject a
  null hypothesis is based on relative entropy compared against a threshold
  of acceptable false negative probability determined by the chi-squared
  distribution. Step-by-step details are given in code comments and parameters
  have been tuned for best performance of NAB.
  """

  def __init__(self, *args, **kwargs):
    """ Variable names are kept consistent with the algorithm's pseudo code
    in the paper."""
    super(GoodnessOfFitDetector, self).__init__(*args, **kwargs)

    # Timeseries of the metric on which anomaly needs to be detected
    self.util = []
    # Number of bins into which util is to be quantized (kept as a float so
    # the stepSize division below is true division under Python 2)
    self.N_bins = 5.0
    # Window size
    self.W = 52
    # Threshold against which the test statistic is compared. It is set to
    # the point in the chi-squared cdf with N_bins - 1 degrees of freedom
    # that corresponds to 0.99.
    self.T = stats.chi2.isf(0.01, self.N_bins - 1)
    # Threshold to determine if hypothesis has occurred frequently enough
    self.c_th = 1
    # Tracks the current number of null hypotheses
    self.m = 0
    # Step size in time series quantization
    self.stepSize = (self.inputMax - self.inputMin) / self.N_bins
    # List of lists where P[i] indicates the empirical frequency of the ith
    # hypothesis.
    self.P = []
    # List where c[i] tracks the number of windows that agree with P[i]
    self.c = []

  def handleRecord(self, inputData):
    """ Returns a list of [anomalyScore] that takes a binary value of 0 or 1.
    The anomalyScore is determined based on the agreement of the observed data
    with existing hypotheses that occur frequently enough. Threshold to
    accept/reject a null hypothesis and declare an anomaly is determined by
    comparing the relative entropy of the observed data and all null
    hypotheses against the point on chi-squared distribution that
    corresponds to 0.99 (probability of incorrectly rejecting a
    null-hypothesis).
    """
    anomalyScore = 0.0
    self.util.append(inputData["value"])

    # stepSize is 0 when self.inputMin == self.inputMax, i.e. all data points
    # are identical (e.g. artificialNoAnomaly/art_flatline.csv). Every point
    # in such files is declared non-anomalous.
    if self.stepSize != 0.0:
      # All points in the first window are declared non-anomalous and
      # anomaly detection begins once the number of data points seen
      # reaches the window length.
      if len(self.util) >= self.W:
        # Extract the current window
        util_current = self.util[-self.W:]
        # Quantize window data points into discretized bin values
        B_current = [math.ceil((c - self.inputMin) / self.stepSize) for c in
                     util_current]
        # Create a histogram of empirical frequencies for the current window
        # using B_current
        P_hat = numpy.histogram(B_current,
                                bins=self.N_bins,
                                range=(0, self.N_bins),
                                density=True)[0]
        # The very first window bootstraps the first null hypothesis
        if self.m == 0:
          self.P.append(P_hat)
          self.c.append(1)
          self.m = 1
        else:
          index = self.getAgreementHypothesis(P_hat)
          # Check if any null hypothesis is accepted or rejected
          if index != -1:
            # If hypothesis accepted, update counter for hypothesis that
            # tracks number of windows that have agreed to it so far.
            self.c[index] += 1
            # Check if hypothesis accepted occurs at least as frequently as
            # the given threshold. If not, classify data point as anomalous.
            if self.c[index] <= self.c_th:
              anomalyScore = 1.0
          else:
            # If all null hypotheses rejected, create new hypothesis based
            # on current window and update variables tracking hypothesis
            # counts.
            anomalyScore = 1.0
            self.P.append(P_hat)
            self.c.append(1)
            self.m += 1
    return [anomalyScore]

  def getAgreementHypothesis(self, P_hat):
    """This function computes the multinomial goodness-of-fit test. It
    calculates the relative entropy test statistic between P_hat and all `m`
    null hypotheses and compares it against the threshold `T` based on cdf
    of chi-squared distribution. The test relies on the observation that if
    the null hypothesis P is true, then as the number of samples grows the
    relative entropy converges to a chi-squared distribution with K-1
    degrees of freedom.

    The function returns the index of the hypothesis that agrees with
    minimum relative entropy. If all hypotheses disagree, it returns -1.

    @param P_hat (list) Empirical frequencies of the current window.

    @return index (int) Index of the hypothesis with the minimum test
    statistic.
    """
    index = -1
    minEntropy = float("inf")
    for i in range(self.m):
      entropy = 2 * self.W * stats.entropy(P_hat, self.P[i])
      if entropy < self.T and entropy < minEntropy:
        minEntropy = entropy
        index = i
    return index
review feedback
# ----------------------------------------------------------------------
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import math
import numpy
from scipy import stats
from nab.detectors.base import AnomalyDetector
class GoodnessOfFitDetector(AnomalyDetector):
  """Online anomaly detection via relative-entropy goodness-of-fit tests.

  Implements the multiple-hypothesis scheme of Figure 1 in Wang, Chengwei,
  et al., "Statistical Techniques for Online Anomaly Detection in Data
  Centers", Proceedings of the 8th ACM international conference on
  Autonomic computing, ACM, 2011.

  Each sliding window of quantized data is tested against every stored
  null hypothesis with a relative-entropy statistic. A window that no
  hypothesis accepts (statistic below a chi-squared threshold) is flagged
  as anomalous and becomes a new hypothesis; an accepted window is only
  flagged while its hypothesis is still rare. Parameters were tuned for
  best performance on NAB.
  """

  def __init__(self, *args, **kwargs):
    """ Variable names are kept consistent with algorithm's pseudo code in
    the paper."""
    super(GoodnessOfFitDetector, self).__init__(*args, **kwargs)
    # Raw time series of the monitored metric.
    self.util = []
    # Number of quantization bins (float so the step-size division below is
    # true division under Python 2).
    self.N_bins = 5.0
    # Sliding-window length.
    self.W = 52
    # Acceptance threshold: the 0.99 point of the chi-squared distribution
    # with N_bins - 1 degrees of freedom.
    self.T = stats.chi2.isf(0.01, self.N_bins - 1)
    # Minimum number of agreeing windows for a hypothesis to be "frequent".
    self.c_th = 1
    # Number of null hypotheses stored so far.
    self.m = 0
    # Quantization step for mapping values onto bins.
    self.stepSize = (self.inputMax - self.inputMin) / self.N_bins
    # P[i]: empirical bin frequencies of the i-th hypothesis.
    self.P = []
    # c[i]: number of windows that have agreed with P[i].
    self.c = []

  def handleRecord(self, inputData):
    """Return [anomalyScore] with a binary score of 0.0 or 1.0.

    A point is anomalous when the current window either disagrees with
    every stored hypothesis, or agrees only with a hypothesis that has not
    yet occurred frequently enough (see c_th).
    """
    self.util.append(inputData["value"])

    # stepSize == 0 means inputMin == inputMax (flat data, e.g.
    # artificalNoAnomaly/art_flatline.csv): nothing is ever anomalous.
    if self.stepSize == 0.0:
      return [0.0]

    # Warm-up: the first W points are never flagged.
    if len(self.util) < self.W:
      return [0.0]

    # Quantize the most recent window into bin indices.
    window = self.util[-self.W:]
    binned = [math.ceil((v - self.inputMin) / self.stepSize)
              for v in window]
    # Empirical bin frequencies of the current window.
    P_hat = numpy.histogram(binned,
                            bins=self.N_bins,
                            range=(0,self.N_bins),
                            density=True)[0]

    if self.m == 0:
      # Bootstrap the very first hypothesis; never anomalous.
      self.P.append(P_hat)
      self.c.append(1)
      self.m = 1
      return [0.0]

    index = self.getAgreementHypothesis(P_hat)
    if index == -1:
      # Every hypothesis rejected: flag and remember this window.
      self.P.append(P_hat)
      self.c.append(1)
      self.m += 1
      return [1.0]

    # Accepted: anomalous only while the hypothesis is still rare.
    self.c[index] += 1
    if self.c[index] <= self.c_th:
      return [1.0]
    return [0.0]

  def getAgreementHypothesis(self, P_hat):
    """Run the multinomial goodness-of-fit test against all hypotheses.

    Computes the relative-entropy statistic 2 * W * D(P_hat || P[i]) for
    each stored hypothesis; as the sample count grows this statistic
    converges to a chi-squared distribution with K-1 degrees of freedom
    when the null hypothesis holds.

    @param P_hat (list) Empirical frequencies of the current window.

    @return (int) Index of the accepted hypothesis with the smallest
    statistic, or -1 when every hypothesis is rejected (statistic >= T).
    """
    best_index = -1
    best_statistic = float("inf")
    for i in xrange(self.m):
      statistic = 2 * self.W * stats.entropy(P_hat, self.P[i])
      if statistic < self.T and statistic < best_statistic:
        best_statistic = statistic
        best_index = i
    return best_index
|
#!/usr/bin/env python
# http://stackoverflow.com/a/14050282
def check_antipackage():
    """Ensure the 'antipackage' helper is importable; exit(1) when absent.

    Picks the importer-introspection API matching the running interpreter:
    pkgutil on Python 2, importlib.find_loader on Python 3.0-3.3, and
    importlib.util.find_spec on Python 3.4+.
    """
    from sys import version_info
    major_minor = version_info[:2]
    if major_minor < (3, 0):
        # Python 2
        from pkgutil import find_loader
        present = find_loader('antipackage') is not None
    elif major_minor <= (3, 3):
        # Python 3.0 - 3.3
        from importlib import find_loader
        present = find_loader('antipackage') is not None
    else:
        # Python 3.4+
        from importlib import util
        present = util.find_spec('antipackage') is not None
    if present:
        return
    print('Install missing package "antipackage"')
    print('Example: pip install git+https://github.com/ellisonbg/antipackage.git#egg=antipackage')
    from sys import exit
    exit(1)
check_antipackage()
# ref: https://github.com/ellisonbg/antipackage
import antipackage
from github.appscode.libbuild import libbuild
import os
import os.path
import random
import string
import subprocess
import sys
from os.path import expandvars
import yaml
from collections import Counter
# Root of the primary repo this script drives, inside $GOPATH.
libbuild.REPO_ROOT = expandvars('$GOPATH') + '/src/github.com/kubedb/cli'
# Database operators that each live in their own github.com/kubedb repo.
DATABASES = ['postgres', 'elasticsearch', 'etcd', 'mysql', 'mongodb', 'memcached', 'redis']
# Every repo touched by a full revendoring run.
REPO_LIST = DATABASES + ['cli', 'operator', 'apimachinery']
# Branch pins for the shared appscode helper libraries.
KUTIL_VERSION = 'release-8.0'
KUBEMON_VERSION = 'release-8.0'
# Dependency entries forced verbatim into each repo's glide.yaml by
# glide_mod(); any existing entry for the same package is replaced wholesale.
FORCED_DEPS = [
    {
      'package': 'github.com/cpuguy83/go-md2man',
      'version': 'v1.0.8',
    },
    {
      'package': 'github.com/json-iterator/go',
      'version': '1.1.5',
    },
    {
      'package': 'github.com/coreos/prometheus-operator',
      'version': 'v0.23.2',
    },
    {
      "package": "k8s.io/api",
      "version": "kubernetes-1.11.3"
    },
    {
      "package": "k8s.io/apiextensions-apiserver",
      "version": "kubernetes-1.11.3"
    },
    {
      "package": "k8s.io/apimachinery",
      "repo": "https://github.com/pharmer/apimachinery.git",
      "vcs": "git",
      "version": "release-1.11.3"
    },
    {
      "package": "k8s.io/apiserver",
      "repo": "https://github.com/pharmer/apiserver.git",
      "vcs": "git",
      "version": "release-1.11.3"
    },
    {
      "package": "k8s.io/client-go",
      "version": "kubernetes-1.11.3"
    },
    {
      "package": "k8s.io/kube-openapi",
      "version": "master"
    },
]
def die(status):
    """Terminate the process with *status* when it is truthy (non-zero)."""
    if not status:
        return
    sys.exit(status)
def call(cmd, stdin=None, cwd=libbuild.REPO_ROOT, eoe=True):
    """Echo *cmd* and run it through the shell in *cwd*.

    Arguments:
        cmd: Command line; environment variables are expanded first.
        stdin: Optional stdin for the subprocess.
        cwd: Working directory (defaults to the repo root).
        eoe: "exit on error" -- when True, a non-zero status terminates
            the whole script via die().

    Returns:
        The subprocess exit status. (Previously the function implicitly
        returned None on the eoe=True success path; it now returns the
        status unconditionally, which is backward-compatible since
        callers only inspect the return value with eoe=False.)
    """
    print(cwd + ' $ ' + cmd)
    status = subprocess.call([expandvars(cmd)], shell=True, stdin=stdin, cwd=cwd)
    if eoe:
        die(status)
    return status
def check_output(cmd, stdin=None, cwd=libbuild.REPO_ROOT):
    """Echo *cmd*, run it through the shell in *cwd*, and return its stdout."""
    print(cwd + ' $ ' + cmd)
    expanded = expandvars(cmd)
    return subprocess.check_output([expanded], shell=True, stdin=stdin, cwd=cwd)
def git_branch_exists(branch, cwd=libbuild.REPO_ROOT):
    """Return True when a local branch named *branch* exists in *cwd*."""
    probe = 'git show-ref --quiet refs/heads/{0}'.format(branch)
    return call(probe, eoe=False, cwd=cwd) == 0
def git_checkout(branch, cwd=libbuild.REPO_ROOT):
    """Fetch all remotes and tags, then switch to *branch* (creating it
    locally when it does not exist yet)."""
    call('git fetch --all --prune', cwd=cwd)
    call('git fetch --tags', cwd=cwd)
    if not git_branch_exists(branch, cwd):
        call('git checkout -b {0}'.format(branch), cwd=cwd)
    else:
        call('git checkout {0}'.format(branch), cwd=cwd)
def git_requires_commit(cwd=libbuild.REPO_ROOT):
    """Return True unless the only modified file in *cwd* is glide.lock."""
    diff = check_output('git diff --name-only', cwd=cwd).strip()
    changed_files = diff.split('\n')
    return Counter(changed_files) != Counter(['glide.lock'])
def glide_mod(glide_config, changes, forced_deps=None):
    """Apply dependency pins to a parsed glide.yaml mapping, in place.

    Arguments:
        glide_config: Parsed glide.yaml (a dict with an 'import' list of
            {'package': ..., 'version': ...} entries).
        changes: Mapping of package name -> new version; matching entries
            get their 'version' updated.
        forced_deps: Optional list of dependency entries to force verbatim
            (each replaces the whole entry for the same package). Defaults
            to the module-level FORCED_DEPS, preserving prior behaviour.
    """
    if forced_deps is None:
        forced_deps = FORCED_DEPS
    for dep in glide_config['import']:
        if dep['package'] in changes:
            dep['version'] = changes[dep['package']]
    for forced in forced_deps:
        for idx, dep in enumerate(glide_config['import']):
            if dep['package'] == forced['package']:
                # Replace the whole entry so 'repo'/'vcs' keys are applied too.
                glide_config['import'][idx] = forced
                break
def glide_write(f, glide_config):
    """Rewrite the open glide.yaml file *f* from *glide_config*.

    The 'package' key is emitted first (glide convention) and the rest of
    the mapping is serialized by PyYAML. *glide_config* is left unchanged
    on return (the popped key is put back).
    """
    f.seek(0)
    pkg = glide_config.pop('package')
    rendered = 'package: ' + pkg + '\n' + yaml.dump(glide_config, default_flow_style=False)
    f.write(rendered)
    f.truncate()
    glide_config['package'] = pkg
class Kitten(object):
    """Drives the revendoring workflow across the kubedb repos.

    Each instance gets a random 6-character seed used to name the shared
    'api-<seed>' branch that is created and pushed in every repo.
    """

    def __init__(self):
        # Random suffix so every invocation works on a fresh branch.
        self.seed = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
        # Dependency pins applied to every repo's glide.yaml.
        self.master_deps = {}
        for k in REPO_LIST:
            self.master_deps['github.com/kubedb/' + k] = 'master'
        self.master_deps['github.com/appscode/kutil'] = KUTIL_VERSION
        self.master_deps['github.com/appscode/kube-mon'] = KUBEMON_VERSION
        # Fixed: was a Python 2 print *statement*; the function form works on
        # both Python 2 and 3 and matches the rest of the file.
        print(self.master_deps)

    def _revendor_repo(self, repo_name):
        """Shared implementation: branch off master, pin deps in glide.yaml,
        run glide, then commit and push when anything beyond glide.lock
        changed. (The three public methods below were byte-identical
        copies of this body.)"""
        revendor_branch = 'api-{0}'.format(self.seed)
        repo = libbuild.GOPATH + '/src/github.com/kubedb/' + repo_name
        print(repo)
        print('----------------------------------------------------------------------------------------')
        call('git reset HEAD --hard', cwd=repo)
        call('git clean -xfd', cwd=repo)
        git_checkout('master', cwd=repo)
        call('git pull --rebase origin master', cwd=repo)
        git_checkout(revendor_branch, cwd=repo)
        with open(repo + '/glide.yaml', 'r+') as glide_file:
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input; glide.yaml here is developer-controlled.
            glide_config = yaml.load(glide_file)
            glide_mod(glide_config, self.master_deps)
            glide_write(glide_file, glide_config)
        call('glide slow', cwd=repo)
        if git_requires_commit(cwd=repo):
            call('git add --all', cwd=repo)
            call('git commit -s -a -m "Revendor api"', cwd=repo, eoe=False)
            call('git push origin {0}'.format(revendor_branch), cwd=repo)
        else:
            # Only glide.lock changed: throw the run away.
            call('git reset HEAD --hard', cwd=repo)

    def revendor_db(self, repo_name):
        """Revendor one database operator repo."""
        self._revendor_repo(repo_name)

    def revendor_server_binary(self, repo_name):
        """Revendor a server binary repo (e.g. 'operator')."""
        self._revendor_repo(repo_name)

    def revendor_cli(self):
        """Revendor the kubedb 'cli' repo."""
        self._revendor_repo('cli')
def revendor(comp=None):
    """Revendor the requested component.

    comp None  -> every database repo;
    comp 'all' -> databases plus 'operator' and 'cli';
    otherwise a single known component. Unknown names are silently
    ignored (matching the original behaviour).
    """
    kitten = Kitten()
    if comp is None or comp == 'all':
        for db in DATABASES:
            kitten.revendor_db(db)
        if comp == 'all':
            kitten.revendor_server_binary('operator')
            kitten.revendor_cli()
    elif comp in DATABASES:
        kitten.revendor_db(comp)
    elif comp == 'operator':
        kitten.revendor_server_binary(comp)
    elif comp == 'cli':
        kitten.revendor_cli()
if __name__ == "__main__":
    # sys.argv always contains at least the script name, so the original
    # trailing `else: print('Usage ...')` branch was unreachable dead code
    # and has been removed.
    if len(sys.argv) == 1:
        revendor(None)
    else:
        # http://stackoverflow.com/a/834451
        # http://stackoverflow.com/a/817296
        revendor(*sys.argv[1:])
Update revendor.py script
#!/usr/bin/env python
# http://stackoverflow.com/a/14050282
def check_antipackage():
    """Exit the process with status 1 unless 'antipackage' is importable.

    Chooses the importer-introspection API appropriate for the running
    interpreter version; see http://stackoverflow.com/a/14050282.
    """
    from sys import version_info
    sys_version = version_info[:2]
    found = True
    if sys_version < (3, 0):
        # 'python 2'
        from pkgutil import find_loader
        found = find_loader('antipackage') is not None
    elif sys_version <= (3, 3):
        # 'python <= 3.3'
        from importlib import find_loader
        found = find_loader('antipackage') is not None
    else:
        # 'python >= 3.4'
        from importlib import util
        found = util.find_spec('antipackage') is not None
    if not found:
        print('Install missing package "antipackage"')
        print('Example: pip install git+https://github.com/ellisonbg/antipackage.git#egg=antipackage')
        from sys import exit
        exit(1)
check_antipackage()
# ref: https://github.com/ellisonbg/antipackage
import antipackage
from github.appscode.libbuild import libbuild
import os
import os.path
import random
import string
import subprocess
import sys
from os.path import expandvars
import yaml
from collections import Counter
# Root of the kubedb/cli checkout inside GOPATH; default cwd for helpers below.
libbuild.REPO_ROOT = expandvars('$GOPATH') + '/src/github.com/kubedb/cli'
# Database components that can be revendored individually.
DATABASES = ['postgres', 'elasticsearch', 'etcd', 'mysql', 'mongodb', 'memcached', 'redis']
# Every repository under github.com/kubedb that participates in revendoring.
REPO_LIST = DATABASES + ['cli', 'operator', 'apimachinery']
# Version pins for the appscode helper libraries.
KUTIL_VERSION = 'release-8.0'
KUBEMON_VERSION = 'release-8.0'
# Dependencies that are forced to exactly these entries in every glide.yaml,
# replacing whatever the repo currently declares (applied by glide_mod()).
FORCED_DEPS = [
    {
        'package': 'github.com/cpuguy83/go-md2man',
        'version': 'v1.0.8',
    },
    {
        'package': 'github.com/json-iterator/go',
        'version': '1.1.5',
    },
    {
        'package': 'github.com/coreos/prometheus-operator',
        'version': 'v0.23.2',
    },
    {
        "package": "k8s.io/api",
        "version": "kubernetes-1.11.3"
    },
    {
        "package": "k8s.io/apiextensions-apiserver",
        "version": "kubernetes-1.11.3"
    },
    {
        "package": "k8s.io/apimachinery",
        "repo": "https://github.com/pharmer/apimachinery.git",
        "vcs": "git",
        "version": "release-1.11.3"
    },
    {
        "package": "k8s.io/apiserver",
        "repo": "https://github.com/pharmer/apiserver.git",
        "vcs": "git",
        "version": "release-1.11.3"
    },
    {
        "package": "k8s.io/client-go",
        "version": "kubernetes-1.11.3"
    },
    {
        "package": "k8s.io/kube-openapi",
        "version": "master"
    },
]
def die(status):
    """Exit the process with `status` when it is truthy; otherwise do nothing."""
    if not status:
        return
    sys.exit(status)
def call(cmd, stdin=None, cwd=libbuild.REPO_ROOT, eoe=True):
    """Run `cmd` through the shell in `cwd`, echoing the command first.

    Environment variables in `cmd` are expanded before execution.
    eoe ("exit on error"): when True a non-zero status terminates the whole
    script via die(); when False the status is simply returned.

    Returns the subprocess exit status.
    """
    print(cwd + ' $ ' + cmd)
    status = subprocess.call([expandvars(cmd)], shell=True, stdin=stdin, cwd=cwd)
    if eoe:
        die(status)
    # CONSISTENCY FIX: always return the status. The original returned None on
    # the eoe=True path, making the result unusable for callers there.
    return status
def check_output(cmd, stdin=None, cwd=libbuild.REPO_ROOT):
    """Run `cmd` through the shell in `cwd` and return its captured stdout."""
    print(cwd + ' $ ' + cmd)
    expanded = expandvars(cmd)
    return subprocess.check_output([expanded], shell=True, stdin=stdin, cwd=cwd)
def git_branch_exists(branch, cwd=libbuild.REPO_ROOT):
    """Return True when a local branch named `branch` exists in `cwd`."""
    status = call('git show-ref --quiet refs/heads/{0}'.format(branch), eoe=False, cwd=cwd)
    return status == 0
def git_checkout(branch, cwd=libbuild.REPO_ROOT):
    """Fetch remotes and tags, then switch to `branch`, creating it if missing."""
    call('git fetch --all --prune', cwd=cwd)
    call('git fetch --tags', cwd=cwd)
    if not git_branch_exists(branch, cwd):
        call('git checkout -b {0}'.format(branch), cwd=cwd)
    else:
        call('git checkout {0}'.format(branch), cwd=cwd)
def git_requires_commit(cwd=libbuild.REPO_ROOT):
    """Return True when the working tree changed more than just glide.lock."""
    diff = check_output('git diff --name-only', cwd=cwd)
    changed_files = diff.strip().split('\n')
    # A diff consisting solely of glide.lock is not worth committing.
    return Counter(changed_files) != Counter(['glide.lock'])
def glide_mod(glide_config, changes):
    """Mutate `glide_config` in place.

    Pins the version of every dependency listed in `changes`, then replaces
    any dependency that appears in FORCED_DEPS with the forced entry.
    """
    for dep in glide_config['import']:
        pkg = dep['package']
        if pkg in changes:
            dep['version'] = changes[pkg]
    for forced in FORCED_DEPS:
        for idx, dep in enumerate(glide_config['import']):
            if dep['package'] == forced['package']:
                glide_config['import'][idx] = forced
                break
def glide_write(f, glide_config):
    """Write `glide_config` back to the already-open glide.yaml file `f`.

    The 'package' key is emitted first on its own line, followed by the yaml
    dump of the remaining keys; the key is restored on the dict afterwards so
    the caller's config object is left unchanged.
    """
    f.seek(0)
    pkg = glide_config.pop('package')
    try:
        out = 'package: ' + pkg + '\n' + yaml.dump(glide_config, default_flow_style=False)
        f.write(out)
        f.truncate()
    finally:
        # ROBUSTNESS FIX: restore the popped key even if yaml.dump or the file
        # write raises, so glide_config is never left without 'package'.
        glide_config['package'] = pkg
class Kitten(object):
    """Drives revendoring of kubedb repos: pins master deps in glide.yaml and
    pushes the result to a uniquely named 'api-<seed>' branch."""

    def __init__(self):
        # Random suffix so each run pushes to a unique branch name.
        self.seed = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
        self.master_deps = {}
        for k in REPO_LIST:
            self.master_deps['github.com/kubedb/' + k] = 'master'
        self.master_deps['github.com/appscode/kutil'] = KUTIL_VERSION
        self.master_deps['github.com/appscode/kube-mon'] = KUBEMON_VERSION
        # BUG FIX: was a Python 2 print statement ("print self.master_deps"),
        # a SyntaxError under Python 3; the rest of the file uses print().
        print(self.master_deps)

    def revendor_repo(self, repo_name):
        """Reset the repo, create the revendor branch, update glide deps,
        run 'glide slow' and push the branch if anything real changed."""
        revendor_branch = 'api-{0}'.format(self.seed)
        repo = libbuild.GOPATH + '/src/github.com/kubedb/' + repo_name
        print(repo)
        print('----------------------------------------------------------------------------------------')
        call('git reset HEAD --hard', cwd=repo)
        call('git clean -xfd', cwd=repo)
        git_checkout('master', cwd=repo)
        call('git pull --rebase origin master', cwd=repo)
        git_checkout(revendor_branch, cwd=repo)
        with open(repo + '/glide.yaml', 'r+') as glide_file:
            glide_config = yaml.load(glide_file)
            glide_mod(glide_config, self.master_deps)
            glide_write(glide_file, glide_config)
        call('glide slow', cwd=repo)
        if git_requires_commit(cwd=repo):
            call('git add --all', cwd=repo)
            call('git commit -s -a -m "Revendor api"', cwd=repo, eoe=False)
            call('git push origin {0}'.format(revendor_branch), cwd=repo)
        else:
            # Nothing beyond glide.lock changed: throw the churn away.
            call('git reset HEAD --hard', cwd=repo)
def revendor(comp=None):
    """Revendor one component or a set of components.

    comp=None revendors all databases; 'all' also revendors operator and cli;
    otherwise comp must be a single repo name from REPO_LIST.
    """
    cat = Kitten()
    if comp is None:
        for name in DATABASES:
            cat.revendor_repo(name)
    elif comp == 'all':
        for name in DATABASES:
            cat.revendor_repo(name)
        cat.revendor_repo('operator')
        cat.revendor_repo('cli')
    elif comp in DATABASES:
        # BUG FIX: previously called cat.revendor_db(comp), a method that does
        # not exist on Kitten (only revendor_repo does) -> AttributeError.
        cat.revendor_repo(comp)
    elif comp in ('operator', 'cli', 'apimachinery'):
        cat.revendor_repo(comp)
if __name__ == "__main__":
    # sys.argv always contains at least the script name, so the original
    # trailing "else: print usage" branch was unreachable and is dropped.
    if len(sys.argv) == 1:
        # No component given: revendor every database.
        revendor(None)
    else:
        # Forward all CLI arguments as positional parameters.
        # http://stackoverflow.com/a/834451
        # http://stackoverflow.com/a/817296
        revendor(*sys.argv[1:])
|
from flask import render_template, flash, redirect, jsonify, request
from app import app
import subprocess
import config
@app.route('/', methods=['GET','POST'])
@app.route('/index', methods=['GET','POST'])
def index():
    """Serve the landing page (mapped to both / and /index)."""
    return render_template('index.html')
@app.route('/about', methods=['GET'])
def about():
    """Serve the static about page."""
    return render_template('about.html')
@app.route('/yangre', methods=['GET','POST'])
def yangre():
    """Run the posted pattern against the posted content with both w3cgrep
    and yanggre, returning both verdicts as JSON.

    Form fields read: 'content', 'pattern', 'pattern_nb', 'inverted'.
    NOTE(review): writes to a fixed filename "w3c_input" in the server's cwd;
    concurrent requests would race on it -- confirm single-worker deployment.
    """
    with open("w3c_input", "w") as f: # writing the test string to file, as required by w3cgrep
        f.write(request.form['content'])
        f.write("\n")
    # python 3.5 dependency. To get stdout as a string we need the universal_newlines=True parameter
    # in python 3.6 this changes to encoding='utf8'
    w3c_input_obj = subprocess.run([config.W3CGREP_PATH,str(request.form['pattern']),"w3c_input"],
                                   stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True);
    # Empty stdout is reported as 1, any output as 0.
    if not w3c_input_obj.stdout:
        w3c_input_result = 1
    else:
        w3c_input_result = 0
    yangre_input_obj = {}
    if request.form['inverted'] == "true":
        # Inverted test: flip the w3c verdict and pass -i to yanggre.
        w3c_input_result = int(not(w3c_input_result))
        # python 3.5 dependency. To get stdout as a string we need the universal_newlines=True parameter
        # in python 3.6 this changes to encoding='utf8'
        yangre_input_obj = subprocess.run([config.YANGGRE_PATH, "-p", str(request.form['pattern']), "-i",
                                           str(request.form['content'])],
                                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True);
    else:
        # python 3.5 dependency. To get stdout as a string we need the universal_newlines=True parameter
        # in python 3.6 this changes to encoding='utf8'
        yangre_input_obj = subprocess.run([config.YANGGRE_PATH, "-p", str(request.form['pattern']),
                                           str(request.form['content'])],
                                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True);
    if w3c_input_obj.returncode == 1:
        w3c_input_result = -1 # I used -1 as error code
    return jsonify({'pattern_nb' : request.form['pattern_nb'],
                    'w3cgrep_result' : w3c_input_result,
                    'w3cgrep_output' : w3c_input_obj.stdout,
                    'yangre_result' : yangre_input_obj.returncode,
                    'yangre_output': yangre_input_obj.stdout });
Testing quotes for yangre
from flask import render_template, flash, redirect, jsonify, request
from app import app
import subprocess
import config
@app.route('/', methods=['GET','POST'])
@app.route('/index', methods=['GET','POST'])
def index():
    """Render the main page; registered for both / and /index."""
    return render_template('index.html')
@app.route('/about', methods=['GET'])
def about():
    """Render the about page."""
    return render_template('about.html')
@app.route('/yangre', methods=['GET','POST'])
def yangre():
    """Evaluate the posted pattern with both w3cgrep and yanggre and return
    the two verdicts as JSON.

    Form fields read: 'content', 'pattern', 'pattern_nb', 'inverted'.
    NOTE(review): writes to a fixed filename "w3c_input" in the server's cwd;
    concurrent requests would race on it -- confirm single-worker deployment.
    """
    with open("w3c_input", "w") as f: # writing the test string to file, as required by w3cgrep
        f.write(request.form['content'])
        f.write("\n")
    # python 3.5 dependency. To get stdout as a string we need the universal_newlines=True parameter
    # in python 3.6 this changes to encoding='utf8'
    w3c_input_obj = subprocess.run([config.W3CGREP_PATH,str(request.form['pattern']),"w3c_input"],
                                   stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True);
    # Empty stdout is reported as 1, any output as 0.
    if not w3c_input_obj.stdout:
        w3c_input_result = 1
    else:
        w3c_input_result = 0
    yangre_input_obj = {}
    if request.form['inverted'] == "true":
        # Inverted test: flip the w3c verdict and pass -i to yanggre.
        w3c_input_result = int(not(w3c_input_result))
        # python 3.5 dependency. To get stdout as a string we need the universal_newlines=True parameter
        # in python 3.6 this changes to encoding='utf8'
        yangre_input_obj = subprocess.run([config.YANGGRE_PATH, "-p", str(request.form['pattern']), "-i",
                                           str(request.form['content'])],
                                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True);
    else:
        # python 3.5 dependency. To get stdout as a string we need the universal_newlines=True parameter
        # in python 3.6 this changes to encoding='utf8'
        # NOTE(review): the list form of subprocess.run uses shell=False, so
        # the surrounding double quotes added here are passed literally as
        # part of the pattern argument -- confirm yanggre strips/expects them.
        yangre_input_obj = subprocess.run([config.YANGGRE_PATH, "-p", "\"" + str(request.form['pattern']) + "\"",
                                           str(request.form['content'])],
                                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True);
    if w3c_input_obj.returncode == 1:
        w3c_input_result = -1 # I used -1 as error code
    return jsonify({'pattern_nb' : request.form['pattern_nb'],
                    'w3cgrep_result' : w3c_input_result,
                    'w3cgrep_output' : w3c_input_obj.stdout,
                    'yangre_result' : yangre_input_obj.returncode,
                    'yangre_output': yangre_input_obj.stdout });
|
import pickle
from Player import *
class TTTBoard:
    """A 3x3 tic-tac-toe board. Player 1 plays 'X', player 2 plays 'O'.

    The board is a flat list of SIZE*SIZE single-character strings, where
    ' ' marks an empty square.
    """

    def __init__(self):
        """Initialize an empty board with no players assigned."""
        self.SIZE = 3
        self.board = [' '] * (self.SIZE * self.SIZE)
        self.players = [None, None]

    def __str__(self):
        """Return the board as a string; empty squares show their move index."""
        ret = "\n"
        for i in range(len(self.board)):
            ret += str(i) if self.board[i] == " " else self.board[i]
            if (i + 1) % 3 == 0:
                ret += "\n"
        ret += "\n"
        return ret

    def legalMove(self, playernum, move):
        """Return True when `move` is currently legal for the player.

        BUG FIX: the body referenced the undefined name `playerNum`
        (NameError at every call); it now uses the actual parameter.
        """
        return move in self.legalMoves(playernum)

    def legalMoves(self, playerNum):
        """Return the list of open board positions (same for either player)."""
        return [m for m in range(len(self.board)) if self.board[m] == ' ']

    def makeMove(self, playerNum, pos):
        """Place the player's mark at `pos`.

        Returns False (without modifying the board) when `pos` is out of
        range or already occupied, True otherwise.
        """
        if pos not in range(len(self.board)) or self.board[pos] != ' ':
            return False
        self.board[pos] = 'X' if playerNum == 1 else 'O'
        return True

    def rowWin(self, c):
        """Return True when char `c` fills any row."""
        for i in range(self.SIZE):
            if self.board[i * self.SIZE:(i + 1) * self.SIZE] == [c] * self.SIZE:
                return True
        return False

    def colWin(self, c):
        """Return True when char `c` fills any column."""
        for i in range(self.SIZE):
            column = [self.board[j * self.SIZE + i] for j in range(self.SIZE)]
            if column == [c] * self.SIZE:
                return True
        return False

    def diagWin(self, c):
        """Return True when char `c` fills either diagonal."""
        diag = [self.board[i * self.SIZE + i] for i in range(self.SIZE)]
        offdiag = [self.board[(i + 1) * self.SIZE - 1 - i] for i in range(self.SIZE)]
        return diag == [c] * self.SIZE or offdiag == [c] * self.SIZE

    def hasWonPlayer(self, c):
        """Return True when the player using char `c` has any winning line."""
        return self.rowWin(c) or self.colWin(c) or self.diagWin(c)

    def hasWon(self, playerNum):
        """Return True when the numbered player (1='X', 2='O') has won."""
        if playerNum == 1:
            return self.hasWonPlayer("X")
        return self.hasWonPlayer("O")

    def gameOver(self):
        """Return True when someone has won or the board is full."""
        if self.hasWonPlayer("X") or self.hasWonPlayer("O"):
            return True
        return ' ' not in self.board

    def reset(self):
        """Clear the board for a new game."""
        self.board = [' '] * (self.SIZE * self.SIZE)

    def hostGame(self, player1, player2):
        """Host a game of tic tac toe between two players and print results.

        Players must expose a `chooseMove(board)` method and a `num`
        attribute (1 or 2).
        """
        self.players = [player1, player2]
        turn, wait = player1, player2  # whose move it is / who waits
        winner = 0
        rounds = 0
        while winner == 0 and rounds < self.SIZE * self.SIZE:
            print(self)
            pos = turn.chooseMove(self)
            # BUG FIX: makeMove expects the player *number*; the Player object
            # was passed before, so `playerNum == 1` was always False and
            # every mark placed was 'O'.
            self.makeMove(turn.num, pos)
            if self.hasWon(turn.num):
                winner = turn.num
            turn, wait = wait, turn
            rounds += 1
        # BUG FIX: Python 2 print statements converted to print() calls.
        print(self)
        if winner == 0:
            print("Tie Game")
        elif winner == 1:
            print("X wins!")
        else:
            print("O wins!")

    def saveGame(self, filename):
        """Pickle this board to `filename`."""
        # BUG FIX: pickle requires binary file mode ("wb") on Python 3.
        with open(filename, "wb") as f:
            pickle.Pickler(f).dump(self)

    def loadGame(self, filename):
        """Unpickle and return the object stored in `filename`."""
        # BUG FIX: binary mode ("rb") to match saveGame.
        with open(filename, "rb") as f:
            return pickle.Unpickler(f).load()
I missed one
import pickle
from Player import *
class TTTBoard:
    """A 3x3 tic-tac-toe board. Player 1 plays 'X', player 2 plays 'O'.

    The board is a flat list of SIZE*SIZE single-character strings, where
    ' ' marks an empty square.
    """

    def __init__(self):
        """Initialize an empty board with no players assigned."""
        self.SIZE = 3
        self.board = [' '] * (self.SIZE * self.SIZE)
        self.players = [None, None]

    def __str__(self):
        """Return the board as a string; empty squares show their move index."""
        ret = "\n"
        for i in range(len(self.board)):
            ret += str(i) if self.board[i] == " " else self.board[i]
            if (i + 1) % 3 == 0:
                ret += "\n"
        ret += "\n"
        return ret

    def legalMove(self, playerNum, move):
        """Return True when `move` is currently legal for the player."""
        return move in self.legalMoves(playerNum)

    def legalMoves(self, playerNum):
        """Return the list of open board positions (same for either player)."""
        return [m for m in range(len(self.board)) if self.board[m] == ' ']

    def makeMove(self, playerNum, pos):
        """Place the player's mark at `pos`.

        Returns False (without modifying the board) when `pos` is out of
        range or already occupied, True otherwise.
        """
        if pos not in range(len(self.board)) or self.board[pos] != ' ':
            return False
        self.board[pos] = 'X' if playerNum == 1 else 'O'
        return True

    def rowWin(self, c):
        """Return True when char `c` fills any row."""
        for i in range(self.SIZE):
            if self.board[i * self.SIZE:(i + 1) * self.SIZE] == [c] * self.SIZE:
                return True
        return False

    def colWin(self, c):
        """Return True when char `c` fills any column."""
        for i in range(self.SIZE):
            column = [self.board[j * self.SIZE + i] for j in range(self.SIZE)]
            if column == [c] * self.SIZE:
                return True
        return False

    def diagWin(self, c):
        """Return True when char `c` fills either diagonal."""
        diag = [self.board[i * self.SIZE + i] for i in range(self.SIZE)]
        offdiag = [self.board[(i + 1) * self.SIZE - 1 - i] for i in range(self.SIZE)]
        return diag == [c] * self.SIZE or offdiag == [c] * self.SIZE

    def hasWonPlayer(self, c):
        """Return True when the player using char `c` has any winning line."""
        return self.rowWin(c) or self.colWin(c) or self.diagWin(c)

    def hasWon(self, playerNum):
        """Return True when the numbered player (1='X', 2='O') has won."""
        if playerNum == 1:
            return self.hasWonPlayer("X")
        return self.hasWonPlayer("O")

    def gameOver(self):
        """Return True when someone has won or the board is full."""
        if self.hasWonPlayer("X") or self.hasWonPlayer("O"):
            return True
        return ' ' not in self.board

    def reset(self):
        """Clear the board for a new game."""
        self.board = [' '] * (self.SIZE * self.SIZE)

    def hostGame(self, player1, player2):
        """Host a game of tic tac toe between two players and print results.

        Players must expose a `chooseMove(board)` method and a `num`
        attribute (1 or 2).
        """
        self.players = [player1, player2]
        turn, wait = player1, player2  # whose move it is / who waits
        winner = 0
        rounds = 0
        while winner == 0 and rounds < self.SIZE * self.SIZE:
            print(self)
            pos = turn.chooseMove(self)
            # BUG FIX: makeMove expects the player *number*; the Player object
            # was passed before, so `playerNum == 1` was always False and
            # every mark placed was 'O'.
            self.makeMove(turn.num, pos)
            if self.hasWon(turn.num):
                winner = turn.num
            turn, wait = wait, turn
            rounds += 1
        # BUG FIX: Python 2 print statements converted to print() calls.
        print(self)
        if winner == 0:
            print("Tie Game")
        elif winner == 1:
            print("X wins!")
        else:
            print("O wins!")

    def saveGame(self, filename):
        """Pickle this board to `filename`."""
        # BUG FIX: pickle requires binary file mode ("wb") on Python 3.
        with open(filename, "wb") as f:
            pickle.Pickler(f).dump(self)

    def loadGame(self, filename):
        """Unpickle and return the object stored in `filename`."""
        # BUG FIX: binary mode ("rb") to match saveGame.
        with open(filename, "rb") as f:
            return pickle.Unpickler(f).load()
from __future__ import print_function, division, absolute_import
import random
import math
import copy
import numbers
import multiprocessing
import threading
import traceback
import sys
import os
import time
import json
import types
import numpy as np
import cv2
import imageio
import scipy.spatial.distance
import six
import six.moves as sm
import skimage.draw
import skimage.measure
import collections
import matplotlib.pyplot as plt
import shapely
import shapely.geometry
import shapely.ops
if sys.version_info[0] == 2:
import cPickle as pickle
from Queue import Empty as QueueEmpty, Full as QueueFull
import socket
BrokenPipeError = socket.error
elif sys.version_info[0] == 3:
import pickle
from queue import Empty as QueueEmpty, Full as QueueFull
xrange = range
# Generic sentinel string; its precise semantics are defined by the consuming
# code elsewhere in the library (not visible in this chunk).
ALL = "ALL"
# Directory containing this module; used to locate the bundled data files below.
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
# filepath to the quokka image, its annotations and depth map
QUOKKA_FP = os.path.join(FILE_DIR, "quokka.jpg")
QUOKKA_ANNOTATIONS_FP = os.path.join(FILE_DIR, "quokka_annotations.json")
QUOKKA_DEPTH_MAP_HALFRES_FP = os.path.join(FILE_DIR, "quokka_depth_map_halfres.png")
# Bundled TrueType font shipped next to this module.
DEFAULT_FONT_FP = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    "DejaVuSans.ttf"
)
# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)
# Sets of numpy scalar dtypes, grouped by kind (float/int/uint).
NP_FLOAT_TYPES = set(np.sctypes["float"])
NP_INT_TYPES = set(np.sctypes["int"])
NP_UINT_TYPES = set(np.sctypes["uint"])
# presumably the default backend of an imshow helper defined elsewhere -- confirm
IMSHOW_BACKEND_DEFAULT = "matplotlib"
# Interpolations accepted by the resize helpers: both the string aliases and
# the matching cv2.INTER_* integer constants are valid.
IMRESIZE_VALID_INTERPOLATIONS = ["nearest", "linear", "area", "cubic",
                                 cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
def is_np_array(val):
    """
    Check whether `val` is a numpy array.

    Parameters
    ----------
    val
        Variable to check.

    Returns
    -------
    bool
        True if `val` is an ``np.ndarray``, otherwise False.
    """
    # An isinstance check against (np.ndarray, np.generic) would also match
    # numpy scalar values, which are not arrays -- hence the exact ndarray test.
    return isinstance(val, np.ndarray)
def is_single_integer(val):
    """
    Check whether `val` is a single integer value.

    Parameters
    ----------
    val
        Variable to check.

    Returns
    -------
    bool
        True for integral types, False otherwise.
    """
    # bool is a subclass of int in Python, so it must be excluded explicitly.
    if isinstance(val, bool):
        return False
    return isinstance(val, numbers.Integral)
def is_single_float(val):
    """
    Check whether `val` is a single float value.

    Parameters
    ----------
    val
        Variable to check.

    Returns
    -------
    bool
        True for real (non-integral, non-bool) scalar values, else False.
    """
    # A "float" here is any real number that is neither integral nor a bool.
    return (isinstance(val, numbers.Real)
            and not isinstance(val, numbers.Integral)
            and not isinstance(val, bool))
def is_single_number(val):
    """
    Check whether `val` is a single number (integer or float).

    Parameters
    ----------
    val
        Variable to check.

    Returns
    -------
    bool
        True for any real scalar that is not a bool, else False.
    """
    # Equivalent to is_single_integer(val) or is_single_float(val):
    # Integral is a subset of Real, and bool is excluded in both.
    return isinstance(val, numbers.Real) and not isinstance(val, bool)
def is_iterable(val):
    """
    Checks whether a variable is iterable.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is an iterable. Otherwise False.
    """
    # BUG FIX: collections.Iterable was deprecated in Python 3.3 and removed in
    # Python 3.10; the correct home is collections.abc.Iterable. Fall back to
    # the old location for Python 2, where collections.abc does not exist.
    abc_module = getattr(collections, "abc", collections)
    return isinstance(val, abc_module.Iterable)
# TODO convert to is_single_string() or rename is_single_integer/float/number()
def is_string(val):
    """
    Check whether `val` is a string.

    Parameters
    ----------
    val
        Variable to check.

    Returns
    -------
    bool
        True if `val` is a string type (str, and unicode on Python 2),
        otherwise False.
    """
    # six.string_types covers both py2 (str/unicode) and py3 (str).
    return isinstance(val, six.string_types)
def is_single_bool(val):
    """
    Checks whether a variable is a boolean.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a boolean. Otherwise False.
    """
    # IDIOM FIX: was `type(val) == type(True)`; isinstance is the idiomatic
    # check and is exactly equivalent here because bool cannot be subclassed.
    return isinstance(val, bool)
def is_integer_array(val):
    """
    Check whether `val` is a numpy array with an integer dtype.

    Parameters
    ----------
    val
        Variable to check.

    Returns
    -------
    bool
        True if `val` is an ndarray of integer dtype, otherwise False.
    """
    if not isinstance(val, np.ndarray):
        return False
    return issubclass(val.dtype.type, np.integer)
def is_float_array(val):
    """
    Check whether `val` is a numpy array with a floating point dtype.

    Parameters
    ----------
    val
        Variable to check.

    Returns
    -------
    bool
        True if `val` is an ndarray of float dtype, otherwise False.
    """
    if not isinstance(val, np.ndarray):
        return False
    return issubclass(val.dtype.type, np.floating)
def is_callable(val):
    """
    Check whether `val` is callable (e.g. a function).

    Parameters
    ----------
    val
        Variable to check.

    Returns
    -------
    bool
        True if `val` can be called, otherwise False.
    """
    # Python 3.0-3.2 lack the callable() builtin, hence the version probe.
    if sys.version_info[0] == 3 and sys.version_info[1] <= 2:
        return hasattr(val, '__call__')
    return callable(val)
def is_generator(val):
    """
    Check whether `val` is a generator object.

    Parameters
    ----------
    val
        Variable to check.

    Returns
    -------
    bool
        True if `val` is a generator, otherwise False.
    """
    # Note: only matches actual generator objects, not arbitrary iterators.
    return isinstance(val, types.GeneratorType)
def caller_name():
    """
    Return the name of the calling function.

    Returns
    -------
    str
        Name (``co_name``) of the function one frame up the call stack.
    """
    # Frame 0 is this function itself; frame 1 is its caller.
    return sys._getframe(1).f_code.co_name
def seed(seedval):
    """
    Seed the library's global random state.

    All randomness in the library is by default driven by the global random
    state; under special circumstances (e.g. deterministic mode) local states
    derived from the global one are used instead, so reseeding here affects
    those derivations as well.

    Parameters
    ----------
    seedval : int
        The seed value to use.
    """
    CURRENT_RANDOM_STATE.seed(seedval)
def current_random_state():
    """
    Get the library's current/global random state.

    Returns
    -------
    numpy.random.RandomState
        The global random state object.
    """
    return CURRENT_RANDOM_STATE
def new_random_state(seed=None, fully_random=False):
    """
    Create a new numpy random state.

    Parameters
    ----------
    seed : None or int, optional
        Seed for the new state; accepts the same datatypes as
        ``numpy.random.RandomState(seed)``.
    fully_random : bool, optional
        Only relevant when `seed` is None: if True, let numpy initialize the
        state itself; if False (default), derive a seed from the global random
        state, which is faster.

    Returns
    -------
    numpy.random.RandomState
        The newly created random state.
    """
    if seed is None and not fully_random:
        # Sampling a seed and constructing RandomState(seed) explicitly is
        # considerably faster than an unseeded RandomState().
        seed = CURRENT_RANDOM_STATE.randint(0, 10**6, 1)[0]
    return np.random.RandomState(seed)
def dummy_random_state():
    """
    Create a placeholder random state, always seeded with 1.

    Returns
    -------
    numpy.random.RandomState
        A fresh random state with seed 1.
    """
    return np.random.RandomState(1)
def copy_random_state(random_state, force_copy=False):
    """
    Create a copy of a random state.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        The random state to copy.
    force_copy : bool, optional
        If True, always copy. If False, numpy's default random state module
        is returned as-is (not copied); all other states are copied.

    Returns
    -------
    numpy.random.RandomState
        The copied random state (or ``np.random`` itself, see `force_copy`).
    """
    if random_state == np.random and not force_copy:
        return random_state
    # Seed value of the scratch state is irrelevant; its state is overwritten.
    duplicate = np.random.RandomState(1)
    duplicate.set_state(random_state.get_state())
    return duplicate
def derive_random_state(random_state):
    """
    Derive a single new random state from an existing one.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Source state to derive from.

    Returns
    -------
    numpy.random.RandomState
        The derived random state.
    """
    # Delegate to the plural variant and unwrap the single result.
    return derive_random_states(random_state, n=1)[0]
# TODO use this everywhere instead of manual seed + create
def derive_random_states(random_state, n=1):
    """
    Derive `n` new random states from an existing random state.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Source state to derive from (advanced by one draw).
    n : int, optional
        How many states to derive.

    Returns
    -------
    list of numpy.random.RandomState
        The derived random states.
    """
    base_seed = random_state.randint(0, 10**6, 1)[0]
    return [new_random_state(base_seed + offset) for offset in sm.xrange(n)]
def forward_random_state(random_state):
    """
    Advance a random state in place by drawing one sample.

    Guarantees that subsequent draws from `random_state` differ from what
    they would have produced without this call.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        The random state to advance.
    """
    random_state.uniform()
def _quokka_normalize_extract(extract):
    """
    Normalize `extract` to a BoundingBox on the standard quokka image.

    Parameters
    ----------
    extract : 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        * ``square``: a fixed square area ``(x: 0 to max 643, y: 0 to max 643)``.
        * tuple: four numbers interpreted as ``x1``, ``y1``, ``x2``, ``y2``.
        * BoundingBox: used as-is.
        * BoundingBoxesOnImage: must contain exactly one box and have the
          full image shape (643, 960, *); that single box is used.

    Returns
    -------
    bb : imgaug.BoundingBox
        Normalized area to extract from the quokka image.
    """
    if extract == "square":
        return BoundingBox(x1=0, y1=0, x2=643, y2=643)
    if isinstance(extract, tuple) and len(extract) == 4:
        x1, y1, x2, y2 = extract
        return BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2)
    if isinstance(extract, BoundingBox):
        return extract
    if isinstance(extract, BoundingBoxesOnImage):
        do_assert(len(extract.bounding_boxes) == 1)
        do_assert(extract.shape[0:2] == (643, 960))
        return extract.bounding_boxes[0]
    raise Exception(
        "Expected None or tuple of four entries or BoundingBox or BoundingBoxesOnImage "
        + "for parameter 'extract', got %s." % (type(extract),)
    )
def _compute_resized_shape(from_shape, to_shape):
    """
    Compute the shape of an image-like array after resizing.

    Parameters
    ----------
    from_shape : tuple or ndarray
        Old shape, as a ``(H, W)`` / ``(H, W, C)`` tuple or an array with
        two/three dimensions (its ``.shape`` is used).
    to_shape : None or tuple of ints or tuple of floats or int or float
        Target shape description:
        * None: keep `from_shape`.
        * int ``V``: new shape ``(V, V, [C])``.
        * float ``V``: new shape ``(H*V, W*V, [C])``.
        * tuple of ints ``(H', W', [C'])``: absolute new height/width.
        * tuple of floats: relative scaling of height/width.

    Returns
    -------
    to_shape_computed : list of int
        The computed new shape.
    """
    # Accept arrays in place of shape tuples for either argument.
    if is_np_array(from_shape):
        from_shape = from_shape.shape
    if is_np_array(to_shape):
        to_shape = to_shape.shape

    computed = list(from_shape)
    if to_shape is None:
        pass
    elif isinstance(to_shape, tuple):
        # Channel axis: must match if given on both sides, else carry it over.
        if len(from_shape) == 3 and len(to_shape) == 3:
            do_assert(from_shape[2] == to_shape[2])
        elif len(to_shape) == 3:
            computed.append(to_shape[2])
        hw = to_shape[0:2]
        if all([is_single_integer(v) for v in hw]):
            # Absolute target height/width.
            computed[0] = to_shape[0]
            computed[1] = to_shape[1]
        elif all([is_single_float(v) for v in hw]):
            # Relative scaling of height/width.
            computed[0] = int(np.round(from_shape[0] * to_shape[0])) if to_shape[0] is not None else from_shape[0]
            computed[1] = int(np.round(from_shape[1] * to_shape[1])) if to_shape[1] is not None else from_shape[1]
    elif is_single_integer(to_shape) or is_single_float(to_shape):
        # Scalar: apply the same value to both axes.
        computed = _compute_resized_shape(from_shape, (to_shape, to_shape))
    else:
        raise Exception("Expected to_shape to be None or ndarray or tuple of floats or tuple of ints or single int "
                        + "or single float, got %s." % (type(to_shape),))
    return computed
def quokka(size=None, extract=None):
    """
    Return the standard quokka example image as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        Output size, passed to :func:`imgaug.imresize_single_image`; usually a
        ``(H, W)`` tuple. None keeps the original size.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea of the image to extract; see
        :func:`imgaug._quokka_normalize_extract`. None uses the whole image.

    Returns
    -------
    img : (H,W,3) ndarray
        uint8 image array.
    """
    img = imageio.imread(QUOKKA_FP, pilmode="RGB")
    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img = bb.extract_from_image(img)
    if size is not None:
        target = _compute_resized_shape(img.shape, size)
        img = imresize_single_image(img, target[0:2])
    return img
def quokka_square(size=None):
    """
    Return the square variant of the quokka example image.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        Output size, as in :func:`imgaug.quokka`. None keeps the extracted
        square's original size.

    Returns
    -------
    img : (H,W,3) ndarray
        uint8 image array.
    """
    return quokka(size=size, extract="square")
def quokka_heatmap(size=None, extract=None):
    """
    Return a depth-map heatmap for the standard quokka example image.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        See :func:`imgaug.quokka`. None resizes to the full (643, 960) size.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        See :func:`imgaug.quokka`.

    Returns
    -------
    result : imgaug.HeatmapsOnImage
        The depth map as a heatmap; values near 0.0 are close to the camera,
        values near 1.0 are the furthest away among the shown objects.
    """
    # The stored depth map is half resolution; upscale it to the image size.
    img = imageio.imread(QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode="RGB")
    img = imresize_single_image(img, (643, 960), interpolation="cubic")
    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img = bb.extract_from_image(img)
    if size is None:
        size = (643, 960)
    target = _compute_resized_shape(img.shape, size)
    img = imresize_single_image(img, target[0:2])
    depth_0to1 = img.astype(np.float32) / 255.0
    # The file stores 0 as furthest away, so invert to match the contract.
    depth_0to1 = 1 - depth_0to1
    return HeatmapsOnImage(depth_0to1, shape=depth_0to1.shape[0:2] + (3,))
def quokka_segmentation_map(size=None, extract=None):
    """
    Return a segmentation map for the standard quokka example image.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        See :func:`imgaug.quokka`.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        See :func:`imgaug.quokka`.

    Returns
    -------
    result : imgaug.SegmentationMapOnImage
        Segmentation map object built from the annotated quokka polygon.
    """
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    # First (and only used) polygon: collect its keypoint coordinates.
    polygon_kps = json_dict["polygons"][0]["keypoints"]
    xx = [kp_dict["x"] for kp_dict in polygon_kps]
    yy = [kp_dict["y"] for kp_dict in polygon_kps]
    # Rasterize the polygon into a single-channel float mask.
    img_seg = np.zeros((643, 960, 1), dtype=np.float32)
    rr, cc = skimage.draw.polygon(np.array(yy), np.array(xx), shape=img_seg.shape)
    img_seg[rr, cc] = 1.0
    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img_seg = bb.extract_from_image(img_seg)
    segmap = SegmentationMapOnImage(img_seg, shape=img_seg.shape[0:2] + (3,))
    if size is not None:
        target = _compute_resized_shape(img_seg.shape, size)
        segmap = segmap.scale(target[0:2])
        segmap.shape = tuple(target[0:2]) + (3,)
    return segmap
def quokka_keypoints(size=None, extract=None):
    """
    Return example keypoints on the standard example quokka image.

    The keypoints cover the eyes, ears, nose and paws.

    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the keypoints are placed. If None, then the keypoints
        are not projected to any new size (positions on the original image are used).
        Floats lead to relative size changes, ints to absolute sizes in pixels.

    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.

    Returns
    -------
    kpsoi : imgaug.KeypointsOnImage
        Example keypoints on the quokka image.
    """
    # If a subarea is extracted, keypoint coordinates must be shifted by its
    # top-left corner.
    offset_x, offset_y = 0, 0
    bb_extract = None
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1

    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)

    keypoints = [
        Keypoint(x=kp_dict["x"] - offset_x, y=kp_dict["y"] - offset_y)
        for kp_dict in json_dict["keypoints"]
    ]

    if bb_extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)

    kpsoi = KeypointsOnImage(keypoints, shape=shape)
    if size is not None:
        kpsoi = kpsoi.on(_compute_resized_shape(shape, size))
    return kpsoi
def quokka_bounding_boxes(size=None, extract=None):
    """
    Return example bounding boxes on the standard example quokka image.

    Currently only a single bounding box is returned that covers the quokka.

    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the BBs are placed. If None, then the BBs
        are not projected to any new size (positions on the original image are used).
        Floats lead to relative size changes, ints to absolute sizes in pixels.

    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.

    Returns
    -------
    bbsoi : imgaug.BoundingBoxesOnImage
        Example BBs on the quokka image.
    """
    # If a subarea is extracted, BB coordinates must be shifted by its
    # top-left corner.
    offset_x, offset_y = 0, 0
    bb_extract = None
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1

    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)

    bbs = [
        BoundingBox(
            x1=bb_dict["x1"] - offset_x,
            y1=bb_dict["y1"] - offset_y,
            x2=bb_dict["x2"] - offset_x,
            y2=bb_dict["y2"] - offset_y
        )
        for bb_dict in json_dict["bounding_boxes"]
    ]

    if bb_extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)

    bbsoi = BoundingBoxesOnImage(bbs, shape=shape)
    if size is not None:
        bbsoi = bbsoi.on(_compute_resized_shape(shape, size))
    return bbsoi
def angle_between_vectors(v1, v2):
    """
    Return the angle in radians between vectors `v1` and `v2`.

    From http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python

    Parameters
    ----------
    v1 : (N,) ndarray
        First vector.

    v2 : (N,) ndarray
        Second vector.

    Returns
    -------
    out : float
        Angle in radians.

    Examples
    --------
    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([0, 1, 0]))
    1.5707963267948966

    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([1, 0, 0]))
    0.0

    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([-1, 0, 0]))
    3.141592653589793
    """
    unit1 = v1 / np.linalg.norm(v1)
    unit2 = v2 / np.linalg.norm(v2)
    # Clip to guard against tiny floating point overshoot outside [-1, 1].
    cos_angle = np.clip(np.dot(unit1, unit2), -1.0, 1.0)
    return np.arccos(cos_angle)
# TODO is this used anywhere?
def compute_line_intersection_point(x1, y1, x2, y2, x3, y3, x4, y4):
    """
    Compute the intersection point of the two infinite lines through
    ``(x1, y1)-(x2, y2)`` and ``(x3, y3)-(x4, y4)``.

    Uses the homogeneous line representation ``a*x + b*y = c`` and Cramer's rule.
    Returns the ``(x, y)`` tuple of the intersection, or ``False`` if the
    lines are parallel (zero determinant).
    """
    def _line_coefficients(p1, p2):
        # Coefficients (a, b, c) of the line a*x + b*y = c through p1 and p2.
        a = p1[1] - p2[1]
        b = p2[0] - p1[0]
        c = p1[0] * p2[1] - p2[0] * p1[1]
        return a, b, -c

    line1 = _line_coefficients((x1, y1), (x2, y2))
    line2 = _line_coefficients((x3, y3), (x4, y4))
    det = line1[0] * line2[1] - line1[1] * line2[0]
    if det == 0:
        # Parallel (or coincident) lines: no unique intersection point.
        return False
    det_x = line1[2] * line2[1] - line1[1] * line2[2]
    det_y = line1[0] * line2[2] - line1[2] * line2[0]
    return det_x / det, det_y / det
def draw_text(img, y, x, text, color=(0, 255, 0), size=25):
    """
    Draw text on an image.

    This uses by default DejaVuSans as its font, which is included in the
    library.

    Parameters
    ----------
    img : (H,W,3) ndarray
        The image array to draw text on.
        Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).

    y : int
        y-coordinate of the top left corner of the text.

    x : int
        x-coordinate of the top left corner of the text.

    text : str
        The text to draw.

    color : iterable of int, optional
        Color of the text to draw. For RGB-images this is expected to be an RGB color.

    size : int, optional
        Font size of the text to draw.

    Returns
    -------
    img_np : (H,W,3) ndarray
        Input image with text drawn on it.
    """
    # keeping PIL here so that it is not a dependency of the library right now
    from PIL import Image, ImageDraw, ImageFont

    do_assert(img.dtype in [np.uint8, np.float32])

    input_dtype = img.dtype
    if img.dtype == np.float32:
        img = img.astype(np.uint8)

    # BUG FIX: the original wrote the normalized values back via ``color[i] = val``,
    # which mutated a caller-provided list and crashed with a TypeError for the
    # (default) tuple argument. Build a fresh list instead.
    color_rgb = []
    for val in color:
        if isinstance(val, float):
            val = int(val * 255)  # floats are interpreted as fractions of 255
        color_rgb.append(int(np.clip(val, 0, 255)))

    img_pil = Image.fromarray(img)
    font = ImageFont.truetype(DEFAULT_FONT_FP, size)
    context = ImageDraw.Draw(img_pil)
    context.text((x, y), text, fill=tuple(color_rgb), font=font)
    img_np = np.asarray(img_pil)
    img_np.setflags(write=True)  # PIL/asarray returns read only array

    if img_np.dtype != input_dtype:
        img_np = img_np.astype(input_dtype)

    return img_np
# TODO rename sizes to size?
# TODO accept lists too as images
def imresize_many_images(images, sizes=None, interpolation=None):
    """
    Resize many images to a specified size.

    Parameters
    ----------
    images : (N,H,W,[C]) ndarray or list of (H,W,[C]) ndarray
        Array of the images to resize.
        Expected to usually be of dtype uint8.

    sizes : float or iterable of int or iterable of float
        The new size of the images, given either as a fraction (a single float) or as
        a ``(height, width)`` tuple of two integers or as a ``(height fraction, width fraction)``
        tuple of two floats.

    interpolation : None or str or int, optional
        The interpolation to use during resize.
        If int, then expected to be one of:

            * ``cv2.INTER_NEAREST`` (nearest neighbour interpolation)
            * ``cv2.INTER_LINEAR`` (linear interpolation)
            * ``cv2.INTER_AREA`` (area interpolation)
            * ``cv2.INTER_CUBIC`` (cubic interpolation)

        If string, then expected to be one of:

            * ``nearest`` (identical to ``cv2.INTER_NEAREST``)
            * ``linear`` (identical to ``cv2.INTER_LINEAR``)
            * ``area`` (identical to ``cv2.INTER_AREA``)
            * ``cubic`` (identical to ``cv2.INTER_CUBIC``)

        If None, the interpolation will be chosen automatically. For size
        increases, area interpolation will be picked and for size decreases,
        linear interpolation will be picked.

    Returns
    -------
    result : (N,H',W',[C]) ndarray
        Array of the resized images.

    Examples
    --------
    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), 2.0)

    Converts 2 RGB images of height and width 16 to images of height and width 16*2 = 32.

    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (16, 32))

    Converts 2 RGB images of height and width 16 to images of height 16 and width 32.

    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (2.0, 4.0))

    Converts 2 RGB images of height and width 16 to images of height 32 and width 64.
    """
    # we just do nothing if the input contains zero images
    # one could also argue that an exception would be appropiate here
    if len(images) == 0:
        return images

    # verify that all input images have height/width > 0
    do_assert(
        all([image.shape[0] > 0 and image.shape[1] > 0 for image in images]),
        ("Cannot resize images, because at least one image has a height and/or width of zero. "
         + "Observed shapes were: %s.") % (str([image.shape for image in images]),)
    )

    # verify that sizes contains only values >0
    if is_single_number(sizes) and sizes <= 0:
        raise Exception(
            "Cannot resize to the target size %.8f, because the value is zero or lower than zero." % (sizes,))
    elif isinstance(sizes, tuple) and (sizes[0] <= 0 or sizes[1] <= 0):
        sizes_str = [
            "int %d" % (sizes[0],) if is_single_integer(sizes[0]) else "float %.8f" % (sizes[0],),
            "int %d" % (sizes[1],) if is_single_integer(sizes[1]) else "float %.8f" % (sizes[1],),
        ]
        sizes_str = "(%s, %s)" % (sizes_str[0], sizes_str[1])
        raise Exception(
            "Cannot resize to the target sizes %s. At least one value is zero or lower than zero." % (sizes_str,))

    # change afterward the validation to make the above error messages match the original input
    if is_single_number(sizes):
        sizes = (sizes, sizes)
    else:
        do_assert(len(sizes) == 2, "Expected tuple with exactly two entries, got %d entries." % (len(sizes),))
        do_assert(all([is_single_number(val) for val in sizes]),
                  "Expected tuple with two ints or floats, got types %s." % (str([type(val) for val in sizes]),))

    # if input is a list, call this function N times for N images
    # but check beforehand if all images have the same shape, then just convert to a single array and de-convert
    # afterwards
    if isinstance(images, list):
        nb_shapes = len(set([image.shape for image in images]))
        if nb_shapes == 1:
            return list(imresize_many_images(np.array(images), sizes=sizes, interpolation=interpolation))
        else:
            # heterogeneous shapes: recurse per image with a temporary batch axis
            return [imresize_many_images(image[np.newaxis, ...], sizes=sizes, interpolation=interpolation)[0, ...]
                    for image in images]

    shape = images.shape
    do_assert(images.ndim in [3, 4], "Expected array of shape (N, H, W, [C]), got shape %s" % (str(shape),))
    nb_images = shape[0]
    im_height, im_width = shape[1], shape[2]
    nb_channels = shape[3] if images.ndim > 3 else None

    # float entries in `sizes` are interpreted as fractions of the current height/width
    height, width = sizes[0], sizes[1]
    height = int(np.round(im_height * height)) if is_single_float(height) else height
    width = int(np.round(im_width * width)) if is_single_float(width) else width

    if height == im_height and width == im_width:
        # no-op resize; still return a copy so callers may mutate the result safely
        return np.copy(images)

    ip = interpolation
    do_assert(ip is None or ip in IMRESIZE_VALID_INTERPOLATIONS)
    if ip is None:
        # NOTE(review): AREA is chosen for upscaling and LINEAR for downscaling here.
        # This matches the docstring above, but cv2's documentation recommends
        # INTER_AREA for shrinking -- confirm whether this inversion is intentional.
        if height > im_height or width > im_width:
            ip = cv2.INTER_AREA
        else:
            ip = cv2.INTER_LINEAR
    elif ip in ["nearest", cv2.INTER_NEAREST]:
        ip = cv2.INTER_NEAREST
    elif ip in ["linear", cv2.INTER_LINEAR]:
        ip = cv2.INTER_LINEAR
    elif ip in ["area", cv2.INTER_AREA]:
        ip = cv2.INTER_AREA
    else:  # if ip in ["cubic", cv2.INTER_CUBIC]:
        ip = cv2.INTER_CUBIC

    result_shape = (nb_images, height, width)
    if nb_channels is not None:
        result_shape = result_shape + (nb_channels,)
    result = np.zeros(result_shape, dtype=images.dtype)
    for img_idx in sm.xrange(nb_images):
        # TODO fallback to scipy here if image isn't uint8
        # note: cv2.resize expects the target size as (width, height)
        result_img = cv2.resize(images[img_idx], (width, height), interpolation=ip)
        # cv2 removes the channel axis if input was (H, W, 1)
        # we re-add it (but only if input was not (H, W))
        if len(result_img.shape) == 2 and nb_channels is not None and nb_channels == 1:
            result_img = result_img[:, :, np.newaxis]
        result[img_idx] = result_img.astype(images.dtype)
    return result
def imresize_single_image(image, sizes, interpolation=None):
    """
    Resize a single image.

    Parameters
    ----------
    image : (H,W,C) ndarray or (H,W) ndarray
        Array of the image to resize.
        Expected to usually be of dtype uint8.

    sizes : float or iterable of int or iterable of float
        See :func:`imgaug.imresize_many_images`.

    interpolation : None or str or int, optional
        See :func:`imgaug.imresize_many_images`.

    Returns
    -------
    out : (H',W',C) ndarray or (H',W') ndarray
        The resized image.
    """
    # Grayscale (H, W) inputs get a temporary channel axis so the batch
    # helper can be reused; it is stripped again before returning.
    had_no_channels = (image.ndim == 2)
    if had_no_channels:
        image = image[:, :, np.newaxis]
    do_assert(len(image.shape) == 3, image.shape)
    resized = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
    if had_no_channels:
        return np.squeeze(resized[0, :, :, 0])
    return resized[0, ...]
def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
    """
    Pad an image-like array on its top/right/bottom/left side.

    This function is a wrapper around :func:`numpy.pad`.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pad.

    top : int, optional
        Amount of pixels to add at the top side of the image. Must be 0 or greater.

    right : int, optional
        Amount of pixels to add at the right side of the image. Must be 0 or greater.

    bottom : int, optional
        Amount of pixels to add at the bottom side of the image. Must be 0 or greater.

    left : int, optional
        Amount of pixels to add at the left side of the image. Must be 0 or greater.

    mode : str, optional
        Padding mode to use. See :func:`numpy.pad` for details.
        In case of mode ``constant``, the parameter `cval` will be used as the
        ``constant_values`` parameter to :func:`numpy.pad`.
        In case of mode ``linear_ramp``, the parameter `cval` will be used as the
        ``end_values`` parameter to :func:`numpy.pad`.

    cval : number, optional
        Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

    Returns
    -------
    arr_pad : (H',W') ndarray or (H',W',C) ndarray
        Padded array with height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
    """
    do_assert(arr.ndim in [2, 3])
    do_assert(top >= 0)
    do_assert(right >= 0)
    do_assert(bottom >= 0)
    do_assert(left >= 0)

    if top == 0 and right == 0 and bottom == 0 and left == 0:
        # nothing to pad; return a copy for consistency with the padded path
        return np.copy(arr)

    pad_widths = [(top, bottom), (left, right)]
    if arr.ndim == 3:
        pad_widths.append((0, 0))  # never pad the channel axis

    # `cval` maps onto a different numpy.pad keyword depending on the mode.
    if mode == "constant":
        return np.pad(arr, pad_widths, mode=mode, constant_values=cval)
    if mode == "linear_ramp":
        return np.pad(arr, pad_widths, mode=mode, end_values=cval)
    return np.pad(arr, pad_widths, mode=mode)
def compute_paddings_for_aspect_ratio(arr, aspect_ratio):
    """
    Compute the amount of pixels by which an array has to be padded to fulfill an aspect ratio.

    The aspect ratio is given as width/height.
    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array for which to compute pad amounts.

    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.

    Returns
    -------
    result : tuple of int
        Required padding amounts to reach the target aspect ratio, given as a tuple
        of the form ``(top, right, bottom, left)``.
    """
    do_assert(arr.ndim in [2, 3])
    do_assert(aspect_ratio > 0)
    height, width = arr.shape[0:2]
    do_assert(height > 0)

    ratio_current = width / height
    pad_top = pad_right = pad_bottom = pad_left = 0

    if ratio_current < aspect_ratio:
        # array is too narrow for the target ratio -> pad left/right;
        # any odd pixel goes to the right side
        diff = (aspect_ratio * height) - width
        pad_right = int(np.ceil(diff / 2))
        pad_left = int(np.floor(diff / 2))
    elif ratio_current > aspect_ratio:
        # array is too wide for the target ratio -> pad top/bottom;
        # any odd pixel goes to the bottom side
        diff = ((1/aspect_ratio) * width) - height
        pad_top = int(np.floor(diff / 2))
        pad_bottom = int(np.ceil(diff / 2))

    return pad_top, pad_right, pad_bottom, pad_left
def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0, return_pad_amounts=False):
    """
    Pad an image-like array on its sides so that it matches a target aspect ratio.

    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pad.

    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.

    mode : str, optional
        Padding mode to use. See :func:`numpy.pad` for details.

    cval : number, optional
        Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

    return_pad_amounts : bool, optional
        If False, then only the padded image will be returned. If True, a tuple with two
        entries will be returned, where the first entry is the padded image and the second
        entry are the amounts by which each image side was padded. These amounts are again a
        tuple of the form (top, right, bottom, left), with each value being an integer.

    Returns
    -------
    arr_padded : (H',W') ndarray or (H',W',C) ndarray
        Padded image as (H',W') or (H',W',C) ndarray, fulfilling the given aspect_ratio.

    tuple of int
        Amounts by which the image was padded on each side, given as a tuple ``(top, right, bottom, left)``.
        This tuple is only returned if `return_pad_amounts` was set to True.
        Otherwise only ``arr_padded`` is returned.
    """
    paddings = compute_paddings_for_aspect_ratio(arr, aspect_ratio)
    pad_top, pad_right, pad_bottom, pad_left = paddings
    arr_padded = pad(
        arr,
        top=pad_top,
        right=pad_right,
        bottom=pad_bottom,
        left=pad_left,
        mode=mode,
        cval=cval
    )
    if return_pad_amounts:
        return arr_padded, paddings
    return arr_padded
def pool(arr, block_size, func, cval=0, preserve_dtype=True):
    """
    Rescale an array by pooling values within blocks.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. Ideally of datatype ``numpy.float64``.

    block_size : int or tuple of int
        Spatial size of each group of values to pool, aka kernel size.
        If a single integer, then a symmetric block of that size along height and width will be used.
        If a tuple of two values, it is assumed to be the block size along height and width of the image-like,
        with pooling happening per channel.
        If a tuple of three values, it is assumed to be the block size along height, width and channels.

    func : callable
        Function to apply to a given block in order to convert it to a single number,
        e.g. :func:`numpy.average`, :func:`numpy.min`, :func:`numpy.max`.

    cval : number, optional
        Value to use in order to pad the array along its border if the array cannot be divided
        by `block_size` without remainder.

    preserve_dtype : bool, optional
        Whether to convert the array back to the input datatype if it is changed away from
        that in the pooling process.

    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after pooling.
    """
    do_assert(arr.ndim in [2, 3])
    is_valid_int = is_single_integer(block_size) and block_size >= 1
    # BUG FIX: the original used a bare list comprehension after ``and``, which is
    # truthy for any non-empty tuple, so the per-entry ">= 1 integer" check never
    # actually ran. Wrap it in all() so invalid entries are rejected.
    is_valid_tuple = is_iterable(block_size) and len(block_size) in [2, 3] \
        and all([is_single_integer(val) and val >= 1 for val in block_size])
    do_assert(is_valid_int or is_valid_tuple)

    if is_single_integer(block_size):
        block_size = [block_size, block_size]
    if len(block_size) < arr.ndim:
        # no pooling along the channel axis unless explicitly requested
        block_size = list(block_size) + [1]

    input_dtype = arr.dtype
    arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func, cval=cval)
    if preserve_dtype and arr_reduced.dtype.type != input_dtype:
        arr_reduced = arr_reduced.astype(input_dtype)
    return arr_reduced
def avg_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Rescale an array using average pooling.

    Thin convenience wrapper around :func:`imgaug.pool` with ``numpy.average``
    as the reduction function.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. See :func:`imgaug.pool` for details.

    block_size : int or tuple of int or tuple of int
        Size of each block of values to pool. See :func:`imgaug.pool` for details.

    cval : number, optional
        Padding value. See :func:`imgaug.pool` for details.

    preserve_dtype : bool, optional
        Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.

    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after average pooling.
    """
    return pool(arr, block_size, func=np.average, cval=cval, preserve_dtype=preserve_dtype)
def max_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Rescale an array using max-pooling.

    Thin convenience wrapper around :func:`imgaug.pool` with ``numpy.max``
    as the reduction function.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. See :func:`imgaug.pool` for details.

    block_size : int or tuple of int or tuple of int
        Size of each block of values to pool. See :func:`imgaug.pool` for details.

    cval : number, optional
        Padding value. See :func:`imgaug.pool` for details.

    preserve_dtype : bool, optional
        Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.

    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after max-pooling.
    """
    return pool(arr, block_size, func=np.max, cval=cval, preserve_dtype=preserve_dtype)
def draw_grid(images, rows=None, cols=None):
    """
    Combine multiple input images into a single image showing them in a grid.

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        The input images to convert to a grid.
        Expected to be RGB and have dtype uint8.

    rows : None or int, optional
        The number of rows to show in the grid.
        If None, it will be automatically derived.

    cols : None or int, optional
        The number of cols to show in the grid.
        If None, it will be automatically derived.

    Returns
    -------
    grid : (H',W',3) ndarray
        Image of the generated grid.
    """
    if is_np_array(images):
        do_assert(images.ndim == 4)
    else:
        do_assert(is_iterable(images) and is_np_array(images[0]) and images[0].ndim == 3)

    nb_images = len(images)
    do_assert(nb_images > 0)

    # each grid cell is as large as the largest image
    cell_height = max([image.shape[0] for image in images])
    cell_width = max([image.shape[1] for image in images])
    channels = set([image.shape[2] for image in images])
    do_assert(
        len(channels) == 1,
        "All images are expected to have the same number of channels, "
        + "but got channel set %s with length %d instead." % (str(channels), len(channels))
    )
    nb_channels = list(channels)[0]

    # derive missing grid dimensions
    if rows is None and cols is None:
        rows = cols = int(math.ceil(math.sqrt(nb_images)))
    elif rows is not None:
        cols = int(math.ceil(nb_images / rows))
    elif cols is not None:
        rows = int(math.ceil(nb_images / cols))
    do_assert(rows * cols >= nb_images)

    grid = np.zeros((cell_height * rows, cell_width * cols, nb_channels), dtype=np.uint8)
    # place images row-major; cells beyond nb_images stay black
    for cell_idx, image in enumerate(images):
        row_idx, col_idx = divmod(cell_idx, cols)
        cell_y1 = cell_height * row_idx
        cell_x1 = cell_width * col_idx
        grid[cell_y1:cell_y1 + image.shape[0], cell_x1:cell_x1 + image.shape[1], :] = image

    return grid
def show_grid(images, rows=None, cols=None):
    """
    Convert the input images to a grid image and show it in a new window.

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        See :func:`imgaug.draw_grid`.

    rows : None or int, optional
        See :func:`imgaug.draw_grid`.

    cols : None or int, optional
        See :func:`imgaug.draw_grid`.
    """
    imshow(draw_grid(images, rows=rows, cols=cols))
def imshow(image, backend=IMSHOW_BACKEND_DEFAULT):
    """
    Show an image in a window.

    Parameters
    ----------
    image : (H,W,3) ndarray
        Image to show.

    backend : {'matplotlib', 'cv2'}, optional
        Library to use to show the image. May be either matplotlib or OpenCV ('cv2').
        OpenCV tends to be faster, but apparently causes more technical issues.
    """
    do_assert(backend in ["matplotlib", "cv2"], "Expected backend 'matplotlib' or 'cv2', got %s." % (backend,))

    if backend == "cv2":
        # cv2 expects BGR; convert RGB(A) inputs and drop any alpha channel
        image_to_show = image
        if image.ndim == 3 and image.shape[2] in [3, 4]:
            image_to_show = image[..., 0:3][..., ::-1]
        win_name = "imgaug-default-window"
        cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
        cv2.imshow(win_name, image_to_show)
        cv2.waitKey(0)  # block until a key is pressed
        cv2.destroyWindow(win_name)
        return

    plt.imshow(image, cmap="gray")
    plt.gcf().canvas.set_window_title("imgaug.imshow(%s)" % (image.shape,))
    plt.show()
def do_assert(condition, message="Assertion failed."):
    """
    Behave like an `assert` statement but raise unconditionally of optimization flags.

    This is added because `assert` statements are removed in optimized code.
    It replaces `assert` statements throughout the library that should be
    kept even in optimized code.

    Parameters
    ----------
    condition : bool
        If False, an exception is raised.

    message : str, optional
        Error message.
    """
    if condition:
        return
    raise AssertionError(str(message))
class HooksImages(object):
    """
    Class to intervene with image augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.

    Parameters
    ----------
    activator : None or callable, optional
        A function that gives permission to execute an augmenter.
        The expected interface is ``f(images, augmenter, parents, default)``,
        where ``images`` are the input images to augment, ``augmenter`` is the
        instance of the augmenter to execute, ``parents`` are previously
        executed augmenters and ``default`` is an expected default value to be
        returned if the activator function does not plan to make a decision
        for the given inputs.

    propagator : None or callable, optional
        A function that gives permission to propagate the augmentation further
        to the children of an augmenter. This happens after the activator.
        In theory, an augmenter may augment images itself (if allowed by the
        activator) and then execute child augmenters afterwards (if allowed by
        the propagator). If the activator returned False, the propagation step
        will never be executed.
        The expected interface is ``f(images, augmenter, parents, default)``,
        with all arguments having identical meaning to the activator.

    preprocessor : None or callable, optional
        A function to call before an augmenter performed any augmentations.
        The interface is ``f(images, augmenter, parents)``,
        with all arguments having identical meaning to the activator.
        It is expected to return the input images, optionally modified.

    postprocessor : None or callable, optional
        A function to call after an augmenter performed augmentations.
        The interface is the same as for the preprocessor.

    Examples
    --------
    >>> seq = iaa.Sequential([
    >>>     iaa.GaussianBlur(3.0, name="blur"),
    >>>     iaa.Dropout(0.05, name="dropout"),
    >>>     iaa.Affine(translate_px=-5, name="affine")
    >>> ])
    >>>
    >>> def activator(images, augmenter, parents, default):
    >>>     return False if augmenter.name in ["blur", "dropout"] else default
    >>>
    >>> seq_det = seq.to_deterministic()
    >>> images_aug = seq_det.augment_images(images)
    >>> heatmaps_aug = seq_det.augment_images(
    >>>     heatmaps,
    >>>     hooks=ia.HooksImages(activator=activator)
    >>> )

    This augments images and their respective heatmaps in the same way.
    The heatmaps however are only modified by Affine, not by GaussianBlur or
    Dropout.
    """

    def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None):
        self.activator = activator
        self.propagator = propagator
        self.preprocessor = preprocessor
        self.postprocessor = postprocessor

    def is_activated(self, images, augmenter, parents, default):
        """
        Return whether an augmenter may be executed.

        Returns
        -------
        bool
            If True, the augmenter may be executed. If False, it may not be executed.
        """
        # fall back to the caller-provided default when no hook was registered
        if self.activator is None:
            return default
        return self.activator(images, augmenter, parents, default)

    def is_propagating(self, images, augmenter, parents, default):
        """
        Return whether an augmenter may call its children to augment an
        image. This is independent of the augmenter itself possible changing
        the image, without calling its children. (Most (all?) augmenters with
        children currently dont perform any changes themselves.)

        Returns
        -------
        bool
            If True, the augmenter may propagate to its children. If False, it may not.
        """
        if self.propagator is None:
            return default
        return self.propagator(images, augmenter, parents, default)

    def preprocess(self, images, augmenter, parents):
        """
        Called before the augmentation of images starts (per augmenter).

        Returns
        -------
        (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.
        """
        if self.preprocessor is None:
            return images
        return self.preprocessor(images, augmenter, parents)

    def postprocess(self, images, augmenter, parents):
        """
        Called after the augmentation of images was performed.

        Returns
        -------
        (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.
        """
        if self.postprocessor is None:
            return images
        return self.postprocessor(images, augmenter, parents)
class HooksHeatmaps(HooksImages):
    """
    Class to intervene with heatmap augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.

    This class is currently the same as the one for images. This may or may
    not change in the future.
    """
    # Intentionally empty: inherits all behavior from HooksImages.
    pass
class HooksKeypoints(HooksImages):
    """
    Class to intervene with keypoint augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.

    This class is currently the same as the one for images. This may or may
    not change in the future.
    """
    # Intentionally empty: inherits all behavior from HooksImages.
    pass
def compute_geometric_median(X, eps=1e-5):
    """
    Estimate the geometric median of points in 2D via Weiszfeld-style iteration.

    Code from https://stackoverflow.com/a/30305181

    Parameters
    ----------
    X : (N,2) ndarray
        Points in 2D. Second axis must be given in xy-form.

    eps : float, optional
        Distance threshold when to return the median.

    Returns
    -------
    (2,) ndarray
        Geometric median as xy-coordinate.
    """
    estimate = np.mean(X, 0)  # start from the centroid
    while True:
        dists = scipy.spatial.distance.cdist(X, [estimate])
        mask_nonzero = (dists != 0)[:, 0]
        inv_dists = 1 / dists[mask_nonzero]
        inv_sum = np.sum(inv_dists)
        weights = inv_dists / inv_sum
        weighted_mean = np.sum(weights * X[mask_nonzero], 0)
        nb_coincident = len(X) - np.sum(mask_nonzero)

        if nb_coincident == 0:
            candidate = weighted_mean
        elif nb_coincident == len(X):
            # all points coincide with the current estimate
            return estimate
        else:
            # estimate coincides with some data points: dampen the update
            residual = (weighted_mean - estimate) * inv_sum
            residual_norm = np.linalg.norm(residual)
            rinv = 0 if residual_norm == 0 else nb_coincident / residual_norm
            candidate = max(0, 1 - rinv) * weighted_mean + min(1, rinv) * estimate

        if scipy.spatial.distance.euclidean(estimate, candidate) < eps:
            return candidate
        estimate = candidate
class Keypoint(object):
"""
A single keypoint (aka landmark) on an image.
Parameters
----------
x : number
Coordinate of the keypoint on the x axis.
y : number
Coordinate of the keypoint on the y axis.
"""
    def __init__(self, x, y):
        # Coordinates are stored as given; floats are allowed and not rounded.
        self.x = x
        self.y = y
@property
def x_int(self):
"""
Return the keypoint's x-coordinate, rounded to the closest integer.
Returns
-------
result : int
Keypoint's x-coordinate, rounded to the closest integer.
"""
return int(np.round(self.x))
@property
def y_int(self):
"""
Return the keypoint's y-coordinate, rounded to the closest integer.
Returns
-------
result : int
Keypoint's y-coordinate, rounded to the closest integer.
"""
return int(np.round(self.y))
def project(self, from_shape, to_shape):
"""
Project the keypoint onto a new position on a new image.
E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
and y=(20 of 100 pixels) and is projected onto a new image with
size (width=200, height=200), its new position will be (20, 40).
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
imgaug.Keypoint
Keypoint object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return Keypoint(x=self.x, y=self.y)
else:
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
x = (self.x / from_width) * to_width
y = (self.y / from_height) * to_height
return Keypoint(x=x, y=y)
def shift(self, x=0, y=0):
"""
Move the keypoint around on an image.
Parameters
----------
x : number, optional
Move by this value on the x axis.
y : number, optional
Move by this value on the y axis.
Returns
-------
imgaug.Keypoint
Keypoint object with new coordinates.
"""
return Keypoint(self.x + x, self.y + y)
def generate_similar_points_manhattan(self, nb_steps, step_size, return_array=False):
"""
Generate nearby points to this keypoint based on manhattan distance.
To generate the first neighbouring points, a distance of S (step size) is moved from the
center point (this keypoint) to the top, right, bottom and left, resulting in four new
points. From these new points, the pattern is repeated. Overlapping points are ignored.
The resulting points have a shape similar to a square rotated by 45 degrees.
Parameters
----------
nb_steps : int
The number of steps to move from the center point. nb_steps=1 results in a total of
5 output points (1 center point + 4 neighbours).
step_size : number
The step size to move from every point to its neighbours.
return_array : bool, optional
Whether to return the generated points as a list of keypoints or an array
of shape ``(N,2)``, where ``N`` is the number of generated points and the second axis contains
the x- (first value) and y- (second value) coordinates.
Returns
-------
points : list of imgaug.Keypoint or (N,2) ndarray
If return_array was False, then a list of Keypoint.
Otherwise a numpy array of shape ``(N,2)``, where ``N`` is the number of generated points and
the second axis contains the x- (first value) and y- (second value) coordinates.
The center keypoint (the one on which this function was called) is always included.
"""
# TODO add test
# Points generates in manhattan style with S steps have a shape similar to a 45deg rotated
# square. The center line with the origin point has S+1+S = 1+2*S points (S to the left,
# S to the right). The lines above contain (S+1+S)-2 + (S+1+S)-2-2 + ... + 1 points. E.g.
# for S=2 it would be 3+1=4 and for S=3 it would be 5+3+1=9. Same for the lines below the
# center. Hence the total number of points is S+1+S + 2*(S^2).
points = np.zeros((nb_steps + 1 + nb_steps + 2*(nb_steps**2), 2), dtype=np.float32)
# we start at the bottom-most line and move towards the top-most line
yy = np.linspace(self.y - nb_steps * step_size, self.y + nb_steps * step_size, nb_steps + 1 + nb_steps)
# bottom-most line contains only one point
width = 1
nth_point = 0
for i_y, y in enumerate(yy):
if width == 1:
xx = [self.x]
else:
xx = np.linspace(self.x - (width-1)//2 * step_size, self.x + (width-1)//2 * step_size, width)
for x in xx:
points[nth_point] = [x, y]
nth_point += 1
if i_y < nb_steps:
width += 2
else:
width -= 2
if return_array:
return points
return [Keypoint(x=points[i, 0], y=points[i, 1]) for i in sm.xrange(points.shape[0])]
def __repr__(self):
return self.__str__()
def __str__(self):
return "Keypoint(x=%.8f, y=%.8f)" % (self.x, self.y)
class KeypointsOnImage(object):
    """
    Object that represents all keypoints on a single image.

    Parameters
    ----------
    keypoints : list of imgaug.Keypoint
        List of keypoints on the image.

    shape : tuple of int
        The shape of the image on which the keypoints are placed.

    Examples
    --------
    >>> kps = [Keypoint(x=10, y=20), Keypoint(x=34, y=60)]
    >>> kps_oi = KeypointsOnImage(kps, shape=image.shape)

    """
    def __init__(self, keypoints, shape):
        self.keypoints = keypoints
        if is_np_array(shape):
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)

    @property
    def height(self):
        return self.shape[0]

    @property
    def width(self):
        return self.shape[1]

    @property
    def empty(self):
        """
        Returns whether this object contains zero keypoints.

        Returns
        -------
        result : bool
            True if this object contains zero keypoints.

        """
        return len(self.keypoints) == 0

    def on(self, image):
        """
        Project keypoints from one image to a new one.

        Parameters
        ----------
        image : ndarray or tuple of int
            New image onto which the keypoints are to be projected.
            May also simply be that new image's shape tuple.

        Returns
        -------
        keypoints : imgaug.KeypointsOnImage
            Object containing all projected keypoints.

        """
        if is_np_array(image):
            shape = image.shape
        else:
            shape = image
        if shape[0:2] == self.shape[0:2]:
            return self.deepcopy()
        else:
            keypoints = [kp.project(self.shape, shape) for kp in self.keypoints]
            return KeypointsOnImage(keypoints, shape)

    def draw_on_image(self, image, color=(0, 255, 0), size=3, copy=True, raise_if_out_of_image=False):
        """
        Draw all keypoints onto a given image. Each keypoint is marked by a square of a chosen color and size.

        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the keypoints.
            This image should usually have the same shape as
            set in KeypointsOnImage.shape.

        color : int or list of int or tuple of int or (3,) ndarray, optional
            The RGB color of all keypoints. If a single int ``C``, then that is
            equivalent to ``(C,C,C)``.

        size : int, optional
            The size of each point. If set to ``C``, each square will have size ``C x C``.

        copy : bool, optional
            Whether to copy the image before drawing the points.

        raise_if_out_of_image : bool, optional
            Whether to raise an exception if any keypoint is outside of the image.

        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn keypoints.

        """
        if copy:
            image = np.copy(image)
        height, width = image.shape[0:2]
        for keypoint in self.keypoints:
            y, x = keypoint.y_int, keypoint.x_int
            if 0 <= y < height and 0 <= x < width:
                x1 = max(x - size//2, 0)
                x2 = min(x + 1 + size//2, width)
                y1 = max(y - size//2, 0)
                y2 = min(y + 1 + size//2, height)
                image[y1:y2, x1:x2] = color
            else:
                if raise_if_out_of_image:
                    # BUGFIX: the arguments were previously interpolated as
                    # (y, x), i.e. x and y were swapped in the error message.
                    raise Exception("Cannot draw keypoint x=%.8f, y=%.8f on image with shape %s." % (x, y, image.shape))
        return image

    def shift(self, x=0, y=0):
        """
        Move the keypoints around on an image.

        Parameters
        ----------
        x : number, optional
            Move each keypoint by this value on the x axis.

        y : number, optional
            Move each keypoint by this value on the y axis.

        Returns
        -------
        out : KeypointsOnImage
            Keypoints after moving them.

        """
        keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]
        return KeypointsOnImage(keypoints, self.shape)

    def get_coords_array(self):
        """
        Convert the coordinates of all keypoints in this object to an array of shape (N,2).

        Returns
        -------
        result : (N, 2) ndarray
            Where N is the number of keypoints. Each first value is the
            x coordinate, each second value is the y coordinate.

        """
        result = np.zeros((len(self.keypoints), 2), np.float32)
        for i, keypoint in enumerate(self.keypoints):
            result[i, 0] = keypoint.x
            result[i, 1] = keypoint.y
        return result

    @staticmethod
    def from_coords_array(coords, shape):
        """
        Convert an array (N,2) with a given image shape to a KeypointsOnImage object.

        Parameters
        ----------
        coords : (N, 2) ndarray
            Coordinates of ``N`` keypoints on the original image.
            Each first entry ``coords[i, 0]`` is expected to be the x coordinate.
            Each second entry ``coords[i, 1]`` is expected to be the y coordinate.

        shape : tuple
            Shape tuple of the image on which the keypoints are placed.

        Returns
        -------
        out : KeypointsOnImage
            KeypointsOnImage object that contains all keypoints from the array.

        """
        keypoints = [Keypoint(x=coords[i, 0], y=coords[i, 1]) for i in sm.xrange(coords.shape[0])]
        return KeypointsOnImage(keypoints, shape)

    def to_keypoint_image(self, size=1):
        """
        Draws a new black image of shape ``(H,W,N)`` in which all keypoint coordinates are set to 255.
        (H=shape height, W=shape width, N=number of keypoints)

        This function can be used as a helper when augmenting keypoints with a method that only supports the
        augmentation of images.

        Parameters
        ----------
        size : int
            Size of each (squared) point.

        Returns
        -------
        image : (H,W,N) ndarray
            Image in which the keypoints are marked. H is the height,
            defined in KeypointsOnImage.shape[0] (analogous W). N is the
            number of keypoints.

        """
        do_assert(len(self.keypoints) > 0)
        height, width = self.shape[0:2]
        image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
        # size must be odd so that the square is centered on the keypoint
        do_assert(size % 2 != 0)
        sizeh = max(0, (size-1)//2)
        for i, keypoint in enumerate(self.keypoints):
            # TODO for float values spread activation over several cells
            # here and do voting at the end
            y = keypoint.y_int
            x = keypoint.x_int
            x1 = np.clip(x - sizeh, 0, width-1)
            x2 = np.clip(x + sizeh + 1, 0, width)
            y1 = np.clip(y - sizeh, 0, height-1)
            y2 = np.clip(y + sizeh + 1, 0, height)
            if x1 < x2 and y1 < y2:
                # surrounding square gets a lower intensity than the center
                image[y1:y2, x1:x2, i] = 128
            if 0 <= y < height and 0 <= x < width:
                image[y, x, i] = 255
        return image

    @staticmethod
    def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1, nb_channels=None): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Converts an image generated by ``to_keypoint_image()`` back to a KeypointsOnImage object.

        Parameters
        ----------
        image : (H,W,N) ndarray
            The keypoints image. N is the number of keypoints.

        if_not_found_coords : tuple or list or dict or None, optional
            Coordinates to use for keypoints that cannot be found in `image`.
            If this is a list/tuple, it must have two integer values.
            If it is a dictionary, it must have the keys ``x`` and ``y`` with
            each containing one integer value.
            If this is None, then the keypoint will not be added to the final
            KeypointsOnImage object.

        threshold : int, optional
            The search for keypoints works by searching for the argmax in
            each channel. This parameters contains the minimum value that
            the max must have in order to be viewed as a keypoint.

        nb_channels : None or int, optional
            Number of channels of the image on which the keypoints are placed.
            Some keypoint augmenters require that information.
            If set to None, the keypoint's shape will be set
            to ``(height, width)``, otherwise ``(height, width, nb_channels)``.

        Returns
        -------
        out : KeypointsOnImage
            The extracted keypoints.

        """
        do_assert(len(image.shape) == 3)
        height, width, nb_keypoints = image.shape

        drop_if_not_found = False
        if if_not_found_coords is None:
            drop_if_not_found = True
            if_not_found_x = -1
            if_not_found_y = -1
        elif isinstance(if_not_found_coords, (tuple, list)):
            do_assert(len(if_not_found_coords) == 2)
            if_not_found_x = if_not_found_coords[0]
            if_not_found_y = if_not_found_coords[1]
        elif isinstance(if_not_found_coords, dict):
            if_not_found_x = if_not_found_coords["x"]
            if_not_found_y = if_not_found_coords["y"]
        else:
            raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
                type(if_not_found_coords),))

        keypoints = []
        for i in sm.xrange(nb_keypoints):
            maxidx_flat = np.argmax(image[..., i])
            maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
            found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
            if found:
                keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
            else:
                if drop_if_not_found:
                    pass # dont add the keypoint to the result list, i.e. drop it
                else:
                    keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))

        out_shape = (height, width)
        if nb_channels is not None:
            out_shape += (nb_channels,)
        return KeypointsOnImage(keypoints, shape=out_shape)

    def to_distance_maps(self, inverted=False):
        """
        Generates a ``(H,W,K)`` output containing ``K`` distance maps for ``K`` keypoints.

        The k-th distance map contains at every location ``(y, x)`` the euclidean distance to the k-th keypoint.

        This function can be used as a helper when augmenting keypoints with a method that only supports
        the augmentation of images.

        Parameters
        ----------
        inverted : bool, optional
            If True, inverted distance maps are returned where each distance value d is replaced
            by ``d/(d+1)``, i.e. the distance maps have values in the range ``(0.0, 1.0]`` with 1.0
            denoting exactly the position of the respective keypoint.

        Returns
        -------
        distance_maps : (H,W,K) ndarray
            A ``float32`` array containing ``K`` distance maps for ``K`` keypoints. Each location
            ``(y, x, k)`` in the array denotes the euclidean distance at ``(y, x)`` to the ``k``-th keypoint.
            In inverted mode the distance ``d`` is replaced by ``d/(d+1)``. The height and width
            of the array match the height and width in ``KeypointsOnImage.shape``.

        """
        do_assert(len(self.keypoints) > 0)
        height, width = self.shape[0:2]
        distance_maps = np.zeros((height, width, len(self.keypoints)), dtype=np.float32)

        yy = np.arange(0, height)
        xx = np.arange(0, width)
        grid_xx, grid_yy = np.meshgrid(xx, yy)

        for i, keypoint in enumerate(self.keypoints):
            y, x = keypoint.y, keypoint.x
            distance_maps[:, :, i] = (grid_xx - x) ** 2 + (grid_yy - y) ** 2
        distance_maps = np.sqrt(distance_maps)
        if inverted:
            return 1/(distance_maps+1)
        return distance_maps

    # TODO add option to if_not_found_coords to reuse old keypoint coords
    @staticmethod
    def from_distance_maps(distance_maps, inverted=False, if_not_found_coords={"x": -1, "y": -1}, threshold=None, # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
                           nb_channels=None):
        """
        Converts maps generated by ``to_distance_maps()`` back to a KeypointsOnImage object.

        Parameters
        ----------
        distance_maps : (H,W,N) ndarray
            The distance maps. N is the number of keypoints.

        inverted : bool, optional
            Whether the given distance maps were generated in inverted or normal mode.

        if_not_found_coords : tuple or list or dict or None, optional
            Coordinates to use for keypoints that cannot be found in ``distance_maps``.
            If this is a list/tuple, it must have two integer values.
            If it is a dictionary, it must have the keys ``x`` and ``y``, with each
            containing one integer value.
            If this is None, then the keypoint will not be added to the final
            KeypointsOnImage object.

        threshold : float, optional
            The search for keypoints works by searching for the argmin (non-inverted) or
            argmax (inverted) in each channel. This parameters contains the maximum (non-inverted)
            or minimum (inverted) value to accept in order to view a hit as a keypoint.
            Use None to use no min/max.

        nb_channels : None or int, optional
            Number of channels of the image on which the keypoints are placed.
            Some keypoint augmenters require that information.
            If set to None, the keypoint's shape will be set
            to ``(height, width)``, otherwise ``(height, width, nb_channels)``.

        Returns
        -------
        imgaug.KeypointsOnImage
            The extracted keypoints.

        """
        do_assert(len(distance_maps.shape) == 3)
        height, width, nb_keypoints = distance_maps.shape

        drop_if_not_found = False
        if if_not_found_coords is None:
            drop_if_not_found = True
            if_not_found_x = -1
            if_not_found_y = -1
        elif isinstance(if_not_found_coords, (tuple, list)):
            do_assert(len(if_not_found_coords) == 2)
            if_not_found_x = if_not_found_coords[0]
            if_not_found_y = if_not_found_coords[1]
        elif isinstance(if_not_found_coords, dict):
            if_not_found_x = if_not_found_coords["x"]
            if_not_found_y = if_not_found_coords["y"]
        else:
            raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
                type(if_not_found_coords),))

        keypoints = []
        for i in sm.xrange(nb_keypoints):
            # TODO introduce voting here among all distance values that have min/max values
            if inverted:
                hitidx_flat = np.argmax(distance_maps[..., i])
            else:
                hitidx_flat = np.argmin(distance_maps[..., i])
            hitidx_ndim = np.unravel_index(hitidx_flat, (height, width))
            if not inverted and threshold is not None:
                found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] < threshold)
            elif inverted and threshold is not None:
                found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] >= threshold)
            else:
                found = True
            if found:
                keypoints.append(Keypoint(x=hitidx_ndim[1], y=hitidx_ndim[0]))
            else:
                if drop_if_not_found:
                    pass # dont add the keypoint to the result list, i.e. drop it
                else:
                    keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))

        out_shape = (height, width)
        if nb_channels is not None:
            out_shape += (nb_channels,)
        return KeypointsOnImage(keypoints, shape=out_shape)

    def copy(self):
        """
        Create a shallow copy of the KeypointsOnImage object.

        Returns
        -------
        imgaug.KeypointsOnImage
            Shallow copy.

        """
        return copy.copy(self)

    def deepcopy(self):
        """
        Create a deep copy of the KeypointsOnImage object.

        Returns
        -------
        imgaug.KeypointsOnImage
            Deep copy.

        """
        # for some reason deepcopy is way slower here than manual copy
        kps = [Keypoint(x=kp.x, y=kp.y) for kp in self.keypoints]
        return KeypointsOnImage(kps, tuple(self.shape))

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "KeypointsOnImage(%s, shape=%s)" % (str(self.keypoints), self.shape)
# TODO functions: square(), to_aspect_ratio(), contains_point()
class BoundingBox(object):
"""
Class representing bounding boxes.
Each bounding box is parameterized by its top left and bottom right corners. Both are given
as x and y-coordinates. The corners are intended to lie inside the bounding box area.
As a result, a bounding box that lies completely inside the image but has maximum extensions
would have coordinates ``(0.0, 0.0)`` and ``(W - epsilon, H - epsilon)``. Note that coordinates
are saved internally as floats.
Parameters
----------
x1 : number
X-coordinate of the top left of the bounding box.
y1 : number
Y-coordinate of the top left of the bounding box.
x2 : number
X-coordinate of the bottom right of the bounding box.
y2 : number
Y-coordinate of the bottom right of the bounding box.
label : None or str, optional
Label of the bounding box, e.g. a string representing the class.
"""
def __init__(self, x1, y1, x2, y2, label=None):
"""Create a new BoundingBox instance."""
if x1 > x2:
x2, x1 = x1, x2
do_assert(x2 >= x1)
if y1 > y2:
y2, y1 = y1, y2
do_assert(y2 >= y1)
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.label = label
@property
def x1_int(self):
"""
Return the x-coordinate of the top left corner as an integer.
Returns
-------
int
X-coordinate of the top left corner, rounded to the closest integer.
"""
return int(np.round(self.x1)) # use numpy's round to have consistent behaviour between python versions
@property
def y1_int(self):
"""
Return the y-coordinate of the top left corner as an integer.
Returns
-------
int
Y-coordinate of the top left corner, rounded to the closest integer.
"""
return int(np.round(self.y1)) # use numpy's round to have consistent behaviour between python versions
@property
def x2_int(self):
"""
Return the x-coordinate of the bottom left corner as an integer.
Returns
-------
int
X-coordinate of the bottom left corner, rounded to the closest integer.
"""
return int(np.round(self.x2)) # use numpy's round to have consistent behaviour between python versions
@property
def y2_int(self):
"""
Return the y-coordinate of the bottom left corner as an integer.
Returns
-------
int
Y-coordinate of the bottom left corner, rounded to the closest integer.
"""
return int(np.round(self.y2)) # use numpy's round to have consistent behaviour between python versions
@property
def height(self):
"""
Estimate the height of the bounding box.
Returns
-------
number
Height of the bounding box.
"""
return self.y2 - self.y1
@property
def width(self):
"""
Estimate the width of the bounding box.
Returns
-------
number
Width of the bounding box.
"""
return self.x2 - self.x1
@property
def center_x(self):
"""
Estimate the x-coordinate of the center point of the bounding box.
Returns
-------
number
X-coordinate of the center point of the bounding box.
"""
return self.x1 + self.width/2
@property
def center_y(self):
"""
Estimate the y-coordinate of the center point of the bounding box.
Returns
-------
number
Y-coordinate of the center point of the bounding box.
"""
return self.y1 + self.height/2
@property
def area(self):
"""
Estimate the area of the bounding box.
Returns
-------
number
Area of the bounding box, i.e. `height * width`.
"""
return self.height * self.width
def contains(self, other):
"""
Estimate whether the bounding box contains a point.
Parameters
----------
other : imgaug.Keypoint
Point to check for.
Returns
-------
bool
True if the point is contained in the bounding box, False otherwise.
"""
x, y = other.x, other.y
return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2
def project(self, from_shape, to_shape):
"""
Project the bounding box onto a new position on a new image.
E.g. if the bounding box is on its original image at
x1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto
a new image with size (width=200, height=200), its new position will
be (x1=20, y1=40). (Analogous for x2/y2.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
out : imgaug.BoundingBox
BoundingBox object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return self.copy()
else:
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
do_assert(from_height > 0)
do_assert(from_width > 0)
do_assert(to_height > 0)
do_assert(to_width > 0)
x1 = (self.x1 / from_width) * to_width
y1 = (self.y1 / from_height) * to_height
x2 = (self.x2 / from_width) * to_width
y2 = (self.y2 / from_height) * to_height
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
"""
Extend the size of the bounding box along its sides.
Parameters
----------
all_sides : number, optional
Value by which to extend the bounding box size along all sides.
top : number, optional
Value by which to extend the bounding box size along its top side.
right : number, optional
Value by which to extend the bounding box size along its right side.
bottom : number, optional
Value by which to extend the bounding box size along its bottom side.
left : number, optional
Value by which to extend the bounding box size along its left side.
Returns
-------
imgaug.BoundingBox
Extended bounding box.
"""
return BoundingBox(
x1=self.x1 - all_sides - left,
x2=self.x2 + all_sides + right,
y1=self.y1 - all_sides - top,
y2=self.y2 + all_sides + bottom
)
def intersection(self, other, default=None):
"""
Compute the intersection bounding box of this bounding box and another one.
Note that in extreme cases, the intersection can be a single point, meaning that the intersection bounding box
will exist, but then also has a height and width of zero.
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to generate the intersection.
default : any, optional
Default value to return if there is no intersection.
Returns
-------
imgaug.BoundingBox or any
Intersection bounding box of the two bounding boxes if there is an intersection.
If there is no intersection, the default value will be returned, which can by anything.
"""
x1_i = max(self.x1, other.x1)
y1_i = max(self.y1, other.y1)
x2_i = min(self.x2, other.x2)
y2_i = min(self.y2, other.y2)
if x1_i > x2_i or y1_i > y2_i:
return default
else:
return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)
def union(self, other):
"""
Compute the union bounding box of this bounding box and another one.
This is equivalent to drawing a bounding box around all corners points of both
bounding boxes.
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to generate the union.
Returns
-------
imgaug.BoundingBox
Union bounding box of the two bounding boxes.
"""
return BoundingBox(
x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2),
)
def iou(self, other):
"""
Compute the IoU of this bounding box with another one.
IoU is the intersection over union, defined as::
``area(intersection(A, B)) / area(union(A, B))``
``= area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))``
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to compare.
Returns
-------
float
IoU between the two bounding boxes.
"""
inters = self.intersection(other)
if inters is None:
return 0.0
else:
area_union = self.area + other.area - inters.area
return inters.area / area_union if area_union > 0 else 0.0
def is_fully_within_image(self, image):
"""
Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is fully inside the image area. False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
return self.x1 >= 0 and self.x2 < width and self.y1 >= 0 and self.y2 < height
def is_partly_within_image(self, image):
"""
Estimate whether the bounding box is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is at least partially inside the image area. False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
eps = np.finfo(np.float32).eps
img_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps)
return self.intersection(img_bb) is not None
def is_out_of_image(self, image, fully=True, partly=False):
"""
Estimate whether the bounding box is partially or fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape and must contain at least two integers.
fully : bool, optional
Whether to return True if the bounding box is fully outside fo the image area.
partly : bool, optional
Whether to return True if the bounding box is at least partially outside fo the
image area.
Returns
-------
bool
True if the bounding box is partially/fully outside of the image area, depending
on defined parameters. False otherwise.
"""
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
else:
return fully
def cut_out_of_image(self, image):
"""
Cut off all parts of the bounding box that are outside of the image.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use for the clipping of the bounding box.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
result : imgaug.BoundingBox
Bounding box, clipped to fall within the image dimensions.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
do_assert(height > 0)
do_assert(width > 0)
eps = np.finfo(np.float32).eps
x1 = np.clip(self.x1, 0, width - eps)
x2 = np.clip(self.x2, 0, width - eps)
y1 = np.clip(self.y1, 0, height - eps)
y2 = np.clip(self.y2, 0, height - eps)
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift the bounding box from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the bounding box from the top.
right : None or int, optional
Amount of pixels by which to shift the bounding box from the right.
bottom : None or int, optional
Amount of pixels by which to shift the bounding box from the bottom.
left : None or int, optional
Amount of pixels by which to shift the bounding box from the left.
Returns
-------
result : imgaug.BoundingBox
Shifted bounding box.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
)
# TODO add explicit test for zero-sized BBs (worked when tested by hand)
    def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
        """
        Draw the bounding box on an image.
        Parameters
        ----------
        image : (H,W,C) ndarray(uint8)
            The image onto which to draw the bounding box.
        color : iterable of int, optional
            The color to use, corresponding to the channel layout of the image. Usually RGB.
        alpha : float, optional
            The transparency of the drawn bounding box, where 1.0 denotes no transparency and
            0.0 is invisible.
        thickness : int, optional
            The thickness of the bounding box in pixels. If the value is larger than 1, then
            additional pixels will be added around the bounding box (i.e. extension towards the
            outside).
        copy : bool, optional
            Whether to copy the input image or change it in-place.
        raise_if_out_of_image : bool, optional
            Whether to raise an error if the bounding box is partially/fully outside of the
            image. If set to False, no error will be raised and only the parts inside the image
            will be drawn.
        Returns
        -------
        result : (H,W,C) ndarray(uint8)
            Image with bounding box drawn on it.
        """
        if raise_if_out_of_image and self.is_out_of_image(image):
            raise Exception("Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s." % (
                self.x1, self.y1, self.x2, self.y2, image.shape))
        result = np.copy(image) if copy else image
        if isinstance(color, (tuple, list)):
            # convert to an ndarray so that the blending arithmetic below works
            color = np.uint8(color)
        # each iteration draws one 1px-wide rectangle, expanded outwards by i pixels
        for i in range(thickness):
            y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int
            # When y values get into the range (H-0.5, H), the *_int functions round them to H.
            # That is technically sensible, but in the case of drawing means that the border lies
            # just barely outside of the image, making the border disappear, even though the BB
            # is fully inside the image. Here we correct for that because of beauty reasons.
            # Same is the case for x coordinates.
            if self.is_fully_within_image(image):
                y1 = np.clip(y1, 0, image.shape[0]-1)
                y2 = np.clip(y2, 0, image.shape[0]-1)
                x1 = np.clip(x1, 0, image.shape[1]-1)
                x2 = np.clip(x2, 0, image.shape[1]-1)
            # corner order: top left, top right, bottom right, bottom left
            y = [y1-i, y1-i, y2+i, y2+i]
            x = [x1-i, x2+i, x2+i, x1-i]
            # polygon_perimeter clips the coordinates to the given shape,
            # so parts outside the image are silently dropped
            rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
            if alpha >= 0.99:
                # (near-)opaque: overwrite directly, no blending needed
                result[rr, cc, :] = color
            else:
                if is_float_array(result):
                    # blend in float space; keep values in uint8 display range
                    result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                    result = np.clip(result, 0, 255)
                else:
                    # blend in float space, then convert back to the input dtype
                    input_dtype = result.dtype
                    result = result.astype(np.float32)
                    result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                    result = np.clip(result, 0, 255).astype(input_dtype)
        return result
    def extract_from_image(self, image, prevent_zero_size=True):
        """
        Extract the image pixels within the bounding box.
        This function will zero-pad the image if the bounding box is partially/fully outside of
        the image.
        Parameters
        ----------
        image : (H,W) ndarray or (H,W,C) ndarray
            The image from which to extract the pixels within the bounding box.
        prevent_zero_size : bool, optional
            Whether to prevent height or width of the extracted image from becoming zero.
            If this is set to True and height or width of the bounding box is below 1, the height/width will
            be increased to 1. This can be useful to prevent problems, e.g. with image saving or plotting.
            If it is set to False, images will be returned as ``(H', W')`` or ``(H', W', 3)`` with ``H`` or
            ``W`` potentially being 0.
        Returns
        -------
        image : (H',W') ndarray or (H',W',C) ndarray
            Pixels within the bounding box. Zero-padded if the bounding box is partially/fully
            outside of the image. If prevent_zero_size is activated, it is guarantueed that ``H'>0``
            and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``.
        """
        # amount of zero-padding needed on each image side (computed below)
        pad_top = 0
        pad_right = 0
        pad_bottom = 0
        pad_left = 0
        height, width = image.shape[0], image.shape[1]
        x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
        # When y values get into the range (H-0.5, H), the *_int functions round them to H.
        # That is technically sensible, but in the case of extraction leads to a black border,
        # which is both ugly and unexpected after calling cut_out_of_image(). Here we correct for
        # that because of beauty reasons.
        # Same is the case for x coordinates.
        if self.is_fully_within_image(image):
            y1 = np.clip(y1, 0, image.shape[0]-1)
            y2 = np.clip(y2, 0, image.shape[0]-1)
            x1 = np.clip(x1, 0, image.shape[1]-1)
            x2 = np.clip(x2, 0, image.shape[1]-1)
        # TODO add test
        if prevent_zero_size:
            # enforce a minimum extent of 1px in each dimension
            if abs(x2 - x1) < 1:
                x2 = x1 + 1
            if abs(y2 - y1) < 1:
                y2 = y1 + 1
        # if the bb is outside of the image area, the following pads the image
        # first with black pixels until the bb is inside the image
        # and only then extracts the image area
        # TODO probably more efficient to initialize an array of zeros
        # and copy only the portions of the bb into that array that are
        # natively inside the image area
        if x1 < 0:
            # shift the x-coordinates right so that x1 becomes 0 and
            # compensate by padding the same amount on the left
            pad_left = abs(x1)
            x2 = x2 + abs(x1)
            x1 = 0
        if y1 < 0:
            # analogous shift for the y-coordinates
            pad_top = abs(y1)
            y2 = y2 + abs(y1)
            y1 = 0
        if x2 >= width:
            pad_right = x2 - (width - 1)
        if y2 >= height:
            pad_bottom = y2 - (height - 1)
        if any([val > 0 for val in [pad_top, pad_right, pad_bottom, pad_left]]):
            # zero-pad; grayscale images have no channel axis to pad
            if len(image.shape) == 2:
                image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right)), mode="constant")
            else:
                image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), mode="constant")
        return image[y1:y2, x1:x2]
# TODO also add to_heatmap
# TODO add this to BoundingBoxesOnImage
def to_keypoints(self):
"""
Convert the corners of the bounding box to keypoints (clockwise, starting at top left).
Returns
-------
list of imgaug.Keypoint
Corners of the bounding box as keypoints.
"""
return [
Keypoint(x=self.x1, y=self.y1),
Keypoint(x=self.x2, y=self.y1),
Keypoint(x=self.x2, y=self.y2),
Keypoint(x=self.x1, y=self.y2)
]
def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a shallow copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not None, then the x1 coordinate of the copied object will be set to this value.
y1 : None or number
If not None, then the y1 coordinate of the copied object will be set to this value.
x2 : None or number
If not None, then the x2 coordinate of the copied object will be set to this value.
y2 : None or number
If not None, then the y2 coordinate of the copied object will be set to this value.
label : None or string
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.BoundingBox
Shallow copy.
"""
return BoundingBox(
x1=self.x1 if x1 is None else x1,
x2=self.x2 if x2 is None else x2,
y1=self.y1 if y1 is None else y1,
y2=self.y2 if y2 is None else y2,
label=self.label if label is None else label
)
def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a deep copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not None, then the x1 coordinate of the copied object will be set to this value.
y1 : None or number
If not None, then the y1 coordinate of the copied object will be set to this value.
x2 : None or number
If not None, then the x2 coordinate of the copied object will be set to this value.
y2 : None or number
If not None, then the y2 coordinate of the copied object will be set to this value.
label : None or string
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.BoundingBox
Deep copy.
"""
return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)
def __repr__(self):
return self.__str__()
def __str__(self):
return "BoundingBox(x1=%.4f, y1=%.4f, x2=%.4f, y2=%.4f, label=%s)" % (
self.x1, self.y1, self.x2, self.y2, self.label)
class BoundingBoxesOnImage(object):
    """
    Object that represents all bounding boxes on a single image.

    Parameters
    ----------
    bounding_boxes : list of imgaug.BoundingBox
        List of bounding boxes on the image.

    shape : tuple of int or ndarray
        The shape of the image on which the bounding boxes are placed.
        May also be the image itself, in which case its ``.shape`` attribute is used.

    Examples
    --------
    >>> bbs = [
    >>>     BoundingBox(x1=10, y1=20, x2=20, y2=30),
    >>>     BoundingBox(x1=25, y1=50, x2=30, y2=70)
    >>> ]
    >>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)

    """

    def __init__(self, bounding_boxes, shape):
        self.bounding_boxes = bounding_boxes
        if is_np_array(shape):
            # an image array was provided instead of a shape tuple
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)

    # TODO remove this? here it is image height at BoundingBox it is bounding box height
    @property
    def height(self):
        """
        Get the height of the image on which the bounding boxes fall.

        Returns
        -------
        int
            Image height.

        """
        return self.shape[0]

    # TODO remove this? here it is image width at BoundingBox it is bounding box width
    @property
    def width(self):
        """
        Get the width of the image on which the bounding boxes fall.

        Returns
        -------
        int
            Image width.

        """
        return self.shape[1]

    @property
    def empty(self):
        """
        Returns whether this object contains zero bounding boxes.

        Returns
        -------
        bool
            True if this object contains zero bounding boxes.

        """
        return len(self.bounding_boxes) == 0

    def on(self, image):
        """
        Project bounding boxes from one image to a new one.

        Parameters
        ----------
        image : ndarray or tuple of int
            New image onto which the bounding boxes are to be projected.
            May also simply be that new image's shape tuple.

        Returns
        -------
        bounding_boxes : imgaug.BoundingBoxesOnImage
            Object containing all projected bounding boxes.

        """
        shape = image.shape if is_np_array(image) else image
        if shape[0:2] == self.shape[0:2]:
            # same height/width -- the projection would be the identity
            return self.deepcopy()
        bounding_boxes = [bb.project(self.shape, shape) for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bounding_boxes, shape)

    @classmethod
    def from_xyxy_array(cls, xyxy, shape):
        """
        Convert an (N,4) ndarray to a BoundingBoxesOnImage object.

        This is the inverse of :func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.

        Parameters
        ----------
        xyxy : (N,4) ndarray
            Array containing the corner coordinates (top-left, bottom-right) of ``N`` bounding boxes
            in the form ``(x1, y1, x2, y2)``. Should usually be of dtype ``float32``.

        shape : tuple of int
            Shape of the image on which the bounding boxes are placed.
            Should usually be ``(H, W, C)`` or ``(H, W)``.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Object containing a list of BoundingBox objects following the provided corner coordinates.

        """
        do_assert(xyxy.shape[1] == 4, "Expected input array of shape (N, 4), got shape %s." % (xyxy.shape,))
        boxes = [BoundingBox(*row) for row in xyxy]
        return cls(boxes, shape)

    def to_xyxy_array(self, dtype=np.float32):
        """
        Convert the BoundingBoxesOnImage object to an (N,4) ndarray.

        This is the inverse of :func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`.

        Parameters
        ----------
        dtype : numpy.dtype, optional
            Desired output datatype of the ndarray.

        Returns
        -------
        ndarray
            (N,4) ndarray, where ``N`` denotes the number of bounding boxes and ``4`` denotes the
            top-left and bottom-right bounding box corner coordinates in form ``(x1, y1, x2, y2)``.

        """
        # fill a float32 buffer first, then cast once to the requested dtype
        xyxy_array = np.zeros((len(self.bounding_boxes), 4), dtype=np.float32)
        for i, box in enumerate(self.bounding_boxes):
            xyxy_array[i] = [box.x1, box.y1, box.x2, box.y2]
        return xyxy_array.astype(dtype)

    def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
        """
        Draw all bounding boxes onto a given image.

        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the bounding boxes.
            This image should usually have the same shape as
            set in BoundingBoxesOnImage.shape.

        color : int or list of int or tuple of int or (3,) ndarray, optional
            The RGB color of all bounding boxes. If a single int ``C``, then that is
            equivalent to ``(C,C,C)``.

        alpha : float, optional
            Alpha/transparency of the bounding box.

        thickness : int, optional
            Thickness in pixels.

        copy : bool, optional
            Whether to copy the image before drawing the bounding boxes.

        raise_if_out_of_image : bool, optional
            Whether to raise an exception if any bounding box is outside of the image.

        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn bounding boxes.

        """
        # Copy the image at most once here instead of once per bounding box;
        # the per-box draw calls below can then safely draw in-place.
        # (With zero boxes the input image is returned unchanged, matching the
        # previous behavior where the loop never copied.)
        if copy and self.bounding_boxes:
            image = np.copy(image)
        for bb in self.bounding_boxes:
            image = bb.draw_on_image(
                image,
                color=color,
                alpha=alpha,
                thickness=thickness,
                copy=False,
                raise_if_out_of_image=raise_if_out_of_image
            )
        return image

    def remove_out_of_image(self, fully=True, partly=False):
        """
        Remove all bounding boxes that are fully or partially outside of the image.

        Parameters
        ----------
        fully : bool, optional
            Whether to remove bounding boxes that are fully outside of the image.

        partly : bool, optional
            Whether to remove bounding boxes that are partially outside of the image.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Reduced set of bounding boxes, with those that were fully/partially outside of
            the image removed.

        """
        bbs_clean = [bb for bb in self.bounding_boxes
                     if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
        return BoundingBoxesOnImage(bbs_clean, shape=self.shape)

    def cut_out_of_image(self):
        """
        Cut off all parts from all bounding boxes that are outside of the image.

        Bounding boxes that are fully outside of the image are dropped entirely.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Bounding boxes, clipped to fall within the image dimensions.

        """
        bbs_cut = [bb.cut_out_of_image(self.shape)
                   for bb in self.bounding_boxes if bb.is_partly_within_image(self.shape)]
        return BoundingBoxesOnImage(bbs_cut, shape=self.shape)

    def shift(self, top=None, right=None, bottom=None, left=None):
        """
        Shift all bounding boxes from one or more image sides, i.e. move them on the x/y-axis.

        Parameters
        ----------
        top : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the top.

        right : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the right.

        bottom : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the bottom.

        left : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the left.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Shifted bounding boxes.

        """
        bbs_new = [bb.shift(top=top, right=right, bottom=bottom, left=left) for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs_new, shape=self.shape)

    def copy(self):
        """
        Create a shallow copy of the BoundingBoxesOnImage object.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Shallow copy.

        """
        return copy.copy(self)

    def deepcopy(self):
        """
        Create a deep copy of the BoundingBoxesOnImage object.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Deep copy.

        """
        # Manual copy is far faster than copy.deepcopy for KeypointsOnImage,
        # so use manual copy here too
        bbs = [bb.deepcopy() for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs, tuple(self.shape))

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "BoundingBoxesOnImage(%s, shape=%s)" % (str(self.bounding_boxes), self.shape)
# TODO somehow merge with BoundingBox
# TODO add functions: simplify() (eg via shapely.ops.simplify()),
# extend(all_sides=0, top=0, right=0, bottom=0, left=0),
# intersection(other, default=None), union(other), iou(other), to_heatmap, to_mask
class Polygon(object):
"""
Class representing polygons.
Each polygon is parameterized by its corner points, given as absolute x- and y-coordinates
with sub-pixel accuracy.
Parameters
----------
exterior : list of imgaug.Keypoint or list of tuple of float or (N,2) ndarray
List of points defining the polygon. May be either a list of Keypoint objects or a list of tuples in xy-form
or a numpy array of shape (N,2) for N points in xy-form.
All coordinates are expected to be the absolute coordinates in the image, given as floats, e.g. x=10.7
and y=3.4 for a point at coordinates (10.7, 3.4). Their order is expected to be clock-wise. They are expected
to not be closed (i.e. first and last coordinate differ).
label : None or str, optional
Label of the polygon, e.g. a string representing the class.
"""
def __init__(self, exterior, label=None):
"""Create a new Polygon instance."""
if isinstance(exterior, list):
if not exterior:
# for empty lists, make sure that the shape is (0, 2) and not (0,) as that is also expected when the
# input is a numpy array
self.exterior = np.zeros((0, 2), dtype=np.float32)
elif isinstance(exterior[0], Keypoint):
# list of Keypoint
self.exterior = np.float32([[point.x, point.y] for point in exterior])
else:
# list of tuples (x, y)
self.exterior = np.float32([[point[0], point[1]] for point in exterior])
else:
do_assert(is_np_array(exterior))
do_assert(exterior.ndim == 2)
do_assert(exterior.shape[1] == 2)
self.exterior = np.float32(exterior)
# Remove last point if it is essentially the same as the first point (polygons are always assumed to be
# closed anyways). This also prevents problems with shapely, which seems to add the last point automatically.
if len(self.exterior) >= 2 and np.allclose(self.exterior[0, :], self.exterior[-1, :]):
self.exterior = self.exterior[:-1]
self.label = label
@property
def xx(self):
"""
Return the x-coordinates of all points in the exterior.
Returns
-------
(N,2) ndarray
X-coordinates of all points in the exterior as a float32 ndarray.
"""
return self.exterior[:, 0]
@property
def yy(self):
"""
Return the y-coordinates of all points in the exterior.
Returns
-------
(N,2) ndarray
Y-coordinates of all points in the exterior as a float32 ndarray.
"""
return self.exterior[:, 1]
@property
def xx_int(self):
"""
Return the x-coordinates of all points in the exterior, rounded to the closest integer value.
Returns
-------
(N,2) ndarray
X-coordinates of all points in the exterior, rounded to the closest integer value.
Result dtype is int32.
"""
return np.int32(np.round(self.xx))
@property
def yy_int(self):
"""
Return the y-coordinates of all points in the exterior, rounded to the closest integer value.
Returns
-------
(N,2) ndarray
Y-coordinates of all points in the exterior, rounded to the closest integer value.
Result dtype is int32.
"""
return np.int32(np.round(self.yy))
@property
def is_valid(self):
"""
Estimate whether the polygon has a valid shape.
To to be considered valid, the polygons must be made up of at least 3 points and have concave shape.
Multiple consecutive points are allowed to have the same coordinates.
Returns
-------
bool
True if polygon has at least 3 points and is concave, otherwise False.
"""
if len(self.exterior) < 3:
return False
return self.to_shapely_polygon().is_valid
@property
def area(self):
"""
Estimate the area of the polygon.
Returns
-------
number
Area of the polygon.
"""
if len(self.exterior) < 3:
raise Exception("Cannot compute the polygon's area because it contains less than three points.")
poly = self.to_shapely_polygon()
return poly.area
def project(self, from_shape, to_shape):
"""
Project the polygon onto an image with different shape.
The relative coordinates of all points remain the same.
E.g. a point at (x=20, y=20) on an image (width=100, height=200) will be
projected on a new image (width=200, height=100) to (x=40, y=10).
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
imgaug.Polygon
Polygon object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return self.copy()
exterior = [Keypoint(x=x, y=y).project(from_shape, to_shape) for x, y in self.exterior]
return self.copy(exterior=exterior)
def find_closest_point_index(self, x, y, return_distance=False):
"""
Find the index of the point within the exterior that is closest to the given coordinates.
"Closeness" is here defined based on euclidean distance.
This method will raise an AssertionError if the exterior contains no points.
Parameters
----------
x : number
X-coordinate around which to search for close points.
y : number
Y-coordinate around which to search for close points.
return_distance : bool, optional
Whether to also return the distance of the closest point.
Returns
-------
int
Index of the closest point.
number
Euclidean distance to the the closest point.
This value is only returned if `return_distance` was set to True.
"""
do_assert(len(self.exterior) > 0)
distances = []
for x2, y2 in self.exterior:
d = (x2 - x) ** 2 + (y2 - y) ** 2
distances.append(d)
distances = np.sqrt(distances)
closest_idx = np.argmin(distances)
if return_distance:
return closest_idx, distances[closest_idx]
return closest_idx
def _compute_inside_image_point_mask(self, image):
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
h, w = shape[0:2]
return np.logical_and(
np.logical_and(0 <= self.exterior[:, 0], self.exterior[:, 0] < w),
np.logical_and(0 <= self.exterior[:, 1], self.exterior[:, 1] < h)
)
# TODO keep this method? it is almost an alias for is_out_of_image()
def is_fully_within_image(self, image):
"""
Estimate whether the polygon is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
bool
True if the polygon is fully inside the image area.
False otherwise.
"""
return not self.is_out_of_image(image, fully=True, partly=True)
# TODO keep this method? it is almost an alias for is_out_of_image()
def is_partly_within_image(self, image):
"""
Estimate whether the polygon is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
bool
True if the polygon is at least partially inside the image area.
False otherwise.
"""
return not self.is_out_of_image(image, fully=True, partly=False)
def is_out_of_image(self, image, fully=True, partly=False):
"""
Estimate whether the polygon is partially or fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
fully : bool, optional
Whether to return True if the polygon is fully outside fo the image area.
partly : bool, optional
Whether to return True if the polygon is at least partially outside fo the image area.
Returns
-------
bool
True if the polygon is partially/fully outside of the image area, depending
on defined parameters. False otherwise.
"""
if len(self.exterior) == 0:
raise Exception("Cannot determine whether the polygon is inside the image, because it contains no points.")
inside = self._compute_inside_image_point_mask(image)
nb_inside = sum(inside)
if nb_inside == len(inside):
return False
elif nb_inside > 0:
return partly
else:
return fully
# TODO mark as deprecated
# TODO rename cut_* to clip_* in BoundingBox
    def cut_out_of_image(self, image):
        """Alias for :func:`imgaug.Polygon.clip_out_of_image` (see there for details).

        Kept for backward compatibility; slated for deprecation per the TODO above.
        """
        return self.clip_out_of_image(image)
def clip_out_of_image(self, image):
"""
Cut off all parts of the polygon that are outside of the image.
This operation may lead to new points being created.
As a single polygon may be split into multiple new polygons, the result is a MultiPolygon.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use for the clipping of the polygon.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
imgaug.MultiPolygon
Polygon, clipped to fall within the image dimensions.
Returned as MultiPolygon, because the clipping can split the polygon into multiple parts.
"""
# if fully out of image, clip everything away, nothing remaining
if self.is_out_of_image(image, fully=True, partly=False):
return MultiPolygon([])
h, w = image.shape[0:2]
poly_shapely = self.to_shapely_polygon()
poly_image = shapely.geometry.Polygon([(0, 0), (w, 0), (w, h), (0, h)])
multipoly_inter_shapely = poly_shapely.intersection(poly_image)
if not isinstance(multipoly_inter_shapely, shapely.geometry.MultiPolygon):
do_assert(isinstance(multipoly_inter_shapely, shapely.geometry.Polygon))
multipoly_inter_shapely = shapely.geometry.MultiPolygon([multipoly_inter_shapely])
polygons = []
for poly_inter_shapely in multipoly_inter_shapely.geoms:
polygons.append(Polygon.from_shapely(poly_inter_shapely, label=self.label))
# shapely changes the order of points, we try here to preserve it as good as possible
polygons_reordered = []
for polygon in polygons:
found = False
for x, y in self.exterior:
closest_idx, dist = polygon.find_closest_point_index(x=x, y=y, return_distance=True)
if dist < 1e-6:
polygon_reordered = polygon.change_first_point_by_index(closest_idx)
polygons_reordered.append(polygon_reordered)
found = True
break
do_assert(found) # could only not find closest points if new polys are empty
return MultiPolygon(polygons_reordered)
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift the polygon from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the polygon from the top.
right : None or int, optional
Amount of pixels by which to shift the polygon from the right.
bottom : None or int, optional
Amount of pixels by which to shift the polygon from the bottom.
left : None or int, optional
Amount of pixels by which to shift the polygon from the left.
Returns
-------
imgaug.Polygon
Shifted polygon.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
exterior = np.copy(self.exterior)
exterior[:, 0] += (left - right)
exterior[:, 1] += (top - bottom)
return self.deepcopy(exterior=exterior)
# TODO add boundary thickness
    def draw_on_image(self,
                      image,
                      color=(0, 255, 0), color_perimeter=(0, 128, 0),
                      alpha=0.5, alpha_perimeter=1.0,
                      raise_if_out_of_image=False):
        """
        Draw the polygon on an image.

        Parameters
        ----------
        image : (H,W,C) ndarray
            The image onto which to draw the polygon. Usually expected to be of dtype uint8, though other dtypes
            are also handled.

        color : iterable of int, optional
            The color to use for the polygon (excluding perimeter). Must correspond to the channel layout of the
            image. Usually RGB.

        color_perimeter : iterable of int, optional
            The color to use for the perimeter/border of the polygon. Must correspond to the channel layout of the
            image. Usually RGB.

        alpha : float, optional
            The transparency of the polygon (excluding the perimeter), where 1.0 denotes no transparency and 0.0 is
            invisible.

        alpha_perimeter : float, optional
            The transparency of the polygon's perimeter/border, where 1.0 denotes no transparency and 0.0 is
            invisible.

        raise_if_out_of_image : bool, optional
            Whether to raise an error if the polygon is partially/fully outside of the
            image. If set to False, no error will be raised and only the parts inside the image
            will be drawn.

        Returns
        -------
        result : (H,W,C) ndarray
            Image with polygon drawn on it. Result dtype is the same as the input dtype.

        """
        # TODO separate this into draw_face_on_image() and draw_border_on_image()
        if raise_if_out_of_image and self.is_out_of_image(image):
            raise Exception("Cannot draw polygon %s on image with shape %s." % (
                str(self), image.shape
            ))
        # integer pixel coordinates of the exterior
        xx = self.xx_int
        yy = self.yy_int
        # TODO np.clip to image plane if is_fully_within_image(), similar to how it is done for bounding boxes
        # TODO improve efficiency by only drawing in rectangle that covers poly instead of drawing in the whole image
        # TODO for a rectangular polygon, the face coordinates include the top/left boundary but not the right/bottom
        # boundary. This may be unintuitive when not drawing the boundary. Maybe somehow remove the boundary
        # coordinates from the face coordinates after generating both?
        # `shape=image.shape` makes skimage clip coordinates to the image plane
        rr, cc = skimage.draw.polygon(yy, xx, shape=image.shape)
        rr_perimeter, cc_perimeter = skimage.draw.polygon_perimeter(yy, xx, shape=image.shape)
        params = (rr, cc, color, alpha)
        params_perimeter = (rr_perimeter, cc_perimeter, color_perimeter, alpha_perimeter)
        # blend in float32 and convert back to the input dtype at the end
        input_dtype = image.dtype
        result = image.astype(np.float32)
        # NOTE: the loop deliberately rebinds rr/cc/color/alpha, shadowing the
        # face values bound above; first pass draws the face, second the perimeter.
        for rr, cc, color, alpha in [params, params_perimeter]:
            color = np.float32(color)
            # alpha near 1 -> plain overwrite, near 0 -> no-op, otherwise alpha-blend
            if alpha >= 0.99:
                result[rr, cc, :] = color
            elif alpha < 1e-4:
                pass # invisible, do nothing
            else:
                result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
        if input_dtype.type == np.uint8:
            result = np.clip(result, 0, 255).astype(input_dtype)  # TODO make clipping more flexible
        else:
            result = result.astype(input_dtype)
        return result
def extract_from_image(self, image):
"""
Extract the image pixels within the polygon.
This function will zero-pad the image if the polygon is partially/fully outside of
the image.
Parameters
----------
image : (H,W) ndarray or (H,W,C) ndarray
The image from which to extract the pixels within the polygon.
Returns
-------
result : (H',W') ndarray or (H',W',C) ndarray
Pixels within the polygon. Zero-padded if the polygon is partially/fully
outside of the image.
"""
do_assert(image.ndim in [2, 3])
if len(self.exterior) <= 2:
raise Exception("Polygon must be made up of at least 3 points to extract its area from an image.")
bb = self.to_bounding_box()
bb_area = bb.extract_from_image(image)
if self.is_out_of_image(image, fully=True, partly=False):
return bb_area
xx = self.xx_int
yy = self.yy_int
xx_mask = xx - np.min(xx)
yy_mask = yy - np.min(yy)
height_mask = np.max(yy_mask)
width_mask = np.max(xx_mask)
rr_face, cc_face = skimage.draw.polygon(yy_mask, xx_mask, shape=(height_mask, width_mask))
mask = np.zeros((height_mask, width_mask), dtype=np.bool)
mask[rr_face, cc_face] = True
if image.ndim == 3:
mask = np.tile(mask[:, :, np.newaxis], (1, 1, image.shape[2]))
return bb_area * mask
def change_first_point_by_coords(self, x, y, max_distance=1e-4):
"""
Set the first point of the exterior to the given point based on its coordinates.
If multiple points are found, the closest one will be picked.
If no matching points are found, an exception is raised.
Note: This method does *not* work in-place.
Parameters
----------
x : number
X-coordinate of the point.
y : number
Y-coordinate of the point.
max_distance : number
Maximum distance past which possible matches are ignored.
Returns
-------
imgaug.Polygon
Copy of this polygon with the new point order.
"""
if len(self.exterior) == 0:
raise Exception("Cannot reorder polygon points, because it contains no points.")
closest_idx, closest_dist = self.find_closest_point_index(x=x, y=y, return_distance=True)
if max_distance is not None and closest_dist > max_distance:
closest_point = self.exterior[closest_idx, :]
raise Exception(
"Closest found point (%.9f, %.9f) exceeds max_distance of %.9f exceeded" % (
closest_point[0], closest_point[1], closest_dist)
)
return self.change_first_point_by_index(closest_idx)
def change_first_point_by_index(self, point_idx):
"""
Set the first point of the exterior to the given point based on its index.
Note: This method does *not* work in-place.
Parameters
----------
point_idx : int
Index of the desired starting point.
Returns
-------
imgaug.Polygon
Copy of this polygon with the new point order.
"""
do_assert(0 <= point_idx < len(self.exterior))
if point_idx == 0:
return self.deepcopy()
exterior = np.concatenate(
(self.exterior[point_idx:, :], self.exterior[:point_idx, :]),
axis=0
)
return self.deepcopy(exterior=exterior)
def to_shapely_polygon(self):
"""
Convert this polygon to a Shapely polygon.
Returns
-------
shapely.geometry.Polygon
The Shapely polygon matching this polygon's exterior.
"""
return shapely.geometry.Polygon([(point[0], point[1]) for point in self.exterior])
    def to_shapely_line_string(self, closed=False, interpolate=0):
        """
        Convert this polygon to a Shapely LineString object.

        Parameters
        ----------
        closed : bool, optional
            Whether to return the line string with the last point being identical to the first point.

        interpolate : int, optional
            Number of points to interpolate between any pair of two consecutive points. These points are added
            to the final line string.

        Returns
        -------
        shapely.geometry.LineString
            The Shapely LineString matching the polygon's exterior.

        """
        # thin wrapper; the module-level helper does the actual conversion/interpolation
        return _convert_points_to_shapely_line_string(self.exterior, closed=closed, interpolate=interpolate)
def to_bounding_box(self):
"""
Convert this polygon to a bounding box tightly containing the whole polygon.
Returns
-------
imgaug.BoundingBox
The bounding box tightly containing the polygon.
"""
xx = self.xx
yy = self.yy
return BoundingBox(x1=min(xx), x2=max(xx), y1=min(yy), y2=max(yy), label=self.label)
@staticmethod
def from_shapely(polygon_shapely, label=None):
"""
Create a polygon from a Shapely polygon.
Note: This will remove any holes in the Shapely polygon.
Parameters
----------
polygon_shapely : shapely.geometry.Polygon
The shapely polygon.
label : None or str, optional
The label of the new polygon.
Returns
-------
imgaug.Polygon
A polygon with the same exterior as the Shapely polygon.
"""
do_assert(isinstance(polygon_shapely, shapely.geometry.Polygon))
# polygon_shapely.exterior can be None if the polygon was instantiated without points
if polygon_shapely.exterior is None or len(polygon_shapely.exterior.coords) == 0:
return Polygon([], label=label)
exterior = np.float32([[x, y] for (x, y) in polygon_shapely.exterior.coords])
return Polygon(exterior, label=label)
    def exterior_almost_equals(self, other_polygon, max_distance=1e-6, interpolate=8):
        """
        Estimate whether the geometry of the exterior of this polygon and another polygon are comparable.

        The two exteriors can have different numbers of points, but any point randomly sampled on the exterior
        of one polygon should be close to the closest point on the exterior of the other polygon.

        Note that this method works approximately. One can come up with polygons with fairly different shapes that
        will still be estimated as equal by this method. In practice however this should be unlikely to be the case.
        The probability for something like that goes down as the interpolation parameter is increased.

        Parameters
        ----------
        other_polygon : imgaug.Polygon or (N,2) ndarray
            The other polygon with which to compare the exterior.
            If this is an ndarray, it is assumed to represent an exterior.
            It must then have dtype float32 and shape (N,2) with the second dimension denoting xy-coordinates.

        max_distance : number
            The maximum euclidean distance between a point on one polygon and the closest point on the other polygon.
            If the distance is exceeded for any such pair, the two exteriors are not viewed as equal.
            The points are either the points contained in the polygon's exterior ndarray or interpolated points
            between these.

        interpolate : int
            How many points to interpolate between the points of the polygon's exteriors.
            If this is set to zero, then only the points given by the polygon's exterior ndarrays will be used.
            Higher values make it less likely that unequal polygons are evaluated as equal.

        Returns
        -------
        bool
            Whether the two polygon's exteriors can be viewed as equal (approximate test).

        """
        atol = max_distance
        ext_a = self.exterior
        ext_b = other_polygon.exterior if not is_np_array(other_polygon) else other_polygon
        len_a = len(ext_a)
        len_b = len(ext_b)
        # trivial cases: both empty -> equal; exactly one empty -> unequal
        if len_a == 0 and len_b == 0:
            return True
        elif len_a == 0 and len_b > 0:
            return False
        elif len_a > 0 and len_b == 0:
            return False
        # neither A nor B is zero-sized at this point
        # if A or B only contain points identical to the first point, merge them to one point
        if len_a > 1:
            if all([np.allclose(ext_a[0, :], ext_a[1 + i, :], rtol=0, atol=atol) for i in sm.xrange(len_a - 1)]):
                ext_a = ext_a[0:1, :]
                len_a = 1
        if len_b > 1:
            if all([np.allclose(ext_b[0, :], ext_b[1 + i, :], rtol=0, atol=atol) for i in sm.xrange(len_b - 1)]):
                ext_b = ext_b[0:1, :]
                len_b = 1
        # handle polygons that contain a single point
        # (a polygon degenerated to a single point equals the other exterior only if
        # every point of the other exterior collapses onto that point)
        if len_a == 1 and len_b == 1:
            return np.allclose(ext_a[0, :], ext_b[0, :], rtol=0, atol=atol)
        elif len_a == 1:
            return all([np.allclose(ext_a[0, :], ext_b[i, :], rtol=0, atol=atol) for i in sm.xrange(len_b)])
        elif len_b == 1:
            return all([np.allclose(ext_b[0, :], ext_a[i, :], rtol=0, atol=atol) for i in sm.xrange(len_a)])
        # After this point, both polygons have at least 2 points, i.e. LineStrings can be used.
        # We can also safely go back to the original exteriors (before close points were merged).
        ls_a = self.to_shapely_line_string(closed=True, interpolate=interpolate)
        ls_b = other_polygon.to_shapely_line_string(closed=True, interpolate=interpolate) \
            if not is_np_array(other_polygon) \
            else _convert_points_to_shapely_line_string(other_polygon, closed=True, interpolate=interpolate)
        # Measure the distance from each point in A to LineString B and vice versa.
        # Make sure that no point violates the tolerance.
        # Note that we can't just use LineString.almost_equals(LineString) -- that seems to expect the same number
        # and order of points in both LineStrings (failed with duplicated points).
        for x, y in ls_a.coords:
            point = shapely.geometry.Point(x, y)
            if not ls_b.distance(point) <= max_distance:
                return False
        for x, y in ls_b.coords:
            point = shapely.geometry.Point(x, y)
            if not ls_a.distance(point) <= max_distance:
                return False
        return True
def almost_equals(self, other, max_distance=1e-6, interpolate=8):
"""
Compare this polygon with another one and estimate whether they can be viewed as equal.
This is the same as :func:`imgaug.Polygon.exterior_almost_equals` but additionally compares the labels.
Parameters
----------
other
The object to compare against. If not a Polygon, then False will be returned.
max_distance : float
See :func:`imgaug.Polygon.exterior_almost_equals`.
interpolate : int
See :func:`imgaug.Polygon.exterior_almost_equals`.
Returns
-------
bool
Whether the two polygons can be viewed as equal. In the case of the exteriors this is an approximate test.
"""
if not isinstance(other, Polygon):
return False
if self.label is not None or other.label is not None:
if self.label is None:
return False
if other.label is None:
return False
if self.label != other.label:
return False
return self.exterior_almost_equals(other, max_distance=max_distance, interpolate=interpolate)
def copy(self, exterior=None, label=None):
"""
Create a shallow copy of the Polygon object.
Parameters
----------
exterior : list of imgaug.Keypoint or list of tuple or (N,2) ndarray, optional
List of points defining the polygon. See :func:`imgaug.Polygon.__init__` for details.
label : None or str, optional
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.Polygon
Shallow copy.
"""
return self.deepcopy(exterior=exterior, label=label)
def deepcopy(self, exterior=None, label=None):
"""
Create a deep copy of the Polygon object.
Parameters
----------
exterior : list of Keypoint or list of tuple or (N,2) ndarray, optional
List of points defining the polygon. See `imgaug.Polygon.__init__` for details.
label : None or str
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.Polygon
Deep copy.
"""
return Polygon(
exterior=np.copy(self.exterior) if exterior is None else exterior,
label=self.label if label is None else label
)
def __repr__(self):
return self.__str__()
def __str__(self):
points_str = ", ".join(["(x=%.3f, y=%.3f)" % (point[0], point[1]) for point in self.exterior])
return "Polygon([%s] (%d points), label=%s)" % (points_str, len(self.exterior), self.label)
def _convert_points_to_shapely_line_string(points, closed=False, interpolate=0):
    # shapely's LineString cannot be built from fewer than two points
    if len(points) <= 1:
        raise Exception(
            "Conversion to shapely line string requires at least two points, but points input contains "
            "only %d points." % (len(points),)
        )
    coords = [(p[0], p[1]) for p in points]
    # optionally add `interpolate` evenly spaced points between each
    # consecutive pair of points
    if interpolate > 0:
        coords = _interpolate_points(coords, interpolate)
    # close the ring if requested; the length check intentionally uses the
    # original `points`, not the interpolated coordinates
    if closed and len(points) > 1:
        coords.append(coords[0])
    return shapely.geometry.LineString(coords)
def _interpolate_point_pair(point_a, point_b, nb_steps):
if nb_steps < 1:
return []
x1, y1 = point_a
x2, y2 = point_b
vec = np.float32([x2 - x1, y2 - y1])
step_size = vec / (1 + nb_steps)
return [(x1 + (i + 1) * step_size[0], y1 + (i + 1) * step_size[1]) for i in sm.xrange(nb_steps)]
def _interpolate_points(points, nb_steps, closed=True):
    # nothing to interpolate with fewer than two points
    if len(points) <= 1:
        return points
    working = list(points)
    if closed:
        # treat the polyline as a ring: also interpolate between last and first point
        working = working + [working[0]]
    result = []
    for start, end in zip(working[:-1], working[1:]):
        result.append(start)
        result.extend(_interpolate_point_pair(start, end, nb_steps))
    if not closed:
        # for open polylines the final point was never emitted by the loop
        result.append(working[-1])
    # the temporary closing point needs no removal here, as the loop never
    # emits the second element of the last pair
    return result
def _interpolate_points_by_max_distance(points, max_distance, closed=True):
    do_assert(max_distance > 0, "max_distance must have value greater than 0, got %.8f" % (max_distance,))
    # nothing to interpolate with fewer than two points
    if len(points) <= 1:
        return points
    working = list(points) + [points[0]] if closed else points
    result = []
    for start, end in zip(working[:-1], working[1:]):
        gap = np.sqrt((start[0] - end[0]) ** 2 + (start[1] - end[1]) ** 2)
        # number of points needed so that consecutive points are at most
        # max_distance apart; may be <= 0, which yields no interpolation
        nb_steps = int((gap / max_distance) - 1)
        result.append(start)
        result.extend(_interpolate_point_pair(start, end, nb_steps))
    if not closed:
        result.append(working[-1])
    return result
class MultiPolygon(object):
    """
    Class that represents several polygons.

    Parameters
    ----------
    geoms : list of imgaug.Polygon
        List of the polygons.

    """
    def __init__(self, geoms):
        """Create a new MultiPolygon instance."""
        # an empty list is allowed; otherwise every element must be a Polygon
        do_assert(len(geoms) == 0 or all([isinstance(el, Polygon) for el in geoms]))
        self.geoms = geoms
    @staticmethod
    def from_shapely(geometry, label=None):
        """
        Create a MultiPolygon from a Shapely MultiPolygon, a Shapely Polygon
        or a Shapely GeometryCollection.

        All Polygons contained by the resulting MultiPolygon are created here too.

        Parameters
        ----------
        geometry : shapely.geometry.MultiPolygon or shapely.geometry.Polygon\
                   or shapely.geometry.collection.GeometryCollection
            The object to convert to a MultiPolygon.

        label : None or str, optional
            A label assigned to all Polygons within the MultiPolygon.

        Returns
        -------
        imgaug.MultiPolygon
            The derived MultiPolygon.

        """
        if isinstance(geometry, shapely.geometry.MultiPolygon):
            polys = [Polygon.from_shapely(g, label=label) for g in geometry.geoms]
            return MultiPolygon(polys)
        elif isinstance(geometry, shapely.geometry.Polygon):
            # single polygon -> multipolygon with exactly one entry
            return MultiPolygon([Polygon.from_shapely(geometry, label=label)])
        elif isinstance(geometry, shapely.geometry.collection.GeometryCollection):
            # only collections made up purely of polygons can be converted
            do_assert(all([isinstance(g, shapely.geometry.Polygon) for g in geometry.geoms]))
            polys = [Polygon.from_shapely(g, label=label) for g in geometry.geoms]
            return MultiPolygon(polys)
        else:
            raise Exception("Unknown datatype '%s'. Expected shapely.geometry.Polygon or "
                            "shapely.geometry.MultiPolygon or "
                            "shapely.geometry.collections.GeometryCollection." % (type(geometry),))
class HeatmapsOnImage(object):
    """
    Object representing heatmaps on images.

    Internally, the heatmap array is always stored normalized to the value
    range ``(0.0, 1.0)`` (see ``arr_0to1``); the original ``(min_value,
    max_value)`` range is kept as metadata and re-applied in ``get_arr()``.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Array representing the heatmap(s).
        Must be of dtype float32.
        If multiple heatmaps are provided, then ``C`` is expected to denote their number.

    shape : tuple of int
        Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
        heatmap(s) array, unless it is identical to the image shape (note the likely
        difference between the arrays in the number of channels).
        If there is not a corresponding image, use the shape of the heatmaps array.

    min_value : float, optional
        Minimum value for the heatmaps that `arr` represents. This will usually be ``0.0``.

    max_value : float, optional
        Maximum value for the heatmaps that `arr` represents. This will usually be ``1.0``.

    """
    def __init__(self, arr, shape, min_value=0.0, max_value=1.0):
        """Construct a new HeatmapsOnImage object."""
        do_assert(arr.dtype.type in [np.float32])
        do_assert(arr.ndim in [2, 3])
        do_assert(len(shape) in [2, 3])
        do_assert(min_value < max_value)
        # NOTE: only the first 50 array values are validated against the
        # declared value range -- presumably a cheap sanity sample so that
        # large arrays are not scanned in full; out-of-range values beyond
        # index 50 are not caught here.
        do_assert(np.min(arr.flat[0:50]) >= min_value - np.finfo(arr.dtype).eps)
        do_assert(np.max(arr.flat[0:50]) <= max_value + np.finfo(arr.dtype).eps)
        if arr.ndim == 2:
            # remember the 2d-ness so that get_arr() can squeeze back later
            arr = arr[..., np.newaxis]
            self.arr_was_2d = True
        else:
            self.arr_was_2d = False
        # normalize to internal (0.0, 1.0) storage; skip the arithmetic when
        # the provided range already is (approximately) (0.0, 1.0)
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < min_value < 0.0 + eps
        max_is_one = 1.0 - eps < max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            self.arr_0to1 = arr
        else:
            self.arr_0to1 = (arr - min_value) / (max_value - min_value)
        self.shape = shape
        self.min_value = min_value
        self.max_value = max_value
    def get_arr(self):
        """
        Get the heatmap's array within the value range originally provided in ``__init__()``.

        The HeatmapsOnImage object saves heatmaps internally in the value range ``(min=0.0, max=1.0)``.
        This function converts the internal representation to ``(min=min_value, max=max_value)``,
        where ``min_value`` and ``max_value`` are provided upon instantiation of the object.

        Returns
        -------
        result : (H,W) ndarray or (H,W,C) ndarray
            Heatmap array. Dtype is float32.

        """
        if self.arr_was_2d and self.arr_0to1.shape[2] == 1:
            # input was (H,W): drop the channel axis that __init__ added
            arr = self.arr_0to1[:, :, 0]
        else:
            arr = self.arr_0to1
        # if the stored range already is (0.0, 1.0), denormalization is a no-op
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < self.min_value < 0.0 + eps
        max_is_one = 1.0 - eps < self.max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            return np.copy(arr)
        else:
            diff = self.max_value - self.min_value
            return self.min_value + diff * arr
    # TODO
    # def find_global_maxima(self):
    #    raise NotImplementedError()
    def draw(self, size=None, cmap="jet"):
        """
        Render the heatmaps as RGB images.

        Parameters
        ----------
        size : None or float or iterable of int or iterable of float, optional
            Size of the rendered RGB image as ``(height, width)``.
            See :func:`imgaug.imresize_single_image` for details.
            If set to None, no resizing is performed and the size of the heatmaps array is used.

        cmap : str or None, optional
            Color map of ``matplotlib`` to use in order to convert the heatmaps to RGB images.
            If set to None, no color map will be used and the heatmaps will be converted
            to simple intensity maps.

        Returns
        -------
        heatmaps_drawn : list of (H,W,3) ndarray
            Rendered heatmaps. One per heatmap array channel. Dtype is uint8.

        """
        heatmaps_uint8 = self.to_uint8()
        heatmaps_drawn = []
        for c in sm.xrange(heatmaps_uint8.shape[2]):
            # c:c+1 here, because the additional axis is needed by imresize_single_image
            heatmap_c = heatmaps_uint8[..., c:c+1]
            if size is not None:
                heatmap_c_rs = imresize_single_image(heatmap_c, size, interpolation="nearest")
            else:
                heatmap_c_rs = heatmap_c
            heatmap_c_rs = np.squeeze(heatmap_c_rs).astype(np.float32) / 255.0
            if cmap is not None:
                cmap_func = plt.get_cmap(cmap)
                heatmap_cmapped = cmap_func(heatmap_c_rs)
                # colormaps return RGBA in (0.0, 1.0); drop the alpha channel
                heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
            else:
                # no colormap: replicate the intensity over three RGB channels
                heatmap_cmapped = np.tile(heatmap_c_rs[..., np.newaxis], (1, 1, 3))
            heatmap_cmapped = np.clip(heatmap_cmapped * 255, 0, 255).astype(np.uint8)
            heatmaps_drawn.append(heatmap_cmapped)
        return heatmaps_drawn
    def draw_on_image(self, image, alpha=0.75, cmap="jet", resize="heatmaps"):
        """
        Draw the heatmaps as overlays over an image.

        Parameters
        ----------
        image : (H,W,3) ndarray
            Image onto which to draw the heatmaps. Expected to be of dtype uint8.

        alpha : float, optional
            Alpha/opacity value to use for the mixing of image and heatmaps.
            Higher values mean that the heatmaps will be more visible and the image less visible.

        cmap : str or None, optional
            Color map to use. See :func:`imgaug.HeatmapsOnImage.draw` for details.

        resize : {'heatmaps', 'image'}, optional
            In case of size differences between the image and heatmaps, either the image or
            the heatmaps can be resized. This parameter controls which of the two will be resized
            to the other's size.

        Returns
        -------
        mix : list of (H,W,3) ndarray
            Rendered overlays. One per heatmap array channel. Dtype is uint8.

        """
        # assert RGB image
        do_assert(image.ndim == 3)
        do_assert(image.shape[2] == 3)
        do_assert(image.dtype.type == np.uint8)
        do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
        do_assert(resize in ["heatmaps", "image"])
        if resize == "image":
            image = imresize_single_image(image, self.arr_0to1.shape[0:2], interpolation="cubic")
        heatmaps_drawn = self.draw(
            size=image.shape[0:2] if resize == "heatmaps" else None,
            cmap=cmap
        )
        # alpha-blend each rendered heatmap with the image
        mix = [
            np.clip((1-alpha) * image + alpha * heatmap_i, 0, 255).astype(np.uint8)
            for heatmap_i
            in heatmaps_drawn
        ]
        return mix
    def invert(self):
        """
        Inverts each value in the heatmap, shifting low towards high values and vice versa.

        This changes each value to::

            v' = max - (v - min)

        where ``v`` is the value at some spatial location, ``min`` is the minimum value in the heatmap
        and ``max`` is the maximum value.
        As the heatmap uses internally a 0.0 to 1.0 representation, this simply becomes ``v' = 1.0 - v``.

        Note that the attributes ``min_value`` and ``max_value`` are not switched. They both keep their values.

        This function can be useful e.g. when working with depth maps, where algorithms might have
        an easier time representing the furthest away points with zeros, requiring an inverted
        depth map.

        Returns
        -------
        arr_inv : imgaug.HeatmapsOnImage
            Inverted heatmap.

        """
        arr_inv = HeatmapsOnImage.from_0to1(1 - self.arr_0to1, shape=self.shape, min_value=self.min_value,
                                            max_value=self.max_value)
        # preserve the 2d-ness flag so that get_arr() returns the same shape
        arr_inv.arr_was_2d = self.arr_was_2d
        return arr_inv
    def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
        """
        Pad the heatmaps on their top/right/bottom/left side.

        Parameters
        ----------
        top : int, optional
            Amount of pixels to add at the top side of the heatmaps. Must be 0 or greater.

        right : int, optional
            Amount of pixels to add at the right side of the heatmaps. Must be 0 or greater.

        bottom : int, optional
            Amount of pixels to add at the bottom side of the heatmaps. Must be 0 or greater.

        left : int, optional
            Amount of pixels to add at the left side of the heatmaps. Must be 0 or greater.

        mode : string, optional
            Padding mode to use. See :func:`numpy.pad` for details.

        cval : number, optional
            Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Padded heatmaps of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.

        """
        arr_0to1_padded = pad(self.arr_0to1, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
        return HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
        """
        Pad the heatmaps on their sides so that they match a target aspect ratio.

        Depending on which dimension is smaller (height or width), only the corresponding
        sides (left/right or top/bottom) will be padded. In each case, both of the sides will
        be padded equally.

        Parameters
        ----------
        aspect_ratio : float
            Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
            as much width as height.

        mode : str, optional
            Padding mode to use. See :func:`numpy.pad` for details.

        cval : number, optional
            Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

        return_pad_amounts : bool, optional
            If False, then only the padded image will be returned. If True, a tuple with two
            entries will be returned, where the first entry is the padded image and the second
            entry are the amounts by which each image side was padded. These amounts are again a
            tuple of the form (top, right, bottom, left), with each value being an integer.

        Returns
        -------
        heatmaps : imgaug.HeatmapsOnImage
            Padded heatmaps as HeatmapsOnImage object.

        pad_amounts : tuple of int
            Amounts by which the heatmaps were padded on each side, given as a tuple ``(top, right, bottom, left)``.
            This tuple is only returned if `return_pad_amounts` was set to True.

        """
        arr_0to1_padded, pad_amounts = pad_to_aspect_ratio(self.arr_0to1, aspect_ratio=aspect_ratio, mode=mode,
                                                           cval=cval, return_pad_amounts=True)
        heatmaps = HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,
                                             max_value=self.max_value)
        if return_pad_amounts:
            return heatmaps, pad_amounts
        else:
            return heatmaps
    def avg_pool(self, block_size):
        """
        Rescale the heatmap(s) array using average pooling of a given block/kernel size.

        Parameters
        ----------
        block_size : int or tuple of int
            Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps after average pooling.

        """
        arr_0to1_reduced = avg_pool(self.arr_0to1, block_size, cval=0.0)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def max_pool(self, block_size):
        """
        Rescale the heatmap(s) array using max-pooling of a given block/kernel size.

        Parameters
        ----------
        block_size : int or tuple of int
            Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps after max-pooling.

        """
        arr_0to1_reduced = max_pool(self.arr_0to1, block_size)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def scale(self, sizes, interpolation="cubic"):
        """
        Rescale the heatmap(s) array to the provided size given the provided interpolation.

        Parameters
        ----------
        sizes : float or iterable of int or iterable of float
            New size of the array in ``(height, width)``. See :func:`imgaug.imresize_single_image` for details.

        interpolation : None or str or int, optional
            The interpolation to use during resize. See :func:`imgaug.imresize_single_image` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Rescaled heatmaps object.

        """
        arr_0to1_rescaled = imresize_single_image(self.arr_0to1, sizes, interpolation=interpolation)
        # cubic interpolation can lead to values outside of [0.0, 1.0],
        # see https://github.com/opencv/opencv/issues/7195
        # TODO area interpolation too?
        arr_0to1_rescaled = np.clip(arr_0to1_rescaled, 0.0, 1.0)
        return HeatmapsOnImage.from_0to1(arr_0to1_rescaled, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def to_uint8(self):
        """
        Convert this heatmaps object to a 0-to-255 array.

        Returns
        -------
        arr_uint8 : (H,W,C) ndarray
            Heatmap as a 0-to-255 array (dtype is uint8).

        """
        # TODO this always returns (H,W,C), even if input ndarray was originally (H,W)
        # does it make sense here to also return (H,W) if self.arr_was_2d?
        arr_0to255 = np.clip(np.round(self.arr_0to1 * 255), 0, 255)
        arr_uint8 = arr_0to255.astype(np.uint8)
        return arr_uint8
    @staticmethod
    def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from an heatmap array containing values ranging from 0 to 255.

        Parameters
        ----------
        arr_uint8 : (H,W) ndarray or (H,W,C) ndarray
            Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
            Expected dtype is uint8.

        shape : tuple of int
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.

        min_value : float, optional
            Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
            be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
            underlying ``(0, 255)`` array to value range ``(min_value, max_value)``.

        max_value : float, optional
            Maximum value for the heatmaps that 0-to-255 array represents.
            See parameter `min_value` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps object.

        """
        arr_0to1 = arr_uint8.astype(np.float32) / 255.0
        return HeatmapsOnImage.from_0to1(arr_0to1, shape, min_value=min_value, max_value=max_value)
    @staticmethod
    def from_0to1(arr_0to1, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from an heatmap array containing values ranging from 0.0 to 1.0.

        Parameters
        ----------
        arr_0to1 : (H,W) or (H,W,C) ndarray
            Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
            Expected dtype is float32.

        shape : tuple of ints
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.

        min_value : float, optional
            Minimum value for the heatmaps that the 0-to-1 array represents. This will usually
            be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
            underlying ``(0.0, 1.0)`` array to value range ``(min_value, max_value)``.
            E.g. if you started with heatmaps in the range ``(-1.0, 1.0)`` and projected these
            to (0.0, 1.0), you should call this function with ``min_value=-1.0``, ``max_value=1.0``
            so that :func:`imgaug.HeatmapsOnImage.get_arr` returns heatmap arrays having value
            range (-1.0, 1.0).

        max_value : float, optional
            Maximum value for the heatmaps that the 0-to-1 array represents.
            See parameter `min_value` for details.

        Returns
        -------
        heatmaps : imgaug.HeatmapsOnImage
            Heatmaps object.

        """
        # instantiate with (0.0, 1.0) so that __init__ skips range
        # normalization (the array is already 0-to-1), then record the
        # user-provided value range for later denormalization in get_arr()
        heatmaps = HeatmapsOnImage(arr_0to1, shape, min_value=0.0, max_value=1.0)
        heatmaps.min_value = min_value
        heatmaps.max_value = max_value
        return heatmaps
    @classmethod
    def change_normalization(cls, arr, source, target):
        """
        Change the value range of a heatmap from one min-max to another min-max.

        E.g. the value range may be changed from min=0.0, max=1.0 to min=-1.0, max=1.0.

        Parameters
        ----------
        arr : ndarray
            Heatmap array to modify.

        source : tuple of float or imgaug.HeatmapsOnImage
            Current value range of the input array, given as (min, max), where both are float values.
            A HeatmapsOnImage instance may also be given; its ``(min_value, max_value)`` is used then.

        target : tuple of float or imgaug.HeatmapsOnImage
            Desired output value range of the array, given as (min, max), where both are float values.
            A HeatmapsOnImage instance may also be given; its ``(min_value, max_value)`` is used then.

        Returns
        -------
        arr_target : ndarray
            Input array, with value range projected to the desired target value range.

        """
        do_assert(is_np_array(arr))
        if isinstance(source, HeatmapsOnImage):
            source = (source.min_value, source.max_value)
        else:
            do_assert(isinstance(source, tuple))
            do_assert(len(source) == 2)
            do_assert(source[0] < source[1])
        if isinstance(target, HeatmapsOnImage):
            target = (target.min_value, target.max_value)
        else:
            do_assert(isinstance(target, tuple))
            do_assert(len(target) == 2)
            do_assert(target[0] < target[1])
        # Check if source and target are the same (with a tiny bit of tolerance)
        # if so, evade computation and just copy the array instead.
        # This is reasonable, as source and target will often both be (0.0, 1.0).
        eps = np.finfo(arr.dtype).eps
        mins_same = source[0] - 10*eps < target[0] < source[0] + 10*eps
        maxs_same = source[1] - 10*eps < target[1] < source[1] + 10*eps
        if mins_same and maxs_same:
            return np.copy(arr)
        min_source, max_source = source
        min_target, max_target = target
        diff_source = max_source - min_source
        diff_target = max_target - min_target
        arr_0to1 = (arr - min_source) / diff_source
        arr_target = min_target + arr_0to1 * diff_target
        return arr_target
    def copy(self):
        """
        Create a shallow copy of the Heatmaps object.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Shallow copy.

        """
        return self.deepcopy()
    def deepcopy(self):
        """
        Create a deep copy of the Heatmaps object.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Deep copy.

        """
        return HeatmapsOnImage(self.get_arr(), shape=self.shape, min_value=self.min_value, max_value=self.max_value)
class SegmentationMapOnImage(object):
"""
Object representing a segmentation map associated with an image.
Attributes
----------
DEFAULT_SEGMENT_COLORS : list of tuple of int
Standard RGB colors to use during drawing, ordered by class index.
Parameters
----------
arr : (H,W) ndarray or (H,W,1) ndarray or (H,W,C) ndarray
Array representing the segmentation map. May have datatypes bool, integer or float.
* If bool: Assumed to be of shape (H,W), (H,W,1) or (H,W,C). If (H,W) or (H,W,1) it
is assumed to be for the case of having a single class (where any False denotes
background). Otherwise there are assumed to be C channels, one for each class,
with each of them containing a mask for that class. The masks may overlap.
* If integer: Assumed to be of shape (H,W) or (H,W,1). Each pixel is assumed to
contain an integer denoting the class index. Classes are assumed to be
non-overlapping. The number of classes cannot be guessed from this input, hence
nb_classes must be set.
* If float: Assumed to b eof shape (H,W), (H,W,1) or (H,W,C) with meanings being
similar to the case of `bool`. Values are expected to fall always in the range
0.0 to 1.0 and are usually expected to be either 0.0 or 1.0 upon instantiation
of a new segmentation map. Classes may overlap.
shape : iterable of int
Shape of the corresponding image (NOT the segmentation map array). This is expected
to be ``(H, W)`` or ``(H, W, C)`` with ``C`` usually being 3. If there is no corresponding image,
then use the segmentation map's shape instead.
nb_classes : int or None
Total number of unique classes that may appear in an segmentation map, i.e. the max
class index. This may be None if the input array is of type bool or float. The number
of classes however must be provided if the input array is of type int, as then the
number of classes cannot be guessed.
"""
DEFAULT_SEGMENT_COLORS = [
(0, 0, 0), # black
(230, 25, 75), # red
(60, 180, 75), # green
(255, 225, 25), # yellow
(0, 130, 200), # blue
(245, 130, 48), # orange
(145, 30, 180), # purple
(70, 240, 240), # cyan
(240, 50, 230), # magenta
(210, 245, 60), # lime
(250, 190, 190), # pink
(0, 128, 128), # teal
(230, 190, 255), # lavender
(170, 110, 40), # brown
(255, 250, 200), # beige
(128, 0, 0), # maroon
(170, 255, 195), # mint
(128, 128, 0), # olive
(255, 215, 180), # coral
(0, 0, 128), # navy
(128, 128, 128), # grey
(255, 255, 255), # white
# --
(115, 12, 37), # dark red
(30, 90, 37), # dark green
(127, 112, 12), # dark yellow
(0, 65, 100), # dark blue
(122, 65, 24), # dark orange
(72, 15, 90), # dark purple
(35, 120, 120), # dark cyan
(120, 25, 115), # dark magenta
(105, 122, 30), # dark lime
(125, 95, 95), # dark pink
(0, 64, 64), # dark teal
(115, 95, 127), # dark lavender
(85, 55, 20), # dark brown
(127, 125, 100), # dark beige
(64, 0, 0), # dark maroon
(85, 127, 97), # dark mint
(64, 64, 0), # dark olive
(127, 107, 90), # dark coral
(0, 0, 64), # dark navy
(64, 64, 64), # dark grey
]
    def __init__(self, arr, shape, nb_classes=None):
        """Create a new SegmentationMapOnImage instance.

        Normalizes any of the accepted input forms (bool, int, float arrays;
        see the class docstring) to an internal float32 (H,W,C) array with one
        channel per class, and records the original input form in
        ``self.input_was`` so that it can be restored later.
        """
        do_assert(is_np_array(arr), "Expected to get numpy array, got %s." % (type(arr),))
        if arr.dtype.type == np.bool_:
            # bool masks: (H,W)/(H,W,1) is a single class, (H,W,C) is C classes
            do_assert(arr.ndim in [2, 3])
            self.input_was = ("bool", arr.ndim)
            if arr.ndim == 2:
                arr = arr[..., np.newaxis]
            arr = arr.astype(np.float32)
        elif arr.dtype.type in NP_INT_TYPES.union(NP_UINT_TYPES):
            # integer class-index maps: require nb_classes for the one-hot
            # conversion below
            do_assert(arr.ndim == 2 or (arr.ndim == 3 and arr.shape[2] == 1))
            do_assert(nb_classes is not None)
            do_assert(nb_classes > 0)
            # NOTE: only the first 100 values are validated -- presumably a
            # cheap sanity sample, not a full range check.
            do_assert(np.min(arr.flat[0:100]) >= 0)
            # NOTE(review): this allows values equal to nb_classes, but the
            # np.eye lookup below only has rows 0..nb_classes-1 and would then
            # raise an IndexError -- confirm whether the bound should be strict.
            do_assert(np.max(arr.flat[0:100]) <= nb_classes)
            self.input_was = ("int", arr.dtype.type, arr.ndim)
            if arr.ndim == 3:
                arr = arr[..., 0]
            # TODO improve efficiency here by building only sub-heatmaps for classes actually
            # present in the image. This would also get rid of nb_classes.
            arr = np.eye(nb_classes)[arr]  # from class indices to one hot
            arr = arr.astype(np.float32)
        elif arr.dtype.type in NP_FLOAT_TYPES:
            # float masks: one channel per class, values expected in [0.0, 1.0]
            do_assert(arr.ndim == 3)
            self.input_was = ("float", arr.dtype.type, arr.ndim)
            arr = arr.astype(np.float32)
        else:
            raise Exception(("Input was expected to be an ndarray of dtype bool or any dtype in %s or any dtype in %s. "
                             "Got dtype %s.") % (
                str(NP_INT_TYPES.union(NP_UINT_TYPES)), str(NP_FLOAT_TYPES), str(arr.dtype)))
        # internal invariant: float32 array of shape (H,W,#classes)
        do_assert(arr.ndim == 3)
        do_assert(arr.dtype.type == np.float32)
        self.arr = arr
        self.shape = shape
        self.nb_classes = nb_classes if nb_classes is not None else arr.shape[2]
def get_arr_int(self, background_threshold=0.01, background_class_id=None):
"""
Get the segmentation map array as an integer array of shape (H, W).
Each pixel in that array contains an integer value representing the pixel's class.
If multiple classes overlap, the one with the highest local float value is picked.
If that highest local value is below `background_threshold`, the method instead uses
the background class id as the pixel's class value.
By default, class id 0 is the background class. This may only be changed if the original
input to the segmentation map object was an integer map.
Parameters
----------
background_threshold : float, optional
At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
class-heatmaps has a value above this threshold, the method uses the background class
id instead.
background_class_id : None or int, optional
Class id to fall back to if no class-heatmap passes the threshold at a spatial
location. May only be provided if the original input was an integer mask and in these
cases defaults to 0. If the input were float or boolean masks, the background class id
may not be set as it is assumed that the background is implicitly defined
as 'any spatial location that has zero-like values in all masks'.
Returns
-------
result : (H,W) ndarray
Segmentation map array (int32).
If the original input consisted of boolean or float masks, then the highest possible
class id is ``1+C``, where ``C`` is the number of provided float/boolean masks. The value
``0`` in the integer mask then denotes the background class.
"""
if self.input_was[0] in ["bool", "float"]:
do_assert(background_class_id is None,
"The background class id may only be changed if the original input to SegmentationMapOnImage "
+ "was an *integer* based segmentation map.")
if background_class_id is None:
background_class_id = 0
channelwise_max_idx = np.argmax(self.arr, axis=2)
# for bool and float input masks, we assume that the background is implicitly given,
# i.e. anything where all masks/channels have zero-like values
# for int, we assume that the background class is explicitly given and has the index 0
if self.input_was[0] in ["bool", "float"]:
result = 1 + channelwise_max_idx
else: # integer mask was provided
result = channelwise_max_idx
if background_threshold is not None and background_threshold > 0:
probs = np.amax(self.arr, axis=2)
result[probs < background_threshold] = background_class_id
return result.astype(np.int32)
# TODO
# def get_arr_bool(self, allow_overlapping=False, threshold=0.5, background_threshold=0.01, background_class_id=0):
# raise NotImplementedError()
    def draw(self, size=None, background_threshold=0.01, background_class_id=None, colors=None,
             return_foreground_mask=False):
        """
        Render the segmentation map as an RGB image.

        Parameters
        ----------
        size : None or float or iterable of int or iterable of float, optional
            Size of the rendered RGB image as ``(height, width)``.
            See :func:`imgaug.imresize_single_image` for details.
            If set to None, no resizing is performed and the size of the segmentation map array is used.

        background_threshold : float, optional
            See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.

        background_class_id : None or int, optional
            See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.

        colors : None or list of tuple of int, optional
            Colors to use. One for each class to draw. If None, then default colors will be used.

        return_foreground_mask : bool, optional
            Whether to return a mask of the same size as the drawn segmentation map, containing
            True at any spatial location that is not the background class and False everywhere else.

        Returns
        -------
        segmap_drawn : (H,W,3) ndarray
            Rendered segmentation map (dtype is uint8).

        foreground_mask : (H,W) ndarray
            Mask indicating the locations of foreground classes (dtype is bool).
            This value is only returned if `return_foreground_mask` is True.

        """
        # collapse the per-class float channels to one integer class id per pixel
        arr = self.get_arr_int(background_threshold=background_threshold, background_class_id=background_class_id)
        nb_classes = 1 + np.max(arr)
        segmap_drawn = np.zeros((arr.shape[0], arr.shape[1], 3), dtype=np.uint8)
        if colors is None:
            colors = SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS
        do_assert(nb_classes <= len(colors),
                  "Can't draw all %d classes as it would exceed the maximum number of %d available colors." % (
                      nb_classes, len(colors),))
        # only color classes that actually occur in the map
        ids_in_map = np.unique(arr)
        for c, color in zip(sm.xrange(nb_classes), colors):
            if c in ids_in_map:
                class_mask = (arr == c)
                segmap_drawn[class_mask] = color
        if return_foreground_mask:
            background_class_id = 0 if background_class_id is None else background_class_id
            foreground_mask = (arr != background_class_id)
        else:
            foreground_mask = None
        if size is not None:
            # nearest-neighbor interpolation keeps class boundaries crisp and
            # avoids inventing colors/classes that are not in the map
            segmap_drawn = imresize_single_image(segmap_drawn, size, interpolation="nearest")
            if foreground_mask is not None:
                foreground_mask = imresize_single_image(
                    foreground_mask.astype(np.uint8), size, interpolation="nearest") > 0
        if foreground_mask is not None:
            return segmap_drawn, foreground_mask
        return segmap_drawn
def draw_on_image(self, image, alpha=0.75, resize="segmentation_map", background_threshold=0.01,
                  background_class_id=None, colors=None, draw_background=False):
    """
    Draw the segmentation map as an overlay over an image.

    Parameters
    ----------
    image : (H,W,3) ndarray
        Image onto which to draw the segmentation map. Dtype is expected to be uint8.
    alpha : float, optional
        Alpha/opacity value to use for the mixing of image and segmentation map.
        Higher values mean that the segmentation map will be more visible and the image less visible.
    resize : {'segmentation_map', 'image'}, optional
        In case of size differences between the image and segmentation map, either the image or
        the segmentation map can be resized. This parameter controls which of the two will be
        resized to the other's size.
    background_threshold : float, optional
        See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
    background_class_id : None or int, optional
        See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
    colors : None or list of tuple of int, optional
        Colors to use. One for each class to draw. If None, then default colors will be used.
    draw_background : bool, optional
        If True, the background will be drawn like any other class.
        If False, the background will not be drawn, i.e. the respective background pixels
        will be identical with the image's RGB color at the corresponding spatial location
        and no color overlay will be applied.

    Returns
    -------
    mix : (H,W,3) ndarray
        Rendered overlays (dtype is uint8).
    """
    # assert RGB image
    do_assert(image.ndim == 3)
    do_assert(image.shape[2] == 3)
    do_assert(image.dtype.type == np.uint8)

    # epsilons tolerate float rounding at the interval borders of [0.0, 1.0]
    do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
    do_assert(resize in ["segmentation_map", "image"])

    if resize == "image":
        image = imresize_single_image(image, self.arr.shape[0:2], interpolation="cubic")

    # render the map (and foreground mask) at the image's spatial size
    segmap_drawn, foreground_mask = self.draw(
        background_threshold=background_threshold,
        background_class_id=background_class_id,
        size=image.shape[0:2] if resize == "segmentation_map" else None,
        colors=colors,
        return_foreground_mask=True
    )

    if draw_background:
        # blend everywhere; blending happens in float space, then the result
        # is clipped back into the uint8 value range
        mix = np.clip(
            (1-alpha) * image + alpha * segmap_drawn,
            0,
            255
        ).astype(np.uint8)
    else:
        # blend only at foreground locations; background pixels keep the
        # original image's colors untouched
        foreground_mask = foreground_mask[..., np.newaxis]
        mix = np.zeros_like(image)
        mix += (~foreground_mask).astype(np.uint8) * image
        mix += foreground_mask.astype(np.uint8) * np.clip(
            (1-alpha) * image + alpha * segmap_drawn,
            0,
            255
        ).astype(np.uint8)
    return mix
def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
    """
    Pad the segmentation map on its top/right/bottom/left side.

    Parameters
    ----------
    top : int, optional
        Number of pixels to add at the top side. Must be 0 or greater.
    right : int, optional
        Number of pixels to add at the right side. Must be 0 or greater.
    bottom : int, optional
        Number of pixels to add at the bottom side. Must be 0 or greater.
    left : int, optional
        Number of pixels to add at the left side. Must be 0 or greater.
    mode : str, optional
        Padding mode to use. See :func:`numpy.pad` for details.
    cval : number, optional
        Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

    Returns
    -------
    segmap : imgaug.SegmentationMapOnImage
        Padded segmentation map of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
    """
    # `pad` here resolves to the module-level helper, not this method
    padded_arr = pad(self.arr, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
    result = SegmentationMapOnImage(padded_arr, shape=self.shape)
    result.input_was = self.input_was
    return result
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
    """
    Pad the segmentation map on its sides so that it matches a target aspect ratio.

    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.

    Parameters
    ----------
    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.
    mode : str, optional
        Padding mode to use. See :func:`numpy.pad` for details.
    cval : number, optional
        Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
    return_pad_amounts : bool, optional
        If False, then only the padded instance will be returned. If True, a tuple with two
        entries will be returned, where the first entry is the padded instance and the second
        entry are the amounts by which each side was padded. These amounts are again a
        tuple of the form (top, right, bottom, left), with each value being an integer.

    Returns
    -------
    segmap : imgaug.SegmentationMapOnImage
        Padded segmentation map as SegmentationMapOnImage object.
    pad_amounts : tuple of int
        Amounts by which the segmentation map was padded on each side, given as a
        tuple ``(top, right, bottom, left)``.
        This tuple is only returned if `return_pad_amounts` was set to True.
    """
    arr_padded, pad_amounts = pad_to_aspect_ratio(self.arr, aspect_ratio=aspect_ratio, mode=mode, cval=cval,
                                                  return_pad_amounts=True)
    result = SegmentationMapOnImage(arr_padded, shape=self.shape)
    result.input_was = self.input_was
    if return_pad_amounts:
        return result, pad_amounts
    return result
def scale(self, sizes, interpolation="cubic"):
    """
    Rescale the segmentation map array to the provided size given the provided interpolation.

    Parameters
    ----------
    sizes : float or iterable of int or iterable of float
        New size of the array in ``(height, width)``. See :func:`imgaug.imresize_single_image` for details.
    interpolation : None or str or int, optional
        The interpolation to use during resize. See :func:`imgaug.imresize_single_image` for details.
        Note: The segmentation map is internally stored as multiple float-based heatmaps,
        making smooth interpolations potentially more reasonable than nearest neighbour
        interpolation.

    Returns
    -------
    segmap : imgaug.SegmentationMapOnImage
        Rescaled segmentation map object.
    """
    resized = imresize_single_image(self.arr, sizes, interpolation=interpolation)

    # cubic interpolation can produce values outside of [0.0, 1.0],
    # see https://github.com/opencv/opencv/issues/7195
    # TODO area interpolation too?
    resized = np.clip(resized, 0.0, 1.0)

    result = SegmentationMapOnImage(resized, shape=self.shape)
    result.input_was = self.input_was
    return result
def to_heatmaps(self, only_nonempty=False, not_none_if_no_nonempty=False):
    """
    Convert the segmentation map to a heatmaps object.

    Each segmentation map class will be represented as a single heatmap channel.

    Parameters
    ----------
    only_nonempty : bool, optional
        If True, then only heatmaps for classes that appear in the segmentation map will be
        generated. Additionally, a list of these class ids will be returned.
    not_none_if_no_nonempty : bool, optional
        If `only_nonempty` is True and for a segmentation map no channel was non-empty,
        this function usually returns None as the heatmaps object. If however this parameter
        is set to True, a heatmaps object with one channel (representing class 0)
        will be returned as a fallback in these cases.

    Returns
    -------
    imgaug.HeatmapsOnImage or None
        Segmentation map as a heatmaps object.
        If `only_nonempty` was set to True and no class appeared in the segmentation map,
        then this is None.
    class_indices : list of int
        Class ids (0 to C-1) of the classes that were actually added to the heatmaps.
        Only returned if `only_nonempty` was set to True.
    """
    if not only_nonempty:
        return HeatmapsOnImage.from_0to1(self.arr, self.shape, min_value=0.0, max_value=1.0)

    # a channel counts as non-empty if its summed mass exceeds a small epsilon
    nonempty_mask = np.sum(self.arr, axis=(0, 1)) > 0 + 1e-4
    if np.sum(nonempty_mask) == 0:
        if not not_none_if_no_nonempty:
            return None, []
        # fallback: pretend class 0 is present so a one-channel heatmap exists
        nonempty_mask[0] = True

    class_indices = np.arange(self.arr.shape[2])[nonempty_mask]
    channels = self.arr[..., class_indices]
    return HeatmapsOnImage(channels, self.shape, min_value=0.0, max_value=1.0), class_indices
@staticmethod
def from_heatmaps(heatmaps, class_indices=None, nb_classes=None):
    """
    Convert heatmaps to a segmentation map.

    Assumes that each class is represented as a single heatmap channel.

    Parameters
    ----------
    heatmaps : imgaug.HeatmapsOnImage
        Heatmaps to convert.
    class_indices : None or list of int, optional
        List of class indices represented by each heatmap channel. See also the
        secondary output of :func:`imgaug.SegmentationMapOnImage.to_heatmaps`.
        If this is provided, it must have the same length as the number of heatmap channels.
    nb_classes : None or int, optional
        Number of classes. Must be provided if class_indices is set.

    Returns
    -------
    imgaug.SegmentationMapOnImage
        Segmentation map derived from heatmaps.
    """
    if class_indices is None:
        return SegmentationMapOnImage(heatmaps.arr_0to1, shape=heatmaps.shape)
    else:
        do_assert(nb_classes is not None)
        do_assert(min(class_indices) >= 0)
        do_assert(max(class_indices) < nb_classes)
        do_assert(len(class_indices) == heatmaps.arr_0to1.shape[2])
        arr_0to1 = heatmaps.arr_0to1
        arr_0to1_full = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1], nb_classes), dtype=np.float32)
        # Map each heatmap channel directly to its class index. The previous
        # implementation iterated over all classes in ascending order and
        # implicitly assumed `class_indices` was sorted; an unsorted list
        # silently assigned channels to the wrong classes.
        for heatmap_channel, c in enumerate(class_indices):
            arr_0to1_full[:, :, c] = arr_0to1[:, :, heatmap_channel]
        return SegmentationMapOnImage(arr_0to1_full, shape=heatmaps.shape)
def copy(self):
    """
    Create a shallow copy of the segmentation map object.

    Returns
    -------
    imgaug.SegmentationMapOnImage
        Shallow copy.
    """
    # no dedicated shallow copy exists; delegate to the deep copy
    return self.deepcopy()
def deepcopy(self):
    """
    Create a deep copy of the segmentation map object.

    Returns
    -------
    imgaug.SegmentationMapOnImage
        Deep copy.
    """
    cloned = SegmentationMapOnImage(self.arr, shape=self.shape, nb_classes=self.nb_classes)
    cloned.input_was = self.input_was
    return cloned
############################
# Background augmentation
############################
class Batch(object):
    """
    Class encapsulating a batch before and after augmentation.

    Parameters
    ----------
    images : None or (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
        The images to augment.
    heatmaps : None or list of imgaug.HeatmapsOnImage
        The heatmaps to augment.
    segmentation_maps : None or list of SegmentationMapOnImage
        The segmentation maps to augment.
    keypoints : None or list of KeypointOnImage
        The keypoints to augment.
    bounding_boxes : None or list of BoundingBoxesOnImage
        The bounding boxes to augment.
    data
        Additional data that is saved in the batch and may be read out
        after augmentation. This could e.g. contain filepaths to each image
        in `images`. As this object is usually used for background
        augmentation with multiple processes, the augmented Batch objects might
        not be returned in the original order, making this information useful.
    """

    def __init__(self, images=None, heatmaps=None, segmentation_maps=None, keypoints=None, bounding_boxes=None,
                 data=None):
        # inputs before augmentation
        self.images = images
        self.heatmaps = heatmaps
        self.segmentation_maps = segmentation_maps
        self.keypoints = keypoints
        self.bounding_boxes = bounding_boxes
        self.data = data
        # outputs after augmentation, filled in by the augmentation pipeline
        self.images_aug = None
        self.heatmaps_aug = None
        self.segmentation_maps_aug = None
        self.keypoints_aug = None
        self.bounding_boxes_aug = None

    def deepcopy(self):
        """Create a deep copy of this batch, including augmented results."""
        def _clone_images(images):
            # images may be None, a single ndarray or a list of ndarrays
            if images is None:
                return None
            if is_np_array(images):
                return np.copy(images)
            do_assert(is_iterable(images))
            do_assert(all([is_np_array(image) for image in images]))
            return [np.copy(image) for image in images]

        def _clone_objects(objs, expected_class):
            # deep-copies a list of augmentable objects after validating their type
            if objs is None:
                return None
            do_assert(is_iterable(objs))
            do_assert(all([isinstance(obj, expected_class) for obj in objs]))
            return [obj.deepcopy() for obj in objs]

        cloned = Batch(
            images=_clone_images(self.images),
            heatmaps=_clone_objects(self.heatmaps, HeatmapsOnImage),
            segmentation_maps=_clone_objects(self.segmentation_maps, SegmentationMapOnImage),
            keypoints=_clone_objects(self.keypoints, KeypointsOnImage),
            bounding_boxes=_clone_objects(self.bounding_boxes, BoundingBoxesOnImage),
            data=copy.deepcopy(self.data)
        )
        cloned.images_aug = _clone_images(self.images_aug)
        cloned.heatmaps_aug = _clone_objects(self.heatmaps_aug, HeatmapsOnImage)
        cloned.segmentation_maps_aug = _clone_objects(self.segmentation_maps_aug, SegmentationMapOnImage)
        cloned.keypoints_aug = _clone_objects(self.keypoints_aug, KeypointsOnImage)
        cloned.bounding_boxes_aug = _clone_objects(self.bounding_boxes_aug, BoundingBoxesOnImage)
        return cloned
class BatchLoader(object):
    """
    Class to load batches in the background.

    Loaded batches can be accessed using :attr:`imgaug.BatchLoader.queue`.

    Parameters
    ----------
    load_batch_func : callable or generator
        Generator or generator function (i.e. function that yields Batch objects)
        or a function that returns a list of Batch objects.
        Background loading automatically stops when the last batch was yielded or the
        last batch in the list was reached.
    queue_size : int, optional
        Maximum number of batches to store in the queue. May be set higher
        for small images and/or small batches.
    nb_workers : int, optional
        Number of workers to run in the background.
    threaded : bool, optional
        Whether to run the background processes using threads (True) or full processes (False).
    """

    def __init__(self, load_batch_func, queue_size=50, nb_workers=1, threaded=True):
        do_assert(queue_size >= 2)
        do_assert(nb_workers >= 1)
        # Batches flow workers -> _queue_internal -> main worker thread -> queue;
        # the queue_size budget is split between the two queues.
        self._queue_internal = multiprocessing.Queue(queue_size//2)
        self.queue = multiprocessing.Queue(queue_size//2)
        self.join_signal = multiprocessing.Event()  # set to request shutdown of all workers
        self.workers = []
        self.threaded = threaded
        # one seed per worker so process-based workers don't produce identical randomness
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            if threaded:
                # threads get seedval=None: they share the interpreter's RNG state anyway
                worker = threading.Thread(
                    target=self._load_batches,
                    args=(load_batch_func, self._queue_internal, self.join_signal, None)
                )
            else:
                worker = multiprocessing.Process(
                    target=self._load_batches,
                    args=(load_batch_func, self._queue_internal, self.join_signal, seeds[i])
                )
            worker.daemon = True
            worker.start()
            self.workers.append(worker)

        # the main worker thread moves batches from the internal queue to the public one
        self.main_worker_thread = threading.Thread(
            target=self._main_worker,
            args=()
        )
        self.main_worker_thread.daemon = True
        self.main_worker_thread.start()

    def count_workers_alive(self):
        # number of loader workers (threads/processes) still running
        return sum([int(worker.is_alive()) for worker in self.workers])

    def all_finished(self):
        """
        Determine whether the workers have finished the loading process.

        Returns
        -------
        out : bool
            True if all workers have finished. Else False.
        """
        return self.count_workers_alive() == 0

    def _main_worker(self):
        # Moves pickled batches from _queue_internal to the public queue until
        # all loader workers are done or a shutdown was requested.
        workers_running = self.count_workers_alive()
        while workers_running > 0 and not self.join_signal.is_set():
            # wait for a new batch in the source queue and load it
            try:
                batch_str = self._queue_internal.get(timeout=0.1)
                if batch_str == "":
                    # empty string is the per-worker end-of-stream marker (see _load_batches)
                    workers_running -= 1
                else:
                    self.queue.put(batch_str)
            except QueueEmpty:
                time.sleep(0.01)
            except (EOFError, BrokenPipeError):
                break

            workers_running = self.count_workers_alive()

        # All workers have finished, move the remaining entries from internal to external queue
        while True:
            try:
                batch_str = self._queue_internal.get(timeout=0.005)
                if batch_str != "":
                    self.queue.put(batch_str)
            except QueueEmpty:
                break
            except (EOFError, BrokenPipeError):
                break

        # pickled None signals "no more batches" to consumers of self.queue
        self.queue.put(pickle.dumps(None, protocol=-1))
        time.sleep(0.01)

    def _load_batches(self, load_batch_func, queue, join_signal, seedval):
        # Worker entry point: draws Batch objects from load_batch_func and pushes
        # them (pickled) into the internal queue until exhausted or stopped.
        if seedval is not None:
            random.seed(seedval)
            np.random.seed(seedval)
            seed(seedval)

        try:
            gen = load_batch_func() if not is_generator(load_batch_func) else load_batch_func
            for batch in gen:
                do_assert(isinstance(batch, Batch),
                          "Expected batch returned by load_batch_func to be of class imgaug.Batch, got %s." % (
                              type(batch),))
                batch_pickled = pickle.dumps(batch, protocol=-1)
                # retry the put until it succeeds or a shutdown is requested,
                # so a full queue never blocks shutdown indefinitely
                while not join_signal.is_set():
                    try:
                        queue.put(batch_pickled, timeout=0.005)
                        break
                    except QueueFull:
                        pass
                if join_signal.is_set():
                    break
        except Exception:
            traceback.print_exc()
        finally:
            # empty string marks this worker's end-of-stream for _main_worker
            queue.put("")
        time.sleep(0.01)

    def terminate(self):
        """Stop all workers."""
        if not self.join_signal.is_set():
            self.join_signal.set()
        # give minimal time to put generated batches in queue and gracefully shut down
        time.sleep(0.01)

        if self.main_worker_thread.is_alive():
            self.main_worker_thread.join()

        if self.threaded:
            for worker in self.workers:
                if worker.is_alive():
                    worker.join()
        else:
            for worker in self.workers:
                if worker.is_alive():
                    worker.terminate()
                    worker.join()

        # wait until all workers are fully terminated
        while not self.all_finished():
            time.sleep(0.001)

        # final None tells any remaining consumer that loading is over
        self.queue.put(pickle.dumps(None, protocol=-1))
        time.sleep(0.01)

        # clean the queue, this reportedly prevents hanging threads
        while True:
            try:
                self._queue_internal.get(timeout=0.005)
            except QueueEmpty:
                break

        # NOTE(review): _closed is a private multiprocessing.Queue attribute — verify
        # it exists across supported python versions
        if not self._queue_internal._closed:
            self._queue_internal.close()
        if not self.queue._closed:
            self.queue.close()
        self._queue_internal.join_thread()
        self.queue.join_thread()
        time.sleep(0.025)

    def __del__(self):
        # only request shutdown; actual joining happens in terminate()
        if not self.join_signal.is_set():
            self.join_signal.set()
class BackgroundAugmenter(object):
    """
    Class to augment batches in the background (while training on the GPU).

    This is a wrapper around the multiprocessing module.

    Parameters
    ----------
    batch_loader : BatchLoader
        BatchLoader object to load data in the
        background.
    augseq : Augmenter
        An augmenter to apply to all loaded images.
        This may be e.g. a Sequential to apply multiple augmenters.
    queue_size : int
        Size of the queue that is used to temporarily save the augmentation
        results. Larger values offer the background processes more room
        to save results when the main process doesn't load much, i.e. they
        can lead to smoother and faster training. For large images, high
        values can block a lot of RAM though.
    nb_workers : 'auto' or int
        Number of background workers to spawn.
        If ``auto``, it will be set to ``C-1``, where ``C`` is the number of CPU cores.
    """

    def __init__(self, batch_loader, augseq, queue_size=50, nb_workers="auto"):
        do_assert(queue_size > 0)
        self.augseq = augseq
        self.queue_source = batch_loader.queue  # pickled input batches come from here
        self.queue_result = multiprocessing.Queue(queue_size)  # augmented batches go here
        if nb_workers == "auto":
            try:
                nb_workers = multiprocessing.cpu_count()
            except (ImportError, NotImplementedError):
                nb_workers = 1
            # try to reserve at least one core for the main process
            nb_workers = max(1, nb_workers - 1)
        else:
            do_assert(nb_workers >= 1)

        self.nb_workers = nb_workers
        self.workers = []
        self.nb_workers_finished = 0  # counts workers that have sent their end marker

        # one distinct seed per worker process, otherwise they would all
        # produce identical augmentations
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            worker = multiprocessing.Process(
                target=self._augment_images_worker,
                args=(augseq, self.queue_source, self.queue_result, seeds[i])
            )
            worker.daemon = True
            worker.start()
            self.workers.append(worker)

    def all_finished(self):
        # True once every worker has pushed its end-of-stream marker
        return self.nb_workers_finished == self.nb_workers

    def get_batch(self):
        """
        Returns a batch from the queue of augmented batches.

        If workers are still running and there are no batches in the queue,
        it will automatically wait for the next batch.

        Returns
        -------
        out : None or imgaug.Batch
            One batch or None if all workers have finished.
        """
        if self.all_finished():
            return None

        batch_str = self.queue_result.get()
        batch = pickle.loads(batch_str)
        if batch is not None:
            return batch
        else:
            # a pickled None means one worker has finished
            self.nb_workers_finished += 1
            if self.nb_workers_finished >= self.nb_workers:
                try:
                    self.queue_source.get(timeout=0.001)  # remove the None from the source queue
                except QueueEmpty:
                    pass
                return None
            else:
                # other workers may still produce batches; recurse to wait for one
                return self.get_batch()

    def _augment_images_worker(self, augseq, queue_source, queue_result, seedval):
        """
        Augment endlessly images in the source queue.

        This is a worker function for that endlessly queries the source queue (input batches),
        augments batches in it and sends the result to the output queue.
        """
        # seed every involved RNG so each worker process augments differently
        np.random.seed(seedval)
        random.seed(seedval)
        augseq.reseed(seedval)
        seed(seedval)

        loader_finished = False
        while not loader_finished:
            # wait for a new batch in the source queue and load it
            try:
                batch_str = queue_source.get(timeout=0.1)
                batch = pickle.loads(batch_str)
                if batch is None:
                    loader_finished = True
                    # put it back in so that other workers know that the loading queue is finished
                    queue_source.put(pickle.dumps(None, protocol=-1))
                else:
                    batch_aug = list(augseq.augment_batches([batch], background=False))[0]

                    # send augmented batch to output queue
                    batch_str = pickle.dumps(batch_aug, protocol=-1)
                    queue_result.put(batch_str)
            except QueueEmpty:
                time.sleep(0.01)

        # pickled None marks this worker's end-of-stream for get_batch()
        queue_result.put(pickle.dumps(None, protocol=-1))
        time.sleep(0.01)

    def terminate(self):
        """
        Terminates all background processes immediately.

        This will also free their RAM.
        """
        for worker in self.workers:
            if worker.is_alive():
                worker.terminate()
        # mark everything as finished so get_batch() returns None immediately
        self.nb_workers_finished = len(self.workers)

        # NOTE(review): _closed is a private multiprocessing.Queue attribute — verify
        # it exists across supported python versions
        if not self.queue_result._closed:
            self.queue_result.close()
        time.sleep(0.01)

    def __del__(self):
        # small delay gives in-flight queue operations a chance to complete
        time.sleep(0.1)
        self.terminate()
# Improve HeatmapsOnImage initializer asserts
from __future__ import print_function, division, absolute_import
import random
import math
import copy
import numbers
import multiprocessing
import threading
import traceback
import sys
import os
import time
import json
import types
import numpy as np
import cv2
import imageio
import scipy.spatial.distance
import six
import six.moves as sm
import skimage.draw
import skimage.measure
import collections
import matplotlib.pyplot as plt
import shapely
import shapely.geometry
import shapely.ops
# Python 2/3 compatibility shims: select the pickle/queue implementations and
# define names that only exist natively on one of the two interpreter lines.
if sys.version_info[0] == 2:
    # cPickle is the C-accelerated pickle implementation on python 2
    import cPickle as pickle
    from Queue import Empty as QueueEmpty, Full as QueueFull
    import socket
    # BrokenPipeError does not exist on python 2; socket.error is the closest match
    BrokenPipeError = socket.error
elif sys.version_info[0] == 3:
    import pickle
    from queue import Empty as QueueEmpty, Full as QueueFull
    # alias so python-2-style xrange usage keeps working module-wide
    xrange = range
# presumably a sentinel meaning "all possible values" used elsewhere in the
# library -- TODO confirm against the augmenter parameter handling
ALL = "ALL"

# directory containing this module; used to locate bundled data files
FILE_DIR = os.path.dirname(os.path.abspath(__file__))

# filepath to the quokka image, its annotations and depth map
QUOKKA_FP = os.path.join(FILE_DIR, "quokka.jpg")
QUOKKA_ANNOTATIONS_FP = os.path.join(FILE_DIR, "quokka_annotations.json")
QUOKKA_DEPTH_MAP_HALFRES_FP = os.path.join(FILE_DIR, "quokka_depth_map_halfres.png")

# font bundled alongside the module, e.g. for drawing text onto images
DEFAULT_FONT_FP = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    "DejaVuSans.ttf"
)

# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)

# sets of all numpy float/int/uint scalar types, used for dtype checks
NP_FLOAT_TYPES = set(np.sctypes["float"])
NP_INT_TYPES = set(np.sctypes["int"])
NP_UINT_TYPES = set(np.sctypes["uint"])

IMSHOW_BACKEND_DEFAULT = "matplotlib"

# interpolation identifiers accepted by the resize helpers, as strings or
# their cv2 constant equivalents
IMRESIZE_VALID_INTERPOLATIONS = ["nearest", "linear", "area", "cubic",
                                 cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
def is_np_array(val):
    """
    Check whether a variable is a numpy array.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a numpy array. Otherwise False.
    """
    # NOTE: isinstance(val, (np.ndarray, np.generic)) would also match numpy
    # scalar values, which are not arrays -- hence the stricter check here
    return isinstance(val, np.ndarray)
def is_single_integer(val):
    """
    Check whether a variable is an integer.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is an integer. Otherwise False.
    """
    # bool is a subclass of int, so exclude it explicitly
    if isinstance(val, bool):
        return False
    return isinstance(val, numbers.Integral)
def is_single_float(val):
    """
    Check whether a variable is a float.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a float. Otherwise False.
    """
    # bools are numbers.Real too, so rule them out first
    if isinstance(val, bool):
        return False
    return isinstance(val, numbers.Real) and not is_single_integer(val)
def is_single_number(val):
    """
    Check whether a variable is a number, i.e. an integer or float.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a number. Otherwise False.
    """
    if is_single_integer(val):
        return True
    return is_single_float(val)
def is_iterable(val):
    """
    Check whether a variable is iterable.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is an iterable. Otherwise False.
    """
    # collections.Iterable was deprecated in python 3.3 and removed in 3.10,
    # while collections.abc does not exist on python 2 -- import lazily with
    # a fallback so both interpreter lines keep working.
    try:
        from collections.abc import Iterable
    except ImportError:
        from collections import Iterable
    return isinstance(val, Iterable)
# TODO convert to is_single_string() or rename is_single_integer/float/number()
def is_string(val):
    """
    Check whether a variable is a string.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a string. Otherwise False.
    """
    # six.string_types covers str and, on python 2, also unicode
    return isinstance(val, six.string_types)
def is_single_bool(val):
    """
    Check whether a variable is a boolean.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a boolean. Otherwise False.
    """
    # isinstance() is the idiomatic type check; behavior is identical to the
    # previous `type(val) == type(True)` because bool cannot be subclassed
    return isinstance(val, bool)
def is_integer_array(val):
    """
    Check whether a variable is a numpy integer array.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a numpy integer array. Otherwise False.
    """
    if not is_np_array(val):
        return False
    return issubclass(val.dtype.type, np.integer)
def is_float_array(val):
    """
    Check whether a variable is a numpy float array.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a numpy float array. Otherwise False.
    """
    if not is_np_array(val):
        return False
    return issubclass(val.dtype.type, np.floating)
def is_callable(val):
    """
    Check whether a variable is a callable, e.g. a function.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a callable. Otherwise False.
    """
    # the callable() builtin was absent in python 3.0-3.2
    if sys.version_info[0] == 3 and sys.version_info[1] <= 2:
        return hasattr(val, '__call__')
    return callable(val)
def is_generator(val):
    """
    Check whether a variable is a generator.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a generator. Otherwise False.
    """
    # generator *functions* are not generators; only instantiated generators match
    return isinstance(val, types.GeneratorType)
def caller_name():
    """
    Return the name of the function that called the current function.

    Returns
    -------
    str
        The caller's name as a string.
    """
    caller_frame = sys._getframe(1)  # one level up the call stack
    return caller_frame.f_code.co_name
def seed(seedval):
    """
    Set the seed used by the global random state and thereby all randomness
    in the library.

    This random state is by default used by all augmenters. Under special
    circumstances (e.g. when an augmenter is switched to deterministic mode),
    the global random state is replaced by another -- local -- one.
    The replacement is dependent on the global random state.

    Parameters
    ----------
    seedval : int
        The seed to use.
    """
    CURRENT_RANDOM_STATE.seed(seedval)
def current_random_state():
    """
    Return the current/global random state of the library.

    Returns
    -------
    numpy.random.RandomState
        The current/global random state.
    """
    return CURRENT_RANDOM_STATE
def new_random_state(seed=None, fully_random=False):
    """
    Create a new numpy random state.

    Parameters
    ----------
    seed : None or int, optional
        Optional seed value to use.
        The same datatypes are allowed as for ``numpy.random.RandomState(seed)``.
    fully_random : bool, optional
        Whether to use numpy's random initialization for the
        RandomState (used if set to True). If False, a seed is sampled from
        the global random state, which is a bit faster and hence the default.

    Returns
    -------
    numpy.random.RandomState
        The new random state.
    """
    if seed is None and not fully_random:
        # sampling a seed manually is considerably faster than letting
        # RandomState() initialize itself
        seed = CURRENT_RANDOM_STATE.randint(0, 10**6, 1)[0]
    return np.random.RandomState(seed)
def dummy_random_state():
    """
    Create a dummy random state that is always based on a seed of 1.

    Returns
    -------
    numpy.random.RandomState
        The new random state.
    """
    return np.random.RandomState(1)
def copy_random_state(random_state, force_copy=False):
    """
    Create a copy of a random state.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        The random state to copy.
    force_copy : bool, optional
        If True, this function will always create a copy of every random
        state. If False, it will not copy numpy's default random state,
        but all other random states.

    Returns
    -------
    rs_copy : numpy.random.RandomState
        The copied random state.
    """
    # numpy's global RNG (the module itself) is returned as-is unless forced
    if random_state == np.random and not force_copy:
        return random_state
    rs_copy = dummy_random_state()
    rs_copy.set_state(random_state.get_state())
    return rs_copy
def derive_random_state(random_state):
    """
    Create a single new random state based on an existing random state or seed.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state or seed from which to derive the new random state.

    Returns
    -------
    numpy.random.RandomState
        Derived random state.
    """
    # thin convenience wrapper around the plural variant
    return derive_random_states(random_state, n=1)[0]
# TODO use this everywhere instead of manual seed + create
def derive_random_states(random_state, n=1):
    """
    Create N new random states based on an existing random state or seed.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state or seed from which to derive new random states.
    n : int, optional
        Number of random states to derive.

    Returns
    -------
    list of numpy.random.RandomState
        Derived random states.
    """
    base_seed = random_state.randint(0, 10**6, 1)[0]
    return [new_random_state(base_seed + i) for i in sm.xrange(n)]
def forward_random_state(random_state):
    """
    Advance the internal state of a random state in-place.

    This makes sure that future calls to the random_state will produce new random values.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state to forward.
    """
    # a single draw is enough to advance the generator
    random_state.uniform()
def _quokka_normalize_extract(extract):
    """
    Generate a normalized rectangle to be extracted from the standard quokka image.

    Parameters
    ----------
    extract : 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Unnormalized representation of the image subarea to be extracted.

            * If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)``
              will be extracted from the image.
            * If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
              and ``y2``.
            * If a BoundingBox, then that bounding box's area will be extracted from the image.
            * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
              and a shape matching the full image dimensions (i.e. (643, 960, *)). Then the
              one bounding box will be used similar to BoundingBox.

    Returns
    -------
    bb : imgaug.BoundingBox
        Normalized representation of the area to extract from the standard quokka image.

    Raises
    ------
    Exception
        If `extract` has none of the supported types.
    """
    if extract == "square":
        bb = BoundingBox(x1=0, y1=0, x2=643, y2=643)
    elif isinstance(extract, tuple) and len(extract) == 4:
        bb = BoundingBox(x1=extract[0], y1=extract[1], x2=extract[2], y2=extract[3])
    elif isinstance(extract, BoundingBox):
        bb = extract
    elif isinstance(extract, BoundingBoxesOnImage):
        do_assert(len(extract.bounding_boxes) == 1)
        do_assert(extract.shape[0:2] == (643, 960))
        bb = extract.bounding_boxes[0]
    else:
        # the previous message wrongly claimed that None was accepted;
        # list the actually supported inputs instead
        raise Exception(
            "Expected string 'square' or tuple of four entries or BoundingBox or BoundingBoxesOnImage "
            + "for parameter 'extract', got %s." % (type(extract),)
        )
    return bb
def _compute_resized_shape(from_shape, to_shape):
    """
    Compute the intended shape of an image-like array after resizing.

    Parameters
    ----------
    from_shape : tuple or ndarray
        Old shape of the array, as a ``(H, W)`` or ``(H, W, C)`` tuple or as
        an array with two or three dimensions.
    to_shape : None or tuple of ints or tuple of floats or int or float
        New shape of the array.
        * If None, `from_shape` is kept.
        * If an int ``V``, the new shape is ``(V, V, [C])`` (``C`` kept from `from_shape`).
        * If a float ``V``, the new shape is ``(H*V, W*V, [C])``.
        * If a tuple ``(H', W', [C'])`` of ints, ``H'``/``W'`` are the new height/width.
        * If a tuple ``(H', W', [C'])`` of floats (except ``C``), they are relative
          factors applied to the old height/width.

    Returns
    -------
    to_shape_computed : tuple of int
        New shape.
    """
    if is_np_array(from_shape):
        from_shape = from_shape.shape
    if is_np_array(to_shape):
        to_shape = to_shape.shape

    result = list(from_shape)
    if to_shape is None:
        pass
    elif isinstance(to_shape, tuple):
        # channel axis: either both shapes carry one (must agree) or only the
        # target does (then it is appended)
        if len(from_shape) == 3 and len(to_shape) == 3:
            do_assert(from_shape[2] == to_shape[2])
        elif len(to_shape) == 3:
            result.append(to_shape[2])

        hw = to_shape[0:2]
        if all([is_single_integer(v) for v in hw]):
            result[0] = hw[0]
            result[1] = hw[1]
        elif all([is_single_float(v) for v in hw]):
            result[0] = from_shape[0] if hw[0] is None else int(np.round(from_shape[0] * hw[0]))
            result[1] = from_shape[1] if hw[1] is None else int(np.round(from_shape[1] * hw[1]))
    elif is_single_integer(to_shape) or is_single_float(to_shape):
        result = _compute_resized_shape(from_shape, (to_shape, to_shape))
    else:
        raise Exception("Expected to_shape to be None or ndarray or tuple of floats or tuple of ints or single int "
                        + "or single float, got %s." % (type(to_shape),))
    return result
def quokka(size=None, extract=None):
    """
    Return an image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        Size of the output image, forwarded to :func:`imgaug.imresize_single_image`.
        Usually a ``(H, W)`` tuple, where ``H`` is the desired height and ``W``
        the width. If None, the image is not resized.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea of the quokka image to extract:
        * If None, the whole image is used.
        * If the string ``square``, a squared area ``(x: 0 to max 643, y: 0 to max 643)``
          is extracted.
        * If a tuple, it must contain four numbers ``x1``, ``y1``, ``x2``, ``y2``.
        * If a BoundingBox, that box's area is extracted.
        * If a BoundingBoxesOnImage, it must contain exactly one box and have
          a shape matching the full image dimensions (i.e. ``(643, 960, *)``).

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    img = imageio.imread(QUOKKA_FP, pilmode="RGB")
    if extract is not None:
        img = _quokka_normalize_extract(extract).extract_from_image(img)
    if size is not None:
        target_shape = _compute_resized_shape(img.shape, size)
        img = imresize_single_image(img, target_shape[0:2])
    return img
def quokka_square(size=None):
    """
    Return a square image of a quokka as a numpy array.

    Convenience wrapper around :func:`imgaug.quokka` with ``extract="square"``.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        Size of the output image, forwarded to :func:`imgaug.imresize_single_image`.
        Usually a ``(H, W)`` tuple. If None, the image is not resized.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    return quokka(size=size, extract="square")
def quokka_heatmap(size=None, extract=None):
    """
    Return a heatmap (here: depth map) for the standard example quokka image.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        See :func:`imgaug.quokka`.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        See :func:`imgaug.quokka`.

    Returns
    -------
    result : imgaug.HeatmapsOnImage
        Depth map as a heatmap object. Values close to 0.0 denote objects close
        to the camera, values close to 1.0 denote the furthest objects (among
        all shown objects).
    """
    img = imageio.imread(QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode="RGB")
    # the depth map is stored at half resolution; upscale to the full image size
    img = imresize_single_image(img, (643, 960), interpolation="cubic")

    if extract is not None:
        img = _quokka_normalize_extract(extract).extract_from_image(img)

    if size is None:
        size = (643, 960)
    target_shape = _compute_resized_shape(img.shape, size)
    img = imresize_single_image(img, target_shape[0:2])

    # the depth map was saved with 0 denoting the furthest distance; invert so
    # that 1.0 is furthest away, matching the contract in the docstring
    heatmap = 1 - img.astype(np.float32) / 255.0
    return HeatmapsOnImage(heatmap, shape=heatmap.shape[0:2] + (3,))
def quokka_segmentation_map(size=None, extract=None):
    """
    Return a segmentation map for the standard example quokka image.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        See :func:`imgaug.quokka`.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        See :func:`imgaug.quokka`.

    Returns
    -------
    result : imgaug.SegmentationMapOnImage
        Segmentation map object.
    """
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)

    # the annotation file stores the quokka outline as a single polygon
    polygon_kps = json_dict["polygons"][0]["keypoints"]
    xx = [kp_dict["x"] for kp_dict in polygon_kps]
    yy = [kp_dict["y"] for kp_dict in polygon_kps]

    img_seg = np.zeros((643, 960, 1), dtype=np.float32)
    rr, cc = skimage.draw.polygon(np.array(yy), np.array(xx), shape=img_seg.shape)
    img_seg[rr, cc] = 1.0

    if extract is not None:
        img_seg = _quokka_normalize_extract(extract).extract_from_image(img_seg)

    segmap = SegmentationMapOnImage(img_seg, shape=img_seg.shape[0:2] + (3,))
    if size is not None:
        shape_resized = _compute_resized_shape(img_seg.shape, size)
        segmap = segmap.scale(shape_resized[0:2])
        segmap.shape = tuple(shape_resized[0:2]) + (3,)
    return segmap
def quokka_keypoints(size=None, extract=None):
    """
    Return example keypoints on the standard example quokka image.

    The keypoints cover the eyes, ears, nose and paws.

    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the keypoints are placed. If None,
        the keypoints are not projected to any new size (positions on the
        original image are used). Floats lead to relative size changes, ints
        to absolute sizes in pixels.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.

    Returns
    -------
    kpsoi : imgaug.KeypointsOnImage
        Example keypoints on the quokka image.
    """
    offset_x, offset_y = 0, 0
    bb_extract = None
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1

    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)

    # shift annotated coordinates into the extracted subarea's frame
    keypoints = [
        Keypoint(x=kp_dict["x"] - offset_x, y=kp_dict["y"] - offset_y)
        for kp_dict in json_dict["keypoints"]
    ]

    if bb_extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)
    kpsoi = KeypointsOnImage(keypoints, shape=shape)
    if size is not None:
        kpsoi = kpsoi.on(_compute_resized_shape(shape, size))
    return kpsoi
def quokka_bounding_boxes(size=None, extract=None):
    """
    Return example bounding boxes on the standard example quokka image.

    Currently only a single bounding box is returned, covering the quokka.

    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the bounding boxes are placed. If
        None, the boxes are not projected to any new size (positions on the
        original image are used). Floats lead to relative size changes, ints
        to absolute sizes in pixels.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.

    Returns
    -------
    bbsoi : imgaug.BoundingBoxesOnImage
        Example bounding boxes on the quokka image.
    """
    offset_x, offset_y = 0, 0
    bb_extract = None
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1

    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)

    # shift annotated coordinates into the extracted subarea's frame
    bbs = [
        BoundingBox(
            x1=bb_dict["x1"] - offset_x,
            y1=bb_dict["y1"] - offset_y,
            x2=bb_dict["x2"] - offset_x,
            y2=bb_dict["y2"] - offset_y
        )
        for bb_dict in json_dict["bounding_boxes"]
    ]

    if bb_extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)
    bbsoi = BoundingBoxesOnImage(bbs, shape=shape)
    if size is not None:
        bbsoi = bbsoi.on(_compute_resized_shape(shape, size))
    return bbsoi
def angle_between_vectors(v1, v2):
    """
    Return the angle in radians between vectors `v1` and `v2`.

    From http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python

    Parameters
    ----------
    v1 : (N,) ndarray
        First vector.
    v2 : (N,) ndarray
        Second vector.

    Returns
    -------
    out : float
        Angle in radians.

    Examples
    --------
    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([0, 1, 0]))
    1.5707963267948966

    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([1, 0, 0]))
    0.0

    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([-1, 0, 0]))
    3.141592653589793
    """
    length1 = np.linalg.norm(v1)
    length2 = np.linalg.norm(v2)
    # clip to guard against floating point drift pushing the cosine
    # marginally outside [-1, 1], which would make arccos return NaN
    cos_angle = np.clip(np.dot(v1 / length1, v2 / length2), -1.0, 1.0)
    return np.arccos(cos_angle)
# TODO is this used anywhere?
def compute_line_intersection_point(x1, y1, x2, y2, x3, y3, x4, y4):
    """
    Compute the intersection point of two infinite lines.

    The first line runs through ``(x1, y1)`` and ``(x2, y2)``, the second
    through ``(x3, y3)`` and ``(x4, y4)``. Lines are treated as infinite,
    not as segments.

    Parameters
    ----------
    x1, y1, x2, y2 : number
        Two points defining the first line.
    x3, y3, x4, y4 : number
        Two points defining the second line.

    Returns
    -------
    tuple of number or bool
        ``(x, y)`` coordinate of the intersection point, or ``False`` if the
        lines are parallel (or identical) and no unique intersection exists.
    """
    def _to_homogeneous_line(p, q):
        # line in the form a*x + b*y = c
        a = p[1] - q[1]
        b = q[0] - p[0]
        c = p[0] * q[1] - q[0] * p[1]
        return a, b, -c

    line1 = _to_homogeneous_line((x1, y1), (x2, y2))
    line2 = _to_homogeneous_line((x3, y3), (x4, y4))

    # Cramer's rule; zero determinant means parallel lines
    det = line1[0] * line2[1] - line1[1] * line2[0]
    if det == 0:
        return False
    det_x = line1[2] * line2[1] - line1[1] * line2[2]
    det_y = line1[0] * line2[2] - line1[2] * line2[0]
    return det_x / det, det_y / det
def draw_text(img, y, x, text, color=(0, 255, 0), size=25):
    """
    Draw text on an image.

    This uses by default DejaVuSans as its font, which is included in the
    library.

    Parameters
    ----------
    img : (H,W,3) ndarray
        The image array to draw text on.
        Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
    y : int
        y-coordinate of the top left corner of the text.
    x : int
        x-coordinate of the top left corner of the text.
    text : str
        The text to draw.
    color : iterable of int, optional
        Color of the text to draw. For RGB-images this is expected to be an RGB color.
    size : int, optional
        Font size of the text to draw.

    Returns
    -------
    img_np : (H,W,3) ndarray
        Input image with text drawn on it.
    """
    # keeping PIL here so that it is not a dependency of the library right now
    from PIL import Image, ImageDraw, ImageFont

    do_assert(img.dtype in [np.uint8, np.float32])

    input_dtype = img.dtype
    if img.dtype == np.float32:
        img = img.astype(np.uint8)

    # Normalize the color into a new list of ints in [0, 255]. Building a
    # copy fixes two defects of the previous in-place loop: assigning back
    # into `color` raised TypeError for the (default) tuple argument, and it
    # mutated caller-provided lists as a side effect.
    color_normalized = []
    for val in color:
        if isinstance(val, float):
            # floats are interpreted as fractions of the full 0..255 range
            val = int(val * 255)
        color_normalized.append(int(np.clip(val, 0, 255)))

    img = Image.fromarray(img)
    font = ImageFont.truetype(DEFAULT_FONT_FP, size)
    context = ImageDraw.Draw(img)
    context.text((x, y), text, fill=tuple(color_normalized), font=font)
    img_np = np.asarray(img)
    img_np.setflags(write=True)  # PIL/asarray returns read only array

    if img_np.dtype != input_dtype:
        img_np = img_np.astype(input_dtype)

    return img_np
# TODO rename sizes to size?
# TODO accept lists too as images
def imresize_many_images(images, sizes=None, interpolation=None):
    """
    Resize many images to a specified size.

    Parameters
    ----------
    images : (N,H,W,[C]) ndarray or list of (H,W,[C]) ndarray
        Array of the images to resize.
        Expected to usually be of dtype uint8.
    sizes : float or iterable of int or iterable of float
        The new size of the images, given either as a fraction (a single float) or as
        a ``(height, width)`` tuple of two integers or as a ``(height fraction, width fraction)``
        tuple of two floats.
    interpolation : None or str or int, optional
        The interpolation to use during resize.
        If int, then expected to be one of:
            * ``cv2.INTER_NEAREST`` (nearest neighbour interpolation)
            * ``cv2.INTER_LINEAR`` (linear interpolation)
            * ``cv2.INTER_AREA`` (area interpolation)
            * ``cv2.INTER_CUBIC`` (cubic interpolation)
        If string, then expected to be one of:
            * ``nearest`` (identical to ``cv2.INTER_NEAREST``)
            * ``linear`` (identical to ``cv2.INTER_LINEAR``)
            * ``area`` (identical to ``cv2.INTER_AREA``)
            * ``cubic`` (identical to ``cv2.INTER_CUBIC``)
        If None, the interpolation will be chosen automatically. For size
        increases, area interpolation will be picked and for size decreases,
        linear interpolation will be picked.

    Returns
    -------
    result : (N,H',W',[C]) ndarray
        Array of the resized images.

    Examples
    --------
    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), 2.0)
    Converts 2 RGB images of height and width 16 to images of height and width 16*2 = 32.

    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (16, 32))
    Converts 2 RGB images of height and width 16 to images of height 16 and width 32.

    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (2.0, 4.0))
    Converts 2 RGB images of height and width 16 to images of height 32 and width 64.
    """
    # we just do nothing if the input contains zero images
    # one could also argue that an exception would be appropriate here
    if len(images) == 0:
        return images
    # verify that all input images have height/width > 0
    do_assert(
        all([image.shape[0] > 0 and image.shape[1] > 0 for image in images]),
        ("Cannot resize images, because at least one image has a height and/or width of zero. "
         + "Observed shapes were: %s.") % (str([image.shape for image in images]),)
    )
    # verify that sizes contains only values >0
    if is_single_number(sizes) and sizes <= 0:
        raise Exception(
            "Cannot resize to the target size %.8f, because the value is zero or lower than zero." % (sizes,))
    elif isinstance(sizes, tuple) and (sizes[0] <= 0 or sizes[1] <= 0):
        sizes_str = [
            "int %d" % (sizes[0],) if is_single_integer(sizes[0]) else "float %.8f" % (sizes[0],),
            "int %d" % (sizes[1],) if is_single_integer(sizes[1]) else "float %.8f" % (sizes[1],),
        ]
        sizes_str = "(%s, %s)" % (sizes_str[0], sizes_str[1])
        raise Exception(
            "Cannot resize to the target sizes %s. At least one value is zero or lower than zero." % (sizes_str,))
    # change afterward the validation to make the above error messages match the original input
    # (i.e. a scalar `sizes` must be reported as a scalar, so normalization happens only now)
    if is_single_number(sizes):
        sizes = (sizes, sizes)
    else:
        do_assert(len(sizes) == 2, "Expected tuple with exactly two entries, got %d entries." % (len(sizes),))
        do_assert(all([is_single_number(val) for val in sizes]),
                  "Expected tuple with two ints or floats, got types %s." % (str([type(val) for val in sizes]),))
    # if input is a list, call this function N times for N images
    # but check beforehand if all images have the same shape, then just convert to a single array and de-convert
    # afterwards
    if isinstance(images, list):
        nb_shapes = len(set([image.shape for image in images]))
        if nb_shapes == 1:
            return list(imresize_many_images(np.array(images), sizes=sizes, interpolation=interpolation))
        else:
            # heterogeneous shapes: resize each image individually
            return [imresize_many_images(image[np.newaxis, ...], sizes=sizes, interpolation=interpolation)[0, ...]
                    for image in images]
    shape = images.shape
    do_assert(images.ndim in [3, 4], "Expected array of shape (N, H, W, [C]), got shape %s" % (str(shape),))
    nb_images = shape[0]
    im_height, im_width = shape[1], shape[2]
    nb_channels = shape[3] if images.ndim > 3 else None
    # float sizes are fractions of the current height/width
    height, width = sizes[0], sizes[1]
    height = int(np.round(im_height * height)) if is_single_float(height) else height
    width = int(np.round(im_width * width)) if is_single_float(width) else width
    # no-op resize: return a copy so the caller can never alias the input
    if height == im_height and width == im_width:
        return np.copy(images)
    ip = interpolation
    do_assert(ip is None or ip in IMRESIZE_VALID_INTERPOLATIONS)
    # NOTE(review): auto-selection uses AREA for upscaling and LINEAR for
    # downscaling, matching the docstring above; OpenCV's own guidance is the
    # opposite pairing — confirm this choice is intentional before changing it.
    if ip is None:
        if height > im_height or width > im_width:
            ip = cv2.INTER_AREA
        else:
            ip = cv2.INTER_LINEAR
    elif ip in ["nearest", cv2.INTER_NEAREST]:
        ip = cv2.INTER_NEAREST
    elif ip in ["linear", cv2.INTER_LINEAR]:
        ip = cv2.INTER_LINEAR
    elif ip in ["area", cv2.INTER_AREA]:
        ip = cv2.INTER_AREA
    else:  # if ip in ["cubic", cv2.INTER_CUBIC]:
        ip = cv2.INTER_CUBIC
    result_shape = (nb_images, height, width)
    if nb_channels is not None:
        result_shape = result_shape + (nb_channels,)
    result = np.zeros(result_shape, dtype=images.dtype)
    for img_idx in sm.xrange(nb_images):
        # TODO fallback to scipy here if image isn't uint8
        result_img = cv2.resize(images[img_idx], (width, height), interpolation=ip)
        # cv2 removes the channel axis if input was (H, W, 1)
        # we re-add it (but only if input was not (H, W))
        if len(result_img.shape) == 2 and nb_channels is not None and nb_channels == 1:
            result_img = result_img[:, :, np.newaxis]
        result[img_idx] = result_img.astype(images.dtype)
    return result
def imresize_single_image(image, sizes, interpolation=None):
    """
    Resize a single image.

    Parameters
    ----------
    image : (H,W,C) ndarray or (H,W) ndarray
        Array of the image to resize.
        Expected to usually be of dtype uint8.
    sizes : float or iterable of int or iterable of float
        See :func:`imgaug.imresize_many_images`.
    interpolation : None or str or int, optional
        See :func:`imgaug.imresize_many_images`.

    Returns
    -------
    out : (H',W',C) ndarray or (H',W') ndarray
        The resized image.
    """
    input_was_2d = (image.ndim == 2)
    if input_was_2d:
        # add a channel axis so we can reuse the batch implementation
        image = image[:, :, np.newaxis]
    do_assert(len(image.shape) == 3, image.shape)
    resized = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
    if input_was_2d:
        # strip the batch and channel axes again
        return np.squeeze(resized[0, :, :, 0])
    return resized[0, ...]
def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
    """
    Pad an image-like array on its top/right/bottom/left side.

    This function is a wrapper around :func:`numpy.pad`.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pad.
    top : int, optional
        Amount of pixels to add at the top side of the image. Must be 0 or greater.
    right : int, optional
        Amount of pixels to add at the right side of the image. Must be 0 or greater.
    bottom : int, optional
        Amount of pixels to add at the bottom side of the image. Must be 0 or greater.
    left : int, optional
        Amount of pixels to add at the left side of the image. Must be 0 or greater.
    mode : str, optional
        Padding mode to use. See :func:`numpy.pad` for details.
        For mode ``constant``, `cval` is forwarded as ``constant_values``;
        for mode ``linear_ramp``, `cval` is forwarded as ``end_values``.
    cval : number, optional
        Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

    Returns
    -------
    arr_pad : (H',W') ndarray or (H',W',C) ndarray
        Padded array with height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
    """
    do_assert(arr.ndim in [2, 3])
    do_assert(top >= 0)
    do_assert(right >= 0)
    do_assert(bottom >= 0)
    do_assert(left >= 0)

    if top == 0 and right == 0 and bottom == 0 and left == 0:
        # nothing to pad; still return a copy so callers never alias the input
        return np.copy(arr)

    pad_widths = [(top, bottom), (left, right)]
    if arr.ndim == 3:
        pad_widths.append((0, 0))  # never pad the channel axis

    if mode == "constant":
        return np.pad(arr, pad_widths, mode=mode, constant_values=cval)
    elif mode == "linear_ramp":
        return np.pad(arr, pad_widths, mode=mode, end_values=cval)
    return np.pad(arr, pad_widths, mode=mode)
def compute_paddings_for_aspect_ratio(arr, aspect_ratio):
    """
    Compute the amount of pixels by which an array has to be padded to fulfill an aspect ratio.

    The aspect ratio is given as width/height.
    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array for which to compute pad amounts.
    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.

    Returns
    -------
    result : tuple of int
        Required padding amounts to reach the target aspect ratio, given as a tuple
        of the form ``(top, right, bottom, left)``.
    """
    do_assert(arr.ndim in [2, 3])
    do_assert(aspect_ratio > 0)
    height, width = arr.shape[0:2]
    do_assert(height > 0)

    ratio_current = width / height
    pad_top = pad_right = pad_bottom = pad_left = 0
    if ratio_current < aspect_ratio:
        # image is too narrow (height > width relative to target): pad left/right
        missing = (aspect_ratio * height) - width
        pad_right = int(np.ceil(missing / 2))
        pad_left = int(np.floor(missing / 2))
    elif ratio_current > aspect_ratio:
        # image is too wide (width > height relative to target): pad top/bottom
        missing = ((1/aspect_ratio) * width) - height
        pad_top = int(np.floor(missing / 2))
        pad_bottom = int(np.ceil(missing / 2))

    return pad_top, pad_right, pad_bottom, pad_left
def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0, return_pad_amounts=False):
    """
    Pad an image-like array on its sides so that it matches a target aspect ratio.

    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pad.
    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.
    mode : str, optional
        Padding mode to use. See :func:`numpy.pad` for details.
    cval : number, optional
        Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
    return_pad_amounts : bool, optional
        If False, only the padded image is returned. If True, a tuple with two
        entries is returned: the padded image and a ``(top, right, bottom, left)``
        tuple of ints describing how much each side was padded.

    Returns
    -------
    arr_padded : (H',W') ndarray or (H',W',C) ndarray
        Padded image, fulfilling the given aspect_ratio.
    tuple of int
        Amounts by which the image was padded on each side, given as a tuple
        ``(top, right, bottom, left)``. Only returned if `return_pad_amounts`
        was set to True.
    """
    paddings = compute_paddings_for_aspect_ratio(arr, aspect_ratio)
    pad_top, pad_right, pad_bottom, pad_left = paddings
    arr_padded = pad(
        arr,
        top=pad_top,
        right=pad_right,
        bottom=pad_bottom,
        left=pad_left,
        mode=mode,
        cval=cval
    )
    if return_pad_amounts:
        return arr_padded, paddings
    return arr_padded
def pool(arr, block_size, func, cval=0, preserve_dtype=True):
    """
    Rescale an array by pooling values within blocks.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. Ideally of datatype ``numpy.float64``.
    block_size : int or tuple of int
        Spatial size of each group of values to pool, aka kernel size.
        If a single integer, then a symmetric block of that size along height and width will be used.
        If a tuple of two values, it is assumed to be the block size along height and width of the image-like,
        with pooling happening per channel.
        If a tuple of three values, it is assumed to be the block size along height, width and channels.
    func : callable
        Function to apply to a given block in order to convert it to a single number,
        e.g. :func:`numpy.average`, :func:`numpy.min`, :func:`numpy.max`.
    cval : number, optional
        Value to use in order to pad the array along its border if the array cannot be divided
        by `block_size` without remainder.
    preserve_dtype : bool, optional
        Whether to convert the array back to the input datatype if it is changed away from
        that in the pooling process.

    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after pooling.
    """
    do_assert(arr.ndim in [2, 3])
    is_valid_int = is_single_integer(block_size) and block_size >= 1
    # BUG FIX: the per-entry check was previously a bare list comprehension,
    # which is always truthy when non-empty, so invalid entries (e.g. 0 or
    # floats) were never rejected; all(...) applies the intended validation.
    is_valid_tuple = is_iterable(block_size) and len(block_size) in [2, 3] \
        and all([is_single_integer(val) and val >= 1 for val in block_size])
    do_assert(is_valid_int or is_valid_tuple)

    if is_single_integer(block_size):
        block_size = [block_size, block_size]
    if len(block_size) < arr.ndim:
        # pool per channel when no channel block size was given
        block_size = list(block_size) + [1]

    input_dtype = arr.dtype
    arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func, cval=cval)
    if preserve_dtype and arr_reduced.dtype.type != input_dtype:
        arr_reduced = arr_reduced.astype(input_dtype)
    return arr_reduced
def avg_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Rescale an array using average pooling.

    Thin convenience wrapper around :func:`imgaug.pool` with ``func=numpy.average``.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. See :func:`imgaug.pool` for details.
    block_size : int or tuple of int or tuple of int
        Size of each block of values to pool. See :func:`imgaug.pool` for details.
    cval : number, optional
        Padding value. See :func:`imgaug.pool` for details.
    preserve_dtype : bool, optional
        Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.

    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after average pooling.
    """
    return pool(arr, block_size, np.average, cval=cval, preserve_dtype=preserve_dtype)
def max_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Rescale an array using max-pooling.

    Thin convenience wrapper around :func:`imgaug.pool` with ``func=numpy.max``.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. See :func:`imgaug.pool` for details.
    block_size : int or tuple of int or tuple of int
        Size of each block of values to pool. See :func:`imgaug.pool` for details.
    cval : number, optional
        Padding value. See :func:`imgaug.pool` for details.
    preserve_dtype : bool, optional
        Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.

    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after max-pooling.
    """
    return pool(arr, block_size, np.max, cval=cval, preserve_dtype=preserve_dtype)
def draw_grid(images, rows=None, cols=None):
    """
    Combine multiple input images into a single grid image.

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        The input images to convert to a grid.
        Expected to be RGB and have dtype uint8.
    rows : None or int, optional
        The number of rows to show in the grid.
        If None, it will be automatically derived.
    cols : None or int, optional
        The number of cols to show in the grid.
        If None, it will be automatically derived.

    Returns
    -------
    grid : (H',W',3) ndarray
        Image of the generated grid.
    """
    if is_np_array(images):
        do_assert(images.ndim == 4)
    else:
        do_assert(is_iterable(images) and is_np_array(images[0]) and images[0].ndim == 3)

    nb_images = len(images)
    do_assert(nb_images > 0)

    # every grid cell is as large as the largest image
    cell_height = max([image.shape[0] for image in images])
    cell_width = max([image.shape[1] for image in images])
    channels = set([image.shape[2] for image in images])
    do_assert(
        len(channels) == 1,
        "All images are expected to have the same number of channels, "
        + "but got channel set %s with length %d instead." % (str(channels), len(channels))
    )
    nb_channels = list(channels)[0]

    # derive missing grid dimensions
    if rows is None and cols is None:
        rows = cols = int(math.ceil(math.sqrt(nb_images)))
    elif rows is not None:
        cols = int(math.ceil(nb_images / rows))
    elif cols is not None:
        rows = int(math.ceil(nb_images / cols))
    do_assert(rows * cols >= nb_images)

    grid = np.zeros((cell_height * rows, cell_width * cols, nb_channels), dtype=np.uint8)
    for cell_idx, image in enumerate(images):
        row_idx, col_idx = divmod(cell_idx, cols)
        y1 = cell_height * row_idx
        x1 = cell_width * col_idx
        grid[y1:y1 + image.shape[0], x1:x1 + image.shape[1], :] = image
    return grid
def show_grid(images, rows=None, cols=None):
    """
    Combine the input images into a grid image and display it in a new window.

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        See :func:`imgaug.draw_grid`.
    rows : None or int, optional
        See :func:`imgaug.draw_grid`.
    cols : None or int, optional
        See :func:`imgaug.draw_grid`.
    """
    imshow(draw_grid(images, rows=rows, cols=cols))
def imshow(image, backend=IMSHOW_BACKEND_DEFAULT):
    """
    Show an image in a window.

    Parameters
    ----------
    image : (H,W,3) ndarray
        Image to show.
    backend : {'matplotlib', 'cv2'}, optional
        Library to use to show the image. May be either matplotlib or OpenCV ('cv2').
        OpenCV tends to be faster, but apparently causes more technical issues.
    """
    do_assert(backend in ["matplotlib", "cv2"], "Expected backend 'matplotlib' or 'cv2', got %s." % (backend,))

    if backend == "cv2":
        image_bgr = image
        if image.ndim == 3 and image.shape[2] in [3, 4]:
            # drop any alpha channel and convert RGB -> BGR for OpenCV
            image_bgr = image[..., 0:3][..., ::-1]

        win_name = "imgaug-default-window"
        cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
        cv2.imshow(win_name, image_bgr)
        cv2.waitKey(0)
        cv2.destroyWindow(win_name)
    else:
        plt.imshow(image, cmap="gray")
        plt.gcf().canvas.set_window_title("imgaug.imshow(%s)" % (image.shape,))
        plt.show()
def do_assert(condition, message="Assertion failed."):
    """
    Raise an AssertionError when `condition` is False.

    This behaves like an ``assert`` statement, but survives optimized
    execution (``python -O``), which strips plain asserts. It replaces
    ``assert`` throughout the library wherever the check must always run.

    Parameters
    ----------
    condition : bool
        If False, an exception is raised.
    message : str, optional
        Error message.
    """
    if condition:
        return
    raise AssertionError(str(message))
class HooksImages(object):
    """
    Class to intervene with image augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.

    Parameters
    ----------
    activator : None or callable, optional
        A function that gives permission to execute an augmenter.
        The expected interface is ``f(images, augmenter, parents, default)``,
        where ``images`` are the input images to augment, ``augmenter`` is the
        instance of the augmenter to execute, ``parents`` are previously
        executed augmenters and ``default`` is an expected default value to be
        returned if the activator function does not plan to make a decision
        for the given inputs.
    propagator : None or callable, optional
        A function that gives permission to propagate the augmentation further
        to the children of an augmenter. This happens after the activator.
        In theory, an augmenter may augment images itself (if allowed by the
        activator) and then execute child augmenters afterwards (if allowed by
        the propagator). If the activator returned False, the propagation step
        will never be executed.
        The expected interface is ``f(images, augmenter, parents, default)``,
        with all arguments having identical meaning to the activator.
    preprocessor : None or callable, optional
        A function to call before an augmenter performed any augmentations.
        The interface is ``f(images, augmenter, parents)``,
        with all arguments having identical meaning to the activator.
        It is expected to return the input images, optionally modified.
    postprocessor : None or callable, optional
        A function to call after an augmenter performed augmentations.
        The interface is the same as for the preprocessor.

    Examples
    --------
    >>> seq = iaa.Sequential([
    >>>     iaa.GaussianBlur(3.0, name="blur"),
    >>>     iaa.Dropout(0.05, name="dropout"),
    >>>     iaa.Affine(translate_px=-5, name="affine")
    >>> ])
    >>>
    >>> def activator(images, augmenter, parents, default):
    >>>     return False if augmenter.name in ["blur", "dropout"] else default
    >>>
    >>> seq_det = seq.to_deterministic()
    >>> images_aug = seq_det.augment_images(images)
    >>> heatmaps_aug = seq_det.augment_images(
    >>>     heatmaps,
    >>>     hooks=ia.HooksImages(activator=activator)
    >>> )

    This augments images and their respective heatmaps in the same way.
    The heatmaps however are only modified by Affine, not by GaussianBlur or
    Dropout.
    """

    def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None):
        self.activator = activator
        self.propagator = propagator
        self.preprocessor = preprocessor
        self.postprocessor = postprocessor

    def is_activated(self, images, augmenter, parents, default):
        """
        Return whether an augmenter may be executed.

        Returns
        -------
        bool
            If True, the augmenter may be executed. If False, it may not be executed.
        """
        if self.activator is not None:
            return self.activator(images, augmenter, parents, default)
        return default

    def is_propagating(self, images, augmenter, parents, default):
        """
        Return whether an augmenter may call its children to augment an image.

        This is independent of the augmenter itself possibly changing the
        image without calling its children. (Most (all?) augmenters with
        children currently don't perform any changes themselves.)

        Returns
        -------
        bool
            If True, the augmenter may propagate to its children. If False, it may not.
        """
        if self.propagator is not None:
            return self.propagator(images, augmenter, parents, default)
        return default

    def preprocess(self, images, augmenter, parents):
        """
        Invoked before the augmentation of images starts (per augmenter).

        Returns
        -------
        (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.
        """
        if self.preprocessor is not None:
            return self.preprocessor(images, augmenter, parents)
        return images

    def postprocess(self, images, augmenter, parents):
        """
        Invoked after the augmentation of images was performed.

        Returns
        -------
        (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.
        """
        if self.postprocessor is not None:
            return self.postprocessor(images, augmenter, parents)
        return images
class HooksHeatmaps(HooksImages):
    """
    Class to intervene with heatmap augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.

    Currently identical to the hooks class for images; this may or may not
    change in the future.
    """
    pass
class HooksKeypoints(HooksImages):
    """
    Class to intervene with keypoint augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.

    Currently identical to the hooks class for images; this may or may not
    change in the future.
    """
    pass
def compute_geometric_median(X, eps=1e-5):
    """
    Estimate the geometric median of points in 2D.

    Iterative (Weiszfeld-style) scheme; code adapted from
    https://stackoverflow.com/a/30305181

    Parameters
    ----------
    X : (N,2) ndarray
        Points in 2D. Second axis must be given in xy-form.

    eps : float, optional
        Distance threshold when to return the median.

    Returns
    -------
    (2,) ndarray
        Geometric median as xy-coordinate.

    """
    estimate = np.mean(X, 0)
    while True:
        dists = scipy.spatial.distance.cdist(X, [estimate])
        mask_nonzero = (dists != 0)[:, 0]
        inv_dists = 1 / dists[mask_nonzero]
        sum_inv = np.sum(inv_dists)
        weights = inv_dists / sum_inv
        weighted_mean = np.sum(weights * X[mask_nonzero], 0)
        count_zero = len(X) - np.sum(mask_nonzero)
        if count_zero == 0:
            # estimate coincides with no input point: plain reweighted mean
            candidate = weighted_mean
        elif count_zero == len(X):
            # all points coincide with the estimate: it is the median
            return estimate
        else:
            # estimate sits exactly on >=1 input point: damped update
            residual = (weighted_mean - estimate) * sum_inv
            res_norm = np.linalg.norm(residual)
            step = 0 if res_norm == 0 else count_zero / res_norm
            candidate = max(0, 1 - step) * weighted_mean + min(1, step) * estimate
        if scipy.spatial.distance.euclidean(estimate, candidate) < eps:
            return candidate
        estimate = candidate
class Keypoint(object):
    """
    A single keypoint (aka landmark) on an image.

    Parameters
    ----------
    x : number
        Coordinate of the keypoint on the x axis.

    y : number
        Coordinate of the keypoint on the y axis.

    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    @property
    def x_int(self):
        """Return the keypoint's x-coordinate rounded to the closest integer."""
        return int(np.round(self.x))

    @property
    def y_int(self):
        """Return the keypoint's y-coordinate rounded to the closest integer."""
        return int(np.round(self.y))

    def project(self, from_shape, to_shape):
        """
        Project the keypoint onto a new position on a new image.

        E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
        and y=(20 of 100 pixels) and is projected onto a new image with
        size (width=200, height=200), its new position will be (20, 40).

        This is intended for cases where the original image is resized.
        It cannot be used for more complex changes (e.g. padding, cropping).

        Parameters
        ----------
        from_shape : tuple of int
            Shape of the original image. (Before resize.)

        to_shape : tuple of int
            Shape of the new image. (After resize.)

        Returns
        -------
        imgaug.Keypoint
            Keypoint object with new coordinates.

        """
        if from_shape[0:2] == to_shape[0:2]:
            # identical sizes -> coordinates are unchanged
            return Keypoint(x=self.x, y=self.y)
        height_from, width_from = from_shape[0:2]
        height_to, width_to = to_shape[0:2]
        return Keypoint(
            x=(self.x / width_from) * width_to,
            y=(self.y / height_from) * height_to
        )

    def shift(self, x=0, y=0):
        """
        Move the keypoint around on an image.

        Parameters
        ----------
        x : number, optional
            Move by this value on the x axis.

        y : number, optional
            Move by this value on the y axis.

        Returns
        -------
        imgaug.Keypoint
            Keypoint object with new coordinates.

        """
        return Keypoint(self.x + x, self.y + y)

    def generate_similar_points_manhattan(self, nb_steps, step_size, return_array=False):
        """
        Generate nearby points to this keypoint based on manhattan distance.

        To generate the first neighbouring points, a distance of S (step size) is moved from the
        center point (this keypoint) to the top, right, bottom and left, resulting in four new
        points. From these new points, the pattern is repeated. Overlapping points are ignored.

        The resulting points have a shape similar to a square rotated by 45 degrees.

        Parameters
        ----------
        nb_steps : int
            The number of steps to move from the center point. nb_steps=1 results in a total of
            5 output points (1 center point + 4 neighbours).

        step_size : number
            The step size to move from every point to its neighbours.

        return_array : bool, optional
            Whether to return the generated points as a list of keypoints or an array
            of shape ``(N,2)``, where ``N`` is the number of generated points and the second axis
            contains the x- (first value) and y- (second value) coordinates.

        Returns
        -------
        points : list of imgaug.Keypoint or (N,2) ndarray
            If return_array was False, then a list of Keypoint.
            Otherwise a numpy array of shape ``(N,2)``, where ``N`` is the number of generated
            points and the second axis contains the x- (first value) and y- (second value)
            coordinates. The center keypoint (the one on which this function was called) is
            always included.

        """
        # TODO add test
        # A manhattan-style neighbourhood with S steps looks like a square
        # rotated by 45deg. The center row holds S+1+S = 1+2*S points; the
        # rows above/below shrink by 2 points each (e.g. S=3: 5+3+1=9 per
        # half), contributing 2*(S^2) points, for a total of 1+2*S+2*(S^2).
        nb_rows = nb_steps + 1 + nb_steps
        coords = np.zeros((nb_rows + 2 * (nb_steps ** 2), 2), dtype=np.float32)
        # rows are generated bottom-up, from y-S*step to y+S*step
        ys = np.linspace(self.y - nb_steps * step_size, self.y + nb_steps * step_size, nb_rows)
        row_width = 1  # the bottom-most row contains a single point
        idx = 0
        for row_i, y_coord in enumerate(ys):
            if row_width == 1:
                xs = [self.x]
            else:
                half_width = (row_width - 1) // 2
                xs = np.linspace(self.x - half_width * step_size, self.x + half_width * step_size, row_width)
            for x_coord in xs:
                coords[idx] = [x_coord, y_coord]
                idx += 1
            # rows widen towards the center line, then shrink again
            row_width = row_width + 2 if row_i < nb_steps else row_width - 2
        if return_array:
            return coords
        return [Keypoint(x=xy[0], y=xy[1]) for xy in coords]

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "Keypoint(x=%.8f, y=%.8f)" % (self.x, self.y)
class KeypointsOnImage(object):
    """
    Object that represents all keypoints on a single image.

    Parameters
    ----------
    keypoints : list of imgaug.Keypoint
        List of keypoints on the image.

    shape : tuple of int
        The shape of the image on which the keypoints are placed.

    Examples
    --------
    >>> kps = [Keypoint(x=10, y=20), Keypoint(x=34, y=60)]
    >>> kps_oi = KeypointsOnImage(kps, shape=image.shape)

    """

    def __init__(self, keypoints, shape):
        self.keypoints = keypoints
        if is_np_array(shape):
            # an image was given instead of a shape tuple -> use its shape
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)

    @property
    def height(self):
        return self.shape[0]

    @property
    def width(self):
        return self.shape[1]

    @property
    def empty(self):
        """
        Returns whether this object contains zero keypoints.

        Returns
        -------
        result : bool
            True if this object contains zero keypoints.

        """
        return len(self.keypoints) == 0

    def on(self, image):
        """
        Project keypoints from one image to a new one.

        Parameters
        ----------
        image : ndarray or tuple of int
            New image onto which the keypoints are to be projected.
            May also simply be that new image's shape tuple.

        Returns
        -------
        keypoints : imgaug.KeypointsOnImage
            Object containing all projected keypoints.

        """
        if is_np_array(image):
            shape = image.shape
        else:
            shape = image

        if shape[0:2] == self.shape[0:2]:
            return self.deepcopy()
        else:
            keypoints = [kp.project(self.shape, shape) for kp in self.keypoints]
            return KeypointsOnImage(keypoints, shape)

    def draw_on_image(self, image, color=(0, 255, 0), size=3, copy=True, raise_if_out_of_image=False):
        """
        Draw all keypoints onto a given image. Each keypoint is marked by a square of a chosen color and size.

        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the keypoints.
            This image should usually have the same shape as
            set in KeypointsOnImage.shape.

        color : int or list of int or tuple of int or (3,) ndarray, optional
            The RGB color of all keypoints. If a single int ``C``, then that is
            equivalent to ``(C,C,C)``.

        size : int, optional
            The size of each point. If set to ``C``, each square will have size ``C x C``.

        copy : bool, optional
            Whether to copy the image before drawing the points.

        raise_if_out_of_image : bool, optional
            Whether to raise an exception if any keypoint is outside of the image.

        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn keypoints.

        """
        if copy:
            image = np.copy(image)

        height, width = image.shape[0:2]

        for keypoint in self.keypoints:
            y, x = keypoint.y_int, keypoint.x_int
            if 0 <= y < height and 0 <= x < width:
                x1 = max(x - size//2, 0)
                x2 = min(x + 1 + size//2, width)
                y1 = max(y - size//2, 0)
                y2 = min(y + 1 + size//2, height)
                image[y1:y2, x1:x2] = color
            else:
                if raise_if_out_of_image:
                    # BUGFIX: the interpolated values were previously given
                    # as (y, x), swapping the reported coordinates.
                    raise Exception("Cannot draw keypoint x=%.8f, y=%.8f on image with shape %s." % (x, y, image.shape))

        return image

    def shift(self, x=0, y=0):
        """
        Move the keypoints around on an image.

        Parameters
        ----------
        x : number, optional
            Move each keypoint by this value on the x axis.

        y : number, optional
            Move each keypoint by this value on the y axis.

        Returns
        -------
        out : KeypointsOnImage
            Keypoints after moving them.

        """
        keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]
        return KeypointsOnImage(keypoints, self.shape)

    def get_coords_array(self):
        """
        Convert the coordinates of all keypoints in this object to an array of shape (N,2).

        Returns
        -------
        result : (N, 2) ndarray
            Where N is the number of keypoints. Each first value is the
            x coordinate, each second value is the y coordinate.

        """
        result = np.zeros((len(self.keypoints), 2), np.float32)
        for i, keypoint in enumerate(self.keypoints):
            result[i, 0] = keypoint.x
            result[i, 1] = keypoint.y

        return result

    @staticmethod
    def from_coords_array(coords, shape):
        """
        Convert an array (N,2) with a given image shape to a KeypointsOnImage object.

        Parameters
        ----------
        coords : (N, 2) ndarray
            Coordinates of ``N`` keypoints on the original image.
            Each first entry ``coords[i, 0]`` is expected to be the x coordinate.
            Each second entry ``coords[i, 1]`` is expected to be the y coordinate.

        shape : tuple
            Shape tuple of the image on which the keypoints are placed.

        Returns
        -------
        out : KeypointsOnImage
            KeypointsOnImage object that contains all keypoints from the array.

        """
        keypoints = [Keypoint(x=coords[i, 0], y=coords[i, 1]) for i in sm.xrange(coords.shape[0])]
        return KeypointsOnImage(keypoints, shape)

    def to_keypoint_image(self, size=1):
        """
        Draws a new black image of shape ``(H,W,N)`` in which all keypoint coordinates are marked.
        (H=shape height, W=shape width, N=number of keypoints)

        Each keypoint's exact pixel is set to 255; the surrounding square of
        side length `size` is set to 128.

        This function can be used as a helper when augmenting keypoints with a method that only supports the
        augmentation of images.

        Parameters
        -------
        size : int
            Size of each (squared) point.

        Returns
        -------
        image : (H,W,N) ndarray
            Image in which the keypoints are marked. H is the height,
            defined in KeypointsOnImage.shape[0] (analogous W). N is the
            number of keypoints.

        """
        do_assert(len(self.keypoints) > 0)
        height, width = self.shape[0:2]
        image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
        do_assert(size % 2 != 0)
        sizeh = max(0, (size-1)//2)
        for i, keypoint in enumerate(self.keypoints):
            # TODO for float values spread activation over several cells
            # here and do voting at the end
            y = keypoint.y_int
            x = keypoint.x_int

            x1 = np.clip(x - sizeh, 0, width-1)
            x2 = np.clip(x + sizeh + 1, 0, width)
            y1 = np.clip(y - sizeh, 0, height-1)
            y2 = np.clip(y + sizeh + 1, 0, height)

            if x1 < x2 and y1 < y2:
                image[y1:y2, x1:x2, i] = 128
            if 0 <= y < height and 0 <= x < width:
                image[y, x, i] = 255

        return image

    @staticmethod
    def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1, nb_channels=None): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Converts an image generated by ``to_keypoint_image()`` back to a KeypointsOnImage object.

        Parameters
        ----------
        image : (H,W,N) ndarray
            The keypoints image. N is the number of keypoints.

        if_not_found_coords : tuple or list or dict or None, optional
            Coordinates to use for keypoints that cannot be found in `image`.
            If this is a list/tuple, it must have two integer values.
            If it is a dictionary, it must have the keys ``x`` and ``y`` with
            each containing one integer value.
            If this is None, then the keypoint will not be added to the final
            KeypointsOnImage object.

        threshold : int, optional
            The search for keypoints works by searching for the argmax in
            each channel. This parameters contains the minimum value that
            the max must have in order to be viewed as a keypoint.

        nb_channels : None or int, optional
            Number of channels of the image on which the keypoints are placed.
            Some keypoint augmenters require that information.
            If set to None, the keypoint's shape will be set
            to ``(height, width)``, otherwise ``(height, width, nb_channels)``.

        Returns
        -------
        out : KeypointsOnImage
            The extracted keypoints.

        """
        do_assert(len(image.shape) == 3)
        height, width, nb_keypoints = image.shape

        drop_if_not_found = False
        if if_not_found_coords is None:
            drop_if_not_found = True
            if_not_found_x = -1
            if_not_found_y = -1
        elif isinstance(if_not_found_coords, (tuple, list)):
            do_assert(len(if_not_found_coords) == 2)
            if_not_found_x = if_not_found_coords[0]
            if_not_found_y = if_not_found_coords[1]
        elif isinstance(if_not_found_coords, dict):
            if_not_found_x = if_not_found_coords["x"]
            if_not_found_y = if_not_found_coords["y"]
        else:
            raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
                type(if_not_found_coords),))

        keypoints = []
        for i in sm.xrange(nb_keypoints):
            maxidx_flat = np.argmax(image[..., i])
            maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
            found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
            if found:
                keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
            else:
                if drop_if_not_found:
                    pass # dont add the keypoint to the result list, i.e. drop it
                else:
                    keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))

        out_shape = (height, width)
        if nb_channels is not None:
            out_shape += (nb_channels,)
        return KeypointsOnImage(keypoints, shape=out_shape)

    def to_distance_maps(self, inverted=False):
        """
        Generates a ``(H,W,K)`` output containing ``K`` distance maps for ``K`` keypoints.

        The k-th distance map contains at every location ``(y, x)`` the euclidean distance to the k-th keypoint.

        This function can be used as a helper when augmenting keypoints with a method that only supports
        the augmentation of images.

        Parameters
        -------
        inverted : bool, optional
            If True, inverted distance maps are returned where each distance value d is replaced
            by ``d/(d+1)``, i.e. the distance maps have values in the range ``(0.0, 1.0]`` with 1.0
            denoting exactly the position of the respective keypoint.

        Returns
        -------
        distance_maps : (H,W,K) ndarray
            A ``float32`` array containing ``K`` distance maps for ``K`` keypoints. Each location
            ``(y, x, k)`` in the array denotes the euclidean distance at ``(y, x)`` to the ``k``-th keypoint.
            In inverted mode the distance ``d`` is replaced by ``d/(d+1)``. The height and width
            of the array match the height and width in ``KeypointsOnImage.shape``.

        """
        do_assert(len(self.keypoints) > 0)
        height, width = self.shape[0:2]
        distance_maps = np.zeros((height, width, len(self.keypoints)), dtype=np.float32)

        yy = np.arange(0, height)
        xx = np.arange(0, width)
        grid_xx, grid_yy = np.meshgrid(xx, yy)

        for i, keypoint in enumerate(self.keypoints):
            y, x = keypoint.y, keypoint.x
            distance_maps[:, :, i] = (grid_xx - x) ** 2 + (grid_yy - y) ** 2

        distance_maps = np.sqrt(distance_maps)
        if inverted:
            return 1/(distance_maps+1)
        return distance_maps

    # TODO add option to if_not_found_coords to reuse old keypoint coords
    @staticmethod
    def from_distance_maps(distance_maps, inverted=False, if_not_found_coords={"x": -1, "y": -1}, threshold=None, # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
                           nb_channels=None):
        """
        Converts maps generated by ``to_distance_maps()`` back to a KeypointsOnImage object.

        Parameters
        ----------
        distance_maps : (H,W,N) ndarray
            The distance maps. N is the number of keypoints.

        inverted : bool, optional
            Whether the given distance maps were generated in inverted or normal mode.

        if_not_found_coords : tuple or list or dict or None, optional
            Coordinates to use for keypoints that cannot be found in ``distance_maps``.
            If this is a list/tuple, it must have two integer values.
            If it is a dictionary, it must have the keys ``x`` and ``y``, with each
            containing one integer value.
            If this is None, then the keypoint will not be added to the final
            KeypointsOnImage object.

        threshold : float, optional
            The search for keypoints works by searching for the argmin (non-inverted) or
            argmax (inverted) in each channel. This parameters contains the maximum (non-inverted)
            or minimum (inverted) value to accept in order to view a hit as a keypoint.
            Use None to use no min/max.

        nb_channels : None or int, optional
            Number of channels of the image on which the keypoints are placed.
            Some keypoint augmenters require that information.
            If set to None, the keypoint's shape will be set
            to ``(height, width)``, otherwise ``(height, width, nb_channels)``.

        Returns
        -------
        imgaug.KeypointsOnImage
            The extracted keypoints.

        """
        do_assert(len(distance_maps.shape) == 3)
        height, width, nb_keypoints = distance_maps.shape

        drop_if_not_found = False
        if if_not_found_coords is None:
            drop_if_not_found = True
            if_not_found_x = -1
            if_not_found_y = -1
        elif isinstance(if_not_found_coords, (tuple, list)):
            do_assert(len(if_not_found_coords) == 2)
            if_not_found_x = if_not_found_coords[0]
            if_not_found_y = if_not_found_coords[1]
        elif isinstance(if_not_found_coords, dict):
            if_not_found_x = if_not_found_coords["x"]
            if_not_found_y = if_not_found_coords["y"]
        else:
            raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
                type(if_not_found_coords),))

        keypoints = []
        for i in sm.xrange(nb_keypoints):
            # TODO introduce voting here among all distance values that have min/max values
            if inverted:
                hitidx_flat = np.argmax(distance_maps[..., i])
            else:
                hitidx_flat = np.argmin(distance_maps[..., i])
            hitidx_ndim = np.unravel_index(hitidx_flat, (height, width))
            if not inverted and threshold is not None:
                found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] < threshold)
            elif inverted and threshold is not None:
                found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] >= threshold)
            else:
                found = True
            if found:
                keypoints.append(Keypoint(x=hitidx_ndim[1], y=hitidx_ndim[0]))
            else:
                if drop_if_not_found:
                    pass # dont add the keypoint to the result list, i.e. drop it
                else:
                    keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))

        out_shape = (height, width)
        if nb_channels is not None:
            out_shape += (nb_channels,)
        return KeypointsOnImage(keypoints, shape=out_shape)

    def copy(self):
        """
        Create a shallow copy of the KeypointsOnImage object.

        Returns
        -------
        imgaug.KeypointsOnImage
            Shallow copy.

        """
        return copy.copy(self)

    def deepcopy(self):
        """
        Create a deep copy of the KeypointsOnImage object.

        Returns
        -------
        imgaug.KeypointsOnImage
            Deep copy.

        """
        # for some reason deepcopy is way slower here than manual copy
        kps = [Keypoint(x=kp.x, y=kp.y) for kp in self.keypoints]
        return KeypointsOnImage(kps, tuple(self.shape))

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "KeypointsOnImage(%s, shape=%s)" % (str(self.keypoints), self.shape)
# TODO functions: square(), to_aspect_ratio(), contains_point()
class BoundingBox(object):
"""
Class representing bounding boxes.
Each bounding box is parameterized by its top left and bottom right corners. Both are given
as x and y-coordinates. The corners are intended to lie inside the bounding box area.
As a result, a bounding box that lies completely inside the image but has maximum extensions
would have coordinates ``(0.0, 0.0)`` and ``(W - epsilon, H - epsilon)``. Note that coordinates
are saved internally as floats.
Parameters
----------
x1 : number
X-coordinate of the top left of the bounding box.
y1 : number
Y-coordinate of the top left of the bounding box.
x2 : number
X-coordinate of the bottom right of the bounding box.
y2 : number
Y-coordinate of the bottom right of the bounding box.
label : None or str, optional
Label of the bounding box, e.g. a string representing the class.
"""
def __init__(self, x1, y1, x2, y2, label=None):
    """Create a new BoundingBox instance."""
    # normalize so that (x1, y1) is the top left and (x2, y2) the bottom right corner
    if x1 > x2:
        x1, x2 = x2, x1
    do_assert(x2 >= x1)
    if y1 > y2:
        y1, y2 = y2, y1
    do_assert(y2 >= y1)

    self.x1 = x1
    self.y1 = y1
    self.x2 = x2
    self.y2 = y2
    self.label = label
@property
def x1_int(self):
"""
Return the x-coordinate of the top left corner as an integer.
Returns
-------
int
X-coordinate of the top left corner, rounded to the closest integer.
"""
return int(np.round(self.x1)) # use numpy's round to have consistent behaviour between python versions
@property
def y1_int(self):
"""
Return the y-coordinate of the top left corner as an integer.
Returns
-------
int
Y-coordinate of the top left corner, rounded to the closest integer.
"""
return int(np.round(self.y1)) # use numpy's round to have consistent behaviour between python versions
@property
def x2_int(self):
"""
Return the x-coordinate of the bottom left corner as an integer.
Returns
-------
int
X-coordinate of the bottom left corner, rounded to the closest integer.
"""
return int(np.round(self.x2)) # use numpy's round to have consistent behaviour between python versions
@property
def y2_int(self):
"""
Return the y-coordinate of the bottom left corner as an integer.
Returns
-------
int
Y-coordinate of the bottom left corner, rounded to the closest integer.
"""
return int(np.round(self.y2)) # use numpy's round to have consistent behaviour between python versions
@property
def height(self):
"""
Estimate the height of the bounding box.
Returns
-------
number
Height of the bounding box.
"""
return self.y2 - self.y1
@property
def width(self):
"""
Estimate the width of the bounding box.
Returns
-------
number
Width of the bounding box.
"""
return self.x2 - self.x1
@property
def center_x(self):
"""
Estimate the x-coordinate of the center point of the bounding box.
Returns
-------
number
X-coordinate of the center point of the bounding box.
"""
return self.x1 + self.width/2
@property
def center_y(self):
"""
Estimate the y-coordinate of the center point of the bounding box.
Returns
-------
number
Y-coordinate of the center point of the bounding box.
"""
return self.y1 + self.height/2
@property
def area(self):
"""
Estimate the area of the bounding box.
Returns
-------
number
Area of the bounding box, i.e. `height * width`.
"""
return self.height * self.width
def contains(self, other):
"""
Estimate whether the bounding box contains a point.
Parameters
----------
other : imgaug.Keypoint
Point to check for.
Returns
-------
bool
True if the point is contained in the bounding box, False otherwise.
"""
x, y = other.x, other.y
return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2
def project(self, from_shape, to_shape):
    """
    Project the bounding box onto a new position on a new image.

    E.g. if the bounding box is on its original image at
    x1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto
    a new image with size (width=200, height=200), its new position will
    be (x1=20, y1=40). (Analogous for x2/y2.)

    This is intended for cases where the original image is resized.
    It cannot be used for more complex changes (e.g. padding, cropping).

    Parameters
    ----------
    from_shape : tuple of int
        Shape of the original image. (Before resize.)

    to_shape : tuple of int
        Shape of the new image. (After resize.)

    Returns
    -------
    out : imgaug.BoundingBox
        BoundingBox object with new coordinates.

    """
    if from_shape[0:2] == to_shape[0:2]:
        # identical sizes -> coordinates stay unchanged
        return self.copy()
    height_from, width_from = from_shape[0:2]
    height_to, width_to = to_shape[0:2]
    do_assert(height_from > 0)
    do_assert(width_from > 0)
    do_assert(height_to > 0)
    do_assert(width_to > 0)
    return self.copy(
        x1=(self.x1 / width_from) * width_to,
        y1=(self.y1 / height_from) * height_to,
        x2=(self.x2 / width_from) * width_to,
        y2=(self.y2 / height_from) * height_to,
        label=self.label
    )
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
    """
    Extend the size of the bounding box along its sides.

    Parameters
    ----------
    all_sides : number, optional
        Value by which to extend the bounding box size along all sides.

    top : number, optional
        Value by which to extend the bounding box size along its top side.

    right : number, optional
        Value by which to extend the bounding box size along its right side.

    bottom : number, optional
        Value by which to extend the bounding box size along its bottom side.

    left : number, optional
        Value by which to extend the bounding box size along its left side.

    Returns
    -------
    imgaug.BoundingBox
        Extended bounding box.

    """
    # BUGFIX: the label was previously dropped here; keep it, consistent
    # with project()/cut_out_of_image()/shift() which preserve the label.
    return BoundingBox(
        x1=self.x1 - all_sides - left,
        x2=self.x2 + all_sides + right,
        y1=self.y1 - all_sides - top,
        y2=self.y2 + all_sides + bottom,
        label=self.label
    )
def intersection(self, other, default=None):
    """
    Compute the intersection bounding box of this bounding box and another one.

    Note that in extreme cases, the intersection can be a single point, meaning that the
    intersection bounding box will exist, but then also has a height and width of zero.

    Parameters
    ----------
    other : imgaug.BoundingBox
        Other bounding box with which to generate the intersection.

    default : any, optional
        Default value to return if there is no intersection.

    Returns
    -------
    imgaug.BoundingBox or any
        Intersection bounding box of the two bounding boxes if there is an intersection.
        If there is no intersection, the default value will be returned, which can be anything.

    """
    inter_x1 = max(self.x1, other.x1)
    inter_y1 = max(self.y1, other.y1)
    inter_x2 = min(self.x2, other.x2)
    inter_y2 = min(self.y2, other.y2)
    # an "inverted" overlap region means the boxes do not intersect at all
    if inter_x1 > inter_x2 or inter_y1 > inter_y2:
        return default
    return BoundingBox(x1=inter_x1, y1=inter_y1, x2=inter_x2, y2=inter_y2)
def union(self, other):
    """
    Compute the union bounding box of this bounding box and another one.

    This is equivalent to drawing a bounding box around all corner points of both
    bounding boxes.

    Parameters
    ----------
    other : imgaug.BoundingBox
        Other bounding box with which to generate the union.

    Returns
    -------
    imgaug.BoundingBox
        Union bounding box of the two bounding boxes.

    """
    return BoundingBox(
        x1=min(self.x1, other.x1),
        y1=min(self.y1, other.y1),
        x2=max(self.x2, other.x2),
        y2=max(self.y2, other.y2),
    )
def iou(self, other):
    """
    Compute the IoU of this bounding box with another one.

    IoU is the intersection over union, defined as::

        ``area(intersection(A, B)) / area(union(A, B))``
        ``= area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))``

    Parameters
    ----------
    other : imgaug.BoundingBox
        Other bounding box with which to compare.

    Returns
    -------
    float
        IoU between the two bounding boxes.

    """
    inter = self.intersection(other)
    if inter is None:
        # disjoint boxes -> zero overlap
        return 0.0
    union_area = self.area + other.area - inter.area
    if union_area > 0:
        return inter.area / union_area
    return 0.0
def is_fully_within_image(self, image):
"""
Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is fully inside the image area. False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
return self.x1 >= 0 and self.x2 < width and self.y1 >= 0 and self.y2 < height
def is_partly_within_image(self, image):
    """
    Estimate whether the bounding box is at least partially inside the image area.

    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image dimensions to use.
        If an ndarray, its shape will be used.
        If a tuple, it is assumed to represent the image shape
        and must contain at least two integers.

    Returns
    -------
    bool
        True if the bounding box is at least partially inside the image area. False otherwise.

    """
    shape = image if isinstance(image, tuple) else image.shape
    height, width = shape[0:2]
    # shrink the image box by a tiny epsilon so that corners are treated as inside
    eps = np.finfo(np.float32).eps
    image_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps)
    return self.intersection(image_bb) is not None
def is_out_of_image(self, image, fully=True, partly=False):
    """
    Estimate whether the bounding box is partially or fully outside of the image area.

    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
        assumed to represent the image shape and must contain at least two integers.

    fully : bool, optional
        Whether to return True if the bounding box is fully outside of the image area.

    partly : bool, optional
        Whether to return True if the bounding box is at least partially outside of the
        image area.

    Returns
    -------
    bool
        True if the bounding box is partially/fully outside of the image area, depending
        on defined parameters. False otherwise.

    """
    if self.is_fully_within_image(image):
        return False
    if self.is_partly_within_image(image):
        return partly
    return fully
def cut_out_of_image(self, image):
    """
    Cut off all parts of the bounding box that are outside of the image.

    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image dimensions to use for the clipping of the bounding box.
        If an ndarray, its shape will be used.
        If a tuple, it is assumed to represent the image shape and must contain at least two integers.

    Returns
    -------
    result : imgaug.BoundingBox
        Bounding box, clipped to fall within the image dimensions.

    """
    shape = image if isinstance(image, tuple) else image.shape

    height, width = shape[0:2]
    do_assert(height > 0)
    do_assert(width > 0)

    # clip against image extents minus a tiny epsilon so corners stay inside
    eps = np.finfo(np.float32).eps
    return self.copy(
        x1=np.clip(self.x1, 0, width - eps),
        y1=np.clip(self.y1, 0, height - eps),
        x2=np.clip(self.x2, 0, width - eps),
        y2=np.clip(self.y2, 0, height - eps),
        label=self.label
    )
def shift(self, top=None, right=None, bottom=None, left=None):
    """
    Shift the bounding box from one or more image sides, i.e. move it on the x/y-axis.
    Parameters
    ----------
    top : None or int, optional
        Amount of pixels by which to shift the bounding box from the top.
    right : None or int, optional
        Amount of pixels by which to shift the bounding box from the right.
    bottom : None or int, optional
        Amount of pixels by which to shift the bounding box from the bottom.
    left : None or int, optional
        Amount of pixels by which to shift the bounding box from the left.
    Returns
    -------
    result : imgaug.BoundingBox
        Shifted bounding box.
    """
    # Treat missing offsets as zero, then collapse the four sides into a
    # single (dx, dy) translation.
    top, right, bottom, left = [0 if v is None else v for v in (top, right, bottom, left)]
    dx = left - right
    dy = top - bottom
    return self.copy(
        x1=self.x1 + dx,
        x2=self.x2 + dx,
        y1=self.y1 + dy,
        y2=self.y2 + dy
    )
# TODO add explicit test for zero-sized BBs (worked when tested by hand)
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
    """
    Draw the bounding box on an image.
    Parameters
    ----------
    image : (H,W,C) ndarray(uint8)
        The image onto which to draw the bounding box.
    color : iterable of int, optional
        The color to use, corresponding to the channel layout of the image. Usually RGB.
    alpha : float, optional
        The transparency of the drawn bounding box, where 1.0 denotes no transparency and
        0.0 is invisible.
    thickness : int, optional
        The thickness of the bounding box in pixels. If the value is larger than 1, then
        additional pixels will be added around the bounding box (i.e. extension towards the
        outside).
    copy : bool, optional
        Whether to copy the input image or change it in-place.
    raise_if_out_of_image : bool, optional
        Whether to raise an error if the bounding box is partially/fully outside of the
        image. If set to False, no error will be raised and only the parts inside the image
        will be drawn.
    Returns
    -------
    result : (H,W,C) ndarray(uint8)
        Image with bounding box drawn on it.
    """
    if raise_if_out_of_image and self.is_out_of_image(image):
        raise Exception("Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s." % (
            self.x1, self.y1, self.x2, self.y2, image.shape))
    result = np.copy(image) if copy else image
    if isinstance(color, (tuple, list)):
        color = np.uint8(color)
    # Each iteration draws one 1px rectangle outline, expanded outwards by i
    # pixels, so thickness>1 grows the border towards the outside of the box.
    for i in range(thickness):
        y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int
        # When y values get into the range (H-0.5, H), the *_int functions round them to H.
        # That is technically sensible, but in the case of drawing means that the border lies
        # just barely outside of the image, making the border disappear, even though the BB
        # is fully inside the image. Here we correct for that because of beauty reasons.
        # Same is the case for x coordinates.
        if self.is_fully_within_image(image):
            y1 = np.clip(y1, 0, image.shape[0]-1)
            y2 = np.clip(y2, 0, image.shape[0]-1)
            x1 = np.clip(x1, 0, image.shape[1]-1)
            x2 = np.clip(x2, 0, image.shape[1]-1)
        # Corner coordinates of the rectangle, clockwise from top-left.
        y = [y1-i, y1-i, y2+i, y2+i]
        x = [x1-i, x2+i, x2+i, x1-i]
        # shape=result.shape makes skimage clip coordinates outside the image.
        rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
        if alpha >= 0.99:
            # (Near-)opaque: overwrite pixels directly, no blending needed.
            result[rr, cc, :] = color
        else:
            if is_float_array(result):
                result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                # NOTE(review): float images are clipped to [0, 255] here —
                # assumes float images use the 0..255 value range; confirm.
                result = np.clip(result, 0, 255)
            else:
                # Blend in float32 to avoid integer truncation, then restore dtype.
                input_dtype = result.dtype
                result = result.astype(np.float32)
                result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                result = np.clip(result, 0, 255).astype(input_dtype)
    return result
def extract_from_image(self, image, prevent_zero_size=True):
    """
    Extract the image pixels within the bounding box.
    This function will zero-pad the image if the bounding box is partially/fully outside of
    the image.
    Parameters
    ----------
    image : (H,W) ndarray or (H,W,C) ndarray
        The image from which to extract the pixels within the bounding box.
    prevent_zero_size : bool, optional
        Whether to prevent height or width of the extracted image from becoming zero.
        If this is set to True and height or width of the bounding box is below 1, the height/width will
        be increased to 1. This can be useful to prevent problems, e.g. with image saving or plotting.
        If it is set to False, images will be returned as ``(H', W')`` or ``(H', W', 3)`` with ``H`` or
        ``W`` potentially being 0.
    Returns
    -------
    image : (H',W') ndarray or (H',W',C) ndarray
        Pixels within the bounding box. Zero-padded if the bounding box is partially/fully
        outside of the image. If prevent_zero_size is activated, it is guarantueed that ``H'>0``
        and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``.
    """
    # Amount of zero-padding needed per side before slicing.
    pad_top = 0
    pad_right = 0
    pad_bottom = 0
    pad_left = 0
    height, width = image.shape[0], image.shape[1]
    x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
    # When y values get into the range (H-0.5, H), the *_int functions round them to H.
    # That is technically sensible, but in the case of extraction leads to a black border,
    # which is both ugly and unexpected after calling cut_out_of_image(). Here we correct for
    # that because of beauty reasons.
    # Same is the case for x coordinates.
    if self.is_fully_within_image(image):
        y1 = np.clip(y1, 0, image.shape[0]-1)
        y2 = np.clip(y2, 0, image.shape[0]-1)
        x1 = np.clip(x1, 0, image.shape[1]-1)
        x2 = np.clip(x2, 0, image.shape[1]-1)
    # TODO add test
    if prevent_zero_size:
        # Guarantee a >=1px extraction area so downstream code never sees
        # zero-sized arrays.
        if abs(x2 - x1) < 1:
            x2 = x1 + 1
        if abs(y2 - y1) < 1:
            y2 = y1 + 1
    # if the bb is outside of the image area, the following pads the image
    # first with black pixels until the bb is inside the image
    # and only then extracts the image area
    # TODO probably more efficient to initialize an array of zeros
    # and copy only the portions of the bb into that array that are
    # natively inside the image area
    if x1 < 0:
        # Shift both x coordinates right so x1 lands at 0 in the padded image.
        pad_left = abs(x1)
        x2 = x2 + abs(x1)
        x1 = 0
    if y1 < 0:
        pad_top = abs(y1)
        y2 = y2 + abs(y1)
        y1 = 0
    if x2 >= width:
        # NOTE(review): since the slice end x2 is exclusive, this pads one
        # more column than strictly needed when x2 == width; the extra column
        # is not included in the returned slice, so the output is unaffected.
        pad_right = x2 - (width - 1)
    if y2 >= height:
        # NOTE(review): same one-extra-row observation as for pad_right above.
        pad_bottom = y2 - (height - 1)
    if any([val > 0 for val in [pad_top, pad_right, pad_bottom, pad_left]]):
        if len(image.shape) == 2:
            image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right)), mode="constant")
        else:
            image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), mode="constant")
    return image[y1:y2, x1:x2]
# TODO also add to_heatmap
# TODO add this to BoundingBoxesOnImage
def to_keypoints(self):
    """
    Convert the corners of the bounding box to keypoints (clockwise, starting at top left).
    Returns
    -------
    list of imgaug.Keypoint
        Corners of the bounding box as keypoints.
    """
    # Clockwise corner order: top-left, top-right, bottom-right, bottom-left.
    corners = [
        (self.x1, self.y1),
        (self.x2, self.y1),
        (self.x2, self.y2),
        (self.x1, self.y2)
    ]
    return [Keypoint(x=cx, y=cy) for cx, cy in corners]
def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
    """
    Create a shallow copy of the BoundingBox object.
    Parameters
    ----------
    x1 : None or number
        If not None, then the x1 coordinate of the copied object will be set to this value.
    y1 : None or number
        If not None, then the y1 coordinate of the copied object will be set to this value.
    x2 : None or number
        If not None, then the x2 coordinate of the copied object will be set to this value.
    y2 : None or number
        If not None, then the y2 coordinate of the copied object will be set to this value.
    label : None or string
        If not None, then the label of the copied object will be set to this value.
    Returns
    -------
    imgaug.BoundingBox
        Shallow copy.
    """
    def _pick(override, current):
        # None means "keep the current attribute value".
        return current if override is None else override
    return BoundingBox(
        x1=_pick(x1, self.x1),
        x2=_pick(x2, self.x2),
        y1=_pick(y1, self.y1),
        y2=_pick(y2, self.y2),
        label=_pick(label, self.label)
    )
def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
    """
    Create a deep copy of the BoundingBox object.
    Parameters
    ----------
    x1 : None or number
        If not None, then the x1 coordinate of the copied object will be set to this value.
    y1 : None or number
        If not None, then the y1 coordinate of the copied object will be set to this value.
    x2 : None or number
        If not None, then the x2 coordinate of the copied object will be set to this value.
    y2 : None or number
        If not None, then the y2 coordinate of the copied object will be set to this value.
    label : None or string
        If not None, then the label of the copied object will be set to this value.
    Returns
    -------
    imgaug.BoundingBox
        Deep copy.
    """
    # All attributes are immutable (numbers/strings), so a shallow copy is
    # already equivalent to a deep copy.
    return self.copy(x1, y1, x2, y2, label)
def __repr__(self):
    # Same textual form as __str__.
    return str(self)
def __str__(self):
    # Coordinates with four decimals, label via its str() form.
    return (
        f"BoundingBox(x1={self.x1:.4f}, y1={self.y1:.4f}, "
        f"x2={self.x2:.4f}, y2={self.y2:.4f}, label={self.label})"
    )
class BoundingBoxesOnImage(object):
    """
    Object that represents all bounding boxes on a single image.
    Parameters
    ----------
    bounding_boxes : list of imgaug.BoundingBox
        List of bounding boxes on the image.
    shape : tuple of int
        The shape of the image on which the bounding boxes are placed.
    Examples
    --------
    >>> bbs = [
    >>>     BoundingBox(x1=10, y1=20, x2=20, y2=30),
    >>>     BoundingBox(x1=25, y1=50, x2=30, y2=70)
    >>> ]
    >>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
    """
    def __init__(self, bounding_boxes, shape):
        self.bounding_boxes = bounding_boxes
        if is_np_array(shape):
            # Allow passing the image itself instead of its shape.
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)
    # TODO remove this? here it is image height at BoundingBox it is bounding box height
    @property
    def height(self):
        """
        Get the height of the image on which the bounding boxes fall.
        Returns
        -------
        int
            Image height.
        """
        return self.shape[0]
    # TODO remove this? here it is image width at BoundingBox it is bounding box width
    @property
    def width(self):
        """
        Get the width of the image on which the bounding boxes fall.
        Returns
        -------
        int
            Image width.
        """
        return self.shape[1]
    @property
    def empty(self):
        """
        Returns whether this object contains zero bounding boxes.
        Returns
        -------
        bool
            True if this object contains zero bounding boxes.
        """
        return len(self.bounding_boxes) == 0
    def on(self, image):
        """
        Project bounding boxes from one image to a new one.
        Parameters
        ----------
        image : ndarray or tuple of int
            New image onto which the bounding boxes are to be projected.
            May also simply be that new image's shape tuple.
        Returns
        -------
        bounding_boxes : imgaug.BoundingBoxesOnImage
            Object containing all projected bounding boxes.
        """
        if is_np_array(image):
            shape = image.shape
        else:
            shape = image
        # Identical H,W: nothing to project, just copy.
        if shape[0:2] == self.shape[0:2]:
            return self.deepcopy()
        else:
            bounding_boxes = [bb.project(self.shape, shape) for bb in self.bounding_boxes]
            return BoundingBoxesOnImage(bounding_boxes, shape)
    @classmethod
    def from_xyxy_array(cls, xyxy, shape):
        """
        Convert an (N,4) ndarray to a BoundingBoxesOnImage object.
        This is the inverse of :func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
        Parameters
        ----------
        xyxy : (N,4) ndarray
            Array containing the corner coordinates (top-left, bottom-right) of ``N`` bounding boxes
            in the form ``(x1, y1, x2, y2)``. Should usually be of dtype ``float32``.
        shape : tuple of int
            Shape of the image on which the bounding boxes are placed.
            Should usually be ``(H, W, C)`` or ``(H, W)``.
        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Object containing a list of BoundingBox objects following the provided corner coordinates.
        """
        do_assert(xyxy.shape[1] == 4, "Expected input array of shape (N, 4), got shape %s." % (xyxy.shape,))
        boxes = [BoundingBox(*row) for row in xyxy]
        return cls(boxes, shape)
    def to_xyxy_array(self, dtype=np.float32):
        """
        Convert the BoundingBoxesOnImage object to an (N,4) ndarray.
        This is the inverse of :func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`.
        Parameters
        ----------
        dtype : numpy.dtype, optional
            Desired output datatype of the ndarray.
        Returns
        -------
        ndarray
            (N,4) ndarray array, where ``N`` denotes the number of bounding boxes and ``4`` denotes the
            top-left and bottom-right bounding box corner coordinates in form ``(x1, y1, x2, y2)``.
        """
        # Accumulate in float64 so that requesting e.g. dtype=np.float64 does
        # not silently truncate coordinates through a float32 intermediate
        # (the previous implementation always routed values through float32).
        xyxy_array = np.zeros((len(self.bounding_boxes), 4), dtype=np.float64)
        for i, box in enumerate(self.bounding_boxes):
            xyxy_array[i] = [box.x1, box.y1, box.x2, box.y2]
        return xyxy_array.astype(dtype)
    def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
        """
        Draw all bounding boxes onto a given image.
        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the bounding boxes.
            This image should usually have the same shape as
            set in BoundingBoxesOnImage.shape.
        color : int or list of int or tuple of int or (3,) ndarray, optional
            The RGB color of all bounding boxes. If a single int ``C``, then that is
            equivalent to ``(C,C,C)``.
        alpha : float, optional
            Alpha/transparency of the bounding box.
        thickness : int, optional
            Thickness in pixels.
        copy : bool, optional
            Whether to copy the image before drawing the points.
        raise_if_out_of_image : bool, optional
            Whether to raise an exception if any bounding box is outside of the image.
        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn bounding boxes.
        """
        # TODO improve efficiency here by copying only once
        for bb in self.bounding_boxes:
            image = bb.draw_on_image(
                image,
                color=color,
                alpha=alpha,
                thickness=thickness,
                copy=copy,
                raise_if_out_of_image=raise_if_out_of_image
            )
        return image
    def remove_out_of_image(self, fully=True, partly=False):
        """
        Remove all bounding boxes that are fully or partially outside of the image.
        Parameters
        ----------
        fully : bool, optional
            Whether to remove bounding boxes that are fully outside of the image.
        partly : bool, optional
            Whether to remove bounding boxes that are partially outside of the image.
        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Reduced set of bounding boxes, with those that were fully/partially outside of
            the image removed.
        """
        bbs_clean = [bb for bb in self.bounding_boxes
                     if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
        return BoundingBoxesOnImage(bbs_clean, shape=self.shape)
    def cut_out_of_image(self):
        """
        Cut off all parts from all bounding boxes that are outside of the image.
        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Bounding boxes, clipped to fall within the image dimensions.
        """
        # Fully-outside boxes are dropped entirely; the rest are clipped.
        bbs_cut = [bb.cut_out_of_image(self.shape)
                   for bb in self.bounding_boxes if bb.is_partly_within_image(self.shape)]
        return BoundingBoxesOnImage(bbs_cut, shape=self.shape)
    def shift(self, top=None, right=None, bottom=None, left=None):
        """
        Shift all bounding boxes from one or more image sides, i.e. move them on the x/y-axis.
        Parameters
        ----------
        top : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the top.
        right : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the right.
        bottom : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the bottom.
        left : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the left.
        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Shifted bounding boxes.
        """
        bbs_new = [bb.shift(top=top, right=right, bottom=bottom, left=left) for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs_new, shape=self.shape)
    def copy(self):
        """
        Create a shallow copy of the BoundingBoxesOnImage object.
        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Shallow copy.
        """
        return copy.copy(self)
    def deepcopy(self):
        """
        Create a deep copy of the BoundingBoxesOnImage object.
        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Deep copy.
        """
        # Manual copy is far faster than deepcopy for KeypointsOnImage,
        # so use manual copy here too
        bbs = [bb.deepcopy() for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs, tuple(self.shape))
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return "BoundingBoxesOnImage(%s, shape=%s)" % (str(self.bounding_boxes), self.shape)
# TODO somehow merge with BoundingBox
# TODO add functions: simplify() (eg via shapely.ops.simplify()),
# extend(all_sides=0, top=0, right=0, bottom=0, left=0),
# intersection(other, default=None), union(other), iou(other), to_heatmap, to_mask
class Polygon(object):
"""
Class representing polygons.
Each polygon is parameterized by its corner points, given as absolute x- and y-coordinates
with sub-pixel accuracy.
Parameters
----------
exterior : list of imgaug.Keypoint or list of tuple of float or (N,2) ndarray
List of points defining the polygon. May be either a list of Keypoint objects or a list of tuples in xy-form
or a numpy array of shape (N,2) for N points in xy-form.
All coordinates are expected to be the absolute coordinates in the image, given as floats, e.g. x=10.7
and y=3.4 for a point at coordinates (10.7, 3.4). Their order is expected to be clock-wise. They are expected
to not be closed (i.e. first and last coordinate differ).
label : None or str, optional
Label of the polygon, e.g. a string representing the class.
"""
def __init__(self, exterior, label=None):
    """Create a new Polygon instance."""
    if isinstance(exterior, list):
        if not exterior:
            # for empty lists, make sure that the shape is (0, 2) and not (0,) as that is also expected when the
            # input is a numpy array
            self.exterior = np.zeros((0, 2), dtype=np.float32)
        elif isinstance(exterior[0], Keypoint):
            # list of Keypoint objects
            self.exterior = np.float32([[kp.x, kp.y] for kp in exterior])
        else:
            # list of (x, y) tuples
            self.exterior = np.float32([[xy[0], xy[1]] for xy in exterior])
    else:
        # ndarray input must be (N, 2) in xy-form
        do_assert(is_np_array(exterior))
        do_assert(exterior.ndim == 2)
        do_assert(exterior.shape[1] == 2)
        self.exterior = np.float32(exterior)
    # Remove last point if it is essentially the same as the first point (polygons are always assumed to be
    # closed anyways). This also prevents problems with shapely, which seems to add the last point automatically.
    first_equals_last = len(self.exterior) >= 2 and np.allclose(self.exterior[0, :], self.exterior[-1, :])
    if first_equals_last:
        self.exterior = self.exterior[:-1]
    self.label = label
@property
def xx(self):
"""
Return the x-coordinates of all points in the exterior.
Returns
-------
(N,2) ndarray
X-coordinates of all points in the exterior as a float32 ndarray.
"""
return self.exterior[:, 0]
@property
def yy(self):
"""
Return the y-coordinates of all points in the exterior.
Returns
-------
(N,2) ndarray
Y-coordinates of all points in the exterior as a float32 ndarray.
"""
return self.exterior[:, 1]
@property
def xx_int(self):
"""
Return the x-coordinates of all points in the exterior, rounded to the closest integer value.
Returns
-------
(N,2) ndarray
X-coordinates of all points in the exterior, rounded to the closest integer value.
Result dtype is int32.
"""
return np.int32(np.round(self.xx))
@property
def yy_int(self):
"""
Return the y-coordinates of all points in the exterior, rounded to the closest integer value.
Returns
-------
(N,2) ndarray
Y-coordinates of all points in the exterior, rounded to the closest integer value.
Result dtype is int32.
"""
return np.int32(np.round(self.yy))
@property
def is_valid(self):
    """
    Estimate whether the polygon has a valid shape.
    To be considered valid, the polygon must be made up of at least 3 points and be
    non-self-intersecting in the shapely sense. Multiple consecutive points are allowed
    to have the same coordinates.
    Returns
    -------
    bool
        True if the polygon has at least 3 points and shapely considers it valid,
        otherwise False.
    """
    # Fewer than 3 points can never form a valid polygon; otherwise delegate
    # the geometric validity check to shapely.
    return len(self.exterior) >= 3 and self.to_shapely_polygon().is_valid
@property
def area(self):
"""
Estimate the area of the polygon.
Returns
-------
number
Area of the polygon.
"""
if len(self.exterior) < 3:
raise Exception("Cannot compute the polygon's area because it contains less than three points.")
poly = self.to_shapely_polygon()
return poly.area
def project(self, from_shape, to_shape):
    """
    Project the polygon onto an image with different shape.
    The relative coordinates of all points remain the same.
    E.g. a point at (x=20, y=20) on an image (width=100, height=200) will be
    projected on a new image (width=200, height=100) to (x=40, y=10).
    This is intended for cases where the original image is resized.
    It cannot be used for more complex changes (e.g. padding, cropping).
    Parameters
    ----------
    from_shape : tuple of int
        Shape of the original image. (Before resize.)
    to_shape : tuple of int
        Shape of the new image. (After resize.)
    Returns
    -------
    imgaug.Polygon
        Polygon object with new coordinates.
    """
    # Same H,W: projection is the identity.
    if from_shape[0:2] == to_shape[0:2]:
        return self.copy()
    # Reuse Keypoint.project for the per-point coordinate scaling.
    projected = [Keypoint(x=px, y=py).project(from_shape, to_shape) for px, py in self.exterior]
    return self.copy(exterior=projected)
def find_closest_point_index(self, x, y, return_distance=False):
    """
    Find the index of the point within the exterior that is closest to the given coordinates.
    "Closeness" is here defined based on euclidean distance.
    This method will raise an AssertionError if the exterior contains no points.
    Parameters
    ----------
    x : number
        X-coordinate around which to search for close points.
    y : number
        Y-coordinate around which to search for close points.
    return_distance : bool, optional
        Whether to also return the distance of the closest point.
    Returns
    -------
    int
        Index of the closest point.
    number
        Euclidean distance to the closest point.
        This value is only returned if `return_distance` was set to True.
    """
    do_assert(len(self.exterior) > 0)
    # Squared distances first, then a single sqrt over the whole list.
    sq_dists = [(px - x) ** 2 + (py - y) ** 2 for px, py in self.exterior]
    distances = np.sqrt(sq_dists)
    closest_idx = np.argmin(distances)
    if return_distance:
        return closest_idx, distances[closest_idx]
    return closest_idx
def _compute_inside_image_point_mask(self, image):
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
h, w = shape[0:2]
return np.logical_and(
np.logical_and(0 <= self.exterior[:, 0], self.exterior[:, 0] < w),
np.logical_and(0 <= self.exterior[:, 1], self.exterior[:, 1] < h)
)
# TODO keep this method? it is almost an alias for is_out_of_image()
def is_fully_within_image(self, image):
"""
Estimate whether the polygon is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
bool
True if the polygon is fully inside the image area.
False otherwise.
"""
return not self.is_out_of_image(image, fully=True, partly=True)
# TODO keep this method? it is almost an alias for is_out_of_image()
def is_partly_within_image(self, image):
"""
Estimate whether the polygon is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
bool
True if the polygon is at least partially inside the image area.
False otherwise.
"""
return not self.is_out_of_image(image, fully=True, partly=False)
def is_out_of_image(self, image, fully=True, partly=False):
"""
Estimate whether the polygon is partially or fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
fully : bool, optional
Whether to return True if the polygon is fully outside fo the image area.
partly : bool, optional
Whether to return True if the polygon is at least partially outside fo the image area.
Returns
-------
bool
True if the polygon is partially/fully outside of the image area, depending
on defined parameters. False otherwise.
"""
if len(self.exterior) == 0:
raise Exception("Cannot determine whether the polygon is inside the image, because it contains no points.")
inside = self._compute_inside_image_point_mask(image)
nb_inside = sum(inside)
if nb_inside == len(inside):
return False
elif nb_inside > 0:
return partly
else:
return fully
# TODO mark as deprecated
# TODO rename cut_* to clip_* in BoundingBox
def cut_out_of_image(self, image):
    # Deprecated alias; delegates directly to clip_out_of_image().
    return self.clip_out_of_image(image)
def clip_out_of_image(self, image):
    """
    Cut off all parts of the polygon that are outside of the image.
    This operation may lead to new points being created.
    As a single polygon may be split into multiple new polygons, the result is a MultiPolygon.
    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image dimensions to use for the clipping of the polygon.
        If an ndarray, its shape will be used.
        If a tuple, it is assumed to represent the image shape and must contain at least two integers.
    Returns
    -------
    imgaug.MultiPolygon
        Polygon, clipped to fall within the image dimensions.
        Returned as MultiPolygon, because the clipping can split the polygon into multiple parts.
    """
    # if fully out of image, clip everything away, nothing remaining
    if self.is_out_of_image(image, fully=True, partly=False):
        return MultiPolygon([])
    # BUGFIX: previously this unconditionally accessed image.shape, which
    # raised AttributeError for tuple inputs even though the docstring (and
    # is_out_of_image() above) promise tuple support.
    if isinstance(image, tuple):
        h, w = image[0:2]
    else:
        h, w = image.shape[0:2]
    # Intersect the polygon with the image rectangle via shapely.
    poly_shapely = self.to_shapely_polygon()
    poly_image = shapely.geometry.Polygon([(0, 0), (w, 0), (w, h), (0, h)])
    multipoly_inter_shapely = poly_shapely.intersection(poly_image)
    if not isinstance(multipoly_inter_shapely, shapely.geometry.MultiPolygon):
        do_assert(isinstance(multipoly_inter_shapely, shapely.geometry.Polygon))
        multipoly_inter_shapely = shapely.geometry.MultiPolygon([multipoly_inter_shapely])
    polygons = []
    for poly_inter_shapely in multipoly_inter_shapely.geoms:
        polygons.append(Polygon.from_shapely(poly_inter_shapely, label=self.label))
    # shapely changes the order of points, we try here to preserve it as good as possible
    polygons_reordered = []
    for polygon in polygons:
        found = False
        for x, y in self.exterior:
            closest_idx, dist = polygon.find_closest_point_index(x=x, y=y, return_distance=True)
            if dist < 1e-6:
                polygon_reordered = polygon.change_first_point_by_index(closest_idx)
                polygons_reordered.append(polygon_reordered)
                found = True
                break
        do_assert(found)  # could only not find closest points if new polys are empty
    return MultiPolygon(polygons_reordered)
def shift(self, top=None, right=None, bottom=None, left=None):
    """
    Shift the polygon from one or more image sides, i.e. move it on the x/y-axis.
    Parameters
    ----------
    top : None or int, optional
        Amount of pixels by which to shift the polygon from the top.
    right : None or int, optional
        Amount of pixels by which to shift the polygon from the right.
    bottom : None or int, optional
        Amount of pixels by which to shift the polygon from the bottom.
    left : None or int, optional
        Amount of pixels by which to shift the polygon from the left.
    Returns
    -------
    imgaug.Polygon
        Shifted polygon.
    """
    # Missing offsets count as zero; collapse the four sides into (dx, dy).
    top, right, bottom, left = [0 if v is None else v for v in (top, right, bottom, left)]
    moved = np.copy(self.exterior)
    moved[:, 0] += (left - right)
    moved[:, 1] += (top - bottom)
    return self.deepcopy(exterior=moved)
# TODO add boundary thickness
def draw_on_image(self,
                  image,
                  color=(0, 255, 0), color_perimeter=(0, 128, 0),
                  alpha=0.5, alpha_perimeter=1.0,
                  raise_if_out_of_image=False):
    """
    Draw the polygon on an image.
    Parameters
    ----------
    image : (H,W,C) ndarray
        The image onto which to draw the polygon. Usually expected to be of dtype uint8, though other dtypes
        are also handled.
    color : iterable of int, optional
        The color to use for the polygon (excluding perimeter). Must correspond to the channel layout of the
        image. Usually RGB.
    color_perimeter : iterable of int, optional
        The color to use for the perimeter/border of the polygon. Must correspond to the channel layout of the
        image. Usually RGB.
    alpha : float, optional
        The transparency of the polygon (excluding the perimeter), where 1.0 denotes no transparency and 0.0 is
        invisible.
    alpha_perimeter : float, optional
        The transparency of the polygon's perimeter/border, where 1.0 denotes no transparency and 0.0 is
        invisible.
    raise_if_out_of_image : bool, optional
        Whether to raise an error if the polygon is partially/fully outside of the
        image. If set to False, no error will be raised and only the parts inside the image
        will be drawn.
    Returns
    -------
    result : (H,W,C) ndarray
        Image with polygon drawn on it. Result dtype is the same as the input dtype.
    """
    # TODO separate this into draw_face_on_image() and draw_border_on_image()
    if raise_if_out_of_image and self.is_out_of_image(image):
        raise Exception("Cannot draw polygon %s on image with shape %s." % (
            str(self), image.shape
        ))
    xx = self.xx_int
    yy = self.yy_int
    # TODO np.clip to image plane if is_fully_within_image(), similar to how it is done for bounding boxes
    # TODO improve efficiency by only drawing in rectangle that covers poly instead of drawing in the whole image
    # TODO for a rectangular polygon, the face coordinates include the top/left boundary but not the right/bottom
    #      boundary. This may be unintuitive when not drawing the boundary. Maybe somehow remove the boundary
    #      coordinates from the face coordinates after generating both?
    # Filled face pixels and perimeter pixels, both clipped to the image via shape=.
    rr, cc = skimage.draw.polygon(yy, xx, shape=image.shape)
    rr_perimeter, cc_perimeter = skimage.draw.polygon_perimeter(yy, xx, shape=image.shape)
    # Draw the face first, then the perimeter on top, each with its own color/alpha.
    params = (rr, cc, color, alpha)
    params_perimeter = (rr_perimeter, cc_perimeter, color_perimeter, alpha_perimeter)
    # Blend in float32, then restore the input dtype at the end.
    input_dtype = image.dtype
    result = image.astype(np.float32)
    for rr, cc, color, alpha in [params, params_perimeter]:
        color = np.float32(color)
        if alpha >= 0.99:
            # (Near-)opaque: overwrite directly.
            result[rr, cc, :] = color
        elif alpha < 1e-4:
            pass  # invisible, do nothing
        else:
            result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
    if input_dtype.type == np.uint8:
        result = np.clip(result, 0, 255).astype(input_dtype)  # TODO make clipping more flexible
    else:
        result = result.astype(input_dtype)
    return result
def extract_from_image(self, image):
    """
    Extract the image pixels within the polygon.
    This function will zero-pad the image if the polygon is partially/fully outside of
    the image.
    Parameters
    ----------
    image : (H,W) ndarray or (H,W,C) ndarray
        The image from which to extract the pixels within the polygon.
    Returns
    -------
    result : (H',W') ndarray or (H',W',C) ndarray
        Pixels within the polygon. Zero-padded if the polygon is partially/fully
        outside of the image.
    Raises
    ------
    Exception
        If the polygon contains fewer than 3 points.
    """
    do_assert(image.ndim in [2, 3])
    if len(self.exterior) <= 2:
        raise Exception("Polygon must be made up of at least 3 points to extract its area from an image.")
    # Extract the polygon's bounding rectangle first (handles zero-padding).
    bb = self.to_bounding_box()
    bb_area = bb.extract_from_image(image)
    if self.is_out_of_image(image, fully=True, partly=False):
        return bb_area
    # Shift polygon coordinates into the bounding-box-local frame.
    xx = self.xx_int
    yy = self.yy_int
    xx_mask = xx - np.min(xx)
    yy_mask = yy - np.min(yy)
    height_mask = np.max(yy_mask)
    width_mask = np.max(xx_mask)
    # NOTE(review): shape=(height_mask, width_mask) excludes the bottom/right
    # boundary coordinates of the polygon (off-by-one?) — TODO confirm whether
    # this should be (height_mask+1, width_mask+1) and how it interacts with
    # bb_area's shape.
    rr_face, cc_face = skimage.draw.polygon(yy_mask, xx_mask, shape=(height_mask, width_mask))
    # BUGFIX: np.bool was a deprecated alias for the builtin bool and was
    # removed in NumPy 1.24; use bool directly.
    mask = np.zeros((height_mask, width_mask), dtype=bool)
    mask[rr_face, cc_face] = True
    if image.ndim == 3:
        # Broadcast the 2D mask over all channels.
        mask = np.tile(mask[:, :, np.newaxis], (1, 1, image.shape[2]))
    return bb_area * mask
def change_first_point_by_coords(self, x, y, max_distance=1e-4):
    """
    Set the first point of the exterior to the given point based on its coordinates.
    If multiple points are found, the closest one will be picked.
    If no matching points are found, an exception is raised.
    Note: This method does *not* work in-place.
    Parameters
    ----------
    x : number
        X-coordinate of the point.
    y : number
        Y-coordinate of the point.
    max_distance : number
        Maximum distance past which possible matches are ignored.
    Returns
    -------
    imgaug.Polygon
        Copy of this polygon with the new point order.
    Raises
    ------
    Exception
        If the polygon has no points, or if the closest point is further away
        than `max_distance`.
    """
    if len(self.exterior) == 0:
        raise Exception("Cannot reorder polygon points, because it contains no points.")
    closest_idx, closest_dist = self.find_closest_point_index(x=x, y=y, return_distance=True)
    if max_distance is not None and closest_dist > max_distance:
        closest_point = self.exterior[closest_idx, :]
        # BUGFIX: the previous message read "exceeds max_distance of %.9f
        # exceeded" and interpolated closest_dist where it claimed to show
        # max_distance; report both values unambiguously instead.
        raise Exception(
            "Closest found point (%.9f, %.9f) with distance %.9f exceeds max_distance of %.9f" % (
                closest_point[0], closest_point[1], closest_dist, max_distance)
        )
    return self.change_first_point_by_index(closest_idx)
def change_first_point_by_index(self, point_idx):
    """
    Set the first point of the exterior to the given point based on its index.
    Note: This method does *not* work in-place.
    Parameters
    ----------
    point_idx : int
        Index of the desired starting point.
    Returns
    -------
    imgaug.Polygon
        Copy of this polygon with the new point order.
    """
    do_assert(0 <= point_idx < len(self.exterior))
    if point_idx == 0:
        # Already the first point; nothing to rotate.
        return self.deepcopy()
    # Rotate the point order so that point_idx becomes index 0.
    rotated = np.roll(self.exterior, -point_idx, axis=0)
    return self.deepcopy(exterior=rotated)
def to_shapely_polygon(self):
"""
Convert this polygon to a Shapely polygon.
Returns
-------
shapely.geometry.Polygon
The Shapely polygon matching this polygon's exterior.
"""
return shapely.geometry.Polygon([(point[0], point[1]) for point in self.exterior])
def to_shapely_line_string(self, closed=False, interpolate=0):
"""
Convert this polygon to a Shapely LineString object.
Parameters
----------
closed : bool, optional
Whether to return the line string with the last point being identical to the first point.
interpolate : int, optional
Number of points to interpolate between any pair of two consecutive points. These points are added
to the final line string.
Returns
-------
shapely.geometry.LineString
The Shapely LineString matching the polygon's exterior.
"""
return _convert_points_to_shapely_line_string(self.exterior, closed=closed, interpolate=interpolate)
def to_bounding_box(self):
"""
Convert this polygon to a bounding box tightly containing the whole polygon.
Returns
-------
imgaug.BoundingBox
The bounding box tightly containing the polygon.
"""
xx = self.xx
yy = self.yy
return BoundingBox(x1=min(xx), x2=max(xx), y1=min(yy), y2=max(yy), label=self.label)
@staticmethod
def from_shapely(polygon_shapely, label=None):
"""
Create a polygon from a Shapely polygon.
Note: This will remove any holes in the Shapely polygon.
Parameters
----------
polygon_shapely : shapely.geometry.Polygon
The shapely polygon.
label : None or str, optional
The label of the new polygon.
Returns
-------
imgaug.Polygon
A polygon with the same exterior as the Shapely polygon.
"""
do_assert(isinstance(polygon_shapely, shapely.geometry.Polygon))
# polygon_shapely.exterior can be None if the polygon was instantiated without points
if polygon_shapely.exterior is None or len(polygon_shapely.exterior.coords) == 0:
return Polygon([], label=label)
exterior = np.float32([[x, y] for (x, y) in polygon_shapely.exterior.coords])
return Polygon(exterior, label=label)
    def exterior_almost_equals(self, other_polygon, max_distance=1e-6, interpolate=8):
        """
        Estimate whether the geometry of the exterior of this polygon and another polygon are comparable.

        The two exteriors can have different numbers of points, but any point randomly sampled on the exterior
        of one polygon should be close to the closest point on the exterior of the other polygon.

        Note that this method works approximately. One can come up with polygons with fairly different shapes that
        will still be estimated as equal by this method. In practice however this should be unlikely to be the case.
        The probability for something like that goes down as the interpolation parameter is increased.

        Parameters
        ----------
        other_polygon : imgaug.Polygon or (N,2) ndarray
            The other polygon with which to compare the exterior.
            If this is an ndarray, it is assumed to represent an exterior.
            It must then have dtype float32 and shape (N,2) with the second dimension denoting xy-coordinates.

        max_distance : number
            The maximum euclidean distance between a point on one polygon and the closest point on the other polygon.
            If the distance is exceeded for any such pair, the two exteriors are not viewed as equal.
            The points are either the points contained in the polygon's exterior ndarray or interpolated points
            between these.

        interpolate : int
            How many points to interpolate between the points of the polygon's exteriors.
            If this is set to zero, then only the points given by the polygon's exterior ndarrays will be used.
            Higher values make it less likely that unequal polygons are evaluated as equal.

        Returns
        -------
        bool
            Whether the two polygon's exteriors can be viewed as equal (approximate test).

        """
        atol = max_distance

        ext_a = self.exterior
        # other_polygon may be a raw (N,2) ndarray instead of a Polygon.
        ext_b = other_polygon.exterior if not is_np_array(other_polygon) else other_polygon
        len_a = len(ext_a)
        len_b = len(ext_b)
        # Trivial cases: two empty exteriors are equal; empty vs. non-empty never is.
        if len_a == 0 and len_b == 0:
            return True
        elif len_a == 0 and len_b > 0:
            return False
        elif len_a > 0 and len_b == 0:
            return False

        # neither A nor B is zero-sized at this point

        # if A or B only contain points identical to the first point, merge them to one point
        # (comparison uses the same tolerance `atol` as the final distance test)
        if len_a > 1:
            if all([np.allclose(ext_a[0, :], ext_a[1 + i, :], rtol=0, atol=atol) for i in sm.xrange(len_a - 1)]):
                ext_a = ext_a[0:1, :]
                len_a = 1
        if len_b > 1:
            if all([np.allclose(ext_b[0, :], ext_b[1 + i, :], rtol=0, atol=atol) for i in sm.xrange(len_b - 1)]):
                ext_b = ext_b[0:1, :]
                len_b = 1

        # handle polygons that contain a single point
        if len_a == 1 and len_b == 1:
            return np.allclose(ext_a[0, :], ext_b[0, :], rtol=0, atol=atol)
        elif len_a == 1:
            return all([np.allclose(ext_a[0, :], ext_b[i, :], rtol=0, atol=atol) for i in sm.xrange(len_b)])
        elif len_b == 1:
            return all([np.allclose(ext_b[0, :], ext_a[i, :], rtol=0, atol=atol) for i in sm.xrange(len_a)])

        # After this point, both polygons have at least 2 points, i.e. LineStrings can be used.
        # We can also safely go back to the original exteriors (before close points were merged).
        ls_a = self.to_shapely_line_string(closed=True, interpolate=interpolate)
        ls_b = other_polygon.to_shapely_line_string(closed=True, interpolate=interpolate) \
            if not is_np_array(other_polygon) \
            else _convert_points_to_shapely_line_string(other_polygon, closed=True, interpolate=interpolate)

        # Measure the distance from each point in A to LineString B and vice versa.
        # Make sure that no point violates the tolerance.
        # Note that we can't just use LineString.almost_equals(LineString) -- that seems to expect the same number
        # and order of points in both LineStrings (failed with duplicated points).
        for x, y in ls_a.coords:
            point = shapely.geometry.Point(x, y)
            if not ls_b.distance(point) <= max_distance:
                return False

        for x, y in ls_b.coords:
            point = shapely.geometry.Point(x, y)
            if not ls_a.distance(point) <= max_distance:
                return False

        return True
def almost_equals(self, other, max_distance=1e-6, interpolate=8):
"""
Compare this polygon with another one and estimate whether they can be viewed as equal.
This is the same as :func:`imgaug.Polygon.exterior_almost_equals` but additionally compares the labels.
Parameters
----------
other
The object to compare against. If not a Polygon, then False will be returned.
max_distance : float
See :func:`imgaug.Polygon.exterior_almost_equals`.
interpolate : int
See :func:`imgaug.Polygon.exterior_almost_equals`.
Returns
-------
bool
Whether the two polygons can be viewed as equal. In the case of the exteriors this is an approximate test.
"""
if not isinstance(other, Polygon):
return False
if self.label is not None or other.label is not None:
if self.label is None:
return False
if other.label is None:
return False
if self.label != other.label:
return False
return self.exterior_almost_equals(other, max_distance=max_distance, interpolate=interpolate)
def copy(self, exterior=None, label=None):
"""
Create a shallow copy of the Polygon object.
Parameters
----------
exterior : list of imgaug.Keypoint or list of tuple or (N,2) ndarray, optional
List of points defining the polygon. See :func:`imgaug.Polygon.__init__` for details.
label : None or str, optional
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.Polygon
Shallow copy.
"""
return self.deepcopy(exterior=exterior, label=label)
def deepcopy(self, exterior=None, label=None):
"""
Create a deep copy of the Polygon object.
Parameters
----------
exterior : list of Keypoint or list of tuple or (N,2) ndarray, optional
List of points defining the polygon. See `imgaug.Polygon.__init__` for details.
label : None or str
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.Polygon
Deep copy.
"""
return Polygon(
exterior=np.copy(self.exterior) if exterior is None else exterior,
label=self.label if label is None else label
)
def __repr__(self):
return self.__str__()
def __str__(self):
points_str = ", ".join(["(x=%.3f, y=%.3f)" % (point[0], point[1]) for point in self.exterior])
return "Polygon([%s] (%d points), label=%s)" % (points_str, len(self.exterior), self.label)
def _convert_points_to_shapely_line_string(points, closed=False, interpolate=0):
    """Build a shapely LineString from a sequence of xy-points, optionally interpolated and/or closed."""
    # Shapely cannot construct a LineString from fewer than two points.
    if len(points) <= 1:
        raise Exception(
            ("Conversion to shapely line string requires at least two points, but points input contains "
             "only %d points.") % (len(points),)
        )

    coords = [(p[0], p[1]) for p in points]

    # insert `interpolate` additional points between each consecutive pair
    if interpolate > 0:
        coords = _interpolate_points(coords, interpolate)

    # close the ring if requested; the length check intentionally uses the
    # original `points` input rather than the interpolated list
    if closed and len(points) > 1:
        coords = coords + [coords[0]]
    return shapely.geometry.LineString(coords)
def _interpolate_point_pair(point_a, point_b, nb_steps):
    """Return `nb_steps` evenly spaced points strictly between two xy-points (excluding both endpoints)."""
    if nb_steps < 1:
        return []
    xa, ya = point_a
    xb, yb = point_b
    # step vector between consecutive interpolated points
    delta = np.float32([xb - xa, yb - ya]) / (1 + nb_steps)
    result = []
    for i in sm.xrange(nb_steps):
        factor = i + 1
        result.append((xa + factor * delta[0], ya + factor * delta[1]))
    return result
def _interpolate_points(points, nb_steps, closed=True):
    """Insert `nb_steps` interpolated points between each consecutive pair of points."""
    if len(points) <= 1:
        return points
    waypoints = list(points)
    if closed:
        # append the first point so the closing segment is interpolated too
        waypoints.append(waypoints[0])
    interpolated = []
    for start, end in zip(waypoints[:-1], waypoints[1:]):
        interpolated.append(start)
        interpolated.extend(_interpolate_point_pair(start, end, nb_steps))
    if not closed:
        interpolated.append(waypoints[-1])
    # the appended closing point does not have to be removed here, as the last
    # point of each segment is never added by the loop above
    return interpolated
def _interpolate_points_by_max_distance(points, max_distance, closed=True):
    """Interpolate between consecutive points so that no gap exceeds `max_distance` (euclidean)."""
    do_assert(max_distance > 0, "max_distance must have value greater than 0, got %.8f" % (max_distance,))
    if len(points) <= 1:
        return points
    waypoints = list(points)
    if closed:
        # append the first point so the closing segment is interpolated too
        waypoints.append(waypoints[0])
    interpolated = []
    for start, end in zip(waypoints[:-1], waypoints[1:]):
        dx = start[0] - end[0]
        dy = start[1] - end[1]
        dist = np.sqrt(dx ** 2 + dy ** 2)
        # number of intermediate points needed so that gaps stay <= max_distance
        nb_steps = int((dist / max_distance) - 1)
        interpolated.append(start)
        interpolated.extend(_interpolate_point_pair(start, end, nb_steps))
    if not closed:
        interpolated.append(waypoints[-1])
    return interpolated
class MultiPolygon(object):
    """
    Class that represents several polygons.

    Parameters
    ----------
    geoms : list of imgaug.Polygon
        List of the polygons.

    """
    def __init__(self, geoms):
        """Create a new MultiPolygon instance."""
        # An empty list is allowed; otherwise every element must be a Polygon.
        is_empty = len(geoms) == 0
        do_assert(is_empty or all([isinstance(el, Polygon) for el in geoms]))
        self.geoms = geoms

    @staticmethod
    def from_shapely(geometry, label=None):
        """
        Create a MultiPolygon from a Shapely MultiPolygon, a Shapely Polygon or a Shapely GeometryCollection.

        This also creates all necessary Polygons contained by this MultiPolygon.

        Parameters
        ----------
        geometry : shapely.geometry.MultiPolygon or shapely.geometry.Polygon\
                   or shapely.geometry.collection.GeometryCollection
            The object to convert to a MultiPolygon.

        label : None or str, optional
            A label assigned to all Polygons within the MultiPolygon.

        Returns
        -------
        imgaug.MultiPolygon
            The derived MultiPolygon.

        """
        if isinstance(geometry, shapely.geometry.Polygon):
            # single polygon -> MultiPolygon with exactly one entry
            return MultiPolygon([Polygon.from_shapely(geometry, label=label)])
        if isinstance(geometry, shapely.geometry.MultiPolygon):
            polys = [Polygon.from_shapely(g, label=label) for g in geometry.geoms]
            return MultiPolygon(polys)
        if isinstance(geometry, shapely.geometry.collection.GeometryCollection):
            # collections are only accepted if they contain polygons exclusively
            do_assert(all([isinstance(g, shapely.geometry.Polygon) for g in geometry.geoms]))
            polys = [Polygon.from_shapely(g, label=label) for g in geometry.geoms]
            return MultiPolygon(polys)
        raise Exception("Unknown datatype '%s'. Expected shapely.geometry.Polygon or "
                        "shapely.geometry.MultiPolygon or "
                        "shapely.geometry.collections.GeometryCollection." % (type(geometry),))
class HeatmapsOnImage(object):
    """
    Object representing heatmaps on images.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Array representing the heatmap(s).
        Must be of dtype float32.
        If multiple heatmaps are provided, then ``C`` is expected to denote their number.

    shape : tuple of int
        Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
        heatmap(s) array, unless it is identical to the image shape (note the likely
        difference between the arrays in the number of channels).
        If there is not a corresponding image, use the shape of the heatmaps array.

    min_value : float, optional
        Minimum value for the heatmaps that `arr` represents. This will usually be ``0.0``.

    max_value : float, optional
        Maximum value for the heatmaps that `arr` represents. This will usually be ``1.0``.

    """
    def __init__(self, arr, shape, min_value=0.0, max_value=1.0):
        """Construct a new HeatmapsOnImage object."""
        do_assert(is_np_array(arr), "Expected numpy array as heatmap input array, got type %s" % (type(arr),))
        do_assert(arr.dtype.type in [np.float32],
                  "Heatmap input array expected to be of dtype float32, got dtype %s." % (arr.dtype,))
        do_assert(arr.ndim in [2, 3], "Heatmap input array must be 2d or 3d, got shape %s." % (arr.shape,))
        do_assert(len(shape) in [2, 3],
                  "Argument 'shape' in HeatmapsOnImage expected to be 2d or 3d, got shape %s." % (shape,))
        do_assert(min_value < max_value)
        # NOTE: only the first 50 array values are validated against the declared
        # value range -- a cheap sanity check, not an exhaustive one.
        do_assert(np.min(arr.flat[0:50]) >= min_value - np.finfo(arr.dtype).eps,
                  ("Value range of heatmap was chosen to be (%.8f, %.8f), but found value below minimum in first "
                   + "50 heatmap array values.") % (min_value, max_value))
        do_assert(np.max(arr.flat[0:50]) <= max_value + np.finfo(arr.dtype).eps,
                  ("Value range of heatmap was chosen to be (%.8f, %.8f), but found value above maximum in first "
                   + "50 heatmap array values.") % (min_value, max_value))

        # Remember whether the input was 2d so that get_arr() can restore the
        # original shape later on.
        if arr.ndim == 2:
            arr = arr[..., np.newaxis]
            self.arr_was_2d = True
        else:
            self.arr_was_2d = False

        # Heatmaps are stored internally in value range [0.0, 1.0]; skip the
        # projection when the declared range already is (0.0, 1.0).
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < min_value < 0.0 + eps
        max_is_one = 1.0 - eps < max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            self.arr_0to1 = arr
        else:
            self.arr_0to1 = (arr - min_value) / (max_value - min_value)
        self.shape = shape
        self.min_value = min_value
        self.max_value = max_value

    def get_arr(self):
        """
        Get the heatmap's array within the value range originally provided in ``__init__()``.

        The HeatmapsOnImage object saves heatmaps internally in the value range ``(min=0.0, max=1.0)``.
        This function converts the internal representation to ``(min=min_value, max=max_value)``,
        where ``min_value`` and ``max_value`` are provided upon instantiation of the object.

        Returns
        -------
        result : (H,W) ndarray or (H,W,C) ndarray
            Heatmap array. Dtype is float32.

        """
        # Restore the original 2d shape if the input array was 2d.
        if self.arr_was_2d and self.arr_0to1.shape[2] == 1:
            arr = self.arr_0to1[:, :, 0]
        else:
            arr = self.arr_0to1

        # Project from internal [0.0, 1.0] back to (min_value, max_value);
        # skip the arithmetic when the range already is (0.0, 1.0).
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < self.min_value < 0.0 + eps
        max_is_one = 1.0 - eps < self.max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            return np.copy(arr)
        else:
            diff = self.max_value - self.min_value
            return self.min_value + diff * arr

    # TODO
    # def find_global_maxima(self):
    #    raise NotImplementedError()

    def draw(self, size=None, cmap="jet"):
        """
        Render the heatmaps as RGB images.

        Parameters
        ----------
        size : None or float or iterable of int or iterable of float, optional
            Size of the rendered RGB image as ``(height, width)``.
            See :func:`imgaug.imresize_single_image` for details.
            If set to None, no resizing is performed and the size of the heatmaps array is used.

        cmap : str or None, optional
            Color map of ``matplotlib`` to use in order to convert the heatmaps to RGB images.
            If set to None, no color map will be used and the heatmaps will be converted
            to simple intensity maps.

        Returns
        -------
        heatmaps_drawn : list of (H,W,3) ndarray
            Rendered heatmaps. One per heatmap array channel. Dtype is uint8.

        """
        heatmaps_uint8 = self.to_uint8()
        heatmaps_drawn = []

        for c in sm.xrange(heatmaps_uint8.shape[2]):
            # c:c+1 here, because the additional axis is needed by imresize_single_image
            heatmap_c = heatmaps_uint8[..., c:c+1]

            if size is not None:
                heatmap_c_rs = imresize_single_image(heatmap_c, size, interpolation="nearest")
            else:
                heatmap_c_rs = heatmap_c
            # convert to float in [0.0, 1.0] as expected by matplotlib colormaps
            heatmap_c_rs = np.squeeze(heatmap_c_rs).astype(np.float32) / 255.0

            if cmap is not None:
                cmap_func = plt.get_cmap(cmap)
                heatmap_cmapped = cmap_func(heatmap_c_rs)
                # drop the alpha channel returned by the colormap (RGBA -> RGB)
                heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
            else:
                # no colormap: replicate the intensity map over three channels
                heatmap_cmapped = np.tile(heatmap_c_rs[..., np.newaxis], (1, 1, 3))

            heatmap_cmapped = np.clip(heatmap_cmapped * 255, 0, 255).astype(np.uint8)

            heatmaps_drawn.append(heatmap_cmapped)
        return heatmaps_drawn

    def draw_on_image(self, image, alpha=0.75, cmap="jet", resize="heatmaps"):
        """
        Draw the heatmaps as overlays over an image.

        Parameters
        ----------
        image : (H,W,3) ndarray
            Image onto which to draw the heatmaps. Expected to be of dtype uint8.

        alpha : float, optional
            Alpha/opacity value to use for the mixing of image and heatmaps.
            Higher values mean that the heatmaps will be more visible and the image less visible.

        cmap : str or None, optional
            Color map to use. See :func:`imgaug.HeatmapsOnImage.draw` for details.

        resize : {'heatmaps', 'image'}, optional
            In case of size differences between the image and heatmaps, either the image or
            the heatmaps can be resized. This parameter controls which of the two will be resized
            to the other's size.

        Returns
        -------
        mix : list of (H,W,3) ndarray
            Rendered overlays. One per heatmap array channel. Dtype is uint8.

        """
        # assert RGB image
        do_assert(image.ndim == 3)
        do_assert(image.shape[2] == 3)
        do_assert(image.dtype.type == np.uint8)

        do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
        do_assert(resize in ["heatmaps", "image"])

        if resize == "image":
            image = imresize_single_image(image, self.arr_0to1.shape[0:2], interpolation="cubic")

        heatmaps_drawn = self.draw(
            size=image.shape[0:2] if resize == "heatmaps" else None,
            cmap=cmap
        )

        # alpha-blend each rendered heatmap over the image
        mix = [
            np.clip((1-alpha) * image + alpha * heatmap_i, 0, 255).astype(np.uint8)
            for heatmap_i
            in heatmaps_drawn
        ]

        return mix

    def invert(self):
        """
        Inverts each value in the heatmap, shifting low towards high values and vice versa.

        This changes each value to::

            v' = max - (v - min)

        where ``v`` is the value at some spatial location, ``min`` is the minimum value in the heatmap
        and ``max`` is the maximum value.
        As the heatmap uses internally a 0.0 to 1.0 representation, this simply becomes ``v' = 1.0 - v``.

        Note that the attributes ``min_value`` and ``max_value`` are not switched. They both keep their values.

        This function can be useful e.g. when working with depth maps, where algorithms might have
        an easier time representing the furthest away points with zeros, requiring an inverted
        depth map.

        Returns
        -------
        arr_inv : imgaug.HeatmapsOnImage
            Inverted heatmap.

        """
        arr_inv = HeatmapsOnImage.from_0to1(1 - self.arr_0to1, shape=self.shape, min_value=self.min_value,
                                            max_value=self.max_value)
        # preserve the original 2d/3d input flag so get_arr() behaves consistently
        arr_inv.arr_was_2d = self.arr_was_2d
        return arr_inv

    def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
        """
        Pad the heatmaps on their top/right/bottom/left side.

        Parameters
        ----------
        top : int, optional
            Amount of pixels to add at the top side of the heatmaps. Must be 0 or greater.

        right : int, optional
            Amount of pixels to add at the right side of the heatmaps. Must be 0 or greater.

        bottom : int, optional
            Amount of pixels to add at the bottom side of the heatmaps. Must be 0 or greater.

        left : int, optional
            Amount of pixels to add at the left side of the heatmaps. Must be 0 or greater.

        mode : string, optional
            Padding mode to use. See :func:`numpy.pad` for details.

        cval : number, optional
            Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Padded heatmaps of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.

        """
        arr_0to1_padded = pad(self.arr_0to1, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
        return HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)

    def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
        """
        Pad the heatmaps on their sides so that they match a target aspect ratio.

        Depending on which dimension is smaller (height or width), only the corresponding
        sides (left/right or top/bottom) will be padded. In each case, both of the sides will
        be padded equally.

        Parameters
        ----------
        aspect_ratio : float
            Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
            as much width as height.

        mode : str, optional
            Padding mode to use. See :func:`numpy.pad` for details.

        cval : number, optional
            Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

        return_pad_amounts : bool, optional
            If False, then only the padded image will be returned. If True, a tuple with two
            entries will be returned, where the first entry is the padded image and the second
            entry are the amounts by which each image side was padded. These amounts are again a
            tuple of the form (top, right, bottom, left), with each value being an integer.

        Returns
        -------
        heatmaps : imgaug.HeatmapsOnImage
            Padded heatmaps as HeatmapsOnImage object.

        pad_amounts : tuple of int
            Amounts by which the heatmaps were padded on each side, given as a tuple ``(top, right, bottom, left)``.
            This tuple is only returned if `return_pad_amounts` was set to True.

        """
        arr_0to1_padded, pad_amounts = pad_to_aspect_ratio(self.arr_0to1, aspect_ratio=aspect_ratio, mode=mode,
                                                           cval=cval, return_pad_amounts=True)
        heatmaps = HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,
                                             max_value=self.max_value)
        if return_pad_amounts:
            return heatmaps, pad_amounts
        else:
            return heatmaps

    def avg_pool(self, block_size):
        """
        Rescale the heatmap(s) array using average pooling of a given block/kernel size.

        Parameters
        ----------
        block_size : int or tuple of int
            Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps after average pooling.

        """
        arr_0to1_reduced = avg_pool(self.arr_0to1, block_size, cval=0.0)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)

    def max_pool(self, block_size):
        """
        Rescale the heatmap(s) array using max-pooling of a given block/kernel size.

        Parameters
        ----------
        block_size : int or tuple of int
            Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps after max-pooling.

        """
        arr_0to1_reduced = max_pool(self.arr_0to1, block_size)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)

    def scale(self, sizes, interpolation="cubic"):
        """
        Rescale the heatmap(s) array to the provided size given the provided interpolation.

        Parameters
        ----------
        sizes : float or iterable of int or iterable of float
            New size of the array in ``(height, width)``. See :func:`imgaug.imresize_single_image` for details.

        interpolation : None or str or int, optional
            The interpolation to use during resize. See :func:`imgaug.imresize_single_image` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Rescaled heatmaps object.

        """
        arr_0to1_rescaled = imresize_single_image(self.arr_0to1, sizes, interpolation=interpolation)

        # cubic interpolation can lead to values outside of [0.0, 1.0],
        # see https://github.com/opencv/opencv/issues/7195
        # TODO area interpolation too?
        arr_0to1_rescaled = np.clip(arr_0to1_rescaled, 0.0, 1.0)

        return HeatmapsOnImage.from_0to1(arr_0to1_rescaled, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)

    def to_uint8(self):
        """
        Convert this heatmaps object to a 0-to-255 array.

        Returns
        -------
        arr_uint8 : (H,W,C) ndarray
            Heatmap as a 0-to-255 array (dtype is uint8).

        """
        # TODO this always returns (H,W,C), even if input ndarray was originally (H,W)
        # does it make sense here to also return (H,W) if self.arr_was_2d?
        arr_0to255 = np.clip(np.round(self.arr_0to1 * 255), 0, 255)
        arr_uint8 = arr_0to255.astype(np.uint8)
        return arr_uint8

    @staticmethod
    def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from an heatmap array containing values ranging from 0 to 255.

        Parameters
        ----------
        arr_uint8 : (H,W) ndarray or (H,W,C) ndarray
            Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
            Expected dtype is uint8.

        shape : tuple of int
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.

        min_value : float, optional
            Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
            be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
            underlying ``(0, 255)`` array to value range ``(min_value, max_value)``.

        max_value : float, optional
            Maximum value for the heatmaps that 0-to-255 array represents.
            See parameter `min_value` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps object.

        """
        arr_0to1 = arr_uint8.astype(np.float32) / 255.0
        return HeatmapsOnImage.from_0to1(arr_0to1, shape, min_value=min_value, max_value=max_value)

    @staticmethod
    def from_0to1(arr_0to1, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from an heatmap array containing values ranging from 0.0 to 1.0.

        Parameters
        ----------
        arr_0to1 : (H,W) or (H,W,C) ndarray
            Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
            Expected dtype is float32.

        shape : tuple of ints
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.

        min_value : float, optional
            Minimum value for the heatmaps that the 0-to-1 array represents. This will usually
            be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
            underlying ``(0.0, 1.0)`` array to value range ``(min_value, max_value)``.
            E.g. if you started with heatmaps in the range ``(-1.0, 1.0)`` and projected these
            to (0.0, 1.0), you should call this function with ``min_value=-1.0``, ``max_value=1.0``
            so that :func:`imgaug.HeatmapsOnImage.get_arr` returns heatmap arrays having value
            range (-1.0, 1.0).

        max_value : float, optional
            Maximum value for the heatmaps that to 0-to-255 array represents.
            See parameter min_value for details.

        Returns
        -------
        heatmaps : imgaug.HeatmapsOnImage
            Heatmaps object.

        """
        # Construct with a (0.0, 1.0) range so __init__ performs no projection,
        # then overwrite the nominal range used by get_arr().
        heatmaps = HeatmapsOnImage(arr_0to1, shape, min_value=0.0, max_value=1.0)
        heatmaps.min_value = min_value
        heatmaps.max_value = max_value
        return heatmaps

    @classmethod
    def change_normalization(cls, arr, source, target):
        """
        Change the value range of a heatmap from one min-max to another min-max.

        E.g. the value range may be changed from min=0.0, max=1.0 to min=-1.0, max=1.0.

        Parameters
        ----------
        arr : ndarray
            Heatmap array to modify.

        source : tuple of float or imgaug.HeatmapsOnImage
            Current value range of the input array, given as (min, max), where both are float values.
            A HeatmapsOnImage may be given instead, in which case its (min_value, max_value) is used.

        target : tuple of float or imgaug.HeatmapsOnImage
            Desired output value range of the array, given as (min, max), where both are float values.
            A HeatmapsOnImage may be given instead, in which case its (min_value, max_value) is used.

        Returns
        -------
        arr_target : ndarray
            Input array, with value range projected to the desired target value range.

        """
        do_assert(is_np_array(arr))

        if isinstance(source, HeatmapsOnImage):
            source = (source.min_value, source.max_value)
        else:
            do_assert(isinstance(source, tuple))
            do_assert(len(source) == 2)
            do_assert(source[0] < source[1])

        if isinstance(target, HeatmapsOnImage):
            target = (target.min_value, target.max_value)
        else:
            do_assert(isinstance(target, tuple))
            do_assert(len(target) == 2)
            do_assert(target[0] < target[1])

        # Check if source and target are the same (with a tiny bit of tolerance)
        # if so, avoid the computation and just copy the array instead.
        # This is reasonable, as source and target will often both be (0.0, 1.0).
        eps = np.finfo(arr.dtype).eps
        mins_same = source[0] - 10*eps < target[0] < source[0] + 10*eps
        maxs_same = source[1] - 10*eps < target[1] < source[1] + 10*eps
        if mins_same and maxs_same:
            return np.copy(arr)

        min_source, max_source = source
        min_target, max_target = target

        diff_source = max_source - min_source
        diff_target = max_target - min_target

        arr_0to1 = (arr - min_source) / diff_source
        arr_target = min_target + arr_0to1 * diff_target

        return arr_target

    def copy(self):
        """
        Create a shallow copy of the Heatmaps object.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Shallow copy.

        """
        return self.deepcopy()

    def deepcopy(self):
        """
        Create a deep copy of the Heatmaps object.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Deep copy.

        """
        return HeatmapsOnImage(self.get_arr(), shape=self.shape, min_value=self.min_value, max_value=self.max_value)
class SegmentationMapOnImage(object):
"""
Object representing a segmentation map associated with an image.
Attributes
----------
DEFAULT_SEGMENT_COLORS : list of tuple of int
Standard RGB colors to use during drawing, ordered by class index.
Parameters
----------
arr : (H,W) ndarray or (H,W,1) ndarray or (H,W,C) ndarray
Array representing the segmentation map. May have datatypes bool, integer or float.
* If bool: Assumed to be of shape (H,W), (H,W,1) or (H,W,C). If (H,W) or (H,W,1) it
is assumed to be for the case of having a single class (where any False denotes
background). Otherwise there are assumed to be C channels, one for each class,
with each of them containing a mask for that class. The masks may overlap.
* If integer: Assumed to be of shape (H,W) or (H,W,1). Each pixel is assumed to
contain an integer denoting the class index. Classes are assumed to be
non-overlapping. The number of classes cannot be guessed from this input, hence
nb_classes must be set.
* If float: Assumed to b eof shape (H,W), (H,W,1) or (H,W,C) with meanings being
similar to the case of `bool`. Values are expected to fall always in the range
0.0 to 1.0 and are usually expected to be either 0.0 or 1.0 upon instantiation
of a new segmentation map. Classes may overlap.
shape : iterable of int
Shape of the corresponding image (NOT the segmentation map array). This is expected
to be ``(H, W)`` or ``(H, W, C)`` with ``C`` usually being 3. If there is no corresponding image,
then use the segmentation map's shape instead.
nb_classes : int or None
Total number of unique classes that may appear in an segmentation map, i.e. the max
class index. This may be None if the input array is of type bool or float. The number
of classes however must be provided if the input array is of type int, as then the
number of classes cannot be guessed.
"""
DEFAULT_SEGMENT_COLORS = [
(0, 0, 0), # black
(230, 25, 75), # red
(60, 180, 75), # green
(255, 225, 25), # yellow
(0, 130, 200), # blue
(245, 130, 48), # orange
(145, 30, 180), # purple
(70, 240, 240), # cyan
(240, 50, 230), # magenta
(210, 245, 60), # lime
(250, 190, 190), # pink
(0, 128, 128), # teal
(230, 190, 255), # lavender
(170, 110, 40), # brown
(255, 250, 200), # beige
(128, 0, 0), # maroon
(170, 255, 195), # mint
(128, 128, 0), # olive
(255, 215, 180), # coral
(0, 0, 128), # navy
(128, 128, 128), # grey
(255, 255, 255), # white
# --
(115, 12, 37), # dark red
(30, 90, 37), # dark green
(127, 112, 12), # dark yellow
(0, 65, 100), # dark blue
(122, 65, 24), # dark orange
(72, 15, 90), # dark purple
(35, 120, 120), # dark cyan
(120, 25, 115), # dark magenta
(105, 122, 30), # dark lime
(125, 95, 95), # dark pink
(0, 64, 64), # dark teal
(115, 95, 127), # dark lavender
(85, 55, 20), # dark brown
(127, 125, 100), # dark beige
(64, 0, 0), # dark maroon
(85, 127, 97), # dark mint
(64, 64, 0), # dark olive
(127, 107, 90), # dark coral
(0, 0, 64), # dark navy
(64, 64, 64), # dark grey
]
def __init__(self, arr, shape, nb_classes=None):
    """Create a new segmentation map object.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,1) ndarray or (H,W,C) ndarray
        Segmentation map. bool/float arrays are interpreted as per-class
        masks (one channel per class); integer arrays as a map of class
        indices.
    shape : iterable of int
        Shape of the *corresponding image*, not of `arr`.
    nb_classes : int or None, optional
        Total number of classes. Required for integer inputs; ignored for
        bool/float inputs (derived from the number of channels).
    """
    do_assert(is_np_array(arr), "Expected to get numpy array, got %s." % (type(arr),))
    if arr.dtype.type == np.bool_:
        do_assert(arr.ndim in [2, 3])
        self.input_was = ("bool", arr.ndim)
        if arr.ndim == 2:
            arr = arr[..., np.newaxis]
        arr = arr.astype(np.float32)
    elif arr.dtype.type in NP_INT_TYPES.union(NP_UINT_TYPES):
        do_assert(arr.ndim == 2 or (arr.ndim == 3 and arr.shape[2] == 1))
        do_assert(nb_classes is not None)
        do_assert(nb_classes > 0)
        # NOTE: only the first 100 entries are validated (speed heuristic);
        # out-of-range values beyond that are not caught here.
        do_assert(np.min(arr.flat[0:100]) >= 0)
        # BUGFIX: was `<= nb_classes`, which accepted class index == nb_classes
        # and then crashed below in the `np.eye(nb_classes)[arr]` lookup with a
        # bare IndexError. Valid class indices are 0 .. nb_classes-1.
        do_assert(np.max(arr.flat[0:100]) < nb_classes)
        self.input_was = ("int", arr.dtype.type, arr.ndim)
        if arr.ndim == 3:
            arr = arr[..., 0]
        # TODO improve efficiency here by building only sub-heatmaps for classes actually
        # present in the image. This would also get rid of nb_classes.
        arr = np.eye(nb_classes)[arr]  # from class indices to one hot
        arr = arr.astype(np.float32)
    elif arr.dtype.type in NP_FLOAT_TYPES:
        do_assert(arr.ndim == 3)
        self.input_was = ("float", arr.dtype.type, arr.ndim)
        arr = arr.astype(np.float32)
    else:
        raise Exception(("Input was expected to be an ndarray of dtype bool or any dtype in %s or any dtype in %s. "
                         "Got dtype %s.") % (
                             str(NP_INT_TYPES.union(NP_UINT_TYPES)), str(NP_FLOAT_TYPES), str(arr.dtype)))
    # internal representation is always a float32 (H,W,C) stack of heatmaps
    do_assert(arr.ndim == 3)
    do_assert(arr.dtype.type == np.float32)
    self.arr = arr
    self.shape = shape
    self.nb_classes = nb_classes if nb_classes is not None else arr.shape[2]
def get_arr_int(self, background_threshold=0.01, background_class_id=None):
    """Return the segmentation map as an (H,W) int32 array of class ids.

    Per pixel, the class whose heatmap value is highest wins. If that best
    value stays below `background_threshold`, the pixel is assigned the
    background class id instead.

    Parameters
    ----------
    background_threshold : float, optional
        Minimum per-pixel heatmap value required to count as foreground.
    background_class_id : None or int, optional
        Fallback class id for below-threshold pixels. May only be provided
        if this object was created from an *integer* input; defaults to 0.

    Returns
    -------
    (H,W) ndarray
        int32 map of class indices. For bool/float inputs, 0 denotes the
        background and mask channel ``c`` maps to class ``c+1``.
    """
    from_masks = self.input_was[0] in ["bool", "float"]
    if from_masks:
        do_assert(background_class_id is None,
                  "The background class id may only be changed if the original input to SegmentationMapOnImage "
                  + "was an *integer* based segmentation map.")
    bg_id = 0 if background_class_id is None else background_class_id
    winning_channel = np.argmax(self.arr, axis=2)
    # bool/float inputs define the background implicitly (all channels
    # zero-like), so class ids are shifted up by one; integer inputs carry
    # an explicit background class at index 0.
    if from_masks:
        result = winning_channel + 1
    else:
        result = winning_channel
    if background_threshold is not None and background_threshold > 0:
        winning_value = np.amax(self.arr, axis=2)
        result[winning_value < background_threshold] = bg_id
    return result.astype(np.int32)
# TODO
# def get_arr_bool(self, allow_overlapping=False, threshold=0.5, background_threshold=0.01, background_class_id=0):
# raise NotImplementedError()
def draw(self, size=None, background_threshold=0.01, background_class_id=None, colors=None,
         return_foreground_mask=False):
    """Render the segmentation map as an RGB image.

    Parameters
    ----------
    size : None or float or iterable of int or iterable of float, optional
        Output size ``(height, width)``; see :func:`imgaug.imresize_single_image`.
        None keeps the segmentation map's own size.
    background_threshold : float, optional
        See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
    background_class_id : None or int, optional
        See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
    colors : None or list of tuple of int, optional
        One color per class; defaults to ``DEFAULT_SEGMENT_COLORS``.
    return_foreground_mask : bool, optional
        If True, additionally return a boolean mask that is True wherever
        the drawn class is not the background class.

    Returns
    -------
    segmap_drawn : (H,W,3) ndarray
        Rendered segmentation map (uint8).
    foreground_mask : (H,W) ndarray
        Boolean foreground mask; only returned if `return_foreground_mask`
        is True.
    """
    class_map = self.get_arr_int(background_threshold=background_threshold,
                                 background_class_id=background_class_id)
    nb_classes = 1 + np.max(class_map)
    palette = colors if colors is not None else SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS
    do_assert(nb_classes <= len(palette),
              "Can't draw all %d classes as it would exceed the maximum number of %d available colors." % (
                  nb_classes, len(palette),))
    drawn = np.zeros((class_map.shape[0], class_map.shape[1], 3), dtype=np.uint8)
    # only paint class ids that actually occur in the map
    present_ids = np.unique(class_map)
    for class_id, color in zip(sm.xrange(nb_classes), palette):
        if class_id in present_ids:
            drawn[class_map == class_id] = color
    foreground_mask = None
    if return_foreground_mask:
        bg_id = background_class_id if background_class_id is not None else 0
        foreground_mask = (class_map != bg_id)
    if size is not None:
        drawn = imresize_single_image(drawn, size, interpolation="nearest")
        if foreground_mask is not None:
            foreground_mask = imresize_single_image(
                foreground_mask.astype(np.uint8), size, interpolation="nearest") > 0
    if foreground_mask is None:
        return drawn
    return drawn, foreground_mask
def draw_on_image(self, image, alpha=0.75, resize="segmentation_map", background_threshold=0.01,
                  background_class_id=None, colors=None, draw_background=False):
    """
    Draw the segmentation map as an overlay over an image.

    Parameters
    ----------
    image : (H,W,3) ndarray
        Image onto which to draw the segmentation map. Dtype is expected to be uint8.

    alpha : float, optional
        Alpha/opacity value in ``[0.0, 1.0]`` used for mixing image and
        segmentation map. Higher values make the segmentation map more
        visible and the image less visible.

    resize : {'segmentation_map', 'image'}, optional
        If the image and the segmentation map differ in size, this selects
        which of the two is resized to the other's size.

    background_threshold : float, optional
        See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.

    background_class_id : None or int, optional
        See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.

    colors : None or list of tuple of int, optional
        Colors to use. One for each class to draw. If None, then default colors will be used.

    draw_background : bool, optional
        If True, the background is blended like any other class. If False,
        background pixels keep the image's original RGB values (no overlay).

    Returns
    -------
    mix : (H,W,3) ndarray
        Rendered overlay (dtype is uint8).
    """
    # assert RGB uint8 image and a sane alpha (small epsilon for float noise)
    do_assert(image.ndim == 3)
    do_assert(image.shape[2] == 3)
    do_assert(image.dtype.type == np.uint8)
    do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
    do_assert(resize in ["segmentation_map", "image"])
    if resize == "image":
        image = imresize_single_image(image, self.arr.shape[0:2], interpolation="cubic")
    # render the map (resized to the image if requested) plus a foreground mask
    segmap_drawn, foreground_mask = self.draw(
        background_threshold=background_threshold,
        background_class_id=background_class_id,
        size=image.shape[0:2] if resize == "segmentation_map" else None,
        colors=colors,
        return_foreground_mask=True
    )
    if draw_background:
        # blend everywhere
        mix = np.clip(
            (1-alpha) * image + alpha * segmap_drawn,
            0,
            255
        ).astype(np.uint8)
    else:
        # blend only at foreground pixels; background pixels are copied
        # verbatim from the image (mask used as a 0/1 multiplier per channel)
        foreground_mask = foreground_mask[..., np.newaxis]
        mix = np.zeros_like(image)
        mix += (~foreground_mask).astype(np.uint8) * image
        mix += foreground_mask.astype(np.uint8) * np.clip(
            (1-alpha) * image + alpha * segmap_drawn,
            0,
            255
        ).astype(np.uint8)
    return mix
def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
    """Pad the segmentation map on its top/right/bottom/left side.

    Parameters
    ----------
    top : int, optional
        Pixels to add at the top. Must be 0 or greater.
    right : int, optional
        Pixels to add at the right. Must be 0 or greater.
    bottom : int, optional
        Pixels to add at the bottom. Must be 0 or greater.
    left : int, optional
        Pixels to add at the left. Must be 0 or greater.
    mode : str, optional
        Padding mode; see :func:`numpy.pad`.
    cval : number, optional
        Fill value for ``mode="constant"``; see :func:`numpy.pad`.

    Returns
    -------
    segmap : imgaug.SegmentationMapOnImage
        Padded map of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
    """
    # `pad` here resolves to the module-level helper, not this method
    padded_arr = pad(self.arr, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
    result = SegmentationMapOnImage(padded_arr, shape=self.shape)
    result.input_was = self.input_was
    return result
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
    """Pad the segmentation map so that it matches a target aspect ratio.

    Only the sides of the smaller dimension (left/right or top/bottom) are
    padded, and both of those sides equally.

    Parameters
    ----------
    aspect_ratio : float
        Target aspect ratio as width/height, e.g. 2.0 for twice as wide as
        high.
    mode : str, optional
        Padding mode; see :func:`numpy.pad`.
    cval : number, optional
        Fill value for ``mode="constant"``; see :func:`numpy.pad`.
    return_pad_amounts : bool, optional
        If True, also return the per-side padding as a tuple
        ``(top, right, bottom, left)`` of ints.

    Returns
    -------
    segmap : imgaug.SegmentationMapOnImage
        Padded segmentation map.
    pad_amounts : tuple of int
        Padding applied per side; only returned if `return_pad_amounts` is
        True.
    """
    # `pad_to_aspect_ratio` here resolves to the module-level helper
    padded_arr, pad_amounts = pad_to_aspect_ratio(
        self.arr, aspect_ratio=aspect_ratio, mode=mode, cval=cval, return_pad_amounts=True)
    result = SegmentationMapOnImage(padded_arr, shape=self.shape)
    result.input_was = self.input_was
    if return_pad_amounts:
        return result, pad_amounts
    return result
def scale(self, sizes, interpolation="cubic"):
    """Rescale the segmentation map array to the given size.

    Parameters
    ----------
    sizes : float or iterable of int or iterable of float
        New size as ``(height, width)``; see
        :func:`imgaug.imresize_single_image`.
    interpolation : None or str or int, optional
        Resize interpolation; see :func:`imgaug.imresize_single_image`.
        The map is stored as float heatmaps, so smooth interpolation can be
        more reasonable than nearest neighbour.

    Returns
    -------
    segmap : imgaug.SegmentationMapOnImage
        Rescaled segmentation map object.
    """
    resized = imresize_single_image(self.arr, sizes, interpolation=interpolation)
    # cubic interpolation can overshoot the [0.0, 1.0] value range,
    # see https://github.com/opencv/opencv/issues/7195
    # TODO area interpolation too?
    resized = np.clip(resized, 0.0, 1.0)
    result = SegmentationMapOnImage(resized, shape=self.shape)
    result.input_was = self.input_was
    return result
def to_heatmaps(self, only_nonempty=False, not_none_if_no_nonempty=False):
    """Convert the segmentation map to a heatmaps object.

    Each class becomes one heatmap channel.

    Parameters
    ----------
    only_nonempty : bool, optional
        If True, only channels whose mask is non-empty are kept, and the
        list of kept class ids is returned as a second value.
    not_none_if_no_nonempty : bool, optional
        If `only_nonempty` is True and every channel is empty, return a
        one-channel heatmaps object (class 0) instead of None.

    Returns
    -------
    imgaug.HeatmapsOnImage or None
        The heatmaps object, or None if `only_nonempty` was True and no
        class appeared in the map.
    class_indices : list of int
        Class ids (0 to C-1) actually included; only returned if
        `only_nonempty` is True.
    """
    if not only_nonempty:
        return HeatmapsOnImage.from_0to1(self.arr, self.shape, min_value=0.0, max_value=1.0)
    # a channel counts as non-empty if its summed activation exceeds a tiny
    # epsilon
    channel_sums = np.sum(self.arr, axis=(0, 1))
    nonempty_mask = channel_sums > 0 + 1e-4
    if not np.any(nonempty_mask):
        if not not_none_if_no_nonempty:
            return None, []
        nonempty_mask[0] = True  # fall back to a single channel for class 0
    class_indices = np.arange(self.arr.shape[2])[nonempty_mask]
    kept_channels = self.arr[..., class_indices]
    return HeatmapsOnImage(kept_channels, self.shape, min_value=0.0, max_value=1.0), class_indices
@staticmethod
def from_heatmaps(heatmaps, class_indices=None, nb_classes=None):
    """Convert a heatmaps object to a segmentation map.

    Assumes one heatmap channel per class.

    Parameters
    ----------
    heatmaps : imgaug.HeatmapsOnImage
        Heatmaps to convert.
    class_indices : None or list of int, optional
        Class id represented by each heatmap channel (see the secondary
        output of :func:`imgaug.SegmentationMapOnImage.to_heatmaps`). Must
        match the number of channels if provided.
    nb_classes : None or int, optional
        Total number of classes; required when `class_indices` is given.

    Returns
    -------
    imgaug.SegmentationMapOnImage
        Segmentation map derived from the heatmaps.
    """
    if class_indices is None:
        return SegmentationMapOnImage(heatmaps.arr_0to1, shape=heatmaps.shape)
    do_assert(nb_classes is not None)
    do_assert(min(class_indices) >= 0)
    do_assert(max(class_indices) < nb_classes)
    do_assert(len(class_indices) == heatmaps.arr_0to1.shape[2])
    arr_0to1 = heatmaps.arr_0to1
    height, width = arr_0to1.shape[0], arr_0to1.shape[1]
    arr_full = np.zeros((height, width, nb_classes), dtype=np.float32)
    # scatter the channels into their class slots; channels are consumed in
    # ascending class-id order, all other slots stay zero
    wanted_ids = set(class_indices)
    source_channel = 0
    for class_id in sm.xrange(nb_classes):
        if class_id in wanted_ids:
            arr_full[:, :, class_id] = arr_0to1[:, :, source_channel]
            source_channel += 1
    return SegmentationMapOnImage(arr_full, shape=heatmaps.shape)
def copy(self):
    """
    Create a copy of the segmentation map object.

    NOTE(review): despite the historical "shallow copy" wording, this simply
    delegates to :func:`imgaug.SegmentationMapOnImage.deepcopy`, so the
    returned object is in fact a deep copy.

    Returns
    -------
    imgaug.SegmentationMapOnImage
        Copy (currently a deep copy).
    """
    return self.deepcopy()
def deepcopy(self):
    """
    Create a deep copy of the segmentation map object.

    Returns
    -------
    imgaug.SegmentationMapOnImage
        Deep copy.
    """
    duplicate = SegmentationMapOnImage(self.arr, shape=self.shape, nb_classes=self.nb_classes)
    # carry over the input bookkeeping, which the constructor would
    # otherwise re-derive from the (always float32) internal array
    duplicate.input_was = self.input_was
    return duplicate
############################
# Background augmentation
############################
class Batch(object):
    """
    Container for the data of one batch, before and after augmentation.

    Parameters
    ----------
    images : None or (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
        The images to augment.

    heatmaps : None or list of imgaug.HeatmapsOnImage
        The heatmaps to augment.

    segmentation_maps : None or list of SegmentationMapOnImage
        The segmentation maps to augment.

    keypoints : None or list of KeypointOnImage
        The keypoints to augment.

    bounding_boxes : None or list of BoundingBoxesOnImage
        The bounding boxes to augment.

    data
        Arbitrary additional data carried along with the batch (e.g. the
        filepath of each image). Useful with background augmentation, where
        augmented batches may come back out of order.
    """

    def __init__(self, images=None, heatmaps=None, segmentation_maps=None, keypoints=None, bounding_boxes=None,
                 data=None):
        # inputs as provided by the caller
        self.images = images
        self.heatmaps = heatmaps
        self.segmentation_maps = segmentation_maps
        self.keypoints = keypoints
        self.bounding_boxes = bounding_boxes
        self.data = data
        # augmentation results, filled in later
        self.images_aug = None
        self.heatmaps_aug = None
        self.segmentation_maps_aug = None
        self.keypoints_aug = None
        self.bounding_boxes_aug = None

    def deepcopy(self):
        """Return a deep copy of this batch (inputs and results)."""

        def _copy_images(images):
            # images may be None, a single ndarray or a list of ndarrays
            if images is None:
                return None
            if is_np_array(images):
                return np.copy(images)
            do_assert(is_iterable(images))
            do_assert(all([is_np_array(image) for image in images]))
            return list([np.copy(image) for image in images])

        def _copy_augmentable_objects(augmentables, clazz):
            # each augmentable type knows how to deep-copy itself
            if augmentables is None:
                return None
            do_assert(is_iterable(augmentables))
            do_assert(all([isinstance(augmentable, clazz) for augmentable in augmentables]))
            return [augmentable.deepcopy() for augmentable in augmentables]

        duplicate = Batch(
            images=_copy_images(self.images),
            heatmaps=_copy_augmentable_objects(self.heatmaps, HeatmapsOnImage),
            segmentation_maps=_copy_augmentable_objects(self.segmentation_maps, SegmentationMapOnImage),
            keypoints=_copy_augmentable_objects(self.keypoints, KeypointsOnImage),
            bounding_boxes=_copy_augmentable_objects(self.bounding_boxes, BoundingBoxesOnImage),
            data=copy.deepcopy(self.data)
        )
        duplicate.images_aug = _copy_images(self.images_aug)
        duplicate.heatmaps_aug = _copy_augmentable_objects(self.heatmaps_aug, HeatmapsOnImage)
        duplicate.segmentation_maps_aug = _copy_augmentable_objects(self.segmentation_maps_aug, SegmentationMapOnImage)
        duplicate.keypoints_aug = _copy_augmentable_objects(self.keypoints_aug, KeypointsOnImage)
        duplicate.bounding_boxes_aug = _copy_augmentable_objects(self.bounding_boxes_aug, BoundingBoxesOnImage)
        return duplicate
class BatchLoader(object):
    """
    Class to load batches in the background.

    Loaded batches can be accessed using :attr:`imgaug.BatchLoader.queue`.

    Parameters
    ----------
    load_batch_func : callable or generator
        Generator or generator function (i.e. function that yields Batch objects)
        or a function that returns a list of Batch objects.
        Background loading automatically stops when the last batch was yielded or the
        last batch in the list was reached.

    queue_size : int, optional
        Maximum number of batches to store in the queue. May be set higher
        for small images and/or small batches.

    nb_workers : int, optional
        Number of workers to run in the background.

    threaded : bool, optional
        Whether to run the background processes using threads (True) or full processes (False).
    """

    def __init__(self, load_batch_func, queue_size=50, nb_workers=1, threaded=True):
        do_assert(queue_size >= 2)
        do_assert(nb_workers >= 1)
        # total capacity is split between the worker-filled internal queue
        # and the public output queue
        self._queue_internal = multiprocessing.Queue(queue_size//2)
        self.queue = multiprocessing.Queue(queue_size//2)
        self.join_signal = multiprocessing.Event()
        self.workers = []
        self.threaded = threaded
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            if threaded:
                # threads receive no seed value (None); only processes are
                # explicitly seeded
                worker = threading.Thread(
                    target=self._load_batches,
                    args=(load_batch_func, self._queue_internal, self.join_signal, None)
                )
            else:
                worker = multiprocessing.Process(
                    target=self._load_batches,
                    args=(load_batch_func, self._queue_internal, self.join_signal, seeds[i])
                )
            worker.daemon = True
            worker.start()
            self.workers.append(worker)
        # background thread that moves batches from the internal queue to
        # the public queue
        self.main_worker_thread = threading.Thread(
            target=self._main_worker,
            args=()
        )
        self.main_worker_thread.daemon = True
        self.main_worker_thread.start()

    def count_workers_alive(self):
        # number of loader workers (threads/processes) still running
        return sum([int(worker.is_alive()) for worker in self.workers])

    def all_finished(self):
        """
        Determine whether the workers have finished the loading process.

        Returns
        -------
        out : bool
            True if all workers have finished. Else False.
        """
        return self.count_workers_alive() == 0

    def _main_worker(self):
        # Forwards pickled batches from the internal queue to the public
        # queue. Each loader worker signals its own end by putting an empty
        # string; the whole stream ends with a pickled None.
        workers_running = self.count_workers_alive()
        while workers_running > 0 and not self.join_signal.is_set():
            # wait for a new batch in the source queue and load it
            try:
                batch_str = self._queue_internal.get(timeout=0.1)
                if batch_str == "":
                    workers_running -= 1
                else:
                    self.queue.put(batch_str)
            except QueueEmpty:
                time.sleep(0.01)
            except (EOFError, BrokenPipeError):
                break
            workers_running = self.count_workers_alive()
        # All workers have finished, move the remaining entries from internal to external queue
        while True:
            try:
                batch_str = self._queue_internal.get(timeout=0.005)
                if batch_str != "":
                    self.queue.put(batch_str)
            except QueueEmpty:
                break
            except (EOFError, BrokenPipeError):
                break
        # pickled None marks the end of the stream for consumers
        self.queue.put(pickle.dumps(None, protocol=-1))
        time.sleep(0.01)

    def _load_batches(self, load_batch_func, queue, join_signal, seedval):
        # Worker body: pull Batch objects from load_batch_func and push them
        # (pickled) into `queue` until exhausted or join_signal is set.
        if seedval is not None:
            random.seed(seedval)
            np.random.seed(seedval)
            seed(seedval)
        try:
            # accept both a generator object and a callable producing batches
            gen = load_batch_func() if not is_generator(load_batch_func) else load_batch_func
            for batch in gen:
                do_assert(isinstance(batch, Batch),
                          "Expected batch returned by load_batch_func to be of class imgaug.Batch, got %s." % (
                              type(batch),))
                batch_pickled = pickle.dumps(batch, protocol=-1)
                # retry putting until there is room or a shutdown is requested
                while not join_signal.is_set():
                    try:
                        queue.put(batch_pickled, timeout=0.005)
                        break
                    except QueueFull:
                        pass
                if join_signal.is_set():
                    break
        except Exception:
            traceback.print_exc()
        finally:
            # empty string tells _main_worker that this worker is done
            queue.put("")
        time.sleep(0.01)

    def terminate(self):
        """Stop all workers."""
        if not self.join_signal.is_set():
            self.join_signal.set()
        # give minimal time to put generated batches in queue and gracefully shut down
        time.sleep(0.01)
        if self.main_worker_thread.is_alive():
            self.main_worker_thread.join()
        if self.threaded:
            for worker in self.workers:
                if worker.is_alive():
                    worker.join()
        else:
            # processes are terminated forcefully, then joined
            for worker in self.workers:
                if worker.is_alive():
                    worker.terminate()
                    worker.join()
        # wait until all workers are fully terminated
        while not self.all_finished():
            time.sleep(0.001)
        # signal end-of-stream to any consumer still reading
        self.queue.put(pickle.dumps(None, protocol=-1))
        time.sleep(0.01)
        # clean the queue, this reportedly prevents hanging threads
        while True:
            try:
                self._queue_internal.get(timeout=0.005)
            except QueueEmpty:
                break
        # NOTE(review): _closed is a private attribute of multiprocessing.Queue
        if not self._queue_internal._closed:
            self._queue_internal.close()
        if not self.queue._closed:
            self.queue.close()
        self._queue_internal.join_thread()
        self.queue.join_thread()
        time.sleep(0.025)

    def __del__(self):
        # best-effort shutdown request on garbage collection
        if not self.join_signal.is_set():
            self.join_signal.set()
class BackgroundAugmenter(object):
    """
    Class to augment batches in the background (while training on the GPU).

    This is a wrapper around the multiprocessing module.

    Parameters
    ----------
    batch_loader : BatchLoader
        BatchLoader object to load data in the
        background.

    augseq : Augmenter
        An augmenter to apply to all loaded images.
        This may be e.g. a Sequential to apply multiple augmenters.

    queue_size : int
        Size of the queue that is used to temporarily save the augmentation
        results. Larger values offer the background processes more room
        to save results when the main process doesn't load much, i.e. they
        can lead to smoother and faster training. For large images, high
        values can block a lot of RAM though.

    nb_workers : 'auto' or int
        Number of background workers to spawn.
        If ``auto``, it will be set to ``C-1``, where ``C`` is the number of CPU cores.
    """

    def __init__(self, batch_loader, augseq, queue_size=50, nb_workers="auto"):
        do_assert(queue_size > 0)
        self.augseq = augseq
        self.queue_source = batch_loader.queue
        self.queue_result = multiprocessing.Queue(queue_size)
        if nb_workers == "auto":
            try:
                nb_workers = multiprocessing.cpu_count()
            except (ImportError, NotImplementedError):
                nb_workers = 1
            # try to reserve at least one core for the main process
            nb_workers = max(1, nb_workers - 1)
        else:
            do_assert(nb_workers >= 1)
        self.nb_workers = nb_workers
        self.workers = []
        self.nb_workers_finished = 0
        # one seed per worker so that the workers augment differently
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            worker = multiprocessing.Process(
                target=self._augment_images_worker,
                args=(augseq, self.queue_source, self.queue_result, seeds[i])
            )
            worker.daemon = True
            worker.start()
            self.workers.append(worker)

    def all_finished(self):
        # True once every worker has sent its end-of-stream marker
        return self.nb_workers_finished == self.nb_workers

    def get_batch(self):
        """
        Returns a batch from the queue of augmented batches.

        If workers are still running and there are no batches in the queue,
        it will automatically wait for the next batch.

        Returns
        -------
        out : None or imgaug.Batch
            One batch or None if all workers have finished.
        """
        if self.all_finished():
            return None
        batch_str = self.queue_result.get()
        batch = pickle.loads(batch_str)
        if batch is not None:
            return batch
        else:
            # a pickled None means one worker finished; recurse until a real
            # batch arrives or all workers are done
            self.nb_workers_finished += 1
            if self.nb_workers_finished >= self.nb_workers:
                try:
                    self.queue_source.get(timeout=0.001)  # remove the None from the source queue
                except QueueEmpty:
                    pass
                return None
            else:
                return self.get_batch()

    def _augment_images_worker(self, augseq, queue_source, queue_result, seedval):
        """
        Augment endlessly images in the source queue.

        This is a worker function for that endlessly queries the source queue (input batches),
        augments batches in it and sends the result to the output queue.
        """
        # seed all involved RNGs so each worker produces its own sequence
        np.random.seed(seedval)
        random.seed(seedval)
        augseq.reseed(seedval)
        seed(seedval)
        loader_finished = False
        while not loader_finished:
            # wait for a new batch in the source queue and load it
            try:
                batch_str = queue_source.get(timeout=0.1)
                batch = pickle.loads(batch_str)
                if batch is None:
                    loader_finished = True
                    # put it back in so that other workers know that the loading queue is finished
                    queue_source.put(pickle.dumps(None, protocol=-1))
                else:
                    batch_aug = list(augseq.augment_batches([batch], background=False))[0]
                    # send augmented batch to output queue
                    batch_str = pickle.dumps(batch_aug, protocol=-1)
                    queue_result.put(batch_str)
            except QueueEmpty:
                time.sleep(0.01)
        # pickled None tells get_batch() that this worker is done
        queue_result.put(pickle.dumps(None, protocol=-1))
        time.sleep(0.01)

    def terminate(self):
        """
        Terminates all background processes immediately.

        This will also free their RAM.
        """
        for worker in self.workers:
            if worker.is_alive():
                worker.terminate()
        # mark everything as finished so get_batch() returns None
        self.nb_workers_finished = len(self.workers)
        # NOTE(review): _closed is a private attribute of multiprocessing.Queue
        if not self.queue_result._closed:
            self.queue_result.close()
        time.sleep(0.01)

    def __del__(self):
        time.sleep(0.1)
        self.terminate()
|
import ConfigParser
class Config(dict):
    """Dict subclass whose keys can also be read and written as attributes."""

    def __getattr__(self, key):
        # missing keys raise KeyError, exactly like item access
        return self[key]

    def __setattr__(self, key, value):
        self[key] = value
def read_config(filename):
    """
    Read kleenex settings from the ``[kleenex]`` section of ``filename``,
    for example:

    [kleenex]
    db = sqlite:///coverage.db
    parent = origin/master
    discover = true
    report = true
    report_output = sys://stdout
    record = true
    skip_missing = true
    max_distance = 4
    test_missing = true

    Returns a ``Config`` with typed values. If the section is missing, the
    defaults are returned instead (as raw strings).
    """
    # defaults must be strings, since RawConfigParser stores raw option values
    config = ConfigParser.RawConfigParser({
        'db': 'sqlite:///coverage.db',
        'parent': 'origin/master',
        'discover': 'false',
        'report': 'false',
        'report_output': 'sys://stdout',
        'record': 'false',
        'skip_missing': 'true',
        'max_distance': '4',
        'test_missing': 'true',
    }, dict_type=Config, allow_no_value=False)
    config.read(filename)
    ns = 'kleenex'
    if not config.has_section(ns):
        # no [kleenex] section: hand back the (string-valued) defaults
        return config.defaults()
    # convert to properly typed values (booleans/ints where applicable)
    return Config({
        'db': config.get(ns, 'db'),
        'parent': config.get(ns, 'parent'),
        'discover': config.getboolean(ns, 'discover'),
        'report': config.getboolean(ns, 'report'),
        'report_output': config.get(ns, 'report_output'),
        'record': config.getboolean(ns, 'record'),
        'skip_missing': config.getboolean(ns, 'skip_missing'),
        'max_distance': config.getint(ns, 'max_distance'),
        'test_missing': config.getboolean(ns, 'test_missing'),
    })
Turn on reporting by default
import ConfigParser
class Config(dict):
    """Dict subclass whose keys can also be read and written as attributes."""

    def __getattr__(self, key):
        # missing keys raise KeyError, exactly like item access
        return self[key]

    def __setattr__(self, key, value):
        self[key] = value
def read_config(filename):
    """
    Read kleenex settings from the ``[kleenex]`` section of ``filename``,
    for example:

    [kleenex]
    db = sqlite:///coverage.db
    parent = origin/master
    discover = true
    report = true
    report_output = sys://stdout
    record = true
    skip_missing = true
    max_distance = 4
    test_missing = true

    Returns a ``Config`` with typed values. If the section is missing, the
    defaults are returned instead (as raw strings).
    """
    # defaults must be strings, since RawConfigParser stores raw option values
    defaults = {
        'db': 'sqlite:///coverage.db',
        'parent': 'origin/master',
        'discover': 'false',
        'report': 'true',
        'report_output': 'sys://stdout',
        'record': 'false',
        'skip_missing': 'true',
        'max_distance': '4',
        'test_missing': 'true',
    }
    parser = ConfigParser.RawConfigParser(defaults, dict_type=Config, allow_no_value=False)
    parser.read(filename)
    section = 'kleenex'
    if not parser.has_section(section):
        # no [kleenex] section: hand back the (string-valued) defaults
        return parser.defaults()
    # convert to properly typed values (booleans/ints where applicable)
    return Config({
        'db': parser.get(section, 'db'),
        'parent': parser.get(section, 'parent'),
        'discover': parser.getboolean(section, 'discover'),
        'report': parser.getboolean(section, 'report'),
        'report_output': parser.get(section, 'report_output'),
        'record': parser.getboolean(section, 'record'),
        'skip_missing': parser.getboolean(section, 'skip_missing'),
        'max_distance': parser.getint(section, 'max_distance'),
        'test_missing': parser.getboolean(section, 'test_missing'),
    })
"""Find spark home, and initialize by adding pyspark to sys.path.
If SPARK_HOME is defined, it will be used to put pyspark on sys.path.
Otherwise, common locations for spark (currently only Homebrew's default) will be searched.
"""
from glob import glob
import os
import sys
__version__ = '1.2.0.dev'
def find():
    """Return the path of a local Spark installation.

    The SPARK_HOME environment variable is checked first; otherwise a list
    of common install locations is searched (currently only the OS X
    Homebrew default).

    Raises
    ------
    ValueError
        If no Spark installation could be located.
    """
    spark_home = os.environ.get('SPARK_HOME', None)
    if not spark_home:
        candidates = [
            '/usr/local/opt/apache-spark/libexec',  # OS X Homebrew
            # Any other common places to look?
        ]
        for candidate in candidates:
            if os.path.exists(candidate):
                spark_home = candidate
                break
    if not spark_home:
        raise ValueError("Couldn't find Spark, make sure SPARK_HOME env is set"
                         " or Spark is in an expected location (e.g. from homebrew installation).")
    return spark_home
def change_rc(spark_home, spark_python, py4j):
    """Persist environment changes by appending to the user's shell config.

    Appends SPARK_HOME/PYTHONPATH export lines to ``~/.bashrc``, but only if
    that file already exists. Currently only works for bash.

    Parameters
    ----------
    spark_home : str
        Path to Spark installation.
    spark_python : str
        Path to python subdirectory of Spark installation.
    py4j : str
        Path to py4j library.
    """
    bashrc_location = os.path.expanduser("~/.bashrc")
    if not os.path.isfile(bashrc_location):
        return
    with open(bashrc_location, 'a') as bashrc:
        bashrc.write("\n# Added by findspark\n")
        bashrc.write("export SPARK_HOME=" + spark_home + "\n")
        bashrc.write("export PYTHONPATH=" + spark_python + ":" +
                     py4j + ":$PYTHONPATH\n\n")
def edit_ipython_profile(spark_home, spark_python, py4j):
    """Install a startup file into the active IPython profile that imports pyspark.

    The written startup file sets SPARK_HOME, prepends the Spark python
    directories to ``sys.path`` and imports pyspark on every IPython start.

    Parameters
    ----------
    spark_home : str
        Path to Spark installation.
    spark_python : str
        Path to python subdirectory of Spark installation.
    py4j : str
        Path to py4j library.
    """
    from IPython import get_ipython
    ip = get_ipython()
    if ip:
        # running inside IPython: use its active profile
        profile_dir = ip.profile_dir.location
    else:
        from IPython.utils.path import locate_profile
        profile_dir = locate_profile()
    startup_file_loc = os.path.join(profile_dir, "startup", "findspark.py")
    # lines of code to be run when IPython starts
    startup_lines = [
        "import sys, os\n",
        "os.environ['SPARK_HOME'] = '" + spark_home + "'\n",
        "sys.path[:0] = " + str([spark_python, py4j]) + "\n",
        "import pyspark\n",
    ]
    with open(startup_file_loc, 'w') as startup_file:
        startup_file.writelines(startup_lines)
def init(spark_home=None, python_path=None, edit_rc=False, edit_profile=False):
    """Make pyspark importable.

    Sets SPARK_HOME / PYSPARK_PYTHON and prepends Spark's python
    directory plus the bundled py4j zip to sys.path. If no Spark
    location is provided, tries to find an installation.

    Parameters
    ----------
    spark_home : str, optional, default = None
        Path to Spark installation; located automatically when omitted.
    python_path : str, optional, default = None
        Python executable for Spark workers (PYSPARK_PYTHON); defaults
        to the currently running interpreter.
    edit_rc : bool, optional, default = False
        Whether to persist changes by appending to the shell config.
    edit_profile : bool, optional, default = False
        Whether to create an IPython startup file that configures and
        imports pyspark.
    """
    home = spark_home or find()
    worker_python = python_path or sys.executable
    # Export the locations so spawned Spark processes see them too.
    os.environ['SPARK_HOME'] = home
    os.environ['PYSPARK_PYTHON'] = worker_python
    # pyspark lives under $SPARK_HOME/python; py4j ships as a zip in lib/.
    pyspark_dir = os.path.join(home, 'python')
    py4j_zip = glob(os.path.join(pyspark_dir, 'lib', 'py4j-*.zip'))[0]
    sys.path[:0] = [pyspark_dir, py4j_zip]
    if edit_rc:
        change_rc(home, pyspark_dir, py4j_zip)
    if edit_profile:
        edit_ipython_profile(home, pyspark_dir, py4j_zip)
def add_packages(packages):
    """Add external packages to the pyspark interpreter.

    Appends a ``--packages`` clause to PYSPARK_SUBMIT_ARGS so that
    spark-submit resolves the packages when the interpreter starts.

    Parameters
    ----------
    packages : str or list of str
        Package name(s) in string format.
    """
    # If the parameter is a string, convert to a single element list.
    if isinstance(packages, str):
        packages = [packages]
    # Bug fix: ``os.environ[...] += ...`` raises KeyError when
    # PYSPARK_SUBMIT_ARGS has not been set yet; read with a default
    # instead so the first call also works.
    existing = os.environ.get("PYSPARK_SUBMIT_ARGS", "")
    os.environ["PYSPARK_SUBMIT_ARGS"] = (
        existing + " --packages " + ",".join(packages) + " pyspark-shell"
    )
def add_jars(jars):
    """Add external jars to the pyspark interpreter.

    Sets PYSPARK_SUBMIT_ARGS with a ``--jars`` clause.

    NOTE: this overwrites any existing PYSPARK_SUBMIT_ARGS value
    (pre-existing behavior, preserved here).

    Parameters
    ----------
    jars : str or list of str
        Path(s) to jars in string format.
    """
    # If the parameter is a string, convert to a single element list.
    if isinstance(jars, str):
        jars = [jars]
    # Bug fix: the join previously iterated the undefined name
    # ``packages``, raising NameError on every call; join ``jars``.
    os.environ["PYSPARK_SUBMIT_ARGS"] = " --jars " + ",".join(jars) + " pyspark-shell"
# packages --> jars in add_jars  (stray changelog note; commented out so the module stays importable)
"""Find spark home, and initialize by adding pyspark to sys.path.
If SPARK_HOME is defined, it will be used to put pyspark on sys.path.
Otherwise, common locations for spark (currently only Homebrew's default) will be searched.
"""
from glob import glob
import os
import sys
__version__ = '1.2.0.dev'
def find():
    """Return the location of a local Spark installation.

    Will first check the SPARK_HOME env variable, and otherwise
    search common installation locations, e.g. from homebrew.

    Raises
    ------
    ValueError
        If no Spark installation can be located.
    """
    spark_home = os.environ.get('SPARK_HOME', None)
    if spark_home:
        return spark_home
    candidates = [
        '/usr/local/opt/apache-spark/libexec',  # OS X Homebrew
        # Any other common places to look?
    ]
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    raise ValueError("Couldn't find Spark, make sure SPARK_HOME env is set"
                     " or Spark is in an expected location (e.g. from homebrew installation).")
def change_rc(spark_home, spark_python, py4j):
    """Persist changes to the environment by editing the shell config.

    Adds export lines for SPARK_HOME and PYTHONPATH (including the
    dependency paths) to ~/.bashrc. The file is only touched when it
    already exists. Currently only works for bash.

    Parameters
    ----------
    spark_home : str
        Path to Spark installation.
    spark_python : str
        Path to python subdirectory of Spark installation.
    py4j : str
        Path to py4j library.
    """
    bashrc = os.path.expanduser("~/.bashrc")
    if os.path.isfile(bashrc):
        snippet = ("\n# Added by findspark\n"
                   "export SPARK_HOME=" + spark_home + "\n"
                   "export PYTHONPATH=" + spark_python + ":" +
                   py4j + ":$PYTHONPATH\n\n")
        with open(bashrc, "a") as fh:
            fh.write(snippet)
def edit_ipython_profile(spark_home, spark_python, py4j):
    """Install an IPython startup file that configures and imports pyspark.

    The startup file sets the required environment variables and imports
    pyspark every time IPython starts.

    Parameters
    ----------
    spark_home : str
        Path to Spark installation.
    spark_python : str
        Path to python subdirectory of Spark installation.
    py4j : str
        Path to py4j library.
    """
    from IPython import get_ipython
    ipy = get_ipython()
    # Prefer the active profile when running inside IPython; fall back
    # to locating the default profile on disk.
    if ipy:
        profile_dir = ipy.profile_dir.location
    else:
        from IPython.utils.path import locate_profile
        profile_dir = locate_profile()
    target = os.path.join(profile_dir, "startup", "findspark.py")
    # Code executed on every IPython startup.
    body = [
        "import sys, os\n",
        "os.environ['SPARK_HOME'] = '" + spark_home + "'\n",
        "sys.path[:0] = " + str([spark_python, py4j]) + "\n",
        "import pyspark\n",
    ]
    with open(target, 'w') as fh:
        fh.writelines(body)
def init(spark_home=None, python_path=None, edit_rc=False, edit_profile=False):
    """Make pyspark importable.

    Sets environment variables and adds dependencies to sys.path.
    If no Spark location is provided, will try to find an installation.

    Parameters
    ----------
    spark_home : str, optional, default = None
        Path to Spark installation, found automatically if not provided.
    python_path : str, optional, default = None
        Path to Python for Spark workers (PYSPARK_PYTHON); the current
        interpreter is used if not provided.
    edit_rc : bool, optional, default = False
        Whether to attempt to persist changes by appending to the shell
        config.
    edit_profile : bool, optional, default = False
        Whether to create an IPython startup file to automatically
        configure and import pyspark.
    """
    resolved_home = spark_home or find()
    resolved_python = python_path or sys.executable
    # Make both locations visible to child processes as well.
    os.environ['SPARK_HOME'] = resolved_home
    os.environ['PYSPARK_PYTHON'] = resolved_python
    # pyspark sits in $SPARK_HOME/python; py4j is bundled as a zip in lib/.
    spark_python = os.path.join(resolved_home, 'python')
    py4j = glob(os.path.join(spark_python, 'lib', 'py4j-*.zip'))[0]
    sys.path[:0] = [spark_python, py4j]
    if edit_rc:
        change_rc(resolved_home, spark_python, py4j)
    if edit_profile:
        edit_ipython_profile(resolved_home, spark_python, py4j)
def add_packages(packages):
    """Add external packages to the pyspark interpreter.

    Appends a ``--packages`` clause to PYSPARK_SUBMIT_ARGS so that
    spark-submit resolves the packages when the interpreter starts.

    Parameters
    ----------
    packages : str or list of str
        Package name(s) in string format.
    """
    # If the parameter is a string, convert to a single element list.
    if isinstance(packages, str):
        packages = [packages]
    # Bug fix: ``os.environ[...] += ...`` raises KeyError when
    # PYSPARK_SUBMIT_ARGS has not been set yet; read with a default
    # instead so the first call also works.
    existing = os.environ.get("PYSPARK_SUBMIT_ARGS", "")
    os.environ["PYSPARK_SUBMIT_ARGS"] = (
        existing + " --packages " + ",".join(packages) + " pyspark-shell"
    )
def add_jars(jars):
    """Add external jars to the pyspark interpreter.

    Sets PYSPARK_SUBMIT_ARGS with a ``--jars`` clause (overwriting any
    previous value, matching the original behavior).

    Parameters
    ----------
    jars : str or list of str
        Path(s) to jars in string format.
    """
    # Accept a bare string as a single-element list.
    jar_list = [jars] if isinstance(jars, str) else jars
    os.environ["PYSPARK_SUBMIT_ARGS"] = " --jars " + ",".join(jar_list) + " pyspark-shell"
# |  (stray file-boundary artifact; commented out so the file stays importable)
#######################################################################
#
# Note: This file is a generated file--do not edit it directly!
# Instead make changes to the appropriate content in the database or
# write up a bug here:
#
# https://bugzilla.mozilla.org/enter_bug.cgi?product=support.mozilla.org
#
# with the specific lines that are problematic and why.
#
# You can generate this file by running:
#
# ./manage.py extract_db
#
#######################################################################
from django.utils.translation import pgettext
pgettext("DB: kbadge.Badge.title", """2021 KB Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved English edits during 2021.""")
pgettext("DB: kbadge.Badge.title", """2020 KB Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved English edits during 2020.""")
pgettext("DB: kbadge.Badge.title", """2019 KB Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved English edits during 2019.""")
pgettext("DB: kbadge.Badge.title", """2020 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 20 support forum replies during 2020.""")
pgettext("DB: kbadge.Badge.title", """2019 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 20 support forum replies during 2019.""")
pgettext("DB: kbadge.Badge.title", """2020 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 50 Army of Awesome tweets during 2020.""")
pgettext("DB: kbadge.Badge.title", """2019 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 50 Army of Awesome tweets during 2019.""")
pgettext("DB: kbadge.Badge.title", """2020 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved translations edits during 2020.""")
pgettext("DB: kbadge.Badge.title", """2019 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved translations edits during 2019.""")
pgettext("DB: kbadge.Badge.title", """2018 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved translations edits during 2018.""")
pgettext("DB: kbadge.Badge.title", """2018 KB Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved English edits during 2018.""")
pgettext("DB: kbadge.Badge.title", """2018 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 50 Army of Awesome tweets during 2018.""")
pgettext("DB: kbadge.Badge.title", """2018 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 20 support forum replies during 2018.""")
pgettext("DB: kbadge.Badge.title", """2017 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved translations edits during 2017.""")
pgettext("DB: kbadge.Badge.title", """2017 KB Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved English edits during 2017.""")
pgettext("DB: kbadge.Badge.title", """2017 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 50 Army of Awesome tweets during 2017.""")
pgettext("DB: kbadge.Badge.title", """2017 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 20 support forum replies during 2017.""")
pgettext("DB: kbadge.Badge.title", """2016 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved translations edits during 2016.""")
pgettext("DB: kbadge.Badge.title", """2016 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 30 support forum replies during 2016.""")
pgettext("DB: kbadge.Badge.title", """2016 KB Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved English edits during 2016.""")
pgettext("DB: kbadge.Badge.title", """2016 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 50 Army of Awesome tweets during 2016.""")
pgettext("DB: kbadge.Badge.title", """2015 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 30 support forum replies during 2015.""")
pgettext("DB: kbadge.Badge.title", """2015 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 50 Army of Awesome tweets during 2015.""")
pgettext("DB: kbadge.Badge.title", """2015 KB Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved English edits during 2015.""")
pgettext("DB: kbadge.Badge.title", """2015 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved translations edits during 2015.""")
pgettext("DB: kbadge.Badge.title", """2014 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Army of Awesome 2014 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2014; in this case: 50 Army of Awesome tweets.
Congrats to all SUMO Army of Awesome 2014 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2014 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO L10n 2014 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2014; in this case: 10 approved translation edits of the SUMO Knowledge Base.
Congrats to all SUMO L10n 2014 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2014 KB Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO KB 2014 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2014; in this case: 10 approved edits of the English SUMO Knowledge Base.
Congrats to all SUMO KB 2014 badge earners for advancing the Mozilla Mission""")
pgettext("DB: kbadge.Badge.title", """2014 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Forum 2014 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2014 in this case: 30 replies in the English SUMO Forum.
Congrats to all SUMO Forum 2014 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """Firefox 29 Launch Team""")
pgettext("DB: kbadge.Badge.description", """Awarded to support contributors who contributed (KB article documentation, answering Forum Questions, localizing KB article documentation, tweets, etc) to the launch of Firefox 29, thanks!
Firefox 29 features:
1. Firefox Desktop: Australis new look and feel
AND Firefox Accounts based sync
2. Firefox for Android: Firefox Accounts based sync
MOAR:
https://sumo.etherpad.mozilla.org/sumo-australis-badges""")
pgettext("DB: kbadge.Badge.title", """2008 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved translations edits during 2008.""")
pgettext("DB: kbadge.Badge.title", """2009 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved translations edits during 2009.""")
pgettext("DB: kbadge.Badge.title", """2012 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Army of Awesome 2012 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2012; in this case: 50 Army of Awesome tweets.
Congrats to all SUMO Army of Awesome 2012 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2013 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Army of Awesome 2013 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2013; in this case: 50 Army of Awesome tweets.
Congrats to all SUMO Army of Awesome 2013 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2010 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Army of Awesome 2010 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2010; in this case: 50 Army of Awesome tweets.
Congrats to all SUMO Army of Awesome 2010 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2011 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Army of Awesome 2011 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2011; in this case: 50 Army of Awesome tweets.
Congrats to all SUMO Army of Awesome 2011 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2012 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Forum 2012 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2012 in this case: 30 replies in the English SUMO Forum.
Congrats to all SUMO Forum 2012 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2010 KB Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO KB 2010 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2010; in this case: 10 approved edits of the English SUMO Knowledge Base.
Congrats to all SUMO KB 2010 badge earners for advancing the Mozilla Mission""")
pgettext("DB: kbadge.Badge.title", """2010 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO L10n 2010 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2010; in this case: 10 approved translation edits of the SUMO Knowledge Base.
Congrats to all SUMO L10n 2010 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2010 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Forum 2010 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2010 in this case: 30 replies in the English SUMO Forum.
Congrats to all SUMO Forum 2010 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2011 KB Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO KB 2011 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2011; in this case: 10 approved edits of the English SUMO Knowledge Base.
Congrats to all SUMO KB 2011 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2011 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO L10n 2011 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2011; in this case: 10 approved translation edits of the SUMO Knowledge Base.
Congrats to all SUMO L10n 2011 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2011 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Forum 2011 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2011 in this case: 30 replies in the English SUMO Forum.
Congrats to all SUMO Forum 2011 badge earners for advancing the Mozilla Mission!
""")
pgettext("DB: kbadge.Badge.title", """2012 KB Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO KB 2012 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2012; in this case: 10 approved edits of the English SUMO Knowledge Base.
Congrats to all SUMO KB 2012 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2013 KB Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO KB 2013 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2013 in this case: 10 approved edits of the English SUMO Knowledge Base.
Congrats to all SUMO KB 2013 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2013 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO L10n 2013 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2013 in this case: 10 approved translation edits of the SUMO Knowledge Base.
Congrats to all SUMO L10n 2013 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2013 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Forum 2013 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2013 in this case: 30 replies in the English SUMO Forum.
Congrats to all SUMO Forum 2013 badge earners for advancing the Mozilla Mission""")
pgettext("DB: kbadge.Badge.title", """2012 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO L10n 2012 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2012; in this case: 10 approved translation edits of the SUMO Knowledge Base.
Congrats to all SUMO L10n 2012 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """Kitsune Contributor""")
pgettext("DB: kbadge.Badge.description", """Badge awarded to those who have contributed to the Kitsune code base.""")
pgettext("DB: products.Topic.title", """Learn the Basics: get started""")
pgettext("DB: products.Topic.description", """Learn all you need to know to get started with Firefox.""")
pgettext("DB: products.Topic.title", """Bookmarks and tabs""")
pgettext("DB: products.Topic.description", """Access and organize your favorite webpages easily with bookmarks and tabs""")
pgettext("DB: products.Topic.title", """Basic browsing""")
pgettext("DB: products.Topic.description", """Search and navigate easily with these essential features""")
pgettext("DB: products.Topic.title", """Import settings from other browsers""")
pgettext("DB: products.Topic.description", """Learn how to import or export your information between Firefox and another browser""")
pgettext("DB: products.Topic.title", """Video, audio and interactive settings""")
pgettext("DB: products.Topic.description", """Change how Firefox handles videos, animations, music and other interactive content""")
pgettext("DB: products.Topic.title", """How to use Firefox""")
pgettext("DB: products.Topic.description", """How to browse, search and customize your settings""")
pgettext("DB: products.Topic.title", """Download, install and migration""")
pgettext("DB: products.Topic.description", """Learn how to download Firefox on your desktop devices or move information to and from other browsers.""")
pgettext("DB: products.Topic.title", """Tips and tricks""")
pgettext("DB: products.Topic.description", """Go beyond the basics with these shortcuts and other tips.""")
pgettext("DB: products.Topic.title", """Install and update""")
pgettext("DB: products.Topic.description", """How to install Firefox and keep it up to date""")
pgettext("DB: products.Topic.title", """Display and appearance""")
pgettext("DB: products.Topic.description", """Learn how to change your toolbar, font sizes and browser colors""")
pgettext("DB: products.Topic.title", """Install and update""")
pgettext("DB: products.Topic.description", """Download or update Firefox for Windows, Mac and Linux.""")
pgettext("DB: products.Topic.title", """Firefox Sync""")
pgettext("DB: products.Topic.description", """Firefox Sync settings""")
pgettext("DB: products.Topic.title", """Sync and save""")
pgettext("DB: products.Topic.description", """Sync information on all your devices""")
pgettext("DB: products.Topic.title", """Manage add-ons""")
pgettext("DB: products.Topic.description", """Enhance Firefox's functionality and appearance with add-ons""")
pgettext("DB: products.Topic.title", """Sync, share and save""")
pgettext("DB: products.Topic.description", """Sync browsing information and content across multiple devices with Firefox Accounts.""")
pgettext("DB: products.Topic.title", """Firefox settings""")
pgettext("DB: products.Topic.description", """Privacy and personalization""")
pgettext("DB: products.Topic.title", """Firefox Hello""")
pgettext("DB: products.Topic.description", """Have video or voice conversations using the Firefox browser""")
pgettext("DB: products.Topic.title", """Chat and share""")
pgettext("DB: products.Topic.description", """Connect on video and share pages with your network""")
pgettext("DB: products.Topic.title", """Personalize Firefox""")
pgettext("DB: products.Topic.description", """Make Firefox yours with these customization options.""")
pgettext("DB: products.Topic.title", """Customize controls, options and add-ons""")
pgettext("DB: products.Topic.description", """Make Firefox yours by adding and managing the features that you want.""")
pgettext("DB: products.Topic.title", """Personalize Firefox""")
pgettext("DB: products.Topic.description", """Change Firefox's appearance, behavior and settings.""")
pgettext("DB: products.Topic.title", """Privacy and security settings""")
pgettext("DB: products.Topic.description", """Learn how to keep your information safe and secure with Firefox's private browsing, password features and other security settings.""")
pgettext("DB: products.Topic.title", """Do more with apps""")
pgettext("DB: products.Topic.description", """Install open apps from the Marketplace to add more fun and functionality to your device""")
pgettext("DB: products.Topic.title", """Protect your privacy""")
pgettext("DB: products.Topic.description", """Keep your information safe from prying eyes with the latest privacy and security features.""")
pgettext("DB: products.Topic.title", """Get community support""")
pgettext("DB: products.Topic.description", """Get community support""")
pgettext("DB: products.Topic.title", """Manage preferences and add-ons""")
pgettext("DB: products.Topic.description", """Make Firefox yours through customization settings and add-ons""")
pgettext("DB: products.Topic.title", """Fix problems""")
pgettext("DB: products.Topic.description", """Troubleshoot slowness, crashing and error messages.""")
pgettext("DB: products.Topic.title", """Fix slowness, crashing, error messages and other problems""")
pgettext("DB: products.Topic.description", """Fix slowness, crashing, error messages and other problems""")
pgettext("DB: products.Topic.title", """Advanced and experimental features""")
pgettext("DB: products.Topic.description", """Learn tips beyond the basics and try features before they're released to the public.""")
pgettext("DB: products.Topic.title", """Tab basics""")
pgettext("DB: products.Topic.description", """Tab basics""")
pgettext("DB: products.Topic.title", """Firefox versions and languages""")
pgettext("DB: products.Topic.description", """Firefox versions and languages""")
pgettext("DB: products.Topic.title", """Copy your personal information from one browser to another""")
pgettext("DB: products.Topic.description", """Copy your personal information from one browser to another""")
pgettext("DB: products.Topic.title", """Cookies and cache""")
pgettext("DB: products.Topic.description", """Control the information that Firefox saves""")
pgettext("DB: products.Topic.title", """Passwords, forms, search, and history - control what Firefox suggests""")
pgettext("DB: products.Topic.description", """Passwords, forms, search, and history - control what Firefox suggests""")
pgettext("DB: products.Topic.title", """Firefox controls and buttons""")
pgettext("DB: products.Topic.description", """Firefox controls and buttons""")
pgettext("DB: products.Topic.title", """Tab settings""")
pgettext("DB: products.Topic.description", """Tab settings""")
pgettext("DB: products.Topic.title", """Customize Firefox with add-ons, plugins, and extensions""")
pgettext("DB: products.Topic.description", """Customize Firefox with add-ons, plugins, and extensions""")
pgettext("DB: products.Topic.title", """Firefox options, preferences and settings""")
pgettext("DB: products.Topic.description", """Firefox options, preferences and settings""")
pgettext("DB: products.Topic.title", """Bookmark options""")
pgettext("DB: products.Topic.description", """Bookmark options""")
pgettext("DB: products.Topic.title", """Fix problems with websites (Facebook, YouTube, webmail etc.)""")
pgettext("DB: products.Topic.description", """Fix problems with websites (Facebook, YouTube, webmail etc.)""")
pgettext("DB: products.Topic.title", """Error messages: what they mean and how to fix""")
pgettext("DB: products.Topic.description", """How to troubleshoot error messages on Firefox""")
pgettext("DB: products.Topic.title", """Unblock Firefox from connecting to the Internet""")
pgettext("DB: products.Topic.description", """Unblock Firefox from connecting to the Internet""")
pgettext("DB: products.Topic.title", """Procedures to diagnose and fix problems""")
pgettext("DB: products.Topic.description", """Procedures to diagnose and fix problems""")
pgettext("DB: products.Topic.title", """Videos, sound, pictures and animations don't work""")
pgettext("DB: products.Topic.description", """Videos, sound, pictures and animations don't work""")
pgettext("DB: products.Topic.title", """Firefox is slow or stops working""")
pgettext("DB: products.Topic.description", """Slowness or hanging""")
pgettext("DB: products.Topic.title", """Firefox crashes""")
pgettext("DB: products.Topic.description", """Crashing""")
pgettext("DB: products.Topic.title", """Firefox won't save settings or remember information""")
pgettext("DB: products.Topic.description", """Firefox won't save settings or remember information""")
pgettext("DB: products.Topic.title", """Problems with add-ons, plugins or unwanted software""")
pgettext("DB: products.Topic.description", """Problems with add-ons, plugins or unwanted software""")
pgettext("DB: products.Topic.title", """Mozilla Persona""")
pgettext("DB: products.Topic.description", """Mozilla Persona""")
pgettext("DB: products.Topic.title", """Hot topics""")
pgettext("DB: products.Topic.description", """Hot topics""")
pgettext("DB: products.Topic.title", """Other""")
pgettext("DB: products.Topic.description", """Other""")
pgettext("DB: products.Topic.title", """What's New""")
pgettext("DB: products.Topic.description", """Where to find release notes and upcoming features.""")
pgettext("DB: products.Topic.title", """Windows""")
pgettext("DB: products.Topic.description", """Deploying Firefox on Windows computers.""")
pgettext("DB: products.Topic.title", """Manage certificates""")
pgettext("DB: products.Topic.description", """Set up certificates on Firefox for your organization.""")
pgettext("DB: products.Topic.title", """Customization of Firefox in an enterprise environment""")
pgettext("DB: products.Topic.description", """Customization of Firefox in an enterprise environment""")
pgettext("DB: products.Topic.title", """Installation""")
pgettext("DB: products.Topic.description", """How to install Firefox for Enterprise""")
pgettext("DB: products.Topic.title", """Explore""")
pgettext("DB: products.Topic.description", """Learn about Firefox for Enterprise""")
pgettext("DB: products.Topic.title", """Mac""")
pgettext("DB: products.Topic.description", """Deploy Firefox on your organization's Mac computers""")
pgettext("DB: products.Topic.title", """Policies overview""")
pgettext("DB: products.Topic.description", """How to set up policies on Firefox for your organization.""")
pgettext("DB: products.Topic.title", """Deploy""")
pgettext("DB: products.Topic.description", """Deployment of Firefox in an enterprise environment""")
pgettext("DB: products.Topic.title", """Manage updates, policies & customization""")
pgettext("DB: products.Topic.description", """Policies for Firefox for Enterprise""")
pgettext("DB: products.Topic.title", """Autoconfiguration""")
pgettext("DB: products.Topic.description", """How to configure Firefox for Enterprise""")
pgettext("DB: products.Topic.title", """Linux""")
pgettext("DB: products.Topic.description", """Deploy Firefox on your organization's Linux machines.""")
pgettext("DB: products.Topic.title", """Manage settings via policy""")
pgettext("DB: products.Topic.description", """Change Firefox's settings using policies.""")
pgettext("DB: products.Topic.title", """Manage add-ons""")
pgettext("DB: products.Topic.description", """Working with add-ons on Firefox for your organization.""")
pgettext("DB: products.Topic.title", """Install and update""")
pgettext("DB: products.Topic.description", """Download and install the mobile app on your device.""")
pgettext("DB: products.Topic.title", """Browsing""")
pgettext("DB: products.Topic.description", """Surf and navigate the Web on Firefox Preview""")
pgettext("DB: products.Topic.title", """How do I use Firefox Preview?""")
pgettext("DB: products.Topic.description", """Get help with using features in Firefox Preview.""")
pgettext("DB: products.Topic.title", """Library""")
pgettext("DB: products.Topic.description", """Manage bookmarks and history""")
pgettext("DB: products.Topic.title", """Sync""")
pgettext("DB: products.Topic.description", """Sync your browsing information across other devices.""")
pgettext("DB: products.Topic.title", """Privacy and security""")
pgettext("DB: products.Topic.description", """Protect your privacy on Firefox Preview.""")
pgettext("DB: products.Topic.title", """Settings and preferences""")
pgettext("DB: products.Topic.description", """Manage themes and search settings""")
pgettext("DB: products.Topic.title", """Fix problems with Firefox Preview""")
pgettext("DB: products.Topic.description", """Troubleshoot issues with Firefox Preview""")
pgettext("DB: products.Topic.title", """Advanced Settings""")
pgettext("DB: products.Topic.description", """Do more with Firefox Preview""")
pgettext("DB: products.Topic.title", """Basic browsing""")
pgettext("DB: products.Topic.description", """Search and navigate easily with these essential features""")
pgettext("DB: products.Topic.title", """How to use Firefox for Android""")
pgettext("DB: products.Topic.description", """How to use Firefox for Android""")
pgettext("DB: products.Topic.title", """What's new in Firefox for Android""")
pgettext("DB: products.Topic.description", """See what new features are available in each release""")
pgettext("DB: products.Topic.title", """Get started""")
pgettext("DB: products.Topic.description", """Firefox OS basics""")
pgettext("DB: products.Topic.title", """Basic Features""")
pgettext("DB: products.Topic.description", """Learn the basic functionality for your Firefox OS phone. """)
pgettext("DB: products.Topic.title", """Download and Manage Apps""")
pgettext("DB: products.Topic.description", """Download apps from the Marketplace""")
pgettext("DB: products.Topic.title", """Date and Time""")
pgettext("DB: products.Topic.description", """Setting a date and time on your Firefox OS phone""")
pgettext("DB: products.Topic.title", """Display""")
pgettext("DB: products.Topic.description", """Customize your screen on your Firefox OS device.""")
pgettext("DB: products.Topic.title", """Install and update""")
pgettext("DB: products.Topic.description", """How to install and keep Firefox for Android up to date""")
pgettext("DB: products.Topic.title", """Calling and Contacts""")
pgettext("DB: products.Topic.description", """Learn how to add and manage contacts, as well as make one-to-one or conference calls on your Firefox OS phone.""")
pgettext("DB: products.Topic.title", """Save, share and sync""")
pgettext("DB: products.Topic.description", """Save, share and synchronize content with other devices""")
pgettext("DB: products.Topic.title", """Changing your settings""")
pgettext("DB: products.Topic.description", """Change Firefox's behavior.""")
pgettext("DB: products.Topic.title", """Email and Messages""")
pgettext("DB: products.Topic.description", """Keep in touch with your contacts through email and messaging.""")
pgettext("DB: products.Topic.title", """Cast to your TV""")
pgettext("DB: products.Topic.description", """Learn how to view content on another screen""")
pgettext("DB: products.Topic.title", """Popular articles""")
pgettext("DB: products.Topic.description", """Popular tips and solutions for Firefox for Android""")
pgettext("DB: products.Topic.title", """Music, Photos and Video""")
pgettext("DB: products.Topic.description", """Take pictures, record videos and listen to music on your Firefox OS phone.""")
pgettext("DB: products.Topic.title", """Protect your privacy""")
pgettext("DB: products.Topic.description", """Control how your information is saved or tracked""")
pgettext("DB: products.Topic.title", """Marketplace""")
pgettext("DB: products.Topic.description", """How to download, manage and use your favorite apps on your Firefox OS phone.""")
pgettext("DB: products.Topic.title", """Customize settings and preferences""")
pgettext("DB: products.Topic.description", """Make Firefox for Android yours with these customization options""")
pgettext("DB: products.Topic.title", """Privacy and security""")
pgettext("DB: products.Topic.description", """Keep your information safe with Firefox OS locks, privacy features and more.""")
pgettext("DB: products.Topic.title", """Do more with apps""")
pgettext("DB: products.Topic.description", """Learn to find and install open apps to add more fun and functionality to your device""")
pgettext("DB: products.Topic.title", """Settings""")
pgettext("DB: products.Topic.description", """Learn how to configure the Internet connection, display and time on your Firefox OS device.""")
pgettext("DB: products.Topic.title", """Internet and Connections""")
pgettext("DB: products.Topic.description", """Learn more about Wi-Fi, Bluetooth and NFC connections.""")
pgettext("DB: products.Topic.title", """Fix slowness, crashing, error messages and other problems""")
pgettext("DB: products.Topic.description", """Fix slowness, crashing, error messages and other problems""")
pgettext("DB: products.Topic.title", """Fix problems""")
pgettext("DB: products.Topic.description", """Learn how to troubleshoot issues on your Firefox OS phone.""")
pgettext("DB: products.Topic.title", """Get community support""")
pgettext("DB: products.Topic.description", """Get community support""")
pgettext("DB: products.Topic.title", """Learn the Basics: get started""")
pgettext("DB: products.Topic.description", """Learn all you need to know to get Firefox for Android up and running.""")
pgettext("DB: products.Topic.title", """View all Firefox OS articles""")
pgettext("DB: products.Topic.description", """View a list of all Firefox OS articles""")
pgettext("DB: products.Topic.title", """Download, install and migration""")
pgettext("DB: products.Topic.description", """Learn how to install and transfer information to Firefox for Android.""")
pgettext("DB: products.Topic.title", """Tips and tricks""")
pgettext("DB: products.Topic.description", """Tips and tricks""")
pgettext("DB: products.Topic.title", """Use bookmarks""")
pgettext("DB: products.Topic.description", """The basics of using bookmarks""")
pgettext("DB: products.Topic.title", """Firefox Sync settings""")
pgettext("DB: products.Topic.description", """Firefox Sync settings""")
pgettext("DB: products.Topic.title", """Tab basics""")
pgettext("DB: products.Topic.description", """Tab basics""")
pgettext("DB: products.Topic.title", """Privacy and security settings""")
pgettext("DB: products.Topic.description", """Keep your information safe with Firefox for Android's privacy and security settings.""")
pgettext("DB: products.Topic.title", """Customize controls, options and add-ons""")
pgettext("DB: products.Topic.description", """Make Firefox for Android work the way you want through customization.""")
pgettext("DB: products.Topic.title", """Cookies""")
pgettext("DB: products.Topic.description", """Cookies""")
pgettext("DB: products.Topic.title", """Working with messages""")
pgettext("DB: products.Topic.description", """Firefox OS SMS & email""")
pgettext("DB: products.Topic.title", """Firefox controls and buttons""")
pgettext("DB: products.Topic.description", """Firefox controls and buttons""")
pgettext("DB: products.Topic.title", """Customize Firefox with add-ons, plugins, and extensions""")
pgettext("DB: products.Topic.description", """Customize Firefox with add-ons, plugins, and extensions""")
pgettext("DB: products.Topic.title", """Fix problems with websites (Facebook, YouTube, webmail etc.)""")
pgettext("DB: products.Topic.description", """Fix problems with websites (Facebook, YouTube, webmail etc.)""")
pgettext("DB: products.Topic.title", """Firefox crashes""")
pgettext("DB: products.Topic.description", """Crashing""")
pgettext("DB: products.Topic.title", """Procedures to diagnose and fix problems""")
pgettext("DB: products.Topic.description", """Procedures to diagnose and fix problems""")
pgettext("DB: products.Topic.title", """Mozilla Persona""")
pgettext("DB: products.Topic.description", """Mozilla Persona""")
pgettext("DB: products.Topic.title", """Marketplace""")
pgettext("DB: products.Topic.description", """Firefox Marketplace""")
pgettext("DB: products.Topic.title", """Other""")
pgettext("DB: products.Topic.description", """Other""")
pgettext("DB: products.Topic.title", """Hot topics""")
pgettext("DB: products.Topic.description", """Hot topics""")
pgettext("DB: products.Topic.title", """Install and Update""")
pgettext("DB: products.Topic.description", """Install and keep Firefox up to date on your iPad, iPhone or iPod Touch.""")
pgettext("DB: products.Topic.title", """Reader View and List""")
pgettext("DB: products.Topic.description", """Read and save web pages in a clutter-free, reader-friendly view""")
pgettext("DB: products.Topic.title", """Basic browsing""")
pgettext("DB: products.Topic.description", """How to use bookmarks, tabs and basic Firefox features on your iOS device""")
pgettext("DB: products.Topic.title", """History""")
pgettext("DB: products.Topic.description", """Change your history settings on Firefox for iOS""")
pgettext("DB: products.Topic.title", """How to use Firefox for iOS""")
pgettext("DB: products.Topic.description", """General usage questions""")
pgettext("DB: products.Topic.title", """What's new in Firefox for iOS""")
pgettext("DB: products.Topic.description", """See what features are available in each release.""")
pgettext("DB: products.Topic.title", """Bookmarks and tabs""")
pgettext("DB: products.Topic.description", """Access websites easily with bookmarks and tab features""")
pgettext("DB: products.Topic.title", """Search""")
pgettext("DB: products.Topic.description", """Customize your search settings in Firefox for iOS""")
pgettext("DB: products.Topic.title", """Firefox for iOS is not working as expected""")
pgettext("DB: products.Topic.description", """Troubleshoot problems with Firefox for iOS.""")
pgettext("DB: products.Topic.title", """Privacy""")
pgettext("DB: products.Topic.description", """Protect your information with Firefox's privacy settings on iOS""")
pgettext("DB: products.Topic.title", """Sync, save and share""")
pgettext("DB: products.Topic.description", """Share web pages on Firefox for iOS""")
pgettext("DB: products.Topic.title", """Customize preferences""")
pgettext("DB: products.Topic.description", """Customize preferences for Firefox for iOS""")
pgettext("DB: products.Topic.title", """Crashes, errors and other issues""")
pgettext("DB: products.Topic.description", """Troubleshoot error message on Firefox for iOS""")
pgettext("DB: products.Topic.title", """Firefox for Fire TV""")
pgettext("DB: products.Topic.description", """Browser for the Amazon Fire TV.""")
pgettext("DB: products.Topic.title", """Firefox for Echo Show""")
pgettext("DB: products.Topic.description", """Browser for the Amazon Echo Show""")
pgettext("DB: products.Topic.title", """Get started""")
pgettext("DB: products.Topic.description", """Get Started with Firefox for Fire TV""")
pgettext("DB: products.Topic.title", """Get started""")
pgettext("DB: products.Topic.description", """Basics for using Firefox Private Network.""")
pgettext("DB: products.Topic.title", """Fix problems""")
pgettext("DB: products.Topic.description", """Troubleshoot problems with Firefox Fire TV""")
pgettext("DB: products.Topic.title", """Manage account and settings""")
pgettext("DB: products.Topic.description", """Change account and settings for Private Network.""")
pgettext("DB: products.Topic.title", """Fix problems""")
pgettext("DB: products.Topic.description", """Troubleshoot issues for Private Network""")
pgettext("DB: products.Topic.title", """Popcorn Maker""")
pgettext("DB: products.Topic.description", """Learn how to remix web video, audio and images into mashups that you can embed on other websites. """)
pgettext("DB: products.Topic.title", """Webmaker for Android""")
pgettext("DB: products.Topic.description", """Get help with the Webmaker app for Android.""")
pgettext("DB: products.Topic.title", """Intro to Open Badges""")
pgettext("DB: products.Topic.description", """Learn the basic about Open Badges""")
pgettext("DB: products.Topic.title", """Technical""")
pgettext("DB: products.Topic.description", """Find solutions for how to use the Firefox Private Network VPN""")
pgettext("DB: products.Topic.title", """Thimble""")
pgettext("DB: products.Topic.description", """Learn how to create and share your own webpages quickly and easily.""")
pgettext("DB: products.Topic.title", """BadgeKit""")
pgettext("DB: products.Topic.description", """Learn how to create, assess and issue badges""")
pgettext("DB: products.Topic.title", """Accounts""")
pgettext("DB: products.Topic.description", """Find solutions on managing your account""")
pgettext("DB: products.Topic.title", """X-Ray Goggles""")
pgettext("DB: products.Topic.description", """Learn how to inspect the code behind every webpage.""")
pgettext("DB: products.Topic.title", """Get Involved""")
pgettext("DB: products.Topic.description", """Help the Open Badges community""")
pgettext("DB: products.Topic.title", """Payments""")
pgettext("DB: products.Topic.description", """Manage your payment and subscription""")
pgettext("DB: products.Topic.title", """Get the most from webmaker.org""")
pgettext("DB: products.Topic.description", """Help or get help on a Webmaker project.""")
pgettext("DB: products.Topic.title", """Earn Badges""")
pgettext("DB: products.Topic.description", """Earn Badges for the skills you learn online and offline""")
pgettext("DB: products.Topic.title", """Troubleshooting""")
pgettext("DB: products.Topic.description", """Fix problems with Firefox Private Network VPN""")
pgettext("DB: products.Topic.title", """Events and help for Mentors""")
pgettext("DB: products.Topic.description", """Help teach digital skills and share creative ways of teaching technology.""")
pgettext("DB: products.Topic.title", """Issue Badges""")
pgettext("DB: products.Topic.description", """Issue digital badges to acknowledge new skills and achievements""")
pgettext("DB: products.Topic.title", """Display Badges""")
pgettext("DB: products.Topic.description", """Display your digital badges on your social networks, job sites and your own website.""")
pgettext("DB: products.Topic.title", """Knowledge Base""")
pgettext("DB: products.Topic.description", """Windows 8 Touch support articles""")
pgettext("DB: products.Topic.title", """Pocket Basics""")
pgettext("DB: products.Topic.description", """New to Pocket? Start here.""")
pgettext("DB: products.Topic.title", """Install and set up""")
pgettext("DB: products.Topic.description", """Sync your logins across Firefox and your apps.""")
pgettext("DB: products.Topic.title", """About Data Sharing""")
pgettext("DB: products.Topic.description", """In order to process or provide our products and services to you, we share your information with the following business partners. These entities are contractually obligated to handle the data in ways that are approved by Mozilla.""")
pgettext("DB: products.Topic.title", """Pocket for Mobile""")
pgettext("DB: products.Topic.description", """How to use Pocket on your iPhone, iPad, Android or Kobo.""")
pgettext("DB: products.Topic.title", """Manage settings and logins""")
pgettext("DB: products.Topic.description", """Setting up your device to work with Firefox Lockwise""")
pgettext("DB: products.Topic.title", """Managing Your Data""")
pgettext("DB: products.Topic.description", """Learn how to manage your data (including deleting) for specific products or services.""")
pgettext("DB: products.Topic.title", """Pocket for your Computer""")
pgettext("DB: products.Topic.description", """Using Pocket on the Web.""")
pgettext("DB: products.Topic.title", """Fix problems""")
pgettext("DB: products.Topic.description", """Troubleshoot issues with Firefox Lockwise""")
pgettext("DB: products.Topic.title", """Sensible Settings""")
pgettext("DB: products.Topic.description", """Give our users actionable and informed choices by informing and educating at the point of collection and providing a choice to opt-out whenever possible. """)
pgettext("DB: products.Topic.title", """Advanced""")
pgettext("DB: products.Topic.description", """Information for Developers and Beta users.""")
pgettext("DB: products.Topic.title", """Defense in Depth""")
pgettext("DB: products.Topic.description", """Make privacy a key factor in selecting and interacting with partners. """)
pgettext("DB: products.Topic.title", """How does it work?""")
pgettext("DB: products.Topic.description", """Basics to get started with Firefox for Windows 8 Touch.""")
pgettext("DB: products.Topic.title", """Settings""")
pgettext("DB: products.Topic.description", """How to configure and customize Firefox for Windows 8 Touch.""")
pgettext("DB: products.Topic.title", """Problems with websites""")
pgettext("DB: products.Topic.description", """Problems with websites that don't work well in Firefox for Windows 8 Touch.""")
pgettext("DB: products.Topic.title", """Other""")
pgettext("DB: products.Topic.description", """Other questions with Firefox for Windows 8 Touch.""")
pgettext("DB: products.Topic.title", """Get started""")
pgettext("DB: products.Topic.description", """How to use Firefox Reality""")
pgettext("DB: products.Topic.title", """Troubleshooting""")
pgettext("DB: products.Topic.description", """Fix problems with Firefox Reality""")
pgettext("DB: products.Topic.title", """Learn the Basics: get started""")
pgettext("DB: products.Topic.description", """Learn the Basics: get started""")
pgettext("DB: products.Topic.title", """Get community support""")
pgettext("DB: products.Topic.description", """Get community support""")
pgettext("DB: products.Topic.title", """Mozilla Persona""")
pgettext("DB: products.Topic.description", """Mozilla Persona""")
pgettext("DB: products.Topic.title", """Hot topics""")
pgettext("DB: products.Topic.description", """Hot topics""")
pgettext("DB: products.Topic.title", """Get Started""")
pgettext("DB: products.Topic.description", """Klar verwenden""")
pgettext("DB: products.Topic.title", """Firefox Klar for iOS""")
pgettext("DB: products.Topic.description", """Privacy browser for iOS""")
pgettext("DB: products.Topic.title", """Get started""")
pgettext("DB: products.Topic.description", """Everything you need to know to use Firefox Lite.""")
pgettext("DB: products.Topic.title", """Features""")
pgettext("DB: products.Topic.description", """Getting started with Hubs""")
pgettext("DB: products.Topic.title", """Firefox Klar for Android""")
pgettext("DB: products.Topic.description", """Privacy browser for Android""")
pgettext("DB: products.Topic.title", """Preferences""")
pgettext("DB: products.Topic.description", """Customize Firefox Lite to your desired settings""")
pgettext("DB: products.Topic.title", """Controls""")
pgettext("DB: products.Topic.description", """How to navigate Hubs""")
pgettext("DB: products.Topic.title", """Fix problems""")
pgettext("DB: products.Topic.description", """Troubleshoot problems with Firefox Lite.""")
pgettext("DB: products.Topic.title", """Moderation""")
pgettext("DB: products.Topic.description", """Tools for making Hubs a good experience for all.""")
pgettext("DB: products.Topic.title", """Firefox Focus for iOS""")
pgettext("DB: products.Topic.description", """Firefox Focus for iOS""")
pgettext("DB: products.Topic.title", """Firefox Focus for Android""")
pgettext("DB: products.Topic.description", """Privacy browser for Android""")
pgettext("DB: products.Topic.title", """Get started""")
pgettext("DB: products.Topic.description", """Learn the basics about ScreenshotGo""")
pgettext("DB: products.Topic.title", """Learn the Basics. Get Started""")
pgettext("DB: products.Topic.description", """Learn the Basics. Get Started""")
pgettext("DB: products.Topic.title", """Tips and tricks""")
pgettext("DB: products.Topic.description", """Learn tips and shortcuts to help you work faster""")
pgettext("DB: products.Topic.title", """Set up email""")
pgettext("DB: products.Topic.description", """Add and configure your email accounts on Thunderbird""")
pgettext("DB: products.Topic.title", """Install, Migrate and Update""")
pgettext("DB: products.Topic.description", """How to install and keep Thunderbird up to date""")
pgettext("DB: products.Topic.title", """Read, send and organize emails""")
pgettext("DB: products.Topic.description", """Learn how to manage your email messages""")
pgettext("DB: products.Topic.title", """Emails""")
pgettext("DB: products.Topic.description", """Learn to set up accounts, read, send and manage emails""")
pgettext("DB: products.Topic.title", """News Feeds (RSS), Blogs and Social""")
pgettext("DB: products.Topic.description", """Stay up to date with news feeds, blogs and social features""")
pgettext("DB: products.Topic.title", """Contacts""")
pgettext("DB: products.Topic.description", """How to use the address book on Thunderbird""")
pgettext("DB: products.Topic.title", """Calendar""")
pgettext("DB: products.Topic.description", """Related to the Lightning add-on for Calendar""")
pgettext("DB: products.Topic.title", """Customize controls, options and add-ons""")
pgettext("DB: products.Topic.description", """Customize controls, options and add-ons""")
pgettext("DB: products.Topic.title", """Thunderbird versions and languages""")
pgettext("DB: products.Topic.description", """Thunderbird versions and languages""")
pgettext("DB: products.Topic.title", """Passwords, forms and search""")
pgettext("DB: products.Topic.description", """Passwords, forms and search""")
pgettext("DB: products.Topic.title", """Thunderbird controls and buttons """)
pgettext("DB: products.Topic.description", """Learn all about Thunderbird controls and functionality.""")
pgettext("DB: products.Topic.title", """Fix problems with email providers (gmail, Yahoo, etc.) """)
pgettext("DB: products.Topic.description", """Learn how to fix problems with Gmail, Yahoo and other email providers""")
pgettext("DB: products.Topic.title", """Download, install and migration""")
pgettext("DB: products.Topic.description", """Download, install and Migration""")
pgettext("DB: products.Topic.title", """Copy your personal information from one Thunderbird to another""")
pgettext("DB: products.Topic.description", """Copy your personal information from one Thunderbird to another""")
pgettext("DB: products.Topic.title", """Tab settings""")
pgettext("DB: products.Topic.description", """Tab settings""")
pgettext("DB: products.Topic.title", """Error messages: what they mean and how to fix""")
pgettext("DB: products.Topic.description", """Error messages: what they mean and how to fix""")
pgettext("DB: products.Topic.title", """Privacy and security settings""")
pgettext("DB: products.Topic.description", """Keep your information safe with password and security settings""")
pgettext("DB: products.Topic.title", """Customize Thunderbird with add-ons, plugins, and extensions""")
pgettext("DB: products.Topic.description", """Customize Thunderbird with add-ons, plugins, and extensions""")
pgettext("DB: products.Topic.title", """Unblock Thunderbird from connecting to the Internet""")
pgettext("DB: products.Topic.description", """Unblock Thunderbird from connecting to the Internet""")
pgettext("DB: products.Topic.title", """Thunderbird options, preferences and settings """)
pgettext("DB: products.Topic.description", """Thunderbird options, preferences and settings """)
pgettext("DB: products.Topic.title", """Procedures to diagnose and fix problems""")
pgettext("DB: products.Topic.description", """Procedures to diagnose and fix problems""")
pgettext("DB: products.Topic.title", """Fix slowness, crashing, error messages and other problems""")
pgettext("DB: products.Topic.description", """Troubleshoot error messages on Thunderbird.""")
pgettext("DB: products.Topic.title", """Thunderbird is slow or stops working""")
pgettext("DB: products.Topic.description", """Thunderbird is slow or stops working""")
pgettext("DB: products.Topic.title", """Thunderbird crashes""")
pgettext("DB: products.Topic.description", """Thunderbird crashes""")
pgettext("DB: products.Topic.title", """Get community support""")
pgettext("DB: products.Topic.description", """Get community support""")
pgettext("DB: products.Topic.title", """Thunderbird won't save settings or remember information""")
pgettext("DB: products.Topic.description", """Thunderbird won't save settings or remember information""")
pgettext("DB: products.Topic.title", """Problems with add-ons, plugins or unwanted software""")
pgettext("DB: products.Topic.description", """Problems with add-ons, plugins or unwanted software""")
pgettext("DB: products.Topic.title", """How To""")
pgettext("DB: products.Topic.description", """Articles that tell you how you can do more with Thunderbird""")
pgettext("DB: products.Topic.title", """Other""")
pgettext("DB: products.Topic.description", """Other""")
pgettext("DB: products.Product.title", """Firefox""")
pgettext("DB: products.Product.description", """Web browser for Windows, Mac and Linux""")
pgettext("DB: products.Product.title", """Firefox for Enterprise""")
pgettext("DB: products.Product.description", """Firefox Quantum for businesses""")
pgettext("DB: products.Product.title", """Firefox Preview""")
pgettext("DB: products.Product.description", """Early version of an experimental Firefox browser for Android.""")
pgettext("DB: products.Product.title", """Firefox for Android""")
pgettext("DB: products.Product.description", """Web browser for Android smartphones and tablets""")
pgettext("DB: products.Product.title", """Firefox OS""")
pgettext("DB: products.Product.description", """Mobile OS for smartphones""")
pgettext("DB: products.Product.title", """Firefox for iOS""")
pgettext("DB: products.Product.description", """Firefox for iPhone, iPad and iPod touch devices""")
pgettext("DB: products.Product.title", """Firefox for Amazon Devices""")
pgettext("DB: products.Product.description", """Browser for Amazon devices""")
pgettext("DB: products.Product.title", """Firefox for Fire TV""")
pgettext("DB: products.Product.description", """Browser for Amazon Fire TV""")
pgettext("DB: products.Product.title", """Firefox Private Network Browser-level protection""")
pgettext("DB: products.Product.description", """Browse securely on public Wi-Fi""")
pgettext("DB: products.Product.title", """Firefox Private Network Device-level Protection""")
pgettext("DB: products.Product.description", """VPN for Windows 10 and Android devices""")
pgettext("DB: products.Product.title", """Open Badges""")
pgettext("DB: products.Product.description", """A new online standard to recognize and verify learning""")
pgettext("DB: products.Product.title", """Webmaker""")
pgettext("DB: products.Product.description", """Webmaker and other tools for teaching and learning the Web""")
pgettext("DB: products.Product.title", """Firefox for Windows 8 Touch""")
pgettext("DB: products.Product.description", """Firefox for Windows 8 touch devices""")
pgettext("DB: products.Product.title", """Firefox Lockwise""")
pgettext("DB: products.Product.description", """Mobile app that gives you access to passwords you've saved to Firefox.""")
pgettext("DB: products.Product.title", """Pocket""")
pgettext("DB: products.Product.description", """Discover and save stories for later""")
pgettext("DB: products.Product.title", """Privacy and Security""")
pgettext("DB: products.Product.description", """Learn more about Mozilla's privacy and security practices.""")
pgettext("DB: products.Product.title", """Contributors""")
pgettext("DB: products.Product.description", """Contributor articles""")
pgettext("DB: products.Product.title", """Firefox Reality""")
pgettext("DB: products.Product.description", """Web browser for virtual reality headsets""")
pgettext("DB: products.Product.title", """Firefox Send""")
pgettext("DB: products.Product.description", """An app for sending files to anyone.""")
pgettext("DB: products.Product.title", """Firefox Klar""")
pgettext("DB: products.Product.description", """Was ist Firefox Klar?""")
pgettext("DB: products.Product.title", """Firefox Lite""")
pgettext("DB: products.Product.description", """Mobile browser for Indonesia, India, The Philippines, and Thailand""")
pgettext("DB: products.Product.title", """Hubs""")
pgettext("DB: products.Product.description", """Social virtual reality for headsets and browsers""")
pgettext("DB: products.Product.title", """Firefox Focus""")
pgettext("DB: products.Product.description", """Automatic privacy browser and content blocker""")
pgettext("DB: products.Product.title", """Firefox ScreenshotGo""")
pgettext("DB: products.Product.description", """Screenshot app for mobile""")
pgettext("DB: products.Product.title", """Thunderbird""")
pgettext("DB: products.Product.description", """Email software for Windows, Mac and Linux""")
# This is a karma title.
pgettext("DB: karma.Title.name", """Administrator""")
# This is a karma title.
pgettext("DB: karma.Title.name", """Buddy of the Month! (10/2015)""")
# This is a karma title.
pgettext("DB: karma.Title.name", """Locale Leader""")
# This is a karma title.
pgettext("DB: karma.Title.name", """Moderator""")
# This is a karma title.
pgettext("DB: karma.Title.name", """SUMO Warrior""")
# This is a karma title.
pgettext("DB: karma.Title.name", """Top 10 Contributor""")
# This is a karma title.
pgettext("DB: karma.Title.name", """Top 25 Contributor""")
Update database strings.
#######################################################################
#
# Note: This file is a generated file--do not edit it directly!
# Instead make changes to the appropriate content in the database or
# write up a bug here:
#
# https://bugzilla.mozilla.org/enter_bug.cgi?product=support.mozilla.org
#
# with the specific lines that are problematic and why.
#
# You can generate this file by running:
#
# ./manage.py extract_db
#
#######################################################################
from django.utils.translation import pgettext
# Translation markers for database-backed strings, generated by
# `./manage.py extract_db` (see the header comment above). Each pgettext()
# call is a no-op at import time whose sole purpose is to expose the string,
# under its "DB: <app>.<Model>.<field>" context, to gettext extraction.
# The literals mirror database rows exactly — including their inconsistent
# punctuation — and must not be hand-edited; change the database instead.
#
# --- kbadge.Badge titles and descriptions -----------------------------------
pgettext("DB: kbadge.Badge.title", """2021 KB Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved English edits during 2021.""")
pgettext("DB: kbadge.Badge.title", """2020 KB Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved English edits during 2020.""")
pgettext("DB: kbadge.Badge.title", """2019 KB Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved English edits during 2019.""")
pgettext("DB: kbadge.Badge.title", """2020 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 20 support forum replies during 2020.""")
pgettext("DB: kbadge.Badge.title", """2019 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 20 support forum replies during 2019.""")
pgettext("DB: kbadge.Badge.title", """2020 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 50 Army of Awesome tweets during 2020.""")
pgettext("DB: kbadge.Badge.title", """2019 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 50 Army of Awesome tweets during 2019.""")
pgettext("DB: kbadge.Badge.title", """2020 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved translations edits during 2020.""")
pgettext("DB: kbadge.Badge.title", """2019 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved translations edits during 2019.""")
pgettext("DB: kbadge.Badge.title", """2018 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved translations edits during 2018.""")
pgettext("DB: kbadge.Badge.title", """2018 KB Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved English edits during 2018.""")
pgettext("DB: kbadge.Badge.title", """2018 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 50 Army of Awesome tweets during 2018.""")
pgettext("DB: kbadge.Badge.title", """2018 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 20 support forum replies during 2018.""")
pgettext("DB: kbadge.Badge.title", """2017 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved translations edits during 2017.""")
pgettext("DB: kbadge.Badge.title", """2017 KB Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved English edits during 2017.""")
pgettext("DB: kbadge.Badge.title", """2017 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 50 Army of Awesome tweets during 2017.""")
pgettext("DB: kbadge.Badge.title", """2017 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 20 support forum replies during 2017.""")
pgettext("DB: kbadge.Badge.title", """2016 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved translations edits during 2016.""")
pgettext("DB: kbadge.Badge.title", """2016 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 30 support forum replies during 2016.""")
pgettext("DB: kbadge.Badge.title", """2016 KB Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved English edits during 2016.""")
pgettext("DB: kbadge.Badge.title", """2016 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 50 Army of Awesome tweets during 2016.""")
pgettext("DB: kbadge.Badge.title", """2015 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 30 support forum replies during 2015.""")
pgettext("DB: kbadge.Badge.title", """2015 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 50 Army of Awesome tweets during 2015.""")
pgettext("DB: kbadge.Badge.title", """2015 KB Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved English edits during 2015.""")
pgettext("DB: kbadge.Badge.title", """2015 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved translations edits during 2015.""")
# NOTE: the 2010-2014 descriptions below are multi-line; the triple-quoted
# literal (including its embedded newline) is part of the extraction key.
pgettext("DB: kbadge.Badge.title", """2014 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Army of Awesome 2014 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2014; in this case: 50 Army of Awesome tweets.
Congrats to all SUMO Army of Awesome 2014 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2014 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO L10n 2014 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2014; in this case: 10 approved translation edits of the SUMO Knowledge Base.
Congrats to all SUMO L10n 2014 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2014 KB Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO KB 2014 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2014; in this case: 10 approved edits of the English SUMO Knowledge Base.
Congrats to all SUMO KB 2014 badge earners for advancing the Mozilla Mission""")
pgettext("DB: kbadge.Badge.title", """2014 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Forum 2014 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2014 in this case: 30 replies in the English SUMO Forum.
Congrats to all SUMO Forum 2014 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """Firefox 29 Launch Team""")
pgettext("DB: kbadge.Badge.description", """Awarded to support contributors who contributed (KB article documentation, answering Forum Questions, localizing KB article documentation, tweets, etc) to the launch of Firefox 29, thanks!
Firefox 29 features:
1. Firefox Desktop: Australis new look and feel
AND Firefox Accounts based sync
2. Firefox for Android: Firefox Accounts based sync
MOAR:
https://sumo.etherpad.mozilla.org/sumo-australis-badges""")
pgettext("DB: kbadge.Badge.title", """2008 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved translations edits during 2008.""")
pgettext("DB: kbadge.Badge.title", """2009 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """This badge is awarded to contributors with 10 approved translations edits during 2009.""")
pgettext("DB: kbadge.Badge.title", """2012 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Army of Awesome 2012 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2012; in this case: 50 Army of Awesome tweets.
Congrats to all SUMO Army of Awesome 2012 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2013 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Army of Awesome 2013 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2013; in this case: 50 Army of Awesome tweets.
Congrats to all SUMO Army of Awesome 2013 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2010 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Army of Awesome 2010 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2010; in this case: 50 Army of Awesome tweets.
Congrats to all SUMO Army of Awesome 2010 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2011 Army of Awesome Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Army of Awesome 2011 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2011; in this case: 50 Army of Awesome tweets.
Congrats to all SUMO Army of Awesome 2011 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2012 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Forum 2012 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2012 in this case: 30 replies in the English SUMO Forum.
Congrats to all SUMO Forum 2012 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2010 KB Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO KB 2010 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2010; in this case: 10 approved edits of the English SUMO Knowledge Base.
Congrats to all SUMO KB 2010 badge earners for advancing the Mozilla Mission""")
pgettext("DB: kbadge.Badge.title", """2010 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO L10n 2010 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2010; in this case: 10 approved translation edits of the SUMO Knowledge Base.
Congrats to all SUMO L10n 2010 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2010 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Forum 2010 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2010 in this case: 30 replies in the English SUMO Forum.
Congrats to all SUMO Forum 2010 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2011 KB Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO KB 2011 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2011; in this case: 10 approved edits of the English SUMO Knowledge Base.
Congrats to all SUMO KB 2011 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2011 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO L10n 2011 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2011; in this case: 10 approved translation edits of the SUMO Knowledge Base.
Congrats to all SUMO L10n 2011 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2011 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Forum 2011 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2011 in this case: 30 replies in the English SUMO Forum.
Congrats to all SUMO Forum 2011 badge earners for advancing the Mozilla Mission!
""")
pgettext("DB: kbadge.Badge.title", """2012 KB Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO KB 2012 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2012; in this case: 10 approved edits of the English SUMO Knowledge Base.
Congrats to all SUMO KB 2012 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2013 KB Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO KB 2013 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2013 in this case: 10 approved edits of the English SUMO Knowledge Base.
Congrats to all SUMO KB 2013 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2013 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO L10n 2013 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2013 in this case: 10 approved translation edits of the SUMO Knowledge Base.
Congrats to all SUMO L10n 2013 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """2013 Support Forum Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO Forum 2013 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2013 in this case: 30 replies in the English SUMO Forum.
Congrats to all SUMO Forum 2013 badge earners for advancing the Mozilla Mission""")
pgettext("DB: kbadge.Badge.title", """2012 L10n Badge""")
pgettext("DB: kbadge.Badge.description", """The SUMO L10n 2012 mini-badge is part of the SUMO series. It represents contribution to SUMO in 2012; in this case: 10 approved translation edits of the SUMO Knowledge Base.
Congrats to all SUMO L10n 2012 badge earners for advancing the Mozilla Mission!""")
pgettext("DB: kbadge.Badge.title", """Kitsune Contributor""")
pgettext("DB: kbadge.Badge.description", """Badge awarded to those who have contributed to the Kitsune code base.""")
# --- products.Topic titles and descriptions ---------------------------------
pgettext("DB: products.Topic.title", """Learn the Basics: get started""")
pgettext("DB: products.Topic.description", """Learn all you need to know to get started with Firefox.""")
pgettext("DB: products.Topic.title", """Bookmarks and tabs""")
pgettext("DB: products.Topic.description", """Access and organize your favorite webpages easily with bookmarks and tabs""")
pgettext("DB: products.Topic.title", """Basic browsing""")
pgettext("DB: products.Topic.description", """Search and navigate easily with these essential features""")
pgettext("DB: products.Topic.title", """Import settings from other browsers""")
pgettext("DB: products.Topic.description", """Learn how to import or export your information between Firefox and another browser""")
pgettext("DB: products.Topic.title", """Video, audio and interactive settings""")
pgettext("DB: products.Topic.description", """Change how Firefox handles videos, animations, music and other interactive content""")
pgettext("DB: products.Topic.title", """How to use Firefox""")
pgettext("DB: products.Topic.description", """How to browse, search and customize your settings""")
pgettext("DB: products.Topic.title", """Download, install and migration""")
pgettext("DB: products.Topic.description", """Learn how to download Firefox on your desktop devices or move information to and from other browsers.""")
pgettext("DB: products.Topic.title", """Tips and tricks""")
pgettext("DB: products.Topic.description", """Go beyond the basics with these shortcuts and other tips.""")
pgettext("DB: products.Topic.title", """Install and update""")
pgettext("DB: products.Topic.description", """How to install Firefox and keep it up to date""")
pgettext("DB: products.Topic.title", """Display and appearance""")
pgettext("DB: products.Topic.description", """Learn how to change your toolbar, font sizes and browser colors""")
pgettext("DB: products.Topic.title", """Install and update""")
pgettext("DB: products.Topic.description", """Download or update Firefox for Windows, Mac and Linux.""")
pgettext("DB: products.Topic.title", """Firefox Sync""")
pgettext("DB: products.Topic.description", """Firefox Sync settings""")
pgettext("DB: products.Topic.title", """Sync and save""")
pgettext("DB: products.Topic.description", """Sync information on all your devices""")
pgettext("DB: products.Topic.title", """Manage add-ons""")
pgettext("DB: products.Topic.description", """Enhance Firefox's functionality and appearance with add-ons""")
pgettext("DB: products.Topic.title", """Sync, share and save""")
pgettext("DB: products.Topic.description", """Sync browsing information and content across multiple devices with Firefox Accounts.""")
pgettext("DB: products.Topic.title", """Firefox settings""")
pgettext("DB: products.Topic.description", """Privacy and personalization""")
pgettext("DB: products.Topic.title", """Firefox Hello""")
pgettext("DB: products.Topic.description", """Have video or voice conversations using the Firefox browser""")
pgettext("DB: products.Topic.title", """Chat and share""")
pgettext("DB: products.Topic.description", """Connect on video and share pages with your network""")
pgettext("DB: products.Topic.title", """Personalize Firefox""")
pgettext("DB: products.Topic.description", """Make Firefox yours with these customization options.""")
pgettext("DB: products.Topic.title", """Customize controls, options and add-ons""")
pgettext("DB: products.Topic.description", """Make Firefox yours by adding and managing the features that you want.""")
pgettext("DB: products.Topic.title", """Personalize Firefox""")
pgettext("DB: products.Topic.description", """Change Firefox's appearance, behavior and settings.""")
pgettext("DB: products.Topic.title", """Privacy and security settings""")
pgettext("DB: products.Topic.description", """Learn how to keep your information safe and secure with Firefox's private browsing, password features and other security settings.""")
pgettext("DB: products.Topic.title", """Do more with apps""")
pgettext("DB: products.Topic.description", """Install open apps from the Marketplace to add more fun and functionality to your device""")
pgettext("DB: products.Topic.title", """Protect your privacy""")
pgettext("DB: products.Topic.description", """Keep your information safe from prying eyes with the latest privacy and security features.""")
pgettext("DB: products.Topic.title", """Get community support""")
pgettext("DB: products.Topic.description", """Get community support""")
pgettext("DB: products.Topic.title", """Manage preferences and add-ons""")
pgettext("DB: products.Topic.description", """Make Firefox yours through customization settings and add-ons""")
pgettext("DB: products.Topic.title", """Fix problems""")
pgettext("DB: products.Topic.description", """Troubleshoot slowness, crashing and error messages.""")
pgettext("DB: products.Topic.title", """Fix slowness, crashing, error messages and other problems""")
pgettext("DB: products.Topic.description", """Fix slowness, crashing, error messages and other problems""")
pgettext("DB: products.Topic.title", """Advanced and experimental features""")
pgettext("DB: products.Topic.description", """Learn tips beyond the basics and try features before they're released to the public.""")
pgettext("DB: products.Topic.title", """Tab basics""")
pgettext("DB: products.Topic.description", """Tab basics""")
pgettext("DB: products.Topic.title", """Firefox versions and languages""")
pgettext("DB: products.Topic.description", """Firefox versions and languages""")
pgettext("DB: products.Topic.title", """Copy your personal information from one browser to another""")
pgettext("DB: products.Topic.description", """Copy your personal information from one browser to another""")
pgettext("DB: products.Topic.title", """Cookies and cache""")
pgettext("DB: products.Topic.description", """Control the information that Firefox saves""")
pgettext("DB: products.Topic.title", """Passwords, forms, search, and history - control what Firefox suggests""")
pgettext("DB: products.Topic.description", """Passwords, forms, search, and history - control what Firefox suggests""")
pgettext("DB: products.Topic.title", """Firefox controls and buttons""")
pgettext("DB: products.Topic.description", """Firefox controls and buttons""")
pgettext("DB: products.Topic.title", """Tab settings""")
pgettext("DB: products.Topic.description", """Tab settings""")
pgettext("DB: products.Topic.title", """Customize Firefox with add-ons, plugins, and extensions""")
pgettext("DB: products.Topic.description", """Customize Firefox with add-ons, plugins, and extensions""")
pgettext("DB: products.Topic.title", """Firefox options, preferences and settings""")
pgettext("DB: products.Topic.description", """Firefox options, preferences and settings""")
pgettext("DB: products.Topic.title", """Bookmark options""")
pgettext("DB: products.Topic.description", """Bookmark options""")
pgettext("DB: products.Topic.title", """Fix problems with websites (Facebook, YouTube, webmail etc.)""")
pgettext("DB: products.Topic.description", """Fix problems with websites (Facebook, YouTube, webmail etc.)""")
pgettext("DB: products.Topic.title", """Error messages: what they mean and how to fix""")
pgettext("DB: products.Topic.description", """How to troubleshoot error messages on Firefox""")
pgettext("DB: products.Topic.title", """Unblock Firefox from connecting to the Internet""")
pgettext("DB: products.Topic.description", """Unblock Firefox from connecting to the Internet""")
pgettext("DB: products.Topic.title", """Procedures to diagnose and fix problems""")
pgettext("DB: products.Topic.description", """Procedures to diagnose and fix problems""")
pgettext("DB: products.Topic.title", """Videos, sound, pictures and animations don't work""")
pgettext("DB: products.Topic.description", """Videos, sound, pictures and animations don't work""")
pgettext("DB: products.Topic.title", """Firefox is slow or stops working""")
pgettext("DB: products.Topic.description", """Slowness or hanging""")
pgettext("DB: products.Topic.title", """Firefox crashes""")
pgettext("DB: products.Topic.description", """Crashing""")
pgettext("DB: products.Topic.title", """Firefox won't save settings or remember information""")
pgettext("DB: products.Topic.description", """Firefox won't save settings or remember information""")
pgettext("DB: products.Topic.title", """Problems with add-ons, plugins or unwanted software""")
pgettext("DB: products.Topic.description", """Problems with add-ons, plugins or unwanted software""")
pgettext("DB: products.Topic.title", """Mozilla Persona""")
pgettext("DB: products.Topic.description", """Mozilla Persona""")
pgettext("DB: products.Topic.title", """Hot topics""")
pgettext("DB: products.Topic.description", """Hot topics""")
pgettext("DB: products.Topic.title", """Other""")
pgettext("DB: products.Topic.description", """Other""")
pgettext("DB: products.Topic.title", """Basic browsing""")
pgettext("DB: products.Topic.description", """Search and navigate easily with these essential features""")
pgettext("DB: products.Topic.title", """How to use Firefox for Android""")
pgettext("DB: products.Topic.description", """How to use Firefox for Android""")
pgettext("DB: products.Topic.title", """What's new in Firefox for Android""")
pgettext("DB: products.Topic.description", """See what new features are available in each release""")
pgettext("DB: products.Topic.title", """Install and update""")
pgettext("DB: products.Topic.description", """How to install and keep Firefox for Android up to date""")
pgettext("DB: products.Topic.title", """Save, share and sync""")
pgettext("DB: products.Topic.description", """Save, share and synchronize content with other devices""")
pgettext("DB: products.Topic.title", """Changing your settings""")
pgettext("DB: products.Topic.description", """Change Firefox's behavior.""")
pgettext("DB: products.Topic.title", """Cast to your TV""")
pgettext("DB: products.Topic.description", """Learn how to view content on another screen""")
pgettext("DB: products.Topic.title", """Popular articles""")
pgettext("DB: products.Topic.description", """Popular tips and solutions for Firefox for Android""")
pgettext("DB: products.Topic.title", """Protect your privacy""")
pgettext("DB: products.Topic.description", """Control how your information is saved or tracked""")
pgettext("DB: products.Topic.title", """Customize settings and preferences""")
pgettext("DB: products.Topic.description", """Make Firefox for Android yours with these customization options""")
pgettext("DB: products.Topic.title", """Do more with apps""")
pgettext("DB: products.Topic.description", """Learn to find and install open apps to add more fun and functionality to your device""")
pgettext("DB: products.Topic.title", """Fix slowness, crashing, error messages and other problems""")
pgettext("DB: products.Topic.description", """Fix slowness, crashing, error messages and other problems""")
pgettext("DB: products.Topic.title", """Get community support""")
pgettext("DB: products.Topic.description", """Get community support""")
pgettext("DB: products.Topic.title", """Learn the Basics: get started""")
pgettext("DB: products.Topic.description", """Learn all you need to know to get Firefox for Android up and running.""")
pgettext("DB: products.Topic.title", """Download, install and migration""")
pgettext("DB: products.Topic.description", """Learn how to install and transfer information to Firefox for Android.""")
pgettext("DB: products.Topic.title", """Tips and tricks""")
pgettext("DB: products.Topic.description", """Tips and tricks""")
pgettext("DB: products.Topic.title", """Use bookmarks""")
pgettext("DB: products.Topic.description", """The basics of using bookmarks""")
pgettext("DB: products.Topic.title", """Firefox Sync settings""")
pgettext("DB: products.Topic.description", """Firefox Sync settings""")
pgettext("DB: products.Topic.title", """Tab basics""")
pgettext("DB: products.Topic.description", """Tab basics""")
pgettext("DB: products.Topic.title", """Privacy and security settings""")
pgettext("DB: products.Topic.description", """Keep your information safe with Firefox for Android's privacy and security settings.""")
pgettext("DB: products.Topic.title", """Customize controls, options and add-ons""")
pgettext("DB: products.Topic.description", """Make Firefox for Android work the way you want through customization.""")
pgettext("DB: products.Topic.title", """Cookies""")
pgettext("DB: products.Topic.description", """Cookies""")
pgettext("DB: products.Topic.title", """Firefox controls and buttons""")
pgettext("DB: products.Topic.description", """Firefox controls and buttons""")
pgettext("DB: products.Topic.title", """Customize Firefox with add-ons, plugins, and extensions""")
pgettext("DB: products.Topic.description", """Customize Firefox with add-ons, plugins, and extensions""")
pgettext("DB: products.Topic.title", """Fix problems with websites (Facebook, YouTube, webmail etc.)""")
pgettext("DB: products.Topic.description", """Fix problems with websites (Facebook, YouTube, webmail etc.)""")
pgettext("DB: products.Topic.title", """Firefox crashes""")
pgettext("DB: products.Topic.description", """Crashing""")
pgettext("DB: products.Topic.title", """Mozilla Persona""")
pgettext("DB: products.Topic.description", """Mozilla Persona""")
pgettext("DB: products.Topic.title", """Marketplace""")
pgettext("DB: products.Topic.description", """Firefox Marketplace""")
pgettext("DB: products.Topic.title", """Other""")
pgettext("DB: products.Topic.description", """Other""")
pgettext("DB: products.Topic.title", """Install and Update""")
pgettext("DB: products.Topic.description", """Install and keep Firefox up to date on your iPad, iPhone or iPod Touch.""")
pgettext("DB: products.Topic.title", """Reader View and List""")
pgettext("DB: products.Topic.description", """Read and save web pages in a clutter-free, reader-friendly view""")
pgettext("DB: products.Topic.title", """Basic browsing""")
pgettext("DB: products.Topic.description", """How to use bookmarks, tabs and basic Firefox features on your iOS device""")
pgettext("DB: products.Topic.title", """History""")
pgettext("DB: products.Topic.description", """Change your history settings on Firefox for iOS""")
pgettext("DB: products.Topic.title", """How to use Firefox for iOS""")
pgettext("DB: products.Topic.description", """General usage questions""")
pgettext("DB: products.Topic.title", """What's new in Firefox for iOS""")
pgettext("DB: products.Topic.description", """See what features are available in each release.""")
pgettext("DB: products.Topic.title", """Bookmarks and tabs""")
pgettext("DB: products.Topic.description", """Access websites easily with bookmarks and tab features""")
pgettext("DB: products.Topic.title", """Search""")
pgettext("DB: products.Topic.description", """Customize your search settings in Firefox for iOS""")
pgettext("DB: products.Topic.title", """Firefox for iOS is not working as expected""")
pgettext("DB: products.Topic.description", """Troubleshoot problems with Firefox for iOS.""")
pgettext("DB: products.Topic.title", """Privacy""")
pgettext("DB: products.Topic.description", """Protect your information with Firefox's privacy settings on iOS""")
pgettext("DB: products.Topic.title", """Sync, save and share""")
pgettext("DB: products.Topic.description", """Share web pages on Firefox for iOS""")
pgettext("DB: products.Topic.title", """Customize preferences""")
pgettext("DB: products.Topic.description", """Customize preferences for Firefox for iOS""")
pgettext("DB: products.Topic.title", """Crashes, errors and other issues""")
pgettext("DB: products.Topic.description", """Troubleshoot error message on Firefox for iOS""")
pgettext("DB: products.Topic.title", """Get started""")
pgettext("DB: products.Topic.description", """Firefox OS basics""")
pgettext("DB: products.Topic.title", """Basic Features""")
pgettext("DB: products.Topic.description", """Learn the basic functionality for your Firefox OS phone. """)
pgettext("DB: products.Topic.title", """Download and Manage Apps""")
pgettext("DB: products.Topic.description", """Download apps from the Marketplace""")
pgettext("DB: products.Topic.title", """Date and Time""")
pgettext("DB: products.Topic.description", """Setting a date and time on your Firefox OS phone""")
pgettext("DB: products.Topic.title", """Display""")
pgettext("DB: products.Topic.description", """Customize your screen on your Firefox OS device.""")
pgettext("DB: products.Topic.title", """Install and update""")
pgettext("DB: products.Topic.description", """Download and install the mobile app on your device.""")
pgettext("DB: products.Topic.title", """Get started""")
pgettext("DB: products.Topic.description", """Learn the basics""")
pgettext("DB: products.Topic.title", """Calling and Contacts""")
pgettext("DB: products.Topic.description", """Learn how to add and manage contacts, as well as make one-to-one or conference calls on your Firefox OS phone.""")
pgettext("DB: products.Topic.title", """Browsing""")
pgettext("DB: products.Topic.description", """Surf and navigate the Web on Firefox Preview""")
pgettext("DB: products.Topic.title", """How do I use Firefox Preview?""")
pgettext("DB: products.Topic.description", """Get help with using features in Firefox Preview.""")
pgettext("DB: products.Topic.title", """Manage account""")
pgettext("DB: products.Topic.description", """How to change your account settings""")
pgettext("DB: products.Topic.title", """Email and Messages""")
pgettext("DB: products.Topic.description", """Keep in touch with your contacts through email and messaging.""")
pgettext("DB: products.Topic.title", """Library""")
pgettext("DB: products.Topic.description", """Manage bookmarks and history""")
pgettext("DB: products.Topic.title", """Services and Subscriptions""")
pgettext("DB: products.Topic.description", """Free and premium privacy offerings""")
pgettext("DB: products.Topic.title", """Music, Photos and Video""")
pgettext("DB: products.Topic.description", """Take pictures, record videos and listen to music on your Firefox OS phone.""")
pgettext("DB: products.Topic.title", """Sync""")
pgettext("DB: products.Topic.description", """Sync your browsing information across other devices.""")
pgettext("DB: products.Topic.title", """Troubleshoot""")
pgettext("DB: products.Topic.description", """Fix problems with Firefox Accounts""")
pgettext("DB: products.Topic.title", """Marketplace""")
pgettext("DB: products.Topic.description", """How to download, manage and use your favorite apps on your Firefox OS phone.""")
pgettext("DB: products.Topic.title", """Privacy and security""")
pgettext("DB: products.Topic.description", """Keep your information safe with Firefox OS locks, privacy features and more.""")
pgettext("DB: products.Topic.title", """Privacy and security""")
pgettext("DB: products.Topic.description", """Protect your privacy on Firefox Preview.""")
pgettext("DB: products.Topic.title", """Settings""")
pgettext("DB: products.Topic.description", """Learn how to configure the Internet connection, display and time on your Firefox OS device.""")
pgettext("DB: products.Topic.title", """Internet and Connections""")
pgettext("DB: products.Topic.description", """Learn more about Wi-Fi, Bluetooth and NFC connections.""")
pgettext("DB: products.Topic.title", """Settings and preferences""")
pgettext("DB: products.Topic.description", """Manage themes and search settings""")
pgettext("DB: products.Topic.title", """Fix problems with Firefox Preview""")
pgettext("DB: products.Topic.description", """Troubleshoot issues with Firefox Preview""")
pgettext("DB: products.Topic.title", """Fix problems""")
pgettext("DB: products.Topic.description", """Learn how to troubleshoot issues on your Firefox OS phone.""")
pgettext("DB: products.Topic.title", """Advanced Settings""")
pgettext("DB: products.Topic.description", """Do more with Firefox Preview""")
pgettext("DB: products.Topic.title", """Get community support""")
pgettext("DB: products.Topic.description", """Get community support""")
pgettext("DB: products.Topic.title", """View all Firefox OS articles""")
pgettext("DB: products.Topic.description", """View a list of all Firefox OS articles""")
pgettext("DB: products.Topic.title", """Working with messages""")
pgettext("DB: products.Topic.description", """Firefox OS SMS & email""")
pgettext("DB: products.Topic.title", """Procedures to diagnose and fix problems""")
pgettext("DB: products.Topic.description", """Procedures to diagnose and fix problems""")
pgettext("DB: products.Topic.title", """Mozilla Persona""")
pgettext("DB: products.Topic.description", """Mozilla Persona""")
pgettext("DB: products.Topic.title", """Hot topics""")
pgettext("DB: products.Topic.description", """Hot topics""")
pgettext("DB: products.Topic.title", """Technical""")
pgettext("DB: products.Topic.description", """Find solutions for how to use the Firefox Private Network VPN""")
# NOTE(review): the description paired with this final title continues past
# this chunk of the file.
pgettext("DB: products.Topic.title", """Accounts""")
pgettext("DB: products.Topic.description", """Find solutions on managing your account""")
pgettext("DB: products.Topic.title", """Payments""")
pgettext("DB: products.Topic.description", """Manage your payment and subscription""")
pgettext("DB: products.Topic.title", """Troubleshooting""")
pgettext("DB: products.Topic.description", """Fix problems with Firefox Private Network VPN""")
pgettext("DB: products.Topic.title", """Firefox for Fire TV""")
pgettext("DB: products.Topic.description", """Browser for the Amazon Fire TV.""")
pgettext("DB: products.Topic.title", """Firefox for Echo Show""")
pgettext("DB: products.Topic.description", """Browser for the Amazon Echo Show""")
pgettext("DB: products.Topic.title", """Get started""")
pgettext("DB: products.Topic.description", """Get Started with Firefox for Fire TV""")
pgettext("DB: products.Topic.title", """Get started""")
pgettext("DB: products.Topic.description", """Basics for using Firefox Private Network.""")
pgettext("DB: products.Topic.title", """Fix problems""")
pgettext("DB: products.Topic.description", """Troubleshoot problems with Firefox Fire TV""")
pgettext("DB: products.Topic.title", """Manage account and settings""")
pgettext("DB: products.Topic.description", """Change account and settings for Private Network.""")
pgettext("DB: products.Topic.title", """Fix problems""")
pgettext("DB: products.Topic.description", """Troubleshoot issues for Private Network""")
pgettext("DB: products.Topic.title", """Popcorn Maker""")
pgettext("DB: products.Topic.description", """Learn how to remix web video, audio and images into mashups that you can embed on other websites. """)
pgettext("DB: products.Topic.title", """Webmaker for Android""")
pgettext("DB: products.Topic.description", """Get help with the Webmaker app for Android.""")
pgettext("DB: products.Topic.title", """Intro to Open Badges""")
pgettext("DB: products.Topic.description", """Learn the basic about Open Badges""")
pgettext("DB: products.Topic.title", """Release notes""")
pgettext("DB: products.Topic.description", """Where to find release notes and upcoming features.""")
pgettext("DB: products.Topic.title", """Windows""")
pgettext("DB: products.Topic.description", """Deploying Firefox on Windows computers.""")
pgettext("DB: products.Topic.title", """Manage certificates""")
pgettext("DB: products.Topic.description", """Set up certificates on Firefox for your organization.""")
pgettext("DB: products.Topic.title", """Thimble""")
pgettext("DB: products.Topic.description", """Learn how to create and share your own webpages quickly and easily.""")
pgettext("DB: products.Topic.title", """BadgeKit""")
pgettext("DB: products.Topic.description", """Learn how to create, assess and issue badges""")
pgettext("DB: products.Topic.title", """Customization of Firefox in an enterprise environment""")
pgettext("DB: products.Topic.description", """Customization of Firefox in an enterprise environment""")
pgettext("DB: products.Topic.title", """Installation""")
pgettext("DB: products.Topic.description", """How to install Firefox for Enterprise""")
pgettext("DB: products.Topic.title", """Explore""")
pgettext("DB: products.Topic.description", """Learn about Firefox for Enterprise""")
pgettext("DB: products.Topic.title", """Mac""")
pgettext("DB: products.Topic.description", """Deploy Firefox on your organization's Mac computers""")
pgettext("DB: products.Topic.title", """Policies overview""")
pgettext("DB: products.Topic.description", """How to set up policies on Firefox for your organization.""")
pgettext("DB: products.Topic.title", """X-Ray Goggles""")
pgettext("DB: products.Topic.description", """Learn how to inspect the code behind every webpage.""")
pgettext("DB: products.Topic.title", """Get Involved""")
pgettext("DB: products.Topic.description", """Help the Open Badges community""")
pgettext("DB: products.Topic.title", """Deploy""")
pgettext("DB: products.Topic.description", """Deployment of Firefox in an enterprise environment""")
pgettext("DB: products.Topic.title", """Manage updates, policies & customization""")
pgettext("DB: products.Topic.description", """Policies for Firefox for Enterprise""")
pgettext("DB: products.Topic.title", """Autoconfiguration""")
pgettext("DB: products.Topic.description", """How to configure Firefox for Enterprise""")
pgettext("DB: products.Topic.title", """Linux""")
pgettext("DB: products.Topic.description", """Deploy Firefox on your organization's Linux machines.""")
pgettext("DB: products.Topic.title", """Manage settings via policy""")
pgettext("DB: products.Topic.description", """Change Firefox's settings using policies.""")
pgettext("DB: products.Topic.title", """Get the most from webmaker.org""")
pgettext("DB: products.Topic.description", """Help or get help on a Webmaker project.""")
pgettext("DB: products.Topic.title", """Earn Badges""")
pgettext("DB: products.Topic.description", """Earn Badges for the skills you learn online and offline""")
pgettext("DB: products.Topic.title", """Manage add-ons""")
pgettext("DB: products.Topic.description", """Working with add-ons on Firefox for your organization.""")
pgettext("DB: products.Topic.title", """Events and help for Mentors""")
pgettext("DB: products.Topic.description", """Help teach digital skills and share creative ways of teaching technology.""")
pgettext("DB: products.Topic.title", """Issue Badges""")
pgettext("DB: products.Topic.description", """Issue digital badges to acknowledge new skills and achievements""")
pgettext("DB: products.Topic.title", """Display Badges""")
pgettext("DB: products.Topic.description", """Display your digital badges on your social networks, job sites and your own website.""")
pgettext("DB: products.Topic.title", """Knowledge Base""")
pgettext("DB: products.Topic.description", """Windows 8 Touch support articles""")
pgettext("DB: products.Topic.title", """Pocket Basics""")
pgettext("DB: products.Topic.description", """New to Pocket? Start here.""")
pgettext("DB: products.Topic.title", """Install and set up""")
pgettext("DB: products.Topic.description", """Sync your logins across Firefox and your apps.""")
pgettext("DB: products.Topic.title", """About Data Sharing""")
pgettext("DB: products.Topic.description", """In order to process or provide our products and services to you, we share your information with the following business partners. These entities are contractually obligated to handle the data in ways that are approved by Mozilla.""")
pgettext("DB: products.Topic.title", """Pocket for Mobile""")
pgettext("DB: products.Topic.description", """How to use Pocket on your iPhone, iPad, Android or Kobo.""")
pgettext("DB: products.Topic.title", """Manage settings and logins""")
pgettext("DB: products.Topic.description", """Setting up your device to work with Firefox Lockwise""")
pgettext("DB: products.Topic.title", """Managing Your Data""")
pgettext("DB: products.Topic.description", """Learn how to manage your data (including deleting) for specific products or services.""")
pgettext("DB: products.Topic.title", """Pocket for your Computer""")
pgettext("DB: products.Topic.description", """Using Pocket on the Web.""")
pgettext("DB: products.Topic.title", """Fix problems""")
pgettext("DB: products.Topic.description", """Troubleshoot issues with Firefox Lockwise""")
pgettext("DB: products.Topic.title", """Sensible Settings""")
pgettext("DB: products.Topic.description", """Give our users actionable and informed choices by informing and educating at the point of collection and providing a choice to opt-out whenever possible. """)
pgettext("DB: products.Topic.title", """Advanced""")
pgettext("DB: products.Topic.description", """Information for Developers and Beta users.""")
pgettext("DB: products.Topic.title", """Defense in Depth""")
pgettext("DB: products.Topic.description", """Make privacy a key factor in selecting and interacting with partners. """)
pgettext("DB: products.Topic.title", """How does it work?""")
pgettext("DB: products.Topic.description", """Basics to get started with Firefox for Windows 8 Touch.""")
pgettext("DB: products.Topic.title", """Settings""")
pgettext("DB: products.Topic.description", """How to configure and customize Firefox for Windows 8 Touch.""")
pgettext("DB: products.Topic.title", """Problems with websites""")
pgettext("DB: products.Topic.description", """Problems with websites that don't work well in Firefox for Windows 8 Touch.""")
pgettext("DB: products.Topic.title", """Other""")
pgettext("DB: products.Topic.description", """Other questions with Firefox for Windows 8 Touch.""")
pgettext("DB: products.Topic.title", """General contribution""")
pgettext("DB: products.Topic.description", """Topic for any KB articles related to the contribution in general""")
pgettext("DB: products.Topic.title", """Get started""")
pgettext("DB: products.Topic.description", """How to use Firefox Reality""")
pgettext("DB: products.Topic.title", """Forum Support""")
pgettext("DB: products.Topic.description", """Topic for any KB articles related to the Forum Support contribution""")
pgettext("DB: products.Topic.title", """Social Support""")
pgettext("DB: products.Topic.description", """Topic for any KB articles related to the Social Support Program""")
pgettext("DB: products.Topic.title", """Localization""")
pgettext("DB: products.Topic.description", """Topic for any KB articles related to the Localization contribution""")
pgettext("DB: products.Topic.title", """KB Contribution""")
pgettext("DB: products.Topic.description", """Topic for any KB articles related to the KB articles contribution""")
pgettext("DB: products.Topic.title", """Respond tool""")
pgettext("DB: products.Topic.description", """Topic for any KB articles related to the Respond Tool contribution""")
pgettext("DB: products.Topic.title", """Troubleshooting""")
pgettext("DB: products.Topic.description", """Fix problems with Firefox Reality""")
pgettext("DB: products.Topic.title", """[Obsolete] Mozilla Persona""")
pgettext("DB: products.Topic.description", """Mozilla Persona""")
pgettext("DB: products.Topic.title", """[Obsolete] Hot topics""")
pgettext("DB: products.Topic.description", """Hot topics""")
pgettext("DB: products.Topic.title", """Get Started""")
pgettext("DB: products.Topic.description", """Klar verwenden""")
pgettext("DB: products.Topic.title", """Firefox Klar for iOS""")
pgettext("DB: products.Topic.description", """Privacy browser for iOS""")
pgettext("DB: products.Topic.title", """Get started""")
pgettext("DB: products.Topic.description", """Everything you need to know to use Firefox Lite.""")
pgettext("DB: products.Topic.title", """Features""")
pgettext("DB: products.Topic.description", """Getting started with Hubs""")
pgettext("DB: products.Topic.title", """Firefox Klar for Android""")
pgettext("DB: products.Topic.description", """Privacy browser for Android""")
pgettext("DB: products.Topic.title", """Preferences""")
pgettext("DB: products.Topic.description", """Customize Firefox Lite to your desired settings""")
pgettext("DB: products.Topic.title", """Controls""")
pgettext("DB: products.Topic.description", """How to navigate Hubs""")
pgettext("DB: products.Topic.title", """Fix problems""")
pgettext("DB: products.Topic.description", """Troubleshoot problems with Firefox Lite.""")
pgettext("DB: products.Topic.title", """Moderation""")
pgettext("DB: products.Topic.description", """Tools for making Hubs a good experience for all.""")
pgettext("DB: products.Topic.title", """Spoke""")
pgettext("DB: products.Topic.description", """Build scenes with Spoke""")
pgettext("DB: products.Topic.title", """Creators""")
pgettext("DB: products.Topic.description", """Advanced Hubs customization for creators""")
pgettext("DB: products.Topic.title", """Firefox Focus for iOS""")
pgettext("DB: products.Topic.description", """Firefox Focus for iOS""")
pgettext("DB: products.Topic.title", """Firefox Focus for Android""")
pgettext("DB: products.Topic.description", """Privacy browser for Android""")
pgettext("DB: products.Topic.title", """Get started""")
pgettext("DB: products.Topic.description", """Learn the basics about ScreenshotGo""")
pgettext("DB: products.Topic.title", """Learn the Basics. Get Started""")
pgettext("DB: products.Topic.description", """Learn the Basics. Get Started""")
pgettext("DB: products.Topic.title", """Tips and tricks""")
pgettext("DB: products.Topic.description", """Learn tips and shortcuts to help you work faster""")
pgettext("DB: products.Topic.title", """Set up email""")
pgettext("DB: products.Topic.description", """Add and configure your email accounts on Thunderbird""")
pgettext("DB: products.Topic.title", """Install, Migrate and Update""")
pgettext("DB: products.Topic.description", """How to install and keep Thunderbird up to date""")
pgettext("DB: products.Topic.title", """Read, send and organize emails""")
pgettext("DB: products.Topic.description", """Learn how to manage your email messages""")
pgettext("DB: products.Topic.title", """Emails""")
pgettext("DB: products.Topic.description", """Learn to set up accounts, read, send and manage emails""")
pgettext("DB: products.Topic.title", """News Feeds (RSS), Blogs and Social""")
pgettext("DB: products.Topic.description", """Stay up to date with news feeds, blogs and social features""")
pgettext("DB: products.Topic.title", """Contacts""")
pgettext("DB: products.Topic.description", """How to use the address book on Thunderbird""")
pgettext("DB: products.Topic.title", """Calendar""")
pgettext("DB: products.Topic.description", """Related to the Lightning add-on for Calendar""")
pgettext("DB: products.Topic.title", """Customize controls, options and add-ons""")
pgettext("DB: products.Topic.description", """Customize controls, options and add-ons""")
pgettext("DB: products.Topic.title", """Thunderbird versions and languages""")
pgettext("DB: products.Topic.description", """Thunderbird versions and languages""")
pgettext("DB: products.Topic.title", """Passwords, forms and search""")
pgettext("DB: products.Topic.description", """Passwords, forms and search""")
pgettext("DB: products.Topic.title", """Thunderbird controls and buttons """)
pgettext("DB: products.Topic.description", """Learn all about Thunderbird controls and functionality.""")
pgettext("DB: products.Topic.title", """Fix problems with email providers (gmail, Yahoo, etc.) """)
pgettext("DB: products.Topic.description", """Learn how to fix problems with Gmail, Yahoo and other email providers""")
pgettext("DB: products.Topic.title", """Download, install and migration""")
pgettext("DB: products.Topic.description", """Download, install and Migration""")
pgettext("DB: products.Topic.title", """Copy your personal information from one Thunderbird to another""")
pgettext("DB: products.Topic.description", """Copy your personal information from one Thunderbird to another""")
pgettext("DB: products.Topic.title", """Tab settings""")
pgettext("DB: products.Topic.description", """Tab settings""")
pgettext("DB: products.Topic.title", """Error messages: what they mean and how to fix""")
pgettext("DB: products.Topic.description", """Error messages: what they mean and how to fix""")
pgettext("DB: products.Topic.title", """Privacy and security settings""")
pgettext("DB: products.Topic.description", """Keep your information safe with password and security settings""")
pgettext("DB: products.Topic.title", """Customize Thunderbird with add-ons, plugins, and extensions""")
pgettext("DB: products.Topic.description", """Customize Thunderbird with add-ons, plugins, and extensions""")
pgettext("DB: products.Topic.title", """Unblock Thunderbird from connecting to the Internet""")
pgettext("DB: products.Topic.description", """Unblock Thunderbird from connecting to the Internet""")
pgettext("DB: products.Topic.title", """Thunderbird options, preferences and settings """)
pgettext("DB: products.Topic.description", """Thunderbird options, preferences and settings """)
pgettext("DB: products.Topic.title", """Procedures to diagnose and fix problems""")
pgettext("DB: products.Topic.description", """Procedures to diagnose and fix problems""")
pgettext("DB: products.Topic.title", """Fix slowness, crashing, error messages and other problems""")
pgettext("DB: products.Topic.description", """Troubleshoot error messages on Thunderbird.""")
pgettext("DB: products.Topic.title", """Thunderbird is slow or stops working""")
pgettext("DB: products.Topic.description", """Thunderbird is slow or stops working""")
pgettext("DB: products.Topic.title", """Thunderbird crashes""")
pgettext("DB: products.Topic.description", """Thunderbird crashes""")
pgettext("DB: products.Topic.title", """Get community support""")
pgettext("DB: products.Topic.description", """Get community support""")
pgettext("DB: products.Topic.title", """Thunderbird won't save settings or remember information""")
pgettext("DB: products.Topic.description", """Thunderbird won't save settings or remember information""")
pgettext("DB: products.Topic.title", """Problems with add-ons, plugins or unwanted software""")
pgettext("DB: products.Topic.description", """Problems with add-ons, plugins or unwanted software""")
pgettext("DB: products.Topic.title", """How To""")
pgettext("DB: products.Topic.description", """Articles that tell you how you can do more with Thunderbird""")
pgettext("DB: products.Topic.title", """Other""")
pgettext("DB: products.Topic.description", """Other""")
pgettext("DB: products.Product.title", """Firefox""")
pgettext("DB: products.Product.description", """Web browser for Windows, Mac and Linux""")
pgettext("DB: products.Product.title", """Firefox for Android""")
pgettext("DB: products.Product.description", """Web browser for Android smartphones and tablets""")
pgettext("DB: products.Product.title", """Firefox for iOS""")
pgettext("DB: products.Product.description", """Firefox for iPhone, iPad and iPod touch devices""")
pgettext("DB: products.Product.title", """Firefox Accounts""")
pgettext("DB: products.Product.description", """Privacy-first products for desktop and mobile""")
pgettext("DB: products.Product.title", """Firefox OS""")
pgettext("DB: products.Product.description", """Mobile OS for smartphones""")
pgettext("DB: products.Product.title", """Firefox Preview""")
pgettext("DB: products.Product.description", """Early version of an experimental Firefox browser for Android.""")
pgettext("DB: products.Product.title", """Mozilla VPN""")
pgettext("DB: products.Product.description", """VPN for Windows 10, Android, and iOS devices""")
pgettext("DB: products.Product.title", """Firefox for Amazon Devices""")
pgettext("DB: products.Product.description", """Browser for Amazon devices""")
pgettext("DB: products.Product.title", """Firefox for Fire TV""")
pgettext("DB: products.Product.description", """Browser for Amazon Fire TV""")
pgettext("DB: products.Product.title", """Firefox Private Network Browser-level protection""")
pgettext("DB: products.Product.description", """Browse securely on public Wi-Fi""")
pgettext("DB: products.Product.title", """Firefox for Enterprise""")
pgettext("DB: products.Product.description", """Firefox Quantum for businesses""")
pgettext("DB: products.Product.title", """Open Badges""")
pgettext("DB: products.Product.description", """A new online standard to recognize and verify learning""")
pgettext("DB: products.Product.title", """Webmaker""")
pgettext("DB: products.Product.description", """Webmaker and other tools for teaching and learning the Web""")
pgettext("DB: products.Product.title", """Firefox for Android (ESR)""")
pgettext("DB: products.Product.description", """Older version of Firefox for Android (no longer supported)""")
pgettext("DB: products.Product.title", """Firefox for Windows 8 Touch""")
pgettext("DB: products.Product.description", """Firefox for Windows 8 touch devices""")
pgettext("DB: products.Product.title", """Firefox Lockwise""")
pgettext("DB: products.Product.description", """Mobile app that gives you access to passwords you've saved to Firefox.""")
pgettext("DB: products.Product.title", """Pocket""")
pgettext("DB: products.Product.description", """Discover and save stories for later""")
pgettext("DB: products.Product.title", """Privacy and Security""")
pgettext("DB: products.Product.description", """Learn more about Mozilla's privacy and security practices.""")
pgettext("DB: products.Product.title", """Contributors""")
pgettext("DB: products.Product.description", """Contributor articles""")
pgettext("DB: products.Product.title", """Firefox Reality""")
pgettext("DB: products.Product.description", """Web browser for virtual reality headsets""")
pgettext("DB: products.Product.title", """Firefox Send""")
pgettext("DB: products.Product.description", """An app for sending files to anyone.""")
pgettext("DB: products.Product.title", """Firefox Klar""")
pgettext("DB: products.Product.description", """Was ist Firefox Klar?""")
pgettext("DB: products.Product.title", """Firefox Lite""")
pgettext("DB: products.Product.description", """Mobile browser for Indonesia, India, The Philippines, and Thailand""")
pgettext("DB: products.Product.title", """Hubs""")
pgettext("DB: products.Product.description", """Social virtual reality for headsets and browsers""")
pgettext("DB: products.Product.title", """Firefox Focus""")
pgettext("DB: products.Product.description", """Automatic privacy browser and content blocker""")
pgettext("DB: products.Product.title", """Firefox ScreenshotGo""")
pgettext("DB: products.Product.description", """Screenshot app for mobile""")
pgettext("DB: products.Product.title", """Thunderbird""")
pgettext("DB: products.Product.description", """Email software for Windows, Mac and Linux""")
# This is a karma title.
pgettext("DB: karma.Title.name", """Administrator""")
# This is a karma title.
pgettext("DB: karma.Title.name", """Buddy of the Month! (10/2015)""")
# This is a karma title.
pgettext("DB: karma.Title.name", """Locale Leader""")
# This is a karma title.
pgettext("DB: karma.Title.name", """Moderator""")
# This is a karma title.
pgettext("DB: karma.Title.name", """SUMO Warrior""")
# This is a karma title.
pgettext("DB: karma.Title.name", """Top 10 Contributor""")
# This is a karma title.
pgettext("DB: karma.Title.name", """Top 25 Contributor""")
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
from optparse import make_option
from django.utils.six.moves import input
from django.conf import settings
from django.core.management.base import BaseCommand
from treeherder.model.models import Datasource, Repository
class Command(BaseCommand):
    """Populate the ``Datasource`` table and create the connected databases.

    For every active repository, ensure that a ``jobs`` and an
    ``objectstore`` datasource row exists.  With ``--reset`` the existing
    datasources are destroyed first (after interactive confirmation).
    """

    # The trailing space on the first fragment is required: without it the
    # implicit string concatenation reads "...table andcreate...".
    help = ("Populate the datasource table and "
            "create the connected databases")

    option_list = BaseCommand.option_list + (
        make_option('--host',
                    action='store',
                    dest='host',
                    default=settings.TREEHERDER_DATABASE_HOST,
                    help='Host to associate the datasource to'),
        make_option('--readonly-host',
                    action='store',
                    dest='readonly_host',
                    # Bug fix: the read-only default must point at the
                    # read-only replica, not at the master database host,
                    # so read_only_host is populated correctly.
                    default=settings.TREEHERDER_RO_DATABASE_HOST,
                    help='Readonly host to associate the datasource to'),
        make_option('--reset',
                    action='store_true',
                    dest='reset',
                    default=False,
                    help='Reset the datasources if they already exists'),
    )

    def handle(self, *args, **options):
        """Optionally wipe existing datasources, then (re)create the
        datasource rows for all active repositories."""
        if options["reset"]:
            confirm = input("""You have requested an init of the datasources.
This will IRREVERSIBLY DESTROY all data in the jobs and objectstore databases.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """)
            if confirm == "yes":
                for ds in Datasource.objects.all():
                    ds.delete()

        # NOTE(review): answering anything other than "yes" only skips the
        # delete; the command still proceeds to create missing datasources.
        projects = Repository.objects.filter(active_status='active').values_list('name', flat=True)
        for project in projects:
            # Each repository gets one datasource per content type.
            for contenttype in ("jobs", "objectstore"):
                Datasource.objects.get_or_create(
                    contenttype=contenttype,
                    dataset=1,
                    project=project,
                    host=options['host'],
                    read_only_host=options['readonly_host']
                )
        # Invalidate the cached datasource list so new rows are visible.
        Datasource.reset_cache()
Bug 1125464 - Populate the Datasource read_only_host field correctly
On initial repo setup, we were previously setting the read_only_host
field in the Datasource table to the host value for the master, not the
read-only slave. Oops.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
from optparse import make_option
from django.utils.six.moves import input
from django.conf import settings
from django.core.management.base import BaseCommand
from treeherder.model.models import Datasource, Repository
class Command(BaseCommand):
    """Populate the ``Datasource`` table and create the connected databases.

    Ensures a ``jobs`` and an ``objectstore`` datasource row exists for
    every active repository; ``--reset`` destroys the existing datasources
    first, after an interactive confirmation prompt.
    """

    # Bug fix: the first fragment needs a trailing space, otherwise implicit
    # concatenation produces "...table andcreate the connected databases".
    help = ("Populate the datasource table and "
            "create the connected databases")

    option_list = BaseCommand.option_list + (
        make_option('--host',
                    action='store',
                    dest='host',
                    default=settings.TREEHERDER_DATABASE_HOST,
                    help='Host to associate the datasource to'),
        make_option('--readonly-host',
                    action='store',
                    dest='readonly_host',
                    # Defaults to the read-only replica host, distinct from
                    # the master host above.
                    default=settings.TREEHERDER_RO_DATABASE_HOST,
                    help='Readonly host to associate the datasource to'),
        make_option('--reset',
                    action='store_true',
                    dest='reset',
                    default=False,
                    help='Reset the datasources if they already exists'),
    )

    def handle(self, *args, **options):
        """Optionally wipe existing datasources, then (re)create the
        datasource rows for all active repositories."""
        if options["reset"]:
            confirm = input("""You have requested an init of the datasources.
This will IRREVERSIBLY DESTROY all data in the jobs and objectstore databases.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """)
            if confirm == "yes":
                for ds in Datasource.objects.all():
                    ds.delete()

        # NOTE(review): declining the prompt skips the delete but does not
        # abort the command; missing datasources are still created below.
        projects = Repository.objects.filter(active_status='active').values_list('name', flat=True)
        for project in projects:
            for contenttype in ("jobs", "objectstore"):
                Datasource.objects.get_or_create(
                    contenttype=contenttype,
                    dataset=1,
                    project=project,
                    host=options['host'],
                    read_only_host=options['readonly_host']
                )
        # Invalidate the cached datasource list so new rows are visible.
        Datasource.reset_cache()
|
from __future__ import absolute_import, division, print_function
import logging
from dynd import nd
from pandas import DataFrame
from .expr.table import *
from .api import *
from .data.csv import *
from .data.json import *
from .compute.python import *
from .data.pandas import *
from .data.meta import *
from .compute.pandas import *
# Initialize basic logging for the package; keep it quiet (WARNING) by
# default so importing blaze does not spam the console.
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
# IEEE-754 special values re-exported at the package top level for
# convenience in user expressions.
inf = float('inf')
nan = float('nan')
# Package version string (development build of 0.6.0).
__version__ = '0.6.0-dev'
# If IPython is already loaded, register the Blaze catalog magic
# from . import catalog
# import sys
# if 'IPython' in sys.modules:
# catalog.register_ipy_magic()
# del sys
def print_versions():
    """Print all the versions of software that Blaze relies on."""
    # Local imports keep the cost out of the normal `import blaze` path.
    import sys, platform
    import numpy as np
    import dynd
    import datashape
    import blz

    separator = "-=" * 38
    print(separator)
    print("Blaze version: %s" % __version__)
    print("Datashape version: %s" % datashape.__version__)
    print("NumPy version: %s" % np.__version__)
    print("DyND version: %s / LibDyND %s" %
          (dynd.__version__, dynd.__libdynd_version__))
    print("Python version: %s" % sys.version)
    system, _node, release, version, machine, processor = platform.uname()
    print("Platform: %s-%s-%s (%s)" % (system, release, machine, version))
    if system == "Linux":
        print("Linux dist: %s" % " ".join(platform.linux_distribution()[:-1]))
    # platform.uname() may leave the processor field empty.
    print("Processor: %s" % (processor or "not recognized"))
    print("Byte-ordering: %s" % sys.byteorder)
    print("Detected cores: %s" % blz.detect_number_of_cores())
    print(separator)
def test(verbosity=1, xunitfile=None, exit=False):
    """
    Runs the full Blaze test suite, outputting
    the results of the tests to sys.stdout.

    This uses nose tests to discover which tests to
    run, and runs tests in any 'tests' subdirectory
    within the Blaze module.

    Parameters
    ----------
    verbosity : int, optional
        Value 0 prints very little, 1 prints a little bit,
        and 2 prints the test names while testing.
    xunitfile : string, optional
        If provided, writes the test results to an xunit
        style xml file. This is useful for running the tests
        in a CI server such as Jenkins.
    exit : bool, optional
        If True, the function will call sys.exit with an
        error code after the tests are finished.
    """
    import nose
    import os
    import sys

    args = ['nosetests', '--verbosity=%d' % verbosity]
    # Request an xunit report when a target file was supplied.
    if xunitfile:
        args += ['--with-xunit', '--xunit-file=%s' % xunitfile]
    # Keep nose's own logging at WARN level.
    args.append('--logging-level=WARN')

    # Collect every 'tests' subdirectory under the package root.
    rootdir = os.path.dirname(__file__)
    for dirpath, dirnames, _filenames in os.walk(rootdir):
        if 'tests' not in dirnames:
            continue
        tests_path = os.path.join(dirpath, 'tests')
        args.append(tests_path)
        print('Test dir: %s' % tests_path[len(rootdir) + 1:])

    # print versions (handy when reporting problems)
    print_versions()
    sys.stdout.flush()

    # Hand control over to nose.
    return nose.main(argv=args, exit=exit)
add hdf5 to root imports
from __future__ import absolute_import, division, print_function
import logging
from dynd import nd
from pandas import DataFrame
from .expr.table import *
from .api import *
from .data.csv import *
from .data.json import *
from .data.hdf5 import *
from .compute.python import *
from .data.pandas import *
from .data.meta import *
from .compute.pandas import *
# Set up package logging, silenced to WARNING by default so that simply
# importing blaze produces no log output.
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
# Float infinity / NaN constants exported at the top level for user code.
inf = float('inf')
nan = float('nan')
# Package version string (development build of 0.6.0).
__version__ = '0.6.0-dev'
# If IPython is already loaded, register the Blaze catalog magic
# from . import catalog
# import sys
# if 'IPython' in sys.modules:
# catalog.register_ipy_magic()
# del sys
def print_versions():
    """Print all the versions of software that Blaze relies on."""
    # Imports are local so that importing blaze itself stays cheap.
    import sys, platform
    import numpy as np
    import dynd
    import datashape
    import blz
    print("-=" * 38)
    print("Blaze version: %s" % __version__)
    print("Datashape version: %s" % datashape.__version__)
    print("NumPy version: %s" % np.__version__)
    print("DyND version: %s / LibDyND %s" %
          (dynd.__version__, dynd.__libdynd_version__))
    print("Python version: %s" % sys.version)
    (sysname, nodename, release, version, machine, processor) = \
        platform.uname()
    print("Platform: %s-%s-%s (%s)" % (sysname, release, machine, version))
    if sysname == "Linux":
        # Drop the last field of linux_distribution() (the codename).
        print("Linux dist: %s" % " ".join(platform.linux_distribution()[:-1]))
    if not processor:
        # platform.uname() may leave the processor field empty.
        processor = "not recognized"
    print("Processor: %s" % processor)
    print("Byte-ordering: %s" % sys.byteorder)
    print("Detected cores: %s" % blz.detect_number_of_cores())
    print("-=" * 38)
def test(verbosity=1, xunitfile=None, exit=False):
    """
    Runs the full Blaze test suite, outputting
    the results of the tests to sys.stdout.

    This uses nose tests to discover which tests to
    run, and runs tests in any 'tests' subdirectory
    within the Blaze module.

    Parameters
    ----------
    verbosity : int, optional
        Value 0 prints very little, 1 prints a little bit,
        and 2 prints the test names while testing.
    xunitfile : string, optional
        If provided, writes the test results to an xunit
        style xml file. This is useful for running the tests
        in a CI server such as Jenkins.
    exit : bool, optional
        If True, the function will call sys.exit with an
        error code after the tests are finished.
    """
    import nose
    import os
    import sys
    argv = ['nosetests', '--verbosity=%d' % verbosity]
    # Output an xunit file if requested
    if xunitfile:
        argv += ['--with-xunit', '--xunit-file=%s' % xunitfile]
    # Set the logging level to warn
    argv += ['--logging-level=WARN']
    # Add every 'tests' subdirectory under the package root to the options
    rootdir = os.path.dirname(__file__)
    for dirpath, dirnames, _ in os.walk(rootdir):
        if 'tests' not in dirnames:
            continue
        testsdir = os.path.join(dirpath, 'tests')
        argv.append(testsdir)
        print('Test dir: %s' % testsdir[len(rootdir)+1:])
    # print versions (handy when reporting problems)
    print_versions()
    sys.stdout.flush()
    # Ask nose to do its thing
    return nose.main(argv=argv, exit=exit)
|
#!/usr/bin/env python
from __future__ import print_function
from pprint import pprint
from functools import reduce
import operator
import subprocess
import re
from copy import deepcopy
from kerncraft.intervals import Intervals
from kerncraft.prefixedunit import PrefixedUnit
# Datatype sizes in bytes
datatype_size = {'double': 8, 'float': 4}
def blocking(indices, block_size, initial_boundary=0):
    '''
    Split a list of integers into blocks of *block_size* and return the
    sorted list of distinct block indices that are touched.

    The first block boundary is located at *initial_boundary* (default 0).
    Negative indices map to negative block indices via floor division.

    >>> blocking([0, -1, -2, -3, -4, -5, -6, -7, -8, -9], 8)
    [-2, -1, 0]
    >>> blocking([0], 8)
    [0]
    '''
    blocks = []
    for idx in indices:
        # '//' keeps integer (floor) semantics on both Python 2 and 3;
        # plain '/' would produce float block indices under Python 3.
        bl_idx = (idx-initial_boundary) // block_size
        if bl_idx not in blocks:
            blocks.append(bl_idx)
    blocks.sort()
    return blocks
def flatten_dict(d):
    '''
    Transform a 2d-dict d[i][k] into a new 1d-dict e[(i, k)] with 2-tuple keys.
    '''
    return {(outer, inner): subdict[inner]
            for outer, subdict in d.items()
            for inner in subdict}
class Roofline:
    """
    class representation of the Roofline Model

    Predicted performance is derived per memory-hierarchy level as
    arithmetic intensity (FLOP/byte) times measured bandwidth; the minimum
    over all levels is reported as the bottleneck.
    more info to follow...
    """
    name = "Roofline"
    @classmethod
    def configure_arggroup(cls, parser):
        # No Roofline-specific command line arguments are registered (yet).
        pass
    def __init__(self, kernel, machine, args=None, parser=None):
        """
        *kernel* is a Kernel object
        *machine* describes the machine (cpu, cache and memory) characteristics
        *args* (optional) are the parsed arguments from the command line
        """
        self.kernel = kernel
        self.machine = machine
        self._args = args
        self._parser = parser
        if args:
            # handle CLI info
            pass
    def _calculate_relative_offset(self, name, access_dimensions):
        '''
        returns the offset from the iteration center in number of elements and the order of indices
        used in access.
        '''
        offset = 0
        base_dims = self.kernel._variables[name][1]
        for dim, offset_info in enumerate(access_dimensions):
            offset_type, idx_name, dim_offset = offset_info
            assert offset_type == 'rel', 'Only relative access to arrays is supported at the moment'
            if offset_type == 'rel':
                # Row-major linearization: scale this dimension's offset by the
                # product of all trailing (faster varying) dimensions.
                offset += dim_offset*reduce(operator.mul, base_dims[dim+1:], 1)
            else:
                # should not happen (asserted above)
                pass
        return offset
    def _calculate_iteration_offset(self, name, index_order, loop_index):
        '''
        returns the offset from one to the next iteration using *loop_index*.
        *index_order* is the order used by the access dimensions e.g. 'ijk' corresponds to [i][j][k]
        *loop_index* specifies the loop to be used for iterations (this is typically the inner
        most one)
        '''
        offset = 0
        base_dims = self.kernel._variables[name][1]
        for dim, index_name in enumerate(index_order):
            if loop_index == index_name:
                # One step of loop_index advances by the product of all
                # trailing dimensions (row-major layout).
                offset += reduce(operator.mul, base_dims[dim+1:], 1)
        return offset
    def _get_index_order(self, access_dimensions):
        '''Returns the order of indices used in *access_dimensions*.'''
        return ''.join(map(lambda d: d[1], access_dimensions))
    def _expand_to_cacheline_blocks(self, first, last):
        '''
        Returns first and last values which align with cacheline blocks, by increasing range.
        '''
        # TODO how to handle multiple datatypes (with different size)?
        element_size = datatype_size['double']
        # NOTE(review): the divisions below rely on Python 2 integer division;
        # under Python 3 they yield floats -- confirm intended interpreter.
        elements_per_cacheline = int(float(self.machine['cacheline size'])) / element_size
        first = first - first % elements_per_cacheline
        last = last - last % elements_per_cacheline + elements_per_cacheline - 1
        return [first, last]
    def calculate_cache_access(self, CPUL1=True):
        # Derive per-level miss/hit/evict counts via a backward cache trace
        # (layer-condition style), then convert the resulting traffic into one
        # bandwidth-limited performance bottleneck per memory-hierarchy level.
        # Returns a dict with 'mem bottlenecks', 'bottleneck level' and
        # 'min performance'.
        results = {'bottleneck level': None, 'mem bottlenecks': []}
        read_offsets = {var_name: dict() for var_name in self.kernel._variables.keys()}
        write_offsets = {var_name: dict() for var_name in self.kernel._variables.keys()}
        # NOTE(review): iteration_offsets is initialized but never used below.
        iteration_offsets = {var_name: dict() for var_name in self.kernel._variables.keys()}
        # TODO how to handle multiple datatypes (with different size)?
        element_size = datatype_size['double']
        elements_per_cacheline = int(float(self.machine['cacheline size'])) / element_size
        loop_order = ''.join(map(lambda l: l[0], self.kernel._loop_stack))
        for var_name in self.kernel._variables.keys():
            var_type, var_dims = self.kernel._variables[var_name]
            # Skip the following access: (they are hopefully kept in registers)
            #   - scalar values
            if var_dims is None:
                continue
            #   - access does not change with inner-most loop index
            writes = filter(
                lambda acs: loop_order[-1] in map(lambda a: a[1], acs),
                self.kernel._destinations.get(var_name, []))
            reads = filter(
                lambda acs: loop_order[-1] in map(lambda a: a[1], acs),
                self.kernel._sources.get(var_name, []))
            # Compile access pattern: group offsets by the index order used.
            for r in reads:
                offset = self._calculate_relative_offset(var_name, r)
                idx_order = self._get_index_order(r)
                read_offsets[var_name].setdefault(idx_order, [])
                read_offsets[var_name][idx_order].append(offset)
            for w in writes:
                offset = self._calculate_relative_offset(var_name, w)
                idx_order = self._get_index_order(w)
                write_offsets[var_name].setdefault(idx_order, [])
                write_offsets[var_name][idx_order].append(offset)
        # With ECM we would do unrolling, but not with roofline
        # initialize misses and hits
        misses = {}
        hits = {}
        evicts = {}
        total_misses = {}
        total_hits = {}
        total_evicts = {}
        memory_hierarchy = deepcopy(self.machine['memory hierarchy'])
        # L1-CPU level is special, because everything is a miss here
        if CPUL1:
            # Insert a pseudo "CPU" level of size 0, so L1 traffic shows up as
            # the first bottleneck candidate.
            memory_hierarchy.insert(0, {
                'cores per group': 1,
                'cycles per cacheline transfer': None,
                'groups': 16,
                'level': 'CPU',
                'bandwidth': None,
                'size per group': 0,
                'threads per group': 2,
            })
        # Check for layer condition towards all cache levels
        for cache_level, cache_info in list(enumerate(memory_hierarchy))[:-1]:
            cache_size = int(float(cache_info['size per group']))
            # cache_cycles is read from the machine file but not used below.
            cache_cycles = cache_info['cycles per cacheline transfer']
            trace_length = 0
            updated_length = True
            # Fixed-point iteration: grow the reuse trace length until no more
            # cache capacity can be claimed.
            while updated_length:
                updated_length = False
                # Initialize cache, misses, hits and evicts for current level
                cache = {}
                misses[cache_level] = {}
                hits[cache_level] = {}
                evicts[cache_level] = {}
                # We consider everything a miss in the beginning
                # TODO here read and writes are treated the same, this implies write-allocate
                #      to support nontemporal stores, this needs to be changed
                # NOTE(review): dict.keys()+dict.keys() concatenates lists and is
                # Python 2 only; Python 3 views do not support '+'.
                for name in read_offsets.keys()+write_offsets.keys():
                    cache[name] = {}
                    misses[cache_level][name] = {}
                    hits[cache_level][name] = {}
                    for idx_order in read_offsets[name].keys()+write_offsets[name].keys():
                        cache[name][idx_order] = Intervals()
                        if cache_level-1 not in misses:
                            # First considered level: every access is a miss.
                            misses[cache_level][name][idx_order] = sorted(
                                read_offsets.get(name, {}).get(idx_order, []) +
                                write_offsets.get(name, {}).get(idx_order, []),
                                reverse=True)
                        else:
                            # Only what missed the previous level arrives here.
                            misses[cache_level][name][idx_order] = list(
                                misses[cache_level-1][name][idx_order])
                        hits[cache_level][name][idx_order] = []
                # Caches are still empty (thus only misses)
                trace_count = 0
                cache_used_size = 0
                # Now we trace the cache access backwards (in time/iterations) and check for hits
                for var_name in misses[cache_level].keys():
                    for idx_order in misses[cache_level][var_name].keys():
                        iter_offset = self._calculate_iteration_offset(
                            var_name, idx_order, loop_order[-1])
                        # Add cache trace
                        for offset in list(misses[cache_level][var_name][idx_order]):
                            # If already present in cache add to hits
                            if offset in cache[var_name][idx_order]:
                                misses[cache_level][var_name][idx_order].remove(offset)
                                # We might have multiple hits on the same offset (e.g in DAXPY)
                                if offset not in hits[cache_level][var_name][idx_order]:
                                    hits[cache_level][var_name][idx_order].append(offset)
                            # Add cache, we can do this since misses are sorted in reverse order of
                            # access and we assume LRU cache replacement policy
                            if iter_offset <= elements_per_cacheline:
                                # iterations overlap, thus we can safely add the whole range
                                cached_first, cached_last = self._expand_to_cacheline_blocks(
                                    offset-iter_offset*trace_length, offset+1)
                                # NOTE(review): '&=' dispatches to Intervals and
                                # appears to accumulate ranges here -- confirm
                                # against the Intervals implementation.
                                cache[var_name][idx_order] &= Intervals(
                                    [cached_first, cached_last+1], sane=True)
                            else:
                                # There is no overlap, we can append the ranges onto one another
                                # TODO optimize this code section (and maybe merge with above)
                                new_cache = [self._expand_to_cacheline_blocks(o, o) for o in range(
                                    offset-iter_offset*trace_length, offset+1, iter_offset)]
                                new_cache = Intervals(*new_cache, sane=True)
                                cache[var_name][idx_order] &= new_cache
                        trace_count += len(cache[var_name][idx_order]._data)
                        cache_used_size += len(cache[var_name][idx_order])*element_size
                # Calculate new possible trace_length according to free space in cache
                # TODO take CL blocked access into account
                # TODO make /2 customizable
                new_trace_length = trace_length + \
                    ((cache_size/2 - cache_used_size)/trace_count)/element_size
                if new_trace_length > trace_length:
                    trace_length = new_trace_length
                    updated_length = True
            # All writes to require the data to be evicted eventually
            evicts[cache_level] = \
                {var_name: dict() for var_name in self.kernel._variables.keys()}
            for name in write_offsets.keys():
                for idx_order in write_offsets[name].keys():
                    evicts[cache_level][name][idx_order] = list(write_offsets[name][idx_order])
            # Compiling stats
            total_misses[cache_level] = sum(map(
                lambda l: sum(map(len, l.values())),
                misses[cache_level].values()))
            total_hits[cache_level] = sum(map(
                lambda l: sum(map(len, l.values())),
                hits[cache_level].values()))
            total_evicts[cache_level] = sum(map(
                lambda l: sum(map(len, l.values())),
                evicts[cache_level].values()))
            # Calculate performance (arithmetic intensity * bandwidth with
            # arithmetic intensity = flops / bytes transfered)
            bytes_transfered = (total_misses[cache_level]+total_evicts[cache_level])*element_size
            total_flops = sum(self.kernel._flops.values())
            arith_intens = float(total_flops)/float(bytes_transfered)
            # choose bw according to cache level and problem
            # first, compile stream counts at current cache level
            # write-allocate is already resolved above
            read_streams = 0
            for var_name in misses[cache_level].keys():
                for idx_order in misses[cache_level][var_name]:
                    read_streams += len(misses[cache_level][var_name][idx_order])
            write_streams = 0
            for var_name in evicts[cache_level].keys():
                for idx_order in evicts[cache_level][var_name]:
                    write_streams += len(evicts[cache_level][var_name][idx_order])
            # second, try to find best fitting kernel (closest to stream seen stream counts):
            # write allocate has to be handled in kernel information (all writes are also reads)
            # TODO support for non-write-allocate architectures
            measurement_kernel = 'load'
            measurement_kernel_info = self.machine['benchmarks']['kernels'][measurement_kernel]
            for kernel_name, kernel_info in self.machine['benchmarks']['kernels'].items():
                # Pick the benchmark kernel with the largest stream counts that
                # are still covered by the kernel's observed streams.
                if (read_streams >= (kernel_info['read streams']['streams'] +
                                     kernel_info['write streams']['streams'] -
                                     kernel_info['read+write streams']['streams']) >
                        measurement_kernel_info['read streams']['streams'] +
                        measurement_kernel_info['write streams']['streams'] -
                        measurement_kernel_info['read+write streams']['streams'] and
                        write_streams >= kernel_info['write streams']['streams'] >
                        measurement_kernel_info['write streams']['streams']):
                    measurement_kernel = kernel_name
                    measurement_kernel_info = kernel_info
            # TODO choose smt and cores:
            threads_per_core, cores = 1, 1
            bw_level = memory_hierarchy[cache_level+1]['level']
            bw_measurements = \
                self.machine['benchmarks']['measurements'][bw_level][threads_per_core]
            assert threads_per_core == bw_measurements['threads per core'], \
                'malformed measurement dictionary in machine file.'
            run_index = bw_measurements['cores'].index(cores)
            bw = bw_measurements['results'][measurement_kernel][run_index]
            # Correct bandwidth due to miss-measurement of write allocation
            # TODO support non-temporal stores and non-write-allocate architectures
            measurement_kernel_info = self.machine['benchmarks']['kernels'][measurement_kernel]
            # Scale the measured bandwidth by the ratio of actual traffic
            # (counting write-allocate reads) to the benchmark's nominal bytes.
            factor = (float(measurement_kernel_info['read streams']['bytes']) +
                      2.0*float(measurement_kernel_info['write streams']['bytes']) -
                      float(measurement_kernel_info['read+write streams']['bytes'])) / \
                     (float(measurement_kernel_info['read streams']['bytes']) +
                      float(measurement_kernel_info['write streams']['bytes']))
            bw = bw * factor
            performance = arith_intens * float(bw)
            results['mem bottlenecks'].append({
                'performance': PrefixedUnit(performance, 'FLOP/s'),
                'level': (memory_hierarchy[cache_level]['level'] + '-' +
                          memory_hierarchy[cache_level+1]['level']),
                'arithmetic intensity': arith_intens,
                'bw kernel': measurement_kernel,
                'bandwidth': bw})
            # Track the slowest (minimum-performance) level as the bottleneck.
            if performance <= results.get('min performance', performance):
                results['bottleneck level'] = len(results['mem bottlenecks'])-1
                results['min performance'] = performance
        return results
    def analyze(self):
        # Run the cache/bandwidth analysis and store it for report().
        self.results = self.calculate_cache_access()
    def conv_perf(self, performance, unit, default='FLOP/s'):
        '''Convert performance (FLOP/s) to other units, such as It/s or cy/CL'''
        if not unit:
            unit = default
        clock = self.machine['clock']
        flops_per_it = sum(self.kernel._flops.values())
        it_s = performance/flops_per_it
        it_s.unit = 'It/s'
        element_size = datatype_size['double']
        elements_per_cacheline = int(float(self.machine['cacheline size'])) / element_size
        cy_cl = clock/it_s*elements_per_cacheline
        cy_cl.unit = 'cy/CL'
        return {'It/s': it_s,
                'cy/CL': cy_cl,
                'FLOP/s': performance}[unit]
    def report(self):
        # Peak double-precision FLOP/s of a single core (machine-file data).
        max_flops = self.machine['clock']*sum(self.machine['FLOPs per cycle']['DP'].values())
        max_flops.unit = "FLOP/s"
        if self._args and self._args.verbose >= 1:
            print('Bottlnecks:')
            print(' level | a. intensity | performance | bandwidth | bandwidth kernel')
            print('--------+--------------+-----------------+--------------+-----------------')
            print(' CPU | | {:>15} | |'.format(
                self.conv_perf(max_flops, self._args.unit)))
            for b in self.results['mem bottlenecks']:
                print('{level:>7} | {arithmetic intensity:>5.2} FLOP/b | {:>15} |'
                      ' {bandwidth:>12} | {bw kernel:<8}'.format(
                          self.conv_perf(b['performance'], self._args.unit), **b))
            print()
        # TODO support SP
        if self.results['min performance'] > max_flops:
            # CPU bound
            print('CPU bound')
            print('{!s} due to CPU max. FLOP/s'.format(max_flops))
        else:
            # Cache or mem bound
            print('Cache or mem bound')
            bottleneck = self.results['mem bottlenecks'][self.results['bottleneck level']]
            print('{!s} due to {} transfer bottleneck (bw with from {} benchmark)'.format(
                self.conv_perf(bottleneck['performance'], self._args.unit),
                bottleneck['level'],
                bottleneck['bw kernel']))
            print('Arithmetic Intensity: {:.2f} FLOP/b'.format(bottleneck['arithmetic intensity']))
class RooflineIACA(Roofline):
    """
    class representation of the Roofline Model (with IACA throughput analysis)

    The in-core ceiling is taken from IACA's throughput/latency analysis of
    the compiled kernel binary instead of the documented peak FLOP rate.
    more info to follow...
    """
    name = "Roofline (with IACA throughput)"
    @classmethod
    def configure_arggroup(cls, parser):
        # No extra CLI arguments beyond the base Roofline model.
        pass
    def __init__(self, kernel, machine, args=None, parser=None):
        """
        *kernel* is a Kernel object
        *machine* describes the machine (cpu, cache and memory) characteristics
        *args* (optional) are the parsed arguments from the command line
        if *args* is given also *parser* has to be provided
        """
        Roofline.__init__(self, kernel, machine, args, parser)
    def analyze(self):
        # Memory bottlenecks WITHOUT the pseudo CPU-L1 level; the in-core part
        # is modeled from IACA output below instead.
        self.results = self.calculate_cache_access(CPUL1=False)
        # For the IACA/CPU analysis we need to compile and assemble
        asm_name = self.kernel.compile(compiler_args=self.machine['icc architecture flags'])
        bin_name = self.kernel.assemble(
            asm_name, iaca_markers=True, asm_block=self._args.asm_block)
        # Get total cycles per loop iteration
        iaca_output = subprocess.check_output(
            ['iaca.sh', '-64', '-arch', self.machine['micro-architecture'], bin_name])
        match = re.search(
            r'^Block Throughput: ([0-9\.]+) Cycles', iaca_output, re.MULTILINE)
        assert match, "Could not find Block Throughput in IACA output."
        block_throughput = float(match.groups()[0])
        # Find ports and cyles per port
        # NOTE(review): filter()/map() results are subscripted below, which is
        # Python 2 only (lists); Python 3 returns iterators.
        ports = filter(lambda l: l.startswith('| Port |'), iaca_output.split('\n'))
        cycles = filter(lambda l: l.startswith('| Cycles |'), iaca_output.split('\n'))
        assert ports and cycles, "Could not find ports/cylces lines in IACA output."
        ports = map(str.strip, ports[0].split('|'))[2:]
        cycles = map(str.strip, cycles[0].split('|'))[2:]
        port_cycles = []
        for i in range(len(ports)):
            if '-' in ports[i] and ' ' in cycles[i]:
                # Divided port column (e.g. "2 - D"): split into two entries.
                subports = map(str.strip, ports[i].split('-'))
                subcycles = filter(bool, cycles[i].split(' '))
                port_cycles.append((subports[0], float(subcycles[0])))
                port_cycles.append((subports[0]+subports[1], float(subcycles[1])))
            elif ports[i] and cycles[i]:
                port_cycles.append((ports[i], float(cycles[i])))
        port_cycles = dict(port_cycles)
        match = re.search(r'^Total Num Of Uops: ([0-9]+)', iaca_output, re.MULTILINE)
        assert match, "Could not find Uops in IACA output."
        uops = float(match.groups()[0])
        # Get latency prediction from IACA
        iaca_latency_output = subprocess.check_output(
            ['iaca.sh', '-64', '-analysis', 'LATENCY', '-arch',
             self.machine['micro-architecture'], bin_name])
        # Get predicted latency
        match = re.search(
            r'^Latency: ([0-9\.]+) Cycles', iaca_latency_output, re.MULTILINE)
        assert match, "Could not find Latency in IACA latency analysis output."
        block_latency = float(match.groups()[0])
        # Normalize to cycles per cacheline
        elements_per_block = self.kernel.blocks[self.kernel.block_idx][1]['loop_increment']
        block_size = elements_per_block*8 # TODO support SP
        block_to_cl_ratio = float(self.machine['cacheline size'])/block_size
        port_cycles = dict(map(lambda i: (i[0], i[1]*block_to_cl_ratio), port_cycles.items()))
        uops = uops*block_to_cl_ratio
        cl_throughput = block_throughput*block_to_cl_ratio
        cl_latency = block_latency*block_to_cl_ratio
        flops_per_element = sum(self.kernel._flops.values())
        # Create result dictionary
        self.results.update({
            'cpu bottleneck': {
                'port cycles': port_cycles,
                'cl throughput': cl_throughput,
                'cl latency': cl_latency,
                'uops': uops,
                'performance throughput':
                    self.machine['clock']/block_throughput*elements_per_block*flops_per_element,
                'performance latency':
                    self.machine['clock']/block_latency*elements_per_block*flops_per_element,
                'IACA output': iaca_output,
                'IACA latency output': iaca_latency_output}})
        self.results['cpu bottleneck']['performance throughput'].unit = 'FLOP/s'
        self.results['cpu bottleneck']['performance latency'].unit = 'FLOP/s'
    def report(self):
        # Select throughput- or latency-based in-core ceiling per --latency.
        if not self._args.latency:
            cpu_flops = PrefixedUnit(
                self.results['cpu bottleneck']['performance throughput'], "FLOP/s")
        else:
            cpu_flops = PrefixedUnit(
                self.results['cpu bottleneck']['performance latency'], "FLOP/s")
        if self._args and self._args.verbose >= 1:
            print('Bottlnecks:')
            print(' level | a. intensity | performance | bandwidth | bandwidth kernel')
            print('--------+--------------+-----------------+--------------+-----------------')
            print(' CPU | | {:>15} | |'.format(
                self.conv_perf(cpu_flops, self._args.unit)))
            for b in self.results['mem bottlenecks']:
                print('{level:>7} | {arithmetic intensity:>5.2} FLOP/b | {:>15} |'
                      ' {bandwidth:>12} | {bw kernel:<8}'.format(
                          self.conv_perf(b['performance'], self._args.unit), **b))
            print()
            print('IACA analisys:')
            if self._args.verbose >= 3:
                print(self.results['cpu bottleneck']['IACA output'])
                print(self.results['cpu bottleneck']['IACA latency output'])
            print({k: v for k, v in self.results['cpu bottleneck'].items() if k not in
                   ['IACA output', 'IACA latency output']})
        # TODO support SP
        if float(self.results['min performance']) > float(cpu_flops):
            # CPU bound
            print('CPU bound')
            print('{!s} due to CPU bottleneck'.format(self.conv_perf(cpu_flops, self._args.unit)))
        else:
            # Cache or mem bound
            print('Cache or mem bound')
            bottleneck = self.results['mem bottlenecks'][self.results['bottleneck level']]
            print('{!s} due to {} transfer bottleneck (bw with from {} benchmark)'.format(
                self.conv_perf(bottleneck['performance'], self._args.unit),
                bottleneck['level'],
                bottleneck['bw kernel']))
            print('Arithmetic Intensity: {:.2f} FLOP/b'.format(bottleneck['arithmetic intensity']))
# (changelog) added roofline support for multicore prediction
#!/usr/bin/env python
from __future__ import print_function
from pprint import pprint
from functools import reduce
import operator
import subprocess
import re
from copy import deepcopy
from kerncraft.intervals import Intervals
from kerncraft.prefixedunit import PrefixedUnit
# Datatype sizes in bytes
datatype_size = {'double': 8, 'float': 4}
def blocking(indices, block_size, initial_boundary=0):
    '''
    Split a list of integers into blocks of *block_size* and return the
    sorted list of distinct block indices that are touched.

    The first block boundary is located at *initial_boundary* (default 0).
    Negative indices map to negative block indices via floor division.

    >>> blocking([0, -1, -2, -3, -4, -5, -6, -7, -8, -9], 8)
    [-2, -1, 0]
    >>> blocking([0], 8)
    [0]
    '''
    blocks = []
    for idx in indices:
        # '//' keeps integer (floor) semantics on both Python 2 and 3;
        # plain '/' would produce float block indices under Python 3.
        bl_idx = (idx-initial_boundary) // block_size
        if bl_idx not in blocks:
            blocks.append(bl_idx)
    blocks.sort()
    return blocks
def flatten_dict(d):
    '''
    Transform a 2d-dict d[i][k] into a new 1d-dict e[(i, k)] with 2-tuple keys.
    '''
    flat = {}
    for outer_key, inner_dict in d.items():
        for inner_key, value in inner_dict.items():
            flat[(outer_key, inner_key)] = value
    return flat
class Roofline:
"""
class representation of the Roofline Model
more info to follow...
"""
name = "Roofline"
@classmethod
def configure_arggroup(cls, parser):
pass
def __init__(self, kernel, machine, args=None, parser=None):
"""
*kernel* is a Kernel object
*machine* describes the machine (cpu, cache and memory) characteristics
*args* (optional) are the parsed arguments from the comand line
"""
self.kernel = kernel
self.machine = machine
self._args = args
self._parser = parser
if args:
# handle CLI info
pass
def _calculate_relative_offset(self, name, access_dimensions):
'''
returns the offset from the iteration center in number of elements and the order of indices
used in access.
'''
offset = 0
base_dims = self.kernel._variables[name][1]
for dim, offset_info in enumerate(access_dimensions):
offset_type, idx_name, dim_offset = offset_info
assert offset_type == 'rel', 'Only relative access to arrays is supported at the moment'
if offset_type == 'rel':
offset += dim_offset*reduce(operator.mul, base_dims[dim+1:], 1)
else:
# should not happen
pass
return offset
def _calculate_iteration_offset(self, name, index_order, loop_index):
'''
returns the offset from one to the next iteration using *loop_index*.
*index_order* is the order used by the access dimensions e.g. 'ijk' corresponse to [i][j][k]
*loop_index* specifies the loop to be used for iterations (this is typically the inner
moste one)
'''
offset = 0
base_dims = self.kernel._variables[name][1]
for dim, index_name in enumerate(index_order):
if loop_index == index_name:
offset += reduce(operator.mul, base_dims[dim+1:], 1)
return offset
def _get_index_order(self, access_dimensions):
'''Returns the order of indices used in *access_dimensions*.'''
return ''.join(map(lambda d: d[1], access_dimensions))
def _expand_to_cacheline_blocks(self, first, last):
'''
Returns first and last values wich align with cacheline blocks, by increasing range.
'''
# TODO how to handle multiple datatypes (with different size)?
element_size = datatype_size['double']
elements_per_cacheline = int(float(self.machine['cacheline size'])) / element_size
first = first - first % elements_per_cacheline
last = last - last % elements_per_cacheline + elements_per_cacheline - 1
return [first, last]
def calculate_cache_access(self, CPUL1=True):
results = {'bottleneck level': None, 'mem bottlenecks': []}
read_offsets = {var_name: dict() for var_name in self.kernel._variables.keys()}
write_offsets = {var_name: dict() for var_name in self.kernel._variables.keys()}
iteration_offsets = {var_name: dict() for var_name in self.kernel._variables.keys()}
# TODO how to handle multiple datatypes (with different size)?
element_size = datatype_size['double']
elements_per_cacheline = int(float(self.machine['cacheline size'])) / element_size
loop_order = ''.join(map(lambda l: l[0], self.kernel._loop_stack))
for var_name in self.kernel._variables.keys():
var_type, var_dims = self.kernel._variables[var_name]
# Skip the following access: (they are hopefully kept in registers)
# - scalar values
if var_dims is None:
continue
# - access does not change with inner-most loop index
writes = filter(
lambda acs: loop_order[-1] in map(lambda a: a[1], acs),
self.kernel._destinations.get(var_name, []))
reads = filter(
lambda acs: loop_order[-1] in map(lambda a: a[1], acs),
self.kernel._sources.get(var_name, []))
# Compile access pattern
for r in reads:
offset = self._calculate_relative_offset(var_name, r)
idx_order = self._get_index_order(r)
read_offsets[var_name].setdefault(idx_order, [])
read_offsets[var_name][idx_order].append(offset)
for w in writes:
offset = self._calculate_relative_offset(var_name, w)
idx_order = self._get_index_order(w)
write_offsets[var_name].setdefault(idx_order, [])
write_offsets[var_name][idx_order].append(offset)
# With ECM we would do unrolling, but not with roofline
# initialize misses and hits
misses = {}
hits = {}
evicts = {}
total_misses = {}
total_hits = {}
total_evicts = {}
memory_hierarchy = deepcopy(self.machine['memory hierarchy'])
# L1-CPU level is special, because everything is a miss here
if CPUL1:
memory_hierarchy.insert(0, {
'cores per group': 1,
'cycles per cacheline transfer': None,
'groups': 16,
'level': 'CPU',
'bandwidth': None,
'size per group': 0,
'threads per group': 2,
})
# Check for layer condition towards all cache levels
for cache_level, cache_info in list(enumerate(memory_hierarchy))[:-1]:
cache_size = int(float(cache_info['size per group']))
cache_cycles = cache_info['cycles per cacheline transfer']
trace_length = 0
updated_length = True
while updated_length:
updated_length = False
# Initialize cache, misses, hits and evicts for current level
cache = {}
misses[cache_level] = {}
hits[cache_level] = {}
evicts[cache_level] = {}
# We consider everythin a miss in the beginning
# TODO here read and writes are treated the same, this implies write-allocate
# to support nontemporal stores, this needs to be changed
for name in read_offsets.keys()+write_offsets.keys():
cache[name] = {}
misses[cache_level][name] = {}
hits[cache_level][name] = {}
for idx_order in read_offsets[name].keys()+write_offsets[name].keys():
cache[name][idx_order] = Intervals()
if cache_level-1 not in misses:
misses[cache_level][name][idx_order] = sorted(
read_offsets.get(name, {}).get(idx_order, []) +
write_offsets.get(name, {}).get(idx_order, []),
reverse=True)
else:
misses[cache_level][name][idx_order] = list(
misses[cache_level-1][name][idx_order])
hits[cache_level][name][idx_order] = []
# Caches are still empty (thus only misses)
trace_count = 0
cache_used_size = 0
# Now we trace the cache access backwards (in time/iterations) and check for hits
for var_name in misses[cache_level].keys():
for idx_order in misses[cache_level][var_name].keys():
iter_offset = self._calculate_iteration_offset(
var_name, idx_order, loop_order[-1])
# Add cache trace
for offset in list(misses[cache_level][var_name][idx_order]):
# If already present in cache add to hits
if offset in cache[var_name][idx_order]:
misses[cache_level][var_name][idx_order].remove(offset)
# We might have multiple hits on the same offset (e.g in DAXPY)
if offset not in hits[cache_level][var_name][idx_order]:
hits[cache_level][var_name][idx_order].append(offset)
# Add cache, we can do this since misses are sorted in reverse order of
# access and we assume LRU cache replacement policy
if iter_offset <= elements_per_cacheline:
# iterations overlap, thus we can savely add the whole range
cached_first, cached_last = self._expand_to_cacheline_blocks(
offset-iter_offset*trace_length, offset+1)
cache[var_name][idx_order] &= Intervals(
[cached_first, cached_last+1], sane=True)
else:
# There is no overlap, we can append the ranges onto one another
# TODO optimize this code section (and maybe merge with above)
new_cache = [self._expand_to_cacheline_blocks(o, o) for o in range(
offset-iter_offset*trace_length, offset+1, iter_offset)]
new_cache = Intervals(*new_cache, sane=True)
cache[var_name][idx_order] &= new_cache
trace_count += len(cache[var_name][idx_order]._data)
cache_used_size += len(cache[var_name][idx_order])*element_size
# Calculate new possible trace_length according to free space in cache
# TODO take CL blocked access into account
# TODO make /2 customizable
new_trace_length = trace_length + \
((cache_size/2 - cache_used_size)/trace_count)/element_size
if new_trace_length > trace_length:
trace_length = new_trace_length
updated_length = True
# All writes to require the data to be evicted eventually
evicts[cache_level] = \
{var_name: dict() for var_name in self.kernel._variables.keys()}
for name in write_offsets.keys():
for idx_order in write_offsets[name].keys():
evicts[cache_level][name][idx_order] = list(write_offsets[name][idx_order])
# Compiling stats
total_misses[cache_level] = sum(map(
lambda l: sum(map(len, l.values())),
misses[cache_level].values()))
total_hits[cache_level] = sum(map(
lambda l: sum(map(len, l.values())),
hits[cache_level].values()))
total_evicts[cache_level] = sum(map(
lambda l: sum(map(len, l.values())),
evicts[cache_level].values()))
# Calculate performance (arithmetic intensity * bandwidth with
# arithmetic intensity = flops / bytes transfered)
bytes_transfered = (total_misses[cache_level]+total_evicts[cache_level])*element_size
total_flops = sum(self.kernel._flops.values())
arith_intens = float(total_flops)/float(bytes_transfered)
# choose bw according to cache level and problem
# first, compile stream counts at current cache level
# write-allocate is allready resolved above
read_streams = 0
for var_name in misses[cache_level].keys():
for idx_order in misses[cache_level][var_name]:
read_streams += len(misses[cache_level][var_name][idx_order])
write_streams = 0
for var_name in evicts[cache_level].keys():
for idx_order in evicts[cache_level][var_name]:
write_streams += len(evicts[cache_level][var_name][idx_order])
# second, try to find best fitting kernel (closest to stream seen stream counts):
# write allocate has to be handled in kernel information (all writes are also reads)
# TODO support for non-write-allocate architectures
measurement_kernel = 'load'
measurement_kernel_info = self.machine['benchmarks']['kernels'][measurement_kernel]
for kernel_name, kernel_info in self.machine['benchmarks']['kernels'].items():
if (read_streams >= (kernel_info['read streams']['streams'] +
kernel_info['write streams']['streams'] -
kernel_info['read+write streams']['streams']) >
measurement_kernel_info['read streams']['streams'] +
measurement_kernel_info['write streams']['streams'] -
measurement_kernel_info['read+write streams']['streams'] and
write_streams >= kernel_info['write streams']['streams'] >
measurement_kernel_info['write streams']['streams']):
measurement_kernel = kernel_name
measurement_kernel_info = kernel_info
# TODO choose smt and cores:
threads_per_core, cores = 1, self._args.cores
bw_level = memory_hierarchy[cache_level+1]['level']
bw_measurements = \
self.machine['benchmarks']['measurements'][bw_level][threads_per_core]
assert threads_per_core == bw_measurements['threads per core'], \
'malformed measurement dictionary in machine file.'
run_index = bw_measurements['cores'].index(cores)
bw = bw_measurements['results'][measurement_kernel][run_index]
# Correct bandwidth due to miss-measurement of write allocation
# TODO support non-temporal stores and non-write-allocate architectures
measurement_kernel_info = self.machine['benchmarks']['kernels'][measurement_kernel]
factor = (float(measurement_kernel_info['read streams']['bytes']) +
2.0*float(measurement_kernel_info['write streams']['bytes']) -
float(measurement_kernel_info['read+write streams']['bytes'])) / \
(float(measurement_kernel_info['read streams']['bytes']) +
float(measurement_kernel_info['write streams']['bytes']))
bw = bw * factor
performance = arith_intens * float(bw)
results['mem bottlenecks'].append({
'performance': PrefixedUnit(performance, 'FLOP/s'),
'level': (memory_hierarchy[cache_level]['level'] + '-' +
memory_hierarchy[cache_level+1]['level']),
'arithmetic intensity': arith_intens,
'bw kernel': measurement_kernel,
'bandwidth': bw})
if performance <= results.get('min performance', performance):
results['bottleneck level'] = len(results['mem bottlenecks'])-1
results['min performance'] = performance
return results
    def analyze(self):
        """Run the roofline cache/memory analysis and store the result dict on self.results."""
        self.results = self.calculate_cache_access()
def conv_perf(self, performance, unit, default='FLOP/s'):
'''Convert performance (FLOP/s) to other units, such as It/s or cy/CL'''
if not unit:
unit = default
clock = self.machine['clock']
flops_per_it = sum(self.kernel._flops.values())
it_s = performance/flops_per_it
it_s.unit = 'It/s'
element_size = datatype_size['double']
elements_per_cacheline = int(float(self.machine['cacheline size'])) / element_size
cy_cl = clock/it_s*elements_per_cacheline
cy_cl.unit = 'cy/CL'
return {'It/s': it_s,
'cy/CL': cy_cl,
'FLOP/s': performance}[unit]
def report(self):
max_flops = self.machine['clock']*self._args.cores*sum(
self.machine['FLOPs per cycle']['DP'].values())
max_flops.unit = "FLOP/s"
if self._args and self._args.verbose >= 1:
print('Bottlnecks:')
print(' level | a. intensity | performance | bandwidth | bandwidth kernel')
print('--------+--------------+-----------------+--------------+-----------------')
print(' CPU | | {:>15} | |'.format(
self.conv_perf(max_flops, self._args.unit)))
for b in self.results['mem bottlenecks']:
print('{level:>7} | {arithmetic intensity:>5.2} FLOP/b | {:>15} |'
' {bandwidth:>12} | {bw kernel:<8}'.format(
self.conv_perf(b['performance'], self._args.unit), **b))
print()
# TODO support SP
if self.results['min performance'] > max_flops:
# CPU bound
print('CPU bound with', self._args.cores, 'core(s)')
print('{!s} due to CPU max. FLOP/s'.format(max_flops))
else:
# Cache or mem bound
print('Cache or mem bound with', self._args.cores, 'core(s)')
bottleneck = self.results['mem bottlenecks'][self.results['bottleneck level']]
print('{!s} due to {} transfer bottleneck (bw with from {} benchmark)'.format(
self.conv_perf(bottleneck['performance'], self._args.unit),
bottleneck['level'],
bottleneck['bw kernel']))
print('Arithmetic Intensity: {:.2f} FLOP/b'.format(bottleneck['arithmetic intensity']))
class RooflineIACA(Roofline):
    """
    class representation of the Roofline Model (with IACA throughput analysis)

    Extends Roofline: the memory-hierarchy analysis is inherited, while the
    in-core (CPU) performance is obtained from Intel IACA instead of the
    machine file's peak-FLOPs numbers.
    """
    name = "Roofline (with IACA throughput)"

    @classmethod
    def configure_arggroup(cls, parser):
        # No model-specific command-line arguments.
        pass

    def __init__(self, kernel, machine, args=None, parser=None):
        """
        *kernel* is a Kernel object

        *machine* describes the machine (cpu, cache and memory) characteristics

        *args* (optional) are the parsed arguments from the command line

        if *args* is given also *parser* has to be provided
        """
        Roofline.__init__(self, kernel, machine, args, parser)

    def analyze(self):
        """Run the memory analysis, then derive the CPU bottleneck from IACA.

        Populates ``self.results`` with the inherited memory-bottleneck data
        plus a 'cpu bottleneck' entry (port cycles, throughput/latency and the
        raw IACA reports).

        NOTE(review): ``map``/``filter`` results are subscripted below, which
        only works on Python 2 -- confirm the supported interpreter version.
        """
        # Memory-side analysis; skip the CPU-L1 part, IACA covers the core.
        self.results = self.calculate_cache_access(CPUL1=False)

        # For the IACA/CPU analysis we need to compile and assemble
        asm_name = self.kernel.compile(compiler_args=self.machine['icc architecture flags'])
        bin_name = self.kernel.assemble(
            asm_name, iaca_markers=True, asm_block=self._args.asm_block)

        # Get total cycles per loop iteration
        iaca_output = subprocess.check_output(
            ['iaca.sh', '-64', '-arch', self.machine['micro-architecture'], bin_name])
        match = re.search(
            r'^Block Throughput: ([0-9\.]+) Cycles', iaca_output, re.MULTILINE)
        assert match, "Could not find Block Throughput in IACA output."
        block_throughput = float(match.groups()[0])

        # Find ports and cyles per port
        ports = filter(lambda l: l.startswith('| Port |'), iaca_output.split('\n'))
        cycles = filter(lambda l: l.startswith('| Cycles |'), iaca_output.split('\n'))
        assert ports and cycles, "Could not find ports/cylces lines in IACA output."
        ports = map(str.strip, ports[0].split('|'))[2:]
        cycles = map(str.strip, cycles[0].split('|'))[2:]
        port_cycles = []
        for i in range(len(ports)):
            if '-' in ports[i] and ' ' in cycles[i]:
                # Table cell covers two sub-ports (e.g. "2-D"); split both the
                # port label and its two cycle counts.
                subports = map(str.strip, ports[i].split('-'))
                subcycles = filter(bool, cycles[i].split(' '))
                port_cycles.append((subports[0], float(subcycles[0])))
                port_cycles.append((subports[0]+subports[1], float(subcycles[1])))
            elif ports[i] and cycles[i]:
                port_cycles.append((ports[i], float(cycles[i])))
        port_cycles = dict(port_cycles)

        match = re.search(r'^Total Num Of Uops: ([0-9]+)', iaca_output, re.MULTILINE)
        assert match, "Could not find Uops in IACA output."
        uops = float(match.groups()[0])

        # Get latency prediction from IACA
        iaca_latency_output = subprocess.check_output(
            ['iaca.sh', '-64', '-analysis', 'LATENCY', '-arch',
             self.machine['micro-architecture'], bin_name])
        # Get predicted latency
        match = re.search(
            r'^Latency: ([0-9\.]+) Cycles', iaca_latency_output, re.MULTILINE)
        assert match, "Could not find Latency in IACA latency analysis output."
        block_latency = float(match.groups()[0])

        # Normalize to cycles per cacheline
        elements_per_block = self.kernel.blocks[self.kernel.block_idx][1]['loop_increment']
        block_size = elements_per_block*8  # TODO support SP (8 bytes assumes DP)
        block_to_cl_ratio = float(self.machine['cacheline size'])/block_size

        port_cycles = dict(map(lambda i: (i[0], i[1]*block_to_cl_ratio), port_cycles.items()))
        uops = uops*block_to_cl_ratio
        cl_throughput = block_throughput*block_to_cl_ratio
        cl_latency = block_latency*block_to_cl_ratio
        flops_per_element = sum(self.kernel._flops.values())

        # Create result dictionary
        self.results.update({
            'cpu bottleneck': {
                'port cycles': port_cycles,
                'cl throughput': cl_throughput,
                'cl latency': cl_latency,
                'uops': uops,
                'performance throughput':
                    self.machine['clock']/block_throughput*elements_per_block*flops_per_element
                    *self._args.cores,
                'performance latency':
                    self.machine['clock']/block_latency*elements_per_block*flops_per_element
                    *self._args.cores,
                'IACA output': iaca_output,
                'IACA latency output': iaca_latency_output}})
        self.results['cpu bottleneck']['performance throughput'].unit = 'FLOP/s'
        self.results['cpu bottleneck']['performance latency'].unit = 'FLOP/s'

    def report(self):
        """Print the IACA-based roofline report and the CPU-vs-memory verdict."""
        # Pick the CPU bound from throughput or latency, per the CLI flag.
        if not self._args.latency:
            cpu_flops = PrefixedUnit(
                self.results['cpu bottleneck']['performance throughput'], "FLOP/s")
        else:
            cpu_flops = PrefixedUnit(
                self.results['cpu bottleneck']['performance latency'], "FLOP/s")
        if self._args and self._args.verbose >= 1:
            print('Bottlnecks:')
            print(' level | a. intensity | performance | bandwidth | bandwidth kernel')
            print('--------+--------------+-----------------+--------------+-----------------')
            print(' CPU | | {:>15} | |'.format(
                self.conv_perf(cpu_flops, self._args.unit)))
            for b in self.results['mem bottlenecks']:
                print('{level:>7} | {arithmetic intensity:>5.2} FLOP/b | {:>15} |'
                      ' {bandwidth:>12} | {bw kernel:<8}'.format(
                          self.conv_perf(b['performance'], self._args.unit), **b))
            print()
            print('IACA analisys:')
            if self._args.verbose >= 3:
                # Raw IACA reports only at the highest verbosity.
                print(self.results['cpu bottleneck']['IACA output'])
                print(self.results['cpu bottleneck']['IACA latency output'])
            print({k: v for k, v in self.results['cpu bottleneck'].items() if k not in
                   ['IACA output', 'IACA latency output']})

        # TODO support SP
        if float(self.results['min performance']) > float(cpu_flops):
            # CPU bound
            print('CPU bound with', self._args.cores, 'core(s)')
            print('{!s} due to CPU bottleneck'.format(self.conv_perf(cpu_flops, self._args.unit)))
        else:
            # Cache or mem bound
            print('Cache or mem bound with', self._args.cores, 'core(s)')
            bottleneck = self.results['mem bottlenecks'][self.results['bottleneck level']]
            print('{!s} due to {} transfer bottleneck (bw with from {} benchmark)'.format(
                self.conv_perf(bottleneck['performance'], self._args.unit),
                bottleneck['level'],
                bottleneck['bw kernel']))
            print('Arithmetic Intensity: {:.2f} FLOP/b'.format(bottleneck['arithmetic intensity']))
|
# -*- coding: utf-8 -*-
#
# Copyright 2014 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sqlite3
import os
class DBHandle(object):
    '''
    Handle for the *volatile* database, which serves the purpose of caching
    the inspected data. This database can be destroyed or corrupted, so it should
    be simply re-created from scratch.
    '''
    # Singleton instance (class-private via name mangling).
    __instance = None

    def __new__(cls, *args, **kwargs):
        '''
        Keep singleton.
        '''
        if not cls.__instance:
            # Bug fix: object.__new__ accepts no extra arguments on Python 3;
            # constructor arguments are consumed by __init__, not __new__.
            cls.__instance = super(DBHandle, cls).__new__(cls)
        return cls.__instance

    def __init__(self, path):
        '''
        Constructor.

        :param path: filesystem path of the SQLite database file.
        '''
        self._path = path
        self.connection = None
        self.cursor = None

    def open(self, new=False):
        '''
        Init the database, if required.

        When *new* is True any existing database file is removed first.
        No-op if a connection is already open. Creates the inspector
        tables on a fresh database.
        '''
        if self.connection and self.cursor:
            return
        if new and os.path.exists(self._path):
            os.unlink(self._path)  # As simple as that
        self.connection = sqlite3.connect(self._path)
        self.cursor = self.connection.cursor()
        # If any table already exists, assume the schema is in place.
        self.cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
        if self.cursor.fetchall():
            return
        self.cursor.execute("CREATE TABLE inspector_pkg (id INTEGER PRIMARY KEY, name CHAR(255))")
        self.cursor.execute("CREATE TABLE inspector_pkg_cfg_files (id INTEGER PRIMARY KEY, pkgid INTEGER, path CHAR(4096))")
        self.cursor.execute("CREATE TABLE inspector_pkg_cfg_diffs (id INTEGER PRIMARY KEY, cfgid INTEGER, diff TEXT)")
        self.connection.commit()

    def flush(self, table):
        '''
        Flush the table.

        NOTE: *table* is interpolated into the SQL statement; it must come
        from trusted (internal) callers, never from user input.
        '''
        self.cursor.execute("DELETE FROM " + table)
        self.connection.commit()

    def close(self):
        '''
        Close the database connection.
        '''
        if self.cursor is not None and self.connection is not None:
            self.connection.close()
            self.cursor = self.connection = None
Create a generic DBHandleBase class so it can be set up properly for each precise use case.
# -*- coding: utf-8 -*-
#
# Copyright 2014 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sqlite3
import os
class DBHandleBase(object):
    '''
    Base handle for a *volatile* SQLite cache database. The database holds
    only re-creatable inspection data, so on corruption it can simply be
    deleted and rebuilt. Subclasses populate ``init_queries`` with the
    CREATE TABLE statements that define their schema.
    '''

    def __init__(self, path):
        '''
        Constructor: remember the database location; no connection is made yet.
        '''
        self._path = path
        self.connection = None
        self.cursor = None
        self.init_queries = list()

    def open(self, new=False):
        '''
        Connect to the database, (re)creating it when *new* is set.
        No-op when a connection is already established.
        '''
        if self.connection and self.cursor:
            return
        if new and os.path.exists(self._path):
            os.unlink(self._path)  # As simple as that
        self.connection = sqlite3.connect(self._path)
        self.cursor = self.connection.cursor()
        # A fresh database has no tables yet: run the schema setup queries.
        self.cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
        if not self.cursor.fetchall():
            for query in self.init_queries:
                self.cursor.execute(query)
            self.connection.commit()

    def flush(self, table):
        '''
        Remove all rows from *table* and commit.
        '''
        self.cursor.execute("DELETE FROM " + table)
        self.connection.commit()

    def close(self):
        '''
        Close the database connection, if one is active.
        '''
        if self.connection is not None and self.cursor is not None:
            self.connection.close()
            self.cursor = self.connection = None
|
# -*- coding: utf-8 -*-
"""
Created on Aug 2016
Script to save HCP data into bids format. folowing script creates directory struc
ture and renames all files as per BIDS standard.
@author: Suyash B
"""
import os, glob, shutil
import re, json, numpy
import nibabel as ni
def touch(fname):
    """Unix-style touch: update *fname*'s timestamps, creating it if absent."""
    if not os.path.exists(fname):
        open(fname, 'a').close()
    else:
        os.utime(fname, None)
def FourDimImg(image, destinationpath_3d, outputfilename):
    """Split a 4D NIfTI *image* into 3D volumes saved under *destinationpath_3d*.

    Output files are named '<outputfilename><i>.nii.gz' with a 1-based index,
    which yields BIDS-style names (e.g. outputfilename='sub-285345_run-02_magnitude').
    The source 4D file is deleted afterwards and the last 3D volume object is
    returned.
    """
    volumes = ni.four_to_three(ni.load(image))
    name_pattern = outputfilename + '{:01d}.nii.gz'
    for index, volume in enumerate(volumes, start=1):
        ni.save(volume, os.path.join(destinationpath_3d, name_pattern.format(index)))
    os.remove(image)
    return volume
def hcp2bids(input_dir, output_dir, s_link = False):
    """Convert an HCP 'unprocessed/3T' tree into a BIDS-style layout.

    Parameters:
        input_dir:  root directory containing one sub-directory per HCP subject.
        output_dir: directory where the BIDS tree is created (one directory per
                    subject with fmap/, func/, anat/ and dwi/ sub-directories).
        s_link:     when True, symlink func/anat/dwi files from input_dir
                    instead of moving them (fieldmaps are always copied).

    NOTE(review): several renaming steps below index path components at fixed
    positions (e.g. ``split('/')[3]``), which only works for a specific
    input/output directory depth relative to the working directory -- fragile;
    verify against the caller.
    """
    import os
    #get hcp subject directory paths
    sub_dir = [os.path.join(input_dir,o) for o in os.listdir(input_dir) if os.path.isdir(os.path.join(input_dir,o))]
    for subjects in sub_dir:
        subj_raw = os.path.join(subjects, 'unprocessed/3T/')
        print(subj_raw)
        #path_bids = '/scratch/04275/suyashdb/hcp/%s/' %subject
        #output directory for the subject
        bids = os.path.join(output_dir, subjects.split('/')[-1])
        #bids = subjects + '/bids/'
        if not os.path.exists(bids):
            os.mkdir(bids)
        #output directory paths for fmap, func, anat and dwi
        fmap = os.path.join(bids, 'fmap/')
        func = os.path.join(bids, 'func/')
        anat = os.path.join(bids, 'anat/')
        dwi = os.path.join(bids,'dwi/')
        if not os.path.exists(fmap):
            os.mkdir(fmap)
        if not os.path.exists(func):
            os.mkdir(func)
        if not os.path.exists(anat):
            os.mkdir(anat)
        if not os.path.exists(dwi):
            os.mkdir(dwi)
        '''Get raw Nifti files from the HCP input directory and move
        it to the output directory'''
        # Fieldmaps are always copied (never moved/symlinked), keeping raw data intact.
        fieldmaplist = glob.glob(os.path.join(subj_raw, '*/*FieldMap*'))
        for fieldmap in fieldmaplist:
            parentdir = os.path.split(os.path.dirname(fieldmap))[1]
            dst = fmap + parentdir +'_'+ os.path.split(fieldmap)[1]
            shutil.copy(fieldmap, dst)
        print("done with fMAPs for --", subjects)
        func_list = glob.glob(os.path.join(subj_raw, 't*/*tfMRI*'))
        for func_data in func_list:
            parentdir = os.path.split(os.path.dirname(func_data))[1]
            dst = func + parentdir +'_'+ os.path.split(func_data)[1]
            if s_link:
                if not os.path.islink(dst):
                    os.symlink(os.path.realpath(func_data), dst)
            else:
                shutil.move(func_data, dst)
        print("done with func for --", subjects)
        # SBRef files from every modality land in func/ first; DWI SBRefs are
        # relocated to dwi/ further below.
        sbref_list = glob.glob(os.path.join(subj_raw, '*/*SBRef*'))
        for sbref in sbref_list:
            parentdir = os.path.split(os.path.dirname(sbref))[1]
            dst = func + parentdir +'_'+ os.path.split(sbref)[1]
            if s_link:
                if not os.path.islink(dst):
                    os.symlink(os.path.realpath(sbref), dst)
            else:
                shutil.move(sbref, dst)
        print("done with SBREF's for --", subjects)
        anat_list = glob.glob(os.path.join(subj_raw, 'T*/*3T_T*'))
        for anat_data in anat_list:
            parentdir = os.path.split(os.path.dirname(anat_data))[1]
            dst = anat + parentdir +'_'+ os.path.split(anat_data)[1]
            if s_link:
                if not os.path.islink(dst):
                    os.symlink(os.path.realpath(anat_data), dst)
            else:
                shutil.move(anat_data, dst)
        print("done with Anat for --", subjects)
        dwi_list = glob.glob(os.path.join(subj_raw, '*/*DWI*'))
        for dwi_data in dwi_list:
            parentdir = os.path.split(os.path.dirname(dwi_data))[1]
            dst = dwi + parentdir +'_'+ os.path.split(dwi_data)[1]
            if s_link:
                if not os.path.islink(dst):
                    os.symlink(os.path.realpath(dwi_data), dst)
            else:
                shutil.move(dwi_data, dst)
        print("done with DWI's for --", subjects)
        dwi_subj_raw = os.path.join(subjects, 'bids')
        # Move DWI SBRef files that ended up in func/ into dwi/.
        dwi_sbref_list = glob.glob(os.path.join(func,'*DWI*SBRef*'))
        for sbref in dwi_sbref_list:
            parentdir = os.path.split(os.path.dirname(sbref))[1]
            # NOTE(review): this yields '<dwi>/_<name>' (leading underscore in
            # the basename) -- confirm that is intended.
            dst = dwi +'_'+ os.path.split(sbref)[1]
            shutil.move(sbref, dst)
        ''' Sort nifti files and Rename all files as per bids'''
        '''Sort func files and rename all per bids'''
        nifti_func_list = glob.glob(os.path.join(func, '*fMRI*.nii.gz'))
        print("\npath where nifti files are searched -", os.path.join(func, '*fMRI*.nii.gz'))
        print(len(nifti_func_list))
        for nifti_func_file in nifti_func_list:
            # NOTE(review): fixed positional indices ([3], [1]) into the path
            # assume a specific output_dir depth -- fragile; TODO confirm.
            filename_split = nifti_func_file.split('/')
            task = filename_split[3].split('_')[1]
            if 'LR' in filename_split[3]:
                acq = 'LR'
            else:
                acq = 'RL'
            sub = filename_split[1].lower()
            # Resting-state scans carry the run number in the task name
            # (REST1/REST2): split it off into a separate 'run' entity.
            if task in ['REST1', 'REST2']:
                #m = re.match(r"([a-zA-Z]+)([0-9]+)",task)
                #run = m.group(2)
                run = '0' + str(task[-1])
                task = str(task[:-1])
            # print("This is task form rest loop - ", task)
            tail = filename_split[-1].split('_')[-1]
            # NOTE(review): 'REST2' can no longer appear here (it was renamed
            # to 'REST' above) -- the second list element looks redundant.
            if task not in ['REST', 'REST2']:
                if 'SBRef' in tail:
                    filename = 'sub-' + sub + '_' + 'task-' + task + '_' + 'acq-' + acq + '_' + tail.lower()
                    #filename = 'sub-' + sub + '_' + 'task-' + task + '_' + tail.lower()
                else:
                    filename = 'sub-' + sub + '_' + 'task-' + task + '_' + 'acq-' + acq + '_bold' + tail[-7:]
                    #filename = 'sub-' + sub + '_' + 'task-' + task + '_bold' + tail[-7:]
            # rep_time = { "EMOTION" : 2.26,
            # "GAMBLING" : 3.20,
            # "LANGUAGE" : 3.95,
            # "SOCIAL" : 3.45,
            # "WM" : 5.01,
            # "MOTOR" : 3.5,
            # "RELATIONAL" : 2.95
            # }
            # bold_json_dict = {}
            # bold_json_dict["RepetitionTime"] = 0.72
            # bold_json_dict["TaskName"] = task
            # touch(func + filename[:-6]+ 'json')
            # json_file = func + filename[:-6]+ 'json'
            # with open(json_file, 'w') as editfile:
            # json.dump(bold_json_dict, editfile, indent = 4)
            else:
                #filename = 'sub-' + sub + '_' + 'task-' + task + '_' + 'acq-' + acq +'_'+ 'run-' + run + '_' + tail.lower()
                filename = 'sub-' + sub + '_' + 'task-' + task + '_' +'run-' + run + '_' + tail.lower()
            path_filename = func + filename
            print(path_filename)
            # Only move when the target name is still free (first file wins).
            if not os.path.isfile(path_filename):
                basedir = os.path.dirname(path_filename)
                if not os.path.exists(basedir):
                    os.makedirs(basedir)
                shutil.move(nifti_func_file, path_filename)
            #touch(path_filename[:-6]+ 'json')
        ''' sort anat files and rename it '''
        #anat = '/Users/suyashdb/Documents/hcp2bids/hcpdata/285446/bids/anat'
        anat_files_list = glob.glob(os.path.join(anat, '*T*.nii.gz'))
        print("\npath where nifti files are searched -", os.path.join(anat, '*T*.nii.gz'))
        print(len(anat_files_list))
        for anat_file in anat_files_list:
            # NOTE(review): same fixed-index path parsing as above -- fragile.
            filename_split = anat_file.split('/')
            sub = filename_split[1]
            modality = filename_split[3].split('_')[0]
            tail = filename_split[3][-7:]
            run = str(1)
            filename = 'sub-' + sub + '_' + 'run-0' + run + '_' + modality + tail
            path_filename = anat + filename
            # Bump the run number until the target filename is free.
            while os.path.isfile(path_filename):
                run = str(int(run) + 1)
                filename = 'sub-' + sub + '_' + 'run-0' + run + '_' + modality + tail
                path_filename = anat + filename
            print(path_filename)
            shutil.move(anat_file, path_filename)
            #touch(path_filename[:-6]+ 'json')
        #########
        #Sort all nii.gz files in dwi and fmaps '''
        dwi_files_list = glob.glob(os.path.join(dwi, 'Diffusion*DWI*.nii.gz'))
        print("\npath where nifti files are searched -", os.path.join(dwi, 'Diffusion*DWI*.nii.gz'))
        for dwi_file in dwi_files_list:
            filename_split = dwi_file.split('/')
            sub = filename_split[1]
            # acq combines the diffusion weighting and phase-encode direction
            # tokens of the original name (e.g. 'dir95lr').
            acq = filename_split[-1].split('_')[4].lower() + filename_split[-1].split('_')[5][:2].lower()
            if "SBRef.nii.gz" in filename_split[-1].split('_'):
                # filename = 'sub-' + sub + '_' + 'task-' + 'DWI' + '_' + 'sbref' + tail
                # path_filename = func + filename
                # shutil.move(dwi_file, path_filename)
                # print(path_filename)
                continue
            modality = 'dwi'
            tail = filename_split[-1][-7:]
            filename = 'sub-' + sub + '_' + 'acq-' + acq + '_' + modality + tail
            path_filename = dwi + filename
            print(path_filename)
            if not os.path.isfile(path_filename):
                basedir = os.path.dirname(path_filename)
                if not os.path.exists(basedir):
                    os.makedirs(basedir)
                shutil.move(dwi_file, path_filename)
            # Write the BIDS sidecar JSON; phase-encode direction is derived
            # from the LR/RL suffix of the original filename.
            dwi_json_dict = {}
            dwi_json_dict["EffectiveEchoSpacing"] = 0.00078
            dwi_json_dict["TotalReadoutTime"] = 0.60
            dwi_json_dict["EchoTime"] = 0.08950
            if dwi_file[-9:-7] == 'LR':
                dwi_json_dict["PhaseEncodingDirection"] = "i-"
            else:
                dwi_json_dict["PhaseEncodingDirection"] = "i"
            touch(path_filename[:-6]+ 'json')
            json_file = path_filename[:-6]+ 'json'
            with open(json_file, 'w') as editfile:
                json.dump( dwi_json_dict, editfile, indent = 4)
            # Carry the bval/bvec gradient tables along with the image.
            shutil.move((dwi_file[:-6]+'bval'), (path_filename[:-6] + 'bval'))
            shutil.move((dwi_file[:-6]+'bvec'), (path_filename[:-6] + 'bvec'))
        dwisbref_files_list = glob.glob(os.path.join(dwi, '*DWI*SBRef.nii.gz'))
        print("\npath where nifti files are searched -", os.path.join(dwi, '*DWI*SBRef.nii.gz'))
        for dwi_file in dwisbref_files_list:
            filename_split = dwi_file.split('/')
            sub = filename_split[1]
            acq = filename_split[-1].split('_')[-3].lower() + filename_split[-1].split('_')[-2].lower()
            modality = 'sbref'
            tail = filename_split[-1][-7:]
            filename = 'sub-' + sub + '_' + 'acq-' + acq + '_' + modality + tail
            path_filename = dwi + filename
            shutil.move(dwi_file, path_filename)
            print(path_filename)
            dwi_json_dict = {}
            dwi_json_dict["EffectiveEchoSpacing"] = 0.00078
            dwi_json_dict["TotalReadoutTime"] = 0.60
            dwi_json_dict["EchoTime"] = 0.08950
            if filename_split[-1].split('_')[-2][:2] == 'LR':
                dwi_json_dict["PhaseEncodingDirection"] = "i-"
            else:
                dwi_json_dict["PhaseEncodingDirection"] = "i"
            touch(path_filename[:-6]+ 'json')
            json_file = path_filename[:-6]+ 'json'
            with open(json_file, 'w') as editfile:
                json.dump( dwi_json_dict, editfile, indent = 4)
        ''' Fmaps'''
        # Spin-echo fieldmaps are numbered with a plain counter ('dir-<n>').
        counter = 1
        fmap_files_list = glob.glob(os.path.join(fmap, '*SpinEchoFieldMap*.nii.gz'))
        print("\npath where nifti files are searched -", os.path.join(fmap, '*SpinEchoFieldMap*.nii.gz'))
        print(len(fmap_files_list))
        for fmapfile in fmap_files_list:
            fmap_file = os.path.split(fmapfile)[1]
            filename_split = fmap_file.split('_')
            task = filename_split[1]
            acq = filename_split[2]
            sub = filename_split[3].lower()
            #print("Task:", task, "\tAcq:", acq, "\tSub:", sub)
            if task in ['REST1', 'REST2']:
                #m = re.match(r"([a-zA-Z]+)([0-9]+)",task)
                #run = m.group(2)
                run = '0' + str(task[-1])
                task = str(task[:-1])
                print("This is task form rest loop - ", task)
            tail = filename_split[-1]
            # Reconstruct the functional filename this fieldmap belongs to.
            if task not in ['REST', 'REST2']:
                if 'SBRef' in tail:
                    filename = 'sub-' + sub + '_' + 'task-' + task + '_' + 'acq-' + acq + '_' + tail.lower()
                else:
                    filename = 'sub-' + sub + '_' + 'task-' + task + '_' + 'acq-' + acq + '_bold' + tail[-7:]
            else:
                filename = 'sub-' + sub + '_' + 'task-' + task + '_' + 'acq-' + acq +'_'+ 'run-' + run + '_' + tail.lower()
            print('intended_for - ',filename)
            filename = 'func/'+ filename
            fmap_json_dict = {}
            fmap_json_dict["intended_for"] = filename
            fmap_json_dict["TotalReadoutTime"] = 0.08346
            if fmapfile[-9:-7] == 'LR':
                fmap_json_dict["PhaseEncodingDirection"] = "i-"
            else:
                fmap_json_dict["PhaseEncodingDirection"] = "i"
            #intended_for ={"IntendedFor", filename}
            dir = counter
            hcpfmapfilename = 'sub-' + sub + '_'+ 'dir-' + str(dir) + '_' + 'epi.nii.gz'
            print('hcpfmap_filename',hcpfmapfilename)
            path_filename = fmap + hcpfmapfilename
            shutil.move(fmapfile, path_filename)
            touch(path_filename[:-6]+ 'json')
            json_file = path_filename[:-6]+ 'json'
            with open(json_file, 'w') as editfile:
                json.dump( fmap_json_dict, editfile, indent = 4)
            counter = counter + 1
        #fmap_magnitude and phasediff
        # Gradient-echo fieldmaps: each Magnitude image has a matching Phase
        # image, which is renamed to the BIDS 'phasediff' counterpart.
        fmap_files_list = glob.glob(os.path.join(fmap, 'T*Magnitude.nii.gz'))
        print("\npath where nifti files are searched -", os.path.join(fmap, 'T*Magnitude.nii.gz'))
        run = 1
        for fmapfile in fmap_files_list:
            print(fmapfile)
            fmap_file = os.path.split(fmapfile)[1]
            filename_split = fmap_file.split('_')
            acq = filename_split[1]
            sub = filename_split[2]
            run_number = filename_split[1][-1]
            filename = 'sub-' + sub + '_' + 'run-0' + str(run) + '_magnitude'+ '.nii.gz'
            path_filename = os.path.join(fmap, filename)
            print(path_filename)
            shutil.move(fmapfile, path_filename)
            #looking into phasediff image
            filename_phasediff = 'sub-' + sub + '_' + 'run-0' + str(run) + '_phasediff' + '.nii.gz'
            filename_phasediff_path = os.path.join(fmap,filename_phasediff)
            print(filename_phasediff_path)
            shutil.move(fmapfile.replace('Magnitude', 'Phase'), filename_phasediff_path)
            filename_phasediff_json = filename_phasediff[:-6]+ 'json'
            filename_phasediff_json_path = os.path.join(fmap, filename_phasediff_json)
            touch(filename_phasediff_json_path)
            intended_for_filename = 'anat/sub-' + sub + '_' + 'run-0' + run_number + '_' + filename_split[0] + '.nii.gz'
            print('intended_for - ',intended_for_filename)
            fmap_phasdiff_json_dict = {}
            fmap_phasdiff_json_dict["intended_for"] = intended_for_filename
            # Echo times differ between the T1w- and T2w-associated fieldmaps.
            if filename_split[0] == 'T1w':
                fmap_phasdiff_json_dict["EchoTime1"] = 0.00214
                fmap_phasdiff_json_dict["EchoTime2"] = 0.00460
            if filename_split[0] == 'T2w':
                fmap_phasdiff_json_dict["EchoTime1"] = 0.00565
                fmap_phasdiff_json_dict["EchoTime2"] = 0.00811
            with open(filename_phasediff_json_path, 'w') as editfile:
                json.dump( fmap_phasdiff_json_dict, editfile, indent = 4)
            run = run + 1
    print("\n\nBIDS format data is at -", output_dir)
## main.py
##get input and output dir from user
# hcp2bids('/work/04275/suyashdb/lonestar/test_hcp1/', '/work/04275/suyashdb/lonestar/test_output/')
# output_dir = '/work/04275/suyashdb/lonestar/test_output/'
def arrange_subjects(output_dir):
    """Rename every subject directory in *output_dir* to the BIDS 'sub-<ID>' form."""
    for entry in os.listdir(output_dir):
        subject_path = os.path.join(output_dir, entry)
        # Only directories are subjects; skip loose files.
        if not os.path.isdir(subject_path):
            continue
        target = os.path.join(output_dir, 'sub-' + subject_path.split('/')[-1])
        shutil.move(subject_path, target)
#task json files
def json_toplevel(output_dir):
    """Create the top-level BIDS task sidecar JSON files in *output_dir*.

    For every task an acq-RL/acq-LR pair of ``_bold`` and ``_sbref`` sidecars
    is created and filled with the shared HCP scan parameters plus the
    per-file TaskName and PhaseEncodingDirection.

    Bug fixes vs. the previous version:
      * TaskName was hard-coded to "Gambling" for every task; it is now
        derived from each filename.
      * PhaseEncodingDirection no longer leaks a stale value (or raises
        NameError) when a globbed file matches neither acq-LR nor acq-RL.
    """
    tasks = ['EMOTION', 'GAMBLING', 'LANGUAGE', 'RELATIONAL', 'MOTOR', 'SOCIAL', 'WM', 'REST']
    for task in tasks:
        for pattern in ('task-%s_acq-RL_bold.json', 'task-%s_acq-LR_bold.json',
                        'task-%s_acq-RL_sbref.json', 'task-%s_acq-LR_sbref.json'):
            # Ensure the file exists before it is (re)written below.
            filename = os.path.join(output_dir, pattern % task)
            if not os.path.exists(filename):
                open(filename, 'a').close()
    json_task_files = glob.glob(os.path.join(output_dir, 'task*.json'))
    # Scan parameters common to all HCP functional runs.
    bold_json_dict = {
        "RepetitionTime": 0.72,
        "EchoTime": 0.058,
        "EffectiveEchoSpacing": 0.00058,
        "MagneticFieldStrength": 3.0,
        "Manufacturer": "Siemens",
        "ManufacturerModelName": "Skyra"
    }
    for json_file in json_task_files:
        z = bold_json_dict.copy()
        # TaskName comes from the filename itself ('task-<NAME>_...').
        task_match = re.search(r'task-([A-Za-z0-9]+)_', os.path.basename(json_file))
        if task_match is not None:
            z["TaskName"] = task_match.group(1)
        if re.search('acq-LR', json_file) is not None:
            z["PhaseEncodingDirection"] = "i"
        elif re.search('acq-RL', json_file) is not None:
            z["PhaseEncodingDirection"] = "i-"
        with open(json_file, 'w') as editfile:
            json.dump(z, editfile, indent = 4)
def main():
    """Command-line entry point: parse arguments and run the HCP->BIDS pipeline."""
    import argparse
    import sys

    class MyParser(argparse.ArgumentParser):
        # Print the full help text on argument errors instead of the terse default.
        def error(self, message):
            sys.stderr.write('error: %s\n' % message)
            self.print_help()
            sys.exit(2)

    parser = MyParser(
        description="HCP to BIDS converter. This software sucks because Chris wrote it. But it's better because Nino's fixing it.",
        fromfile_prefix_chars='@',
    )
    parser.add_argument(
        "input_dir",
        help="Location of the root of your HCP dataset directory",
        metavar="input_dir")
    parser.add_argument(
        "output_dir",
        help="Directory where BIDS data will be stored",
        metavar="output_dir")
    parser.add_argument(
        "-s",
        help="Type t for true and f for false. If true, symlinks will be " + \
             "created for files from input_dir to output_dir and put the" + \
             " symlinks in BIDS format. If false, files from input_dir will be " + \
             "moved to output_dir and then put into BIDS format.",
        metavar = "--symlink",
        default = 'f'
    )
    parser.add_argument(
        "-g",
        help="Path to a text file with participant_id to GUID mapping. You will need to use the "
             "GUID Tool (https://ndar.nih.gov/contribute.html) to generate GUIDs for your participants.",
        metavar="--guid_mapping",
        default = '.'
    )
    args = parser.parse_args()
    input_dir = vars(args)['input_dir']
    # Bug fix: the dest of the "-g" option is 'g' (metavar does not set dest),
    # so vars(args)['guid_mapping'] raised KeyError.
    guid_map = vars(args)['g']
    output_dir = vars(args)['output_dir']
    if vars(args)['s'] == 't':
        symlink = True
    else:
        symlink = False
    print("Input Directory: ", input_dir)
    print("GUID Mapping", guid_map)
    print("Output Directory: ", output_dir)
    print("Symlink: ", symlink)
    print("\nMetadata extraction complete.")
    print("\nRunning hcp2bids")
    hcp2bids(input_dir, output_dir, s_link = symlink)
    print("\nRunning arrange_subjects")
    arrange_subjects(output_dir)
    print("\nRunning json_toplevel")
    json_toplevel(output_dir)

if __name__ == '__main__':
    main()
Fixed the filename-component indexing (positional path indices replaced with negative indices).
# -*- coding: utf-8 -*-
"""
Created on Aug 2016
Script to save HCP data into bids format. folowing script creates directory struc
ture and renames all files as per BIDS standard.
@author: Suyash B
"""
import os, glob, shutil
import re, json, numpy
import nibabel as ni
def touch(fname):
    """Unix-style touch: update *fname*'s timestamps, creating it if absent."""
    if not os.path.exists(fname):
        open(fname, 'a').close()
    else:
        os.utime(fname, None)
def FourDimImg(image, destinationpath_3d, outputfilename):
    """Split a 4D NIfTI *image* into 3D volumes saved under *destinationpath_3d*.

    Output files are named '<outputfilename><i>.nii.gz' with a 1-based index,
    which yields BIDS-style names (e.g. outputfilename='sub-285345_run-02_magnitude').
    The source 4D file is deleted afterwards and the last 3D volume object is
    returned.
    """
    volumes = ni.four_to_three(ni.load(image))
    name_pattern = outputfilename + '{:01d}.nii.gz'
    for index, volume in enumerate(volumes, start=1):
        ni.save(volume, os.path.join(destinationpath_3d, name_pattern.format(index)))
    os.remove(image)
    return volume
def hcp2bids(input_dir, output_dir, s_link = False):
import os
#get hcp subject directory paths
sub_dir = [os.path.join(input_dir,o) for o in os.listdir(input_dir) if os.path.isdir(os.path.join(input_dir,o))]
for subjects in sub_dir:
subj_raw = os.path.join(subjects, 'unprocessed/3T/')
print(subj_raw)
#path_bids = '/scratch/04275/suyashdb/hcp/%s/' %subject
#output directory for the subject
bids = os.path.join(output_dir, subjects.split('/')[-1])
#bids = subjects + '/bids/'
if not os.path.exists(bids):
os.mkdir(bids)
#output directory paths for fmap, func, anat and dwi
fmap = os.path.join(bids, 'fmap/')
func = os.path.join(bids, 'func/')
anat = os.path.join(bids, 'anat/')
dwi = os.path.join(bids,'dwi/')
if not os.path.exists(fmap):
os.mkdir(fmap)
if not os.path.exists(func):
os.mkdir(func)
if not os.path.exists(anat):
os.mkdir(anat)
if not os.path.exists(dwi):
os.mkdir(dwi)
'''Get raw Nifti files from the HCP input directory and move
it to the output directory'''
fieldmaplist = glob.glob(os.path.join(subj_raw, '*/*FieldMap*'))
for fieldmap in fieldmaplist:
parentdir = os.path.split(os.path.dirname(fieldmap))[1]
dst = fmap + parentdir +'_'+ os.path.split(fieldmap)[1]
shutil.copy(fieldmap, dst)
print("done with fMAPs for --", subjects)
func_list = glob.glob(os.path.join(subj_raw, 't*/*tfMRI*'))
for func_data in func_list:
parentdir = os.path.split(os.path.dirname(func_data))[1]
dst = func + parentdir +'_'+ os.path.split(func_data)[1]
if s_link:
if not os.path.islink(dst):
os.symlink(os.path.realpath(func_data), dst)
else:
shutil.move(func_data, dst)
print("done with func for --", subjects)
sbref_list = glob.glob(os.path.join(subj_raw, '*/*SBRef*'))
for sbref in sbref_list:
parentdir = os.path.split(os.path.dirname(sbref))[1]
dst = func + parentdir +'_'+ os.path.split(sbref)[1]
if s_link:
if not os.path.islink(dst):
os.symlink(os.path.realpath(sbref), dst)
else:
shutil.move(sbref, dst)
print("done with SBREF's for --", subjects)
anat_list = glob.glob(os.path.join(subj_raw, 'T*/*3T_T*'))
for anat_data in anat_list:
parentdir = os.path.split(os.path.dirname(anat_data))[1]
dst = anat + parentdir +'_'+ os.path.split(anat_data)[1]
if s_link:
if not os.path.islink(dst):
os.symlink(os.path.realpath(anat_data), dst)
else:
shutil.move(anat_data, dst)
print("done with Anat for --", subjects)
dwi_list = glob.glob(os.path.join(subj_raw, '*/*DWI*'))
for dwi_data in dwi_list:
parentdir = os.path.split(os.path.dirname(dwi_data))[1]
dst = dwi + parentdir +'_'+ os.path.split(dwi_data)[1]
if s_link:
if not os.path.islink(dst):
os.symlink(os.path.realpath(dwi_data), dst)
else:
shutil.move(dwi_data, dst)
print("done with DWI's for --", subjects)
dwi_subj_raw = os.path.join(subjects, 'bids')
dwi_sbref_list = glob.glob(os.path.join(func,'*DWI*SBRef*'))
for sbref in dwi_sbref_list:
parentdir = os.path.split(os.path.dirname(sbref))[1]
dst = dwi +'_'+ os.path.split(sbref)[1]
shutil.move(sbref, dst)
''' Sort nifti files and Rename all files as per bids'''
'''Sort func files and rename all per bids'''
nifti_func_list = glob.glob(os.path.join(func, '*fMRI*.nii.gz'))
print("\npath where nifti files are searched -", os.path.join(func, '*fMRI*.nii.gz'))
print(len(nifti_func_list))
for nifti_func_file in nifti_func_list:
filename_split = nifti_func_file.split('/')
task = filename_split[-1].split('_')[1]
if 'LR' in filename_split[-1]:
acq = 'LR'
else:
acq = 'RL'
sub = filename_split[-3].lower()
if task in ['REST1', 'REST2']:
#m = re.match(r"([a-zA-Z]+)([0-9]+)",task)
#run = m.group(2)
run = '0' + str(task[-1])
task = str(task[:-1])
# print("This is task form rest loop - ", task)
tail = filename_split[-1].split('_')[-1]
if task not in ['REST', 'REST2']:
if 'SBRef' in tail:
filename = 'sub-' + sub + '_' + 'task-' + task + '_' + 'acq-' + acq + '_' + tail.lower()
#filename = 'sub-' + sub + '_' + 'task-' + task + '_' + tail.lower()
else:
filename = 'sub-' + sub + '_' + 'task-' + task + '_' + 'acq-' + acq + '_bold' + tail[-7:]
#filename = 'sub-' + sub + '_' + 'task-' + task + '_bold' + tail[-7:]
# rep_time = { "EMOTION" : 2.26,
# "GAMBLING" : 3.20,
# "LANGUAGE" : 3.95,
# "SOCIAL" : 3.45,
# "WM" : 5.01,
# "MOTOR" : 3.5,
# "RELATIONAL" : 2.95
# }
# bold_json_dict = {}
# bold_json_dict["RepetitionTime"] = 0.72
# bold_json_dict["TaskName"] = task
# touch(func + filename[:-6]+ 'json')
# json_file = func + filename[:-6]+ 'json'
# with open(json_file, 'w') as editfile:
# json.dump(bold_json_dict, editfile, indent = 4)
else:
#filename = 'sub-' + sub + '_' + 'task-' + task + '_' + 'acq-' + acq +'_'+ 'run-' + run + '_' + tail.lower()
filename = 'sub-' + sub + '_' + 'task-' + task + '_' +'run-' + run + '_' + tail.lower()
path_filename = func + filename
print(path_filename)
if not os.path.isfile(path_filename):
basedir = os.path.dirname(path_filename)
if not os.path.exists(basedir):
os.makedirs(basedir)
shutil.move(nifti_func_file, path_filename)
#touch(path_filename[:-6]+ 'json')
''' sort anat files and rename it '''
#anat = '/Users/suyashdb/Documents/hcp2bids/hcpdata/285446/bids/anat'
anat_files_list = glob.glob(os.path.join(anat, '*T*.nii.gz'))
print("\npath where nifti files are searched -", os.path.join(anat, '*T*.nii.gz'))
print(len(anat_files_list))
for anat_file in anat_files_list:
filename_split = anat_file.split('/')
sub = filename_split[-3]
modality = filename_split[-1].split('_')[0]
tail = filename_split[-1][-7:]
run = str(1)
filename = 'sub-' + sub + '_' + 'run-0' + run + '_' + modality + tail
path_filename = anat + filename
while os.path.isfile(path_filename):
run = str(int(run) + 1)
filename = 'sub-' + sub + '_' + 'run-0' + run + '_' + modality + tail
path_filename = anat + filename
print(path_filename)
shutil.move(anat_file, path_filename)
#touch(path_filename[:-6]+ 'json')
#########
#Sort all nii.gz files in dwi and fmaps '''
dwi_files_list = glob.glob(os.path.join(dwi, 'Diffusion*DWI*.nii.gz'))
print("\npath where nifti files are searched -", os.path.join(dwi, 'Diffusion*DWI*.nii.gz'))
for dwi_file in dwi_files_list:
filename_split = dwi_file.split('/')
sub = filename_split[-3]
acq = filename_split[-1].split('_')[4].lower() + filename_split[-1].split('_')[5][:2].lower()
if "SBRef.nii.gz" in filename_split[-1].split('_'):
# filename = 'sub-' + sub + '_' + 'task-' + 'DWI' + '_' + 'sbref' + tail
# path_filename = func + filename
# shutil.move(dwi_file, path_filename)
# print(path_filename)
continue
modality = 'dwi'
tail = filename_split[-1][-7:]
filename = 'sub-' + sub + '_' + 'acq-' + acq + '_' + modality + tail
path_filename = dwi + filename
print(path_filename)
if not os.path.isfile(path_filename):
basedir = os.path.dirname(path_filename)
if not os.path.exists(basedir):
os.makedirs(basedir)
shutil.move(dwi_file, path_filename)
dwi_json_dict = {}
dwi_json_dict["EffectiveEchoSpacing"] = 0.00078
dwi_json_dict["TotalReadoutTime"] = 0.60
dwi_json_dict["EchoTime"] = 0.08950
if dwi_file[-9:-7] == 'LR':
dwi_json_dict["PhaseEncodingDirection"] = "i-"
else:
dwi_json_dict["PhaseEncodingDirection"] = "i"
touch(path_filename[:-6]+ 'json')
json_file = path_filename[:-6]+ 'json'
with open(json_file, 'w') as editfile:
json.dump( dwi_json_dict, editfile, indent = 4)
shutil.move((dwi_file[:-6]+'bval'), (path_filename[:-6] + 'bval'))
shutil.move((dwi_file[:-6]+'bvec'), (path_filename[:-6] + 'bvec'))
dwisbref_files_list = glob.glob(os.path.join(dwi, '*DWI*SBRef.nii.gz'))
print("\npath where nifti files are searched -", os.path.join(dwi, '*DWI*SBRef.nii.gz'))
for dwi_file in dwisbref_files_list:
filename_split = dwi_file.split('/')
sub = filename_split[-3]
acq = filename_split[-1].split('_')[-3].lower() + filename_split[-1].split('_')[-2].lower()
modality = 'sbref'
tail = filename_split[-1][-7:]
filename = 'sub-' + sub + '_' + 'acq-' + acq + '_' + modality + tail
path_filename = dwi + filename
shutil.move(dwi_file, path_filename)
print(path_filename)
dwi_json_dict = {}
dwi_json_dict["EffectiveEchoSpacing"] = 0.00078
dwi_json_dict["TotalReadoutTime"] = 0.60
dwi_json_dict["EchoTime"] = 0.08950
if filename_split[-1].split('_')[-2][:2] == 'LR':
dwi_json_dict["PhaseEncodingDirection"] = "i-"
else:
dwi_json_dict["PhaseEncodingDirection"] = "i"
touch(path_filename[:-6]+ 'json')
json_file = path_filename[:-6]+ 'json'
with open(json_file, 'w') as editfile:
json.dump( dwi_json_dict, editfile, indent = 4)
''' Fmaps'''
counter = 1
fmap_files_list = glob.glob(os.path.join(fmap, '*SpinEchoFieldMap*.nii.gz'))
print("\npath where nifti files are searched -", os.path.join(fmap, '*SpinEchoFieldMap*.nii.gz'))
print(len(fmap_files_list))
for fmapfile in fmap_files_list:
fmap_file = os.path.split(fmapfile)[1]
filename_split = fmap_file.split('_')
task = filename_split[1]
acq = filename_split[2]
sub = filename_split[3].lower()
#print("Task:", task, "\tAcq:", acq, "\tSub:", sub)
if task in ['REST1', 'REST2']:
#m = re.match(r"([a-zA-Z]+)([0-9]+)",task)
#run = m.group(2)
run = '0' + str(task[-1])
task = str(task[:-1])
print("This is task form rest loop - ", task)
tail = filename_split[-1]
if task not in ['REST', 'REST2']:
if 'SBRef' in tail:
filename = 'sub-' + sub + '_' + 'task-' + task + '_' + 'acq-' + acq + '_' + tail.lower()
else:
filename = 'sub-' + sub + '_' + 'task-' + task + '_' + 'acq-' + acq + '_bold' + tail[-7:]
else:
filename = 'sub-' + sub + '_' + 'task-' + task + '_' + 'acq-' + acq +'_'+ 'run-' + run + '_' + tail.lower()
print('intended_for - ',filename)
filename = 'func/'+ filename
fmap_json_dict = {}
fmap_json_dict["intended_for"] = filename
fmap_json_dict["TotalReadoutTime"] = 0.08346
if fmapfile[-9:-7] == 'LR':
fmap_json_dict["PhaseEncodingDirection"] = "i-"
else:
fmap_json_dict["PhaseEncodingDirection"] = "i"
#intended_for ={"IntendedFor", filename}
dir = counter
hcpfmapfilename = 'sub-' + sub + '_'+ 'dir-' + str(dir) + '_' + 'epi.nii.gz'
print('hcpfmap_filename',hcpfmapfilename)
path_filename = fmap + hcpfmapfilename
shutil.move(fmapfile, path_filename)
touch(path_filename[:-6]+ 'json')
json_file = path_filename[:-6]+ 'json'
with open(json_file, 'w') as editfile:
json.dump( fmap_json_dict, editfile, indent = 4)
counter = counter + 1
#fmap_magnitude and phasediff
fmap_files_list = glob.glob(os.path.join(fmap, 'T*Magnitude.nii.gz'))
print("\npath where nifti files are searched -", os.path.join(fmap, 'T*Magnitude.nii.gz'))
run = 1
for fmapfile in fmap_files_list:
print(fmapfile)
fmap_file = os.path.split(fmapfile)[1]
filename_split = fmap_file.split('_')
acq = filename_split[1]
sub = filename_split[2]
run_number = filename_split[1][-1]
filename = 'sub-' + sub + '_' + 'run-0' + str(run) + '_magnitude'+ '.nii.gz'
path_filename = os.path.join(fmap, filename)
print(path_filename)
shutil.move(fmapfile, path_filename)
#looking into phasediff image
filename_phasediff = 'sub-' + sub + '_' + 'run-0' + str(run) + '_phasediff' + '.nii.gz'
filename_phasediff_path = os.path.join(fmap,filename_phasediff)
print(filename_phasediff_path)
shutil.move(fmapfile.replace('Magnitude', 'Phase'), filename_phasediff_path)
filename_phasediff_json = filename_phasediff[:-6]+ 'json'
filename_phasediff_json_path = os.path.join(fmap, filename_phasediff_json)
touch(filename_phasediff_json_path)
intended_for_filename = 'anat/sub-' + sub + '_' + 'run-0' + run_number + '_' + filename_split[0] + '.nii.gz'
print('intended_for - ',intended_for_filename)
fmap_phasdiff_json_dict = {}
fmap_phasdiff_json_dict["intended_for"] = intended_for_filename
if filename_split[0] == 'T1w':
fmap_phasdiff_json_dict["EchoTime1"] = 0.00214
fmap_phasdiff_json_dict["EchoTime2"] = 0.00460
if filename_split[0] == 'T2w':
fmap_phasdiff_json_dict["EchoTime1"] = 0.00565
fmap_phasdiff_json_dict["EchoTime2"] = 0.00811
with open(filename_phasediff_json_path, 'w') as editfile:
json.dump( fmap_phasdiff_json_dict, editfile, indent = 4)
run = run + 1
print("\n\nBIDS format data is at -", output_dir)
## main.py
##get input and output dir from user
# hcp2bids('/work/04275/suyashdb/lonestar/test_hcp1/', '/work/04275/suyashdb/lonestar/test_output/')
# output_dir = '/work/04275/suyashdb/lonestar/test_output/'
def arrange_subjects(output_dir):
    """Rename every subject directory under *output_dir* to the BIDS
    ``sub-<label>`` convention.

    Parameters
    ----------
    output_dir : str
        Root of the converted dataset, containing one directory per subject.
    """
    # Only directories are candidates; plain files (e.g. task-*.json) are left alone.
    subject_dirs = [os.path.join(output_dir, entry)
                    for entry in os.listdir(output_dir)
                    if os.path.isdir(os.path.join(output_dir, entry))]
    for subject_path in subject_dirs:
        # os.path.basename is the idiomatic (and trailing-slash-safe)
        # replacement for splitting the path on '/' by hand.
        label = os.path.basename(subject_path)
        if label.startswith('sub-'):
            # Already BIDS-named: renaming again would produce "sub-sub-X",
            # so make the function safe to re-run.
            continue
        shutil.move(subject_path, os.path.join(output_dir, 'sub-' + label))
#task json files
def json_toplevel(output_dir):
    """Create and populate the dataset-level task JSON sidecars required by BIDS.

    For every HCP task, four sidecar files are created (bold/sbref crossed
    with RL/LR phase-encoding acquisitions), then each existing task JSON is
    filled with the shared scan parameters plus a PhaseEncodingDirection
    derived from its ``acq-`` label.

    Parameters
    ----------
    output_dir : str
        Root of the BIDS dataset.
    """
    tasks = ['EMOTION', 'GAMBLING', 'LANGUAGE', 'RELATIONAL', 'MOTOR', 'SOCIAL', 'WM', 'REST']
    # Create the 4 sidecar files (bold/sbref x RL/LR) per task.
    for task in tasks:
        for suffix in ('bold', 'sbref'):
            for acq in ('RL', 'LR'):
                touch(os.path.join(output_dir, 'task-%s_acq-%s_%s.json' % (task, acq, suffix)))
    json_task_files = glob.glob(os.path.join(output_dir, 'task*.json'))
    # Scan parameters shared by every task file.
    # NOTE(review): "TaskName": "Gambling" is written into *every* task file;
    # it probably should be derived from the filename -- confirm before relying on it.
    bold_json_dict = {
        "RepetitionTime": 0.72,
        "EchoTime": 0.058,
        "EffectiveEchoSpacing": 0.00058,
        "MagneticFieldStrength": 3.0,
        "TaskName": "Gambling",
        "Manufacturer": "Siemens",
        "ManufacturerModelName": "Skyra"
    }
    for json_file in json_task_files:
        # Reset per file: the original left `addline` unbound for a file
        # matching neither acq- pattern (UnboundLocalError on the first such
        # file, stale carry-over from the previous iteration afterwards).
        addline = {}
        if re.search('acq-LR', json_file) is not None:
            print("its LR ")
            addline = {"PhaseEncodingDirection": "i"}
        if re.search('acq-RL', json_file) is not None:
            print("its RL ")
            addline = {"PhaseEncodingDirection": "i-"}
        z = bold_json_dict.copy()
        z.update(addline)
        print("updated", json_file)
        with open(json_file, 'w') as editfile:
            json.dump(z, editfile, indent=4)
def main():
    """Command-line entry point: parse arguments and run the conversion."""
    import argparse
    import sys

    class MyParser(argparse.ArgumentParser):
        """ArgumentParser that prints full usage help after an argument error."""
        def error(self, message):
            sys.stderr.write('error: %s\n' % message)
            self.print_help()
            sys.exit(2)

    parser = MyParser(
        description="HCP to BIDS converter. This software sucks because Chris wrote it. But it's better because Nino's fixing it.",
        fromfile_prefix_chars='@',
    )
    # TODO Specify your real parameters here.
    parser.add_argument(
        "input_dir",
        help="Location of the root of your HCP dataset directory",
        metavar="input_dir")
    parser.add_argument(
        "output_dir",
        help="Directory where BIDS data will be stored",
        metavar="output_dir")
    parser.add_argument(
        "-s",
        help="Type t for true and f for false. If true, symlinks will be "
             "created for files from input_dir to output_dir and put the"
             " symlinks in BIDS format. If false, files from input_dir will be "
             "moved to output_dir and then put into BIDS format.",
        metavar="--symlink",
        default='f')
    parser.add_argument(
        "-g",
        help="Path to a text file with participant_id to GUID mapping. You will need to use the "
             "GUID Tool (https://ndar.nih.gov/contribute.html) to generate GUIDs for your participants.",
        metavar="--guid_mapping",
        default='.')

    opts = vars(parser.parse_args())
    input_dir = opts['input_dir']
    guid_map = opts['g']
    output_dir = opts['output_dir']
    symlink = opts['s'] == 't'

    print("Input Directory: ", input_dir)
    print("GUID Mapping", guid_map)
    print("Output Directory: ", output_dir)
    print("Symlink: ", symlink)
    print("\nMetadata extraction complete.")
    print("\nRunning hcp2bids")
    hcp2bids(input_dir, output_dir, s_link=symlink)
    print("\nRunning arrange_subjects")
    arrange_subjects(output_dir)
    print("\nRunning json_toplevel")
    json_toplevel(output_dir)


if __name__ == '__main__':
    main()
|
from glob import glob
import os
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files import File
from django.core.files.images import ImageFile
from nose.tools import eq_
from questions.models import Question
from sumo.tests import TestCase
from upload.models import ImageAttachment
from upload.tasks import (_scale_dimensions, _create_image_thumbnail,
generate_thumbnail)
class ScaleDimensionsTestCase(TestCase):
    """Tests for the _scale_dimensions helper used by thumbnail generation."""

    def _check(self, expected_w, expected_h, *dims):
        """Assert that _scale_dimensions(*dims) yields (expected_w, expected_h)."""
        width, height = _scale_dimensions(*dims)
        eq_(expected_w, width)
        eq_(expected_h, height)

    def test_scale_dimensions_default(self):
        """A square image of exact size is not scaled."""
        ts = settings.THUMBNAIL_SIZE
        self._check(ts, ts, ts, ts, ts)

    def test_small(self):
        """A small image is not scaled."""
        ts = settings.THUMBNAIL_SIZE / 2
        self._check(ts, ts, ts, ts)

    def test_width_large(self):
        """An image with large width is scaled to width=MAX."""
        ts = 120
        self._check(ts, 38, ts * 3 + 10, ts - 1, ts)

    def test_large_height(self):
        """An image with large height is scaled to height=MAX."""
        ts = 150
        self._check(71, ts, ts - 2, ts * 2 + 9, ts)

    def test_large_both_height(self):
        """An image with both large is scaled to the largest - height."""
        ts = 150
        self._check(60, ts, ts * 2 + 13, ts * 5 + 30, ts)

    def test_large_both_width(self):
        """An image with both large is scaled to the largest - width."""
        ts = 150
        self._check(ts, 31, ts * 20 + 8, ts * 4 + 36, ts)
class CreateThumbnailTestCase(TestCase):
    """Tests for _create_image_thumbnail."""

    def test_create_image_thumbnail_default(self):
        """A thumbnail is created from an image file."""
        generated = ImageFile(
            _create_image_thumbnail('apps/upload/tests/media/test.jpg'))
        with open('apps/upload/tests/media/test_thumb.jpg') as f:
            reference = ImageFile(f)
            eq_(reference.width, generated.width)
            eq_(reference.height, generated.height)
class GenerateThumbnail(TestCase):
    """Tests for the generate_thumbnail task."""
    fixtures = ['users.json', 'questions.json']

    def setUp(self):
        super(GenerateThumbnail, self).setUp()
        self.user = User.objects.all()[0]
        self.obj = Question.objects.all()[0]

    def tearDown(self):
        # Remove any attachments a test created.
        ImageAttachment.objects.all().delete()

    def _image_with_thumbnail(self):
        """Save a fresh upload for self.obj and generate its thumbnail."""
        attachment = ImageAttachment(content_object=self.obj, creator=self.user)
        with open('apps/upload/tests/media/test.jpg') as f:
            wrapped = File(f)
            attachment.file.save(wrapped.name, wrapped, save=True)
        generate_thumbnail(attachment, 'file', 'thumbnail')
        return attachment

    def test_generate_thumbnail_default(self):
        """generate_thumbnail creates a thumbnail."""
        image = self._image_with_thumbnail()
        eq_(90, image.thumbnail.width)
        eq_(120, image.thumbnail.height)

    def test_generate_thumbnail_twice(self):
        """generate_thumbnail replaces old thumbnail."""
        image = self._image_with_thumbnail()
        thumb_dir = os.path.dirname(image.thumbnail.path)
        pattern = thumb_dir + '/*.jpg'
        count_before = len(glob(pattern))
        # We only have one file before.
        assert count_before == 1, 'Expected 1, got %s' % count_before
        generate_thumbnail(image, 'file', 'thumbnail')
        # And only one file after.
        assert len(glob(pattern)) == 1
Make test_generate_thumbnail_twice less brittle. [bug 628367]
import os
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files import File
from django.core.files.images import ImageFile
from nose.tools import eq_
from questions.models import Question
from sumo.tests import TestCase
from upload.models import ImageAttachment
from upload.tasks import (_scale_dimensions, _create_image_thumbnail,
generate_thumbnail)
class ScaleDimensionsTestCase(TestCase):
    """Exercises _scale_dimensions across the scale/no-scale boundary."""

    def _assert_scaled(self, expected_w, expected_h, *dims):
        """Assert _scale_dimensions(*dims) returns (expected_w, expected_h)."""
        width, height = _scale_dimensions(*dims)
        eq_(expected_w, width)
        eq_(expected_h, height)

    def test_scale_dimensions_default(self):
        """A square image of exact size is not scaled."""
        size = settings.THUMBNAIL_SIZE
        self._assert_scaled(size, size, size, size, size)

    def test_small(self):
        """A small image is not scaled."""
        size = settings.THUMBNAIL_SIZE / 2
        self._assert_scaled(size, size, size, size)

    def test_width_large(self):
        """An image with large width is scaled to width=MAX."""
        size = 120
        self._assert_scaled(size, 38, size * 3 + 10, size - 1, size)

    def test_large_height(self):
        """An image with large height is scaled to height=MAX."""
        size = 150
        self._assert_scaled(71, size, size - 2, size * 2 + 9, size)

    def test_large_both_height(self):
        """An image with both large is scaled to the largest - height."""
        size = 150
        self._assert_scaled(60, size, size * 2 + 13, size * 5 + 30, size)

    def test_large_both_width(self):
        """An image with both large is scaled to the largest - width."""
        size = 150
        self._assert_scaled(size, 31, size * 20 + 8, size * 4 + 36, size)
class CreateThumbnailTestCase(TestCase):
    """Checks that thumbnail creation matches the reference thumbnail."""

    def test_create_image_thumbnail_default(self):
        """A thumbnail is created from an image file."""
        thumb = ImageFile(
            _create_image_thumbnail('apps/upload/tests/media/test.jpg'))
        with open('apps/upload/tests/media/test_thumb.jpg') as fixture:
            expected = ImageFile(fixture)
            eq_(expected.width, thumb.width)
            eq_(expected.height, thumb.height)
class GenerateThumbnail(TestCase):
    """Tests for the generate_thumbnail task (path-stability variant)."""
    fixtures = ['users.json', 'questions.json']

    def setUp(self):
        super(GenerateThumbnail, self).setUp()
        self.user = User.objects.all()[0]
        self.obj = Question.objects.all()[0]

    def tearDown(self):
        # Clean out attachments created by the tests.
        ImageAttachment.objects.all().delete()

    def _image_with_thumbnail(self):
        """Upload the fixture image for self.obj and generate its thumbnail."""
        attachment = ImageAttachment(content_object=self.obj, creator=self.user)
        with open('apps/upload/tests/media/test.jpg') as f:
            wrapped = File(f)
            attachment.file.save(wrapped.name, wrapped, save=True)
        generate_thumbnail(attachment, 'file', 'thumbnail')
        return attachment

    def test_generate_thumbnail_default(self):
        """generate_thumbnail creates a thumbnail."""
        image = self._image_with_thumbnail()
        eq_(90, image.thumbnail.width)
        eq_(120, image.thumbnail.height)

    def test_generate_thumbnail_twice(self):
        """generate_thumbnail replaces old thumbnail."""
        image = self._image_with_thumbnail()
        first_path = image.thumbnail.path
        # The thumbnail exists.
        assert os.path.exists(first_path)
        assert os.path.isfile(first_path)
        generate_thumbnail(image, 'file', 'thumbnail')
        second_path = image.thumbnail.path
        # Regeneration reuses the same file path rather than adding a file.
        eq_(first_path, second_path)
        assert os.path.exists(second_path)
        assert os.path.isfile(second_path)
|
#!/usr/bin/env python
#
# OmsAgentForLinux Extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import os
import io
import datetime
from datetime import datetime, timedelta
import time
import string
import traceback
import shutil
import sys
import json
import uuid
from threading import Thread
import re
import hashlib
from omsagent import run_command_and_log
from omsagent import RestartOMSAgentServiceCommand
"""
Write now hardcode memory threshold to watch for to 20 %.
If agent is using more than 20% of memory it is definitely very high.
In future we may want to set it based on customer configuration.
"""
# Constants.
MemoryThresholdToWatchFor = 20
OmsAgentPidFile = "/var/opt/microsoft/omsagent/run/omsagent.pid"
OmsAgentLogFile = "/var/opt/microsoft/omsagent/log/omsagent.log"
reg_ex = re.compile('([0-9]{4}-[0-9]{2}-[0-9]{2}.*)\[(\w+)\]:(.*)')
"""
We can add to the list below with more error messages to identify non recoverable errors.
"""
ErrorStatements = ["Errono::ENOSPC error=", "Fatal error, can not clear buffer file", "No space left on the device"]
class SelfMonitorInfo:
    """
    Tracks omsagent self-monitoring state: consecutive heartbeat misses and
    consecutive high-memory observations.
    """

    def __init__(self):
        self._consecutive_error_count = 0
        self._last_reset_success = True
        self._error_count = 0
        self._memory_used_in_percent = 0
        self._consecutive_high_memory_usage = 0

    def reset(self):
        """Clear both the error and the memory counters."""
        self._consecutive_error_count = 0
        self._consecutive_high_memory_usage = 0
        self._memory_used_in_percent = 0

    def reset_error_info(self):
        """Clear only the heartbeat-miss counter."""
        self._consecutive_error_count = 0

    def increment_heartbeat_missing_count(self):
        self._consecutive_error_count += 1

    def crossed_error_threshold(self):
        """True once more than three consecutive heartbeats were missed."""
        return self._consecutive_error_count > 3

    def corssed_memory_threshold(self):
        """True once memory stayed high for more than three checks.

        (The name keeps its historical typo; callers depend on it.)
        """
        return self._consecutive_high_memory_usage > 3

    def increment_high_memory_count(self):
        self._consecutive_high_memory_usage += 1

    def reset_high_memory_count(self):
        self._consecutive_high_memory_usage = 0

    def current_status(self):
        """
        Summarize health as "Green"/"Yellow"/"Red" (Python 2.6 has no enum).
        """
        errors = self._consecutive_error_count
        high_mem = self._consecutive_high_memory_usage
        if errors == 0 and high_mem == 0:
            return "Green"
        if errors < 3 and high_mem < 3:
            return "Yellow"
        return "Red"
class LogFileMarker:
    """
    Remembers where reading of the omsagent log stopped: a byte offset and
    the checksum of the file's first line (used to detect log rotation).
    """

    def __init__(self):
        # Delegate to reset_marker so the "empty" state is defined once.
        self.reset_marker()

    def reset_marker(self):
        """Forget the saved position and first-line checksum."""
        self._last_pos = 0
        self._last_crc = ""
class Watcher:
"""
A class that handles periodic monitoring activities.
"""
def __init__(self, hutil_error, hutil_log, log_to_console=False):
"""
Constructor.
:param hutil_error: Error logging function (e.g., hutil.error). This is not a stream.
:param hutil_log: Normal logging function (e.g., hutil.log). This is not a stream.
:param log_to_console: Indicates whether to log any issues to /dev/console or not.
"""
self._hutil_error = hutil_error
self._hutil_log = hutil_log
self._log_to_console = log_to_console
self._consecutive_error_count = 0
self._consecutive_restarts_due_to_error = 0
    def _do_log_to_console_if_enabled(self, message):
        """
        Write 'message' to console. Stolen from waagent LogToCon().

        No-op unless the instance was created with log_to_console=True.
        Console write failures are reported through the error logger rather
        than raised, so logging can never take the caller down.
        """
        if self._log_to_console:
            try:
                with open('/dev/console', 'w') as console:
                    # Strip non-printable characters before writing.
                    # NOTE(review): Python 2 idiom -- on Python 3, filter()
                    # returns an iterator and encode() returns bytes, so the
                    # two lines below would fail; confirm the target runtime.
                    message = filter(lambda x: x in string.printable, message)
                    console.write(message.encode('ascii', 'ignore') + '\n')
            except IOError as e:
                self._hutil_error('Error writing to console. Exception={0}'.format(e))
def write_waagent_event(self, event):
offset = str(int(time.time() * 1000000))
temp_fn = '/var/lib/waagent/events/'+str(uuid.uuid4())
with open(temp_fn,'w+') as fh:
fh.write(event)
fn_template = '/var/lib/waagent/events/{}.tld'
fn = fn_template.format(offset)
while os.path.isfile(fn):
offset += 1
fn = fn_template.format(offset)
shutil.move(temp_fn, fn)
self._hutil_log(fn)
def create_telemetry_event(self, operation, operation_success, message, duration):
template = """ {{
"eventId": 1,
"providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975",
"parameters": [
{{
"name": "Name",
"value": "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux"
}},
{{
"name": "Version",
"value": "1.7.5"
}},
{{
"name": "Operation",
"value": "{}"
}},
{{
"name": "OperationSuccess",
"value": {}
}},
{{
"name": "Message",
"value": "{}"
}},
{{
"name": "Duration",
"value": {}
}}
]
}}"""
operation_success_as_string = str(operation_success).lower()
formatted_message = message.replace("\n", "\\n").replace("\t", "\\t").replace('"', '\"')
return template.format(operation, operation_success_as_string, formatted_message, duration)
def upload_telemetry(self):
status_files = [
"/var/opt/microsoft/omsagent/log/ODSIngestion.status",
"/var/opt/microsoft/omsagent/log/ODSIngestionBlob.status",
"/var/opt/microsoft/omsagent/log/ODSIngestionAPI.status",
"/var/opt/microsoft/omsconfig/status/dscperformconsistency",
"/var/opt/microsoft/omsconfig/status/dscperforminventory",
"/var/opt/microsoft/omsconfig/status/dscsetlcm"
]
for sf in status_files:
if os.path.isfile(sf):
mod_time = os.path.getmtime(sf)
curr_time = int(time.time())
if (curr_time - mod_time < 300):
with open(sf) as json_file:
try:
status_data = json.load(json_file)
operation = status_data["operation"]
operation_success = status_data["success"]
message = status_data["message"]
event = self.create_telemetry_event(operation,operation_success,message,"300000")
self._hutil_log("Writing telemetry event: "+event)
self.write_waagent_event(event)
self._hutil_log("Successfully processed telemetry status file: "+sf)
except Exception as e:
self._hutil_log("Error parsing telemetry status file: "+sf)
self._hutil_log("Exception info: "+traceback.format_exc())
else:
self._hutil_log("Telemetry status file not updated in last 5 mins: "+sf)
else:
self._hutil_log("Telemetry status file does not exist: "+sf)
pass
    def watch(self):
        """
        Main loop performing various monitoring activities periodically.
        Currently iterates every 5 minutes, and other periodic activities might be
        added in the loop later. Runs forever; intended to be the target of a
        background thread.
        :return: None (never returns normally)
        """
        self._hutil_log('started watcher thread')
        while True:
            self._hutil_log('watcher thread waking')
            # Forward freshly updated status files to waagent telemetry.
            self.upload_telemetry()
            # Sleep 5 minutes
            self._hutil_log('watcher thread sleeping')
            time.sleep(60 * 5)
        pass
def monitor_heartbeat(self, self_mon_info, log_file_marker):
"""
Monitor heartbeat health. OMS output plugin will update the timestamp
of new heartbeat file every 5 minutes. We will check if it is updated
If not, we will look into omsagent logs and look for specific error logs
which indicate we are in non recoverable state.
"""
take_action = False
if (not self.received_heartbeat_recently()):
"""
We haven't seen heartbeat in more than past 300 seconds
"""
self_mon_info.increment_heartbeat_missing_count()
take_action = False
if (self_mon_info.crossed_error_threshold()):
# If we do not see heartbeat for last 3 iterations, take corrective action.
take_action = True
elif (self.check_for_fatal_oms_logs(log_file_marker)):
# If we see hearbeat missing and error message, no need to wait for more than one
# iteration. It is not a false positive. Take corrective action immediately.
take_action = True
if (take_action):
if (self._consecutive_restarts_due_to_error < 5):
self.take_corrective_action(self_mon_info)
self._consecutive_restarts_due_to_error += 1
else:
self._hutil_error("Last 5 restarts did not help. So we will not restart the agent immediately")
# Reset historical infomration.
self._consecutive_restarts_due_to_error = 0
self_mon_info.reset_error_info()
else:
"""
If we are able to get the heartbeats, check omsagent logs
to identify if there are any error logs.
"""
self_mon_info.reset_error_info()
self._consecutive_restarts_due_to_error = 0
def received_heartbeat_recently(self):
heartbeat_file = '/var/opt/microsoft/omsagent/log/ODSIngestions.status'
curr_time = int(time.time())
return_val = True
file_update_time = curr_time
if (os.path.isfile(heartbeat_file)):
file_update_time = os.path.getmtime(heartbeat_file)
self._hutil_log("File update time={0}, current time={1}".format(file_update_time, curr_time))
else:
self._hutil_log("Heartbeat file is not present on the disk.")
file_update_time = curr_time - 1000
if (file_update_time + 360 < curr_time):
return_val = False
else:
try:
with open(heartbeat_file) as json_file:
status_data = json.load(json_file)
operation_success = status_data["success"]
if (operation_success.lower() == "true"):
self._hutil_log("Found success message from ODS Ingestion.")
return_val = True
else:
self._hutil_log("Did not find success message in heart beat file. {0}".format(operation_success))
return_val = False
except Exception as e:
self._hutil_log("Error parsing ODS Ingestion status file: "+sf)
# Return True in case we failed to parse the file. We do not want to go into recycle loop in this scenario.
return_val = True
return return_val
def monitor_resource(self, self_mon_info):
"""
Monitor resource utilization of omsagent.
Check for memory and CPU periodically. If they cross the threshold for consecutive 3 iterations
we will restart the agent.
"""
resource_usage = self.get_oms_agent_resource_usage()
message = "Memory : {0}, CPU : {1}".format(resource_usage[0], resource_usage[1])
event = self.create_telemetry_event("agenttelemetry","True",message,"300000")
self.write_waagent_event(event)
self_mon_info._memory_used_in_percent = resource_usage[0]
if (self_mon_info._memory_used_in_percent > 0):
if (self_mon_info._memory_used_in_percent > MemoryThresholdToWatchFor):
# check consecutive memory usage.
self_mon_info.increment_high_memory_count()
if (self_mon_info.corssed_memory_threshold()):
# if we have crossed the memory threshold take corrective action.
self.take_corrective_action(self_mon_info)
else:
self_mon_info.reset_high_memory_count()
else:
self_mon_info.reset_high_memory_count()
    def monitor_health(self):
        """
        Role of this function is monitor the health of the oms agent.
        To begin with it will monitor heartbeats flowing through oms agent.
        We will also read oms agent logs to determine some error conditions.
        We don't want to interfere with the log watcher function,
        so we will start this on a new thread.
        Runs forever; every exception is logged and swallowed so the
        monitoring thread never dies.
        """
        self_mon_info = SelfMonitorInfo()
        log_file_marker = LogFileMarker()
        # check every 6 minutes. we want to be a bit pessimistic while looking for health, especially heartbeats which are emitted every 5 minutes.
        sleepTime = 6 * 60
        # sleep before starting the monitoring.
        time.sleep(sleepTime)
        while True:
            try:
                # Monitor heartbeat and logs.
                self.monitor_heartbeat(self_mon_info, log_file_marker)
                # Monitor memory usage
                self.monitor_resource(self_mon_info)
            except IOError as e:
                self._hutil_error('I/O error in monitoring health of the omsagent. Exception={0}'.format(e))
            except Exception as e:
                self._hutil_error('Error in monitoring health of the omsagent. Exception={0}'.format(e))
            finally:
                # Always wait a full interval, even after an error.
                time.sleep(sleepTime)
    def take_corrective_action(self, self_mon_info):
        """
        Take a corrective action.

        Runs the RestartOMSAgentServiceCommand (imported from the omsagent
        module) and clears the accumulated self-monitoring counters so
        detection starts fresh.

        :param self_mon_info: SelfMonitorInfo to reset after the restart.
        """
        run_command_and_log(RestartOMSAgentServiceCommand)
        self._hutil_log("Successfully restarted OMS linux agent, resetting self mon information.")
        # Reset self mon information.
        self_mon_info.reset()
    def emit_telemetry_after_corrective_action(self):
        """
        TODO : Emit telemetry after taking corrective action.

        Currently a stub: the docstring is the entire body, so calling this
        method is a no-op that returns None.
        """
def get_total_seconds_from_epoch_for_fluent_logs(self, datetime_string):
# fluentd logs timestamp format : 2018-08-02 19:27:34 +0000
# for python 2.7 or earlier there is no good way to convert it into seconds.
# so we parse upto seconds, and parse utc specific offset seperately.
try:
date_time_format = '%Y-%m-%d %H:%M:%S'
epoch = datetime(1970, 1, 1)
# get hours and minute delta for utc offset.
hours_delta_utc = int(datetime_string[21:23])
minutes_delta_utc= int(datetime_string[23:])
log_time = datetime.strptime(datetime_string[:19], date_time_format) + ((timedelta(hours=hours_delta_utc, minutes=minutes_delta_utc)) * (-1 if datetime_string[20] == "+" else 1))
return (log_time - epoch).total_seconds()
except Exception as e:
self._hutil_error('Error converting timestamp string to seconds. Exception={0}'.format(e))
return 0
def check_for_fatal_oms_logs(self, log_file_marker):
    """
    Scan the omsagent log file for lines indicating a non-recoverable state
    (the hardcoded ErrorStatements set; extend it as we learn more).

    Returns True when at least one warn/error line, no older than 10
    minutes, containing a known fatal message is found since the last scan
    position; False otherwise. Never propagates an exception to the caller.
    """
    read_start_time = int(time.time())
    if os.path.isfile(OmsAgentLogFile):
        last_crc = log_file_marker._last_crc
        last_pos = log_file_marker._last_pos
        # BUGFIX: initialize f so the finally block does not raise
        # NameError if open() itself fails.
        f = None
        try:
            f = open(OmsAgentLogFile, "r")
            text = f.readline()
            # Handle log rotate. Check the CRC of the first line of the file
            # (a technique also used by agents like Splunk). If it matches
            # the previous CRC the file is unchanged and we can seek to
            # last_pos; otherwise the file was rotated and we read from the
            # beginning.
            if (text != ''):
                crc = hashlib.md5(text).hexdigest()
                self._hutil_log("Last crc = {0}, current crc= {1} position = {2}".format(last_crc, crc, last_pos))
                if (last_crc == crc):
                    if (last_pos > 0):
                        f.seek(last_pos)
                else:
                    self._hutil_log("File has changed do not seek from the offset. current crc = {0}".format(crc))
                log_file_marker._last_crc = crc
                while True:
                    text = f.readline()
                    if (text == ''):
                        # EOF: remember where we stopped for the next scan.
                        log_file_marker._last_pos = f.tell()
                        break
                    res = reg_ex.match(text)
                    if res:
                        log_entry_time = self.get_total_seconds_from_epoch_for_fluent_logs(res.group(1))
                        if (log_entry_time + (10 * 60) < read_start_time):
                            # ignore log line if we are reading logs older than 10 minutes.
                            pass
                        elif (res.group(2) == "warn" or res.group(2) == "error"):
                            for error_statement in ErrorStatements:
                                # BUGFIX: the known fatal text is a substring
                                # of the log message, not the other way
                                # around as the original tested.
                                if (error_statement in res.group(3)):
                                    self._hutil_error("Found non recoverable error log in agent log file")
                                    # File should be closed in the finally block.
                                    return True
                self._hutil_log("Did not find any non recoverable logs in omsagent log file")
        except Exception as e:
            self._hutil_error("Caught an exception {0}".format(traceback.format_exc()))
        finally:
            if f is not None:
                f.close()
    else:
        self._hutil_error("Omsagent log file not found : {0}".format(OmsAgentLogFile))
    return False
def get_oms_agent_resource_usage(self):
    """
    Return (memory_percent, cpu_percent) for the omsagent process.

    On any failure returns (0.0, 0.0) -- monitoring must never crash.
    Long run (north star) this should use cgroups, but cgroup tooling is
    not available by default on all distros and it is unclear whether
    customers would want cgroups created on their VMs.
    """
    try:
        mem_usage = 0.0
        cpu_usage = 0.0
        with open(OmsAgentPidFile, 'r') as infile:
            # BUGFIX: readline() keeps the trailing newline; strip it so
            # the pid compares equal to top's first column.
            pid = infile.readline().strip()
        # top output columns used:
        # $1 - PID, $2 - account, $9 - CPU, $10 - Memory, $12 - Process name
        out = subprocess.Popen('top -bn1 | grep -i omsagent | awk \'{print $1 " " $2 " " $9 " " $10 " " $12}\'', shell=True, stdout=subprocess.PIPE)
        for line in out.stdout:
            s = line.split()
            # BUGFIX: s[4] is accessed, so at least 5 fields are required
            # (the original checked >= 4 and could raise IndexError).
            if (len(s) >= 5 and s[0] == pid and s[1] == 'omsagent' and s[4] == 'omsagent'):
                return float(s[3]), float(s[2])
    except Exception as e:
        self._hutil_error('Error getting memory usage for omsagent process. Exception={0}'.format(e))
    # Reached only when omsagent was not found or an error occurred; 0,0 is
    # a harmless, cautious answer.
    return mem_usage, cpu_usage
Update watcherutil.py
#!/usr/bin/env python
#
# OmsAgentForLinux Extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import os
import io
import datetime
from datetime import datetime, timedelta
import time
import string
import traceback
import shutil
import sys
import json
import uuid
from threading import Thread
import re
import hashlib
from omsagent import run_command_and_log
from omsagent import RestartOMSAgentServiceCommand
"""
Right now the memory threshold to watch for is hardcoded to 20%.
If the agent is using more than 20% of memory that is definitely very high.
In the future we may want to set it based on customer configuration.
"""
# Constants.
MemoryThresholdToWatchFor = 20
OmsAgentPidFile = "/var/opt/microsoft/omsagent/run/omsagent.pid"
OmsAgentLogFile = "/var/opt/microsoft/omsagent/log/omsagent.log"
# Matches fluentd log lines: "<timestamp> [<level>]: <message>".
reg_ex = re.compile(r'([0-9]{4}-[0-9]{2}-[0-9]{2}.*)\[(\w+)\]:(.*)')
"""
We can add to the list below with more error messages to identify non recoverable errors.
"""
# BUGFIX: the original entries "Errono::ENOSPC error=" and "No space left on
# the device" could never match real fluentd output -- the Ruby exception
# class is Errno::ENOSPC and the errno text is "No space left on device".
# Corrected entries are added; the old (misspelled) ones are kept so any
# existing matching behavior is preserved.
ErrorStatements = ["Errno::ENOSPC error=", "No space left on device",
                   "Errono::ENOSPC error=", "Fatal error, can not clear buffer file",
                   "No space left on the device"]
class SelfMonitorInfo:
    """
    Holds self-monitoring state for omsagent: consecutive missed-heartbeat
    and high-memory counters plus a coarse Green/Yellow/Red status.
    """

    def __init__(self):
        # Consecutive missed heartbeats since the last good one.
        self._consecutive_error_count = 0
        self._last_reset_success = True
        self._error_count = 0
        # Last sampled memory usage (percent) and consecutive iterations
        # above the memory threshold.
        self._memory_used_in_percent = 0
        self._consecutive_high_memory_usage = 0

    def reset(self):
        """Clear all counters (called after a corrective restart)."""
        self._consecutive_error_count = 0
        self._consecutive_high_memory_usage = 0
        self._memory_used_in_percent = 0

    def reset_error_info(self):
        self._consecutive_error_count = 0

    def increment_heartbeat_missing_count(self):
        self._consecutive_error_count += 1

    def crossed_error_threshold(self):
        """True when more than 3 consecutive heartbeats were missed."""
        return self._consecutive_error_count > 3

    def crossed_memory_threshold(self):
        """True when memory stayed above threshold for more than 3 checks."""
        return self._consecutive_high_memory_usage > 3

    # Backward-compatible alias: existing callers use the original
    # misspelled name corssed_memory_threshold().
    corssed_memory_threshold = crossed_memory_threshold

    def increment_high_memory_count(self):
        self._consecutive_high_memory_usage += 1

    def reset_high_memory_count(self):
        self._consecutive_high_memory_usage = 0

    def current_status(self):
        """
        Return "Green"/"Yellow"/"Red" health.
        (String constants because Python 2.6 does not support enum.)
        """
        if self._consecutive_error_count == 0 and self._consecutive_high_memory_usage == 0:
            return "Green"
        elif self._consecutive_error_count < 3 and self._consecutive_high_memory_usage < 3:
            return "Yellow"
        else:
            return "Red"
class LogFileMarker:
    """Remembers where the previous scan of the omsagent log file stopped."""

    def __init__(self):
        # Byte offset of the last read position, and the MD5 hex digest of
        # the file's first line (used to detect log rotation).
        self._last_pos = 0
        self._last_crc = ""

    def reset_marker(self):
        """Forget the stored position and fingerprint; next scan starts fresh."""
        self._last_pos = 0
        self._last_crc = ""
class Watcher:
    """
    Handles periodic monitoring activities: telemetry upload and omsagent
    health (heartbeat, fatal logs, resource usage).
    """

    def __init__(self, hutil_error, hutil_log, log_to_console=False):
        """
        :param hutil_error: error logging function (e.g. hutil.error); not a stream.
        :param hutil_log: normal logging function (e.g. hutil.log); not a stream.
        :param log_to_console: whether to also log issues to /dev/console.
        """
        self._hutil_error = hutil_error
        self._hutil_log = hutil_log
        self._log_to_console = log_to_console
        # Restart bookkeeping used by the health-monitor loop.
        self._consecutive_error_count = 0
        self._consecutive_restarts_due_to_error = 0
def _do_log_to_console_if_enabled(self, message):
"""
Write 'message' to console. Stolen from waagent LogToCon().
"""
if self._log_to_console:
try:
with open('/dev/console', 'w') as console:
message = filter(lambda x: x in string.printable, message)
console.write(message.encode('ascii', 'ignore') + '\n')
except IOError as e:
self._hutil_error('Error writing to console. Exception={0}'.format(e))
def write_waagent_event(self, event):
    """
    Atomically drop a telemetry 'event' into waagent's events directory:
    write to a temp file first, then move it to <microseconds>.tld,
    probing for an unused offset if that name is already taken.
    """
    # BUGFIX: keep offset as an int. The original stored str(...) and then
    # executed `offset += 1` on a filename collision, raising TypeError.
    offset = int(time.time() * 1000000)
    temp_fn = '/var/lib/waagent/events/' + str(uuid.uuid4())
    with open(temp_fn, 'w+') as fh:
        fh.write(event)
    fn_template = '/var/lib/waagent/events/{}.tld'
    fn = fn_template.format(offset)
    while os.path.isfile(fn):
        offset += 1
        fn = fn_template.format(offset)
    shutil.move(temp_fn, fn)
    self._hutil_log(fn)
def create_telemetry_event(self, operation, operation_success, message, duration):
template = """ {{
"eventId": 1,
"providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975",
"parameters": [
{{
"name": "Name",
"value": "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux"
}},
{{
"name": "Version",
"value": "1.8.0"
}},
{{
"name": "Operation",
"value": "{}"
}},
{{
"name": "OperationSuccess",
"value": {}
}},
{{
"name": "Message",
"value": "{}"
}},
{{
"name": "Duration",
"value": {}
}}
]
}}"""
operation_success_as_string = str(operation_success).lower()
formatted_message = message.replace("\n", "\\n").replace("\t", "\\t").replace('"', '\"')
return template.format(operation, operation_success_as_string, formatted_message, duration)
def upload_telemetry(self):
    """
    Forward recently-updated (< 5 minutes old) status files from omsagent
    and omsconfig as waagent telemetry events. Parse failures are logged
    and skipped; the loop always continues to the next file.
    """
    status_files = [
        "/var/opt/microsoft/omsagent/log/ODSIngestion.status",
        "/var/opt/microsoft/omsagent/log/ODSIngestionBlob.status",
        "/var/opt/microsoft/omsagent/log/ODSIngestionAPI.status",
        "/var/opt/microsoft/omsconfig/status/dscperformconsistency",
        "/var/opt/microsoft/omsconfig/status/dscperforminventory",
        "/var/opt/microsoft/omsconfig/status/dscsetlcm"
    ]
    for sf in status_files:
        if os.path.isfile(sf):
            mod_time = os.path.getmtime(sf)
            curr_time = int(time.time())
            # Only forward files touched within the last 5 minutes, so the
            # same stale status is not re-reported on every iteration.
            if (curr_time - mod_time < 300):
                with open(sf) as json_file:
                    try:
                        status_data = json.load(json_file)
                        operation = status_data["operation"]
                        operation_success = status_data["success"]
                        message = status_data["message"]
                        event = self.create_telemetry_event(operation, operation_success, message, "300000")
                        self._hutil_log("Writing telemetry event: " + event)
                        self.write_waagent_event(event)
                        self._hutil_log("Successfully processed telemetry status file: " + sf)
                    except Exception as e:
                        self._hutil_log("Error parsing telemetry status file: " + sf)
                        self._hutil_log("Exception info: " + traceback.format_exc())
            else:
                self._hutil_log("Telemetry status file not updated in last 5 mins: " + sf)
        else:
            self._hutil_log("Telemetry status file does not exist: " + sf)
            pass
def watch(self):
    """
    Main loop performing various monitoring activities periodically.
    Currently iterates every 5 minutes, and other periodic activities might be
    added in the loop later.
    :return: None (never returns; intended to run on its own thread)
    """
    self._hutil_log('started watcher thread')
    while True:
        self._hutil_log('watcher thread waking')
        self.upload_telemetry()
        # Sleep 5 minutes
        self._hutil_log('watcher thread sleeping')
        time.sleep(60 * 5)
    pass
def monitor_heartbeat(self, self_mon_info, log_file_marker):
    """
    Monitor heartbeat health. OMS output plugin will update the timestamp
    of new heartbeat file every 5 minutes. We will check if it is updated
    If not, we will look into omsagent logs and look for specific error logs
    which indicate we are in non recoverable state.

    :param self_mon_info: SelfMonitorInfo tracking consecutive failures.
    :param log_file_marker: LogFileMarker for incremental log scanning.
    """
    take_action = False
    if (not self.received_heartbeat_recently()):
        """
        We haven't seen heartbeat in more than past 300 seconds
        """
        self_mon_info.increment_heartbeat_missing_count()
        take_action = False
        if (self_mon_info.crossed_error_threshold()):
            # If we do not see heartbeat for last 3 iterations, take corrective action.
            take_action = True
        elif (self.check_for_fatal_oms_logs(log_file_marker)):
            # If we see hearbeat missing and error message, no need to wait for more than one
            # iteration. It is not a false positive. Take corrective action immediately.
            take_action = True
        if (take_action):
            # Cap restarts: after 5 consecutive restarts give the agent a
            # breather instead of restarting again immediately.
            if (self._consecutive_restarts_due_to_error < 5):
                self.take_corrective_action(self_mon_info)
                self._consecutive_restarts_due_to_error += 1
            else:
                self._hutil_error("Last 5 restarts did not help. So we will not restart the agent immediately")
                # Reset historical infomration.
                # NOTE(review): resetting here re-arms the restart budget,
                # so restarts resume once the thresholds are crossed again.
                self._consecutive_restarts_due_to_error = 0
                self_mon_info.reset_error_info()
    else:
        """
        If we are able to get the heartbeats, check omsagent logs
        to identify if there are any error logs.
        """
        self_mon_info.reset_error_info()
        self._consecutive_restarts_due_to_error = 0
def received_heartbeat_recently(self):
heartbeat_file = '/var/opt/microsoft/omsagent/log/ODSIngestions.status'
curr_time = int(time.time())
return_val = True
file_update_time = curr_time
if (os.path.isfile(heartbeat_file)):
file_update_time = os.path.getmtime(heartbeat_file)
self._hutil_log("File update time={0}, current time={1}".format(file_update_time, curr_time))
else:
self._hutil_log("Heartbeat file is not present on the disk.")
file_update_time = curr_time - 1000
if (file_update_time + 360 < curr_time):
return_val = False
else:
try:
with open(heartbeat_file) as json_file:
status_data = json.load(json_file)
operation_success = status_data["success"]
if (operation_success.lower() == "true"):
self._hutil_log("Found success message from ODS Ingestion.")
return_val = True
else:
self._hutil_log("Did not find success message in heart beat file. {0}".format(operation_success))
return_val = False
except Exception as e:
self._hutil_log("Error parsing ODS Ingestion status file: "+sf)
# Return True in case we failed to parse the file. We do not want to go into recycle loop in this scenario.
return_val = True
return return_val
def monitor_resource(self, self_mon_info):
    """
    Monitor resource utilization of omsagent.
    Check for memory and CPU periodically. If they cross the threshold for consecutive 3 iterations
    we will restart the agent.

    :param self_mon_info: SelfMonitorInfo tracking consecutive high-memory counts.
    """
    resource_usage = self.get_oms_agent_resource_usage()
    # Always emit the sampled numbers as telemetry, healthy or not.
    message = "Memory : {0}, CPU : {1}".format(resource_usage[0], resource_usage[1])
    event = self.create_telemetry_event("agenttelemetry", "True", message, "300000")
    self.write_waagent_event(event)
    self_mon_info._memory_used_in_percent = resource_usage[0]
    # A reading of 0 means "could not measure" (see
    # get_oms_agent_resource_usage), so it is not counted against the agent.
    if (self_mon_info._memory_used_in_percent > 0):
        if (self_mon_info._memory_used_in_percent > MemoryThresholdToWatchFor):
            # check consecutive memory usage.
            self_mon_info.increment_high_memory_count()
            if (self_mon_info.corssed_memory_threshold()):
                # if we have crossed the memory threshold take corrective action.
                self.take_corrective_action(self_mon_info)
        else:
            self_mon_info.reset_high_memory_count()
    else:
        self_mon_info.reset_high_memory_count()
def monitor_health(self):
    """
    Role of this function is monitor the health of the oms agent.
    To begin with it will monitor heartbeats flowing through oms agent.
    We will also read oms agent logs to determine some error conditions.
    We don't want to interfare with log watcher function.
    So we will start this on a new thread.
    Never returns.
    """
    self_mon_info = SelfMonitorInfo()
    log_file_marker = LogFileMarker()
    # check every 6 minutes. we want to be bit pessimistic while looking for health, especially heartbeats which is emitted every 5 minutes.
    sleepTime = 6 * 60
    # sleep before starting the monitoring.
    time.sleep(sleepTime)
    while True:
        try:
            # Monitor heartbeat and logs.
            self.monitor_heartbeat(self_mon_info, log_file_marker)
            # Monitor memory usage
            self.monitor_resource(self_mon_info)
        except IOError as e:
            self._hutil_error('I/O error in monitoring health of the omsagent. Exception={0}'.format(e))
        except Exception as e:
            self._hutil_error('Error in monitoring health of the omsagent. Exception={0}'.format(e))
        finally:
            # Sleep even after an exception so the loop keeps its cadence.
            time.sleep(sleepTime)
def take_corrective_action(self, self_mon_info):
    """
    Take a corrective action: restart the omsagent service and clear the
    accumulated self-monitor counters.

    :param self_mon_info: SelfMonitorInfo instance to reset after restart.
    """
    run_command_and_log(RestartOMSAgentServiceCommand)
    self._hutil_log("Successfully restarted OMS linux agent, resetting self mon information.")
    # Reset self mon information.
    self_mon_info.reset()
def emit_telemetry_after_corrective_action(self):
    """
    TODO : Emit telemetry after taking corrective action.
    """
    # Intentionally a no-op stub for now.
def get_total_seconds_from_epoch_for_fluent_logs(self, datetime_string):
# fluentd logs timestamp format : 2018-08-02 19:27:34 +0000
# for python 2.7 or earlier there is no good way to convert it into seconds.
# so we parse upto seconds, and parse utc specific offset seperately.
try:
date_time_format = '%Y-%m-%d %H:%M:%S'
epoch = datetime(1970, 1, 1)
# get hours and minute delta for utc offset.
hours_delta_utc = int(datetime_string[21:23])
minutes_delta_utc= int(datetime_string[23:])
log_time = datetime.strptime(datetime_string[:19], date_time_format) + ((timedelta(hours=hours_delta_utc, minutes=minutes_delta_utc)) * (-1 if datetime_string[20] == "+" else 1))
return (log_time - epoch).total_seconds()
except Exception as e:
self._hutil_error('Error converting timestamp string to seconds. Exception={0}'.format(e))
return 0
def check_for_fatal_oms_logs(self, log_file_marker):
    """
    Scan the omsagent log file for lines indicating a non-recoverable state
    (the hardcoded ErrorStatements set; extend it as we learn more).

    Returns True when at least one warn/error line, no older than 10
    minutes, containing a known fatal message is found since the last scan
    position; False otherwise. Never propagates an exception to the caller.
    """
    read_start_time = int(time.time())
    if os.path.isfile(OmsAgentLogFile):
        last_crc = log_file_marker._last_crc
        last_pos = log_file_marker._last_pos
        # BUGFIX: initialize f so the finally block does not raise
        # NameError if open() itself fails.
        f = None
        try:
            f = open(OmsAgentLogFile, "r")
            text = f.readline()
            # Handle log rotate. Check the CRC of the first line of the file
            # (a technique also used by agents like Splunk). If it matches
            # the previous CRC the file is unchanged and we can seek to
            # last_pos; otherwise the file was rotated and we read from the
            # beginning.
            if (text != ''):
                crc = hashlib.md5(text).hexdigest()
                self._hutil_log("Last crc = {0}, current crc= {1} position = {2}".format(last_crc, crc, last_pos))
                if (last_crc == crc):
                    if (last_pos > 0):
                        f.seek(last_pos)
                else:
                    self._hutil_log("File has changed do not seek from the offset. current crc = {0}".format(crc))
                log_file_marker._last_crc = crc
                while True:
                    text = f.readline()
                    if (text == ''):
                        # EOF: remember where we stopped for the next scan.
                        log_file_marker._last_pos = f.tell()
                        break
                    res = reg_ex.match(text)
                    if res:
                        log_entry_time = self.get_total_seconds_from_epoch_for_fluent_logs(res.group(1))
                        if (log_entry_time + (10 * 60) < read_start_time):
                            # ignore log line if we are reading logs older than 10 minutes.
                            pass
                        elif (res.group(2) == "warn" or res.group(2) == "error"):
                            for error_statement in ErrorStatements:
                                # BUGFIX: the known fatal text is a substring
                                # of the log message, not the other way
                                # around as the original tested.
                                if (error_statement in res.group(3)):
                                    self._hutil_error("Found non recoverable error log in agent log file")
                                    # File should be closed in the finally block.
                                    return True
                self._hutil_log("Did not find any non recoverable logs in omsagent log file")
        except Exception as e:
            self._hutil_error("Caught an exception {0}".format(traceback.format_exc()))
        finally:
            if f is not None:
                f.close()
    else:
        self._hutil_error("Omsagent log file not found : {0}".format(OmsAgentLogFile))
    return False
def get_oms_agent_resource_usage(self):
"""
If we hit any exception in getting resoource usage of the omsagent return 0,0
We need not crash/fail in this case.
return tuple : memory, cpu.
Long run for north star we should use cgroups. cgroups tools are not available
by default on all the distros and we would need to package with the agent those and use.
Also at this point it is not very clear if customers would want us to create cgroups on their vms.
"""
try:
mem_usage = 0.0
cpu_usage = 0.0
with open(OmsAgentPidFile, 'r') as infile:
pid = infile.readline() # Get pid of omsagent process.
# top output:
# $1 - PID,
# $2 - account,
# $9 - CPU,
# $10 - Memory,
# $12 - Process name
out = subprocess.Popen('top -bn1 | grep -i omsagent | awk \'{print $1 " " $2 " " $9 " " $10 " " $12}\'', shell=True, stdout=subprocess.PIPE)
for line in out.stdout:
s = line.split()
if (len(s) >= 4 and s[0] == pid and s[1] == 'omsagent' and s[4] == 'omsagent'):
return float(s[3]) , float(s[2])
except Exception as e:
self._hutil_error('Error getting memory usage for omsagent process. Exception={0}'.format(e))
# Control will reach here only in case of error condition. In that case it is ok to return 0 as it is harmless to be cautious.
return mem_usage, cpu_usage
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import logging
import os
import random
import shutil
import subprocess
import sys
import tempfile
import time
import urllib2
from optparse import OptionParser
from sys import stderr
import boto
from boto.ec2.blockdevicemapping import BlockDeviceMapping, EBSBlockDeviceType
from boto import ec2
# A URL prefix from which to fetch AMI information
AMI_PREFIX = "https://raw.github.com/pwendell/spark-ec2/ec2-updates/ami-list"
# Configure and parse our command-line arguments
def parse_args():
    """
    Parse spark-ec2 command-line arguments.

    Returns (opts, action, cluster_name). Exits with status 1 on invalid
    usage, a missing identity file for launch/login, an invalid cluster
    type, or missing AWS credentials (checked via ~/.boto, /etc/boto.cfg,
    then environment variables).
    """
    parser = OptionParser(usage="spark-ec2 [options] <action> <cluster_name>"
        + "\n\n<action> can be: launch, destroy, login, stop, start, get-master",
        add_help_option=False)
    parser.add_option("-h", "--help", action="help",
                      help="Show this help message and exit")
    parser.add_option("-s", "--slaves", type="int", default=1,
                      help="Number of slaves to launch (default: 1)")
    parser.add_option("-w", "--wait", type="int", default=120,
                      help="Seconds to wait for nodes to start (default: 120)")
    parser.add_option("-k", "--key-pair",
                      help="Key pair to use on instances")
    parser.add_option("-i", "--identity-file",
                      help="SSH private key file to use for logging into instances")
    parser.add_option("-t", "--instance-type", default="m1.large",
                      help="Type of instance to launch (default: m1.large). " +
                           "WARNING: must be 64-bit; small instances won't work")
    parser.add_option("-m", "--master-instance-type", default="",
                      help="Master instance type (leave empty for same as instance-type)")
    parser.add_option("-r", "--region", default="us-east-1",
                      help="EC2 region zone to launch instances in")
    parser.add_option("-z", "--zone", default="",
                      help="Availability zone to launch instances in, or 'all' to spread " +
                           "slaves across multiple (an additional $0.01/Gb for bandwidth" +
                           "between zones applies)")
    parser.add_option("-a", "--ami", default="v0.7.0",
                      help="Amazon Machine Image ID to use, or 'vX.Y.Z' to use version " +
                           "X.Y.Z of Spark (default: v0.7.0)")
    parser.add_option("-D", metavar="[ADDRESS:]PORT", dest="proxy_port",
                      help="Use SSH dynamic port forwarding to create a SOCKS proxy at " +
                           "the given local address (for use with login)")
    parser.add_option("--resume", action="store_true", default=False,
                      help="Resume installation on a previously launched cluster " +
                           "(for debugging)")
    parser.add_option("--ebs-vol-size", metavar="SIZE", type="int", default=0,
                      help="Attach a new EBS volume of size SIZE (in GB) to each node as " +
                           "/vol. The volumes will be deleted when the instances terminate. " +
                           "Only possible on EBS-backed AMIs.")
    parser.add_option("--swap", metavar="SWAP", type="int", default=1024,
                      help="Swap space to set up per node, in MB (default: 1024)")
    parser.add_option("--spot-price", metavar="PRICE", type="float",
                      help="If specified, launch slaves as spot instances with the given " +
                           "maximum price (in dollars)")
    parser.add_option("--cluster-type", type="choice", metavar="TYPE",
                      choices=["mesos", "standalone"], default="standalone",
                      help="'mesos' for a Mesos cluster, 'standalone' for a standalone " +
                           "Spark cluster (default: standalone)")
    parser.add_option("--ganglia", action="store_true", default=True,
                      help="Setup Ganglia monitoring on cluster (default: on). NOTE: " +
                           "the Ganglia page will be publicly accessible")
    parser.add_option("--no-ganglia", action="store_false", dest="ganglia",
                      help="Disable Ganglia monitoring for the cluster")
    parser.add_option("--old-scripts", action="store_true", default=False,
                      help="Use old mesos-ec2 scripts, for Spark <= 0.6 AMIs")
    parser.add_option("-u", "--user", default="root",
                      help="The SSH user you want to connect as (default: root)")
    parser.add_option("--delete-groups", action="store_true", default=False,
                      help="When destroying a cluster, delete the security groups that were created")
    (opts, args) = parser.parse_args()
    if len(args) != 2:
        parser.print_help()
        sys.exit(1)
    (action, cluster_name) = args
    # An identity file is mandatory for actions that SSH into instances.
    if opts.identity_file == None and action in ['launch', 'login']:
        print >> stderr, ("ERROR: The -i or --identity-file argument is " +
                          "required for " + action)
        sys.exit(1)
    if opts.cluster_type not in ["mesos", "standalone"] and action == "launch":
        print >> stderr, ("ERROR: Invalid cluster type: " + opts.cluster_type)
        sys.exit(1)
    # Boto config check
    # http://boto.cloudhackers.com/en/latest/boto_config_tut.html
    home_dir = os.getenv('HOME')
    if home_dir == None or not os.path.isfile(home_dir + '/.boto'):
        if not os.path.isfile('/etc/boto.cfg'):
            if os.getenv('AWS_ACCESS_KEY_ID') == None:
                print >> stderr, ("ERROR: The environment variable AWS_ACCESS_KEY_ID " +
                                  "must be set")
                sys.exit(1)
            if os.getenv('AWS_SECRET_ACCESS_KEY') == None:
                print >> stderr, ("ERROR: The environment variable AWS_SECRET_ACCESS_KEY " +
                                  "must be set")
                sys.exit(1)
    return (opts, action, cluster_name)
# Get the EC2 security group of the given name, creating it if it doesn't exist
def get_or_make_group(conn, name):
groups = conn.get_all_security_groups()
group = [g for g in groups if g.name == name]
if len(group) > 0:
return group[0]
else:
print "Creating security group " + name
return conn.create_security_group(name, "Spark EC2 group")
# Wait for a set of launched instances to exit the "pending" state
# (i.e. either to start running or to fail and be terminated)
def wait_for_instances(conn, instances):
    """
    Block until no instance in *instances* is "pending" anymore,
    refreshing instance state and polling every 5 seconds.
    """
    while True:
        for inst in instances:
            inst.update()
        still_pending = [inst for inst in instances if inst.state == 'pending']
        if not still_pending:
            return
        time.sleep(5)
def is_active(instance):
    """
    Return True when *instance* is in a state we consider active, i.e. not
    terminating or terminated. Both stopping and stopped count as active
    because stopped clusters can be restarted.
    """
    return instance.state in ('pending', 'running', 'stopping', 'stopped')
# Attempt to resolve an appropriate AMI given the architecture and
# region of the request.
def get_spark_ami(opts):
    """
    Resolve the AMI id for the requested Spark version, region, and
    instance virtualization type by fetching it from the AMI list repo
    (AMI_PREFIX). Exits with status 1 when resolution fails.
    """
    version = opts.ami
    # Map each known instance type to its virtualization flavor
    # (paravirtual vs hardware-assisted).
    instance_types = {
        "m1.small": "pvm",
        "m1.medium": "pvm",
        "m1.large": "pvm",
        "m1.xlarge": "pvm",
        "t1.micro": "pvm",
        "c1.medium": "pvm",
        "c1.xlarge": "pvm",
        "m2.xlarge": "pvm",
        "m2.2xlarge": "pvm",
        "m2.4xlarge": "pvm",
        "cc1.4xlarge": "hvm",
        "cc2.8xlarge": "hvm",
        "cg1.4xlarge": "hvm",
        "hs1.8xlarge": "hvm",
        "hi1.4xlarge": "hvm",
        "m3.xlarge": "hvm",
        "m3.2xlarge": "hvm",
        "cr1.8xlarge": "hvm"
    }
    if opts.instance_type in instance_types:
        instance_type = instance_types[opts.instance_type]
    else:
        instance_type = "pvm"
        print >> stderr,\
            "Don't recognize %s, assuming type is pvm" % opts.instance_type
    if version != "v0.7.0":
        print >> stderr, \
            "Don't know how to resolve AMI for version: %s" % version
    # TODO(pwendell) Once we have multiple Spark AMI versions, we should let
    # people give a version flag here in place of just saying 'latest'.
    version = version[1:]  # drop the leading "v"
    parts = opts.region.split("-")
    region = "-".join([parts[0], parts[1], parts[2][0]])  # strip any avail. zone
    ami_path = "%s/%s/%s/%s" % (AMI_PREFIX, version, region, instance_type)
    try:
        ami = urllib2.urlopen(ami_path).read().strip()
        print "Spark AMI: " + ami
    except:
        print >> stderr, "Could not resolve AMI at: " + ami_path
        sys.exit(1)
    return ami
# Launch a cluster of the given name, by setting up its security groups,
# and then starting new instances in them.
# Returns a tuple of EC2 reservation objects for the master, slave
# and zookeeper instances (in that order).
# Fails if there already instances running in the cluster's groups.
def launch_cluster(conn, opts, cluster_name):
    """
    Launch a new Spark cluster named *cluster_name*: create/authorize the
    master, slave, and zoo security groups, then start slave instances
    (spot or on-demand) and a master. Exits on pre-existing instances,
    unresolvable AMIs, or (with status 0) cancelled spot requests.
    Returns (master_nodes, slave_nodes, zoo_nodes).
    """
    print "Setting up security groups..."
    master_group = get_or_make_group(conn, cluster_name + "-master")
    slave_group = get_or_make_group(conn, cluster_name + "-slaves")
    zoo_group = get_or_make_group(conn, cluster_name + "-zoo")
    # Only newly created groups have empty rules; authorize ports once.
    if master_group.rules == []:  # Group was just now created
        master_group.authorize(src_group=master_group)
        master_group.authorize(src_group=slave_group)
        master_group.authorize(src_group=zoo_group)
        master_group.authorize('tcp', 22, 22, '0.0.0.0/0')
        master_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
        master_group.authorize('tcp', 50030, 50030, '0.0.0.0/0')
        master_group.authorize('tcp', 50070, 50070, '0.0.0.0/0')
        master_group.authorize('tcp', 60070, 60070, '0.0.0.0/0')
        if opts.cluster_type == "mesos":
            master_group.authorize('tcp', 38090, 38090, '0.0.0.0/0')
        if opts.ganglia:
            master_group.authorize('tcp', 5080, 5080, '0.0.0.0/0')
    if slave_group.rules == []:  # Group was just now created
        slave_group.authorize(src_group=master_group)
        slave_group.authorize(src_group=slave_group)
        slave_group.authorize(src_group=zoo_group)
        slave_group.authorize('tcp', 22, 22, '0.0.0.0/0')
        slave_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
        slave_group.authorize('tcp', 50060, 50060, '0.0.0.0/0')
        slave_group.authorize('tcp', 50075, 50075, '0.0.0.0/0')
        slave_group.authorize('tcp', 60060, 60060, '0.0.0.0/0')
        slave_group.authorize('tcp', 60075, 60075, '0.0.0.0/0')
    if zoo_group.rules == []:  # Group was just now created
        zoo_group.authorize(src_group=master_group)
        zoo_group.authorize(src_group=slave_group)
        zoo_group.authorize(src_group=zoo_group)
        zoo_group.authorize('tcp', 22, 22, '0.0.0.0/0')
        zoo_group.authorize('tcp', 2181, 2181, '0.0.0.0/0')
        zoo_group.authorize('tcp', 2888, 2888, '0.0.0.0/0')
        zoo_group.authorize('tcp', 3888, 3888, '0.0.0.0/0')
    # Check if instances are already running in our groups
    active_nodes = get_existing_cluster(conn, opts, cluster_name,
                                        die_on_error=False)
    if any(active_nodes):
        print >> stderr, ("ERROR: There are already instances running in " +
                          "group %s, %s or %s" % (master_group.name, slave_group.name, zoo_group.name))
        sys.exit(1)
    # Figure out Spark AMI ("vX.Y.Z" values are resolved to concrete AMI ids)
    if opts.ami[0] == "v":
        opts.ami = get_spark_ami(opts)
    print "Launching instances..."
    try:
        image = conn.get_all_images(image_ids=[opts.ami])[0]
    except:
        print >> stderr, "Could not find AMI " + opts.ami
        sys.exit(1)
    # Create block device mapping so that we can add an EBS volume if asked to
    block_map = BlockDeviceMapping()
    if opts.ebs_vol_size > 0:
        device = EBSBlockDeviceType()
        device.size = opts.ebs_vol_size
        device.delete_on_termination = True
        block_map["/dev/sdv"] = device
    # Launch slaves
    if opts.spot_price != None:
        # Launch spot instances with the requested price
        print ("Requesting %d slaves as spot instances with price $%.3f" %
               (opts.slaves, opts.spot_price))
        zones = get_zones(conn, opts)
        num_zones = len(zones)
        i = 0
        my_req_ids = []
        for zone in zones:
            num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
            slave_reqs = conn.request_spot_instances(
                price = opts.spot_price,
                image_id = opts.ami,
                launch_group = "launch-group-%s" % cluster_name,
                placement = zone,
                count = num_slaves_this_zone,
                key_name = opts.key_pair,
                security_groups = [slave_group],
                instance_type = opts.instance_type,
                block_device_map = block_map)
            my_req_ids += [req.id for req in slave_reqs]
            i += 1
        print "Waiting for spot instances to be granted..."
        try:
            # Poll every 10s until every spot request is active.
            while True:
                time.sleep(10)
                reqs = conn.get_all_spot_instance_requests()
                id_to_req = {}
                for r in reqs:
                    id_to_req[r.id] = r
                active_instance_ids = []
                for i in my_req_ids:
                    if i in id_to_req and id_to_req[i].state == "active":
                        active_instance_ids.append(id_to_req[i].instance_id)
                if len(active_instance_ids) == opts.slaves:
                    print "All %d slaves granted" % opts.slaves
                    reservations = conn.get_all_instances(active_instance_ids)
                    slave_nodes = []
                    for r in reservations:
                        slave_nodes += r.instances
                    break
                else:
                    print "%d of %d slaves granted, waiting longer" % (
                        len(active_instance_ids), opts.slaves)
        except:
            # Interrupted (e.g. Ctrl-C): cancel outstanding requests and bail.
            print "Canceling spot instance requests"
            conn.cancel_spot_instance_requests(my_req_ids)
            # Log a warning if any of these requests actually launched instances:
            (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
                conn, opts, cluster_name, die_on_error=False)
            running = len(master_nodes) + len(slave_nodes) + len(zoo_nodes)
            if running:
                print >> stderr, ("WARNING: %d instances are still running" % running)
            sys.exit(0)
    else:
        # Launch non-spot instances
        zones = get_zones(conn, opts)
        num_zones = len(zones)
        i = 0
        slave_nodes = []
        for zone in zones:
            num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
            if num_slaves_this_zone > 0:
                slave_res = image.run(key_name = opts.key_pair,
                                      security_groups = [slave_group],
                                      instance_type = opts.instance_type,
                                      placement = zone,
                                      min_count = num_slaves_this_zone,
                                      max_count = num_slaves_this_zone,
                                      block_device_map = block_map)
                slave_nodes += slave_res.instances
                print "Launched %d slaves in %s, regid = %s" % (num_slaves_this_zone,
                                                                zone, slave_res.id)
            i += 1
    # Launch masters
    master_type = opts.master_instance_type
    if master_type == "":
        master_type = opts.instance_type
    if opts.zone == 'all':
        opts.zone = random.choice(conn.get_all_zones()).name
    master_res = image.run(key_name = opts.key_pair,
                           security_groups = [master_group],
                           instance_type = master_type,
                           placement = opts.zone,
                           min_count = 1,
                           max_count = 1,
                           block_device_map = block_map)
    master_nodes = master_res.instances
    # NOTE(review): `zone` here is the leftover value from the slave launch
    # loop, not the master's placement (opts.zone) -- looks like a latent
    # reporting quirk; confirm before relying on this message.
    print "Launched master in %s, regid = %s" % (zone, master_res.id)
    zoo_nodes = []
    # Return all the instances
    return (master_nodes, slave_nodes, zoo_nodes)
# Get the EC2 instances in an existing cluster if available.
# Returns a tuple of lists of EC2 instance objects for the masters,
# slaves and zookeeper nodes (in that order).
def get_existing_cluster(conn, opts, cluster_name, die_on_error=True):
    """
    Find the instances belonging to *cluster_name* by matching each
    reservation's security-group names against the cluster's -master,
    -slaves, and -zoo groups. With die_on_error, exits unless both a
    master and at least one slave were found.
    """
    print "Searching for existing cluster " + cluster_name + "..."
    reservations = conn.get_all_instances()
    master_nodes = []
    slave_nodes = []
    zoo_nodes = []
    for res in reservations:
        # Only consider reservations that still have active instances.
        active = [i for i in res.instances if is_active(i)]
        if len(active) > 0:
            group_names = [g.name for g in res.groups]
            if group_names == [cluster_name + "-master"]:
                master_nodes += res.instances
            elif group_names == [cluster_name + "-slaves"]:
                slave_nodes += res.instances
            elif group_names == [cluster_name + "-zoo"]:
                zoo_nodes += res.instances
    if any((master_nodes, slave_nodes, zoo_nodes)):
        print ("Found %d master(s), %d slaves, %d ZooKeeper nodes" %
               (len(master_nodes), len(slave_nodes), len(zoo_nodes)))
    if (master_nodes != [] and slave_nodes != []) or not die_on_error:
        return (master_nodes, slave_nodes, zoo_nodes)
    else:
        if master_nodes == [] and slave_nodes != []:
            print "ERROR: Could not find master in group " + cluster_name + "-master"
        elif master_nodes != [] and slave_nodes == []:
            print "ERROR: Could not find slaves in group " + cluster_name + "-slaves"
        else:
            print "ERROR: Could not find any existing cluster"
        sys.exit(1)
# Deploy configuration files and run setup scripts on a newly launched
# or started EC2 cluster.
def setup_cluster(conn, master_nodes, slave_nodes, zoo_nodes, opts, deploy_ssh_key):
  """Deploy configuration to the master and run the remote setup scripts.

  The node lists are those returned by launch_cluster()/get_existing_cluster().
  deploy_ssh_key controls whether the local identity file is copied to the
  master (so the master can in turn ssh into the slaves).
  """
  master = master_nodes[0].public_dns_name
  if deploy_ssh_key:
    print "Copying SSH key %s to master..." % opts.identity_file
    ssh(master, opts, 'mkdir -p ~/.ssh')
    scp(master, opts, opts.identity_file, '~/.ssh/id_rsa')
    ssh(master, opts, 'chmod 600 ~/.ssh/id_rsa')
  # Choose the setup modules; parse_args restricts cluster_type to these two
  # values, so `modules` is always bound here.
  if opts.cluster_type == "mesos":
    modules = ['ephemeral-hdfs', 'persistent-hdfs', 'mapreduce', 'mesos']
  elif opts.cluster_type == "standalone":
    modules = ['ephemeral-hdfs', 'persistent-hdfs', 'mapreduce',
               'spark-standalone']
  if opts.ganglia:
    modules.append('ganglia')
  if not opts.old_scripts:
    # NOTE: We should clone the repository before running deploy_files to
    # prevent ec2-variables.sh from being overwritten
    ssh(master, opts, "rm -rf spark-ec2 && git clone https://github.com/pwendell/spark-ec2.git -b ec2-updates")
  print "Deploying files to master..."
  deploy_files(conn, "deploy.generic", opts, master_nodes, slave_nodes,
               zoo_nodes, modules)
  print "Running setup on master..."
  # Old AMIs use the legacy per-cluster-type scripts; new ones use spark-ec2.
  if opts.old_scripts:
    if opts.cluster_type == "mesos":
      setup_mesos_cluster(master, opts)
    elif opts.cluster_type == "standalone":
      setup_standalone_cluster(master, slave_nodes, opts)
  else:
    setup_spark_cluster(master, opts)
  print "Done!"
def setup_mesos_cluster(master, opts):
  """Run the legacy mesos-ec2 setup script on the master node."""
  setup_cmd = "mesos-ec2/setup %s %s %s %s" % ("generic", "none", "master", opts.swap)
  for remote_cmd in ("chmod u+x mesos-ec2/setup", setup_cmd):
    ssh(master, opts, remote_cmd)
def setup_standalone_cluster(master, slave_nodes, opts):
  """Write spark/conf/slaves on the master and start the standalone daemons."""
  # One public DNS name per line, exactly what spark/conf/slaves expects.
  dns_names = [inst.public_dns_name for inst in slave_nodes]
  ssh(master, opts, "echo \"%s\" > spark/conf/slaves" % ('\n'.join(dns_names)))
  ssh(master, opts, "/root/spark/bin/start-all.sh")
def setup_spark_cluster(master, opts):
ssh(master, opts, "chmod u+x spark-ec2/setup.sh")
ssh(master, opts, "spark-ec2/setup.sh")
if opts.cluster_type == "mesos":
print "Mesos cluster started at http://%s:8080" % master
elif opts.cluster_type == "standalone":
print "Spark standalone cluster started at http://%s:8080" % master
if opts.ganglia:
print "Ganglia started at http://%s:5080/ganglia" % master
# Wait for a whole cluster (masters, slaves and ZooKeeper) to start up
def wait_for_cluster(conn, wait_secs, master_nodes, slave_nodes, zoo_nodes):
print "Waiting for instances to start up..."
time.sleep(5)
wait_for_instances(conn, master_nodes)
wait_for_instances(conn, slave_nodes)
if zoo_nodes != []:
wait_for_instances(conn, zoo_nodes)
print "Waiting %d more seconds..." % wait_secs
time.sleep(wait_secs)
# Get number of local disks available for a given EC2 instance type.
def get_num_disks(instance_type):
  """Return the number of local (ephemeral) disks for an EC2 instance type.

  Unknown types fall back to 1, with a warning on stderr.
  """
  # From http://docs.amazonwebservices.com/AWSEC2/latest/UserGuide/index.html?InstanceStorage.html
  disk_counts = {
    "m1.small": 1,
    "m1.medium": 1,
    "m1.large": 2,
    "m1.xlarge": 4,
    "t1.micro": 1,
    "c1.medium": 1,
    "c1.xlarge": 4,
    "m2.xlarge": 1,
    "m2.2xlarge": 1,
    "m2.4xlarge": 2,
    "cc1.4xlarge": 2,
    "cc2.8xlarge": 4,
    "cg1.4xlarge": 2,
    "hs1.8xlarge": 24,
    "cr1.8xlarge": 2,
    "hi1.4xlarge": 2,
    "m3.xlarge": 0,
    "m3.2xlarge": 0
  }
  try:
    return disk_counts[instance_type]
  except KeyError:
    print >> stderr, ("WARNING: Don't know number of disks on instance type %s; assuming 1"
                      % instance_type)
    return 1
# Deploy the configuration file templates in a given local directory to
# a cluster, filling in any template parameters with information about the
# cluster (e.g. lists of masters and slaves). Files are only deployed to
# the first master instance in the cluster, and we expect the setup
# script to be run on that instance to copy them to other nodes.
def deploy_files(conn, root_dir, opts, master_nodes, slave_nodes, zoo_nodes,
    modules):
  """Render the config templates under root_dir and rsync them to the master.

  Every file under root_dir has its {{key}} placeholders replaced with the
  cluster-specific values in template_vars, written to a temp tree mirroring
  the destination layout, then rsynced to '/' on the first master.  Setup
  scripts on the master propagate the files to the other nodes.
  """
  active_master = master_nodes[0].public_dns_name
  num_disks = get_num_disks(opts.instance_type)
  # Spread scratch space across every ephemeral disk (/mnt, /mnt2, ...).
  hdfs_data_dirs = "/mnt/ephemeral-hdfs/data"
  mapred_local_dirs = "/mnt/hadoop/mrlocal"
  spark_local_dirs = "/mnt/spark"
  if num_disks > 1:
    for i in range(2, num_disks + 1):
      hdfs_data_dirs += ",/mnt%d/ephemeral-hdfs/data" % i
      mapred_local_dirs += ",/mnt%d/hadoop/mrlocal" % i
      spark_local_dirs += ",/mnt%d/spark" % i
  # Cluster URL: ZooKeeper quorum if present, otherwise the master itself
  # (Mesos port 5050, standalone port 7077).
  if zoo_nodes != []:
    zoo_list = '\n'.join([i.public_dns_name for i in zoo_nodes])
    cluster_url = "zoo://" + ",".join(
        ["%s:2181/mesos" % i.public_dns_name for i in zoo_nodes])
  elif opts.cluster_type == "mesos":
    zoo_list = "NONE"
    cluster_url = "%s:5050" % active_master
  elif opts.cluster_type == "standalone":
    zoo_list = "NONE"
    cluster_url = "%s:7077" % active_master
  template_vars = {
    "master_list": '\n'.join([i.public_dns_name for i in master_nodes]),
    "active_master": active_master,
    "slave_list": '\n'.join([i.public_dns_name for i in slave_nodes]),
    "zoo_list": zoo_list,
    "cluster_url": cluster_url,
    "hdfs_data_dirs": hdfs_data_dirs,
    "mapred_local_dirs": mapred_local_dirs,
    "spark_local_dirs": spark_local_dirs,
    "swap": str(opts.swap),
    "modules": '\n'.join(modules)
  }
  # Create a temp directory in which we will place all the files to be
  # deployed after we substitute template parameters in them
  tmp_dir = tempfile.mkdtemp()
  for path, dirs, files in os.walk(root_dir):
    if path.find(".svn") == -1:
      dest_dir = os.path.join('/', path[len(root_dir):])
      local_dir = tmp_dir + dest_dir
      if not os.path.exists(local_dir):
        os.makedirs(local_dir)
      for filename in files:
        # Skip editor backups and hidden files (#foo, .foo, foo~).
        if filename[0] not in '#.~' and filename[-1] != '~':
          dest_file = os.path.join(dest_dir, filename)
          local_file = tmp_dir + dest_file
          with open(os.path.join(path, filename)) as src:
            with open(local_file, "w") as dest:
              text = src.read()
              for key in template_vars:
                text = text.replace("{{" + key + "}}", template_vars[key])
              dest.write(text)
              # NOTE(review): close() is redundant inside the "with" block
              # (harmless; the context manager closes the file again).
              dest.close()
  # rsync the whole directory over to the master machine
  command = (("rsync -rv -e 'ssh -o StrictHostKeyChecking=no -i %s' " +
      "'%s/' '%s@%s:/'") % (opts.identity_file, tmp_dir, opts.user, active_master))
  subprocess.check_call(command, shell=True)
  # Remove the temp directory we created above
  shutil.rmtree(tmp_dir)
# Copy a file to a given host through scp, throwing an exception if scp fails
def scp(host, opts, local_file, dest_file):
  """Copy local_file to dest_file on host via scp.

  Raises subprocess.CalledProcessError if scp exits non-zero.
  """
  scp_cmd = ("scp -q -o StrictHostKeyChecking=no -i %s '%s' '%s@%s:%s'" %
             (opts.identity_file, local_file, opts.user, host, dest_file))
  subprocess.check_call(scp_cmd, shell=True)
# Run a command on a host through ssh, retrying up to two times
# and then throwing an exception if ssh continues to fail.
def ssh(host, opts, command):
tries = 0
while True:
try:
return subprocess.check_call(
"ssh -t -o StrictHostKeyChecking=no -i %s %s@%s '%s'" %
(opts.identity_file, opts.user, host, command), shell=True)
except subprocess.CalledProcessError as e:
if (tries > 2):
raise e
print "Error connecting to host {0}, sleeping 30".format(e)
time.sleep(30)
tries = tries + 1
# Gets a list of zones to launch instances in
def get_zones(conn, opts):
  """Return the list of availability zones to launch instances in.

  'all' expands to every zone of the connected region; any other value is
  returned verbatim as a single-element list.
  """
  if opts.zone != 'all':
    return [opts.zone]
  return [z.name for z in conn.get_all_zones()]
# Gets the number of items in a partition
def get_partition(total, num_partitions, current_partitions):
  """Return how many of `total` items the partition with index
  `current_partitions` receives when splitting as evenly as possible
  across `num_partitions` partitions.

  The first (total % num_partitions) partitions each get one extra item.
  """
  # BUG FIX: use explicit floor division. Plain "/" happens to floor for
  # ints on Python 2, but yields a float slave count under
  # "from __future__ import division" or Python 3.
  num_slaves_this_zone = total // num_partitions
  if (total % num_partitions) - current_partitions > 0:
    num_slaves_this_zone += 1
  return num_slaves_this_zone
def main():
  """Command-line entry point: connect to EC2 and dispatch on <action>.

  Supported actions: launch, destroy, login, get-master, stop, start.
  """
  (opts, action, cluster_name) = parse_args()
  try:
    conn = ec2.connect_to_region(opts.region)
  except Exception as e:
    print >> stderr, (e)
    sys.exit(1)
  # Select an AZ at random if it was not specified.
  if opts.zone == "":
    opts.zone = random.choice(conn.get_all_zones()).name
  if action == "launch":
    if opts.resume:
      (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
          conn, opts, cluster_name)
    else:
      (master_nodes, slave_nodes, zoo_nodes) = launch_cluster(
          conn, opts, cluster_name)
      wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes, zoo_nodes)
    setup_cluster(conn, master_nodes, slave_nodes, zoo_nodes, opts, True)
  elif action == "destroy":
    # Destruction is irreversible; require explicit confirmation.
    response = raw_input("Are you sure you want to destroy the cluster " +
        cluster_name + "?\nALL DATA ON ALL NODES WILL BE LOST!!\n" +
        "Destroy cluster " + cluster_name + " (y/N): ")
    if response == "y":
      (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
          conn, opts, cluster_name, die_on_error=False)
      print "Terminating master..."
      for inst in master_nodes:
        inst.terminate()
      print "Terminating slaves..."
      for inst in slave_nodes:
        inst.terminate()
      if zoo_nodes != []:
        print "Terminating zoo..."
        for inst in zoo_nodes:
          inst.terminate()
      # Delete security groups as well
      if opts.delete_groups:
        print "Deleting security groups (this will take some time)..."
        group_names = [cluster_name + "-master", cluster_name + "-slaves", cluster_name + "-zoo"]
        attempt = 1;
        while attempt <= 3:
          print "Attempt %d" % attempt
          groups = [g for g in conn.get_all_security_groups() if g.name in group_names]
          success = True
          # Delete individual rules in all groups before deleting groups to
          # remove dependencies between them
          for group in groups:
            print "Deleting rules in security group " + group.name
            for rule in group.rules:
              for grant in rule.grants:
                  success &= group.revoke(ip_protocol=rule.ip_protocol,
                             from_port=rule.from_port,
                             to_port=rule.to_port,
                             src_group=grant)
          # Sleep for AWS eventual-consistency to catch up, and for instances
          # to terminate
          time.sleep(30)  # Yes, it does have to be this long :-(
          for group in groups:
            try:
              conn.delete_security_group(group.name)
              print "Deleted security group " + group.name
            except boto.exception.EC2ResponseError:
              success = False;
              print "Failed to delete security group " + group.name
          # Unfortunately, group.revoke() returns True even if a rule was not
          # deleted, so this needs to be rerun if something fails
          if success: break;
          attempt += 1
        if not success:
          print "Failed to delete all security groups after 3 tries."
          print "Try re-running in a few minutes."
  elif action == "login":
    (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
        conn, opts, cluster_name)
    master = master_nodes[0].public_dns_name
    print "Logging into master " + master + "..."
    # Optional SOCKS proxy via ssh dynamic port forwarding (-D).
    proxy_opt = ""
    if opts.proxy_port != None:
      proxy_opt = "-D " + opts.proxy_port
    subprocess.check_call("ssh -o StrictHostKeyChecking=no -i %s %s %s@%s" %
        (opts.identity_file, proxy_opt, opts.user, master), shell=True)
  elif action == "get-master":
    (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(conn, opts, cluster_name)
    print master_nodes[0].public_dns_name
  elif action == "stop":
    response = raw_input("Are you sure you want to stop the cluster " +
        cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
        "BUT THE CLUSTER WILL KEEP USING SPACE ON\n" +
        "AMAZON EBS IF IT IS EBS-BACKED!!\n" +
        "Stop cluster " + cluster_name + " (y/N): ")
    if response == "y":
      (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
          conn, opts, cluster_name, die_on_error=False)
      print "Stopping master..."
      for inst in master_nodes:
        if inst.state not in ["shutting-down", "terminated"]:
          inst.stop()
      print "Stopping slaves..."
      for inst in slave_nodes:
        if inst.state not in ["shutting-down", "terminated"]:
          inst.stop()
      if zoo_nodes != []:
        print "Stopping zoo..."
        for inst in zoo_nodes:
          if inst.state not in ["shutting-down", "terminated"]:
            inst.stop()
  elif action == "start":
    (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
        conn, opts, cluster_name)
    # Start slaves before the master, then re-run setup (False: the SSH key
    # is already on the master from the original launch).
    print "Starting slaves..."
    for inst in slave_nodes:
      if inst.state not in ["shutting-down", "terminated"]:
        inst.start()
    print "Starting master..."
    for inst in master_nodes:
      if inst.state not in ["shutting-down", "terminated"]:
        inst.start()
    if zoo_nodes != []:
      print "Starting zoo..."
      for inst in zoo_nodes:
        if inst.state not in ["shutting-down", "terminated"]:
          inst.start()
    wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes, zoo_nodes)
    setup_cluster(conn, master_nodes, slave_nodes, zoo_nodes, opts, False)
  else:
    print >> stderr, "Invalid action: %s" % action
    sys.exit(1)
# Script entry point: configure root logging (boto logs via `logging`)
# before dispatching to main().
if __name__ == "__main__":
  logging.basicConfig()
  main()
# Removing unnecessary parsing
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import logging
import os
import random
import shutil
import subprocess
import sys
import tempfile
import time
import urllib2
from optparse import OptionParser
from sys import stderr
import boto
from boto.ec2.blockdevicemapping import BlockDeviceMapping, EBSBlockDeviceType
from boto import ec2
# A URL prefix from which to fetch AMI information; AMI ids are published
# one file per <spark-version>/<region>/<virtualization-type> (see get_spark_ami)
AMI_PREFIX = "https://raw.github.com/pwendell/spark-ec2/ec2-updates/ami-list"
# Configure and parse our command-line arguments
def parse_args():
  """Parse command-line options plus the <action> <cluster_name> arguments.

  Exits with a usage or error message on bad input.  Also verifies that AWS
  credentials are discoverable (boto config file or environment variables)
  before returning (opts, action, cluster_name).
  """
  parser = OptionParser(usage="spark-ec2 [options] <action> <cluster_name>"
      + "\n\n<action> can be: launch, destroy, login, stop, start, get-master",
      add_help_option=False)
  parser.add_option("-h", "--help", action="help",
                    help="Show this help message and exit")
  parser.add_option("-s", "--slaves", type="int", default=1,
      help="Number of slaves to launch (default: 1)")
  parser.add_option("-w", "--wait", type="int", default=120,
      help="Seconds to wait for nodes to start (default: 120)")
  parser.add_option("-k", "--key-pair",
      help="Key pair to use on instances")
  parser.add_option("-i", "--identity-file",
      help="SSH private key file to use for logging into instances")
  parser.add_option("-t", "--instance-type", default="m1.large",
      help="Type of instance to launch (default: m1.large). " +
           "WARNING: must be 64-bit; small instances won't work")
  parser.add_option("-m", "--master-instance-type", default="",
      help="Master instance type (leave empty for same as instance-type)")
  parser.add_option("-r", "--region", default="us-east-1",
      help="EC2 region zone to launch instances in")
  parser.add_option("-z", "--zone", default="",
      help="Availability zone to launch instances in, or 'all' to spread " +
           "slaves across multiple (an additional $0.01/Gb for bandwidth" +
           "between zones applies)")
  parser.add_option("-a", "--ami", default="v0.7.0",
      help="Amazon Machine Image ID to use, or 'vX.Y.Z' to use version " +
           "X.Y.Z of Spark (default: v0.7.0)")
  parser.add_option("-D", metavar="[ADDRESS:]PORT", dest="proxy_port",
      help="Use SSH dynamic port forwarding to create a SOCKS proxy at " +
           "the given local address (for use with login)")
  parser.add_option("--resume", action="store_true", default=False,
      help="Resume installation on a previously launched cluster " +
           "(for debugging)")
  parser.add_option("--ebs-vol-size", metavar="SIZE", type="int", default=0,
      help="Attach a new EBS volume of size SIZE (in GB) to each node as " +
           "/vol. The volumes will be deleted when the instances terminate. " +
           "Only possible on EBS-backed AMIs.")
  parser.add_option("--swap", metavar="SWAP", type="int", default=1024,
      help="Swap space to set up per node, in MB (default: 1024)")
  parser.add_option("--spot-price", metavar="PRICE", type="float",
      help="If specified, launch slaves as spot instances with the given " +
            "maximum price (in dollars)")
  parser.add_option("--cluster-type", type="choice", metavar="TYPE",
      choices=["mesos", "standalone"], default="standalone",
      help="'mesos' for a Mesos cluster, 'standalone' for a standalone " +
           "Spark cluster (default: standalone)")
  parser.add_option("--ganglia", action="store_true", default=True,
      help="Setup Ganglia monitoring on cluster (default: on). NOTE: " +
           "the Ganglia page will be publicly accessible")
  parser.add_option("--no-ganglia", action="store_false", dest="ganglia",
      help="Disable Ganglia monitoring for the cluster")
  parser.add_option("--old-scripts", action="store_true", default=False,
      help="Use old mesos-ec2 scripts, for Spark <= 0.6 AMIs")
  parser.add_option("-u", "--user", default="root",
      help="The SSH user you want to connect as (default: root)")
  parser.add_option("--delete-groups", action="store_true", default=False,
      help="When destroying a cluster, delete the security groups that were created")
  (opts, args) = parser.parse_args()
  if len(args) != 2:
    parser.print_help()
    sys.exit(1)
  (action, cluster_name) = args
  # An identity file is only required for actions that shell into the nodes.
  if opts.identity_file == None and action in ['launch', 'login']:
    print >> stderr, ("ERROR: The -i or --identity-file argument is " +
                      "required for " + action)
    sys.exit(1)
  if opts.cluster_type not in ["mesos", "standalone"] and action == "launch":
    print >> stderr, ("ERROR: Invalid cluster type: " + opts.cluster_type)
    sys.exit(1)
  # Boto config check
  # http://boto.cloudhackers.com/en/latest/boto_config_tut.html
  home_dir = os.getenv('HOME')
  if home_dir == None or not os.path.isfile(home_dir + '/.boto'):
    if not os.path.isfile('/etc/boto.cfg'):
      if os.getenv('AWS_ACCESS_KEY_ID') == None:
        print >> stderr, ("ERROR: The environment variable AWS_ACCESS_KEY_ID " +
                          "must be set")
        sys.exit(1)
      if os.getenv('AWS_SECRET_ACCESS_KEY') == None:
        print >> stderr, ("ERROR: The environment variable AWS_SECRET_ACCESS_KEY " +
                          "must be set")
        sys.exit(1)
  return (opts, action, cluster_name)
# Get the EC2 security group of the given name, creating it if it doesn't exist
def get_or_make_group(conn, name):
groups = conn.get_all_security_groups()
group = [g for g in groups if g.name == name]
if len(group) > 0:
return group[0]
else:
print "Creating security group " + name
return conn.create_security_group(name, "Spark EC2 group")
# Wait for a set of launched instances to exit the "pending" state
# (i.e. either to start running or to fail and be terminated)
def wait_for_instances(conn, instances):
  """Poll every 5 seconds until no instance in `instances` is 'pending'."""
  while True:
    for inst in instances:
      inst.update()  # refresh cached state from EC2
    still_pending = any(inst.state == 'pending' for inst in instances)
    if not still_pending:
      return
    time.sleep(5)
# Check whether a given EC2 instance object is in a state we consider active,
# i.e. not terminating or terminated. We count both stopping and stopped as
# active since we can restart stopped clusters.
def is_active(instance):
  """True when the instance is usable or restartable: pending/running and
  also stopping/stopped count, since stopped clusters can be restarted."""
  return instance.state in ('pending', 'running', 'stopping', 'stopped')
# Attempt to resolve an appropriate AMI given the architecture and
# region of the request.
def get_spark_ami(opts):
version = opts.ami
instance_types = {
"m1.small": "pvm",
"m1.medium": "pvm",
"m1.large": "pvm",
"m1.xlarge": "pvm",
"t1.micro": "pvm",
"c1.medium": "pvm",
"c1.xlarge": "pvm",
"m2.xlarge": "pvm",
"m2.2xlarge": "pvm",
"m2.4xlarge": "pvm",
"cc1.4xlarge": "hvm",
"cc2.8xlarge": "hvm",
"cg1.4xlarge": "hvm",
"hs1.8xlarge": "hvm",
"hi1.4xlarge": "hvm",
"m3.xlarge": "hvm",
"m3.2xlarge": "hvm",
"cr1.8xlarge": "hvm"
}
if opts.instance_type in instance_types:
instance_type = instance_types[opts.instance_type]
else:
instance_type = "pvm"
print >> stderr,\
"Don't recognize %s, assuming type is pvm" % opts.instance_type
if version != "v0.7.0":
print >> stderr, \
"Don't know how to resolve AMI for version: %s" % version
# TODO(pwendell) Once we have multiple Spark AMI versions, we should let
# people give a version flag here in place of just saying 'latest'.
version = version[1:]
ami_path = "%s/%s/%s/%s" % (AMI_PREFIX, version, opts.region, instance_type)
try:
ami = urllib2.urlopen(ami_path).read().strip()
print "Spark AMI: " + ami
except:
print >> stderr, "Could not resolve AMI at: " + ami_path
sys.exit(1)
return ami
# Launch a cluster of the given name, by setting up its security groups,
# and then starting new instances in them.
# Returns a tuple of EC2 reservation objects for the master, slave
# and zookeeper instances (in that order).
# Fails if there already instances running in the cluster's groups.
def launch_cluster(conn, opts, cluster_name):
print "Setting up security groups..."
master_group = get_or_make_group(conn, cluster_name + "-master")
slave_group = get_or_make_group(conn, cluster_name + "-slaves")
zoo_group = get_or_make_group(conn, cluster_name + "-zoo")
if master_group.rules == []: # Group was just now created
master_group.authorize(src_group=master_group)
master_group.authorize(src_group=slave_group)
master_group.authorize(src_group=zoo_group)
master_group.authorize('tcp', 22, 22, '0.0.0.0/0')
master_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
master_group.authorize('tcp', 50030, 50030, '0.0.0.0/0')
master_group.authorize('tcp', 50070, 50070, '0.0.0.0/0')
master_group.authorize('tcp', 60070, 60070, '0.0.0.0/0')
if opts.cluster_type == "mesos":
master_group.authorize('tcp', 38090, 38090, '0.0.0.0/0')
if opts.ganglia:
master_group.authorize('tcp', 5080, 5080, '0.0.0.0/0')
if slave_group.rules == []: # Group was just now created
slave_group.authorize(src_group=master_group)
slave_group.authorize(src_group=slave_group)
slave_group.authorize(src_group=zoo_group)
slave_group.authorize('tcp', 22, 22, '0.0.0.0/0')
slave_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
slave_group.authorize('tcp', 50060, 50060, '0.0.0.0/0')
slave_group.authorize('tcp', 50075, 50075, '0.0.0.0/0')
slave_group.authorize('tcp', 60060, 60060, '0.0.0.0/0')
slave_group.authorize('tcp', 60075, 60075, '0.0.0.0/0')
if zoo_group.rules == []: # Group was just now created
zoo_group.authorize(src_group=master_group)
zoo_group.authorize(src_group=slave_group)
zoo_group.authorize(src_group=zoo_group)
zoo_group.authorize('tcp', 22, 22, '0.0.0.0/0')
zoo_group.authorize('tcp', 2181, 2181, '0.0.0.0/0')
zoo_group.authorize('tcp', 2888, 2888, '0.0.0.0/0')
zoo_group.authorize('tcp', 3888, 3888, '0.0.0.0/0')
# Check if instances are already running in our groups
active_nodes = get_existing_cluster(conn, opts, cluster_name,
die_on_error=False)
if any(active_nodes):
print >> stderr, ("ERROR: There are already instances running in " +
"group %s, %s or %s" % (master_group.name, slave_group.name, zoo_group.name))
sys.exit(1)
# Figure out Spark AMI
if opts.ami[0] == "v":
opts.ami = get_spark_ami(opts)
print "Launching instances..."
try:
image = conn.get_all_images(image_ids=[opts.ami])[0]
except:
print >> stderr, "Could not find AMI " + opts.ami
sys.exit(1)
# Create block device mapping so that we can add an EBS volume if asked to
block_map = BlockDeviceMapping()
if opts.ebs_vol_size > 0:
device = EBSBlockDeviceType()
device.size = opts.ebs_vol_size
device.delete_on_termination = True
block_map["/dev/sdv"] = device
# Launch slaves
if opts.spot_price != None:
# Launch spot instances with the requested price
print ("Requesting %d slaves as spot instances with price $%.3f" %
(opts.slaves, opts.spot_price))
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
my_req_ids = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
slave_reqs = conn.request_spot_instances(
price = opts.spot_price,
image_id = opts.ami,
launch_group = "launch-group-%s" % cluster_name,
placement = zone,
count = num_slaves_this_zone,
key_name = opts.key_pair,
security_groups = [slave_group],
instance_type = opts.instance_type,
block_device_map = block_map)
my_req_ids += [req.id for req in slave_reqs]
i += 1
print "Waiting for spot instances to be granted..."
try:
while True:
time.sleep(10)
reqs = conn.get_all_spot_instance_requests()
id_to_req = {}
for r in reqs:
id_to_req[r.id] = r
active_instance_ids = []
for i in my_req_ids:
if i in id_to_req and id_to_req[i].state == "active":
active_instance_ids.append(id_to_req[i].instance_id)
if len(active_instance_ids) == opts.slaves:
print "All %d slaves granted" % opts.slaves
reservations = conn.get_all_instances(active_instance_ids)
slave_nodes = []
for r in reservations:
slave_nodes += r.instances
break
else:
print "%d of %d slaves granted, waiting longer" % (
len(active_instance_ids), opts.slaves)
except:
print "Canceling spot instance requests"
conn.cancel_spot_instance_requests(my_req_ids)
# Log a warning if any of these requests actually launched instances:
(master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
running = len(master_nodes) + len(slave_nodes) + len(zoo_nodes)
if running:
print >> stderr, ("WARNING: %d instances are still running" % running)
sys.exit(0)
else:
# Launch non-spot instances
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
slave_nodes = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
if num_slaves_this_zone > 0:
slave_res = image.run(key_name = opts.key_pair,
security_groups = [slave_group],
instance_type = opts.instance_type,
placement = zone,
min_count = num_slaves_this_zone,
max_count = num_slaves_this_zone,
block_device_map = block_map)
slave_nodes += slave_res.instances
print "Launched %d slaves in %s, regid = %s" % (num_slaves_this_zone,
zone, slave_res.id)
i += 1
# Launch masters
master_type = opts.master_instance_type
if master_type == "":
master_type = opts.instance_type
if opts.zone == 'all':
opts.zone = random.choice(conn.get_all_zones()).name
master_res = image.run(key_name = opts.key_pair,
security_groups = [master_group],
instance_type = master_type,
placement = opts.zone,
min_count = 1,
max_count = 1,
block_device_map = block_map)
master_nodes = master_res.instances
print "Launched master in %s, regid = %s" % (zone, master_res.id)
zoo_nodes = []
# Return all the instances
return (master_nodes, slave_nodes, zoo_nodes)
# Get the EC2 instances in an existing cluster if available.
# Returns a tuple of lists of EC2 instance objects for the masters,
# slaves and zookeeper nodes (in that order).
def get_existing_cluster(conn, opts, cluster_name, die_on_error=True):
print "Searching for existing cluster " + cluster_name + "..."
reservations = conn.get_all_instances()
master_nodes = []
slave_nodes = []
zoo_nodes = []
for res in reservations:
active = [i for i in res.instances if is_active(i)]
if len(active) > 0:
group_names = [g.name for g in res.groups]
if group_names == [cluster_name + "-master"]:
master_nodes += res.instances
elif group_names == [cluster_name + "-slaves"]:
slave_nodes += res.instances
elif group_names == [cluster_name + "-zoo"]:
zoo_nodes += res.instances
if any((master_nodes, slave_nodes, zoo_nodes)):
print ("Found %d master(s), %d slaves, %d ZooKeeper nodes" %
(len(master_nodes), len(slave_nodes), len(zoo_nodes)))
if (master_nodes != [] and slave_nodes != []) or not die_on_error:
return (master_nodes, slave_nodes, zoo_nodes)
else:
if master_nodes == [] and slave_nodes != []:
print "ERROR: Could not find master in group " + cluster_name + "-master"
elif master_nodes != [] and slave_nodes == []:
print "ERROR: Could not find slaves in group " + cluster_name + "-slaves"
else:
print "ERROR: Could not find any existing cluster"
sys.exit(1)
# Deploy configuration files and run setup scripts on a newly launched
# or started EC2 cluster.
def setup_cluster(conn, master_nodes, slave_nodes, zoo_nodes, opts, deploy_ssh_key):
  """Deploy configuration to the master and run the remote setup scripts.

  The node lists are those returned by launch_cluster()/get_existing_cluster().
  deploy_ssh_key controls whether the local identity file is copied to the
  master (so the master can in turn ssh into the slaves).
  """
  master = master_nodes[0].public_dns_name
  if deploy_ssh_key:
    print "Copying SSH key %s to master..." % opts.identity_file
    ssh(master, opts, 'mkdir -p ~/.ssh')
    scp(master, opts, opts.identity_file, '~/.ssh/id_rsa')
    ssh(master, opts, 'chmod 600 ~/.ssh/id_rsa')
  # Choose the setup modules; parse_args restricts cluster_type to these two
  # values, so `modules` is always bound here.
  if opts.cluster_type == "mesos":
    modules = ['ephemeral-hdfs', 'persistent-hdfs', 'mapreduce', 'mesos']
  elif opts.cluster_type == "standalone":
    modules = ['ephemeral-hdfs', 'persistent-hdfs', 'mapreduce',
               'spark-standalone']
  if opts.ganglia:
    modules.append('ganglia')
  if not opts.old_scripts:
    # NOTE: We should clone the repository before running deploy_files to
    # prevent ec2-variables.sh from being overwritten
    ssh(master, opts, "rm -rf spark-ec2 && git clone https://github.com/pwendell/spark-ec2.git -b ec2-updates")
  print "Deploying files to master..."
  deploy_files(conn, "deploy.generic", opts, master_nodes, slave_nodes,
               zoo_nodes, modules)
  print "Running setup on master..."
  # Old AMIs use the legacy per-cluster-type scripts; new ones use spark-ec2.
  if opts.old_scripts:
    if opts.cluster_type == "mesos":
      setup_mesos_cluster(master, opts)
    elif opts.cluster_type == "standalone":
      setup_standalone_cluster(master, slave_nodes, opts)
  else:
    setup_spark_cluster(master, opts)
  print "Done!"
def setup_mesos_cluster(master, opts):
  """Run the legacy mesos-ec2 setup script on the master node."""
  setup_cmd = "mesos-ec2/setup %s %s %s %s" % ("generic", "none", "master", opts.swap)
  for remote_cmd in ("chmod u+x mesos-ec2/setup", setup_cmd):
    ssh(master, opts, remote_cmd)
def setup_standalone_cluster(master, slave_nodes, opts):
  """Write spark/conf/slaves on the master and start the standalone daemons."""
  # One public DNS name per line, exactly what spark/conf/slaves expects.
  dns_names = [inst.public_dns_name for inst in slave_nodes]
  ssh(master, opts, "echo \"%s\" > spark/conf/slaves" % ('\n'.join(dns_names)))
  ssh(master, opts, "/root/spark/bin/start-all.sh")
def setup_spark_cluster(master, opts):
ssh(master, opts, "chmod u+x spark-ec2/setup.sh")
ssh(master, opts, "spark-ec2/setup.sh")
if opts.cluster_type == "mesos":
print "Mesos cluster started at http://%s:8080" % master
elif opts.cluster_type == "standalone":
print "Spark standalone cluster started at http://%s:8080" % master
if opts.ganglia:
print "Ganglia started at http://%s:5080/ganglia" % master
# Wait for a whole cluster (masters, slaves and ZooKeeper) to start up
def wait_for_cluster(conn, wait_secs, master_nodes, slave_nodes, zoo_nodes):
print "Waiting for instances to start up..."
time.sleep(5)
wait_for_instances(conn, master_nodes)
wait_for_instances(conn, slave_nodes)
if zoo_nodes != []:
wait_for_instances(conn, zoo_nodes)
print "Waiting %d more seconds..." % wait_secs
time.sleep(wait_secs)
# Get number of local disks available for a given EC2 instance type.
def get_num_disks(instance_type):
  """Return the number of local (ephemeral) disks for an EC2 instance type.

  Unknown types fall back to 1, with a warning on stderr.
  """
  # From http://docs.amazonwebservices.com/AWSEC2/latest/UserGuide/index.html?InstanceStorage.html
  disk_counts = {
    "m1.small": 1,
    "m1.medium": 1,
    "m1.large": 2,
    "m1.xlarge": 4,
    "t1.micro": 1,
    "c1.medium": 1,
    "c1.xlarge": 4,
    "m2.xlarge": 1,
    "m2.2xlarge": 1,
    "m2.4xlarge": 2,
    "cc1.4xlarge": 2,
    "cc2.8xlarge": 4,
    "cg1.4xlarge": 2,
    "hs1.8xlarge": 24,
    "cr1.8xlarge": 2,
    "hi1.4xlarge": 2,
    "m3.xlarge": 0,
    "m3.2xlarge": 0
  }
  try:
    return disk_counts[instance_type]
  except KeyError:
    print >> stderr, ("WARNING: Don't know number of disks on instance type %s; assuming 1"
                      % instance_type)
    return 1
# Deploy the configuration file templates in a given local directory to
# a cluster, filling in any template parameters with information about the
# cluster (e.g. lists of masters and slaves). Files are only deployed to
# the first master instance in the cluster, and we expect the setup
# script to be run on that instance to copy them to other nodes.
def deploy_files(conn, root_dir, opts, master_nodes, slave_nodes, zoo_nodes,
        modules):
    # Render the templates under `root_dir` ({{var}} placeholders substituted
    # with cluster info) into a temp directory, then rsync that whole tree to
    # "/" on the first master. The setup script on the master is expected to
    # distribute the files to the other nodes.
    active_master = master_nodes[0].public_dns_name
    num_disks = get_num_disks(opts.instance_type)
    # Single-disk defaults; additional ephemeral disks add /mnt2, /mnt3, ...
    hdfs_data_dirs = "/mnt/ephemeral-hdfs/data"
    mapred_local_dirs = "/mnt/hadoop/mrlocal"
    spark_local_dirs = "/mnt/spark"
    if num_disks > 1:
        for i in range(2, num_disks + 1):
            hdfs_data_dirs += ",/mnt%d/ephemeral-hdfs/data" % i
            mapred_local_dirs += ",/mnt%d/hadoop/mrlocal" % i
            spark_local_dirs += ",/mnt%d/spark" % i
    # Cluster URL depends on mode: ZooKeeper-backed Mesos, plain Mesos,
    # or standalone Spark.
    if zoo_nodes != []:
        zoo_list = '\n'.join([i.public_dns_name for i in zoo_nodes])
        cluster_url = "zoo://" + ",".join(
            ["%s:2181/mesos" % i.public_dns_name for i in zoo_nodes])
    elif opts.cluster_type == "mesos":
        zoo_list = "NONE"
        cluster_url = "%s:5050" % active_master
    elif opts.cluster_type == "standalone":
        zoo_list = "NONE"
        cluster_url = "%s:7077" % active_master
    template_vars = {
        "master_list": '\n'.join([i.public_dns_name for i in master_nodes]),
        "active_master": active_master,
        "slave_list": '\n'.join([i.public_dns_name for i in slave_nodes]),
        "zoo_list": zoo_list,
        "cluster_url": cluster_url,
        "hdfs_data_dirs": hdfs_data_dirs,
        "mapred_local_dirs": mapred_local_dirs,
        "spark_local_dirs": spark_local_dirs,
        "swap": str(opts.swap),
        "modules": '\n'.join(modules)
    }
    # Create a temp directory in which we will place all the files to be
    # deployed after we substitute template parameters in them
    tmp_dir = tempfile.mkdtemp()
    for path, dirs, files in os.walk(root_dir):
        if path.find(".svn") == -1:
            dest_dir = os.path.join('/', path[len(root_dir):])
            local_dir = tmp_dir + dest_dir
            if not os.path.exists(local_dir):
                os.makedirs(local_dir)
            for filename in files:
                # Skip hidden files, '#' template comments and editor backups.
                if filename[0] not in '#.~' and filename[-1] != '~':
                    dest_file = os.path.join(dest_dir, filename)
                    local_file = tmp_dir + dest_file
                    with open(os.path.join(path, filename)) as src:
                        with open(local_file, "w") as dest:
                            text = src.read()
                            for key in template_vars:
                                text = text.replace("{{" + key + "}}", template_vars[key])
                            dest.write(text)
                            dest.close()
    # rsync the whole directory over to the master machine
    command = (("rsync -rv -e 'ssh -o StrictHostKeyChecking=no -i %s' " +
        "'%s/' '%s@%s:/'") % (opts.identity_file, tmp_dir, opts.user, active_master))
    subprocess.check_call(command, shell=True)
    # Remove the temp directory we created above
    shutil.rmtree(tmp_dir)
# Copy a file to a given host through scp, throwing an exception if scp fails
def scp(host, opts, local_file, dest_file):
    """Copy local_file to dest_file on host via scp.

    Raises subprocess.CalledProcessError when scp exits non-zero.
    """
    cmd = "scp -q -o StrictHostKeyChecking=no -i %s '%s' '%s@%s:%s'" % (
        opts.identity_file, local_file, opts.user, host, dest_file)
    subprocess.check_call(cmd, shell=True)
# Run a command on a host through ssh, retrying up to two times
# and then throwing an exception if ssh continues to fail.
def ssh(host, opts, command):
tries = 0
while True:
try:
return subprocess.check_call(
"ssh -t -o StrictHostKeyChecking=no -i %s %s@%s '%s'" %
(opts.identity_file, opts.user, host, command), shell=True)
except subprocess.CalledProcessError as e:
if (tries > 2):
raise e
print "Error connecting to host {0}, sleeping 30".format(e)
time.sleep(30)
tries = tries + 1
# Gets a list of zones to launch instances in
def get_zones(conn, opts):
    """Return the availability zones to launch instances in.

    'all' expands to every zone in the connected region; anything else is
    taken as a single literal zone name.
    """
    if opts.zone != 'all':
        return [opts.zone]
    return [z.name for z in conn.get_all_zones()]
# Gets the number of items in a partition
def get_partition(total, num_partitions, current_partitions):
    """Return how many of `total` items belong in the next partition.

    The remainder of total/num_partitions is spread one item at a time over
    the first partitions (those with index < remainder get one extra).

    FIX: use explicit floor division (//) so the result is an int even under
    Python 3 or `from __future__ import division`; plain `/` only floored by
    accident on Python 2 ints.
    """
    num_slaves_this_zone = total // num_partitions
    if (total % num_partitions) - current_partitions > 0:
        num_slaves_this_zone += 1
    return num_slaves_this_zone
def main():
    # CLI entry point: parse arguments, connect to EC2, and dispatch on the
    # requested action (launch / destroy / login / get-master / stop / start).
    (opts, action, cluster_name) = parse_args()
    try:
        conn = ec2.connect_to_region(opts.region)
    except Exception as e:
        print >> stderr, (e)
        sys.exit(1)
    # Select an AZ at random if it was not specified.
    if opts.zone == "":
        opts.zone = random.choice(conn.get_all_zones()).name
    if action == "launch":
        if opts.resume:
            # Resume: pick up already-running instances instead of launching.
            (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
                conn, opts, cluster_name)
        else:
            (master_nodes, slave_nodes, zoo_nodes) = launch_cluster(
                conn, opts, cluster_name)
            wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes, zoo_nodes)
        setup_cluster(conn, master_nodes, slave_nodes, zoo_nodes, opts, True)
    elif action == "destroy":
        response = raw_input("Are you sure you want to destroy the cluster " +
            cluster_name + "?\nALL DATA ON ALL NODES WILL BE LOST!!\n" +
            "Destroy cluster " + cluster_name + " (y/N): ")
        if response == "y":
            (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
                conn, opts, cluster_name, die_on_error=False)
            print "Terminating master..."
            for inst in master_nodes:
                inst.terminate()
            print "Terminating slaves..."
            for inst in slave_nodes:
                inst.terminate()
            if zoo_nodes != []:
                print "Terminating zoo..."
                for inst in zoo_nodes:
                    inst.terminate()
            # Delete security groups as well
            if opts.delete_groups:
                print "Deleting security groups (this will take some time)..."
                group_names = [cluster_name + "-master", cluster_name + "-slaves", cluster_name + "-zoo"]
                attempt = 1;
                while attempt <= 3:
                    print "Attempt %d" % attempt
                    groups = [g for g in conn.get_all_security_groups() if g.name in group_names]
                    success = True
                    # Delete individual rules in all groups before deleting groups to
                    # remove dependencies between them
                    for group in groups:
                        print "Deleting rules in security group " + group.name
                        for rule in group.rules:
                            for grant in rule.grants:
                                success &= group.revoke(ip_protocol=rule.ip_protocol,
                                    from_port=rule.from_port,
                                    to_port=rule.to_port,
                                    src_group=grant)
                    # Sleep for AWS eventual-consistency to catch up, and for instances
                    # to terminate
                    time.sleep(30)  # Yes, it does have to be this long :-(
                    for group in groups:
                        try:
                            conn.delete_security_group(group.name)
                            print "Deleted security group " + group.name
                        except boto.exception.EC2ResponseError:
                            success = False;
                            print "Failed to delete security group " + group.name
                    # Unfortunately, group.revoke() returns True even if a rule was not
                    # deleted, so this needs to be rerun if something fails
                    if success: break;
                    attempt += 1
                if not success:
                    print "Failed to delete all security groups after 3 tries."
                    print "Try re-running in a few minutes."
    elif action == "login":
        (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
            conn, opts, cluster_name)
        master = master_nodes[0].public_dns_name
        print "Logging into master " + master + "..."
        # Optional SOCKS proxy (ssh -D) for tunnelling to the cluster UIs.
        proxy_opt = ""
        if opts.proxy_port != None:
            proxy_opt = "-D " + opts.proxy_port
        subprocess.check_call("ssh -o StrictHostKeyChecking=no -i %s %s %s@%s" %
            (opts.identity_file, proxy_opt, opts.user, master), shell=True)
    elif action == "get-master":
        (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(conn, opts, cluster_name)
        print master_nodes[0].public_dns_name
    elif action == "stop":
        response = raw_input("Are you sure you want to stop the cluster " +
            cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
            "BUT THE CLUSTER WILL KEEP USING SPACE ON\n" +
            "AMAZON EBS IF IT IS EBS-BACKED!!\n" +
            "Stop cluster " + cluster_name + " (y/N): ")
        if response == "y":
            (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
                conn, opts, cluster_name, die_on_error=False)
            print "Stopping master..."
            for inst in master_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    inst.stop()
            print "Stopping slaves..."
            for inst in slave_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    inst.stop()
            if zoo_nodes != []:
                print "Stopping zoo..."
                for inst in zoo_nodes:
                    if inst.state not in ["shutting-down", "terminated"]:
                        inst.stop()
    elif action == "start":
        (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
            conn, opts, cluster_name)
        # Start slaves before the master, then re-run setup (without
        # re-deploying SSH keys: the final False flag).
        print "Starting slaves..."
        for inst in slave_nodes:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        print "Starting master..."
        for inst in master_nodes:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        if zoo_nodes != []:
            print "Starting zoo..."
            for inst in zoo_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    inst.start()
        wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes, zoo_nodes)
        setup_cluster(conn, master_nodes, slave_nodes, zoo_nodes, opts, False)
    else:
        print >> stderr, "Invalid action: %s" % action
        sys.exit(1)
if __name__ == "__main__":
    # Install a default logging handler (boto logs through `logging`)
    # before dispatching to the CLI entry point.
    logging.basicConfig()
    main()
|
import sqlalchemy
from osmaxx.converters.gis_converter.bootstrap.bootstrap import BootStrapper
from tests.inside_worker_test.conftest import slow, cleanup_osmaxx_schemas
from tests.inside_worker_test.declarative_schema import osm_models
@slow
def test_osmaxx_data_model_processing_puts_amenity_grave_yard_with_religion_into_table_pow_a(
        osmaxx_functions, clean_osm_tables, monkeypatch):
    """An osm_polygon row with amenity='grave_yard' and a non-empty religion
    must end up exactly once in the osmaxx 'pow_a' table after bootstrap."""
    assert osmaxx_functions == clean_osm_tables  # same db-connection
    engine = osmaxx_functions

    def create_osm_data():
        # Stand-in for the PBF import: insert one matching polygon row.
        engine.execute(
            osm_models.t_osm_polygon.insert().values(
                amenity='grave_yard',
                religion='any value will do, as long as one is present',
            ).execution_options(autocommit=True)
        )
    # Route the bootstrapper's DB access through the test engine and stub out
    # the steps the fixtures already performed.
    monkeypatch.setattr(
        'osmaxx.converters.gis_converter.helper.postgres_wrapper.create_engine', lambda *_, **__: engine)
    monkeypatch.setattr(BootStrapper, '_reset_database', lambda _self: None)  # Already taken care of by fixtures.
    monkeypatch.setattr(BootStrapper, '_convert_osm_pbf_to_postgres', lambda _self: create_osm_data())
    monkeypatch.setattr(BootStrapper, '_setup_db_functions', lambda _self: None)  # Already taken care of by fixtures.
    bootstrapper = BootStrapper(pbf_file_path=None)  # path unused: conversion is stubbed
    try:
        bootstrapper.bootstrap()
        t_pow_a = sqlalchemy.sql.schema.Table('pow_a', osm_models.metadata, schema='osmaxx')
        result = engine.execute(sqlalchemy.select([t_pow_a]))
        assert result.rowcount == 1
    finally:
        try:
            del result  # The (unfetched) result would block the dropping of SCHEMA "osmaxx" in the following cleanup.
        except NameError:
            pass  # bootstrap() raised before `result` was bound.
        cleanup_osmaxx_schemas(engine)
modify comment: Whether or not the result has been fetched doesn't matter
import sqlalchemy
from osmaxx.converters.gis_converter.bootstrap.bootstrap import BootStrapper
from tests.inside_worker_test.conftest import slow, cleanup_osmaxx_schemas
from tests.inside_worker_test.declarative_schema import osm_models
@slow
def test_osmaxx_data_model_processing_puts_amenity_grave_yard_with_religion_into_table_pow_a(
        osmaxx_functions, clean_osm_tables, monkeypatch):
    """An osm_polygon row with amenity='grave_yard' and a non-empty religion
    must end up exactly once in the osmaxx 'pow_a' table after bootstrap."""
    assert osmaxx_functions == clean_osm_tables  # same db-connection
    engine = osmaxx_functions

    def create_osm_data():
        # Stand-in for the PBF import: insert one matching polygon row.
        engine.execute(
            osm_models.t_osm_polygon.insert().values(
                amenity='grave_yard',
                religion='any value will do, as long as one is present',
            ).execution_options(autocommit=True)
        )
    # Route the bootstrapper's DB access through the test engine and stub out
    # the steps the fixtures already performed.
    monkeypatch.setattr(
        'osmaxx.converters.gis_converter.helper.postgres_wrapper.create_engine', lambda *_, **__: engine)
    monkeypatch.setattr(BootStrapper, '_reset_database', lambda _self: None)  # Already taken care of by fixtures.
    monkeypatch.setattr(BootStrapper, '_convert_osm_pbf_to_postgres', lambda _self: create_osm_data())
    monkeypatch.setattr(BootStrapper, '_setup_db_functions', lambda _self: None)  # Already taken care of by fixtures.
    bootstrapper = BootStrapper(pbf_file_path=None)  # path unused: conversion is stubbed
    try:
        bootstrapper.bootstrap()
        t_pow_a = sqlalchemy.sql.schema.Table('pow_a', osm_models.metadata, schema='osmaxx')
        result = engine.execute(sqlalchemy.select([t_pow_a]))
        assert result.rowcount == 1
    finally:
        try:
            del result  # The result would block the dropping of SCHEMA "osmaxx" in the following cleanup.
        except NameError:
            pass  # bootstrap() raised before `result` was bound.
        cleanup_osmaxx_schemas(engine)
|
from errors import *
import json
import requests
import re
class BaseCriteria(object):
    """Query helper: fetches a collection from `resource` and wraps each
    raw item via ``resource.get_from``."""

    def __init__(self, resource):
        self.resource = resource

    def __iter__(self):
        # FIX: __iter__ must return an *iterator*; the original returned the
        # list from all() directly, which makes `for x in criteria` raise
        # TypeError ("iter() returned non-iterator").
        return iter(self.all())

    def all(self):
        """Return every matching object."""
        resp = self.request()
        return [self.resource.get_from(o) for o in resp]

    def one(self):
        """Return the first matching object (IndexError when none match)."""
        resp = self.request()
        return self.resource.get_from(resp[0])

    def compute_params(self):
        # Hook for subclasses to supply query-string parameters.
        return None

    def request(self):
        params = self.compute_params()
        return self.resource.request('get', self.resource.url(), params=params)
class BaseObject(dict):
    """dict subclass exposing keys as attributes.

    Tracks keys modified since the last load in ``_unsaved_values`` and keys
    known to exist but not loaded in ``_transient_values``.
    """

    _pk = 'id'  # primary-key field name

    def __init__(self, id=None):
        self._unsaved_values = set()    # keys assigned since the last reload
        self._transient_values = set()  # keys dropped by a reload
        self[self._pk] = id

    def __setattr__(self, k, v):
        # Private names, existing instance attributes and class-level names go
        # on the object itself; everything else becomes a dict entry.
        if k[0] == '_' or k in self.__dict__ or hasattr(type(self), k):
            return super(BaseObject, self).__setattr__(k, v)
        else:
            self[k] = v

    def __getattr__(self, k):
        if k[0] == '_':
            raise AttributeError(k)
        try:
            return self[k]
        except KeyError as err:  # FIX: was py2-only "except KeyError, err"
            raise AttributeError(*err.args)

    def __setitem__(self, k, v):
        super(BaseObject, self).__setitem__(k, v)
        self._unsaved_values.add(k)

    def __getitem__(self, k):
        try:
            return super(BaseObject, self).__getitem__(k)
        except KeyError as err:  # FIX: was py2-only "except KeyError, err"
            if k in self._transient_values:
                raise KeyError(
                    "%r attribute not set, available values on this object are: %s" %
                    (k, ', '.join(self.keys())))
            else:
                raise err

    def __delitem__(self, k):
        raise TypeError(
            "You cannot delete attributes, to unset a property, set it to None.")
class classproperty(property):
    """Property evaluated on the class itself; wraps a classmethod fget."""
    def __get__(self, cls, owner):
        bound = self.fget.__get__(None, owner)
        return bound()
class BaseModel(BaseObject):
    """REST resource model.

    Unknown keys are fetched lazily from the API as sub-resources; nested
    dicts/lists are inflated into model instances via ``inline_models``.
    NOTE(review): all HTTP goes through ``requests`` in :meth:`request`.
    """

    criteria_class = BaseCriteria
    # Re-export error classes so callers can catch e.g. Model.ObjectNotFound.
    ObjectNotFound = ObjectNotFound
    RequestError = RequestError
    AccessError = AccessError
    MethodError = MethodError
    IntegrityError = IntegrityError
    # attribute name -> model class used to inflate nested dicts/lists
    inline_models = dict()

    def __str__(self):
        return "%s #%s" % (self.__class__.__name__, self.obj_id())

    def __getitem__(self, k):
        try:
            return super(BaseModel, self).__getitem__(k)
        except KeyError:
            # Unknown key: try to fetch it as a sub-resource of this instance.
            try:
                resp = self.__class__.request('get', self.instance_url() + '/' + k)
                self.load_attr(k, resp)
                return self[k]
            except self.RequestError:
                raise KeyError(
                    "%r attribute or method not found, available values on this object are: %s" %
                    (k, ', '.join(self.keys())))

    def obj_id(self):
        """Return this object's primary-key value."""
        return self[self._pk]

    def instance_url(self):
        return "%s/%s" % (self.__class__.url(), self.obj_id())

    @classmethod
    def url(cls):
        # CamelCase class name -> snake_case collection path, e.g. "/base_model".
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', cls.__name__)
        name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
        return "/%s" % (name)

    @classmethod
    def get(cls, id):
        """Fetch the object with the given primary key from the API."""
        obj = cls(id)
        obj.reload()
        return obj

    @classmethod
    def get_from(cls, values):
        """Build an instance from an already-fetched dict of values."""
        instance = cls(values.get(cls._pk, None))
        instance.reload_from(values)
        return instance

    def reload(self):
        self.reload_from(self.request('get', self.instance_url()))
        return self

    def reload_from(self, values):
        """Replace all attributes with `values`; keys that disappear are kept
        in _transient_values so later access raises a helpful KeyError."""
        removed = set(self.keys()) - set(values)
        self._transient_values = self._transient_values | removed
        self._unsaved_values = set()
        self.clear()
        self._transient_values = self._transient_values - set(values)
        # .items() instead of py2-only .iteritems() (works on both versions).
        for k, v in values.items():
            self.load_attr(k, v)

    def load_attr(self, k, v):
        """Store `v` under `k` without marking it unsaved, inflating nested
        dicts/lists into model instances."""
        if isinstance(v, dict):
            model_cls = self.inline_models.get(k, BaseModel)  # avoid shadowing builtin `type`
            value = model_cls.get_from(v)
        elif isinstance(v, list):
            model_cls = self.inline_models.get(k, BaseModel)
            value = []
            for o in v:
                # BUG FIX: the original tested `v` (the list itself, always
                # true here) instead of the element `o`, so scalar elements
                # were passed to get_from() and crashed.
                if isinstance(o, dict) or isinstance(o, list):
                    value.append(model_cls.get_from(o))
                else:
                    value.append(o)
        else:
            value = v
        # Bypass BaseObject.__setitem__ so loading does not flag k as unsaved.
        super(BaseObject, self).__setitem__(k, value)

    def save(self):
        """POST a new object, or PUT changes to an existing one."""
        if self.obj_id():
            self.reload_from(self.request('put', self.instance_url(), data=self.serialize()))
        else:
            self.reload_from(self.request('post', self.__class__.url(), data=self.serialize()))

    def delete(self):
        if self.obj_id():
            self.reload_from(self.request('delete', self.instance_url()))

    def serialize(self):
        """Return the dict of modified, non-primary-key fields."""
        params = {}
        if self._unsaved_values:
            for k in self._unsaved_values:
                if k == self._pk:
                    continue
                v = getattr(self, k)
                params[k] = v  # if v is not None else ""
        return params

    @classproperty
    @classmethod
    def find(cls):
        """Class-level query entry point, e.g. ``Model.find.all()``."""
        return cls.criteria_class(cls)

    @classmethod
    def request(cls, method, url, params=None, headers={'Content-Type': 'application/json'}, data=None):
        # NOTE(review): the mutable default `headers` is never mutated here,
        # so it is safe; kept as-is to preserve the public signature.
        resp = requests.request(method, url, headers=headers, data=json.dumps(data), params=params, timeout=80)
        if 200 <= resp.status_code < 399:
            return resp.json()
        elif resp.status_code == 400:
            raise IntegrityError(url)
        elif resp.status_code == 401:
            raise AccessError(url)
        elif resp.status_code == 405:
            raise MethodError(url)
        elif 402 <= resp.status_code < 500:
            raise ObjectNotFound(url)
        else:
            raise RequestError('API query error (%s - %s): %s %s' % (url, resp.status_code, resp.text, params) )
Fix: allow setting instance attributes that share a name with a method
from errors import *
import json
import requests
import re
class BaseCriteria(object):
    """Query helper: fetches a collection from `resource` and wraps each
    raw item via ``resource.get_from``."""

    def __init__(self, resource):
        self.resource = resource

    def __iter__(self):
        # FIX: __iter__ must return an *iterator*; the original returned the
        # list from all() directly, which makes `for x in criteria` raise
        # TypeError ("iter() returned non-iterator").
        return iter(self.all())

    def all(self):
        """Return every matching object."""
        resp = self.request()
        return [self.resource.get_from(o) for o in resp]

    def one(self):
        """Return the first matching object (IndexError when none match)."""
        resp = self.request()
        return self.resource.get_from(resp[0])

    def compute_params(self):
        # Hook for subclasses to supply query-string parameters.
        return None

    def request(self):
        params = self.compute_params()
        return self.resource.request('get', self.resource.url(), params=params)
class BaseObject(dict):
    """dict subclass exposing keys as attributes.

    Tracks keys modified since the last load in ``_unsaved_values`` and keys
    known to exist but not loaded in ``_transient_values``.
    """

    _pk = 'id'  # primary-key field name

    def __init__(self, id=None):
        self._unsaved_values = set()    # keys assigned since the last reload
        self._transient_values = set()  # keys dropped by a reload
        self[self._pk] = id

    def __setattr__(self, k, v):
        # Private names, existing instance attributes and *non-callable* class
        # attributes go on the object itself; everything else - including
        # names that clash with a method - becomes a dict entry.
        if k[0] == '_' or k in self.__dict__ or (hasattr(type(self), k) and not hasattr(getattr(type(self), k), '__call__') ):
            return super(BaseObject, self).__setattr__(k, v)
        else:
            self[k] = v

    def __getattr__(self, k):
        if k[0] == '_':
            raise AttributeError(k)
        try:
            return self[k]
        except KeyError as err:  # FIX: was py2-only "except KeyError, err"
            raise AttributeError(*err.args)

    def __setitem__(self, k, v):
        super(BaseObject, self).__setitem__(k, v)
        self._unsaved_values.add(k)

    def __getitem__(self, k):
        try:
            return super(BaseObject, self).__getitem__(k)
        except KeyError as err:  # FIX: was py2-only "except KeyError, err"
            if k in self._transient_values:
                raise KeyError(
                    "%r attribute not set, available values on this object are: %s" %
                    (k, ', '.join(self.keys())))
            else:
                raise err

    def __delitem__(self, k):
        raise TypeError(
            "You cannot delete attributes, to unset a property, set it to None.")
class classproperty(property):
    """Property evaluated on the class itself; wraps a classmethod fget."""
    def __get__(self, cls, owner):
        bound = self.fget.__get__(None, owner)
        return bound()
class BaseModel(BaseObject):
    """REST resource model.

    Unknown keys are fetched lazily from the API as sub-resources; nested
    dicts/lists are inflated into model instances via ``inline_models``.
    NOTE(review): all HTTP goes through ``requests`` in :meth:`request`.
    """

    criteria_class = BaseCriteria
    # Re-export error classes so callers can catch e.g. Model.ObjectNotFound.
    ObjectNotFound = ObjectNotFound
    RequestError = RequestError
    AccessError = AccessError
    MethodError = MethodError
    IntegrityError = IntegrityError
    # attribute name -> model class used to inflate nested dicts/lists
    inline_models = dict()

    def __str__(self):
        return "%s #%s" % (self.__class__.__name__, self.obj_id())

    def __getitem__(self, k):
        try:
            return super(BaseModel, self).__getitem__(k)
        except KeyError:
            # Unknown key: try to fetch it as a sub-resource of this instance.
            try:
                resp = self.__class__.request('get', self.instance_url() + '/' + k)
                self.load_attr(k, resp)
                return self[k]
            except self.RequestError:
                raise KeyError(
                    "%r attribute or method not found, available values on this object are: %s" %
                    (k, ', '.join(self.keys())))

    def obj_id(self):
        """Return this object's primary-key value."""
        return self[self._pk]

    def instance_url(self):
        return "%s/%s" % (self.__class__.url(), self.obj_id())

    @classmethod
    def url(cls):
        # CamelCase class name -> snake_case collection path, e.g. "/base_model".
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', cls.__name__)
        name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
        return "/%s" % (name)

    @classmethod
    def get(cls, id):
        """Fetch the object with the given primary key from the API."""
        obj = cls(id)
        obj.reload()
        return obj

    @classmethod
    def get_from(cls, values):
        """Build an instance from an already-fetched dict of values."""
        instance = cls(values.get(cls._pk, None))
        instance.reload_from(values)
        return instance

    def reload(self):
        self.reload_from(self.request('get', self.instance_url()))
        return self

    def reload_from(self, values):
        """Replace all attributes with `values`; keys that disappear are kept
        in _transient_values so later access raises a helpful KeyError."""
        removed = set(self.keys()) - set(values)
        self._transient_values = self._transient_values | removed
        self._unsaved_values = set()
        self.clear()
        self._transient_values = self._transient_values - set(values)
        # .items() instead of py2-only .iteritems() (works on both versions).
        for k, v in values.items():
            self.load_attr(k, v)

    def load_attr(self, k, v):
        """Store `v` under `k` without marking it unsaved, inflating nested
        dicts/lists into model instances."""
        if isinstance(v, dict):
            model_cls = self.inline_models.get(k, BaseModel)  # avoid shadowing builtin `type`
            value = model_cls.get_from(v)
        elif isinstance(v, list):
            model_cls = self.inline_models.get(k, BaseModel)
            value = []
            for o in v:
                # BUG FIX: the original tested `v` (the list itself, always
                # true here) instead of the element `o`, so scalar elements
                # were passed to get_from() and crashed.
                if isinstance(o, dict) or isinstance(o, list):
                    value.append(model_cls.get_from(o))
                else:
                    value.append(o)
        else:
            value = v
        # Bypass BaseObject.__setitem__ so loading does not flag k as unsaved.
        super(BaseObject, self).__setitem__(k, value)

    def save(self):
        """POST a new object, or PUT changes to an existing one."""
        if self.obj_id():
            self.reload_from(self.request('put', self.instance_url(), data=self.serialize()))
        else:
            self.reload_from(self.request('post', self.__class__.url(), data=self.serialize()))

    def delete(self):
        if self.obj_id():
            self.reload_from(self.request('delete', self.instance_url()))

    def serialize(self):
        """Return the dict of modified, non-primary-key fields."""
        params = {}
        if self._unsaved_values:
            for k in self._unsaved_values:
                if k == self._pk:
                    continue
                v = getattr(self, k)
                params[k] = v  # if v is not None else ""
        return params

    @classproperty
    @classmethod
    def find(cls):
        """Class-level query entry point, e.g. ``Model.find.all()``."""
        return cls.criteria_class(cls)

    @classmethod
    def request(cls, method, url, params=None, headers={'Content-Type': 'application/json'}, data=None):
        # NOTE(review): the mutable default `headers` is never mutated here,
        # so it is safe; kept as-is to preserve the public signature.
        resp = requests.request(method, url, headers=headers, data=json.dumps(data), params=params, timeout=80)
        if 200 <= resp.status_code < 399:
            return resp.json()
        elif resp.status_code == 400:
            raise IntegrityError(url)
        elif resp.status_code == 401:
            raise AccessError(url)
        elif resp.status_code == 405:
            raise MethodError(url)
        elif 402 <= resp.status_code < 500:
            raise ObjectNotFound(url)
        else:
            raise RequestError('API query error (%s - %s): %s %s' % (url, resp.status_code, resp.text, params) )
|
import os
import sys
from urllib2 import build_opener, HTTPCookieProcessor, Request
from urllib import urlencode
from subprocess import Popen, PIPE, STDOUT
import re
import urllib2
import tempfile
import subprocess
opener = build_opener(HTTPCookieProcessor)
def urlopen2(url, data=None, auth=True, user_agent='github-cli'):
    # Open `url`, optionally merging the GitHub login/token from the global
    # git config into the (form-encoded) request data. Network errors are
    # collapsed into generic Exceptions with a short message.
    if auth:
        config = get_config()
        auth_dict = {'login': config['user'], 'token': config['token']}
        if data:
            data.update(auth_dict)
        else:
            data = auth_dict
    # dicts/lists have __iter__ (py2 str does not), so this encodes mappings
    # while leaving pre-encoded string bodies untouched.
    if hasattr(data, "__iter__"):
        data = urlencode(data)
    headers = {'User-Agent' : user_agent}
    try:
        return opener.open(Request(url, data, headers))
    except urllib2.HTTPError:
        raise Exception("server problem")
    except urllib2.URLError:
        raise Exception("connection problem")
def get_remote_info():
command = "git config --get remote.origin.url"
stdout, stderr = Popen(command, shell=True, stdin=PIPE, stdout=PIPE,
stderr=PIPE).communicate()
if stderr:
for line in stderr.splitlines():
print line.lower()
elif stdout:
line = stdout.strip()
pattern = re.compile(r'([^:/]+)/([^/]+).git$')
result = pattern.search(line)
if result:
return result.groups()
else:
raise Exception("invalid user and repo name")
raise Exception("not a git repository")
def get_remote_info_from_option(repository):
    """Return (user, repo): either split from an explicit "user/repo", or
    the configured GitHub user paired with the bare repository name."""
    if "/" not in repository:
        config = get_config()
        return config['user'], repository
    user, repo = repository.split("/")
    return user, repo
def get_config():
    # Read the required GitHub settings (user, token) from the *global* git
    # config; print instructions and exit(1) when one is missing.
    required_keys = ["user", "token"]
    config = {}
    for key in required_keys:
        command = "git config --global github.%s" % key
        stdout, stderr = Popen(command, shell=True, stdin=PIPE, stdout=PIPE,
            stderr=PIPE).communicate()
        if stderr:
            for line in stderr.splitlines():
                print line
            sys.exit(1)
        if stdout:
            value = stdout.strip()
            config[key] = value
        else:
            # Friendlier wording for "user" in the help text below.
            alt_help_names = {'user': 'username'}
            help_name = alt_help_names.get(key, key)
            print "error: required GitHub entry '%s' not found in global git config" % key
            print "please add it to the global git config by doing this:"
            print
            print "git config --global github.%s <your GitHub %s>" % (key, help_name)
            sys.exit(1)
    return config
def edit_text(text):
    # Open `text` in the user's $EDITOR (default: vi) via a temp file and
    # return the edited content with full-line '#' comments stripped.
    editor = os.getenv('EDITOR', 'vi')
    f = tempfile.NamedTemporaryFile()
    f.write(text)
    f.flush()
    command = "%s %s" % (editor, f.name)
    ret = subprocess.call(command, shell=True)
    if ret != 0:
        print "error: editor command failed"
        sys.exit(1)
    # Re-open by name: the editor wrote a new version of the file.
    changed_text = open(f.name).read()
    f.close()
    # Drop '#' comment lines (template instructions), then trim whitespace.
    stripcomment_re = re.compile(r'^#.*$', re.MULTILINE)
    return stripcomment_re.sub('', changed_text).strip()
def get_prog():
    """Return the basename of the running program, or '<prog>' when argv
    gives no usable name."""
    argv0 = sys.argv[0] if sys.argv else ''
    return os.path.split(argv0)[1] if argv0 else '<prog>'
class Pager(object):
    """enable paging for multiple writes
    see http://svn.python.org/view/python/branches/release25-maint/Lib/pydoc.py?view=markup
    (getpager()) for handling different circumstances or platforms
    """

    def __init__(self):
        # Fall back to plain stdout when stdout is not a tty (e.g. piped)
        # or when no working pager command is found.
        self.proc = None
        self.file = sys.stdout  # ultimate fallback
        self.cmd = ''
        if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
            pager_commands = ['more -MR', 'more', 'less -MR', 'less']
            for cmd in pager_commands:
                # Probe via os.system: exit status 0 means the pager works.
                if hasattr(os, 'system') and \
                        os.system('(%s) 2>/dev/null' % cmd) == 0:
                    self.proc = subprocess.Popen([cmd], shell=True,
                        stdin=subprocess.PIPE, stderr=subprocess.PIPE)
                    self.file = self.proc.stdin
                    self.cmd = cmd
                    break

    def write(self, text=""):
        """Write one line, falling back to stdout if the pager breaks."""
        try:
            self.file.write("%s\n" % text)
        except Exception:
            # FIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit; catch only real errors from a
            # failed pager pipe.
            self.file = sys.stdout
            self.file.write("%s\n" % text)

    def close(self):
        """Flush and wait for the pager process (if any)."""
        if 'less' in self.cmd:
            self.write("press q to quit")
        if self.proc:
            self.file.close()
            try:
                self.proc.wait()
            except KeyboardInterrupt:
                # TODO: should kill the self.proc here gracefully
                sys.exit(0)  # close silently no matter what
Put the -E option back for the `more` pager command
import os
import sys
from urllib2 import build_opener, HTTPCookieProcessor, Request
from urllib import urlencode
from subprocess import Popen, PIPE, STDOUT
import re
import urllib2
import tempfile
import subprocess
opener = build_opener(HTTPCookieProcessor)
def urlopen2(url, data=None, auth=True, user_agent='github-cli'):
    # Open `url`, optionally merging the GitHub login/token from the global
    # git config into the (form-encoded) request data. Network errors are
    # collapsed into generic Exceptions with a short message.
    if auth:
        config = get_config()
        auth_dict = {'login': config['user'], 'token': config['token']}
        if data:
            data.update(auth_dict)
        else:
            data = auth_dict
    # dicts/lists have __iter__ (py2 str does not), so this encodes mappings
    # while leaving pre-encoded string bodies untouched.
    if hasattr(data, "__iter__"):
        data = urlencode(data)
    headers = {'User-Agent' : user_agent}
    try:
        return opener.open(Request(url, data, headers))
    except urllib2.HTTPError:
        raise Exception("server problem")
    except urllib2.URLError:
        raise Exception("connection problem")
def get_remote_info():
command = "git config --get remote.origin.url"
stdout, stderr = Popen(command, shell=True, stdin=PIPE, stdout=PIPE,
stderr=PIPE).communicate()
if stderr:
for line in stderr.splitlines():
print line.lower()
elif stdout:
line = stdout.strip()
pattern = re.compile(r'([^:/]+)/([^/]+).git$')
result = pattern.search(line)
if result:
return result.groups()
else:
raise Exception("invalid user and repo name")
raise Exception("not a git repository")
def get_remote_info_from_option(repository):
    """Return (user, repo): either split from an explicit "user/repo", or
    the configured GitHub user paired with the bare repository name."""
    if "/" not in repository:
        config = get_config()
        return config['user'], repository
    user, repo = repository.split("/")
    return user, repo
def get_config():
    # Read the required GitHub settings (user, token) from the *global* git
    # config; print instructions and exit(1) when one is missing.
    required_keys = ["user", "token"]
    config = {}
    for key in required_keys:
        command = "git config --global github.%s" % key
        stdout, stderr = Popen(command, shell=True, stdin=PIPE, stdout=PIPE,
            stderr=PIPE).communicate()
        if stderr:
            for line in stderr.splitlines():
                print line
            sys.exit(1)
        if stdout:
            value = stdout.strip()
            config[key] = value
        else:
            # Friendlier wording for "user" in the help text below.
            alt_help_names = {'user': 'username'}
            help_name = alt_help_names.get(key, key)
            print "error: required GitHub entry '%s' not found in global git config" % key
            print "please add it to the global git config by doing this:"
            print
            print "git config --global github.%s <your GitHub %s>" % (key, help_name)
            sys.exit(1)
    return config
def edit_text(text):
    # Open `text` in the user's $EDITOR (default: vi) via a temp file and
    # return the edited content with full-line '#' comments stripped.
    editor = os.getenv('EDITOR', 'vi')
    f = tempfile.NamedTemporaryFile()
    f.write(text)
    f.flush()
    command = "%s %s" % (editor, f.name)
    ret = subprocess.call(command, shell=True)
    if ret != 0:
        print "error: editor command failed"
        sys.exit(1)
    # Re-open by name: the editor wrote a new version of the file.
    changed_text = open(f.name).read()
    f.close()
    # Drop '#' comment lines (template instructions), then trim whitespace.
    stripcomment_re = re.compile(r'^#.*$', re.MULTILINE)
    return stripcomment_re.sub('', changed_text).strip()
def get_prog():
    """Return the basename of the running program, or '<prog>' when argv
    gives no usable name."""
    argv0 = sys.argv[0] if sys.argv else ''
    return os.path.split(argv0)[1] if argv0 else '<prog>'
class Pager(object):
    """enable paging for multiple writes
    see http://svn.python.org/view/python/branches/release25-maint/Lib/pydoc.py?view=markup
    (getpager()) for handling different circumstances or platforms
    """

    def __init__(self):
        # Fall back to plain stdout when stdout is not a tty (e.g. piped)
        # or when no working pager command is found.
        self.proc = None
        self.file = sys.stdout  # ultimate fallback
        self.cmd = ''
        if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
            pager_commands = ['more -EMR', 'more', 'less -MR', 'less']
            for cmd in pager_commands:
                # Probe via os.system: exit status 0 means the pager works.
                if hasattr(os, 'system') and \
                        os.system('(%s) 2>/dev/null' % cmd) == 0:
                    self.proc = subprocess.Popen([cmd], shell=True,
                        stdin=subprocess.PIPE, stderr=subprocess.PIPE)
                    self.file = self.proc.stdin
                    self.cmd = cmd
                    break

    def write(self, text=""):
        """Write one line, falling back to stdout if the pager breaks."""
        try:
            self.file.write("%s\n" % text)
        except Exception:
            # FIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit; catch only real errors from a
            # failed pager pipe.
            self.file = sys.stdout
            self.file.write("%s\n" % text)

    def close(self):
        """Flush and wait for the pager process (if any)."""
        if 'less' in self.cmd:
            self.write("press q to quit")
        if self.proc:
            self.file.close()
            try:
                self.proc.wait()
            except KeyboardInterrupt:
                # TODO: should kill the self.proc here gracefully
                sys.exit(0)  # close silently no matter what
|
# -*- coding: utf-8 -*-
#
import math
import numpy
import orthopy
from .gauss_legendre import GaussLegendre
from ..tools import scheme_from_rc
class GaussKronrod(object):
    """
    Gauss-Kronrod quadrature; see
    <https://en.wikipedia.org/wiki/Gauss%E2%80%93Kronrod_quadrature_formula>.
    Besides points and weights, this class provides the weights of the
    corresponding Gauss-Legendre scheme in self.gauss_weights.
    Code adapted from
    <https://www.cs.purdue.edu/archives/2002/wxg/codes/r_kronrod.m>,
    <https://www.cs.purdue.edu/archives/2002/wxg/codes/kronrod.m>.
    See
    Calculation of Gauss-Kronrod quadrature rules,
    Dirk P. Laurie,
    Math. Comp. 66 (1997), 1133-1145,
    <https://doi.org/10.1090/S0025-5718-97-00861-2>
    Abstract:
    The Jacobi matrix of the $(2n+1)$-point Gauss-Kronrod quadrature rule for a
    given measure is calculated efficiently by a five-term recurrence relation.
    The algorithm uses only rational operations and is therefore also useful
    for obtaining the Jacobi-Kronrod matrix analytically. The nodes and weights
    can then be computed directly by standard software for Gaussian quadrature
    formulas.
    """
    # NOTE(review): self.gauss_weights mentioned in the docstring is NOT set
    # anywhere in this class -- confirm against callers.

    def __init__(self, n, a=0.0, b=0.0):
        # The general scheme is:
        # Get the Jacobi recurrence coefficients, get the Kronrod vectors alpha
        # and beta, and hand those off to scheme_from_rc. There, the
        # eigenproblem for a tridiagonal matrix with alpha and beta is solved
        # to retrieve the points and weights.
        # `a`, `b`: Jacobi weight exponents (0, 0 -> Legendre weight).
        # TODO replace math.ceil by -(-k//n)
        length = int(math.ceil(3 * n / 2.0)) + 1
        self.degree = 2 * length + 1
        _, _, alpha, beta = orthopy.line_segment.recurrence_coefficients.jacobi(
            length, a, b, "monic"
        )
        # orthopy may hand back exact (sympy) numbers; force plain floats.
        flt = numpy.vectorize(float)
        alpha = flt(alpha)
        beta = flt(beta)
        a, b = self.r_kronrod(n, alpha, beta)
        x, w = scheme_from_rc(a, b, mode="numpy")
        # sort by x
        i = numpy.argsort(x)
        self.points = x[i]
        self.weights = w[i]
        return

    def r_kronrod(self, n, a0, b0):
        # Laurie's five-term recurrence: extend the (ceil(3n/2)+1) Jacobi
        # recurrence coefficients (a0, b0) to the Jacobi-Kronrod matrix of
        # size 2n+1, returned as (a, b).
        assert len(a0) == int(math.ceil(3 * n / 2.0)) + 1
        assert len(b0) == int(math.ceil(3 * n / 2.0)) + 1
        a = numpy.zeros(2 * n + 1)
        b = numpy.zeros(2 * n + 1)
        k = int(math.floor(3 * n / 2.0)) + 1
        a[:k] = a0[:k]
        k = int(math.ceil(3 * n / 2.0)) + 1
        b[:k] = b0[:k]
        # s, t are the rolling auxiliary vectors of the recurrence; they are
        # swapped after every outer step.
        s = numpy.zeros(int(math.floor(n / 2.0)) + 2)
        t = numpy.zeros(int(math.floor(n / 2.0)) + 2)
        t[1] = b[n + 1]
        for m in range(n - 1):
            k0 = int(math.floor((m + 1) / 2.0))
            k = numpy.arange(k0, -1, -1)
            L = m - k
            s[k + 1] = numpy.cumsum(
                (a[k + n + 1] - a[L]) * t[k + 1] + b[k + n + 1] * s[k] - b[L] * s[k + 1]
            )
            s, t = t, s
        j = int(math.floor(n / 2.0)) + 1
        s[1 : j + 1] = s[:j]
        for m in range(n - 1, 2 * n - 2):
            k0 = m + 1 - n
            k1 = int(math.floor((m - 1) / 2.0))
            k = numpy.arange(k0, k1 + 1)
            L = m - k
            j = n - 1 - L
            s[j + 1] = numpy.cumsum(
                -(a[k + n + 1] - a[L]) * t[j + 1]
                - b[k + n + 1] * s[j + 1]
                + b[L] * s[j + 2]
            )
            j = j[-1]
            k = int(math.floor((m + 1) / 2.0))
            # Even steps fix an alpha entry, odd steps a beta entry.
            if m % 2 == 0:
                a[k + n + 1] = a[k] + (s[j + 1] - b[k + n + 1] * s[j + 2]) / t[j + 2]
            else:
                b[k + n + 1] = s[j + 1] / s[j + 2]
            s, t = t, s
        a[2 * n] = a[n - 1] - b[2 * n] * s[1] / t[1]
        return a, b
def _gauss_kronrod_integrate(k, f, interval, dot=numpy.dot):
    # Integrate `f` over `interval` with the (2k+1)-point Gauss-Kronrod rule,
    # reusing the same function evaluations for the embedded k-point Gauss
    # estimate. Returns (kronrod value, gauss value, error estimate).
    def _scale_points(points, interval):
        # Affine map from [-1, 1] onto the target interval.
        alpha = 0.5 * (interval[1] - interval[0])
        beta = 0.5 * (interval[0] + interval[1])
        return (numpy.multiply.outer(points, alpha) + beta).T

    def _integrate(values, weights, interval_length, dot):
        """Integration with point values explicitly specified.
        """
        return 0.5 * interval_length * dot(values, weights)

    # Compute the integral estimations according to Gauss and Gauss-Kronrod,
    # sharing the function evaluations
    scheme = GaussKronrod(k)
    gauss_weights = GaussLegendre(k).weights
    point_vals_gk = f(_scale_points(scheme.points, interval))
    # Every second Kronrod point is a Gauss point (points are sorted).
    point_vals_g = point_vals_gk[..., 1::2]
    alpha = abs(interval[1] - interval[0])
    val_gauss_kronrod = _integrate(point_vals_gk, scheme.weights, alpha, dot=dot)
    val_gauss = _integrate(point_vals_g, gauss_weights, alpha, dot=dot)
    # Get an error estimate. According to
    #
    #   A Review of Error Estimation in Adaptive Quadrature
    #   Pedro Gonnet,
    #   ACM Computing Surveys (CSUR) Surveys,
    #   Volume 44, Issue 4, August 2012
    #   <https://doi.org/10.1145/2333112.2333117>,
    #   <https://arxiv.org/pdf/1003.4629.pdf>
    #
    # the classicial QUADPACK still compares favorably with other approaches.
    average = val_gauss_kronrod / alpha
    point_vals_abs = abs(point_vals_gk - average[..., None])
    I_tilde = _integrate(point_vals_abs, scheme.weights, alpha, dot=dot)
    # NOTE(review): divides by I_tilde below -- a constant integrand makes
    # I_tilde zero; confirm callers guard against that.
    # The exponent 1.5 is chosen such that (200*x)**1.5 is approximately x at
    # 1.0e-6, the machine precision on IEEE 754 32-bit floating point
    # arithmentic. This could be adapted to
    #
    #   eps = numpy.finfo(float).eps
    #   exponent = numpy.log(eps) / numpy.log(200*eps)
    #
    error_estimate = I_tilde * numpy.minimum(
        numpy.ones(I_tilde.shape),
        (200 * abs(val_gauss_kronrod - val_gauss) / I_tilde) ** 1.5,
    )
    return val_gauss_kronrod, val_gauss, error_estimate
better error message for adaptive integration
# -*- coding: utf-8 -*-
#
import math
import numpy
import orthopy
from .gauss_legendre import GaussLegendre
from ..tools import scheme_from_rc
class GaussKronrod(object):
    """
    Gauss-Kronrod quadrature; see
    <https://en.wikipedia.org/wiki/Gauss%E2%80%93Kronrod_quadrature_formula>.

    Besides points and weights, this class provides the weights of the
    corresponding Gauss-Legendre scheme in self.gauss_weights.

    Code adapted from
    <https://www.cs.purdue.edu/archives/2002/wxg/codes/r_kronrod.m>,
    <https://www.cs.purdue.edu/archives/2002/wxg/codes/kronrod.m>.
    See

    Calculation of Gauss-Kronrod quadrature rules,
    Dirk P. Laurie,
    Math. Comp. 66 (1997), 1133-1145,
    <https://doi.org/10.1090/S0025-5718-97-00861-2>

    Abstract:
    The Jacobi matrix of the $(2n+1)$-point Gauss-Kronrod quadrature rule for a
    given measure is calculated efficiently by a five-term recurrence relation.
    The algorithm uses only rational operations and is therefore also useful
    for obtaining the Jacobi-Kronrod matrix analytically. The nodes and weights
    can then be computed directly by standard software for Gaussian quadrature
    formulas.
    """

    def __init__(self, n, a=0.0, b=0.0):
        """Build the (2n+1)-point Gauss-Kronrod rule for the Jacobi weight
        with exponents ``a`` and ``b`` (the defaults 0.0, 0.0 give the
        Legendre weight).  Sets ``self.points``, ``self.weights`` (sorted by
        point) and ``self.degree``.
        """
        # The general scheme is:
        # Get the Jacobi recurrence coefficients, get the Kronrod vectors alpha
        # and beta, and hand those off to scheme_from_rc. There, the
        # eigenproblem for a tridiagonal matrix with alpha and beta is solved
        # to retrieve the points and weights.
        # TODO replace math.ceil by -(-k//n)
        length = int(math.ceil(3 * n / 2.0)) + 1
        self.degree = 2 * length + 1
        _, _, alpha, beta = orthopy.line_segment.recurrence_coefficients.jacobi(
            length, a, b, "monic"
        )
        # The recurrence coefficients may come back as exact (symbolic)
        # numbers; convert element-wise to plain floats for the numerics below.
        flt = numpy.vectorize(float)
        alpha = flt(alpha)
        beta = flt(beta)
        a, b = self.r_kronrod(n, alpha, beta)
        x, w = scheme_from_rc(a, b, mode="numpy")
        # sort by x
        i = numpy.argsort(x)
        self.points = x[i]
        self.weights = w[i]
        return

    def r_kronrod(self, n, a0, b0):
        """Compute the recurrence coefficients ``(a, b)`` of the Jacobi-Kronrod
        matrix from the first ceil(3n/2)+1 recurrence coefficients
        ``(a0, b0)`` of the measure, via Laurie's five-term recurrence
        (see class docstring).  Direct translation of r_kronrod.m.
        """
        assert len(a0) == int(math.ceil(3 * n / 2.0)) + 1
        assert len(b0) == int(math.ceil(3 * n / 2.0)) + 1

        a = numpy.zeros(2 * n + 1)
        b = numpy.zeros(2 * n + 1)

        # The first floor(3n/2)+1 alphas and ceil(3n/2)+1 betas are those of
        # the original measure.
        k = int(math.floor(3 * n / 2.0)) + 1
        a[:k] = a0[:k]
        k = int(math.ceil(3 * n / 2.0)) + 1
        b[:k] = b0[:k]

        # s and t are scratch vectors for the recurrence; their roles are
        # swapped (s, t = t, s) after every step.
        s = numpy.zeros(int(math.floor(n / 2.0)) + 2)
        t = numpy.zeros(int(math.floor(n / 2.0)) + 2)
        t[1] = b[n + 1]
        for m in range(n - 1):
            k0 = int(math.floor((m + 1) / 2.0))
            k = numpy.arange(k0, -1, -1)
            L = m - k
            s[k + 1] = numpy.cumsum(
                (a[k + n + 1] - a[L]) * t[k + 1] + b[k + n + 1] * s[k] - b[L] * s[k + 1]
            )
            s, t = t, s

        j = int(math.floor(n / 2.0)) + 1
        s[1 : j + 1] = s[:j]
        for m in range(n - 1, 2 * n - 2):
            k0 = m + 1 - n
            k1 = int(math.floor((m - 1) / 2.0))
            k = numpy.arange(k0, k1 + 1)
            L = m - k
            j = n - 1 - L
            s[j + 1] = numpy.cumsum(
                -(a[k + n + 1] - a[L]) * t[j + 1]
                - b[k + n + 1] * s[j + 1]
                + b[L] * s[j + 2]
            )
            j = j[-1]
            k = int(math.floor((m + 1) / 2.0))
            # Even steps update an alpha, odd steps a beta.
            if m % 2 == 0:
                a[k + n + 1] = a[k] + (s[j + 1] - b[k + n + 1] * s[j + 2]) / t[j + 2]
            else:
                b[k + n + 1] = s[j + 1] / s[j + 2]
            s, t = t, s

        a[2 * n] = a[n - 1] - b[2 * n] * s[1] / t[1]
        return a, b
def _gauss_kronrod_integrate(k, f, interval, dot=numpy.dot):
    """Integrate ``f`` over ``interval`` with a (2k+1)-point Gauss-Kronrod rule.

    The embedded k-point Gauss rule reuses the same function evaluations.
    Returns the tuple ``(val_gauss_kronrod, val_gauss, error_estimate)``.
    """

    def _scale_points(points, interval):
        # Affine map of the reference points from [-1, 1] onto the interval.
        alpha = 0.5 * (interval[1] - interval[0])
        beta = 0.5 * (interval[0] + interval[1])
        return (numpy.multiply.outer(points, alpha) + beta).T

    def _integrate(values, weights, interval_length, dot):
        """Integration with point values explicitly specified.
        """
        return 0.5 * interval_length * dot(values, weights)

    # Compute the integral estimations according to Gauss and Gauss-Kronrod,
    # sharing the function evaluations
    scheme = GaussKronrod(k)
    gauss_weights = GaussLegendre(k).weights
    sp = _scale_points(scheme.points, interval)
    point_vals_gk = f(sp)
    assert point_vals_gk.shape == sp.shape, (
        "Function evaluation returned numpy array of wrong shape. "
        "(Input shape: {}, expected output shape: {}, got: {})".format(
            sp.shape, sp.shape, point_vals_gk.shape
        )
    )
    # Every other Gauss-Kronrod point is a Gauss point.
    point_vals_g = point_vals_gk[..., 1::2]
    alpha = abs(interval[1] - interval[0])
    val_gauss_kronrod = _integrate(point_vals_gk, scheme.weights, alpha, dot=dot)
    val_gauss = _integrate(point_vals_g, gauss_weights, alpha, dot=dot)
    # Get an error estimate. According to
    #
    #    A Review of Error Estimation in Adaptive Quadrature
    #    Pedro Gonnet,
    #    ACM Computing Surveys (CSUR) Surveys,
    #    Volume 44, Issue 4, August 2012
    #    <https://doi.org/10.1145/2333112.2333117>,
    #    <https://arxiv.org/pdf/1003.4629.pdf>
    #
    # the classical QUADPACK still compares favorably with other approaches.
    average = val_gauss_kronrod / alpha
    point_vals_abs = abs(point_vals_gk - average[..., None])
    I_tilde = _integrate(point_vals_abs, scheme.weights, alpha, dot=dot)
    # The exponent 1.5 is chosen such that (200*x)**1.5 is approximately x at
    # 1.0e-6, the machine precision on IEEE 754 32-bit floating point
    # arithmetic. This could be adapted to
    #
    #    eps = numpy.finfo(float).eps
    #    exponent = numpy.log(eps) / numpy.log(200*eps)
    #
    # For (near-)constant integrands, I_tilde vanishes and the plain quotient
    # would be 0/0 = nan, which then poisons the error estimate. Guard the
    # denominator: the substitute value 1.0 is harmless, because whenever it
    # is used the final product is multiplied by I_tilde == 0 anyway.
    denominator = numpy.where(I_tilde > 0.0, I_tilde, 1.0)
    error_estimate = I_tilde * numpy.minimum(
        numpy.ones(I_tilde.shape),
        (200 * abs(val_gauss_kronrod - val_gauss) / denominator) ** 1.5,
    )
    return val_gauss_kronrod, val_gauss, error_estimate
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import re
from datetime import datetime, timedelta
from trac.admin.tests.functional import AuthorizationTestCaseSetup
from trac.test import locale_en
from trac.tests.functional import *
from trac.util.datefmt import utc, localtz, format_date, format_datetime, \
pretty_timedelta
from trac.util.text import to_utf8
try:
from configobj import ConfigObj
except ImportError:
ConfigObj = None
class AdminEnumDefaultTestCaseSetup(FunctionalTwillTestCaseSetup):
    # Shared helper for the enum admin panels (priority, severity, ...):
    # set `name` as the default value, verify it is shown as selected,
    # then clear the default again.
    def test_default(self, enum, name):
        url = self._tester.url + '/admin/ticket/%s' % enum
        tc.go(url)
        # The trailing '$' anchors the URL match: exactly this page.
        tc.url(url + '$')
        tc.find(name)
        tc.formvalue('enumtable', 'default', name)
        tc.submit('apply')
        tc.url(url + '$')
        tc.find('radio.*"%s"\\schecked="checked"' % name)
        # Test the "Clear default" button
        tc.go(url)
        tc.submit('clear', formname='enumtable')
        tc.notfind('type="radio" name="default" value=".+" checked="checked"')
class TestTickets(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Create a ticket and comment on it."""
        # TODO: this should be split into multiple tests
        ticket_id = self._tester.create_ticket()
        self._tester.add_comment(ticket_id)
class TestTicketMaxSummarySize(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test `[ticket] max_summary_size` option.
        http://trac.edgewall.org/ticket/11472"""
        # Remember the current setting so it can be restored in `finally`.
        prev_max_summary_size = \
            self._testenv.get_config('ticket', 'max_summary_size')
        short_summary = "abcdefghijklmnopqrstuvwxyz"
        long_summary = short_summary + "."
        max_summary_size = len(short_summary)
        warning_message = r"Ticket summary is too long \(must be less " \
                          r"than %s characters\)" % max_summary_size
        self._testenv.set_config('ticket', 'max_summary_size',
                                 str(max_summary_size))
        try:
            # A summary of exactly the maximum size is accepted ...
            self._tester.create_ticket(short_summary)
            tc.find(short_summary)
            tc.notfind(warning_message)
            self._tester.go_to_front()
            tc.follow(r"\bNew Ticket\b")
            tc.notfind(internal_error)
            tc.url(self._tester.url + '/newticket')
            # ... while one character more triggers the warning and keeps
            # the user on the New Ticket page.
            tc.formvalue('propertyform', 'field_summary', long_summary)
            tc.submit('submit')
            tc.url(self._tester.url + '/newticket')
            tc.find(warning_message)
        finally:
            self._testenv.set_config('ticket', 'max_summary_size',
                                     prev_max_summary_size)
class TestTicketAddAttachment(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Add attachment to a ticket. Test that the attachment button
        reads 'Attach file' when no files have been attached, and 'Attach
        another file' when there are existing attachments.
        Feature added in http://trac.edgewall.org/ticket/10281"""
        id = self._tester.create_ticket()
        tc.find("Attach file")
        filename = self._tester.attach_file_to_ticket(id)

        self._tester.go_to_ticket(id)
        tc.find("Attach another file")
        # Attachment count badge and the "download all as zip" link.
        tc.find('Attachments <span class="trac-count">\(1\)</span>')
        tc.find(filename)
        tc.find('Download all attachments as:\s+<a rel="nofollow" '
                'href="/zip-attachment/ticket/%s/">.zip</a>' % id)
class TestTicketPreview(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Preview ticket creation"""
        # Fill in the New Ticket form, but hit "preview" instead of "submit":
        # the ticket must not be created, yet its content must be shown.
        self._tester.go_to_front()
        tc.follow('New Ticket')
        preview_summary = random_sentence(5)
        preview_desc = random_sentence(5)
        tc.formvalue('propertyform', 'field-summary', preview_summary)
        tc.formvalue('propertyform', 'field-description', preview_desc)
        tc.submit('preview')
        tc.url(self._tester.url + '/newticket$')
        tc.find('ticket not yet created')
        tc.find(preview_summary)
        tc.find(preview_desc)
class TestTicketNoSummary(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Creating a ticket without summary should fail"""
        self._tester.go_to_front()
        tc.follow('New Ticket')
        desc = random_sentence(5)
        # Submit with only a description; the form is redisplayed with the
        # entered description preserved and an error message shown.
        tc.formvalue('propertyform', 'field-description', desc)
        tc.submit('submit')
        tc.find(desc)
        tc.find('Tickets must contain a summary.')
        tc.find('Create New Ticket')
        tc.find('ticket not yet created')
class TestTicketAltFormats(FunctionalTestCaseSetup):
    def runTest(self):
        """Download ticket in alternative formats"""
        # Each alternative-format link must render the ticket summary.
        summary = random_sentence(5)
        self._tester.create_ticket(summary)
        for fmt in ('Comma-delimited Text', 'Tab-delimited Text', 'RSS Feed'):
            tc.follow(fmt)
            page = b.get_html()
            if summary not in page:
                raise AssertionError('Summary missing from %s format' % fmt)
            tc.back()
class TestTicketCSVFormat(FunctionalTestCaseSetup):
    def runTest(self):
        """Download ticket in CSV format"""
        self._tester.create_ticket()
        tc.follow('Comma-delimited Text')
        csv = b.get_html()
        # '\xef\xbb\xbf' is the UTF-8 byte order mark.
        if not csv.startswith('\xef\xbb\xbfid,summary,'):  # BOM
            raise AssertionError('Bad CSV format')
class TestTicketTabFormat(FunctionalTestCaseSetup):
    def runTest(self):
        """Download ticket in Tab-delimited format"""
        self._tester.create_ticket()
        tc.follow('Tab-delimited Text')
        tab = b.get_html()
        # '\xef\xbb\xbf' is the UTF-8 byte order mark.
        if not tab.startswith('\xef\xbb\xbfid\tsummary\t'):  # BOM
            raise AssertionError('Bad tab delimited format')
class TestTicketRSSFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Download ticket in RSS format"""
        summary = random_sentence(5)
        self._tester.create_ticket(summary)
        # Make a number of changes to exercise all of the RSS feed code
        tc.formvalue('propertyform', 'comment', random_sentence(3))
        tc.formvalue('propertyform', 'field-type', 'task')
        tc.formvalue('propertyform', 'description', summary + '\n\n' +
                     random_sentence(8))
        tc.formvalue('propertyform', 'field-keywords', 'key')
        tc.submit('submit')
        time.sleep(1) # Have to wait a second
        tc.formvalue('propertyform', 'field-keywords', '')
        tc.submit('submit')

        tc.find('RSS Feed')
        tc.follow('RSS Feed')
        rss = b.get_html()
        # Only a sanity check that the feed is an XML document.
        if not rss.startswith('<?xml version="1.0"?>'):
            raise AssertionError('RSS Feed not valid feed')
class TestTicketSearch(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket search"""
        summary = random_sentence(4)
        self._tester.create_ticket(summary)
        self._tester.go_to_front()
        tc.follow('Search')
        # Restrict search to tickets only, then search for the summary.
        tc.formvalue('fullsearch', 'ticket', True)
        tc.formvalue('fullsearch', 'q', summary)
        tc.submit('Search')
        tc.find('class="searchable">.*' + summary)
        tc.notfind('No matches found')
class TestNonTicketSearch(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test non-ticket search"""
        # Create a summary containing only unique words
        summary = ' '.join([random_word() + '_TestNonTicketSearch'
                            for i in range(5)])
        self._tester.create_ticket(summary)
        self._tester.go_to_front()
        tc.follow('Search')
        # With the ticket realm deselected, the ticket must NOT be found.
        tc.formvalue('fullsearch', 'ticket', False)
        tc.formvalue('fullsearch', 'q', summary)
        tc.submit('Search')
        tc.notfind('class="searchable">' + summary)
        tc.find('No matches found')
class TestTicketHistory(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket history"""
        summary = random_sentence(5)
        ticketid = self._tester.create_ticket(summary)
        comment = self._tester.add_comment(ticketid)
        self._tester.go_to_ticket(ticketid)
        # The live ticket view offers all the editing controls ...
        tc.find(r'<a [^>]+>\bModify\b</a>')
        tc.find(r"\bAttach file\b")
        tc.find(r"\bAdd Comment\b")
        tc.find(r"\bModify Ticket\b")
        tc.find(r"\bPreview\b")
        tc.find(r"\bSubmit changes\b")
        url = b.get_url()
        # ... version 0 shows the initial state without the comment ...
        tc.go(url + '?version=0')
        tc.find('at <[^>]*>*Initial Version')
        tc.find(summary)
        tc.notfind(comment)
        # ... version 1 includes the comment, and historical views are
        # read-only: no editing controls.
        tc.go(url + '?version=1')
        tc.find('at <[^>]*>*Version 1')
        tc.find(summary)
        tc.find(comment)
        tc.notfind(r'<a [^>]+>\bModify\b</a>')
        tc.notfind(r"\bAttach file\b")
        tc.notfind(r"\bAdd Comment\b")
        tc.notfind(r"\bModify Ticket\b")
        tc.notfind(r"\bPreview\b")
        tc.notfind(r"\bSubmit changes\b")
class TestTicketHistoryDiff(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket history (diff)"""
        self._tester.create_ticket()
        # Modify the description, then check the change log offers a diff
        # between the initial version and version 1.
        tc.formvalue('propertyform', 'description', random_sentence(6))
        tc.submit('submit')
        tc.find('Description<[^>]*>\\s*modified \\(<[^>]*>diff', 's')
        tc.follow('diff')
        tc.find('Changes\\s*between\\s*<[^>]*>Initial Version<[^>]*>\\s*and'
                '\\s*<[^>]*>Version 1<[^>]*>\\s*of\\s*<[^>]*>Ticket #' , 's')
class TestTicketQueryLinks(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket query links"""
        count = 3
        ticket_ids = [self._tester.create_ticket(
                        summary='TestTicketQueryLinks%s' % i)
                      for i in range(count)]
        self._tester.go_to_query()
        # We don't have the luxury of javascript, so this is a multi-step
        # process
        tc.formvalue('query', 'add_filter_0', 'summary')
        tc.submit('add_0')
        tc.formvalue('query', '0_owner', 'nothing')
        tc.submit('rm_filter_0_owner_0')
        tc.formvalue('query', '0_summary', 'TestTicketQueryLinks')
        tc.submit('update')
        query_url = b.get_url()
        tc.find(r'\(%d matches\)' % count)
        for i in range(count):
            tc.find('TestTicketQueryLinks%s' % i)

        # Walk the Previous/Next navigation: the first ticket has no
        # "Previous", the last has no "Next" (rendered with class="missing").
        tc.follow('TestTicketQueryLinks0')
        tc.find('class="missing">&larr; Previous Ticket')
        tc.find('title="Ticket #%s">Next Ticket' % ticket_ids[1])
        tc.follow('Back to Query')
        tc.url(re.escape(query_url))

        tc.follow('TestTicketQueryLinks1')
        tc.find('title="Ticket #%s">Previous Ticket' % ticket_ids[0])
        tc.find('title="Ticket #%s">Next Ticket' % ticket_ids[2])

        tc.follow('Next Ticket')
        tc.find('title="Ticket #%s">Previous Ticket' % ticket_ids[1])
        tc.find('class="missing">Next Ticket &rarr;')
class TestTicketQueryLinksQueryModuleDisabled(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Ticket query links should not be present when the QueryModule
        is disabled."""
        def enable_query_module(enable):
            # Toggle trac.ticket.query.QueryModule via the plugin admin page.
            self._tester.go_to_admin('Plugins')
            tc.formvalue('edit-plugin-trac', 'component',
                         'trac.ticket.query.QueryModule')
            tc.formvalue('edit-plugin-trac', 'enable',
                         '%strac.ticket.query.QueryModule'
                         % ('+' if enable else '-'))
            tc.submit()
            tc.find("The following component has been %s:"
                    ".*QueryModule.*\(trac\.ticket\.query\.\*\)"
                    % ("enabled" if enable else "disabled"))

        props = {'cc': 'user1, user2',
                 'component': 'component1',
                 'keywords': 'kw1, kw2',
                 'milestone': 'milestone1',
                 'owner': 'user',
                 'priority': 'major',
                 'reporter': 'admin',
                 'version': '2.0'}
        tid = self._tester.create_ticket(info=props)
        milestone_cell = \
            r'<td headers="h_milestone">\s*' \
            r'<a class="milestone" href="/milestone/%(milestone)s" ' \
            r'title=".*">\s*%(milestone)s\s*</a>\s*</td>'\
            % {'milestone': props['milestone']}

        try:
            # With the module enabled, each property (except milestone) links
            # to a /query URL.  (iteritems: this suite runs on Python 2.)
            for field, value in props.iteritems():
                if field != 'milestone':
                    links = r', '.join(r'<a href="/query.*>%s</a>'
                                       % v.strip() for v in value.split(','))
                    tc.find(r'<td headers="h_%s"( class="searchable")?>'
                            r'\s*%s\s*</td>' % (field, links))
                else:
                    tc.find(milestone_cell)
            enable_query_module(False)
            # With the module disabled, the same values appear as plain text.
            self._tester.go_to_ticket(tid)
            for field, value in props.iteritems():
                if field != 'milestone':
                    tc.find(r'<td headers="h_%s"( class="searchable")?>'
                            r'\s*%s\s*</td>' % (field, value))
                else:
                    tc.find(milestone_cell)
        finally:
            enable_query_module(True)
class TestTicketQueryOrClause(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket query with an or clauses"""
        count = 3
        # Plain loop instead of a list comprehension used only for its side
        # effects (the comprehension built a throwaway list of ticket ids).
        for i in range(count):
            self._tester.create_ticket(
                summary='TestTicketQueryOrClause%s' % i,
                info={'keywords': str(i)})
        self._tester.go_to_query()
        # First clause: summary matches ...OrClause1; second (OR) clause:
        # keywords match '2'.  Ticket 0 matches neither, 1 and 2 match one each.
        tc.formvalue('query', '0_owner', '')
        tc.submit('rm_filter_0_owner_0')
        tc.formvalue('query', 'add_filter_0', 'summary')
        tc.submit('add_0')
        tc.formvalue('query', '0_summary', 'TestTicketQueryOrClause1')
        tc.formvalue('query', 'add_clause_1', 'keywords')
        tc.submit('add_1')
        tc.formvalue('query', '1_keywords', '2')
        tc.submit('update')
        tc.notfind('TestTicketQueryOrClause0')
        for i in (1, 2):
            tc.find('TestTicketQueryOrClause%s' % i)
class TestTicketCustomFieldTextNoFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom text field with no format explicitly specified.
        Its contents should be rendered as plain text.
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'text')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.set('ticket-custom', 'newfield.format', '')
        env.config.save()

        # The raw value appears verbatim in the field cell (no markup).
        val = "%s %s" % (random_unique_camel(), random_word())
        self._tester.create_ticket(info={'newfield': val})
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % val)
class TestTicketCustomFieldTextAreaNoFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom textarea field with no format explicitly specified,
        its contents should be rendered as plain text.
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'textarea')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.set('ticket-custom', 'newfield.format', '')
        env.config.save()

        # The raw value appears verbatim in the field cell (no markup).
        val = "%s %s" % (random_unique_camel(), random_word())
        self._tester.create_ticket(info={'newfield': val})
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % val)
class TestTicketCustomFieldTextWikiFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom text field with `wiki` format.
        Its contents should through the wiki engine, wiki-links and all.
        Feature added in http://trac.edgewall.org/ticket/1791
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'text')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.set('ticket-custom', 'newfield.format', 'wiki')
        env.config.save()

        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        self._tester.create_ticket(info={'newfield': val})
        # word1 is CamelCase, so the wiki engine turns it into a link
        # (the \?? allows the "missing page" question-mark suffix).
        wiki = '<a [^>]*>%s\??</a> %s' % (word1, word2)
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % wiki)
class TestTicketCustomFieldTextAreaWikiFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom textarea field with `wiki` format.
        Its contents should be rendered through the wiki engine,
        wiki-links and all.
        """
        # NOTE(review): the original docstring was copy-pasted from the
        # no-format test; this test actually sets newfield.format = 'wiki'.
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'textarea')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.set('ticket-custom', 'newfield.format', 'wiki')
        env.config.save()

        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        self._tester.create_ticket(info={'newfield': val})
        # Textarea content is rendered as a wiki paragraph (<p>...</p>).
        wiki = '<p>\s*<a [^>]*>%s\??</a> %s<br />\s*</p>' % (word1, word2)
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % wiki)
class TestTicketCustomFieldTextReferenceFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom text field with `reference` format.
        Its contents are treated as a single value
        and are rendered as an auto-query link.
        Feature added in http://trac.edgewall.org/ticket/10643
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'text')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.set('ticket-custom', 'newfield.format', 'reference')
        env.config.save()

        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        self._tester.create_ticket(info={'newfield': val})
        # One link for the whole value; '\+' is the URL-encoded space.
        query = 'status=!closed&newfield=%s\+%s' % (word1, word2)
        querylink = '<a href="/query\?%s">%s</a>' % (query, val)
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % querylink)
class TestTicketCustomFieldTextListFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom text field with `list` format.
        Its contents are treated as a space-separated list of values
        and are rendered as separate auto-query links per word.
        Feature added in http://trac.edgewall.org/ticket/10643
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'text')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.set('ticket-custom', 'newfield.format', 'list')
        env.config.save()

        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        self._tester.create_ticket(info={'newfield': val})
        # One substring-match (~) query link per word.
        query1 = 'status=!closed&newfield=~%s' % word1
        query2 = 'status=!closed&newfield=~%s' % word2
        querylink1 = '<a href="/query\?%s">%s</a>' % (query1, word1)
        querylink2 = '<a href="/query\?%s">%s</a>' % (query2, word2)
        querylinks = '%s %s' % (querylink1, querylink2)
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % querylinks)
class RegressionTestTicket10828(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/10828
        Rendered property changes should be described as lists of added and
        removed items, even in the presence of comma and semicolon separators.
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'text')
        env.config.set('ticket-custom', 'newfield.label',
                       'A Custom Field')
        env.config.set('ticket-custom', 'newfield.format', 'list')
        env.config.save()

        self._tester.create_ticket()
        # Initial value: two space-separated words -> both reported as added.
        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        tc.formvalue('propertyform', 'field-newfield', val)
        tc.submit('submit')
        tc.find('<em>%s</em> <em>%s</em> added' % (word1, word2))

        # Mixed comma/semicolon separators still diff item-wise.
        word3 = random_unique_camel()
        word4 = random_unique_camel()
        val = "%s, %s; %s" % (word2, word3, word4)
        tc.formvalue('propertyform', 'field-newfield', val)
        tc.submit('submit')
        tc.find('<em>%s</em> <em>%s</em> added; <em>%s</em> removed'
                % (word3, word4, word1))

        # Clearing the field reports every remaining item as removed.
        tc.formvalue('propertyform', 'field-newfield', '')
        tc.submit('submit')
        tc.find('<em>%s</em> <em>%s</em> <em>%s</em> removed'
                % (word2, word3, word4))

        val = "%s %s,%s" % (word1, word2, word3)
        tc.formvalue('propertyform', 'field-newfield', val)
        tc.submit('submit')
        tc.find('<em>%s</em> <em>%s</em> <em>%s</em> added'
                % (word1, word2, word3))
        # Each item is rendered as its own substring-match query link,
        # separators preserved in the cell.
        query1 = 'status=!closed&newfield=~%s' % word1
        query2 = 'status=!closed&newfield=~%s' % word2
        query3 = 'status=!closed&newfield=~%s' % word3
        querylink1 = '<a href="/query\?%s">%s</a>' % (query1, word1)
        querylink2 = '<a href="/query\?%s">%s</a>' % (query2, word2)
        querylink3 = '<a href="/query\?%s">%s</a>' % (query3, word3)
        querylinks = '%s %s, %s' % (querylink1, querylink2, querylink3)
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % querylinks)
class TestTicketTimeline(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket details on timeline"""
        env = self._testenv.get_trac_environment()
        env.config.set('timeline', 'ticket_show_details', 'yes')
        env.config.save()
        summary = random_sentence(5)
        ticketid = self._tester.create_ticket(summary)
        self._tester.add_comment(ticketid)

        self._tester.go_to_timeline()
        # "Ticket changes" alone shows the creation event ...
        tc.formvalue('prefs', 'ticket', True)
        tc.submit()
        tc.find('Ticket.*#%s.*created' % ticketid)
        # ... enabling details also shows the update (comment) event.
        tc.formvalue('prefs', 'ticket_details', True)
        tc.submit()
        htmltags = '(<[^>]*>)*'
        tc.find('Ticket ' + htmltags + '#' + str(ticketid) + htmltags +
                ' \\(' + summary.split()[0] +
                ' [^\\)]+\\) updated\\s+by\\s+' + htmltags + 'admin', 's')
class TestAdminComponent(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create component"""
        # Smoke test: the shared tester helper drives the admin panel.
        self._tester.create_component()
class TestAdminComponentAuthorization(AuthorizationTestCaseSetup):
    def runTest(self):
        """Check permissions required to access the Ticket Components
        panel."""
        # TICKET_ADMIN is the permission that grants access.
        self.test_authorization('/admin/ticket/components', 'TICKET_ADMIN',
                                "Manage Components")
class TestAdminComponentDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate component"""
        name = "DuplicateComponent"
        self._tester.create_component(name)
        component_url = self._tester.url + "/admin/ticket/components"
        tc.go(component_url)
        # Re-adding the same name must yield a clean error, not a crash.
        tc.formvalue('addcomponent', 'name', name)
        tc.submit()
        tc.notfind(internal_error)
        tc.find('Component .* already exists')
class TestAdminComponentRemoval(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove component"""
        name = "RemovalComponent"
        self._tester.create_component(name)
        component_url = self._tester.url + "/admin/ticket/components"
        tc.go(component_url)
        # Select the component's checkbox and remove it.
        tc.formvalue('component_table', 'sel', name)
        tc.submit('remove')
        tc.notfind(name)
class TestAdminComponentNonRemoval(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove no selected component"""
        component_url = self._tester.url + "/admin/ticket/components"
        tc.go(component_url)
        # Submitting "remove" with nothing selected shows a warning.
        tc.submit('remove', formname='component_table')
        tc.find('No component selected')
class TestAdminComponentDefault(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin set default component"""
        name = "DefaultComponent"
        self._tester.create_component(name)
        component_url = self._tester.url + "/admin/ticket/components"
        tc.go(component_url)
        tc.formvalue('component_table', 'default', name)
        tc.submit('apply')
        tc.find('type="radio" name="default" value="%s" checked="checked"' % \
                name)
        # The default must be preselected on the New Ticket form.
        tc.go(self._tester.url + '/newticket')
        tc.find('<option selected="selected" value="%s">%s</option>'
                % (name, name))
        # Test the "Clear default" button
        self._testenv.set_config('ticket', 'optional_fields', 'component')
        tc.go(component_url)
        tc.submit('clear', formname='component_table')
        tc.notfind('type="radio" name="default" value=".+" checked="checked"')
        # With no default and the field optional, new tickets get an empty
        # component cell.
        self._tester.create_ticket()
        tc.find('<th id="h_component" class="missing">\s*Component:\s*</th>'
                '\s*<td headers="h_component">\s*</td>')
        self._testenv.remove_config('ticket', 'optional_fields')
class TestAdminComponentDetail(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin component detail"""
        name = "DetailComponent"
        self._tester.create_component(name)
        component_url = self._tester.url + "/admin/ticket/components"
        tc.go(component_url)
        tc.follow(name)
        desc = 'Some component description'
        tc.formvalue('modcomp', 'description', desc)
        # Cancelling must discard the edited description.
        tc.submit('cancel')
        tc.url(component_url + '$')
        tc.follow(name)
        tc.notfind(desc)
class TestAdminComponentNoneDefined(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """The table should be hidden and help text shown when there are no
        components defined (#11103)."""
        from trac.ticket import model
        env = self._testenv.get_trac_environment()
        components = list(model.Component.select(env))
        self._tester.go_to_admin()
        tc.follow(r"\bComponents\b")

        try:
            # Remove every existing component so the list becomes empty.
            for comp in components:
                tc.formvalue('component_table', 'sel', comp.name)
            tc.submit('remove')
            tc.notfind('<table class="listing" id="complist">')
            tc.find("As long as you don't add any items to the list, this "
                    "field[ \t\n]*will remain completely hidden from the "
                    "user interface.")
        finally:
            # Restore the components removed above.
            for comp in components:
                self._tester.create_component(comp.name, comp.owner,
                                              comp.description)
class TestAdminMilestone(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create milestone"""
        # Smoke test: the shared tester helper drives the admin panel.
        self._tester.create_milestone()
class TestAdminMilestoneAuthorization(AuthorizationTestCaseSetup):
    def runTest(self):
        """Check permissions required to access the Ticket Milestone
        panel."""
        # TICKET_ADMIN is the permission that grants access.
        self.test_authorization('/admin/ticket/milestones', 'TICKET_ADMIN',
                                "Manage Milestones")
class TestAdminMilestoneSpace(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create milestone with a space"""
        # Whitespace in the name must survive URL/form round-trips.
        self._tester.create_milestone('Milestone 1')
class TestAdminMilestoneDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate milestone"""
        name = "DuplicateMilestone"
        self._tester.create_milestone(name)
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.url(milestone_url)
        tc.formvalue('addmilestone', 'name', name)
        tc.submit()
        tc.notfind(internal_error)
        tc.find('Milestone %s already exists' % name)
        # Guard against an unsubstituted '%s' placeholder leaking into the
        # rendered error message.
        tc.notfind('%s')
class TestAdminMilestoneDetail(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin modify milestone details"""
        name = "DetailMilestone"
        # Create a milestone
        self._tester.create_milestone(name)

        # Modify the details of the milestone
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.url(milestone_url)
        tc.follow(name)
        tc.url(milestone_url + '/' + name)
        tc.formvalue('modifymilestone', 'description', 'Some description.')
        tc.submit('save')
        tc.url(milestone_url)

        # Make sure the milestone isn't closed
        self._tester.go_to_roadmap()
        tc.find(name)

        # Cancel more modifications
        tc.go(milestone_url)
        tc.url(milestone_url)
        tc.follow(name)
        tc.formvalue('modifymilestone', 'description',
                     '~~Some other description.~~')
        tc.submit('cancel')
        tc.url(milestone_url)

        # Verify the correct modifications show up: the saved description
        # remains, the cancelled edit does not.
        self._tester.go_to_roadmap()
        tc.find('Some description.')
        tc.follow(name)
        tc.find('Some description.')
class TestAdminMilestoneDue(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin milestone duedate"""
        name = "DueMilestone"
        duedate = datetime.now(tz=utc)
        # Format the date exactly as the UI renders it, so tc.find matches.
        duedate_string = format_datetime(duedate, tzinfo=utc,
                                         locale=locale_en)
        self._tester.create_milestone(name, due=duedate_string)
        tc.find(duedate_string)
class TestAdminMilestoneDetailDue(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin modify milestone duedate on detail page"""
        name = "DetailDueMilestone"
        # Create a milestone
        self._tester.create_milestone(name)

        # Modify the details of the milestone
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.url(milestone_url)
        tc.follow(name)
        tc.url(milestone_url + '/' + name)
        duedate = datetime.now(tz=utc)
        duedate_string = format_datetime(duedate, tzinfo=utc,
                                         locale=locale_en)
        tc.formvalue('modifymilestone', 'due', duedate_string)
        tc.submit('save')
        tc.url(milestone_url + '$')
        # The list row shows the name followed by the new due date.
        tc.find(name + '(<[^>]*>|\\s)*'+ duedate_string, 's')
class TestAdminMilestoneDetailRename(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin rename milestone"""
        name1 = self._tester.create_milestone()
        name2 = random_unique_camel()
        # A ticket referencing the milestone must follow the rename.
        tid = self._tester.create_ticket(info={'milestone': name1})
        milestone_url = self._tester.url + '/admin/ticket/milestones'

        self._tester.go_to_url(milestone_url)
        tc.follow(name1)
        tc.url(milestone_url + '/' + name1)
        tc.formvalue('modifymilestone', 'name', name2)
        tc.submit('save')

        tc.find(r"Your changes have been saved\.")
        tc.find(r"\b%s\b" % name2)
        tc.notfind(r"\b%s\b" % name1)
        # The ticket now links to the renamed milestone, and its change log
        # records both the field change and the rename comment.
        self._tester.go_to_ticket(tid)
        tc.find('<a class="milestone" href="/milestone/%(name)s" '
                'title="No date set">%(name)s</a>' % {'name': name2})
        tc.find('<strong class="trac-field-milestone">Milestone</strong>'
                '[ \t\n]+changed from <em>%s</em> to <em>%s</em>'
                % (name1, name2))
        tc.find("Milestone renamed")
class TestAdminMilestoneCompleted(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin milestone completed"""
        name = "CompletedMilestone"
        self._tester.create_milestone(name)
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.url(milestone_url)
        tc.follow(name)
        tc.url(milestone_url + '/' + name)
        # Mark completed; saving returns to the milestone list.
        tc.formvalue('modifymilestone', 'completed', True)
        tc.submit('save')
        tc.url(milestone_url + "$")
class TestAdminMilestoneCompletedFuture(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin milestone completed in the future"""
        name = "CompletedFutureMilestone"
        self._tester.create_milestone(name)
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.url(milestone_url)
        tc.follow(name)
        tc.url(milestone_url + '/' + name)
        tc.formvalue('modifymilestone', 'completed', True)
        # A completion date two days in the future must be rejected.
        cdate = datetime.now(tz=utc) + timedelta(days=2)
        cdate_string = format_date(cdate, tzinfo=localtz, locale=locale_en)
        tc.formvalue('modifymilestone', 'completeddate', cdate_string)
        tc.submit('save')
        tc.find('Completion date may not be in the future')
        # And make sure it wasn't marked as completed.
        self._tester.go_to_roadmap()
        tc.find(name)
class TestAdminMilestoneRemove(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove milestone"""
        # Delete a milestone that has a ticket assigned; the ticket keeps a
        # history entry recording the deletion and its milestone field is
        # left empty ("missing").
        name = "MilestoneRemove"
        self._tester.create_milestone(name)
        tid = self._tester.create_ticket(info={'milestone': name})
        milestone_url = self._tester.url + '/admin/ticket/milestones'
        tc.go(milestone_url)
        tc.formvalue('milestone_table', 'sel', name)
        tc.submit('remove')
        tc.url(milestone_url + '$')
        tc.notfind(name)
        self._tester.go_to_ticket(tid)
        tc.find('<th id="h_milestone" class="missing">'
                '[ \t\n]*Milestone:[ \t\n]*</th>')
        tc.find('<strong class="trac-field-milestone">Milestone'
                '</strong>[ \t\n]*<em>%s</em>[ \t\n]*deleted'
                % name)
        tc.find("Milestone deleted")
class TestAdminMilestoneRemoveMulti(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove multiple milestones"""
        # Create a batch of milestones, select them all in the admin list,
        # remove them with a single submit, and verify none remain.
        base = "MultiRemoveMilestone"
        labels = ["%s%s" % (base, idx) for idx in range(3)]
        for label in labels:
            self._tester.create_milestone(label)
        milestone_url = self._tester.url + '/admin/ticket/milestones'
        tc.go(milestone_url)
        tc.url(milestone_url + '$')
        for label in labels:
            tc.find(label)
        for label in labels:
            tc.formvalue('milestone_table', 'sel', label)
        tc.submit('remove')
        tc.url(milestone_url + '$')
        for label in labels:
            tc.notfind(label)
class TestAdminMilestoneNonRemoval(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove no selected milestone"""
        # Submitting "remove" with no checkbox selected must produce a
        # warning rather than deleting anything.
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.submit('remove', formname='milestone_table')
        tc.find('No milestone selected')
class TestAdminMilestoneDefaults(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin set default ticket milestone, default retarget milestone
        and clear defaults."""
        def clear_defaults():
            # Test the "Clear default" button: both radio groups must end
            # up unchecked, the ticket shows no milestone, and the delete
            # confirmation page offers no preselected retarget option.
            tc.go(milestone_url)
            tc.submit('clear', formname='milestone_table')
            tc.notfind('type="radio" name="ticket_default" '
                       'value=".+" checked="checked"')
            tc.notfind('type="radio" name="retarget_default" '
                       'value=".+" checked="checked"')
            self._tester.go_to_ticket(tid)
            tc.find('<th id="h_milestone" class="missing">[ \t\n]+'
                    'Milestone:[ \t\n]+</th>[ \t\n]+'
                    '(?!<td headers="h_milestone">)')
            self._tester.go_to_milestone(mid2)
            tc.submit(formname='deletemilestone')
            tc.notfind('<option selected="selected" value="%s">%s</option>'
                       % (mid1, mid1))

        # Fixture: one unassigned ticket, two milestones, and one ticket on
        # mid2 (so the delete-confirmation page shows a retarget selector).
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tid = self._tester.create_ticket()
        mid1 = self._tester.create_milestone()
        mid2 = self._tester.create_milestone()
        self._tester.create_ticket(info={'milestone': mid2})

        # Set default ticket milestone
        tc.go(milestone_url)
        tc.formvalue('milestone_table', 'ticket_default', mid1)
        tc.submit('apply')
        tc.find('type="radio" name="ticket_default" value="%s" '
                'checked="checked"' % mid1)
        tc.notfind('type="radio" name="retarget_default" value=".+" '
                  'checked="checked"')
        # verify it is the default on the newticket page.
        tc.go(self._tester.url + '/newticket')
        tc.find('<option selected="selected" value="%s">%s</option>'
                % (mid1, mid1))
        clear_defaults()

        # Set default retarget to milestone
        tc.go(milestone_url)
        tc.formvalue('milestone_table', 'retarget_default', mid1)
        tc.submit('apply')
        tc.find('type="radio" name="retarget_default" value="%s" '
                'checked="checked"' % mid1)
        tc.notfind('type="radio" name="ticket_default" value=".+" '
                   'checked="checked"')
        # verify it is the default on the confirm delete page.
        self._tester.go_to_milestone(mid2)
        tc.submit(formname='deletemilestone')
        tc.find('<option selected="selected" value="%s">%s</option>'
                % (mid1, mid1))
        clear_defaults()

        # Set both
        tc.go(milestone_url)
        tc.formvalue('milestone_table', 'ticket_default', mid1)
        tc.formvalue('milestone_table', 'retarget_default', mid1)
        tc.submit('apply')
        tc.find('type="radio" name="ticket_default" value="%s" '
                'checked="checked"' % mid1)
        tc.find('type="radio" name="retarget_default" value="%s" '
                'checked="checked"' % mid1)
        # verify it is the default on the newticket page.
        tc.go(self._tester.url + '/newticket')
        tc.find('<option selected="selected" value="%s">%s</option>'
                % (mid1, mid1))
        # verify it is the default on the confirm delete page.
        self._tester.go_to_milestone(mid2)
        tc.submit(formname='deletemilestone')
        tc.find('<option selected="selected" value="%s">%s</option>'
                % (mid1, mid1))
        clear_defaults()

        #Set neither
        tc.go(milestone_url)
        tc.submit('apply', formname='milestone_table')
        tc.notfind('type="radio" name="retarget_default" value=".+" '
                   'checked="checked"')
        tc.notfind('type="radio" name="ticket_default" value=".+" '
                   'checked="checked"')
        # verify no default on the newticket page.
        tc.go(self._tester.url + '/newticket')
        tc.find('<th id="h_milestone" class="missing">[ \t\n]+'
                'Milestone:[ \t\n]+</th>[ \t\n]+'
                '(?!<td headers="h_milestone">)')
        # verify none selected on the confirm delete page.
        self._tester.go_to_milestone(mid2)
        tc.submit(formname='deletemilestone')
        tc.notfind('<option selected="selected" value="%s">%s</option>'
                   % (mid1, mid1))
class TestAdminPriority(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create priority"""
        # All form interaction and result checking is in the tester helper.
        self._tester.create_priority()
class TestAdminPriorityAuthorization(AuthorizationTestCaseSetup):
    def runTest(self):
        """Check permissions required to access the Ticket Priority
        panel."""
        # TICKET_ADMIN is required; the shared helper performs the check.
        self.test_authorization('/admin/ticket/priority', 'TICKET_ADMIN',
                                "Manage Priorities")
class TestAdminPriorityDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate priority"""
        # Creating the same priority twice must surface an "already
        # exists" message on the second attempt.
        name = "DuplicatePriority"
        self._tester.create_priority(name)
        self._tester.create_priority(name)
        tc.find('Priority %s already exists' % name)
class TestAdminPriorityModify(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin modify priority"""
        # Rename a priority through the detail form; the doubled name
        # must appear in the list after saving.
        name = "ModifyPriority"
        self._tester.create_priority(name)
        priority_url = self._tester.url + '/admin/ticket/priority'
        tc.go(priority_url)
        tc.url(priority_url + '$')
        tc.find(name)
        tc.follow(name)
        tc.formvalue('modenum', 'name', name * 2)
        tc.submit('save')
        tc.url(priority_url + '$')
        tc.find(name * 2)
class TestAdminPriorityRemove(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove priority"""
        # Select the priority in the enum table and remove it; it must no
        # longer be listed afterwards.
        name = "RemovePriority"
        self._tester.create_priority(name)
        priority_url = self._tester.url + '/admin/ticket/priority'
        tc.go(priority_url)
        tc.url(priority_url + '$')
        tc.find(name)
        tc.formvalue('enumtable', 'sel', name)
        tc.submit('remove')
        tc.url(priority_url + '$')
        tc.notfind(name)
class TestAdminPriorityRemoveMulti(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove multiple priorities"""
        # Create a batch of priorities, select them all, delete them with
        # one "remove" submission, and confirm none are left.
        base = "MultiRemovePriority"
        labels = ["%s%s" % (base, idx) for idx in range(3)]
        for label in labels:
            self._tester.create_priority(label)
        priority_url = self._tester.url + '/admin/ticket/priority'
        tc.go(priority_url)
        tc.url(priority_url + '$')
        for label in labels:
            tc.find(label)
        for label in labels:
            tc.formvalue('enumtable', 'sel', label)
        tc.submit('remove')
        tc.url(priority_url + '$')
        for label in labels:
            tc.notfind(label)
class TestAdminPriorityNonRemoval(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove no selected priority"""
        # Removing with nothing selected must warn instead of deleting.
        priority_url = self._tester.url + "/admin/ticket/priority"
        tc.go(priority_url)
        tc.submit('remove', formname='enumtable')
        tc.find('No priority selected')
class TestAdminPriorityDefault(AdminEnumDefaultTestCaseSetup):
    def runTest(self):
        """Admin default priority"""
        # Shared enum-default helper drives the panel for 'priority'.
        name = self._tester.create_priority()
        self.test_default('priority', name)
class TestAdminPriorityDetail(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin modify priority details"""
        name = "DetailPriority"
        # Create a priority
        self._tester.create_priority(name + '1')
        # Modify the details of the priority
        priority_url = self._tester.url + "/admin/ticket/priority"
        tc.go(priority_url)
        tc.url(priority_url + '$')
        tc.follow(name + '1')
        tc.url(priority_url + '/' + name + '1')
        tc.formvalue('modenum', 'name', name + '2')
        tc.submit('save')
        tc.url(priority_url + '$')
        # Cancel more modifications
        # (tc.follow(name) matches the renamed "DetailPriority2" link,
        # since twill follows links by substring/regex match.)
        tc.go(priority_url)
        tc.follow(name)
        tc.formvalue('modenum', 'name', name + '3')
        tc.submit('cancel')
        tc.url(priority_url + '$')
        # Verify that only the correct modifications show up
        tc.notfind(name + '1')
        tc.find(name + '2')
        tc.notfind(name + '3')
class TestAdminPriorityRenumber(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin renumber priorities"""
        valuesRE = re.compile('<select name="value_([0-9]+)">', re.M)
        priority_url = self._tester.url + '/admin/ticket/priority'
        # Fix: navigate to the priority admin page BEFORE scraping the
        # browser HTML. The original read b.get_html() first, so
        # max_priority depended on whichever page the previous test
        # happened to leave the browser on (hidden test-order coupling).
        tc.go(priority_url)
        tc.url(priority_url + '$')
        max_priority = max(int(x) for x in valuesRE.findall(b.get_html()))
        # The two new priorities get order numbers max+1 and max+2.
        name = "RenumberPriority"
        self._tester.create_priority(name + '1')
        self._tester.create_priority(name + '2')
        tc.go(priority_url)
        tc.url(priority_url + '$')
        tc.find(name + '1')
        tc.find(name + '2')
        # Swap the order numbers of the two new priorities.
        tc.formvalue('enumtable',
                     'value_%s' % (max_priority + 1), str(max_priority + 2))
        tc.formvalue('enumtable',
                     'value_%s' % (max_priority + 2), str(max_priority + 1))
        tc.submit('apply')
        tc.url(priority_url + '$')
        # Verify that their order has changed.
        tc.find(name + '2.*' + name + '1', 's')
class TestAdminPriorityRenumberDup(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin badly renumber priorities"""
        # Make the first priority the 2nd priority, and leave the 2nd priority
        # as the 2nd priority.  The duplicate order number must be rejected.
        priority_url = self._tester.url + '/admin/ticket/priority'
        tc.go(priority_url)
        tc.url(priority_url + '$')
        tc.formvalue('enumtable', 'value_1', '2')
        tc.submit('apply')
        tc.url(priority_url + '$')
        tc.find('Order numbers must be unique')
class TestAdminResolution(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create resolution"""
        # All form interaction and result checking is in the tester helper.
        self._tester.create_resolution()
class TestAdminResolutionAuthorization(AuthorizationTestCaseSetup):
    def runTest(self):
        """Check permissions required to access the Ticket Resolutions
        panel."""
        # TICKET_ADMIN is required; the shared helper performs the check.
        self.test_authorization('/admin/ticket/resolution', 'TICKET_ADMIN',
                                "Manage Resolutions")
class TestAdminResolutionDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate resolution"""
        # Second creation with the same name must report a duplicate.
        name = "DuplicateResolution"
        self._tester.create_resolution(name)
        self._tester.create_resolution(name)
        tc.find('Resolution value "%s" already exists' % name)
class TestAdminResolutionDefault(AdminEnumDefaultTestCaseSetup):
    def runTest(self):
        """Admin default resolution"""
        # Shared enum-default helper drives the panel for 'resolution'.
        name = self._tester.create_resolution()
        self.test_default('resolution', name)
class TestAdminSeverity(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create severity"""
        # All form interaction and result checking is in the tester helper.
        self._tester.create_severity()
class TestAdminSeverityAuthorization(AuthorizationTestCaseSetup):
    def runTest(self):
        """Check permissions required to access the Ticket Severities
        panel."""
        # TICKET_ADMIN is required; the shared helper performs the check.
        self.test_authorization('/admin/ticket/severity', 'TICKET_ADMIN',
                                "Manage Severities")
class TestAdminSeverityDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate severity"""
        # Second creation with the same name must report a duplicate.
        name = "DuplicateSeverity"
        self._tester.create_severity(name)
        self._tester.create_severity(name)
        tc.find('Severity value "%s" already exists' % name)
class TestAdminSeverityDefault(AdminEnumDefaultTestCaseSetup):
    def runTest(self):
        """Admin default severity"""
        # Shared enum-default helper drives the panel for 'severity'.
        name = self._tester.create_severity()
        self.test_default('severity', name)
class TestAdminType(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create type"""
        # All form interaction and result checking is in the tester helper.
        self._tester.create_type()
class TestAdminTypeAuthorization(AuthorizationTestCaseSetup):
    def runTest(self):
        """Check permissions required to access the Ticket Types
        panel."""
        # TICKET_ADMIN is required; the shared helper performs the check.
        self.test_authorization('/admin/ticket/type', 'TICKET_ADMIN',
                                "Manage Ticket Types")
class TestAdminTypeDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate type"""
        # Second creation with the same name must report a duplicate.
        name = "DuplicateType"
        self._tester.create_type(name)
        self._tester.create_type(name)
        tc.find('Type value "%s" already exists' % name)
class TestAdminTypeDefault(AdminEnumDefaultTestCaseSetup):
    def runTest(self):
        """Admin default type"""
        # Shared enum-default helper drives the panel for 'type'.
        name = self._tester.create_type()
        self.test_default('type', name)
class TestAdminVersion(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create version"""
        # All form interaction and result checking is in the tester helper.
        self._tester.create_version()
class TestAdminVersionAuthorization(AuthorizationTestCaseSetup):
    def runTest(self):
        """Check permissions required to access the Versions panel."""
        # TICKET_ADMIN is required; the shared helper performs the check.
        self.test_authorization('/admin/ticket/versions', 'TICKET_ADMIN',
                                "Manage Versions")
class TestAdminVersionDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate version"""
        # Unlike the enum duplicate tests, this one submits the add form
        # directly so it can also assert no internal error page is shown.
        name = "DuplicateVersion"
        self._tester.create_version(name)
        version_admin = self._tester.url + "/admin/ticket/versions"
        tc.go(version_admin)
        tc.url(version_admin)
        tc.formvalue('addversion', 'name', name)
        tc.submit()
        tc.notfind(internal_error)
        tc.find("Version %s already exists." % name)
class TestAdminVersionDetail(FunctionalTwillTestCaseSetup):
    # This is somewhat pointless... the only place to find the version
    # description is on the version details page.
    def runTest(self):
        """Admin version details"""
        # Save a description on the detail form, then revisit the page and
        # check it was persisted.
        name = "DetailVersion"
        self._tester.create_version(name)
        version_admin = self._tester.url + "/admin/ticket/versions"
        tc.go(version_admin)
        tc.url(version_admin)
        tc.follow(name)
        desc = 'Some version description.'
        tc.formvalue('modifyversion', 'description', desc)
        tc.submit('save')
        tc.url(version_admin)
        tc.follow(name)
        tc.find(desc)
class TestAdminVersionDetailTime(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin version detail set time"""
        # Clearing the 'time' field must still save cleanly and return to
        # the version list, where the version row is present.
        name = "DetailTimeVersion"
        self._tester.create_version(name)
        version_admin = self._tester.url + "/admin/ticket/versions"
        tc.go(version_admin)
        tc.url(version_admin)
        tc.follow(name)
        tc.formvalue('modifyversion', 'time', '')
        tc.submit('save')
        tc.url(version_admin + '$')
        tc.find(name + '(<[^>]*>|\\s)*<[^>]* name="default" value="%s"'
                % name, 's')
class TestAdminVersionDetailCancel(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin version details"""
        # Cancelling the detail form must discard the edited description.
        name = "DetailVersion"
        self._tester.create_version(name)
        version_admin = self._tester.url + "/admin/ticket/versions"
        tc.go(version_admin)
        tc.url(version_admin)
        tc.follow(name)
        desc = 'Some other version description.'
        tc.formvalue('modifyversion', 'description', desc)
        tc.submit('cancel')
        tc.url(version_admin)
        tc.follow(name)
        tc.notfind(desc)
class TestAdminVersionRemove(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove version"""
        # Select the version in the table and remove it; it must no longer
        # be listed afterwards.
        name = "VersionRemove"
        self._tester.create_version(name)
        version_url = self._tester.url + "/admin/ticket/versions"
        tc.go(version_url)
        tc.formvalue('version_table', 'sel', name)
        tc.submit('remove')
        tc.url(version_url + '$')
        tc.notfind(name)
class TestAdminVersionRemoveMulti(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove multiple versions"""
        # Create a batch of versions, select them all, delete them with one
        # "remove" submission, and confirm none are left.
        base = "MultiRemoveVersion"
        labels = ["%s%s" % (base, idx) for idx in range(3)]
        for label in labels:
            self._tester.create_version(label)
        version_url = self._tester.url + '/admin/ticket/versions'
        tc.go(version_url)
        tc.url(version_url + '$')
        for label in labels:
            tc.find(label)
        for label in labels:
            tc.formvalue('version_table', 'sel', label)
        tc.submit('remove')
        tc.url(version_url + '$')
        for label in labels:
            tc.notfind(label)
class TestAdminVersionNonRemoval(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove no selected version"""
        # Removing with nothing selected must warn instead of deleting.
        version_url = self._tester.url + "/admin/ticket/versions"
        tc.go(version_url)
        tc.submit('remove', formname='version_table')
        tc.find('No version selected')
class TestAdminVersionDefault(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin set default version"""
        # Set a default version, check it is preselected on /newticket,
        # then clear the default and check a new ticket has no version.
        name = "DefaultVersion"
        self._tester.create_version(name)
        version_url = self._tester.url + "/admin/ticket/versions"
        tc.go(version_url)
        tc.formvalue('version_table', 'default', name)
        tc.submit('apply')
        tc.find('type="radio" name="default" value="%s" checked="checked"' % \
                name)
        # verify it is the default on the newticket page.
        tc.go(self._tester.url + '/newticket')
        tc.find('<option selected="selected" value="%s">%s</option>'
                % (name, name))
        # Test the "Clear default" button
        tc.go(version_url)
        tc.submit('clear', formname='version_table')
        tc.notfind('type="radio" name="default" value=".+" checked="checked"')
        self._tester.create_ticket()
        tc.find('<th id="h_version" class="missing">[ \t\n]+'
                'Version:[ \t\n]+</th>[ \t\n]+'
                '(?!<td headers="h_version">)')
class TestNewReport(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Create a new report"""
        # Creates a report with a title, a raw SQL body and a description;
        # the creation and its verification are handled by the tester
        # helper.  The SQL text is passed through verbatim.
        self._tester.create_report(
            'Closed tickets, modified in the past 7 days by owner.', """
              SELECT DISTINCT p.value AS __color__,
               id AS ticket,
               summary, component, milestone, t.type AS type,
               reporter, time AS created,
               changetime AS modified, description AS _description,
               priority,
               round(julianday('now') -
                     julianday(changetime, 'unixepoch')) as days,
               resolution,
               owner as __group__
              FROM ticket t
              LEFT JOIN enum p ON p.name = t.priority AND
                                  p.type = 'priority'
              WHERE ((julianday('now') -
                      julianday(changetime, 'unixepoch')) < 7)
               AND status = 'closed'
              ORDER BY __group__, changetime, p.value
            """,
            'List of all tickets that are closed, and have been modified in'
            ' the past 7 days, grouped by owner.\n\n(So they have probably'
            ' been closed this week.)')
class TestReportRealmDecoration(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Realm/id decoration in report"""
        # Build a synthetic report whose rows carry _realm/id (and
        # optionally _parent_realm/_parent_id) columns, then verify each
        # realm is rendered as the expected decorated link.
        self._tester.create_report(
            'Realm/id decoration',
            """\
SELECT NULL AS _realm, NULL AS id, NULL AS _parent_realm, NULL AS _parent_id
UNION ALL SELECT 'ticket', '42', NULL, NULL
UNION ALL SELECT 'report', '42', NULL, NULL
UNION ALL SELECT 'milestone', '42', NULL, NULL
UNION ALL SELECT 'wiki', 'WikiStart', NULL, NULL
UNION ALL SELECT 'changeset', '42/trunk', NULL, NULL
UNION ALL SELECT 'changeset', '42/trunk', 'repository', 'repo'
UNION ALL SELECT 'changeset', '43/tags', 'repository', ''
UNION ALL SELECT 'attachment', 'file.ext', 'ticket', '42'
UNION ALL SELECT 'attachment', 'file.ext', 'milestone', '42'
UNION ALL SELECT 'attachment', 'file.ext', 'wiki', 'WikiStart'
""", '')
        tc.find('<a title="View ticket" href="[^"]*?/ticket/42">#42</a>')
        tc.find('<a title="View report" href="[^"]*?/report/42">report:42</a>')
        tc.find('<a title="View milestone" href="[^"]*?/milestone/42">42</a>')
        tc.find('<a title="View wiki" href="[^"]*?/wiki/WikiStart">'
                'WikiStart</a>')
        tc.find('<a title="View changeset" href="[^"]*?/changeset/42/trunk">'
                'Changeset 42/trunk</a>')
        tc.find('<a title="View changeset" '
                'href="[^"]*?/changeset/42/trunk/repo">'
                'Changeset 42/trunk in repo</a>')
        tc.find('<a title="View changeset" href="[^"]*?/changeset/43/tags">'
                'Changeset 43/tags</a>')
        tc.find('<a title="View attachment" '
                'href="[^"]*?/attachment/ticket/42/file[.]ext">'
                'file[.]ext [(]Ticket #42[)]</a>')
        tc.find('<a title="View attachment" '
                'href="[^"]*?/attachment/milestone/42/file[.]ext">'
                'file[.]ext [(]Milestone 42[)]</a>')
        tc.find('<a title="View attachment" '
                'href="[^"]*?/attachment/wiki/WikiStart/file[.]ext">'
                'file[.]ext [(]WikiStart[)]</a>')
class TestReportDynamicVariables(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Generate a report with dynamic variables in title, summary
        and SQL"""
        # $USER and $COMPONENT placeholders must be substituted everywhere:
        # in the report title, its description and the SQL itself.
        summary = random_sentence(3)
        fields = {'component': 'component1'}
        ticket_id = self._tester.create_ticket(summary, fields)
        reportnum = self._tester.create_report(
            "$USER's tickets for component $COMPONENT",
            """SELECT DISTINCT
       t.id AS ticket, summary, component, version, milestone,
       t.type AS type, priority, t.time AS created,
       t.changetime AS _changetime, summary AS _description,
       reporter AS _reporter
      FROM ticket t
      LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
      LEFT JOIN ticket_change tc ON tc.ticket = t.id AND tc.author = $USER
       AND tc.field = 'comment'
      WHERE t.status <> 'closed'
       AND component = $COMPONENT
       AND (owner = $USER OR reporter = $USER OR author = $USER)
    """,
            "Tickets assigned to $USER for component $COMPONENT"
        )
        # Rendered as user 'admin' with component 'component1'.
        self._tester.go_to_report(reportnum, fields)
        tc.find("admin's tickets for component component1")
        tc.find("Tickets assigned to admin for component component1")
        tc.find('<a title="View ticket" href="/ticket/%s">%s</a>' %
                (ticket_id, summary))
class TestMilestone(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Create a milestone."""
        # Use the roadmap "add" form (not the admin panel) to create a
        # milestone with a due date one hour ahead, then verify a ticket
        # assigned to it renders a "Due in ..." milestone link.
        self._tester.go_to_roadmap()
        tc.submit(formname='add')
        tc.url(self._tester.url + '/milestone\?action=new')
        name = random_unique_camel()
        due = format_datetime(datetime.now(tz=utc) + timedelta(hours=1),
                              tzinfo=localtz, locale=locale_en)
        tc.formvalue('edit', 'name', name)
        tc.formvalue('edit', 'due', True)
        tc.formvalue('edit', 'duedate', due)
        # The retarget hint only appears when open tickets exist.
        tc.notfind("Retarget associated open tickets to milestone:")
        tc.submit('add')
        tc.url(self._tester.url + '/milestone/' + name + '$')
        tc.find(r'<h1>Milestone %s</h1>' % name)
        tc.find(due)
        self._tester.create_ticket(info={'milestone': name})
        tc.find('<a class="milestone" href="/milestone/%(name)s" '
                'title="Due in .+ (.+)">%(name)s</a>'
                % {'name': name})
class TestMilestoneAddAttachment(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Add attachment to a milestone. Test that the attachment
        button reads 'Attach file' when no files have been attached, and
        'Attach another file' when there are existing attachments.
        Feature added in http://trac.edgewall.org/ticket/10281."""
        name = self._tester.create_milestone()
        self._tester.go_to_milestone(name)
        tc.find("Attach file")
        filename = self._tester.attach_file_to_milestone(name)
        # After the first attachment: new label, a count of 1, the file
        # name, and a zip download link for all attachments.
        self._tester.go_to_milestone(name)
        tc.find("Attach another file")
        tc.find('Attachments <span class="trac-count">\(1\)</span>')
        tc.find(filename)
        tc.find('Download all attachments as:\s+<a rel="nofollow" '
                'href="/zip-attachment/milestone/%s/">.zip</a>' % name)
class TestMilestoneClose(FunctionalTwillTestCaseSetup):
    """Close a milestone and verify that tickets are retargeted
    to the selected milestone"""
    def runTest(self):
        name = self._tester.create_milestone()
        # Check that hint is shown when there are no tickets to retarget
        self._tester.go_to_milestone(name)
        tc.submit(formname='editmilestone')
        tc.find("There are no tickets associated with this milestone.")
        retarget_to = self._tester.create_milestone()
        tid1 = self._tester.create_ticket(info={'milestone': name})
        tid2 = self._tester.create_ticket(info={'milestone': name})
        # Resolve the ticket on the current page (presumably tid2, the one
        # just created) so it is closed when the milestone is closed.
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.formvalue('propertyform',
                     'action_resolve_resolve_resolution', 'fixed')
        tc.submit('submit')
        # Add a ticket and check that it is retargeted when milestone closed
        # NOTE(review): 'tid' is never asserted on below — tid1/tid2 carry
        # all the checks; this looks like a leftover. TODO confirm.
        tid = self._tester.create_ticket(info={'milestone': name})
        self._tester.go_to_milestone(name)
        completed = format_datetime(datetime.now(tz=utc) - timedelta(hours=1),
                                    tzinfo=localtz, locale=locale_en)
        tc.submit(formname='editmilestone')
        tc.formvalue('edit', 'completed', True)
        tc.formvalue('edit', 'completeddate', completed)
        tc.formvalue('edit', 'target', retarget_to)
        tc.submit('save')
        tc.url(self._tester.url + '/milestone/%s$' % name)
        tc.find('The open tickets associated with milestone "%s" '
                'have been retargeted to milestone "%s".'
                % (name, retarget_to))
        tc.find("Completed")
        # Open ticket tid1 must have been retargeted...
        self._tester.go_to_ticket(tid1)
        tc.find('<a class="milestone" href="/milestone/%(name)s" '
                'title="No date set">%(name)s</a>' % {'name': retarget_to})
        tc.find('changed from <em>%s</em> to <em>%s</em>'
                % (name, retarget_to))
        tc.find("Ticket retargeted after milestone closed")
        # ...while closed ticket tid2 stays on the (now closed) milestone.
        self._tester.go_to_ticket(tid2)
        tc.find('<a class="closed milestone" href="/milestone/%(name)s" '
                'title="Completed .+ ago (.+)">%(name)s</a>'
                % {'name': name})
        tc.notfind('changed from <em>%s</em> to <em>%s</em>'
                   % (name, retarget_to))
        tc.notfind("Ticket retargeted after milestone closed")
class TestMilestoneDelete(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Delete a milestone and verify that tickets are retargeted
        to the selected milestone."""
        def submit_delete(name, retarget_to=None, tid=None):
            # Confirm deletion on the delete page, then verify the roadmap
            # notice and — when a ticket id is given — the ticket's change
            # history (retargeted or field-deleted, depending on whether a
            # retarget milestone was selected).
            tc.submit('delete', formname='edit')
            tc.url(self._tester.url + '/roadmap')
            tc.find('The milestone "%s" has been deleted.' % name)
            tc.notfind('Milestone:.*%s' % name)
            retarget_notice = 'The tickets associated with milestone "%s" ' \
                              'have been retargeted to milestone "%s".' \
                              % (name, str(retarget_to))
            if retarget_to is not None:
                tc.find('Milestone:.*%s' % retarget_to)
            if tid is not None:
                tc.find(retarget_notice)
                self._tester.go_to_ticket(tid)
                tc.find('Changed[ \t\n]+<a .*>\d+ seconds? ago</a>'
                        '[ \t\n]+by <span class="trac-author">admin</span>')
                if retarget_to is not None:
                    tc.find('<a class="milestone" href="/milestone/%(name)s" '
                            'title="No date set">%(name)s</a>'
                            % {'name': retarget_to})
                    tc.find('<strong class="trac-field-milestone">Milestone'
                            '</strong>[ \t\n]+changed from <em>%s</em> to '
                            '<em>%s</em>' % (name, retarget_to))
                else:
                    tc.find('<th id="h_milestone" class="missing">'
                            '[ \t\n]*Milestone:[ \t\n]*</th>')
                    tc.find('<strong class="trac-field-milestone">Milestone'
                            '</strong>[ \t\n]*<em>%s</em>[ \t\n]*deleted'
                            % name)
                tc.find("Ticket retargeted after milestone deleted")
            else:
                tc.notfind(retarget_notice)

        # No tickets associated with milestone to be retargeted
        name = self._tester.create_milestone()
        self._tester.go_to_milestone(name)
        tc.submit(formname='deletemilestone')
        tc.find("There are no tickets associated with this milestone.")
        submit_delete(name)

        # Don't select a milestone to retarget to
        name = self._tester.create_milestone()
        tid = self._tester.create_ticket(info={'milestone': name})
        self._tester.go_to_milestone(name)
        tc.submit(formname='deletemilestone')
        submit_delete(name, tid=tid)

        # Select a milestone to retarget to
        name = self._tester.create_milestone()
        retarget_to = self._tester.create_milestone()
        tid = self._tester.create_ticket(info={'milestone': name})
        self._tester.go_to_milestone(name)
        tc.submit(formname='deletemilestone')
        tc.formvalue('edit', 'target', retarget_to)
        submit_delete(name, retarget_to, tid)

        # Just navigate to the page and select cancel
        name = self._tester.create_milestone()
        tid = self._tester.create_ticket(info={'milestone': name})
        self._tester.go_to_milestone(name)
        tc.submit(formname='deletemilestone')
        tc.submit('cancel', formname='edit')
        tc.url(self._tester.url + '/milestone/%s' % name)
        tc.notfind('The milestone "%s" has been deleted.' % name)
        tc.notfind('The tickets associated with milestone "%s" '
                   'have been retargeted to milestone' % name)
        self._tester.go_to_ticket(tid)
        tc.find('<a class="milestone" href="/milestone/%(name)s" '
                'title="No date set">%(name)s</a>' % {'name': name})
        tc.notfind('<strong class="trac-field-milestone">Milestone</strong>'
                   '[ \t\n]*<em>%s</em>[ \t\n]*deleted' % name)
        tc.notfind("Ticket retargeted after milestone deleted<br />")
class TestMilestoneRename(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Rename a milestone and verify that the rename is shown in the
        change history for the associated tickets."""
        # Unlike TestAdminMilestoneDetailRename, this goes through the
        # milestone page's own edit form rather than the admin panel.
        name = self._tester.create_milestone()
        new_name = random_unique_camel()
        tid = self._tester.create_ticket(info={'milestone': name})
        self._tester.go_to_milestone(name)
        tc.submit(formname='editmilestone')
        tc.formvalue('edit', 'name', new_name)
        tc.submit('save')
        tc.url(self._tester.url + '/milestone/' + new_name)
        tc.find("Your changes have been saved.")
        tc.find(r"<h1>Milestone %s</h1>" % new_name)
        self._tester.go_to_ticket(tid)
        tc.find('Changed[ \t\n]+<a .*>\d+ seconds? ago</a>[ \t\n]+'
                'by <span class="trac-author">admin</span>')
        tc.find('<a class="milestone" href="/milestone/%(name)s" '
                'title="No date set">%(name)s</a>' % {'name': new_name})
        tc.find('<strong class="trac-field-milestone">Milestone</strong>'
                '[ \t\n]+changed from <em>%s</em> to <em>%s</em>'
                % (name, new_name))
        tc.find("Milestone renamed")
class RegressionTestRev5665(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create version without release time (r5665)"""
        # An empty release time must not break version creation.
        self._tester.create_version(releasetime='')
class RegressionTestRev5994(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of the column label fix in r5994"""
        # Register a custom field with an explicit label, then check the
        # query page renders that label next to the column checkbox.
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'custfield', 'text')
        env.config.set('ticket-custom', 'custfield.label', 'Custom Field')
        env.config.save()
        # The original wrapped the checks in try/finally with an empty
        # finally block (the cleanup was commented out), so the guard was
        # a no-op; it has been removed to keep the control flow honest.
        self._tester.go_to_query()
        tc.find('<label>( |\\n)*<input[^<]*value="custfield"'
                '[^<]*/>( |\\n)*Custom Field( |\\n)*</label>', 's')
class RegressionTestTicket4447(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/4447"""
        # Adding a comment to a ticket created before a custom field was
        # registered must not generate a spurious field-change entry.
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'text')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.save()
        ticketid = self._tester.create_ticket(summary="Hello World")
        self._tester.add_comment(ticketid)
        tc.notfind('<strong class="trac-field-newfield">Another Custom Field'
                   '</strong>[ \t\n]+<em></em>[ \t\n]+deleted')
        tc.notfind('set to')
class RegressionTestTicket4630a(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/4630 a"""
        # With restrict_owner enabled, reassignment offers a select of
        # known users; 'user' and 'joe' must be selectable after they have
        # logged in at least once.
        env = self._testenv.get_trac_environment()
        env.config.set('ticket', 'restrict_owner', 'yes')
        env.config.save()
        try:
            # Make sure 'user' has logged in.
            self._tester.go_to_front()
            self._tester.logout()
            self._tester.login('user')
            self._tester.go_to_front()
            self._tester.logout()
            self._tester.login('joe')
            self._tester.go_to_front()
            self._tester.logout()
            self._tester.login('admin')
            self._tester.create_ticket()
            tc.formvalue('propertyform', 'action', 'reassign')
            tc.find('reassign_reassign_owner')
            tc.formvalue('propertyform', 'action_reassign_reassign_owner',
                         'user')
            tc.submit('submit')
        finally:
            # Undo the config change for now since this (failing)
            # regression test causes problems for later tests.
            env.config.set('ticket', 'restrict_owner', 'no')
            env.config.save()
class RegressionTestTicket4630b(FunctionalTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/4630 b"""
        # NOTE: this must be run after RegressionTestTicket4630 (user must
        # have logged in)
        # Checks the permission system directly (no browser involved).
        from trac.perm import PermissionSystem
        env = self._testenv.get_trac_environment()
        perm = PermissionSystem(env)
        users = perm.get_users_with_permission('TRAC_ADMIN')
        self.assertEqual(users, ['admin'])
        users = perm.get_users_with_permission('TICKET_MODIFY')
        self.assertEqual(sorted(users), ['admin', 'joe', 'user'])
class RegressionTestTicket5022(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5022
        """
        # Passing an existing ticket id to /newticket must not leak that
        # ticket's summary into the new-ticket form.
        summary = 'RegressionTestTicket5022'
        ticket_id = self._tester.create_ticket(summary=summary)
        tc.go(self._tester.url + '/newticket?id=%s' % ticket_id)
        tc.notfind(summary)
class RegressionTestTicket5394a(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5394 a
        Order user list alphabetically in (re)assign action
        """
        # set restrict_owner config
        env = self._testenv.get_trac_environment()
        env.config.set('ticket', 'restrict_owner', 'yes')
        env.config.save()
        self._tester.go_to_front()
        self._tester.logout()
        test_users = ['alice', 'bob', 'jane', 'john', 'charlie', 'alan',
                      'zorro']
        # Apparently it takes a sec for the new user to be recognized by the
        # environment.  So we add all the users, then log in as the users
        # in a second loop.  This should be faster than adding a sleep(1)
        # between the .adduser and .login steps.
        for user in test_users:
            self._testenv.adduser(user)
        for user in test_users:
            self._tester.login(user)
            self._tester.go_to_front()
            self._tester.logout()
        self._tester.login('admin')
        self._tester.create_ticket("regression test 5394a")
        # The reassign-owner select must list all known users sorted.
        options = 'id="action_reassign_reassign_owner">' + \
            ''.join(['<option[^>]*>%s</option>' % user for user in
                     sorted(test_users + ['admin', 'joe', 'user'])])
        tc.find(to_utf8(options), 's')
        # We don't have a good way to fully delete a user from the Trac db.
        # Once we do, we may want to cleanup our list of users here.
class RegressionTestTicket5394b(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5394 b
        Order user list alphabetically on new ticket page
        """
        # Must run after RegressionTestTicket5394a
        self._tester.go_to_front()
        tc.follow('New Ticket')
        tc.find('Create New Ticket')
        test_users = ['alice', 'bob', 'jane', 'john', 'charlie', 'alan',
                      'zorro']
        # Check the owner names appear in alphabetical order on the page.
        # (A stricter pattern matching the full <option> markup used to be
        # built first, but it was dead code — immediately overwritten by
        # this ordering-only pattern — so it has been removed.)
        options = '.*'.join(sorted(test_users + ['admin', 'user']))
        tc.find(options, 's')
# TODO: this should probably be changed to be a testsuite derived from
# TestSetup
class RegressionTestTicket5497prep(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5497 prep
        When the component is changed, the owner should update to the
        default owner of the component.
        If component is changed and the owner is changed (reassigned action
        for open tickets in the basic workflow), the owner should be the
        specified owner, not the owner of the component.
        """
        # The default owner for the component we're using for this testcase
        # is 'user', and we'll manually assign to 'admin'.
        # This component is reused by the RegressionTestTicket5497a-d cases.
        self._tester.create_component('regression5497', 'user')
class RegressionTestTicket5497a(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5497 a
        Open ticket, component changed, owner not changed"""
        self._tester.create_ticket("regression test 5497a")
        tc.formvalue('propertyform', 'field-component', 'regression5497')
        tc.submit('submit')
        # Changing only the component reassigns the ticket to the
        # component's default owner ('user').
        tc.find(regex_owned_by('user'))
class RegressionTestTicket5497b(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5497 b
        Open ticket, component changed, owner changed"""
        self._tester.create_ticket("regression test 5497b")
        tc.formvalue('propertyform', 'field-component', 'regression5497')
        tc.formvalue('propertyform', 'action', 'reassign')
        tc.formvalue('propertyform', 'action_reassign_reassign_owner',
                     'admin')
        tc.submit('submit')
        # The explicitly reassigned owner wins over the component default.
        tc.notfind(regex_owned_by('user'))
        tc.find(regex_owned_by('admin'))
class RegressionTestTicket5497c(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5497 c
        New ticket, component changed, owner not changed"""
        # With no explicit owner, a new ticket gets the component's default
        # owner ('user', set up by RegressionTestTicket5497prep).
        self._tester.create_ticket("regression test 5497c",
                                   {'component':'regression5497'})
        tc.find(regex_owned_by('user'))
class RegressionTestTicket5497d(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5497 d
        New ticket, component changed, owner changed"""
        # An explicit owner on a new ticket overrides the component default.
        self._tester.create_ticket("regression test 5497d",
                                   {'component':'regression5497',
                                    'owner':'admin'})
        tc.find(regex_owned_by('admin'))
class RegressionTestTicket5602(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5602"""
        # Create a set of tickets, and assign them all to a milestone
        milestone = self._tester.create_milestone()
        ids = [self._tester.create_ticket(info={'milestone': milestone})
               for x in range(5)]
        # Need a ticket in each state: new, assigned, accepted, closed,
        # reopened
        # leave ids[0] as new
        # make ids[1] be assigned
        self._tester.go_to_ticket(ids[1])
        tc.formvalue('propertyform', 'action', 'reassign')
        tc.formvalue('propertyform', 'action_reassign_reassign_owner',
                     'admin')
        tc.submit('submit')
        # make ids[2] be accepted
        self._tester.go_to_ticket(ids[2])
        tc.formvalue('propertyform', 'action', 'accept')
        tc.submit('submit')
        # make ids[3] be closed
        self._tester.go_to_ticket(ids[3])
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.formvalue('propertyform', 'action_resolve_resolve_resolution',
                     'fixed')
        tc.submit('submit')
        # make ids[4] be reopened
        self._tester.go_to_ticket(ids[4])
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.formvalue('propertyform', 'action_resolve_resolve_resolution',
                     'fixed')
        tc.submit('submit')
        # FIXME: we have to wait a second to avoid "IntegrityError: columns
        # ticket, time, field are not unique"
        time.sleep(1)
        tc.formvalue('propertyform', 'action', 'reopen')
        tc.submit('submit')
        tc.show()
        tc.notfind("Python Traceback")
        # Go to the milestone and follow the links to the closed and active
        # tickets.
        tc.go(self._tester.url + "/roadmap")
        tc.follow(milestone)
        tc.follow("closed:")
        # The closed ticket shows its resolution.
        tc.find("Resolution:[ \t\n]+fixed")
        tc.back()
        tc.follow("active:")
        # Every non-closed state appears under "active:"; 'closed' may not.
        tc.find("Status:[ \t\n]+new")
        tc.find("Status:[ \t\n]+assigned")
        tc.find("Status:[ \t\n]+accepted")
        tc.notfind("Status:[ \t\n]+closed")
        tc.find("Status:[ \t\n]+reopened")
class RegressionTestTicket5687(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5687"""
        self._tester.go_to_front()
        self._tester.logout()
        # Create a ticket as the unprivileged 'user' account, then restore
        # the admin session for the tests that follow.
        self._tester.login('user')
        self._tester.create_ticket()
        self._tester.logout()
        self._tester.login('admin')
class RegressionTestTicket5930(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5930
        TypeError: from_string() takes exactly 3 non-keyword arguments (4
        given)
        Caused by a saved query
        """
        # Creating a report from a saved query must not raise an internal
        # error.
        self._tester.create_report('Saved Query', 'query:version=1.0', '')
        tc.notfind(internal_error)
        # TODO: Add a testcase for the following:
        # Can you also throw in addition of a 1.0 ticket and a 2.0 ticket
        # as part of the demo env, then see that only the correct one shows
        # up in the report?
class RegressionTestTicket6048(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/6048"""
        # Setup the DeleteTicket plugin; use context managers so neither
        # file handle is leaked (the original left both files open).
        with open(os.path.join(self._testenv.trac_src, 'sample-plugins',
                               'workflow', 'DeleteTicket.py')) as f:
            plugin = f.read()
        with open(os.path.join(self._testenv.tracdir, 'plugins',
                               'DeleteTicket.py'), 'w') as f:
            f.write(plugin)
        env = self._testenv.get_trac_environment()
        prevconfig = env.config.get('ticket', 'workflow')
        env.config.set('ticket', 'workflow',
                       prevconfig + ',DeleteTicketActionController')
        env.config.save()
        env = self._testenv.get_trac_environment()  # reload environment
        # Create a ticket and delete it
        ticket_id = self._tester.create_ticket('RegressionTestTicket6048')
        # (Create a second ticket so that the ticket id does not get reused
        # and confuse the tester object.)
        self._tester.create_ticket(summary='RegressionTestTicket6048b')
        self._tester.go_to_ticket(ticket_id)
        tc.find('delete ticket')
        tc.formvalue('propertyform', 'action', 'delete')
        tc.submit('submit')
        self._tester.go_to_ticket(ticket_id)
        tc.find('Error: Invalid ticket number')
        tc.find('Ticket %s does not exist.' % ticket_id)
        # Remove the DeleteTicket plugin, including any compiled bytecode,
        # so later tests run without it.
        env.config.set('ticket', 'workflow', prevconfig)
        env.config.save()
        env = self._testenv.get_trac_environment()  # reload environment
        for ext in ('py', 'pyc', 'pyo'):
            filename = os.path.join(self._testenv.tracdir, 'plugins',
                                    'DeleteTicket.%s' % ext)
            if os.path.exists(filename):
                os.unlink(filename)
class RegressionTestTicket6747(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/6747"""
        # Configure the resolve action to also set a fixed owner.
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-workflow', 'resolve.operations',
                       'set_resolution,set_owner')
        env.config.set('ticket-workflow', 'resolve.set_owner',
                       'a_specified_owner')
        env.config.save()
        try:
            self._tester.create_ticket("RegressionTestTicket6747")
            # The owner must be rendered on its own, not run together with
            # the following text ("...owneras").
            tc.find("a_specified_owner")
            tc.notfind("a_specified_owneras")
        finally:
            # Undo the config change to avoid causing problems for later
            # tests.
            env.config.set('ticket-workflow', 'resolve.operations',
                           'set_resolution')
            env.config.remove('ticket-workflow', 'resolve.set_owner')
            env.config.save()
class RegressionTestTicket6879a(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/6879 a
        Make sure that previewing a close does not make the available actions
        be those for the close status.
        """
        # create a ticket, then preview resolving the ticket twice
        self._tester.create_ticket("RegressionTestTicket6879 a")
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.formvalue('propertyform', 'action_resolve_resolve_resolution',
                     'fixed')
        tc.submit('preview')
        # After the first preview the 'resolve' action must still be
        # available for selection.
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.submit('preview')
class RegressionTestTicket6879b(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/6879 b
        Make sure that previewing a close does not make the available actions
        be those for the close status.
        """
        # create a ticket, preview resolving it, then actually resolve it
        self._tester.create_ticket("RegressionTestTicket6879 b")
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.formvalue('propertyform', 'action_resolve_resolve_resolution',
                     'fixed')
        tc.submit('preview')
        # After the preview the 'resolve' action must still be selectable
        # and submittable.
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.submit('submit')
class RegressionTestTicket6912a(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/6912 a"""
        # Creating a component with an empty owner must be possible; turn
        # an ItemNotFoundError (the empty option missing from the form)
        # into a test failure rather than a test error.
        try:
            self._tester.create_component(name='RegressionTestTicket6912a',
                                          owner='')
        except twill.utils.ClientForm.ItemNotFoundError, e:
            raise twill.errors.TwillAssertionError(e)
class RegressionTestTicket6912b(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/6912 b"""
        self._tester.create_component(name='RegressionTestTicket6912b',
                                      owner='admin')
        tc.follow('RegressionTestTicket6912b')
        # Clearing the owner of an existing component must be possible;
        # a missing empty option is reported as a failure, not an error.
        try:
            tc.formvalue('modcomp', 'owner', '')
        except twill.utils.ClientForm.ItemNotFoundError, e:
            raise twill.errors.TwillAssertionError(e)
        tc.submit('save', formname='modcomp')
        # The component list shows an empty owner cell afterwards.
        tc.find('RegressionTestTicket6912b</a>[ \n\t]*</td>[ \n\t]*'
                '<td class="owner"></td>', 's')
class RegressionTestTicket7821group(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/7821 group.
        Every component of the configured default_query (cc filter with
        $USER substitution, col, group, groupdesc, max) must be reflected
        in the rendered query form.
        """
        env = self._testenv.get_trac_environment()
        saved_default_query = env.config.get('query', 'default_query')
        default_query = 'status!=closed&order=status&group=status&max=42' \
                        '&desc=1&groupdesc=1&col=summary|status|cc' \
                        '&cc~=$USER'
        env.config.set('query', 'default_query', default_query)
        env.config.save()
        try:
            self._tester.create_ticket('RegressionTestTicket7821 group')
            self._tester.go_to_query()
            # $USER
            tc.find('<input type="text" name="0_cc" value="admin"'
                    ' size="[0-9]+" />')
            # col
            tc.find('<input type="checkbox" name="col" value="summary"'
                    ' checked="checked" />')
            tc.find('<input type="checkbox" name="col" value="owner" />')
            tc.find('<input type="checkbox" name="col" value="status"'
                    ' checked="checked" />')
            tc.find('<input type="checkbox" name="col" value="cc"'
                    ' checked="checked" />')
            # group
            tc.find('<option selected="selected" value="status">Status'
                    '</option>')
            # groupdesc
            tc.find('<input type="checkbox" name="groupdesc" id="groupdesc"'
                    ' checked="checked" />')
            # max
            tc.find('<input type="text" name="max" id="max" size="[0-9]*?"'
                    ' value="42" />')
            # col in results
            tc.find('<a title="Sort by Ticket [(]ascending[)]" ')
            tc.find('<a title="Sort by Summary [(]ascending[)]" ')
            tc.find('<a title="Sort by Status [(]ascending[)]" ')
            tc.find('<a title="Sort by Cc [(]ascending[)]" ')
            tc.notfind('<a title="Sort by Owner "')
        finally:
            # Restore the saved option for subsequent tests.
            env.config.set('query', 'default_query', saved_default_query)
            env.config.save()
class RegressionTestTicket7821var(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/7821 var"""
        env = self._testenv.get_trac_environment()
        saved_default_query = env.config.get('query', 'default_query')
        saved_restrict_owner = env.config.get('ticket', 'restrict_owner')
        default_query = '?status=!closed&cc=~$USER&owner=$USER'
        env.config.set('query', 'default_query', default_query)
        # restrict_owner off, so the owner filter is a plain text input.
        env.config.set('ticket', 'restrict_owner', 'no')
        env.config.save()
        try:
            self._tester.create_ticket('RegressionTestTicket7821 var')
            self._tester.go_to_query()
            # $USER in default_query must be substituted with the logged-in
            # user ('admin').
            tc.find('<input type="text" name="0_owner" value="admin"'
                    ' size="[0-9]+" />')
            tc.find('<input type="text" name="0_cc" value="admin"'
                    ' size="[0-9]+" />')
            # query:owner=$USER&or&cc~=$USER
            tc.go(self._tester.url + \
                  '/intertrac/query:owner=$USER&or&cc~=$USER')
            tc.find('<input type="text" name="0_owner" value="admin"'
                    ' size="[0-9]+" />')
            tc.find('<input type="text" name="1_cc" value="admin"'
                    ' size="[0-9]+" />')
        finally:
            # Restore the saved options for subsequent tests.
            env.config.set('query', 'default_query', saved_default_query)
            env.config.set('ticket', 'restrict_owner', saved_restrict_owner)
            env.config.save()
class RegressionTestTicket8247(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/8247
        Author field of ticket comment corresponding to the milestone removal
        was always 'anonymous'."""
        milestone_name = "MilestoneRemove"
        self._tester.create_milestone(milestone_name)
        # Attach a ticket to the milestone, then remove the milestone from
        # the admin panel and check who is recorded as the change author.
        ticket_id = self._tester.create_ticket(
            info={'milestone': milestone_name})
        ticket_url = self._tester.url + "/ticket/%d" % ticket_id
        tc.go(ticket_url)
        tc.find(milestone_name)
        tc.go(self._tester.url + "/admin/ticket/milestones")
        tc.formvalue('milestone_table', 'sel', milestone_name)
        tc.submit('remove')
        tc.go(ticket_url)
        # The deletion must be attributed to 'admin', never 'anonymous'.
        tc.find('<strong class="trac-field-milestone">Milestone</strong>'
                '[ \n\t]*<em>%s</em> deleted' % milestone_name)
        tc.find('Changed <a.* ago</a> by '
                '<span class="trac-author">admin</span>')
        tc.notfind('anonymous')
class RegressionTestTicket8861(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/8816
        When creating a milestone with an already existing name, you get
        a warning. After changing the name you will find that the original
        milestone with that name is renamed instead of a new one being
        created."""
        # NOTE(review): the docstring references ticket 8816 while the
        # class is named 8861 -- confirm which ticket number is intended.
        name = "8861Milestone"
        self._tester.create_milestone(name)
        tc.go(self._tester.url + "/milestone?action=new")
        tc.formvalue('edit', 'name', name)
        tc.submit('Add milestone')
        # A duplicate name is rejected with a warning.
        tc.find('Milestone "%s" already exists' % name)
        tc.formvalue('edit', 'name', name + '__')
        tc.submit('Add milestone')
        # Both milestones must now exist: the original was not renamed.
        tc.go(self._tester.url + "/roadmap")
        tc.find('Milestone: <em>%s</em>' % name)
        tc.find('Milestone: <em>%s</em>' % (name + '__'))
class RegressionTestTicket9084(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/9084"""
        ticketid = self._tester.create_ticket()
        self._tester.add_comment(ticketid)
        self._tester.go_to_ticket(ticketid)
        tc.submit('2', formname='reply-to-comment-1') # '1' hidden, '2' submit
        tc.formvalue('propertyform', 'comment', random_sentence(3))
        tc.submit('Submit changes')
        # Replying to a comment must not trigger a server-side assertion.
        tc.notfind('AssertionError')
class RegressionTestTicket9981(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/9981"""
        # Close a first ticket, then link to one of its comments from a
        # second ticket.
        tid1 = self._tester.create_ticket()
        self._tester.add_comment(tid1)
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.submit('submit')
        tid2 = self._tester.create_ticket()
        comment = '[comment:1:ticket:%s]' % tid1
        self._tester.add_comment(tid2, comment)
        self._tester.go_to_ticket(tid2)
        # The comment link to the closed ticket carries the 'closed' class.
        tc.find('<a class="closed ticket"[ \t\n]+'
                'href="/ticket/%(num)s#comment:1"[ \t\n]+'
                'title="Comment 1 for Ticket #%(num)s"' % {'num': tid1})
class RegressionTestTicket10010(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/10010
        Allow configuring the default retargeting option when closing or
        deleting a milestone."""
        m1 = self._tester.create_milestone()
        m2 = self._tester.create_milestone()
        self._tester.create_ticket(info={'milestone': m1})
        def go_to_and_find_markup(markup, find=True):
            # Check the retarget markup on both the edit and the delete
            # page of the milestone; the two pages share the same markup.
            for action in ('edit', 'delete'):
                self._tester.go_to_milestone(m1)
                tc.formvalue('editmilestone', 'action', action)
                tc.submit()
                if find:
                    tc.find(markup)
                else:
                    tc.notfind(markup)
        try:
            # No default retarget option selected initially.
            go_to_and_find_markup('<option selected="selected" ', False)
            self._testenv.set_config('milestone', 'default_retarget_to', m2)
            go_to_and_find_markup('<option selected="selected" '
                                  'value="%(name)s">%(name)s</option>' % {'name': m2})
            # Retargeting to the milestone being deleted/closed itself, or
            # to an empty value, must not preselect anything.
            self._testenv.set_config('milestone', 'default_retarget_to', m1)
            go_to_and_find_markup('<option selected="selected" ', False)
            self._testenv.set_config('milestone', 'default_retarget_to', '')
            go_to_and_find_markup('<option selected="selected" ', False)
        finally:
            self._testenv.remove_config('milestone', 'default_retarget_to')
class RegressionTestTicket10772(FunctionalTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/10772"""
        def find_prop(field, value=None):
            # Check how `field` is rendered in the ticket box: as a query
            # (or milestone) link when it has a value, as an empty cell
            # otherwise.
            if value and field == 'type':
                tc.find(r'<span class="trac-%(field)s">\s*'
                        r'<a href="/query\?status=!closed&'
                        r'%(field)s=%(value)s">\s*%(value)s\s*</a>\s*</span>'
                        % {'field': field, 'value': value})
            elif value and field == 'milestone':
                tc.find(r'<td headers="h_%(field)s">\s*'
                        r'<a class="%(field)s" href="/%(field)s/%(value)s" '
                        r'title=".+">\s*%(value)s\s*</a>\s*</td>'
                        % {'field': field, 'value': value})
            elif value:
                tc.find(r'<td headers="h_%(field)s">\s*'
                        r'<a href="/query\?status=!closed&'
                        r'%(field)s=%(value)s">\s*%(value)s\s*</a>\s*</td>'
                        % {'field': field, 'value': value})
            else:
                tc.find(r'<td headers="h_%(field)s">\s*</td>'
                        % {'field': field})
        self._testenv.set_config('ticket', 'optional_fields',
                                 'component, milestone, priority, version')
        try:
            # TODO: use the //Clear default// buttons to clear these values
            # With the fields optional and their defaults cleared, a new
            # ticket shows empty cells (priority keeps the default set
            # below).
            self._tester.go_to_admin("Components")
            tc.submit('clear', formname='component_table')
            self._tester.go_to_admin("Milestones")
            tc.submit('clear', formname='milestone_table')
            self._tester.go_to_admin("Versions")
            tc.submit('clear', formname='version_table')
            self._tester.go_to_admin("Priorities")
            tc.formvalue('enumtable', 'default', 'major')
            tc.submit('apply')
            self._tester.go_to_ticket()
            tc.formvalue('propertyform', 'field-summary', 'ticket summary')
            tc.submit('submit')
            find_prop('component')
            find_prop('milestone')
            find_prop('priority', 'major')
            find_prop('version')
            # With the fields mandatory again and defaults configured, a
            # new ticket must pick up every default value.
            self._testenv.set_config('ticket', 'optional_fields', '')
            self._tester.go_to_admin("Components")
            tc.formvalue('component_table', 'default', 'component2')
            tc.submit('apply')
            self._tester.go_to_admin("Milestones")
            tc.formvalue('milestone_table', 'ticket_default', 'milestone2')
            tc.submit('apply')
            self._tester.go_to_admin("Priorities")
            tc.formvalue('enumtable', 'default', 'minor')
            tc.submit('apply')
            self._tester.go_to_admin("Versions")
            tc.formvalue('version_table', 'default', '2.0')
            tc.submit('apply')
            self._tester.go_to_ticket()
            self._tester.go_to_admin("Ticket Types")
            tc.formvalue('enumtable', 'default', 'task')
            tc.submit('apply')
            self._tester.go_to_ticket()
            tc.formvalue('propertyform', 'field-summary', 'ticket summary')
            tc.submit('submit')
            find_prop('component', 'component2')
            find_prop('milestone', 'milestone2')
            find_prop('priority', 'minor')
            find_prop('version', '2.0')
            find_prop('type', 'task')
        finally:
            self._testenv.remove_config('ticket', 'optional_fields')
class RegressionTestTicket11028(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/11028
        MILESTONE_VIEW and ROADMAP_VIEW permissions are enforced on the
        roadmap page.
        """
        self._tester.go_to_roadmap()
        try:
            # Check that a milestone is found on the roadmap,
            # even for anonymous
            tc.find('<a href="/milestone/milestone1">[ \n\t]*'
                    'Milestone: <em>milestone1</em>[ \n\t]*</a>')
            self._tester.logout()
            tc.find('<a href="/milestone/milestone1">[ \n\t]*'
                    'Milestone: <em>milestone1</em>[ \n\t]*</a>')
            # Check that no milestones are found on the roadmap when
            # MILESTONE_VIEW is revoked
            self._testenv.revoke_perm('anonymous', 'MILESTONE_VIEW')
            tc.reload()
            tc.notfind('Milestone: <em>milestone\d+</em>')
            # Check that roadmap can't be viewed without ROADMAP_VIEW
            self._testenv.revoke_perm('anonymous', 'ROADMAP_VIEW')
            self._tester.go_to_url(self._tester.url + '/roadmap')
            tc.find('<h1>Error: Forbidden</h1>')
        finally:
            # Restore state prior to test execution
            self._tester.login('admin')
            self._testenv.grant_perm('anonymous',
                                     ('ROADMAP_VIEW', 'MILESTONE_VIEW'))
class RegressionTestTicket11153(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/11153
        The "View Tickets" mainnav entry falls back from the report page
        to the query page (and disappears entirely) as permissions and
        components are removed.
        """
        # Check that "View Tickets" mainnav entry links to the report page
        self._tester.go_to_view_tickets()
        # Check that "View Tickets" mainnav entry links to the query page
        # when the user doesn't have REPORT_VIEW, and that the mainnav entry
        # is not present when the user doesn't have TICKET_VIEW.
        try:
            self._tester.logout()
            self._testenv.revoke_perm('anonymous', 'REPORT_VIEW')
            self._tester.go_to_view_tickets('query')
            self._testenv.revoke_perm('anonymous', 'TICKET_VIEW')
            self._tester.go_to_front()
            tc.notfind('\\bView Tickets\\b')
        finally:
            self._testenv.grant_perm('anonymous',
                                     ('REPORT_VIEW', 'TICKET_VIEW'))
            self._tester.login('admin')
        # Disable the ReportModule component and check that "View Tickets"
        # mainnav entry links to the `/query` page.
        env = self._testenv.get_trac_environment()
        env.config.set('components', 'trac.ticket.report.ReportModule',
                       'disabled')
        env.config.save()
        try:
            self._tester.go_to_view_tickets('query')
        finally:
            env.config.remove('components', 'trac.ticket.report.ReportModule')
            env.config.save()
        # Disable the QueryModule component and check that "View Tickets"
        # mainnav entry links to the `/report` page
        env.config.set('components', 'trac.ticket.query.QueryModule',
                       'disabled')
        env.config.save()
        try:
            self._tester.go_to_view_tickets('report')
            tc.notfind('<li class="last first">Available Reports</li>')
        finally:
            env.config.remove('components', 'trac.ticket.query.QueryModule')
            env.config.save()
class RegressionTestTicket11176(FunctionalTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/11176
        Fine-grained permission checks should be enforced on the Report list
        page, the report pages and query pages."""
        # Reports 1 and 2 are readable for anonymous; 3-8 are forbidden.
        self._testenv.enable_authz_permpolicy("""
            [report:1]
            anonymous = REPORT_VIEW
            [report:2]
            anonymous = REPORT_VIEW
            [report:*]
            anonymous =
        """)
        self._tester.go_to_front()
        self._tester.logout()
        self._tester.go_to_view_tickets()
        try:
            # Check that permissions are enforced on the report list page
            tc.find(r'<a title="View report" '
                    r'href="/report/1">[ \n\t]*<em>\{1\}</em>')
            tc.find(r'<a title="View report" '
                    r'href="/report/2">[ \n\t]*<em>\{2\}</em>')
            for report_num in range(3, 9):
                tc.notfind(r'<a title="View report" '
                           r'href="/report/%(num)s">[ \n\t]*'
                           r'<em>\{%(num)s\}</em>' % {'num': report_num})
            # Check that permissions are enforced on the report pages
            tc.go(self._tester.url + '/report/1')
            tc.find(r'<h1>\{1\} Active Tickets[ \n\t]*'
                    r'(<span class="numrows">\(\d+ matches\)</span>)?'
                    r'[ \n\t]*</h1>')
            tc.go(self._tester.url + '/report/2')
            tc.find(r'<h1>\{2\} Active Tickets by Version[ \n\t]*'
                    r'(<span class="numrows">\(\d+ matches\)</span>)?'
                    r'[ \n\t]*</h1>')
            for report_num in range(3, 9):
                tc.go(self._tester.url + '/report/%d' % report_num)
                tc.find(r'<h1>Error: Forbidden</h1>')
            # Check that permissions are enforced on the query pages
            tc.go(self._tester.url + '/query?report=1')
            tc.find(r'<h1>Active Tickets '
                    r'<span class="numrows">\(\d+ matches\)</span></h1>')
            tc.go(self._tester.url + '/query?report=2')
            tc.find(r'<h1>Active Tickets by Version '
                    r'<span class="numrows">\(\d+ matches\)</span></h1>')
            for report_num in range(3, 9):
                tc.go(self._tester.url + '/query?report=%d' % report_num)
                tc.find(r'<h1>Error: Forbidden</h1>')
        finally:
            # Restore state prior to test execution.
            self._tester.login('admin')
            self._testenv.disable_authz_permpolicy()
def functionalSuite(suite=None):
if not suite:
import trac.tests.functional
suite = trac.tests.functional.functionalSuite()
suite.addTest(TestTickets())
suite.addTest(TestTicketMaxSummarySize())
suite.addTest(TestTicketAddAttachment())
suite.addTest(TestTicketPreview())
suite.addTest(TestTicketNoSummary())
suite.addTest(TestTicketAltFormats())
suite.addTest(TestTicketCSVFormat())
suite.addTest(TestTicketTabFormat())
suite.addTest(TestTicketRSSFormat())
suite.addTest(TestTicketSearch())
suite.addTest(TestNonTicketSearch())
suite.addTest(TestTicketHistory())
suite.addTest(TestTicketHistoryDiff())
suite.addTest(TestTicketQueryLinks())
suite.addTest(TestTicketQueryLinksQueryModuleDisabled())
suite.addTest(TestTicketQueryOrClause())
suite.addTest(TestTicketCustomFieldTextNoFormat())
suite.addTest(TestTicketCustomFieldTextWikiFormat())
suite.addTest(TestTicketCustomFieldTextAreaNoFormat())
suite.addTest(TestTicketCustomFieldTextAreaWikiFormat())
suite.addTest(TestTicketCustomFieldTextReferenceFormat())
suite.addTest(TestTicketCustomFieldTextListFormat())
suite.addTest(RegressionTestTicket10828())
suite.addTest(TestTicketTimeline())
suite.addTest(TestAdminComponent())
suite.addTest(TestAdminComponentAuthorization())
suite.addTest(TestAdminComponentDuplicates())
suite.addTest(TestAdminComponentRemoval())
suite.addTest(TestAdminComponentNonRemoval())
suite.addTest(TestAdminComponentDefault())
suite.addTest(TestAdminComponentDetail())
suite.addTest(TestAdminComponentNoneDefined())
suite.addTest(TestAdminMilestone())
suite.addTest(TestAdminMilestoneAuthorization())
suite.addTest(TestAdminMilestoneSpace())
suite.addTest(TestAdminMilestoneDuplicates())
suite.addTest(TestAdminMilestoneDetail())
suite.addTest(TestAdminMilestoneDue())
suite.addTest(TestAdminMilestoneDetailDue())
suite.addTest(TestAdminMilestoneDetailRename())
suite.addTest(TestAdminMilestoneCompleted())
suite.addTest(TestAdminMilestoneCompletedFuture())
suite.addTest(TestAdminMilestoneRemove())
suite.addTest(TestAdminMilestoneRemoveMulti())
suite.addTest(TestAdminMilestoneNonRemoval())
suite.addTest(TestAdminMilestoneDefaults())
suite.addTest(TestAdminPriority())
suite.addTest(TestAdminPriorityAuthorization())
suite.addTest(TestAdminPriorityModify())
suite.addTest(TestAdminPriorityRemove())
suite.addTest(TestAdminPriorityRemoveMulti())
suite.addTest(TestAdminPriorityNonRemoval())
suite.addTest(TestAdminPriorityDefault())
suite.addTest(TestAdminPriorityDetail())
suite.addTest(TestAdminPriorityRenumber())
suite.addTest(TestAdminPriorityRenumberDup())
suite.addTest(TestAdminResolution())
suite.addTest(TestAdminResolutionAuthorization())
suite.addTest(TestAdminResolutionDuplicates())
suite.addTest(TestAdminResolutionDefault())
suite.addTest(TestAdminSeverity())
suite.addTest(TestAdminSeverityAuthorization())
suite.addTest(TestAdminSeverityDuplicates())
suite.addTest(TestAdminSeverityDefault())
suite.addTest(TestAdminType())
suite.addTest(TestAdminTypeAuthorization())
suite.addTest(TestAdminTypeDuplicates())
suite.addTest(TestAdminTypeDefault())
suite.addTest(TestAdminVersion())
suite.addTest(TestAdminVersionAuthorization())
suite.addTest(TestAdminVersionDuplicates())
suite.addTest(TestAdminVersionDetail())
suite.addTest(TestAdminVersionDetailTime())
suite.addTest(TestAdminVersionDetailCancel())
suite.addTest(TestAdminVersionRemove())
suite.addTest(TestAdminVersionRemoveMulti())
suite.addTest(TestAdminVersionNonRemoval())
suite.addTest(TestAdminVersionDefault())
suite.addTest(TestNewReport())
suite.addTest(TestReportRealmDecoration())
suite.addTest(TestReportDynamicVariables())
suite.addTest(TestMilestone())
suite.addTest(TestMilestoneAddAttachment())
suite.addTest(TestMilestoneClose())
suite.addTest(TestMilestoneDelete())
suite.addTest(TestMilestoneRename())
suite.addTest(RegressionTestRev5665())
suite.addTest(RegressionTestRev5994())
suite.addTest(RegressionTestTicket4447())
suite.addTest(RegressionTestTicket4630a())
suite.addTest(RegressionTestTicket4630b())
suite.addTest(RegressionTestTicket5022())
suite.addTest(RegressionTestTicket5394a())
suite.addTest(RegressionTestTicket5394b())
suite.addTest(RegressionTestTicket5497prep())
suite.addTest(RegressionTestTicket5497a())
suite.addTest(RegressionTestTicket5497b())
suite.addTest(RegressionTestTicket5497c())
suite.addTest(RegressionTestTicket5497d())
suite.addTest(RegressionTestTicket5602())
suite.addTest(RegressionTestTicket5687())
suite.addTest(RegressionTestTicket5930())
suite.addTest(RegressionTestTicket6048())
suite.addTest(RegressionTestTicket6747())
suite.addTest(RegressionTestTicket6879a())
suite.addTest(RegressionTestTicket6879b())
suite.addTest(RegressionTestTicket6912a())
suite.addTest(RegressionTestTicket6912b())
suite.addTest(RegressionTestTicket7821group())
suite.addTest(RegressionTestTicket7821var())
suite.addTest(RegressionTestTicket8247())
suite.addTest(RegressionTestTicket8861())
suite.addTest(RegressionTestTicket9084())
suite.addTest(RegressionTestTicket9981())
suite.addTest(RegressionTestTicket10010())
suite.addTest(RegressionTestTicket10772())
suite.addTest(RegressionTestTicket11028())
suite.addTest(RegressionTestTicket11153())
if ConfigObj:
suite.addTest(RegressionTestTicket11176())
else:
print "SKIP: RegressionTestTicket11176 (ConfigObj not installed)"
return suite
# Run just this module's functional tests when invoked directly.
if __name__ == '__main__':
    unittest.main(defaultTest='functionalSuite')
1.1.2dev: Merged [12627] from 1.0-stable (fix for #11557)
git-svn-id: 0d96b0c1a6983ccc08b3732614f4d6bfcf9cbb42@12628 af82e41b-90c4-0310-8c96-b1721e28e2e2
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import re
from datetime import datetime, timedelta
from trac.admin.tests.functional import AuthorizationTestCaseSetup
from trac.test import locale_en
from trac.tests.functional import *
from trac.util.datefmt import utc, localtz, format_date, format_datetime, \
pretty_timedelta
from trac.util.text import to_utf8
try:
from configobj import ConfigObj
except ImportError:
ConfigObj = None
class AdminEnumDefaultTestCaseSetup(FunctionalTwillTestCaseSetup):
    def test_default(self, enum, name):
        """Set `name` as the default value in the `enum` admin table and
        check that it sticks, then clear it again via the "Clear default"
        button."""
        url = self._tester.url + '/admin/ticket/%s' % enum
        tc.go(url)
        tc.url(url + '$')
        tc.find(name)
        tc.formvalue('enumtable', 'default', name)
        tc.submit('apply')
        tc.url(url + '$')
        # The chosen value's radio button must come back checked.
        tc.find('radio.*"%s"\\schecked="checked"' % name)
        # Test the "Clear default" button
        tc.go(url)
        tc.submit('clear', formname='enumtable')
        tc.notfind('type="radio" name="default" value=".+" checked="checked"')
class TestTickets(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Create a ticket and comment on it."""
        # TODO: this should be split into multiple tests
        # (renamed the local so it no longer shadows the builtin `id`)
        ticket_id = self._tester.create_ticket()
        self._tester.add_comment(ticket_id)
class TestTicketMaxSummarySize(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test `[ticket] max_summary_size` option.
        http://trac.edgewall.org/ticket/11472"""
        prev_max_summary_size = \
            self._testenv.get_config('ticket', 'max_summary_size')
        short_summary = "abcdefghijklmnopqrstuvwxyz"
        long_summary = short_summary + "."
        # The limit equals the length of the short summary, so the short
        # one is accepted and the one-character-longer one is refused.
        max_summary_size = len(short_summary)
        warning_message = r"Ticket summary is too long \(must be less " \
                          r"than %s characters\)" % max_summary_size
        self._testenv.set_config('ticket', 'max_summary_size',
                                 str(max_summary_size))
        try:
            self._tester.create_ticket(short_summary)
            tc.find(short_summary)
            tc.notfind(warning_message)
            self._tester.go_to_front()
            tc.follow(r"\bNew Ticket\b")
            tc.notfind(internal_error)
            tc.url(self._tester.url + '/newticket')
            tc.formvalue('propertyform', 'field_summary', long_summary)
            tc.submit('submit')
            # The submission is rejected: still on /newticket, with the
            # warning shown.
            tc.url(self._tester.url + '/newticket')
            tc.find(warning_message)
        finally:
            self._testenv.set_config('ticket', 'max_summary_size',
                                     prev_max_summary_size)
class TestTicketAddAttachment(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Add attachment to a ticket. Test that the attachment button
        reads 'Attach file' when no files have been attached, and 'Attach
        another file' when there are existing attachments.
        Feature added in http://trac.edgewall.org/ticket/10281"""
        id = self._tester.create_ticket()
        tc.find("Attach file")
        filename = self._tester.attach_file_to_ticket(id)
        self._tester.go_to_ticket(id)
        tc.find("Attach another file")
        # The attachment count, the file name and a "download all as .zip"
        # link must all be shown.
        tc.find('Attachments <span class="trac-count">\(1\)</span>')
        tc.find(filename)
        tc.find('Download all attachments as:\s+<a rel="nofollow" '
                'href="/zip-attachment/ticket/%s/">.zip</a>' % id)
class TestTicketPreview(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Preview ticket creation"""
        self._tester.go_to_front()
        tc.follow('New Ticket')
        summary = random_sentence(5)
        desc = random_sentence(5)
        tc.formvalue('propertyform', 'field-summary', summary)
        tc.formvalue('propertyform', 'field-description', desc)
        tc.submit('preview')
        # Previewing keeps us on /newticket and shows the pending values
        # without creating the ticket.
        tc.url(self._tester.url + '/newticket$')
        tc.find('ticket not yet created')
        tc.find(summary)
        tc.find(desc)
class TestTicketNoSummary(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Creating a ticket without summary should fail"""
        self._tester.go_to_front()
        tc.follow('New Ticket')
        desc = random_sentence(5)
        tc.formvalue('propertyform', 'field-description', desc)
        tc.submit('submit')
        # The form is redisplayed with an error; the description typed in
        # is preserved and no ticket has been created.
        tc.find(desc)
        tc.find('Tickets must contain a summary.')
        tc.find('Create New Ticket')
        tc.find('ticket not yet created')
class TestTicketAltFormats(FunctionalTestCaseSetup):
    def runTest(self):
        """Download ticket in alternative formats"""
        summary = random_sentence(5)
        self._tester.create_ticket(summary)
        # `fmt` rather than `format`: don't shadow the builtin.
        for fmt in ['Comma-delimited Text', 'Tab-delimited Text',
                    'RSS Feed']:
            tc.follow(fmt)
            content = b.get_html()
            # Membership test instead of `content.find(summary) < 0`.
            if summary not in content:
                raise AssertionError('Summary missing from %s format' % fmt)
            # Return to the ticket page before trying the next format.
            tc.back()
class TestTicketCSVFormat(FunctionalTestCaseSetup):
    def runTest(self):
        """Download ticket in CSV format"""
        self._tester.create_ticket()
        tc.follow('Comma-delimited Text')
        # The download must begin with a UTF-8 BOM followed by the CSV
        # header row.
        expected_prefix = '\xef\xbb\xbfid,summary,'
        payload = b.get_html()
        if not payload.startswith(expected_prefix):
            raise AssertionError('Bad CSV format')
class TestTicketTabFormat(FunctionalTestCaseSetup):
    def runTest(self):
        """Download ticket in Tab-delimited format"""
        self._tester.create_ticket()
        tc.follow('Tab-delimited Text')
        # The download must begin with a UTF-8 BOM followed by the
        # tab-separated header row.
        expected_prefix = '\xef\xbb\xbfid\tsummary\t'
        payload = b.get_html()
        if not payload.startswith(expected_prefix):
            raise AssertionError('Bad tab delimited format')
class TestTicketRSSFormat(FunctionalTestCaseSetup):
    def runTest(self):
        """Download ticket in RSS format"""
        summary = random_sentence(5)
        self._tester.create_ticket(summary)
        # Make a number of changes to exercise all of the RSS feed code
        tc.formvalue('propertyform', 'comment', random_sentence(3))
        tc.formvalue('propertyform', 'field-type', 'task')
        tc.formvalue('propertyform', 'description', summary + '\n\n' +
                     random_sentence(8))
        tc.formvalue('propertyform', 'field-keywords', 'key')
        tc.submit('submit')
        time.sleep(1) # Have to wait a second -- presumably so the second
                      # change gets a distinct timestamp; TODO confirm
        tc.formvalue('propertyform', 'field-keywords', '')
        tc.submit('submit')
        tc.find('RSS Feed')
        tc.follow('RSS Feed')
        # The feed body must start with a valid XML declaration.
        rss = b.get_html()
        if not rss.startswith('<?xml version="1.0"?>'):
            raise AssertionError('RSS Feed not valid feed')
class TestTicketSearch(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket search"""
        summary = random_sentence(4)
        self._tester.create_ticket(summary)
        self._tester.go_to_front()
        tc.follow('Search')
        # Restrict the search to tickets and query for the exact summary.
        tc.formvalue('fullsearch', 'ticket', True)
        tc.formvalue('fullsearch', 'q', summary)
        tc.submit('Search')
        # The ticket must appear in the result list.
        tc.find('class="searchable">.*' + summary)
        tc.notfind('No matches found')
class TestNonTicketSearch(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test non-ticket search"""
        # Create a summary containing only unique words.  Use `_` for the
        # unused loop variable and a generator instead of a throwaway list.
        summary = ' '.join(random_word() + '_TestNonTicketSearch'
                           for _ in range(5))
        self._tester.create_ticket(summary)
        self._tester.go_to_front()
        tc.follow('Search')
        # Exclude ticket results: the summary must then not be found.
        tc.formvalue('fullsearch', 'ticket', False)
        tc.formvalue('fullsearch', 'q', summary)
        tc.submit('Search')
        tc.notfind('class="searchable">' + summary)
        tc.find('No matches found')
class TestTicketHistory(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket history"""
        summary = random_sentence(5)
        ticketid = self._tester.create_ticket(summary)
        comment = self._tester.add_comment(ticketid)
        self._tester.go_to_ticket(ticketid)
        # The live (current) view exposes all the edit controls.
        tc.find(r'<a [^>]+>\bModify\b</a>')
        tc.find(r"\bAttach file\b")
        tc.find(r"\bAdd Comment\b")
        tc.find(r"\bModify Ticket\b")
        tc.find(r"\bPreview\b")
        tc.find(r"\bSubmit changes\b")
        url = b.get_url()
        # Version 0: the state before the comment was added.
        tc.go(url + '?version=0')
        tc.find('at <[^>]*>*Initial Version')
        tc.find(summary)
        tc.notfind(comment)
        # Version 1: the comment is present.
        tc.go(url + '?version=1')
        tc.find('at <[^>]*>*Version 1')
        tc.find(summary)
        tc.find(comment)
        # Historical versions are read-only: no edit controls at all.
        tc.notfind(r'<a [^>]+>\bModify\b</a>')
        tc.notfind(r"\bAttach file\b")
        tc.notfind(r"\bAdd Comment\b")
        tc.notfind(r"\bModify Ticket\b")
        tc.notfind(r"\bPreview\b")
        tc.notfind(r"\bSubmit changes\b")
class TestTicketHistoryDiff(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket history (diff)"""
        self._tester.create_ticket()
        # Change the description so a "modified (diff)" entry appears.
        tc.formvalue('propertyform', 'description', random_sentence(6))
        tc.submit('submit')
        tc.find('Description<[^>]*>\\s*modified \\(<[^>]*>diff', 's')
        tc.follow('diff')
        # The diff view compares the initial version against version 1.
        tc.find('Changes\\s*between\\s*<[^>]*>Initial Version<[^>]*>\\s*and'
                '\\s*<[^>]*>Version 1<[^>]*>\\s*of\\s*<[^>]*>Ticket #' , 's')
class TestTicketQueryLinks(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket query links"""
        count = 3
        ticket_ids = [self._tester.create_ticket(
            summary='TestTicketQueryLinks%s' % i)
            for i in range(count)]
        self._tester.go_to_query()
        # We don't have the luxury of javascript, so this is a multi-step
        # process
        tc.formvalue('query', 'add_filter_0', 'summary')
        tc.submit('add_0')
        tc.formvalue('query', '0_owner', 'nothing')
        tc.submit('rm_filter_0_owner_0')
        tc.formvalue('query', '0_summary', 'TestTicketQueryLinks')
        tc.submit('update')
        query_url = b.get_url()
        tc.find(r'\(%d matches\)' % count)
        for i in range(count):
            tc.find('TestTicketQueryLinks%s' % i)
        # The first result has no previous ticket, only a next one.
        tc.follow('TestTicketQueryLinks0')
        tc.find('class="missing">&larr; Previous Ticket')
        tc.find('title="Ticket #%s">Next Ticket' % ticket_ids[1])
        tc.follow('Back to Query')
        tc.url(re.escape(query_url))
        # A middle result links to both neighbours.
        tc.follow('TestTicketQueryLinks1')
        tc.find('title="Ticket #%s">Previous Ticket' % ticket_ids[0])
        tc.find('title="Ticket #%s">Next Ticket' % ticket_ids[2])
        # The last result has no next ticket.
        tc.follow('Next Ticket')
        tc.find('title="Ticket #%s">Previous Ticket' % ticket_ids[1])
        tc.find('class="missing">Next Ticket &rarr;')
class TestTicketQueryLinksQueryModuleDisabled(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Ticket query links should not be present when the QueryModule
        is disabled."""
        # Toggle the QueryModule plugin through the admin UI; used both to
        # disable it for the test and to restore it in the finally clause.
        def enable_query_module(enable):
            self._tester.go_to_admin('Plugins')
            tc.formvalue('edit-plugin-trac', 'component',
                         'trac.ticket.query.QueryModule')
            tc.formvalue('edit-plugin-trac', 'enable',
                         '%strac.ticket.query.QueryModule'
                         % ('+' if enable else '-'))
            tc.submit()
            tc.find("The following component has been %s:"
                    ".*QueryModule.*\(trac\.ticket\.query\.\*\)"
                    % ("enabled" if enable else "disabled"))
        props = {'cc': 'user1, user2',
                 'component': 'component1',
                 'keywords': 'kw1, kw2',
                 'milestone': 'milestone1',
                 'owner': 'user',
                 'priority': 'major',
                 'reporter': 'admin',
                 'version': '2.0'}
        tid = self._tester.create_ticket(info=props)
        # The milestone cell links to the milestone page, not to a query,
        # so it looks the same whether or not the QueryModule is enabled.
        milestone_cell = \
            r'<td headers="h_milestone">\s*' \
            r'<a class="milestone" href="/milestone/%(milestone)s" ' \
            r'title=".*">\s*%(milestone)s\s*</a>\s*</td>'\
            % {'milestone': props['milestone']}
        try:
            # With the QueryModule enabled, each value renders as a query
            # link (comma-separated values become one link per item).
            for field, value in props.iteritems():
                if field != 'milestone':
                    links = r', '.join(r'<a href="/query.*>%s</a>'
                                       % v.strip() for v in value.split(','))
                    tc.find(r'<td headers="h_%s"( class="searchable")?>'
                            r'\s*%s\s*</td>' % (field, links))
                else:
                    tc.find(milestone_cell)
            enable_query_module(False)
            self._tester.go_to_ticket(tid)
            # With the QueryModule disabled, plain values appear instead.
            for field, value in props.iteritems():
                if field != 'milestone':
                    tc.find(r'<td headers="h_%s"( class="searchable")?>'
                            r'\s*%s\s*</td>' % (field, value))
                else:
                    tc.find(milestone_cell)
        finally:
            enable_query_module(True)
class TestTicketQueryOrClause(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket query with an or clauses"""
        count = 3
        # Plain loop: the original list comprehension was used purely for
        # its side effects and built a throwaway list.
        for i in range(count):
            self._tester.create_ticket(
                summary='TestTicketQueryOrClause%s' % i,
                info={'keywords': str(i)})
        self._tester.go_to_query()
        # Build: summary=...OrClause1 OR keywords=2
        tc.formvalue('query', '0_owner', '')
        tc.submit('rm_filter_0_owner_0')
        tc.formvalue('query', 'add_filter_0', 'summary')
        tc.submit('add_0')
        tc.formvalue('query', '0_summary', 'TestTicketQueryOrClause1')
        tc.formvalue('query', 'add_clause_1', 'keywords')
        tc.submit('add_1')
        tc.formvalue('query', '1_keywords', '2')
        tc.submit('update')
        # Ticket 0 matches neither clause; tickets 1 and 2 match one each.
        tc.notfind('TestTicketQueryOrClause0')
        for i in (1, 2):
            tc.find('TestTicketQueryOrClause%s' % i)
class TestTicketCustomFieldTextNoFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom text field with no format explicitly specified.
        Its contents should be rendered as plain text.
        """
        env = self._testenv.get_trac_environment()
        # Declare a custom text field with an empty (default) format.
        for option, value in (('newfield', 'text'),
                              ('newfield.label', 'Another Custom Field'),
                              ('newfield.format', '')):
            env.config.set('ticket-custom', option, value)
        env.config.save()
        val = "%s %s" % (random_unique_camel(), random_word())
        self._tester.create_ticket(info={'newfield': val})
        # The value appears verbatim in the field's table cell.
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % val)
class TestTicketCustomFieldTextAreaNoFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom textarea field with no format explicitly specified,
        its contents should be rendered as plain text.
        """
        env = self._testenv.get_trac_environment()
        # Declare a custom textarea field with an empty (default) format.
        for option, value in (('newfield', 'textarea'),
                              ('newfield.label', 'Another Custom Field'),
                              ('newfield.format', '')):
            env.config.set('ticket-custom', option, value)
        env.config.save()
        val = "%s %s" % (random_unique_camel(), random_word())
        self._tester.create_ticket(info={'newfield': val})
        # The value appears verbatim in the field's table cell.
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % val)
class TestTicketCustomFieldTextWikiFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom text field with `wiki` format.
        Its contents should be rendered through the wiki engine,
        wiki-links and all.
        Feature added in http://trac.edgewall.org/ticket/1791
        """
        env = self._testenv.get_trac_environment()
        # Declare a custom text field rendered through the wiki engine.
        for option, value in (('newfield', 'text'),
                              ('newfield.label', 'Another Custom Field'),
                              ('newfield.format', 'wiki')):
            env.config.set('ticket-custom', option, value)
        env.config.save()
        word1 = random_unique_camel()
        word2 = random_word()
        self._tester.create_ticket(info={'newfield': "%s %s"
                                                     % (word1, word2)})
        # The CamelCase word becomes a (possibly missing) wiki link.
        wiki = '<a [^>]*>%s\??</a> %s' % (word1, word2)
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % wiki)
class TestTicketCustomFieldTextAreaWikiFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom textarea field with `wiki` format.
        Its contents should be rendered through the wiki engine,
        wiki-links and all.
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'textarea')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.set('ticket-custom', 'newfield.format', 'wiki')
        env.config.save()
        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        self._tester.create_ticket(info={'newfield': val})
        # Textarea content is wrapped in a paragraph; the CamelCase word
        # becomes a (possibly missing) wiki link.
        wiki = '<p>\s*<a [^>]*>%s\??</a> %s<br />\s*</p>' % (word1, word2)
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % wiki)
class TestTicketCustomFieldTextReferenceFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom text field with `reference` format.
        Its contents are treated as a single value
        and are rendered as an auto-query link.
        Feature added in http://trac.edgewall.org/ticket/10643
        """
        env = self._testenv.get_trac_environment()
        # Declare a custom text field rendered as a single query link.
        for option, value in (('newfield', 'text'),
                              ('newfield.label', 'Another Custom Field'),
                              ('newfield.format', 'reference')):
            env.config.set('ticket-custom', option, value)
        env.config.save()
        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        self._tester.create_ticket(info={'newfield': val})
        # The whole value becomes one query link (space encoded as '+').
        query = 'status=!closed&newfield=%s\+%s' % (word1, word2)
        querylink = '<a href="/query\?%s">%s</a>' % (query, val)
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % querylink)
class TestTicketCustomFieldTextListFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom text field with `list` format.
        Its contents are treated as a space-separated list of values
        and are rendered as separate auto-query links per word.
        Feature added in http://trac.edgewall.org/ticket/10643
        """
        env = self._testenv.get_trac_environment()
        # Declare a custom text field rendered as per-word query links.
        for option, value in (('newfield', 'text'),
                              ('newfield.label', 'Another Custom Field'),
                              ('newfield.format', 'list')):
            env.config.set('ticket-custom', option, value)
        env.config.save()
        word1 = random_unique_camel()
        word2 = random_word()
        self._tester.create_ticket(info={'newfield': "%s %s"
                                                     % (word1, word2)})
        # Each word becomes its own substring-match (~) query link.
        querylinks = ' '.join(
            '<a href="/query\?status=!closed&newfield=~%s">%s</a>' % (w, w)
            for w in (word1, word2))
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % querylinks)
class RegressionTestTicket10828(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/10828
        Rendered property changes should be described as lists of added and
        removed items, even in the presence of comma and semicolon separators.
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'text')
        env.config.set('ticket-custom', 'newfield.label',
                       'A Custom Field')
        env.config.set('ticket-custom', 'newfield.format', 'list')
        env.config.save()
        self._tester.create_ticket()
        word1 = random_unique_camel()
        word2 = random_word()
        # Space-separated additions.
        val = "%s %s" % (word1, word2)
        tc.formvalue('propertyform', 'field-newfield', val)
        tc.submit('submit')
        tc.find('<em>%s</em> <em>%s</em> added' % (word1, word2))
        # Mixed comma and semicolon separators: added and removed items
        # must still be itemized correctly.
        word3 = random_unique_camel()
        word4 = random_unique_camel()
        val = "%s, %s; %s" % (word2, word3, word4)
        tc.formvalue('propertyform', 'field-newfield', val)
        tc.submit('submit')
        tc.find('<em>%s</em> <em>%s</em> added; <em>%s</em> removed'
                % (word3, word4, word1))
        # Clearing the field removes every remaining item.
        tc.formvalue('propertyform', 'field-newfield', '')
        tc.submit('submit')
        tc.find('<em>%s</em> <em>%s</em> <em>%s</em> removed'
                % (word2, word3, word4))
        val = "%s %s,%s" % (word1, word2, word3)
        tc.formvalue('propertyform', 'field-newfield', val)
        tc.submit('submit')
        tc.find('<em>%s</em> <em>%s</em> <em>%s</em> added'
                % (word1, word2, word3))
        # The rendered cell keeps the original separators between links.
        query1 = 'status=!closed&newfield=~%s' % word1
        query2 = 'status=!closed&newfield=~%s' % word2
        query3 = 'status=!closed&newfield=~%s' % word3
        querylink1 = '<a href="/query\?%s">%s</a>' % (query1, word1)
        querylink2 = '<a href="/query\?%s">%s</a>' % (query2, word2)
        querylink3 = '<a href="/query\?%s">%s</a>' % (query3, word3)
        querylinks = '%s %s, %s' % (querylink1, querylink2, querylink3)
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % querylinks)
class TestTicketTimeline(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket details on timeline"""
        env = self._testenv.get_trac_environment()
        env.config.set('timeline', 'ticket_show_details', 'yes')
        env.config.save()
        summary = random_sentence(5)
        ticketid = self._tester.create_ticket(summary)
        self._tester.add_comment(ticketid)
        self._tester.go_to_timeline()
        # With only 'ticket' checked, creation events are shown.
        tc.formvalue('prefs', 'ticket', True)
        tc.submit()
        tc.find('Ticket.*#%s.*created' % ticketid)
        # With 'ticket_details' also checked, updates are shown too.
        tc.formvalue('prefs', 'ticket_details', True)
        tc.submit()
        htmltags = '(<[^>]*>)*'
        tc.find('Ticket ' + htmltags + '#' + str(ticketid) + htmltags +
                ' \\(' + summary.split()[0] +
                ' [^\\)]+\\) updated\\s+by\\s+' + htmltags + 'admin', 's')
class TestAdminComponent(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create component"""
        # Creation and page verification are delegated to the tester helper.
        self._tester.create_component()
class TestAdminComponentAuthorization(AuthorizationTestCaseSetup):
    def runTest(self):
        """Check permissions required to access the Ticket Components
        panel."""
        # TICKET_ADMIN must grant access; lesser permissions must not.
        self.test_authorization('/admin/ticket/components', 'TICKET_ADMIN',
                                "Manage Components")
class TestAdminComponentDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate component"""
        name = "DuplicateComponent"
        self._tester.create_component(name)
        # Try adding a second component with the very same name.
        tc.go(self._tester.url + "/admin/ticket/components")
        tc.formvalue('addcomponent', 'name', name)
        tc.submit()
        # The duplicate is rejected with a warning, not a server error.
        tc.notfind(internal_error)
        tc.find('Component .* already exists')
class TestAdminComponentRemoval(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove component"""
        name = "RemovalComponent"
        self._tester.create_component(name)
        # Select the component in the admin list and remove it.
        tc.go(self._tester.url + "/admin/ticket/components")
        tc.formvalue('component_table', 'sel', name)
        tc.submit('remove')
        # It must no longer be listed.
        tc.notfind(name)
class TestAdminComponentNonRemoval(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove no selected component"""
        component_url = self._tester.url + "/admin/ticket/components"
        tc.go(component_url)
        # Submitting "remove" without a selection must show a warning.
        tc.submit('remove', formname='component_table')
        tc.find('No component selected')
class TestAdminComponentDefault(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin set default component"""
        name = "DefaultComponent"
        self._tester.create_component(name)
        component_url = self._tester.url + "/admin/ticket/components"
        tc.go(component_url)
        # Mark the new component as the default and apply.
        tc.formvalue('component_table', 'default', name)
        tc.submit('apply')
        tc.find('type="radio" name="default" value="%s" checked="checked"' % \
                name)
        # It must be preselected on the new-ticket form.
        tc.go(self._tester.url + '/newticket')
        tc.find('<option selected="selected" value="%s">%s</option>'
                % (name, name))
        # Test the "Clear default" button
        self._testenv.set_config('ticket', 'optional_fields', 'component')
        tc.go(component_url)
        tc.submit('clear', formname='component_table')
        tc.notfind('type="radio" name="default" value=".+" checked="checked"')
        # With no default and the field optional, new tickets get an
        # empty component.
        self._tester.create_ticket()
        tc.find('<th id="h_component" class="missing">\s*Component:\s*</th>'
                '\s*<td headers="h_component">\s*</td>')
        self._testenv.remove_config('ticket', 'optional_fields')
class TestAdminComponentDetail(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin component detail"""
        name = "DetailComponent"
        self._tester.create_component(name)
        admin_url = self._tester.url + "/admin/ticket/components"
        tc.go(admin_url)
        tc.follow(name)
        description = 'Some component description'
        tc.formvalue('modcomp', 'description', description)
        # Cancelling must return to the list and discard the edit.
        tc.submit('cancel')
        tc.url(admin_url + '$')
        tc.follow(name)
        tc.notfind(description)
class TestAdminComponentNoneDefined(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """The table should be hidden and help text shown when there are no
        components defined (#11103)."""
        from trac.ticket import model
        env = self._testenv.get_trac_environment()
        # Snapshot the existing components so they can be restored below.
        components = list(model.Component.select(env))
        self._tester.go_to_admin()
        tc.follow(r"\bComponents\b")
        try:
            # Remove every component in a single submit.
            for comp in components:
                tc.formvalue('component_table', 'sel', comp.name)
            tc.submit('remove')
            # The listing table disappears and the help text is shown.
            tc.notfind('<table class="listing" id="complist">')
            tc.find("As long as you don't add any items to the list, this "
                    "field[ \t\n]*will remain completely hidden from the "
                    "user interface.")
        finally:
            # Restore the components so later tests are unaffected.
            for comp in components:
                self._tester.create_component(comp.name, comp.owner,
                                              comp.description)
class TestAdminMilestone(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create milestone"""
        # Creation and page verification are delegated to the tester helper.
        self._tester.create_milestone()
class TestAdminMilestoneAuthorization(AuthorizationTestCaseSetup):
    def runTest(self):
        """Check permissions required to access the Ticket Milestone
        panel."""
        # TICKET_ADMIN must grant access; lesser permissions must not.
        self.test_authorization('/admin/ticket/milestones', 'TICKET_ADMIN',
                                "Manage Milestones")
class TestAdminMilestoneSpace(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create milestone with a space"""
        # Milestone names containing whitespace must be accepted.
        self._tester.create_milestone('Milestone 1')
class TestAdminMilestoneDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate milestone"""
        name = "DuplicateMilestone"
        self._tester.create_milestone(name)
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.url(milestone_url)
        # Try adding a second milestone with the same name.
        tc.formvalue('addmilestone', 'name', name)
        tc.submit()
        tc.notfind(internal_error)
        tc.find('Milestone %s already exists' % name)
        # Guard against an unsubstituted '%s' placeholder leaking into
        # the rendered warning message.
        tc.notfind('%s')
class TestAdminMilestoneDetail(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin modify milestone details"""
        name = "DetailMilestone"
        # Create a milestone
        self._tester.create_milestone(name)
        # Modify the details of the milestone
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.url(milestone_url)
        tc.follow(name)
        tc.url(milestone_url + '/' + name)
        tc.formvalue('modifymilestone', 'description', 'Some description.')
        tc.submit('save')
        tc.url(milestone_url)
        # Make sure the milestone isn't closed
        self._tester.go_to_roadmap()
        tc.find(name)
        # Cancel more modifications
        tc.go(milestone_url)
        tc.url(milestone_url)
        tc.follow(name)
        tc.formvalue('modifymilestone', 'description',
                     '~~Some other description.~~')
        tc.submit('cancel')
        tc.url(milestone_url)
        # Verify the correct modifications show up: the saved description
        # is kept, the cancelled edit is discarded.
        self._tester.go_to_roadmap()
        tc.find('Some description.')
        tc.follow(name)
        tc.find('Some description.')
class TestAdminMilestoneDue(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin milestone duedate"""
        # Use the current UTC time, formatted the way the admin UI shows
        # dates, as the due date.
        due = format_datetime(datetime.now(tz=utc), tzinfo=utc,
                              locale=locale_en)
        self._tester.create_milestone("DueMilestone", due=due)
        tc.find(due)
class TestAdminMilestoneDetailDue(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin modify milestone duedate on detail page"""
        name = "DetailDueMilestone"
        # Create a milestone
        self._tester.create_milestone(name)
        # Modify the details of the milestone
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.url(milestone_url)
        tc.follow(name)
        tc.url(milestone_url + '/' + name)
        # Set the due date to the current (UTC) time.
        duedate = datetime.now(tz=utc)
        duedate_string = format_datetime(duedate, tzinfo=utc,
                                         locale=locale_en)
        tc.formvalue('modifymilestone', 'due', duedate_string)
        tc.submit('save')
        tc.url(milestone_url + '$')
        # The milestone list shows the name followed by the due date.
        tc.find(name + '(<[^>]*>|\\s)*'+ duedate_string, 's')
class TestAdminMilestoneDetailRename(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin rename milestone"""
        name1 = self._tester.create_milestone()
        name2 = random_unique_camel()
        # A ticket referencing the milestone must follow the rename.
        tid = self._tester.create_ticket(info={'milestone': name1})
        milestone_url = self._tester.url + '/admin/ticket/milestones'
        self._tester.go_to_url(milestone_url)
        tc.follow(name1)
        tc.url(milestone_url + '/' + name1)
        tc.formvalue('modifymilestone', 'name', name2)
        tc.submit('save')
        tc.find(r"Your changes have been saved\.")
        tc.find(r"\b%s\b" % name2)
        tc.notfind(r"\b%s\b" % name1)
        # The ticket now links to the renamed milestone and records a
        # field change plus a "Milestone renamed" comment.
        self._tester.go_to_ticket(tid)
        tc.find('<a class="milestone" href="/milestone/%(name)s" '
                'title="No date set">%(name)s</a>' % {'name': name2})
        tc.find('<strong class="trac-field-milestone">Milestone</strong>'
                '[ \t\n]+changed from <em>%s</em> to <em>%s</em>'
                % (name1, name2))
        tc.find("Milestone renamed")
class TestAdminMilestoneCompleted(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin milestone completed"""
        name = "CompletedMilestone"
        self._tester.create_milestone(name)
        admin_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(admin_url)
        tc.url(admin_url)
        tc.follow(name)
        tc.url(admin_url + '/' + name)
        # Mark the milestone completed; saving returns to the list page.
        tc.formvalue('modifymilestone', 'completed', True)
        tc.submit('save')
        tc.url(admin_url + "$")
class TestAdminMilestoneCompletedFuture(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin milestone completed in the future"""
        name = "CompletedFutureMilestone"
        self._tester.create_milestone(name)
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.url(milestone_url)
        tc.follow(name)
        tc.url(milestone_url + '/' + name)
        tc.formvalue('modifymilestone', 'completed', True)
        # A completion date two days in the future must be rejected.
        cdate = datetime.now(tz=utc) + timedelta(days=2)
        cdate_string = format_date(cdate, tzinfo=localtz, locale=locale_en)
        tc.formvalue('modifymilestone', 'completeddate', cdate_string)
        tc.submit('save')
        tc.find('Completion date may not be in the future')
        # And make sure it wasn't marked as completed.
        self._tester.go_to_roadmap()
        tc.find(name)
class TestAdminMilestoneRemove(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove milestone"""
        name = "MilestoneRemove"
        self._tester.create_milestone(name)
        # A ticket referencing the milestone: its field must be cleared
        # when the milestone is deleted.
        tid = self._tester.create_ticket(info={'milestone': name})
        milestone_url = self._tester.url + '/admin/ticket/milestones'
        tc.go(milestone_url)
        tc.formvalue('milestone_table', 'sel', name)
        tc.submit('remove')
        tc.url(milestone_url + '$')
        tc.notfind(name)
        self._tester.go_to_ticket(tid)
        # Empty milestone field plus a change entry and comment.
        tc.find('<th id="h_milestone" class="missing">'
                '[ \t\n]*Milestone:[ \t\n]*</th>')
        tc.find('<strong class="trac-field-milestone">Milestone'
                '</strong>[ \t\n]*<em>%s</em>[ \t\n]*deleted'
                % name)
        tc.find("Milestone deleted")
class TestAdminMilestoneRemoveMulti(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove multiple milestones"""
        # Generate the milestone names once and reuse the list throughout.
        names = ["MultiRemoveMilestone%s" % i for i in range(3)]
        for name in names:
            self._tester.create_milestone(name)
        admin_url = self._tester.url + '/admin/ticket/milestones'
        tc.go(admin_url)
        tc.url(admin_url + '$')
        for name in names:
            tc.find(name)
        # Select all three milestones, then remove them in one submit.
        for name in names:
            tc.formvalue('milestone_table', 'sel', name)
        tc.submit('remove')
        tc.url(admin_url + '$')
        for name in names:
            tc.notfind(name)
class TestAdminMilestoneNonRemoval(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove no selected milestone"""
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        # Submitting "remove" without a selection must show a warning.
        tc.submit('remove', formname='milestone_table')
        tc.find('No milestone selected')
class TestAdminMilestoneDefaults(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin set default ticket milestone, default retarget milestone
        and clear defaults."""
        # Closure over milestone_url, tid, mid1, mid2 (assigned below);
        # clears both defaults and verifies neither is applied anywhere.
        def clear_defaults():
            # Test the "Clear default" button
            tc.go(milestone_url)
            tc.submit('clear', formname='milestone_table')
            tc.notfind('type="radio" name="ticket_default" '
                       'value=".+" checked="checked"')
            tc.notfind('type="radio" name="retarget_default" '
                       'value=".+" checked="checked"')
            self._tester.go_to_ticket(tid)
            tc.find('<th id="h_milestone" class="missing">[ \t\n]+'
                    'Milestone:[ \t\n]+</th>[ \t\n]+'
                    '(?!<td headers="h_milestone">)')
            self._tester.go_to_milestone(mid2)
            tc.submit(formname='deletemilestone')
            tc.notfind('<option selected="selected" value="%s">%s</option>'
                       % (mid1, mid1))
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tid = self._tester.create_ticket()
        mid1 = self._tester.create_milestone()
        mid2 = self._tester.create_milestone()
        self._tester.create_ticket(info={'milestone': mid2})
        # Set default ticket milestone
        tc.go(milestone_url)
        tc.formvalue('milestone_table', 'ticket_default', mid1)
        tc.submit('apply')
        tc.find('type="radio" name="ticket_default" value="%s" '
                'checked="checked"' % mid1)
        tc.notfind('type="radio" name="retarget_default" value=".+" '
                   'checked="checked"')
        # verify it is the default on the newticket page.
        tc.go(self._tester.url + '/newticket')
        tc.find('<option selected="selected" value="%s">%s</option>'
                % (mid1, mid1))
        clear_defaults()
        # Set default retarget to milestone
        tc.go(milestone_url)
        tc.formvalue('milestone_table', 'retarget_default', mid1)
        tc.submit('apply')
        tc.find('type="radio" name="retarget_default" value="%s" '
                'checked="checked"' % mid1)
        tc.notfind('type="radio" name="ticket_default" value=".+" '
                   'checked="checked"')
        # verify it is the default on the confirm delete page.
        self._tester.go_to_milestone(mid2)
        tc.submit(formname='deletemilestone')
        tc.find('<option selected="selected" value="%s">%s</option>'
                % (mid1, mid1))
        clear_defaults()
        # Set both defaults at once.
        tc.go(milestone_url)
        tc.formvalue('milestone_table', 'ticket_default', mid1)
        tc.formvalue('milestone_table', 'retarget_default', mid1)
        tc.submit('apply')
        tc.find('type="radio" name="ticket_default" value="%s" '
                'checked="checked"' % mid1)
        tc.find('type="radio" name="retarget_default" value="%s" '
                'checked="checked"' % mid1)
        # verify it is the default on the newticket page.
        tc.go(self._tester.url + '/newticket')
        tc.find('<option selected="selected" value="%s">%s</option>'
                % (mid1, mid1))
        # verify it is the default on the confirm delete page.
        self._tester.go_to_milestone(mid2)
        tc.submit(formname='deletemilestone')
        tc.find('<option selected="selected" value="%s">%s</option>'
                % (mid1, mid1))
        clear_defaults()
        # Set neither default: applying with no selection is a no-op.
        tc.go(milestone_url)
        tc.submit('apply', formname='milestone_table')
        tc.notfind('type="radio" name="retarget_default" value=".+" '
                   'checked="checked"')
        tc.notfind('type="radio" name="ticket_default" value=".+" '
                   'checked="checked"')
        # verify no default on the newticket page.
        tc.go(self._tester.url + '/newticket')
        tc.find('<th id="h_milestone" class="missing">[ \t\n]+'
                'Milestone:[ \t\n]+</th>[ \t\n]+'
                '(?!<td headers="h_milestone">)')
        # verify none selected on the confirm delete page.
        self._tester.go_to_milestone(mid2)
        tc.submit(formname='deletemilestone')
        tc.notfind('<option selected="selected" value="%s">%s</option>'
                   % (mid1, mid1))
class TestAdminPriority(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create priority"""
        # Creation and page verification are delegated to the tester helper.
        self._tester.create_priority()
class TestAdminPriorityAuthorization(AuthorizationTestCaseSetup):
    def runTest(self):
        """Check permissions required to access the Ticket Priority
        panel."""
        # TICKET_ADMIN must grant access; lesser permissions must not.
        self.test_authorization('/admin/ticket/priority', 'TICKET_ADMIN',
                                "Manage Priorities")
class TestAdminPriorityDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate priority"""
        name = "DuplicatePriority"
        # Creating the same priority twice must yield a warning on the
        # second attempt.
        for _ in range(2):
            self._tester.create_priority(name)
        tc.find('Priority %s already exists' % name)
class TestAdminPriorityModify(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin modify priority"""
        name = "ModifyPriority"
        self._tester.create_priority(name)
        admin_url = self._tester.url + '/admin/ticket/priority'
        tc.go(admin_url)
        tc.url(admin_url + '$')
        tc.find(name)
        tc.follow(name)
        # Rename the priority via its detail form and save.
        renamed = name * 2
        tc.formvalue('modenum', 'name', renamed)
        tc.submit('save')
        tc.url(admin_url + '$')
        tc.find(renamed)
class TestAdminPriorityRemove(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove priority"""
        name = "RemovePriority"
        self._tester.create_priority(name)
        admin_url = self._tester.url + '/admin/ticket/priority'
        tc.go(admin_url)
        tc.url(admin_url + '$')
        tc.find(name)
        # Select the priority and remove it; it must vanish from the list.
        tc.formvalue('enumtable', 'sel', name)
        tc.submit('remove')
        tc.url(admin_url + '$')
        tc.notfind(name)
class TestAdminPriorityRemoveMulti(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove multiple priorities"""
        # Generate the priority names once and reuse the list throughout.
        names = ["MultiRemovePriority%s" % i for i in range(3)]
        for name in names:
            self._tester.create_priority(name)
        admin_url = self._tester.url + '/admin/ticket/priority'
        tc.go(admin_url)
        tc.url(admin_url + '$')
        for name in names:
            tc.find(name)
        # Select all three priorities, then remove them in one submit.
        for name in names:
            tc.formvalue('enumtable', 'sel', name)
        tc.submit('remove')
        tc.url(admin_url + '$')
        for name in names:
            tc.notfind(name)
class TestAdminPriorityNonRemoval(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove no selected priority"""
        priority_url = self._tester.url + "/admin/ticket/priority"
        tc.go(priority_url)
        # Submitting "remove" without a selection must show a warning.
        tc.submit('remove', formname='enumtable')
        tc.find('No priority selected')
class TestAdminPriorityDefault(AdminEnumDefaultTestCaseSetup):
    def runTest(self):
        """Admin default priority"""
        # The shared enum-default helper performs the actual checks.
        name = self._tester.create_priority()
        self.test_default('priority', name)
class TestAdminPriorityDetail(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin modify priority details"""
        name = "DetailPriority"
        # Create a priority
        self._tester.create_priority(name + '1')
        # Modify the details of the priority
        priority_url = self._tester.url + "/admin/ticket/priority"
        tc.go(priority_url)
        tc.url(priority_url + '$')
        tc.follow(name + '1')
        tc.url(priority_url + '/' + name + '1')
        tc.formvalue('modenum', 'name', name + '2')
        tc.submit('save')
        tc.url(priority_url + '$')
        # Cancel more modifications
        tc.go(priority_url)
        # NOTE(review): follows the link matching 'DetailPriority' -- after
        # the rename this matches 'DetailPriority2' by substring; presumably
        # intended, but confirm against twill's link-matching rules.
        tc.follow(name)
        tc.formvalue('modenum', 'name', name + '3')
        tc.submit('cancel')
        tc.url(priority_url + '$')
        # Verify that only the correct modifications show up
        tc.notfind(name + '1')
        tc.find(name + '2')
        tc.notfind(name + '3')
class TestAdminPriorityRenumber(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin renumber priorities"""
        # Determine the current highest order number from the page the
        # browser is already on.  Generator expression instead of building
        # an intermediate list inside max().
        valuesRE = re.compile('<select name="value_([0-9]+)">', re.M)
        max_priority = max(int(x) for x in valuesRE.findall(b.get_html()))
        name = "RenumberPriority"
        self._tester.create_priority(name + '1')
        self._tester.create_priority(name + '2')
        priority_url = self._tester.url + '/admin/ticket/priority'
        tc.go(priority_url)
        tc.url(priority_url + '$')
        tc.find(name + '1')
        tc.find(name + '2')
        # Swap the order numbers of the two new priorities.
        tc.formvalue('enumtable',
                     'value_%s' % (max_priority + 1), str(max_priority + 2))
        tc.formvalue('enumtable',
                     'value_%s' % (max_priority + 2), str(max_priority + 1))
        tc.submit('apply')
        tc.url(priority_url + '$')
        # Verify that their order has changed.
        tc.find(name + '2.*' + name + '1', 's')
class TestAdminPriorityRenumberDup(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin badly renumber priorities"""
        # Make the first priority the 2nd priority, and leave the 2nd priority
        # as the 2nd priority.
        panel = self._tester.url + '/admin/ticket/priority'
        tc.go(panel)
        tc.url(panel + '$')
        # A duplicate order number must be rejected with a validation error.
        tc.formvalue('enumtable', 'value_1', '2')
        tc.submit('apply')
        tc.url(panel + '$')
        tc.find('Order numbers must be unique')
class TestAdminResolution(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create resolution"""
        # Smoke test: the tester helper drives the admin UI and fails if
        # the resolution cannot be created.
        self._tester.create_resolution()
class TestAdminResolutionAuthorization(AuthorizationTestCaseSetup):
    def runTest(self):
        """Check permissions required to access the Ticket Resolutions
        panel."""
        # TICKET_ADMIN must be required to reach the panel.
        self.test_authorization('/admin/ticket/resolution', 'TICKET_ADMIN',
                                "Manage Resolutions")
class TestAdminResolutionDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate resolution"""
        dupe = "DuplicateResolution"
        # Creating the same resolution twice must be rejected.
        self._tester.create_resolution(dupe)
        self._tester.create_resolution(dupe)
        tc.find('Resolution value "%s" already exists' % dupe)
class TestAdminResolutionDefault(AdminEnumDefaultTestCaseSetup):
    def runTest(self):
        """Admin default resolution"""
        # A newly created resolution can be made the default.
        self.test_default('resolution', self._tester.create_resolution())
class TestAdminSeverity(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create severity"""
        # Smoke test: create a severity through the admin UI.
        self._tester.create_severity()
class TestAdminSeverityAuthorization(AuthorizationTestCaseSetup):
    def runTest(self):
        """Check permissions required to access the Ticket Severities
        panel."""
        # TICKET_ADMIN must be required to reach the panel.
        self.test_authorization('/admin/ticket/severity', 'TICKET_ADMIN',
                                "Manage Severities")
class TestAdminSeverityDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate severity"""
        dupe = "DuplicateSeverity"
        # Creating the same severity twice must be rejected.
        self._tester.create_severity(dupe)
        self._tester.create_severity(dupe)
        tc.find('Severity value "%s" already exists' % dupe)
class TestAdminSeverityDefault(AdminEnumDefaultTestCaseSetup):
    def runTest(self):
        """Admin default severity"""
        # A newly created severity can be made the default.
        self.test_default('severity', self._tester.create_severity())
class TestAdminType(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create type"""
        # Smoke test: create a ticket type through the admin UI.
        self._tester.create_type()
class TestAdminTypeAuthorization(AuthorizationTestCaseSetup):
    def runTest(self):
        """Check permissions required to access the Ticket Types
        panel."""
        # TICKET_ADMIN must be required to reach the panel.
        self.test_authorization('/admin/ticket/type', 'TICKET_ADMIN',
                                "Manage Ticket Types")
class TestAdminTypeDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate type"""
        dupe = "DuplicateType"
        # Creating the same ticket type twice must be rejected.
        self._tester.create_type(dupe)
        self._tester.create_type(dupe)
        tc.find('Type value "%s" already exists' % dupe)
class TestAdminTypeDefault(AdminEnumDefaultTestCaseSetup):
    def runTest(self):
        """Admin default type"""
        # A newly created ticket type can be made the default.
        self.test_default('type', self._tester.create_type())
class TestAdminVersion(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create version"""
        # Smoke test: create a version through the admin UI.
        self._tester.create_version()
class TestAdminVersionAuthorization(AuthorizationTestCaseSetup):
    def runTest(self):
        """Check permissions required to access the Versions panel."""
        # TICKET_ADMIN must be required to reach the panel.
        self.test_authorization('/admin/ticket/versions', 'TICKET_ADMIN',
                                "Manage Versions")
class TestAdminVersionDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate version"""
        dupe = "DuplicateVersion"
        self._tester.create_version(dupe)
        panel = self._tester.url + "/admin/ticket/versions"
        tc.go(panel)
        tc.url(panel)
        # Adding the same name again must be rejected without producing
        # an internal error page.
        tc.formvalue('addversion', 'name', dupe)
        tc.submit()
        tc.notfind(internal_error)
        tc.find("Version %s already exists." % dupe)
class TestAdminVersionDetail(FunctionalTwillTestCaseSetup):
    # This is somewhat pointless... the only place to find the version
    # description is on the version details page.
    def runTest(self):
        """Admin version details"""
        version = "DetailVersion"
        self._tester.create_version(version)
        panel = self._tester.url + "/admin/ticket/versions"
        tc.go(panel)
        tc.url(panel)
        tc.follow(version)
        # Save a description, then re-open the detail page and check it.
        desc = 'Some version description.'
        tc.formvalue('modifyversion', 'description', desc)
        tc.submit('save')
        tc.url(panel)
        tc.follow(version)
        tc.find(desc)
class TestAdminVersionDetailTime(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin version detail set time"""
        name = "DetailTimeVersion"
        self._tester.create_version(name)
        version_admin = self._tester.url + "/admin/ticket/versions"
        tc.go(version_admin)
        tc.url(version_admin)
        tc.follow(name)
        # Clear the version's time field and save.
        tc.formvalue('modifyversion', 'time', '')
        tc.submit('save')
        tc.url(version_admin + '$')
        # The version must still appear in the listing table (matched up
        # to its "default" radio button) after clearing the time.
        tc.find(name + '(<[^>]*>|\\s)*<[^>]* name="default" value="%s"'
                % name, 's')
class TestAdminVersionDetailCancel(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin cancel version details modification"""
        name = "DetailVersion"
        self._tester.create_version(name)
        version_admin = self._tester.url + "/admin/ticket/versions"
        tc.go(version_admin)
        tc.url(version_admin)
        tc.follow(name)
        desc = 'Some other version description.'
        tc.formvalue('modifyversion', 'description', desc)
        # Cancelling must discard the description change.
        tc.submit('cancel')
        tc.url(version_admin)
        tc.follow(name)
        tc.notfind(desc)
class TestAdminVersionRemove(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove version"""
        version = "VersionRemove"
        self._tester.create_version(version)
        panel = self._tester.url + "/admin/ticket/versions"
        tc.go(panel)
        # Select the version and remove it; it must vanish from the table.
        tc.formvalue('version_table', 'sel', version)
        tc.submit('remove')
        tc.url(panel + '$')
        tc.notfind(version)
class TestAdminVersionRemoveMulti(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove multiple versions"""
        base = "MultiRemoveVersion"
        count = 3
        names = ["%s%s" % (base, n) for n in range(count)]
        for version in names:
            self._tester.create_version(version)
        panel = self._tester.url + '/admin/ticket/versions'
        tc.go(panel)
        tc.url(panel + '$')
        # All created versions are listed ...
        for version in names:
            tc.find(version)
        # ... select every one and remove them in a single submit ...
        for version in names:
            tc.formvalue('version_table', 'sel', version)
        tc.submit('remove')
        tc.url(panel + '$')
        # ... after which none of them remain.
        for version in names:
            tc.notfind(version)
class TestAdminVersionNonRemoval(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove no selected version"""
        # Submitting the removal form without a selection must yield a
        # validation message.
        panel = self._tester.url + "/admin/ticket/versions"
        tc.go(panel)
        tc.submit('remove', formname='version_table')
        tc.find('No version selected')
class TestAdminVersionDefault(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin set default version"""
        name = "DefaultVersion"
        self._tester.create_version(name)
        version_url = self._tester.url + "/admin/ticket/versions"
        tc.go(version_url)
        # Mark the new version as the default and apply.
        tc.formvalue('version_table', 'default', name)
        tc.submit('apply')
        tc.find('type="radio" name="default" value="%s" checked="checked"' % \
                name)
        # verify it is the default on the newticket page.
        tc.go(self._tester.url + '/newticket')
        tc.find('<option selected="selected" value="%s">%s</option>'
                % (name, name))
        # Test the "Clear default" button
        tc.go(version_url)
        tc.submit('clear', formname='version_table')
        tc.notfind('type="radio" name="default" value=".+" checked="checked"')
        # With no default set, a newly created ticket must show the
        # Version field as missing/empty.
        self._tester.create_ticket()
        tc.find('<th id="h_version" class="missing">[ \t\n]+'
                'Version:[ \t\n]+</th>[ \t\n]+'
                '(?!<td headers="h_version">)')
class TestNewReport(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Create a new report"""
        # Create a report through the web UI; the SQL itself is only
        # checked for being accepted by the report module, not for its
        # result set.
        self._tester.create_report(
            'Closed tickets, modified in the past 7 days by owner.', """
            SELECT DISTINCT p.value AS __color__,
              id AS ticket,
              summary, component, milestone, t.type AS type,
              reporter, time AS created,
              changetime AS modified, description AS _description,
              priority,
              round(julianday('now') -
                    julianday(changetime, 'unixepoch')) as days,
              resolution,
              owner as __group__
            FROM ticket t
              LEFT JOIN enum p ON p.name = t.priority AND
                                  p.type = 'priority'
            WHERE ((julianday('now') -
                    julianday(changetime, 'unixepoch')) < 7)
              AND status = 'closed'
            ORDER BY __group__, changetime, p.value
            """,
            'List of all tickets that are closed, and have been modified in'
            ' the past 7 days, grouped by owner.\n\n(So they have probably'
            ' been closed this week.)')
class TestReportRealmDecoration(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Realm/id decoration in report"""
        # Build a report whose rows carry _realm/id (and optional parent
        # realm/id) columns, so the report renderer decorates each row
        # with a link to the matching resource.
        self._tester.create_report(
            'Realm/id decoration',
            """\
SELECT NULL AS _realm, NULL AS id, NULL AS _parent_realm, NULL AS _parent_id
UNION ALL SELECT 'ticket', '42', NULL, NULL
UNION ALL SELECT 'report', '42', NULL, NULL
UNION ALL SELECT 'milestone', '42', NULL, NULL
UNION ALL SELECT 'wiki', 'WikiStart', NULL, NULL
UNION ALL SELECT 'changeset', '42/trunk', NULL, NULL
UNION ALL SELECT 'changeset', '42/trunk', 'repository', 'repo'
UNION ALL SELECT 'changeset', '43/tags', 'repository', ''
UNION ALL SELECT 'attachment', 'file.ext', 'ticket', '42'
UNION ALL SELECT 'attachment', 'file.ext', 'milestone', '42'
UNION ALL SELECT 'attachment', 'file.ext', 'wiki', 'WikiStart'
""", '')
        # One assertion per realm variant exercised above.
        tc.find('<a title="View ticket" href="[^"]*?/ticket/42">#42</a>')
        tc.find('<a title="View report" href="[^"]*?/report/42">report:42</a>')
        tc.find('<a title="View milestone" href="[^"]*?/milestone/42">42</a>')
        tc.find('<a title="View wiki" href="[^"]*?/wiki/WikiStart">'
                'WikiStart</a>')
        tc.find('<a title="View changeset" href="[^"]*?/changeset/42/trunk">'
                'Changeset 42/trunk</a>')
        tc.find('<a title="View changeset" '
                'href="[^"]*?/changeset/42/trunk/repo">'
                'Changeset 42/trunk in repo</a>')
        tc.find('<a title="View changeset" href="[^"]*?/changeset/43/tags">'
                'Changeset 43/tags</a>')
        tc.find('<a title="View attachment" '
                'href="[^"]*?/attachment/ticket/42/file[.]ext">'
                'file[.]ext [(]Ticket #42[)]</a>')
        tc.find('<a title="View attachment" '
                'href="[^"]*?/attachment/milestone/42/file[.]ext">'
                'file[.]ext [(]Milestone 42[)]</a>')
        tc.find('<a title="View attachment" '
                'href="[^"]*?/attachment/wiki/WikiStart/file[.]ext">'
                'file[.]ext [(]WikiStart[)]</a>')
class TestReportDynamicVariables(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Generate a report with dynamic variables in title, summary
        and SQL"""
        summary = random_sentence(3)
        fields = {'component': 'component1'}
        ticket_id = self._tester.create_ticket(summary, fields)
        # $USER and $COMPONENT must be substituted in the title, the SQL
        # and the report summary when the report is rendered.
        reportnum = self._tester.create_report(
            "$USER's tickets for component $COMPONENT",
            """SELECT DISTINCT
              t.id AS ticket, summary, component, version, milestone,
              t.type AS type, priority, t.time AS created,
              t.changetime AS _changetime, summary AS _description,
              reporter AS _reporter
            FROM ticket t
              LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
              LEFT JOIN ticket_change tc ON tc.ticket = t.id AND tc.author = $USER
                AND tc.field = 'comment'
            WHERE t.status <> 'closed'
              AND component = $COMPONENT
              AND (owner = $USER OR reporter = $USER OR author = $USER)
            """,
            "Tickets assigned to $USER for component $COMPONENT"
        )
        self._tester.go_to_report(reportnum, fields)
        tc.find("admin's tickets for component component1")
        tc.find("Tickets assigned to admin for component component1")
        tc.find('<a title="View ticket" href="/ticket/%s">%s</a>' %
                (ticket_id, summary))
class TestMilestone(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Create a milestone."""
        self._tester.go_to_roadmap()
        tc.submit(formname='add')
        tc.url(self._tester.url + '/milestone\?action=new')
        name = random_unique_camel()
        # Due date one hour in the future, rendered in the local timezone.
        due = format_datetime(datetime.now(tz=utc) + timedelta(hours=1),
                              tzinfo=localtz, locale=locale_en)
        tc.formvalue('edit', 'name', name)
        tc.formvalue('edit', 'due', True)
        tc.formvalue('edit', 'duedate', due)
        # The retarget option must not be offered when creating a new
        # milestone (there are no associated tickets yet).
        tc.notfind("Retarget associated open tickets to milestone:")
        tc.submit('add')
        tc.url(self._tester.url + '/milestone/' + name + '$')
        tc.find(r'<h1>Milestone %s</h1>' % name)
        tc.find(due)
        # A ticket assigned to the milestone links to it with a due hint.
        self._tester.create_ticket(info={'milestone': name})
        tc.find('<a class="milestone" href="/milestone/%(name)s" '
                'title="Due in .+ (.+)">%(name)s</a>'
                % {'name': name})
class TestMilestoneAddAttachment(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Add attachment to a milestone. Test that the attachment
        button reads 'Attach file' when no files have been attached, and
        'Attach another file' when there are existing attachments.
        Feature added in http://trac.edgewall.org/ticket/10281."""
        name = self._tester.create_milestone()
        self._tester.go_to_milestone(name)
        tc.find("Attach file")
        filename = self._tester.attach_file_to_milestone(name)
        # After one attachment the button label and the attachment count
        # must both reflect it, and a zip download link must be offered.
        self._tester.go_to_milestone(name)
        tc.find("Attach another file")
        tc.find('Attachments <span class="trac-count">\(1\)</span>')
        tc.find(filename)
        tc.find('Download all attachments as:\s+<a rel="nofollow" '
                'href="/zip-attachment/milestone/%s/">.zip</a>' % name)
class TestMilestoneClose(FunctionalTwillTestCaseSetup):
    """Close a milestone and verify that tickets are retargeted
    to the selected milestone"""
    def runTest(self):
        name = self._tester.create_milestone()
        # Check that hint is shown when there are no tickets to retarget
        self._tester.go_to_milestone(name)
        tc.submit(formname='editmilestone')
        tc.find("There are no tickets associated with this milestone.")
        retarget_to = self._tester.create_milestone()
        tid1 = self._tester.create_ticket(info={'milestone': name})
        tid2 = self._tester.create_ticket(info={'milestone': name})
        # Close tid2 (the browser is still on its page after creation),
        # so only the open tickets get retargeted later.
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.formvalue('propertyform',
                     'action_resolve_resolve_resolution', 'fixed')
        tc.submit('submit')
        # Add a ticket and check that it is retargeted when milestone closed
        tid = self._tester.create_ticket(info={'milestone': name})
        self._tester.go_to_milestone(name)
        # Completion date in the past so the milestone counts as closed.
        completed = format_datetime(datetime.now(tz=utc) - timedelta(hours=1),
                                    tzinfo=localtz, locale=locale_en)
        tc.submit(formname='editmilestone')
        tc.formvalue('edit', 'completed', True)
        tc.formvalue('edit', 'completeddate', completed)
        tc.formvalue('edit', 'target', retarget_to)
        tc.submit('save')
        tc.url(self._tester.url + '/milestone/%s$' % name)
        tc.find('The open tickets associated with milestone "%s" '
                'have been retargeted to milestone "%s".'
                % (name, retarget_to))
        tc.find("Completed")
        # The open ticket must have been moved to the retarget milestone...
        self._tester.go_to_ticket(tid1)
        tc.find('<a class="milestone" href="/milestone/%(name)s" '
                'title="No date set">%(name)s</a>' % {'name': retarget_to})
        tc.find('changed from <em>%s</em> to <em>%s</em>'
                % (name, retarget_to))
        tc.find("Ticket retargeted after milestone closed")
        # ...while the closed ticket stays on the closed milestone.
        self._tester.go_to_ticket(tid2)
        tc.find('<a class="closed milestone" href="/milestone/%(name)s" '
                'title="Completed .+ ago (.+)">%(name)s</a>'
                % {'name': name})
        tc.notfind('changed from <em>%s</em> to <em>%s</em>'
                   % (name, retarget_to))
        tc.notfind("Ticket retargeted after milestone closed")
class TestMilestoneDelete(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Delete a milestone and verify that tickets are retargeted
        to the selected milestone."""
        def submit_delete(name, retarget_to=None, tid=None):
            """Submit the pending delete form, then verify the roadmap
            page, the retarget notice, and (when `tid` is given) the
            affected ticket's change log."""
            tc.submit('delete', formname='edit')
            tc.url(self._tester.url + '/roadmap')
            tc.find('The milestone "%s" has been deleted.' % name)
            tc.notfind('Milestone:.*%s' % name)
            retarget_notice = 'The tickets associated with milestone "%s" ' \
                              'have been retargeted to milestone "%s".' \
                              % (name, str(retarget_to))
            if retarget_to is not None:
                tc.find('Milestone:.*%s' % retarget_to)
            if tid is not None:
                tc.find(retarget_notice)
                self._tester.go_to_ticket(tid)
                tc.find('Changed[ \t\n]+<a .*>\d+ seconds? ago</a>'
                        '[ \t\n]+by <span class="trac-author">admin</span>')
                if retarget_to is not None:
                    # Ticket was moved to the retarget milestone.
                    tc.find('<a class="milestone" href="/milestone/%(name)s" '
                            'title="No date set">%(name)s</a>'
                            % {'name': retarget_to})
                    tc.find('<strong class="trac-field-milestone">Milestone'
                            '</strong>[ \t\n]+changed from <em>%s</em> to '
                            '<em>%s</em>' % (name, retarget_to))
                else:
                    # No retarget milestone: the field is simply deleted.
                    tc.find('<th id="h_milestone" class="missing">'
                            '[ \t\n]*Milestone:[ \t\n]*</th>')
                    tc.find('<strong class="trac-field-milestone">Milestone'
                            '</strong>[ \t\n]*<em>%s</em>[ \t\n]*deleted'
                            % name)
                tc.find("Ticket retargeted after milestone deleted")
            else:
                tc.notfind(retarget_notice)
        # No tickets associated with milestone to be retargeted
        name = self._tester.create_milestone()
        self._tester.go_to_milestone(name)
        tc.submit(formname='deletemilestone')
        tc.find("There are no tickets associated with this milestone.")
        submit_delete(name)
        # Don't select a milestone to retarget to
        name = self._tester.create_milestone()
        tid = self._tester.create_ticket(info={'milestone': name})
        self._tester.go_to_milestone(name)
        tc.submit(formname='deletemilestone')
        submit_delete(name, tid=tid)
        # Select a milestone to retarget to
        name = self._tester.create_milestone()
        retarget_to = self._tester.create_milestone()
        tid = self._tester.create_ticket(info={'milestone': name})
        self._tester.go_to_milestone(name)
        tc.submit(formname='deletemilestone')
        tc.formvalue('edit', 'target', retarget_to)
        submit_delete(name, retarget_to, tid)
        # Just navigate to the page and select cancel
        name = self._tester.create_milestone()
        tid = self._tester.create_ticket(info={'milestone': name})
        self._tester.go_to_milestone(name)
        tc.submit(formname='deletemilestone')
        tc.submit('cancel', formname='edit')
        # Cancelling must leave the milestone and the ticket untouched.
        tc.url(self._tester.url + '/milestone/%s' % name)
        tc.notfind('The milestone "%s" has been deleted.' % name)
        tc.notfind('The tickets associated with milestone "%s" '
                   'have been retargeted to milestone' % name)
        self._tester.go_to_ticket(tid)
        tc.find('<a class="milestone" href="/milestone/%(name)s" '
                'title="No date set">%(name)s</a>' % {'name': name})
        tc.notfind('<strong class="trac-field-milestone">Milestone</strong>'
                   '[ \t\n]*<em>%s</em>[ \t\n]*deleted' % name)
        tc.notfind("Ticket retargeted after milestone deleted<br />")
class TestMilestoneRename(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Rename a milestone and verify that the rename is shown in the
        change history for the associated tickets."""
        name = self._tester.create_milestone()
        new_name = random_unique_camel()
        tid = self._tester.create_ticket(info={'milestone': name})
        self._tester.go_to_milestone(name)
        tc.submit(formname='editmilestone')
        tc.formvalue('edit', 'name', new_name)
        tc.submit('save')
        tc.url(self._tester.url + '/milestone/' + new_name)
        tc.find("Your changes have been saved.")
        tc.find(r"<h1>Milestone %s</h1>" % new_name)
        # The associated ticket must show the rename in its change log.
        self._tester.go_to_ticket(tid)
        tc.find('Changed[ \t\n]+<a .*>\d+ seconds? ago</a>[ \t\n]+'
                'by <span class="trac-author">admin</span>')
        tc.find('<a class="milestone" href="/milestone/%(name)s" '
                'title="No date set">%(name)s</a>' % {'name': new_name})
        tc.find('<strong class="trac-field-milestone">Milestone</strong>'
                '[ \t\n]+changed from <em>%s</em> to <em>%s</em>'
                % (name, new_name))
        tc.find("Milestone renamed")
class RegressionTestRev5665(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create version without release time (r5665)"""
        # An empty release time must be accepted (fixed in r5665).
        self._tester.create_version(releasetime='')
class RegressionTestRev5994(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of the column label fix in r5994"""
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'custfield', 'text')
        env.config.set('ticket-custom', 'custfield.label', 'Custom Field')
        env.config.save()
        try:
            self._tester.go_to_query()
            # The custom field's checkbox must carry the configured label,
            # not the raw field name.
            tc.find('<label>( |\\n)*<input[^<]*value="custfield"'
                    '[^<]*/>( |\\n)*Custom Field( |\\n)*</label>', 's')
        finally:
            # Cleanup is deliberately disabled; the custom field stays in
            # place.  NOTE(review): the commented lines mention
            # restrict_owner, which looks copy-pasted from another test
            # -- confirm intent before re-enabling.
            pass
            #env.config.set('ticket', 'restrict_owner', 'no')
            #env.config.save()
class RegressionTestTicket4447(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/4447"""
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'text')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.save()
        ticketid = self._tester.create_ticket(summary="Hello World")
        self._tester.add_comment(ticketid)
        # The new (still empty) custom field must not show up as a
        # spurious change in the comment's change log.
        tc.notfind('<strong class="trac-field-newfield">Another Custom Field'
                   '</strong>[ \t\n]+<em></em>[ \t\n]+deleted')
        tc.notfind('set to')
class RegressionTestTicket4630a(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/4630 a"""
        env = self._testenv.get_trac_environment()
        env.config.set('ticket', 'restrict_owner', 'yes')
        env.config.save()
        try:
            # Make sure 'user' has logged in.
            # (Each login/logout cycle records the user so they appear in
            # the restricted-owner dropdown.)
            self._tester.go_to_front()
            self._tester.logout()
            self._tester.login('user')
            self._tester.go_to_front()
            self._tester.logout()
            self._tester.login('joe')
            self._tester.go_to_front()
            self._tester.logout()
            self._tester.login('admin')
            self._tester.create_ticket()
            # With restrict_owner on, reassign must offer a dropdown that
            # includes 'user'.
            tc.formvalue('propertyform', 'action', 'reassign')
            tc.find('reassign_reassign_owner')
            tc.formvalue('propertyform', 'action_reassign_reassign_owner',
                         'user')
            tc.submit('submit')
        finally:
            # Undo the config change for now since this (failing)
            # regression test causes problems for later tests.
            env.config.set('ticket', 'restrict_owner', 'no')
            env.config.save()
class RegressionTestTicket4630b(FunctionalTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/4630 b"""
        # NOTE: this must be run after RegressionTestTicket4630 (user must
        # have logged in)
        from trac.perm import PermissionSystem
        env = self._testenv.get_trac_environment()
        perm = PermissionSystem(env)
        # Only 'admin' holds TRAC_ADMIN, while TICKET_MODIFY is granted
        # to all three known users.
        users = perm.get_users_with_permission('TRAC_ADMIN')
        self.assertEqual(users, ['admin'])
        users = perm.get_users_with_permission('TICKET_MODIFY')
        self.assertEqual(sorted(users), ['admin', 'joe', 'user'])
class RegressionTestTicket5022(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5022
        """
        summary = 'RegressionTestTicket5022'
        tid = self._tester.create_ticket(summary=summary)
        # Requesting /newticket with an existing ticket id must not
        # pre-fill the form from that ticket.
        tc.go('%s/newticket?id=%s' % (self._tester.url, tid))
        tc.notfind(summary)
class RegressionTestTicket5394a(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5394 a
        Order user list alphabetically in (re)assign action
        """
        # set restrict_owner config
        env = self._testenv.get_trac_environment()
        env.config.set('ticket', 'restrict_owner', 'yes')
        env.config.save()
        self._tester.go_to_front()
        self._tester.logout()
        test_users = ['alice', 'bob', 'jane', 'john', 'charlie', 'alan',
                      'zorro']
        # Apparently it takes a sec for the new user to be recognized by the
        # environment.  So we add all the users, then log in as the users
        # in a second loop.  This should be faster than adding a sleep(1)
        # between the .adduser and .login steps.
        for user in test_users:
            self._testenv.adduser(user)
        for user in test_users:
            self._tester.login(user)
            self._tester.go_to_front()
            self._tester.logout()
        self._tester.login('admin')
        self._tester.create_ticket("regression test 5394a")
        # The owner dropdown must list every known user alphabetically.
        options = 'id="action_reassign_reassign_owner">' + \
                  ''.join(['<option[^>]*>%s</option>' % user for user in
                           sorted(test_users + ['admin', 'joe', 'user'])])
        tc.find(to_utf8(options), 's')
        # We don't have a good way to fully delete a user from the Trac db.
        # Once we do, we may want to cleanup our list of users here.
class RegressionTestTicket5394b(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5394 b
        Order user list alphabetically on new ticket page
        """
        # Must run after RegressionTestTicket5394a
        self._tester.go_to_front()
        tc.follow('New Ticket')
        tc.find('Create New Ticket')
        test_users = ['alice', 'bob', 'jane', 'john', 'charlie', 'alan',
                      'zorro']
        # The owner dropdown must list the known users in alphabetical
        # order: match each expected user name in sorted order with
        # anything in between.  (A stricter pattern matching the full
        # <option> markup was dead code -- it was immediately overwritten
        # by this assignment -- and has been removed.)
        options = '.*'.join(sorted(test_users + ['admin', 'user']))
        tc.find(options, 's')
# TODO: this should probably be changed to be a testsuite derived from
# TestSetup
class RegressionTestTicket5497prep(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5497 prep
        When the component is changed, the owner should update to the
        default owner of the component.
        If component is changed and the owner is changed (reassigned action
        for open tickets in the basic workflow), the owner should be the
        specified owner, not the owner of the component.
        """
        # The default owner for the component we're using for this testcase
        # is 'user', and we'll manually assign to 'admin'.
        # (The four 5497a-d tests below all use this component.)
        self._tester.create_component('regression5497', 'user')
class RegressionTestTicket5497a(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5497 a
        Open ticket, component changed, owner not changed"""
        # Changing only the component must hand the ticket to that
        # component's default owner ('user', set up in the prep test).
        self._tester.create_ticket("regression test 5497a")
        tc.formvalue('propertyform', 'field-component', 'regression5497')
        tc.submit('submit')
        tc.find(regex_owned_by('user'))
class RegressionTestTicket5497b(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5497 b
        Open ticket, component changed, owner changed"""
        # An explicit reassignment must win over the component's default
        # owner.
        self._tester.create_ticket("regression test 5497b")
        tc.formvalue('propertyform', 'field-component', 'regression5497')
        tc.formvalue('propertyform', 'action', 'reassign')
        tc.formvalue('propertyform', 'action_reassign_reassign_owner',
                     'admin')
        tc.submit('submit')
        tc.notfind(regex_owned_by('user'))
        tc.find(regex_owned_by('admin'))
class RegressionTestTicket5497c(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5497 c
        New ticket, component changed, owner not changed"""
        # A new ticket with only the component set must get the
        # component's default owner.
        self._tester.create_ticket("regression test 5497c",
                                   {'component':'regression5497'})
        tc.find(regex_owned_by('user'))
class RegressionTestTicket5497d(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5497 d
        New ticket, component changed, owner changed"""
        # An explicitly supplied owner must win over the component's
        # default owner on a new ticket.
        self._tester.create_ticket("regression test 5497d",
                                   {'component':'regression5497',
                                    'owner':'admin'})
        tc.find(regex_owned_by('admin'))
class RegressionTestTicket5602(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5602"""
        # Create a set of tickets, and assign them all to a milestone
        milestone = self._tester.create_milestone()
        ids = [self._tester.create_ticket(info={'milestone': milestone})
               for x in range(5)]
        # Need a ticket in each state: new, assigned, accepted, closed,
        # reopened
        # leave ids[0] as new
        # make ids[1] be assigned
        self._tester.go_to_ticket(ids[1])
        tc.formvalue('propertyform', 'action', 'reassign')
        tc.formvalue('propertyform', 'action_reassign_reassign_owner',
                     'admin')
        tc.submit('submit')
        # make ids[2] be accepted
        self._tester.go_to_ticket(ids[2])
        tc.formvalue('propertyform', 'action', 'accept')
        tc.submit('submit')
        # make ids[3] be closed
        self._tester.go_to_ticket(ids[3])
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.formvalue('propertyform', 'action_resolve_resolve_resolution',
                     'fixed')
        tc.submit('submit')
        # make ids[4] be reopened
        self._tester.go_to_ticket(ids[4])
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.formvalue('propertyform', 'action_resolve_resolve_resolution',
                     'fixed')
        tc.submit('submit')
        # FIXME: we have to wait a second to avoid "IntegrityError: columns
        # ticket, time, field are not unique"
        time.sleep(1)
        tc.formvalue('propertyform', 'action', 'reopen')
        tc.submit('submit')
        # NOTE(review): tc.show() dumps the current page -- looks like a
        # debugging leftover; confirm before removing.
        tc.show()
        tc.notfind("Python Traceback")
        # Go to the milestone and follow the links to the closed and active
        # tickets.
        tc.go(self._tester.url + "/roadmap")
        tc.follow(milestone)
        tc.follow("closed:")
        tc.find("Resolution:[ \t\n]+fixed")
        tc.back()
        tc.follow("active:")
        # Every non-closed state must be represented; closed must not be.
        tc.find("Status:[ \t\n]+new")
        tc.find("Status:[ \t\n]+assigned")
        tc.find("Status:[ \t\n]+accepted")
        tc.notfind("Status:[ \t\n]+closed")
        tc.find("Status:[ \t\n]+reopened")
class RegressionTestTicket5687(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5687"""
        # Creating a ticket while logged in as a non-admin user must work;
        # the helper raises if ticket creation fails.
        self._tester.go_to_front()
        self._tester.logout()
        self._tester.login('user')
        self._tester.create_ticket()
        self._tester.logout()
        self._tester.login('admin')
class RegressionTestTicket5930(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5930
        TypeError: from_string() takes exactly 3 non-keyword arguments (4
        given)
        Caused by a saved query
        """
        # A report defined as a saved query must render without an
        # internal error.
        self._tester.create_report('Saved Query', 'query:version=1.0', '')
        tc.notfind(internal_error)
        # TODO: Add a testcase for the following:
        # Can you also throw in addition of a 1.0 ticket and a 2.0 ticket
        # as part of the demo env, then see that only the correct one shows
        # up in the report?
class RegressionTestTicket6048(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/6048"""
        # Setup the DeleteTicket plugin.  Close the file handles
        # explicitly instead of leaking them (the original used
        # open(...).read() / open(..., 'w').write(...) without close).
        f = open(os.path.join(self._testenv.trac_src, 'sample-plugins',
                              'workflow', 'DeleteTicket.py'))
        try:
            plugin = f.read()
        finally:
            f.close()
        f = open(os.path.join(self._testenv.tracdir, 'plugins',
                              'DeleteTicket.py'), 'w')
        try:
            f.write(plugin)
        finally:
            f.close()
        env = self._testenv.get_trac_environment()
        prevconfig = env.config.get('ticket', 'workflow')
        env.config.set('ticket', 'workflow',
                       prevconfig + ',DeleteTicketActionController')
        env.config.save()
        env = self._testenv.get_trac_environment()  # reload environment
        # Create a ticket and delete it
        ticket_id = self._tester.create_ticket('RegressionTestTicket6048')
        # (Create a second ticket so that the ticket id does not get reused
        # and confuse the tester object.)
        self._tester.create_ticket(summary='RegressionTestTicket6048b')
        self._tester.go_to_ticket(ticket_id)
        tc.find('delete ticket')
        tc.formvalue('propertyform', 'action', 'delete')
        tc.submit('submit')
        # The deleted ticket must be gone.
        self._tester.go_to_ticket(ticket_id)
        tc.find('Error: Invalid ticket number')
        tc.find('Ticket %s does not exist.' % ticket_id)
        # Remove the DeleteTicket plugin
        env.config.set('ticket', 'workflow', prevconfig)
        env.config.save()
        env = self._testenv.get_trac_environment()  # reload environment
        # Also clean up the compiled variants of the plugin module.
        for ext in ('py', 'pyc', 'pyo'):
            filename = os.path.join(self._testenv.tracdir, 'plugins',
                                    'DeleteTicket.%s' % ext)
            if os.path.exists(filename):
                os.unlink(filename)
class RegressionTestTicket6747(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/6747"""
        # Configure the resolve action to also set a fixed owner.
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-workflow', 'resolve.operations',
                       'set_resolution,set_owner')
        env.config.set('ticket-workflow', 'resolve.set_owner',
                       'a_specified_owner')
        env.config.save()
        try:
            self._tester.create_ticket("RegressionTestTicket6747")
            # The owner hint must render cleanly, without the rendering
            # glitch that fused the owner name with the following word.
            tc.find("a_specified_owner")
            tc.notfind("a_specified_owneras")
        finally:
            # Undo the config change to avoid causing problems for later
            # tests.
            env.config.set('ticket-workflow', 'resolve.operations',
                           'set_resolution')
            env.config.remove('ticket-workflow', 'resolve.set_owner')
            env.config.save()
class RegressionTestTicket6879a(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/6879 a
        Make sure that previewing a close does not make the available actions
        be those for the close status.
        """
        # create a ticket, then preview resolving the ticket twice
        self._tester.create_ticket("RegressionTestTicket6879 a")
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.formvalue('propertyform', 'action_resolve_resolve_resolution',
                     'fixed')
        tc.submit('preview')
        # The 'resolve' action must still be available on the second
        # preview; selecting it again would fail otherwise.
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.submit('preview')
class RegressionTestTicket6879b(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/6879 b
        Make sure that previewing a close does not make the available actions
        be those for the close status.
        """
        # create a ticket, then preview resolving the ticket, then submit
        self._tester.create_ticket("RegressionTestTicket6879 b")
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.formvalue('propertyform', 'action_resolve_resolve_resolution',
                     'fixed')
        tc.submit('preview')
        # After the preview, the resolve action must still be selectable
        # and the final submit must go through.
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.submit('submit')
class RegressionTestTicket6912a(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/6912 a"""
        # An empty owner must be selectable when creating a component;
        # ItemNotFoundError means the blank option is missing from the
        # form, which is exactly the regression being guarded against.
        try:
            self._tester.create_component(name='RegressionTestTicket6912a',
                                          owner='')
        except twill.utils.ClientForm.ItemNotFoundError, e:
            raise twill.errors.TwillAssertionError(e)
class RegressionTestTicket6912b(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/6912 b"""
        self._tester.create_component(name='RegressionTestTicket6912b',
                                      owner='admin')
        tc.follow('RegressionTestTicket6912b')
        # Clearing the owner on an existing component must be possible;
        # ItemNotFoundError means the blank option is missing.
        try:
            tc.formvalue('modcomp', 'owner', '')
        except twill.utils.ClientForm.ItemNotFoundError, e:
            raise twill.errors.TwillAssertionError(e)
        tc.submit('save', formname='modcomp')
        # The component listing must show an empty owner cell.
        tc.find('RegressionTestTicket6912b</a>[ \n\t]*</td>[ \n\t]*'
                '<td class="owner"></td>', 's')
class RegressionTestTicket7821group(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/7821 group.
        """
        env = self._testenv.get_trac_environment()
        # Remember the stock value so it can be restored in the finally
        # block below.
        saved_default_query = env.config.get('query', 'default_query')
        default_query = 'status!=closed&order=status&group=status&max=42' \
                        '&desc=1&groupdesc=1&col=summary|status|cc' \
                        '&cc~=$USER'
        env.config.set('query', 'default_query', default_query)
        env.config.save()
        try:
            self._tester.create_ticket('RegressionTestTicket7821 group')
            self._tester.go_to_query()
            # $USER in the default query must be substituted with the
            # logged-in user ('admin').
            tc.find('<input type="text" name="0_cc" value="admin"'
                    ' size="[0-9]+" />')
            # col
            tc.find('<input type="checkbox" name="col" value="summary"'
                    ' checked="checked" />')
            tc.find('<input type="checkbox" name="col" value="owner" />')
            tc.find('<input type="checkbox" name="col" value="status"'
                    ' checked="checked" />')
            tc.find('<input type="checkbox" name="col" value="cc"'
                    ' checked="checked" />')
            # group
            tc.find('<option selected="selected" value="status">Status'
                    '</option>')
            # groupdesc
            tc.find('<input type="checkbox" name="groupdesc" id="groupdesc"'
                    ' checked="checked" />')
            # max
            tc.find('<input type="text" name="max" id="max" size="[0-9]*?"'
                    ' value="42" />')
            # col in results
            tc.find('<a title="Sort by Ticket [(]ascending[)]" ')
            tc.find('<a title="Sort by Summary [(]ascending[)]" ')
            tc.find('<a title="Sort by Status [(]ascending[)]" ')
            tc.find('<a title="Sort by Cc [(]ascending[)]" ')
            tc.notfind('<a title="Sort by Owner "')
        finally:
            # Restore the original default query so later tests see the
            # stock configuration.
            env.config.set('query', 'default_query', saved_default_query)
            env.config.save()
class RegressionTestTicket7821var(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/7821 var"""
        env = self._testenv.get_trac_environment()
        # Remember stock values for restoration in the finally block.
        saved_default_query = env.config.get('query', 'default_query')
        saved_restrict_owner = env.config.get('ticket', 'restrict_owner')
        default_query = '?status=!closed&cc=~$USER&owner=$USER'
        env.config.set('query', 'default_query', default_query)
        # restrict_owner=no keeps the owner field a plain text input, which
        # is what the regexes below expect.
        env.config.set('ticket', 'restrict_owner', 'no')
        env.config.save()
        try:
            self._tester.create_ticket('RegressionTestTicket7821 var')
            self._tester.go_to_query()
            # $USER in default_query must be expanded to the current user
            tc.find('<input type="text" name="0_owner" value="admin"'
                    ' size="[0-9]+" />')
            tc.find('<input type="text" name="0_cc" value="admin"'
                    ' size="[0-9]+" />')
            # query:owner=$USER&or&cc~=$USER
            tc.go(self._tester.url + \
                  '/intertrac/query:owner=$USER&or&cc~=$USER')
            tc.find('<input type="text" name="0_owner" value="admin"'
                    ' size="[0-9]+" />')
            tc.find('<input type="text" name="1_cc" value="admin"'
                    ' size="[0-9]+" />')
        finally:
            env.config.set('query', 'default_query', saved_default_query)
            env.config.set('ticket', 'restrict_owner', saved_restrict_owner)
            env.config.save()
class RegressionTestTicket8247(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/8247
        Author field of ticket comment corresponding to the milestone removal
        was always 'anonymous'."""
        name = "MilestoneRemove"
        self._tester.create_milestone(name)
        id = self._tester.create_ticket(info={'milestone': name})  # NOTE: shadows builtin `id`
        ticket_url = self._tester.url + "/ticket/%d" % id
        tc.go(ticket_url)
        tc.find(name)
        # Remove the milestone via the admin panel while logged in as admin.
        tc.go(self._tester.url + "/admin/ticket/milestones")
        tc.formvalue('milestone_table', 'sel', name)
        tc.submit('remove')
        tc.go(ticket_url)
        # The auto-generated "milestone deleted" change must be attributed
        # to the admin user, not to 'anonymous'.
        tc.find('<strong class="trac-field-milestone">Milestone</strong>'
                '[ \n\t]*<em>%s</em> deleted' % name)
        tc.find('Changed <a.* ago</a> by '
                '<span class="trac-author">admin</span>')
        tc.notfind('</a> ago by anonymous')
class RegressionTestTicket8861(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/8861
        When creating a milestone with an already existing name, you get
        a warning. After changing the name you will find that the original
        milestone with that name is renamed instead of a new one being
        created."""
        name = "8861Milestone"
        self._tester.create_milestone(name)
        tc.go(self._tester.url + "/milestone?action=new")
        tc.formvalue('edit', 'name', name)
        tc.submit('Add milestone')
        # A duplicate name must be rejected with a warning ...
        tc.find('Milestone "%s" already exists' % name)
        tc.formvalue('edit', 'name', name + '__')
        tc.submit('Add milestone')
        # ... and resubmitting under the corrected name must create a second
        # milestone instead of renaming the original one: both must exist.
        tc.go(self._tester.url + "/roadmap")
        tc.find('Milestone: <em>%s</em>' % name)
        tc.find('Milestone: <em>%s</em>' % (name + '__'))
class RegressionTestTicket9084(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/9084"""
        ticketid = self._tester.create_ticket()
        self._tester.add_comment(ticketid)
        self._tester.go_to_ticket(ticketid)
        tc.submit('2', formname='reply-to-comment-1') # '1' hidden, '2' submit
        tc.formvalue('propertyform', 'comment', random_sentence(3))
        # Submitting the reply must not trip the server-side assertion that
        # this ticket was about.
        tc.submit('Submit changes')
        tc.notfind('AssertionError')
class RegressionTestTicket9981(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/9981"""
        # Create and close a first ticket that has a comment ...
        tid1 = self._tester.create_ticket()
        self._tester.add_comment(tid1)
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.submit('submit')
        # ... then reference that comment from a second ticket.
        tid2 = self._tester.create_ticket()
        comment = '[comment:1:ticket:%s]' % tid1
        self._tester.add_comment(tid2, comment)
        self._tester.go_to_ticket(tid2)
        # The intertrac comment link must carry the 'closed' status class of
        # the referenced ticket.
        tc.find('<a class="closed ticket"[ \t\n]+'
                'href="/ticket/%(num)s#comment:1"[ \t\n]+'
                'title="Comment 1 for Ticket #%(num)s"' % {'num': tid1})
class RegressionTestTicket10010(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/10010
        Allow configuring the default retargeting option when closing or
        deleting a milestone."""
        m1 = self._tester.create_milestone()
        m2 = self._tester.create_milestone()
        self._tester.create_ticket(info={'milestone': m1})
        def go_to_and_find_markup(markup, find=True):
            # Check (or refute) the retarget-option markup on both the
            # 'edit' and the 'delete' confirmation forms of milestone m1.
            self._tester.go_to_milestone(m1)
            tc.formvalue('editmilestone', 'action', 'edit')
            tc.submit()
            if find:
                tc.find(markup)
            else:
                tc.notfind(markup)
            self._tester.go_to_milestone(m1)
            tc.formvalue('editmilestone', 'action', 'delete')
            tc.submit()
            if find:
                tc.find(markup)
            else:
                tc.notfind(markup)
        try:
            # No default configured -> nothing preselected.
            go_to_and_find_markup('<option selected="selected" ', False)
            self._testenv.set_config('milestone', 'default_retarget_to', m2)
            # Default set to m2 -> m2 must be preselected.
            go_to_and_find_markup('<option selected="selected" '
                                  'value="%(name)s">%(name)s</option>' % {'name': m2})
            self._testenv.set_config('milestone', 'default_retarget_to', m1)
            # Retargeting to the milestone being closed/deleted itself makes
            # no sense, so nothing may be preselected.
            go_to_and_find_markup('<option selected="selected" ', False)
            self._testenv.set_config('milestone', 'default_retarget_to', '')
            go_to_and_find_markup('<option selected="selected" ', False)
        finally:
            self._testenv.remove_config('milestone', 'default_retarget_to')
class RegressionTestTicket10772(FunctionalTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/10772"""
        def find_prop(field, value=None):
            # Check how the given ticket property is rendered in the ticket
            # box: a query link for most fields, a milestone link for the
            # milestone, a span for the type, or an empty cell when the
            # field has no value.
            if value and field == 'type':
                tc.find(r'<span class="trac-%(field)s">\s*'
                        r'<a href="/query\?status=!closed&'
                        r'%(field)s=%(value)s">\s*%(value)s\s*</a>\s*</span>'
                        % {'field': field, 'value': value})
            elif value and field == 'milestone':
                tc.find(r'<td headers="h_%(field)s">\s*'
                        r'<a class="%(field)s" href="/%(field)s/%(value)s" '
                        r'title=".+">\s*%(value)s\s*</a>\s*</td>'
                        % {'field': field, 'value': value})
            elif value:
                tc.find(r'<td headers="h_%(field)s">\s*'
                        r'<a href="/query\?status=!closed&'
                        r'%(field)s=%(value)s">\s*%(value)s\s*</a>\s*</td>'
                        % {'field': field, 'value': value})
            else:
                tc.find(r'<td headers="h_%(field)s">\s*</td>'
                        % {'field': field})
        self._testenv.set_config('ticket', 'optional_fields',
                                 'component, milestone, priority, version')
        try:
            # TODO: use the //Clear default// buttons to clear these values
            self._tester.go_to_admin("Components")
            tc.submit('clear', formname='component_table')
            self._tester.go_to_admin("Milestones")
            tc.submit('clear', formname='milestone_table')
            self._tester.go_to_admin("Versions")
            tc.submit('clear', formname='version_table')
            self._tester.go_to_admin("Priorities")
            tc.formvalue('enumtable', 'default', 'major')
            tc.submit('apply')
            # With no defaults, the optional fields of a fresh ticket must
            # stay empty (except priority, which was set above).
            self._tester.go_to_ticket()
            tc.formvalue('propertyform', 'field-summary', 'ticket summary')
            tc.submit('submit')
            find_prop('component')
            find_prop('milestone')
            find_prop('priority', 'major')
            find_prop('version')
            # Now configure defaults for every field ...
            self._testenv.set_config('ticket', 'optional_fields', '')
            self._tester.go_to_admin("Components")
            tc.formvalue('component_table', 'default', 'component2')
            tc.submit('apply')
            self._tester.go_to_admin("Milestones")
            tc.formvalue('milestone_table', 'ticket_default', 'milestone2')
            tc.submit('apply')
            self._tester.go_to_admin("Priorities")
            tc.formvalue('enumtable', 'default', 'minor')
            tc.submit('apply')
            self._tester.go_to_admin("Versions")
            tc.formvalue('version_table', 'default', '2.0')
            tc.submit('apply')
            self._tester.go_to_ticket()
            self._tester.go_to_admin("Ticket Types")
            tc.formvalue('enumtable', 'default', 'task')
            tc.submit('apply')
            # ... and a fresh ticket must pick all of them up.
            self._tester.go_to_ticket()
            tc.formvalue('propertyform', 'field-summary', 'ticket summary')
            tc.submit('submit')
            find_prop('component', 'component2')
            find_prop('milestone', 'milestone2')
            find_prop('priority', 'minor')
            find_prop('version', '2.0')
            find_prop('type', 'task')
        finally:
            self._testenv.remove_config('ticket', 'optional_fields')
class RegressionTestTicket11028(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/11028"""
        self._tester.go_to_roadmap()
        try:
            # Check that a milestone is found on the roadmap,
            # even for anonymous
            tc.find('<a href="/milestone/milestone1">[ \n\t]*'
                    'Milestone: <em>milestone1</em>[ \n\t]*</a>')
            self._tester.logout()
            tc.find('<a href="/milestone/milestone1">[ \n\t]*'
                    'Milestone: <em>milestone1</em>[ \n\t]*</a>')
            # Check that no milestones are found on the roadmap when
            # MILESTONE_VIEW is revoked
            self._testenv.revoke_perm('anonymous', 'MILESTONE_VIEW')
            tc.reload()
            tc.notfind('Milestone: <em>milestone\d+</em>')
            # Check that roadmap can't be viewed without ROADMAP_VIEW
            self._testenv.revoke_perm('anonymous', 'ROADMAP_VIEW')
            self._tester.go_to_url(self._tester.url + '/roadmap')
            tc.find('<h1>Error: Forbidden</h1>')
        finally:
            # Restore state prior to test execution
            self._tester.login('admin')
            self._testenv.grant_perm('anonymous',
                                     ('ROADMAP_VIEW', 'MILESTONE_VIEW'))
class RegressionTestTicket11153(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/11153"""
        # Check that "View Tickets" mainnav entry links to the report page
        self._tester.go_to_view_tickets()
        # Check that "View Tickets" mainnav entry links to the query page
        # when the user doesn't have REPORT_VIEW, and that the mainnav entry
        # is not present when the user doesn't have TICKET_VIEW.
        try:
            self._tester.logout()
            self._testenv.revoke_perm('anonymous', 'REPORT_VIEW')
            self._tester.go_to_view_tickets('query')
            self._testenv.revoke_perm('anonymous', 'TICKET_VIEW')
            self._tester.go_to_front()
            tc.notfind('\\bView Tickets\\b')
        finally:
            # Restore permissions and the admin session for the rest of the
            # test.
            self._testenv.grant_perm('anonymous',
                                     ('REPORT_VIEW', 'TICKET_VIEW'))
            self._tester.login('admin')
        # Disable the ReportModule component and check that "View Tickets"
        # mainnav entry links to the `/query` page.
        env = self._testenv.get_trac_environment()
        env.config.set('components', 'trac.ticket.report.ReportModule',
                       'disabled')
        env.config.save()
        try:
            self._tester.go_to_view_tickets('query')
        finally:
            env.config.remove('components', 'trac.ticket.report.ReportModule')
            env.config.save()
        # Disable the QueryModule component and check that "View Tickets"
        # mainnav entry links to the `/report` page
        env.config.set('components', 'trac.ticket.query.QueryModule',
                       'disabled')
        env.config.save()
        try:
            self._tester.go_to_view_tickets('report')
            tc.notfind('<li class="last first">Available Reports</li>')
        finally:
            env.config.remove('components', 'trac.ticket.query.QueryModule')
            env.config.save()
class RegressionTestTicket11176(FunctionalTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/11176
        Fine-grained permission checks should be enforced on the Report list
        page, the report pages and query pages."""
        # Grant anonymous access only to reports 1 and 2; deny all others.
        self._testenv.enable_authz_permpolicy("""
            [report:1]
            anonymous = REPORT_VIEW
            [report:2]
            anonymous = REPORT_VIEW
            [report:*]
            anonymous =
        """)
        self._tester.go_to_front()
        self._tester.logout()
        self._tester.go_to_view_tickets()
        try:
            # Check that permissions are enforced on the report list page
            tc.find(r'<a title="View report" '
                    r'href="/report/1">[ \n\t]*<em>\{1\}</em>')
            tc.find(r'<a title="View report" '
                    r'href="/report/2">[ \n\t]*<em>\{2\}</em>')
            for report_num in range(3, 9):
                tc.notfind(r'<a title="View report" '
                           r'href="/report/%(num)s">[ \n\t]*'
                           r'<em>\{%(num)s\}</em>' % {'num': report_num})
            # Check that permissions are enforced on the report pages
            tc.go(self._tester.url + '/report/1')
            tc.find(r'<h1>\{1\} Active Tickets[ \n\t]*'
                    r'(<span class="numrows">\(\d+ matches\)</span>)?'
                    r'[ \n\t]*</h1>')
            tc.go(self._tester.url + '/report/2')
            tc.find(r'<h1>\{2\} Active Tickets by Version[ \n\t]*'
                    r'(<span class="numrows">\(\d+ matches\)</span>)?'
                    r'[ \n\t]*</h1>')
            for report_num in range(3, 9):
                tc.go(self._tester.url + '/report/%d' % report_num)
                tc.find(r'<h1>Error: Forbidden</h1>')
            # Check that permissions are enforced on the query pages
            tc.go(self._tester.url + '/query?report=1')
            tc.find(r'<h1>Active Tickets '
                    r'<span class="numrows">\(\d+ matches\)</span></h1>')
            tc.go(self._tester.url + '/query?report=2')
            tc.find(r'<h1>Active Tickets by Version '
                    r'<span class="numrows">\(\d+ matches\)</span></h1>')
            for report_num in range(3, 9):
                tc.go(self._tester.url + '/query?report=%d' % report_num)
                tc.find(r'<h1>Error: Forbidden</h1>')
        finally:
            # Log back in and drop the fine-grained policy again.
            self._tester.login('admin')
            self._testenv.disable_authz_permpolicy()
def functionalSuite(suite=None):
    """Return the ticket functional test suite.

    If *suite* is not given, the tests are appended to the global functional
    suite, which provisions the shared test environment.  The registration
    order below is significant: later tests may rely on state (tickets,
    milestones, components, ...) created by earlier ones.
    """
    if not suite:
        import trac.tests.functional
        suite = trac.tests.functional.functionalSuite()
    suite.addTest(TestTickets())
    suite.addTest(TestTicketMaxSummarySize())
    suite.addTest(TestTicketAddAttachment())
    suite.addTest(TestTicketPreview())
    suite.addTest(TestTicketNoSummary())
    suite.addTest(TestTicketAltFormats())
    suite.addTest(TestTicketCSVFormat())
    suite.addTest(TestTicketTabFormat())
    suite.addTest(TestTicketRSSFormat())
    suite.addTest(TestTicketSearch())
    suite.addTest(TestNonTicketSearch())
    suite.addTest(TestTicketHistory())
    suite.addTest(TestTicketHistoryDiff())
    suite.addTest(TestTicketQueryLinks())
    suite.addTest(TestTicketQueryLinksQueryModuleDisabled())
    suite.addTest(TestTicketQueryOrClause())
    suite.addTest(TestTicketCustomFieldTextNoFormat())
    suite.addTest(TestTicketCustomFieldTextWikiFormat())
    suite.addTest(TestTicketCustomFieldTextAreaNoFormat())
    suite.addTest(TestTicketCustomFieldTextAreaWikiFormat())
    suite.addTest(TestTicketCustomFieldTextReferenceFormat())
    suite.addTest(TestTicketCustomFieldTextListFormat())
    suite.addTest(RegressionTestTicket10828())
    suite.addTest(TestTicketTimeline())
    suite.addTest(TestAdminComponent())
    suite.addTest(TestAdminComponentAuthorization())
    suite.addTest(TestAdminComponentDuplicates())
    suite.addTest(TestAdminComponentRemoval())
    suite.addTest(TestAdminComponentNonRemoval())
    suite.addTest(TestAdminComponentDefault())
    suite.addTest(TestAdminComponentDetail())
    suite.addTest(TestAdminComponentNoneDefined())
    suite.addTest(TestAdminMilestone())
    suite.addTest(TestAdminMilestoneAuthorization())
    suite.addTest(TestAdminMilestoneSpace())
    suite.addTest(TestAdminMilestoneDuplicates())
    suite.addTest(TestAdminMilestoneDetail())
    suite.addTest(TestAdminMilestoneDue())
    suite.addTest(TestAdminMilestoneDetailDue())
    suite.addTest(TestAdminMilestoneDetailRename())
    suite.addTest(TestAdminMilestoneCompleted())
    suite.addTest(TestAdminMilestoneCompletedFuture())
    suite.addTest(TestAdminMilestoneRemove())
    suite.addTest(TestAdminMilestoneRemoveMulti())
    suite.addTest(TestAdminMilestoneNonRemoval())
    suite.addTest(TestAdminMilestoneDefaults())
    suite.addTest(TestAdminPriority())
    suite.addTest(TestAdminPriorityAuthorization())
    suite.addTest(TestAdminPriorityModify())
    suite.addTest(TestAdminPriorityRemove())
    suite.addTest(TestAdminPriorityRemoveMulti())
    suite.addTest(TestAdminPriorityNonRemoval())
    suite.addTest(TestAdminPriorityDefault())
    suite.addTest(TestAdminPriorityDetail())
    suite.addTest(TestAdminPriorityRenumber())
    suite.addTest(TestAdminPriorityRenumberDup())
    suite.addTest(TestAdminResolution())
    suite.addTest(TestAdminResolutionAuthorization())
    suite.addTest(TestAdminResolutionDuplicates())
    suite.addTest(TestAdminResolutionDefault())
    suite.addTest(TestAdminSeverity())
    suite.addTest(TestAdminSeverityAuthorization())
    suite.addTest(TestAdminSeverityDuplicates())
    suite.addTest(TestAdminSeverityDefault())
    suite.addTest(TestAdminType())
    suite.addTest(TestAdminTypeAuthorization())
    suite.addTest(TestAdminTypeDuplicates())
    suite.addTest(TestAdminTypeDefault())
    suite.addTest(TestAdminVersion())
    suite.addTest(TestAdminVersionAuthorization())
    suite.addTest(TestAdminVersionDuplicates())
    suite.addTest(TestAdminVersionDetail())
    suite.addTest(TestAdminVersionDetailTime())
    suite.addTest(TestAdminVersionDetailCancel())
    suite.addTest(TestAdminVersionRemove())
    suite.addTest(TestAdminVersionRemoveMulti())
    suite.addTest(TestAdminVersionNonRemoval())
    suite.addTest(TestAdminVersionDefault())
    suite.addTest(TestNewReport())
    suite.addTest(TestReportRealmDecoration())
    suite.addTest(TestReportDynamicVariables())
    suite.addTest(TestMilestone())
    suite.addTest(TestMilestoneAddAttachment())
    suite.addTest(TestMilestoneClose())
    suite.addTest(TestMilestoneDelete())
    suite.addTest(TestMilestoneRename())
    suite.addTest(RegressionTestRev5665())
    suite.addTest(RegressionTestRev5994())
    suite.addTest(RegressionTestTicket4447())
    suite.addTest(RegressionTestTicket4630a())
    suite.addTest(RegressionTestTicket4630b())
    suite.addTest(RegressionTestTicket5022())
    suite.addTest(RegressionTestTicket5394a())
    suite.addTest(RegressionTestTicket5394b())
    suite.addTest(RegressionTestTicket5497prep())
    suite.addTest(RegressionTestTicket5497a())
    suite.addTest(RegressionTestTicket5497b())
    suite.addTest(RegressionTestTicket5497c())
    suite.addTest(RegressionTestTicket5497d())
    suite.addTest(RegressionTestTicket5602())
    suite.addTest(RegressionTestTicket5687())
    suite.addTest(RegressionTestTicket5930())
    suite.addTest(RegressionTestTicket6048())
    suite.addTest(RegressionTestTicket6747())
    suite.addTest(RegressionTestTicket6879a())
    suite.addTest(RegressionTestTicket6879b())
    suite.addTest(RegressionTestTicket6912a())
    suite.addTest(RegressionTestTicket6912b())
    suite.addTest(RegressionTestTicket7821group())
    suite.addTest(RegressionTestTicket7821var())
    suite.addTest(RegressionTestTicket8247())
    suite.addTest(RegressionTestTicket8861())
    suite.addTest(RegressionTestTicket9084())
    suite.addTest(RegressionTestTicket9981())
    suite.addTest(RegressionTestTicket10010())
    suite.addTest(RegressionTestTicket10772())
    suite.addTest(RegressionTestTicket11028())
    suite.addTest(RegressionTestTicket11153())
    if ConfigObj:
        suite.addTest(RegressionTestTicket11176())
    else:
        # The authz permission policy used by this test needs the optional
        # ConfigObj package.
        print "SKIP: RegressionTestTicket11176 (ConfigObj not installed)"
    return suite
if __name__ == '__main__':
    # Run the full functional suite when this module is executed directly.
    unittest.main(defaultTest='functionalSuite')
|
[MERGE] OPW 572488: report sxw/odt: fix inconsistent duplicate content/meta/style files within generated sxw/odt
forward-port of v6.0 server revid: xal@openerp.com-20130102152925-h81qv2fx300oakzv
bzr revid: xal@openerp.com-20130102155718-g07b2mv7gk7d0c7g
|
from cumulusci.robotframework.pageobjects import ListingPage
from cumulusci.robotframework.pageobjects import DetailPage
from cumulusci.robotframework.pageobjects import pageobject
from cumulusci.robotframework.utils import capture_screenshot_on_error
from BaseObjects import BaseNPSPPage
from NPSP import npsp_lex_locators
from datetime import datetime
import time
from logging import exception
from dateutil.relativedelta import relativedelta
import time
@pageobject("Listing", "npe03__Recurring_Donation__c")
class RDListingPage(BaseNPSPPage, ListingPage):
    # API name of the object this listing page object binds to.
    object_name = "npe03__Recurring_Donation__c"

    @capture_screenshot_on_error
    def wait_for_rd2_modal(self):
        """Waits for the enhanced Recurring Donations (RD2) modal window to
        open by waiting until the Save button in the modal footer is present
        and visible."""
        btnlocator = npsp_lex_locators["button-with-text"].format("Save")
        self.selenium.scroll_element_into_view(btnlocator)
        self.selenium.wait_until_page_contains_element(btnlocator,timeout=60,error="Recurring Donations Modal window did not open")
        self.selenium.wait_until_element_is_visible(btnlocator,60)

    @capture_screenshot_on_error
    def click_rd2_modal_button(self, name):
        """Based on the button name (Cancel) or (Save) on the modal footer, selects and clicks on the respective button"""
        btnlocator = npsp_lex_locators["button-with-text"].format(name)
        # Fixed wait so the button position is stable before clicking.
        self.builtin.sleep(2,"Wait for the elevate message to appear on the modal")
        self.selenium.wait_until_element_is_visible(btnlocator,60)
        self.selenium.scroll_element_into_view(btnlocator)
        self.selenium.click_element(btnlocator)

    @capture_screenshot_on_error
    def select_value_from_rd2_modal_dropdown(self, dropdown, value):
        """Selects given value from the dropdown field on the rd2 modal"""
        locator = npsp_lex_locators["erd"]["modal_dropdown_selector"].format(dropdown)
        selection_value = npsp_lex_locators["erd"]["modal_selection_value"].format(value)
        if self.npsp.check_if_element_exists(locator):
            self.selenium.set_focus_to_element(locator)
            self.selenium.wait_until_element_is_visible(locator)
            self.selenium.scroll_element_into_view(locator)
            # _jsclick opens the Lightning combobox via JavaScript, avoiding
            # intercepted native clicks.
            self.salesforce._jsclick(locator)
            self.selenium.wait_until_element_is_visible(selection_value)
            self.selenium.scroll_element_into_view(selection_value)
            self.selenium.click_element(selection_value)
        else:
            self.builtin.log(f"dropdown element {dropdown} not present")

    @capture_screenshot_on_error
    def populate_rd2_modal_form(self, **kwargs):
        """Populates the RD2 modal form fields with the respective fields and values"""
        self.builtin.sleep(1,"For Rd2 modal dropdown values to get populated")
        ns=self.npsp.get_npsp_namespace_prefix()  # NOTE(review): `ns` is never used below — confirm it can be dropped
        for key, value in kwargs.items():
            locator = npsp_lex_locators["erd"]["modal_input_field"].format(key)
            # Recurring Donation Name field only appears on a regression org hence this check
            if key == "Recurring Donation Name":
                if self.npsp.check_if_element_exists(locator):
                    self.selenium.set_focus_to_element(locator)
                    self.salesforce._populate_field(locator, value)
                else:
                    self.builtin.log(f"Element {key} not found")
            # Plain text/number inputs.
            if key in ("Amount","Number of Planned Installments"):
                if self.npsp.check_if_element_exists(locator):
                    self.selenium.set_focus_to_element(locator)
                    self.salesforce._populate_field(locator, value)
                else:
                    self.builtin.log(f"Element {key} not found")
            # Combobox fields are driven through the dropdown helper.
            if key in ("Donor Type","Payment Method","Day of Month","Recurring Type"):
                self.select_value_from_rd2_modal_dropdown(key, value)
            # Lookup fields use the shared Salesforce lookup helper.
            if key in ("Account", "Contact"):
                self.salesforce.populate_lookup_field(key, value)
@pageobject("Details", "npe03__Recurring_Donation__c")
class RDDetailPage(BaseNPSPPage, DetailPage):
    # API name of the object this detail page object binds to.
    object_name = "npe03__Recurring_Donation__c"
def _is_current_page(self):
""" Verify we are on the Recurring Donations Detail page
by verifying that the url contains '/view'
"""
for i in range(3):
time.sleep(2)
self.selenium.location_should_contain(
"/lightning/r/npe03__Recurring_Donation__c/",
message="Current page is not a Recurring Donations record view",
)
locator = npsp_lex_locators["bge"]["button"].format("Edit")
self.selenium.wait_until_page_contains_element(locator, error="Recurring donations Details page did not load fully")
edit_button = self.selenium.get_webelement(locator)
if self.npsp.check_if_element_displayed(edit_button):
return
else:
time.sleep(2)
i += 1
    def refresh_opportunities(self):
        """Opens the 'more actions' dropdown on the record page and waits
        until the 'Refresh Opportunities' action is listed.

        NOTE(review): despite the original docstring, nothing is clicked in
        the opened menu — this only verifies the entry appears.
        """
        locator = npsp_lex_locators["link-contains"].format("more actions")
        self.selenium.click_element(locator)
        self.selenium.wait_until_page_contains("Refresh Opportunities")
def click_actions_button(self, button_name):
"""Clicks on action button based on API version"""
if self.npsp.latest_api_version == 47.0:
self.selenium.click_link(button_name)
else:
self.selenium.click_button(button_name)
    def go_to_recurring_donation_related_opportunities_page(self,rd_id):
        """ Navigates to the related opportunities page of the given recurring donation ID """
        objectname = "npe03__Donations__r"
        values = self.npsp.get_url_formatted_object_name(objectname)
        # Lightning related-list URL: /lightning/r/<record-id>/related/<relation>/view
        url = "{}/lightning/r/{}/related/{}/view".format(values['baseurl'],rd_id,objectname)
        self.selenium.go_to(url)
        self.salesforce.wait_until_loading_is_complete()
        locator = npsp_lex_locators["link-title"].format("New")
        new_button = self.selenium.get_webelement(locator)
        # NOTE(review): a WebElement (not a locator string) is passed here;
        # SeleniumLibrary accepts WebElements as locators — confirm.
        self.selenium.wait_until_page_contains_element(new_button, error="Recurring Donations related opportunities page did not load fully")
    @capture_screenshot_on_error
    def edit_recurring_donation_status(self, **kwargs):
        """From the actions dropdown select edit action and edit the fields specified in the kwargs
        | Example
        | Edit Recurring Donation Status
        | ...                            Recurring Period=Advanced
        | ...                            Every=3
        """
        locator = npsp_lex_locators["bge"]["button"].format("Edit")
        edit_button = self.selenium.get_webelement(locator)
        self.selenium.wait_until_element_is_visible(edit_button,60)
        self.selenium.click_element(locator)
        # Fixed wait for the edit modal open animation to finish.
        time.sleep(3)
        btnlocator = npsp_lex_locators["button-with-text"].format("Save")
        self.selenium.wait_until_element_is_visible(btnlocator,60)
        # Fill in the requested fields, then save the modal.
        self._populate_edit_status_values(**kwargs)
        self.selenium.scroll_element_into_view(btnlocator)
        self.selenium.click_element(btnlocator)
    @capture_screenshot_on_error
    def _populate_edit_status_values(self, **kwargs):
        """Takes the key value pairs to edit and makes changes accordingly.

        Numeric/text fields ("Amount", "Number of Planned Installments",
        "Every") are typed into inputs; every other key is treated as a
        dropdown and driven through the combobox locators.
        """
        for key, value in kwargs.items():
            if key in ("Amount", "Number of Planned Installments", "Every"):
                locator = npsp_lex_locators["erd"]["modal_input_field"].format(key)
                if self.npsp.check_if_element_exists(locator):
                    self.selenium.set_focus_to_element(locator)
                    self.salesforce._populate_field(locator, value)
                else:
                    self.builtin.log(f"Element {key} not present")
            else:
                # Dropdown field: open the combobox via JS click, then pick
                # the requested option.
                locator = npsp_lex_locators["erd"]["modal_dropdown_selector"].format(key)
                selection_value = npsp_lex_locators["erd"]["modal_selection_value"].format(value)
                if self.npsp.check_if_element_exists(locator):
                    self.selenium.set_focus_to_element(locator)
                    self.selenium.wait_until_element_is_visible(locator)
                    self.selenium.scroll_element_into_view(locator)
                    self.salesforce._jsclick(locator)
                    self.selenium.wait_until_element_is_visible(selection_value,60)
                    self.selenium.scroll_element_into_view(selection_value)
                    self.selenium.click_element(selection_value)
                else:
                    self.builtin.log(f"Element {key} not present")
    @capture_screenshot_on_error
    def pause_recurring_donation(self, type=None):
        """Finds the pause button on the recurring donations details
        view page, clicks the button and waits for the modal to appear.

        When ``type`` is "Closed" no pause modal opens, so the wait for the
        Save button is skipped.  (NOTE: the parameter name shadows the
        ``type`` builtin but is kept for caller compatibility.)
        """
        locator = npsp_lex_locators["bge"]["button"].format("Pause")
        pause_button = self.selenium.get_webelement(locator)
        self.selenium.wait_until_element_is_visible(pause_button,60)
        self.selenium.click_element(locator)
        if type != "Closed":
            btnlocator = npsp_lex_locators["button-with-text"].format("Save")
            self.selenium.wait_until_element_is_visible(btnlocator,60)
    @capture_screenshot_on_error
    def populate_pause_modal(self,**kwargs):
        """ Populate the values in the pause recurring donation modal
        based on the key value pair options in the kwargs passed as parameter
        | Populate Pause Modal
        | ...    Paused Reason=Card Expired
        | ...    Date=${date}

        NOTE(review): this definition is dead code — a second method with
        the same name later in this class replaces it at class-creation
        time.  It also has two latent bugs, flagged inline below.
        """
        for key, value in kwargs.items():
            if key in ("Paused Reason"):  # BUG?: substring test against a string, not tuple membership
                locator = npsp_lex_locators["erd"]["modal_dropdown_selector"].format(key)
                selection_value = npsp_lex_locators["erd"]["modal_selection_value"].format(value)
                if self.npsp.check_if_element_exists(locator):
                    self.selenium.set_focus_to_element(locator)
                    self.selenium.wait_until_element_is_visible(locator)
                    self.selenium.scroll_element_into_view(locator)
                    self.salesforce._jsclick(locator)
                    self.selenium.wait_until_element_is_visible(selection_value)
                    self.selenium.click_element(selection_value)
                else:
                    self.builtin.log(f"Element {key} not present")
            elif key in ("Date"):  # BUG?: same substring-test issue as above
                for v in value:
                    checkbox = npsp_lex_locators["erd"]["pause_date_checkbox"].format(v)
                    time.sleep(1)
                    if (checkbox.is_selected() == False):  # BUG: `checkbox` is a locator string; str has no is_selected()
                        self.selenium.click_element(checkbox)
                    else:
                        self.builtin.log("This checkbox is already in the expected status", "WARN")
            else:
                raise Exception("Key not supported expected keys <Paused Reason> or <Date>")
        btnlocator = npsp_lex_locators["button-with-text"].format("Save")
        self.selenium.scroll_element_into_view(btnlocator)
        self.selenium.click_element(btnlocator)
        self.salesforce.wait_until_modal_is_closed()
@capture_screenshot_on_error
def populate_pause_modal(self,**kwargs):
""" Populate the values in the pause recurring donation modal
based on the key value pair options in the kwargs passed as parameter
| Populate Pause Modal
| ... Paused Reason=Card Expired
| ... Date=${date} """
for key, value in kwargs.items():
if key in ("Paused Reason"):
locator = npsp_lex_locators["erd"]["modal_dropdown_selector"].format(key)
selection_value = npsp_lex_locators["erd"]["modal_selection_value"].format(value)
if self.npsp.check_if_element_exists(locator):
self.selenium.set_focus_to_element(locator)
self.selenium.wait_until_element_is_visible(locator)
self.selenium.scroll_element_into_view(locator)
self.salesforce._jsclick(locator)
self.selenium.wait_until_element_is_visible(selection_value)
self.selenium.click_element(selection_value)
else:
self.builtin.log(f"Element {key} not present")
if key in ("Date"):
for date in value:
checkbox = npsp_lex_locators["erd"]["pause_date_checkbox"].format(date)
time.sleep(1)
self.selenium.click_element(checkbox)
if "Validate" in key:
self.validate_message_text(value)
btnlocator = npsp_lex_locators["button-with-text"].format("Save")
self.selenium.scroll_element_into_view(btnlocator)
self.selenium.click_element(btnlocator)
self.salesforce.wait_until_modal_is_closed()
    @capture_screenshot_on_error
    def validate_message_text(self,txt):
        """Find the element containing warning message on the pause modal and
        asserts the text displayed matches with the expected text.

        The expected text is baked into the locator, so visibility of the
        element is the assertion."""
        locator = npsp_lex_locators["erd"]["warning_message"].format(txt)
        self.selenium.wait_until_element_is_visible(locator)
@capture_screenshot_on_error
def verify_pause_text_next_to_installment_date(self, *dates):
"""Accepts a list of dates and validates the presence of date element with the paused text next to it """
for date in dates:
locator = npsp_lex_locators["erd"]["date_with_paused_txt"].format(date)
self.selenium.wait_until_element_is_visible(locator)
self.selenium.element_text_should_be(locator, date)
@capture_screenshot_on_error
def verify_schedule_warning_messages_present(self):
"""Verify that the schedule warning messages are present when there are no schedules"""
time.sleep(2)
message_locator = npsp_lex_locators["erd"]["text_message"]
list_ele = self.selenium.get_webelements(message_locator)
p_count = len(list_ele)
if p_count == 2:
return
else:
raise Exception("Schedule warning messages do not exist")
    @capture_screenshot_on_error
    def validate_field_values_under_section(self, section=None, **kwargs):
        """Based on the section name , navigates to the sections and validates the key. value pair values passed in kwargs.
        If the section is current schedule, waits for the Current schedule section card on the side bar
        Validates the display fields in the card match with the values passed in the key value pair"""
        if section == "Current Schedule":
            active_schedule_card = npsp_lex_locators["erd"]["active_schedules_card"].format(section)
            # Field labels rendered via lightning-formatted-number vs -date
            # vs -text need different locators.
            number_fields = ["Amount", "Installment Frequency"]
            date_fields = ["Effective Date"]
            self.selenium.wait_until_element_is_visible(active_schedule_card, 60)
            for label, value in kwargs.items():
                if label in number_fields:
                    locator = npsp_lex_locators["erd"]["formatted_number"].format(label)
                    actual_value = self.selenium.get_webelement(locator).text
                elif label in date_fields:
                    locator = npsp_lex_locators["erd"]["formatted_date"].format(label)
                    actual_value = self.selenium.get_webelement(locator).text
                else:
                    locator = npsp_lex_locators["erd"]["formatted_text"].format(label)
                    actual_value = self.selenium.get_webelement(locator).text
                # NOTE(review): actual_value was already fetched above and is
                # fetched again here after the existence check — the first
                # lookup raises before this guard if the element is missing;
                # confirm whether the pre-fetch is intended.
                if self.npsp.check_if_element_exists(locator):
                    print(f"element exists {locator}")
                    actual_value = self.selenium.get_webelement(locator).text
                    print(f"actual value is {actual_value}")
                    self.builtin.log(f"actual value is {actual_value}")
                    assert (
                        value == actual_value
                    ), "Expected {} value to be {} but found {}".format(
                        label, value, actual_value
                    )
                else:
                    self.builtin.log("element Not found")
        else:
            # Any other section: delegate navigation + comparison to the
            # shared NPSP keyword.
            for label, value in kwargs.items():
                self.npsp.navigate_to_and_validate_field_value(
                    label, "contains", value, section
                )
def get_next_payment_date_number(self, paynum, format=True):
"""Returns the next payment date from the list of payment schedules taking in the payment number as input
The date if formatted to ignore the preceding zeros. But if format is set to false, the date is returned
as is without any formatting
|Example
| Get Next Payment Date Number 2 #gets the 2nd installment payment date form the list of payment dates
| Get Ney Payment Date Number 1 False # gets the 1st installment payment date without any formatting
"""
datefield = npsp_lex_locators["erd"]["installment_date"].format(int(paynum))
installment_date = self.selenium.get_webelement(datefield).text
# This is to format the date by removing the trailing 0 which is being the common format across
# 01/06/2020 -> 1/6/2020
if format == True:
tokens = installment_date.split('/')
dd = tokens[0].lstrip('0')
mm = tokens[1].lstrip('0')
newString = f"{dd}/{mm}/{tokens[2]}"
else:
tokens = installment_date.split('/')
dd = tokens[0]
mm = tokens[1]
newString = f"{dd}/{mm}/{tokens[2]}"
return newString
    @capture_screenshot_on_error
    def validate_current_and_next_year_values(self, amount, rdtype=None):
        """Sum the displayed installments falling in the current and next
        calendar year (skipping paused ones) and validate the "Current Year
        Value" under the Statistics section.

        ``amount`` is the per-installment payment; ``rdtype`` may be "Open" to
        extrapolate a full year of payments into next year's value.
        """
        installmentrow = npsp_lex_locators["erd"]["installment_row"]
        installments = self.selenium.get_webelements(installmentrow)
        count = len(installments)
        if count == 0:
            raise Exception("Zero installments found")
        else:
            print(f"Number of installments created is {count}")
        i = 1
        curr_year_value = 0
        next_year_value = 0
        next_year_count = 0
        values = {}
        while i <= count:
            datefield = npsp_lex_locators["erd"]["installment_date"].format(i)
            installment_date = self.selenium.get_webelement(datefield)
            # NOTE(review): installment_date is already a WebElement; passing it
            # back into get_webelement relies on SeleniumLibrary accepting
            # element objects as locators — confirm, or read .text directly.
            actual_date = self.selenium.get_webelement(installment_date).text
            paused_locator = npsp_lex_locators["erd"]["date_with_paused_txt"].format(actual_date)
            year = datetime.strptime(actual_date, "%m/%d/%Y").year
            curr_year = datetime.now().year
            next_year = (datetime.now() + relativedelta(years=1)).year
            # increment the current year value if there is no paused text next to installment date
            if curr_year == year and not self.npsp.check_if_element_exists(paused_locator):
                curr_year_value = curr_year_value + int(amount)
            # increment the next year value if there is no paused text next to installment date
            elif next_year == year and not self.npsp.check_if_element_exists(paused_locator):
                next_year_value = next_year_value + int(amount)
            if next_year == year:
                next_year_count = next_year_count + 1
            i = i + 1
        # This logic handles the scenario if the recurring donation is of type open, the entire year installments
        # are accounted in the calculation for next year value
        if rdtype == "Open":
            next_year_value = next_year_value + (12-next_year_count)*int(amount)
        values['Current Year Value']=f"${curr_year_value}.00"
        # Next Year Value validation is intentionally disabled below.
        #values['Next Year Value']=f"${ next_year_value}.00"
        self.validate_field_values_under_section("Statistics",**values)
@capture_screenshot_on_error
def validate_upcoming_schedules(self, num_payments, startdate, dayofmonth):
"""Takes in the parameter (number of payments) and the donation start date
verifies that the payment schedules created on UI reflect the total number
verifies that the next payment dates are reflected correctly for all the schedules"""
installmentrow = npsp_lex_locators["erd"]["installment_row"]
installments = self.selenium.get_webelements(installmentrow)
count = len(installments)
print(f"Number of installments created is {count}")
assert count == int(num_payments), "Expected installments to be {} but found {}".format(num_payments, count)
date_object = datetime.strptime(startdate, "%m/%d/%Y").date()
# Create a list to store expected dates and actual dates from the ui. Compare the dates and throw exception
expected_dates = []
actual_dates = []
if count == int(num_payments):
i = 1
j = 1
while i <= count:
datefield = npsp_lex_locators["erd"]["installment_date"].format(i)
installment_date = self.selenium.get_webelement(datefield)
actual_date = self.selenium.get_webelement(installment_date).text
formatted_actual = datetime.strptime(actual_date, "%m/%d/%Y").date()
actual_dates.append(formatted_actual)
i = i + 1
while j <= count+1:
expected_date = date_object
expected_dates.append(expected_date)
date_object = (expected_date + relativedelta(months=+1))
j = j + 1
check = any(item in expected_dates for item in actual_dates)
assert (
check == True
), "expected_dates {} doesn't match the actual_dates {}".format(
expected_date, formatted_actual
)
else:
raise Exception("Number of payment installments do not match.")
# TODO: adding the keyword Run Flow as a temporary option for now
from cumulusci.robotframework.pageobjects import ListingPage
from cumulusci.robotframework.pageobjects import DetailPage
from cumulusci.robotframework.pageobjects import pageobject
from cumulusci.robotframework.utils import capture_screenshot_on_error
from BaseObjects import BaseNPSPPage
from NPSP import npsp_lex_locators
from datetime import datetime
import time
from logging import exception
from dateutil.relativedelta import relativedelta
import time
@pageobject("Listing", "npe03__Recurring_Donation__c")
class RDListingPage(BaseNPSPPage, ListingPage):
    """Page object for the Recurring Donations (RD2) listing page and its
    New/Edit modal."""
    object_name = "npe03__Recurring_Donation__c"
    @capture_screenshot_on_error
    def wait_for_rd2_modal(self):
        """Wait for the RD2 modal to open by waiting until its Save button
        is present on the page and visible."""
        btnlocator = npsp_lex_locators["button-with-text"].format("Save")
        self.builtin.sleep(2,"Wait for the page to load fully")
        self.selenium.scroll_element_into_view(btnlocator)
        self.selenium.wait_until_page_contains_element(btnlocator,timeout=60,error="Recurring Donations Modal window did not open")
        self.selenium.wait_until_element_is_visible(btnlocator,60)
    @capture_screenshot_on_error
    def click_rd2_modal_button(self, name):
        """Based on the button name (Cancel) or (Save) on the modal footer, selects and clicks on the respective button"""
        btnlocator = npsp_lex_locators["button-with-text"].format(name)
        self.builtin.sleep(2,"Wait for the elevate message to appear on the modal")
        self.selenium.wait_until_element_is_visible(btnlocator,60)
        self.selenium.scroll_element_into_view(btnlocator)
        self.selenium.click_element(btnlocator)
    @capture_screenshot_on_error
    def select_value_from_rd2_modal_dropdown(self, dropdown, value):
        """Selects given value from the dropdown field on the rd2 modal.
        Logs (rather than fails) when the dropdown is not present."""
        locator = npsp_lex_locators["erd"]["modal_dropdown_selector"].format(dropdown)
        selection_value = npsp_lex_locators["erd"]["modal_selection_value"].format(value)
        if self.npsp.check_if_element_exists(locator):
            self.selenium.set_focus_to_element(locator)
            self.selenium.wait_until_element_is_visible(locator)
            self.selenium.scroll_element_into_view(locator)
            # _jsclick opens the dropdown; then pick the requested option.
            self.salesforce._jsclick(locator)
            self.selenium.wait_until_element_is_visible(selection_value)
            self.selenium.scroll_element_into_view(selection_value)
            self.selenium.click_element(selection_value)
        else:
            self.builtin.log(f"dropdown element {dropdown} not present")
    @capture_screenshot_on_error
    def populate_rd2_modal_form(self, **kwargs):
        """Populates the RD2 modal form fields with the respective fields and
        values.  Text fields, dropdowns and lookups are each handled by the
        branch matching the field name."""
        self.builtin.sleep(1,"For Rd2 modal dropdown values to get populated")
        # NOTE(review): ns is assigned but never used in this method.
        ns=self.npsp.get_npsp_namespace_prefix()
        for key, value in kwargs.items():
            locator = npsp_lex_locators["erd"]["modal_input_field"].format(key)
            # Recurring Donation Name field only appears on a regression org hence this check
            if key == "Recurring Donation Name":
                if self.npsp.check_if_element_exists(locator):
                    self.selenium.set_focus_to_element(locator)
                    self.salesforce._populate_field(locator, value)
                else:
                    self.builtin.log(f"Element {key} not found")
            if key in ("Amount","Number of Planned Installments"):
                if self.npsp.check_if_element_exists(locator):
                    self.selenium.set_focus_to_element(locator)
                    self.salesforce._populate_field(locator, value)
                else:
                    self.builtin.log(f"Element {key} not found")
            if key in ("Donor Type","Payment Method","Day of Month","Recurring Type"):
                self.select_value_from_rd2_modal_dropdown(key, value)
            if key in ("Account", "Contact"):
                self.salesforce.populate_lookup_field(key, value)
@pageobject("Details", "npe03__Recurring_Donation__c")
class RDDetailPage(BaseNPSPPage, DetailPage):
    """Page object for the Recurring Donations (RD2) record detail page:
    editing, pausing, and validating schedules/statistics."""
    object_name = "npe03__Recurring_Donation__c"
    def _is_current_page(self):
        """Verify we are on the Recurring Donations Detail page by checking
        the URL and waiting for the Edit button to be displayed.
        Retries up to 3 times with short sleeps."""
        for i in range(3):
            time.sleep(2)
            self.selenium.location_should_contain(
                "/lightning/r/npe03__Recurring_Donation__c/",
                message="Current page is not a Recurring Donations record view",
            )
            locator = npsp_lex_locators["bge"]["button"].format("Edit")
            self.selenium.wait_until_page_contains_element(locator, error="Recurring donations Details page did not load fully")
            edit_button = self.selenium.get_webelement(locator)
            if self.npsp.check_if_element_displayed(edit_button):
                return
            else:
                time.sleep(2)
                # NOTE(review): incrementing the for-loop variable has no
                # effect on the range iteration; it is effectively dead.
                i += 1
    def refresh_opportunities(self):
        """Open the 'more actions' dropdown and wait for the
        'Refresh Opportunities' entry to appear (does not click it)."""
        locator = npsp_lex_locators["link-contains"].format("more actions")
        self.selenium.click_element(locator)
        self.selenium.wait_until_page_contains("Refresh Opportunities")
    def click_actions_button(self, button_name):
        """Clicks on action button based on API version (links pre-48, buttons after)."""
        if self.npsp.latest_api_version == 47.0:
            self.selenium.click_link(button_name)
        else:
            self.selenium.click_button(button_name)
    def go_to_recurring_donation_related_opportunities_page(self,rd_id):
        """ Navigates to the related opportunities page of the given recurring donation ID """
        objectname = "npe03__Donations__r"
        values = self.npsp.get_url_formatted_object_name(objectname)
        url = "{}/lightning/r/{}/related/{}/view".format(values['baseurl'],rd_id,objectname)
        self.selenium.go_to(url)
        self.salesforce.wait_until_loading_is_complete()
        locator = npsp_lex_locators["link-title"].format("New")
        # NOTE(review): a WebElement is passed where a locator is expected;
        # relies on SeleniumLibrary accepting element objects — confirm.
        new_button = self.selenium.get_webelement(locator)
        self.selenium.wait_until_page_contains_element(new_button, error="Recurring Donations related opportunities page did not load fully")
    @capture_screenshot_on_error
    def edit_recurring_donation_status(self, **kwargs):
        """From the actions dropdown select edit action and edit the fields specified in the kwargs
        | Example
        | Edit Recurring Donation Status
        | ...                        Recurring Period=Advanced
        | ...                        Every=3
        """
        locator = npsp_lex_locators["bge"]["button"].format("Edit")
        edit_button = self.selenium.get_webelement(locator)
        self.selenium.wait_until_element_is_visible(edit_button,60)
        self.selenium.click_element(locator)
        time.sleep(3)
        btnlocator = npsp_lex_locators["button-with-text"].format("Save")
        self.selenium.wait_until_element_is_visible(btnlocator,60)
        self._populate_edit_status_values(**kwargs)
        self.selenium.scroll_element_into_view(btnlocator)
        self.selenium.click_element(btnlocator)
    @capture_screenshot_on_error
    def _populate_edit_status_values(self, **kwargs):
        """Takes the key value pairs to edit and makes changes accordingly.
        Numeric/text fields are typed; anything else is treated as a dropdown."""
        for key, value in kwargs.items():
            if key in ("Amount", "Number of Planned Installments", "Every"):
                locator = npsp_lex_locators["erd"]["modal_input_field"].format(key)
                if self.npsp.check_if_element_exists(locator):
                    self.selenium.set_focus_to_element(locator)
                    self.salesforce._populate_field(locator, value)
                else:
                    self.builtin.log(f"Element {key} not present")
            else:
                locator = npsp_lex_locators["erd"]["modal_dropdown_selector"].format(key)
                selection_value = npsp_lex_locators["erd"]["modal_selection_value"].format(value)
                if self.npsp.check_if_element_exists(locator):
                    self.selenium.set_focus_to_element(locator)
                    self.selenium.wait_until_element_is_visible(locator)
                    self.selenium.scroll_element_into_view(locator)
                    self.salesforce._jsclick(locator)
                    self.selenium.wait_until_element_is_visible(selection_value,60)
                    self.selenium.scroll_element_into_view(selection_value)
                    self.selenium.click_element(selection_value)
                else:
                    self.builtin.log(f"Element {key} not present")
    @capture_screenshot_on_error
    def pause_recurring_donation(self, type=None):
        """Finds the pause button on the recurring donations details
        view page, clicks the button and waits for the modal to appear.
        For a Closed donation no modal is expected, so skip the wait."""
        locator = npsp_lex_locators["bge"]["button"].format("Pause")
        pause_button = self.selenium.get_webelement(locator)
        self.selenium.wait_until_element_is_visible(pause_button,60)
        self.selenium.click_element(locator)
        if type != "Closed":
            btnlocator = npsp_lex_locators["button-with-text"].format("Save")
            self.selenium.wait_until_element_is_visible(btnlocator,60)
    # NOTE(review): this first definition of populate_pause_modal is dead
    # code — it is shadowed by the second definition of the same name below.
    # It also calls is_selected() on `checkbox`, which is a locator STRING,
    # not a WebElement, so this version would raise if it ever ran.
    @capture_screenshot_on_error
    def populate_pause_modal(self,**kwargs):
        """ Populate the values in the pause recurring donation modal
        based on the key value pair options in the kwargs passed as parameter
        | Populate Pause Modal
        | ...      Paused Reason=Card Expired
        | ...      Date=${date} """
        for key, value in kwargs.items():
            if key in ("Paused Reason"):
                locator = npsp_lex_locators["erd"]["modal_dropdown_selector"].format(key)
                selection_value = npsp_lex_locators["erd"]["modal_selection_value"].format(value)
                if self.npsp.check_if_element_exists(locator):
                    self.selenium.set_focus_to_element(locator)
                    self.selenium.wait_until_element_is_visible(locator)
                    self.selenium.scroll_element_into_view(locator)
                    self.salesforce._jsclick(locator)
                    self.selenium.wait_until_element_is_visible(selection_value)
                    self.selenium.click_element(selection_value)
                else:
                    self.builtin.log(f"Element {key} not present")
            elif key in ("Date"):
                for v in value:
                    checkbox = npsp_lex_locators["erd"]["pause_date_checkbox"].format(v)
                    time.sleep(1)
                    if (checkbox.is_selected() == False):
                        self.selenium.click_element(checkbox)
                    else:
                        self.builtin.log("This checkbox is already in the expected status", "WARN")
            else:
                raise Exception("Key not supported expected keys <Paused Reason> or <Date>")
        btnlocator = npsp_lex_locators["button-with-text"].format("Save")
        self.selenium.scroll_element_into_view(btnlocator)
        self.selenium.click_element(btnlocator)
        self.salesforce.wait_until_modal_is_closed()
    @capture_screenshot_on_error
    def populate_pause_modal(self,**kwargs):
        """ Populate the values in the pause recurring donation modal
        based on the key value pair options in the kwargs passed as parameter
        | Populate Pause Modal
        | ...      Paused Reason=Card Expired
        | ...      Date=${date} """
        for key, value in kwargs.items():
            # NOTE(review): ("Paused Reason") and ("Date") below are plain
            # strings, not tuples, so `in` is a SUBSTRING test on the key.
            if key in ("Paused Reason"):
                locator = npsp_lex_locators["erd"]["modal_dropdown_selector"].format(key)
                selection_value = npsp_lex_locators["erd"]["modal_selection_value"].format(value)
                if self.npsp.check_if_element_exists(locator):
                    self.selenium.set_focus_to_element(locator)
                    self.selenium.wait_until_element_is_visible(locator)
                    self.selenium.scroll_element_into_view(locator)
                    self.salesforce._jsclick(locator)
                    self.selenium.wait_until_element_is_visible(selection_value)
                    self.selenium.click_element(selection_value)
                else:
                    self.builtin.log(f"Element {key} not present")
            if key in ("Date"):
                # Toggle the checkbox for each date in the supplied iterable.
                for date in value:
                    checkbox = npsp_lex_locators["erd"]["pause_date_checkbox"].format(date)
                    time.sleep(1)
                    self.selenium.click_element(checkbox)
            if "Validate" in key:
                self.validate_message_text(value)
        btnlocator = npsp_lex_locators["button-with-text"].format("Save")
        self.selenium.scroll_element_into_view(btnlocator)
        self.selenium.click_element(btnlocator)
        self.salesforce.wait_until_modal_is_closed()
    @capture_screenshot_on_error
    def validate_message_text(self,txt):
        """Find the element containing warning message on the pause modal and
        asserts the text displayed matches with the expected text"""
        locator = npsp_lex_locators["erd"]["warning_message"].format(txt)
        self.selenium.wait_until_element_is_visible(locator)
    @capture_screenshot_on_error
    def verify_pause_text_next_to_installment_date(self, *dates):
        """Accepts a list of dates and validates the presence of date element with the paused text next to it """
        for date in dates:
            locator = npsp_lex_locators["erd"]["date_with_paused_txt"].format(date)
            self.selenium.wait_until_element_is_visible(locator)
            self.selenium.element_text_should_be(locator, date)
    @capture_screenshot_on_error
    def verify_schedule_warning_messages_present(self):
        """Verify that the schedule warning messages are present when there are no schedules.
        Exactly two message elements are expected."""
        time.sleep(2)
        message_locator = npsp_lex_locators["erd"]["text_message"]
        list_ele = self.selenium.get_webelements(message_locator)
        p_count = len(list_ele)
        if p_count == 2:
            return
        else:
            raise Exception("Schedule warning messages do not exist")
    @capture_screenshot_on_error
    def validate_field_values_under_section(self, section=None, **kwargs):
        """Based on the section name , navigates to the sections and validates the key. value pair values passed in kwargs.
        If the section is current schedule, waits for the Current schedule section card on the side bar
        Validates the display fields in the card match with the values passed in the key value pair"""
        if section == "Current Schedule":
            active_schedule_card = npsp_lex_locators["erd"]["active_schedules_card"].format(section)
            number_fields = ["Amount", "Installment Frequency"]
            date_fields = ["Effective Date"]
            self.selenium.wait_until_element_is_visible(active_schedule_card, 60)
            for label, value in kwargs.items():
                if label in number_fields:
                    locator = npsp_lex_locators["erd"]["formatted_number"].format(label)
                    # NOTE(review): the text is read BEFORE the existence check
                    # below, so a missing field raises here instead of being
                    # logged in the else branch.
                    actual_value = self.selenium.get_webelement(locator).text
                elif label in date_fields:
                    locator = npsp_lex_locators["erd"]["formatted_date"].format(label)
                    actual_value = self.selenium.get_webelement(locator).text
                else:
                    locator = npsp_lex_locators["erd"]["formatted_text"].format(label)
                    actual_value = self.selenium.get_webelement(locator).text
                if self.npsp.check_if_element_exists(locator):
                    print(f"element exists {locator}")
                    actual_value = self.selenium.get_webelement(locator).text
                    print(f"actual value is {actual_value}")
                    self.builtin.log(f"actual value is {actual_value}")
                    assert (
                        value == actual_value
                    ), "Expected {} value to be {} but found {}".format(
                        label, value, actual_value
                    )
                else:
                    self.builtin.log("element Not found")
        else:
            for label, value in kwargs.items():
                self.npsp.navigate_to_and_validate_field_value(
                    label, "contains", value, section
                )
    def get_next_payment_date_number(self, paynum, format=True):
        """Returns the next payment date from the list of payment schedules taking in the payment number as input.
        The date is formatted to drop leading zeros; if format is set to False the date is returned
        as is without any formatting.
        |Example
        | Get Next Payment Date Number    2    #gets the 2nd installment payment date from the list of payment dates
        | Get Next Payment Date Number    1    False    # gets the 1st installment payment date without any formatting
        """
        datefield = npsp_lex_locators["erd"]["installment_date"].format(int(paynum))
        installment_date = self.selenium.get_webelement(datefield).text
        # This is to format the date by removing the leading 0 which is being the common format across
        # 01/06/2020 -> 1/6/2020
        if format == True:
            tokens = installment_date.split('/')
            dd = tokens[0].lstrip('0')
            mm = tokens[1].lstrip('0')
            newString = f"{dd}/{mm}/{tokens[2]}"
        else:
            # NOTE(review): this branch splits and rejoins without change —
            # it returns the date exactly as displayed.
            tokens = installment_date.split('/')
            dd = tokens[0]
            mm = tokens[1]
            newString = f"{dd}/{mm}/{tokens[2]}"
        return newString
    @capture_screenshot_on_error
    def validate_current_and_next_year_values(self, amount, rdtype=None):
        """Takes in the parameter current installment payment (amount) and an optional field of the rd type
        calculates the current and next year value payments and validates them with the values displayed on the UI. """
        installmentrow = npsp_lex_locators["erd"]["installment_row"]
        installments = self.selenium.get_webelements(installmentrow)
        count = len(installments)
        if count == 0:
            raise Exception("Zero installments found")
        else:
            print(f"Number of installments created is {count}")
        i = 1
        curr_year_value = 0
        next_year_value = 0
        next_year_count = 0
        values = {}
        while i <= count:
            datefield = npsp_lex_locators["erd"]["installment_date"].format(i)
            installment_date = self.selenium.get_webelement(datefield)
            # NOTE(review): installment_date is already a WebElement; passing
            # it back into get_webelement relies on SeleniumLibrary accepting
            # element objects as locators — confirm.
            actual_date = self.selenium.get_webelement(installment_date).text
            paused_locator = npsp_lex_locators["erd"]["date_with_paused_txt"].format(actual_date)
            year = datetime.strptime(actual_date, "%m/%d/%Y").year
            curr_year = datetime.now().year
            next_year = (datetime.now() + relativedelta(years=1)).year
            # increment the current year value if there is no paused text next to installment date
            if curr_year == year and not self.npsp.check_if_element_exists(paused_locator):
                curr_year_value = curr_year_value + int(amount)
            # increment the next year value if there is no paused text next to installment date
            elif next_year == year and not self.npsp.check_if_element_exists(paused_locator):
                next_year_value = next_year_value + int(amount)
            if next_year == year:
                next_year_count = next_year_count + 1
            i = i + 1
        # This logic handles the scenario if the recurring donation is of type open, the entire year installments
        # are accounted in the calculation for next year value
        if rdtype == "Open":
            next_year_value = next_year_value + (12-next_year_count)*int(amount)
        values['Current Year Value']=f"${curr_year_value}.00"
        # Next Year Value validation is intentionally disabled below.
        #values['Next Year Value']=f"${ next_year_value}.00"
        self.validate_field_values_under_section("Statistics",**values)
    @capture_screenshot_on_error
    def validate_upcoming_schedules(self, num_payments, startdate, dayofmonth):
        """Takes in the parameter (number of payments) and the donation start date
        verifies that the payment schedules created on UI reflect the total number
        verifies that the next payment dates are reflected correctly for all the schedules.
        dayofmonth is accepted but unused."""
        installmentrow = npsp_lex_locators["erd"]["installment_row"]
        installments = self.selenium.get_webelements(installmentrow)
        count = len(installments)
        print(f"Number of installments created is {count}")
        assert count == int(num_payments), "Expected installments to be {} but found {}".format(num_payments, count)
        date_object = datetime.strptime(startdate, "%m/%d/%Y").date()
        # Create a list to store expected dates and actual dates from the ui. Compare the dates and throw exception
        expected_dates = []
        actual_dates = []
        if count == int(num_payments):
            i = 1
            j = 1
            while i <= count:
                datefield = npsp_lex_locators["erd"]["installment_date"].format(i)
                installment_date = self.selenium.get_webelement(datefield)
                actual_date = self.selenium.get_webelement(installment_date).text
                formatted_actual = datetime.strptime(actual_date, "%m/%d/%Y").date()
                actual_dates.append(formatted_actual)
                i = i + 1
            while j <= count+1:
                expected_date = date_object
                expected_dates.append(expected_date)
                date_object = (expected_date + relativedelta(months=+1))
                j = j + 1
            # NOTE(review): any() passes when a single date matches; the
            # docstring implies all dates should match (all()) — confirm intent.
            check = any(item in expected_dates for item in actual_dates)
            assert (
                check == True
            ), "expected_dates {} doesn't match the actual_dates {}".format(
                expected_date, formatted_actual
            )
        else:
            raise Exception("Number of payment installments do not match.")
from django.core.management.base import BaseCommand
from django.conf import settings
from django.contrib.auth.models import User
from django.db import transaction
from sys import stderr
from cyder.core.ctnr.models import Ctnr, CtnrUser
from cyder.core.system.models import System, SystemKeyValue
from cyder.cydns.domain.models import Domain
from cyder.cydhcp.constants import (ALLOW_ANY, ALLOW_KNOWN, ALLOW_VRF,
ALLOW_LEGACY, ALLOW_LEGACY_AND_VRF)
from cyder.cydhcp.interface.dynamic_intr.models import (DynamicInterface,
DynamicIntrKeyValue)
from cyder.cydhcp.network.models import Network, NetworkKeyValue
from cyder.cydhcp.range.models import Range, RangeKeyValue
from cyder.cydhcp.site.models import Site
from cyder.cydhcp.vlan.models import Vlan
from cyder.cydhcp.vrf.models import Vrf
from cyder.cydhcp.workgroup.models import Workgroup, WorkgroupKeyValue
import ipaddr
import MySQLdb
from optparse import make_option
from lib.utilities import long2ip
# Lookup caches shared by the migration helpers (populated elsewhere in this script).
cached = {}
host_option_values = None
# Range start addresses whose ranges are migrated with the ALLOW_ANY policy.
allow_all_subnets = [
    '10.192.76.2', '10.192.103.150', '10.192.15.2',
    '10.197.32.0', '10.192.148.32', '10.192.144.32', '10.192.140.32',
    '10.196.0.32', '10.196.4.32', '10.192.136.63', '10.196.8.8',
    '10.196.16.8', '10.196.24.8', '10.196.32.8', '10.196.40.8',
    '10.162.128.32', '10.162.136.32', '10.162.144.32', '10.198.0.80',
    '10.198.0.140', '10.192.131.9', '10.255.255.255']
class NotInMaintain(Exception):
    """Signals a record missing from the legacy Maintain database
    (name-based; not raised anywhere in this chunk of the script)."""
def calc_prefixlen(netmask):
    """Return the CIDR prefix length of *netmask*, i.e. the number of set
    bits in its integer representation (e.g. 0xFFFFFF00 -> 24)."""
    return bin(netmask).count('1')
# Single module-level connection to the legacy Maintain MySQL database;
# credentials come from the Django settings (MIGRATION_* values).  The shared
# cursor is reused by every helper below.
connection = MySQLdb.connect(host=settings.MIGRATION_HOST,
                             user=settings.MIGRATION_USER,
                             passwd=settings.MIGRATION_PASSWD,
                             db=settings.MIGRATION_DB,
                             charset='utf8')
cursor = connection.cursor()
def clean_zone_name(name):
    """Normalize a Maintain zone name: remove all embedded spaces, then
    drop a leading 'zone.' prefix if present."""
    cleaned = name.replace(' ', '')
    return cleaned[5:] if cleaned.startswith("zone.") else cleaned
def create_subnet(subnet_id, name, subnet, netmask, status, vlan):
    """
    Takes a row from the Maintain subnet table
    returns a new network object and creates the vlan it is associated with.

    Returns a (Network, created) tuple.  The `name` and `status` columns of
    the row are accepted but not used.  All networks are attached to the
    'Campus' site, and any subnet-level DHCP options are copied over as
    NetworkKeyValue entries.
    """
    prefixlen = str(calc_prefixlen(netmask))
    # Mask off host bits to get the network address.
    network = str(ipaddr.IPv4Address(subnet & netmask))
    s, _ = Site.objects.get_or_create(name='Campus')
    v = None
    # NOTE(review): SQL is built by string interpolation throughout this
    # script.  Values come from the legacy DB / settings, but parameterized
    # queries would still be safer.
    if cursor.execute("SELECT * "
                      "FROM vlan "
                      "WHERE vlan_id = %s" % vlan):
        vlan_id, vlan_name, vlan_number = cursor.fetchone()
        v = Vlan.objects.get(name=vlan_name)
    n, created = Network.objects.get_or_create(
        network_str=network + '/' + prefixlen, ip_type='4',
        site=s, vlan=v)
    cursor.execute("SELECT dhcp_option, value "
                   "FROM object_option "
                   "WHERE object_id = {0} "
                   "AND type = 'subnet'".format(subnet_id))
    results = cursor.fetchall()
    for dhcp_option, value in results:
        cursor.execute("SELECT name, type "
                       "FROM dhcp_options "
                       "WHERE id = {0}".format(dhcp_option))
        # `name` rebinds the (unused) parameter; `type` shadows the builtin.
        name, type = cursor.fetchone()
        kv, _ = NetworkKeyValue.objects.get_or_create(
            value=str(value), key=name, network=n)
    return (n, created)
def create_range(range_id, start, end, range_type, subnet_id, comment, en, known):
    """
    Takes a row from the Maintain range table
    returns a range which is saved in cyder.

    Returns a (Range, created) tuple.  The allow policy is chosen from the
    range's start address: ALLOW_ANY for addresses in allow_all_subnets,
    ALLOW_KNOWN when `known` is set, special-cased VRF hacks for two literal
    addresses, and ALLOW_LEGACY otherwise.  A range that does not fit inside
    its parent network is created as a reserved, network-less range.
    """
    # Set the allow statement
    n = None
    r = None
    r_type = 'st' if range_type == 'static' else 'dy'
    allow = ALLOW_LEGACY
    if cursor.execute("SELECT * FROM subnet WHERE id = {0}".format(subnet_id)):
        id, name, subnet, netmask, status, vlan = cursor.fetchone()
        n = Network.objects.get(ip_lower=subnet,
                                prefixlen=str(calc_prefixlen(netmask)))
        n.update_network()
        if str(ipaddr.IPv4Address(start)) in allow_all_subnets:
            allow = ALLOW_ANY
        elif known:
            allow = ALLOW_KNOWN
        elif '128.193.177.71' == str(ipaddr.IPv4Address(start)):
            # Hard-coded VRF workaround for the IP-phone range.
            allow = ALLOW_LEGACY_AND_VRF
            v, _ = Vrf.objects.get_or_create(name="ip-phones-hack")
            n.vrf = v
            n.save()
        elif '128.193.166.81' == str(ipaddr.IPv4Address(start)):
            # Hard-coded VRF workaround for the Avaya range.
            allow = ALLOW_LEGACY_AND_VRF
            v, _ = Vrf.objects.get_or_create(name="avaya-hack")
            n.vrf = v
            n.save()
        # Only attach the range to the network if it fits strictly inside it.
        if int(n.network.network) < start < end < int(n.network.broadcast):
            r, created = Range.objects.get_or_create(
                start_lower=start, start_str=ipaddr.IPv4Address(start),
                end_lower=end, end_str=ipaddr.IPv4Address(end),
                range_type=r_type, allow=allow, ip_type='4',
                network=n)
    if not r:
        # No parent network (or range didn't fit): create it as reserved.
        r, created = Range.objects.get_or_create(
            start_lower=start, start_str=ipaddr.IPv4Address(start),
            end_lower=end, end_str=ipaddr.IPv4Address(end),
            is_reserved=True, range_type=r_type, allow=allow, ip_type='4')
    if '128.193.166.81' == str(ipaddr.IPv4Address(start)):
        rk, _ = RangeKeyValue.objects.get_or_create(
            range=r, value='L2Q=1,L2QVLAN=503', key='ipphone242',
            is_option=True, is_quoted=True)
    return (r, created)
def migrate_subnets():
    """Copy every row of the Maintain `subnet` table into cyder Networks
    via create_subnet, then print a created/migrated summary.  (Python 2.)"""
    print "Migrating subnets."
    migrated = []
    cursor.execute("SELECT * FROM subnet")
    results = cursor.fetchall()
    for row in results:
        migrated.append(create_subnet(*row))
    print ("Records in Maintain {0}\n"
           "Records Migrated {1}\n"
           "Records created {2}".format(
               len(results),
               len(migrated),
               len([y for x, y in migrated if y])))
def migrate_ranges():
    """Copy every row of the Maintain `ranges` table into cyder Ranges
    via create_range, then print a created/migrated summary.  (Python 2.)"""
    print "Migrating ranges."
    cursor.execute("SELECT id, start, end, type, subnet, comment, enabled, "
                   "allow_all_hosts "
                   "FROM `ranges`")
    results = cursor.fetchall()
    migrated = []
    for row in results:
        migrated.append(create_range(*row))
    print ("Records in Maintain {0}\n"
           "Records Migrated {1}\n"
           "Records created {2}".format(
               len(results),
               len(migrated),
               len([y for x, y in migrated if y])))
def migrate_vlans():
    """Copy every row of the Maintain `vlan` table into cyder Vlans,
    then print a created/migrated summary.  (Python 2.)"""
    print "Migrating VLANs."
    cursor.execute("SELECT * FROM vlan")
    results = cursor.fetchall()
    migrated = []
    for _, name, number in results:
        migrated.append(Vlan.objects.get_or_create(name=name, number=number))
    print ("Records in Maintain {0}\n"
           "Records Migrated {1}\n"
           "Records created {2}".format(
               len(results),
               len(migrated),
               len([y for x, y in migrated if y])))
def migrate_workgroups():
    """Copy every row of the Maintain `workgroup` table into cyder
    Workgroups, carrying workgroup-level DHCP options over as
    WorkgroupKeyValue entries, then print a summary.  (Python 2.)"""
    print "Migrating workgroups."
    cursor.execute("SELECT * FROM workgroup")
    results = cursor.fetchall()
    migrated = []
    for id, name in results:
        w, created = Workgroup.objects.get_or_create(name=name)
        cursor.execute("SELECT dhcp_option, value "
                       "FROM object_option "
                       "WHERE object_id = {0} "
                       "AND type = 'workgroup'".format(id))
        _results = cursor.fetchall()
        for dhcp_option, value in _results:
            cursor.execute("SELECT name, type "
                           "FROM dhcp_options "
                           "WHERE id = {0}".format(dhcp_option))
            # `name` rebinds the workgroup name; `type` shadows the builtin.
            name, type = cursor.fetchone()
            kv, _ = WorkgroupKeyValue.objects.get_or_create(
                value=value, key=name, workgroup=w)
        migrated.append((w, created))
    print ("Records in Maintain {0}\n"
           "Records Migrated {1}\n"
           "Records created {2}".format(
               len(results),
               len(migrated),
               len([y for x, y in migrated if y])))
def migrate_zones():
    """Create a cyder Ctnr for every Maintain `zone` row (name cleaned via
    clean_zone_name; description falls back from comment to desc), then
    print a summary.  `allow_blank_mac` is fetched but unused.  (Python 2.)"""
    print "Migrating containers."
    cursor.execute("SELECT name, description, comment, "
                   "support_mail, allow_blank_ha "
                   "FROM zone")
    migrated = []
    results = cursor.fetchall()
    for name, desc, comment, email_contact, allow_blank_mac in results:
        name = clean_zone_name(name)
        migrated.append(
            Ctnr.objects.get_or_create(
                name=name,
                description=comment or desc,
                email_contact=email_contact or ''))
    print ("Records in Maintain {0}\n"
           "Records Migrated {1}\n"
           "Records created {2}".format(
               len(results),
               len(migrated),
               len([y for x, y in migrated if y])))
@transaction.commit_on_success
def migrate_dynamic_hosts():
print "Migrating dynamic hosts."
cursor.execute("SELECT dynamic_range, name, domain, ha, location, "
"workgroup, zone, enabled, last_seen "
"FROM host WHERE ip = 0")
sys_value_keys = {"type": "Hardware Type",
"os": "Operating System",
"location": "Location",
"department": "Department",
"serial": "Serial Number",
"other_id": "Other ID",
"purchase_date": "Purchase Date",
"po_number": "PO Number",
"warranty_date": "Warranty Date",
"owning_unit": "Owning Unit",
"user_id": "User ID"}
keys = ("id", "dynamic_range", "name", "workgroup", "enabled", "ha",
"type", "os", "location", "department", "serial", "other_id",
"purchase_date", "po_number", "warranty_date", "owning_unit",
"user_id", "last_seen", "expire", "ttl", "last_update", "domain",
"zone")
sql = "SELECT %s FROM host WHERE ip = 0" % ", ".join(keys)
count = 0
cursor.execute(sql)
for values in cursor.fetchall():
items = dict(zip(keys, values))
enabled = items['enabled']
mac = items['ha']
if len(mac) != 12 or mac == '0' * 12:
mac = ""
if mac == "":
enabled = False
r = maintain_find_range(items['dynamic_range'])
c = maintain_find_zone(items['zone'])
d = maintain_find_domain(items['domain'])
w = maintain_find_workgroup(items['workgroup'])
if not all([r, c, d]):
stderr.write("Trouble migrating host with mac {0}\n"
.format(items['ha']))
continue
s = System(name=items['name'])
s.save()
for key in sys_value_keys.keys():
value = items[key]
if not value or value == '0':
continue
kv = SystemKeyValue(system=s, key=sys_value_keys[key],
value=value)
kv.clean()
kv.save()
intr, _ = DynamicInterface.objects.get_or_create(
range=r, workgroup=w, ctnr=c, domain=d, mac=mac, system=s,
dhcp_enabled=enabled, dns_enabled=enabled,
last_seen=items['last_seen'])
for key, value in get_host_option_values(items['id']):
kv = DynamicIntrKeyValue(dynamic_interface=intr,
key=key, value=value)
kv.clean()
kv.save()
count += 1
if not count % 1000:
print "%s valid hosts found so far." % count
print "%s valid hosts found. Committing transaction." % count
def migrate_user():
    """Create a cyder User for every Maintain user present in zone_user."""
    print "Migrating users."
    cursor.execute("SELECT username FROM user "
                   "WHERE username IN ( "
                   "SELECT DISTINCT username FROM zone_user )")
    result = cursor.fetchall()
    for username, in result:
        # Usernames are normalised to lowercase on the cyder side.
        username = username.lower()
        user, _ = User.objects.get_or_create(username=username)
def migrate_zone_user():
    """Copy Maintain zone/user permissions into CtnrUser relations.

    Maintain privilege levels (5/25/50/100) are collapsed onto cyder's
    0-2 scale; a zone id of 0 marks a global superuser.
    """
    print "Migrating user-container relationship."
    NEW_LEVEL = {5: 0, 25: 1, 50: 2, 100: 2}  # Maintain level -> cyder level
    cursor.execute("SELECT * FROM zone_user")
    result = cursor.fetchall()
    for _, username, zone_id, level in result:
        username = username.lower()
        level = NEW_LEVEL[level]
        user, _ = User.objects.get_or_create(username=username)
        if zone_id == 0:
            # Zone 0 means "all zones": attach to the global Ctnr fixture.
            ctnr = Ctnr.objects.get(pk=1)
            user.is_superuser = True
            user.save()
        else:
            ctnr = maintain_find_zone(zone_id)
        if not ctnr:
            continue
        CtnrUser.objects.get_or_create(user=user, ctnr=ctnr, level=level)
def migrate_zone_range():
    """Link each migrated Ctnr to the Ranges its Maintain zone had."""
    print "Migrating container-range relationship."
    cursor.execute("SELECT * FROM zone_range")
    result = cursor.fetchall()
    for _, zone_id, range_id, _, comment, _ in result:
        c = maintain_find_zone(zone_id)
        r = maintain_find_range(range_id)
        # Skip relations whose endpoints were not migrated.
        if not (c and r):
            continue
        c.ranges.add(r)
        c.save()
def migrate_zone_domain():
    """Link each migrated Ctnr to the Domains of its Maintain zone."""
    print "Migrating container-domain relationship."
    cursor.execute("SELECT zone, domain FROM zone_domain")
    results = cursor.fetchall()
    for zone_id, domain_id in results:
        ctnr = maintain_find_zone(zone_id)
        domain = maintain_find_domain(domain_id)
        # Skip relations whose endpoints were not migrated.
        if not ctnr or not domain:
            continue
        ctnr.domains.add(domain)
        ctnr.save()
def migrate_zone_reverse():
    """Attach reverse (in-addr.arpa) domains to their containers."""
    print "Migrating container-reverse_domain relationship."
    cursor.execute("SELECT ip,zone FROM pointer WHERE type='reverse'")
    results = cursor.fetchall()
    for ip, zone_id in results:
        ctnr = maintain_find_zone(zone_id)
        if not ctnr:
            continue
        # Reverse the dotted quad: 1.2.3.4 -> 4.3.2.1.in-addr.arpa
        doctets = []
        octets = long2ip(ip).split(".")
        for octet in octets:
            doctets = [octet] + doctets
        dname = ".".join(doctets) + ".in-addr.arpa"
        domain, _ = Domain.objects.get_or_create(name=dname,
                                                 is_reverse=True)
        ctnr.domains.add(domain)
        ctnr.save()
def migrate_zone_workgroup():
    """Link each migrated Ctnr to the Workgroups of its Maintain zone."""
    print "Migrating container-workgroup relationship."
    cursor.execute("SELECT * FROM zone_workgroup")
    result = cursor.fetchall()
    for _, workgroup_id, zone_id, _ in result:
        c = maintain_find_zone(zone_id)
        w = maintain_find_workgroup(workgroup_id)
        # Skip relations whose endpoints were not migrated.
        if not (c and w):
            continue
        c.workgroups.add(w)
        c.save()
def maintain_find_range(range_id):
    """Return the cyder Range matching a Maintain range id, or None."""
    start, end = maintain_get_cached('ranges', ['start', 'end'], range_id)
    if start and end:
        return Range.objects.get(start_lower=start, end_lower=end)
def maintain_find_domain(domain_id):
    """Return the cyder Domain matching a Maintain domain id, or None."""
    (name,) = maintain_get_cached('domain', ['name'], domain_id)
    if name:
        return Domain.objects.get(name=name)
def maintain_find_workgroup(workgroup_id):
    """Return the cyder Workgroup matching a Maintain workgroup id, or None."""
    (name,) = maintain_get_cached('workgroup', ['name'], workgroup_id)
    if name:
        return Workgroup.objects.get(name=name)
def maintain_find_zone(zone_id):
    """Return the cyder Ctnr matching a Maintain zone id, or None."""
    (name,) = maintain_get_cached('zone', ['name'], zone_id)
    if name:
        name = clean_zone_name(name)
        try:
            return Ctnr.objects.get(name=name)
        except Ctnr.DoesNotExist:
            return None
def maintain_get_cached(table, columns, object_id):
    """Look up *columns* for *object_id* in *table*, caching whole tables.

    On first access for a (table, columns) pair the entire table is read
    into the module-level ``cached`` dict.  Returns the tuple of column
    values, or a generator yielding None per column when the id is absent
    (callers immediately unpack it, so it behaves like a tuple of Nones).
    """
    global cached
    columns = tuple(columns)
    if (table, columns) not in cached:
        sql = "SELECT id, %s FROM %s" % (", ".join(columns), table)
        print "Caching: %s" % sql
        cursor.execute(sql)
        results = cursor.fetchall()
        results = [(r[0], tuple(r[1:])) for r in results]
        cached[(table, columns)] = dict(results)
    if object_id in cached[(table, columns)]:
        return cached[(table, columns)][object_id]
    else:
        return (None for _ in columns)
def get_host_option_values(host_id):
    """Return the set of (option name, value) pairs for a Maintain host.

    The whole host/dhcp_options join is fetched once and memoised in the
    module-level ``host_option_values`` dict; later calls are lookups.
    Returns an empty list for hosts with no options.
    """
    global host_option_values
    if host_option_values is None:
        host_option_values = {}
        sql = ("SELECT {0}.id, {1}.name, {2}.value FROM {0} "
               "INNER JOIN {2} ON {2}.object_id = {0}.id "
               "INNER JOIN {1} ON {1}.id = {2}.dhcp_option "
               "WHERE {2}.type = '{3}'")
        sql = sql.format("host", "dhcp_options", "object_option", "host")
        print "Caching: %s" % sql
        cursor.execute(sql)
        results = cursor.fetchall()
        for h_id, name, value in results:
            if h_id not in host_option_values:
                host_option_values[h_id] = set([])
            host_option_values[h_id].add((name, value))
    if host_id in host_option_values:
        return host_option_values[host_id]
    else:
        return []
def migrate_all(skip=False):
    """Run every migration step in dependency order.

    :param skip: when True, skip the (slow) dynamic host migration.
    """
    migrate_vlans()
    migrate_zones()
    migrate_workgroups()
    migrate_subnets()
    migrate_ranges()
    if not skip:
        migrate_dynamic_hosts()
    migrate_zone_range()
    migrate_zone_workgroup()
    migrate_zone_domain()
    migrate_zone_reverse()
    migrate_user()
    migrate_zone_user()
def delete_all():
    """Remove previously migrated objects, preserving fixture rows."""
    Range.objects.all().delete()
    Vlan.objects.all().delete()
    Network.objects.all().delete()
    Vrf.objects.all().delete()
    Ctnr.objects.filter(id__gt=2).delete()  # First 2 are fixtures
    DynamicInterface.objects.all().delete()
    Workgroup.objects.all().delete()
    User.objects.filter(id__gt=1).delete()  # First user is a fixture
    CtnrUser.objects.filter(id__gt=2).delete()  # First 2 are fixtures
def do_everything(skip=False):
    """Wipe previously migrated data, then re-run the full migration."""
    delete_all()
    migrate_all(skip)
class Command(BaseCommand):
    """Management command: migrate data from the legacy Maintain MySQL
    database into cyder's models.  Individual steps can be selected via
    flags, or everything run at once with -a."""
    option_list = BaseCommand.option_list + (
        make_option('-D', '--delete',
                    action='store_true',
                    dest='delete',
                    default=False,
                    help='Delete things'),
        make_option('-a', '--all',
                    action='store_true',
                    dest='all',
                    default=False,
                    help='Migrate everything'),
        make_option('-S', '--skip',
                    action='store_true',
                    dest='skip',
                    default=False,
                    help='Ignore dynamic hosts when using -a option'),
        make_option('-n', '--vlan',
                    action='store_true',
                    dest='vlan',
                    default=False,
                    help='Migrate vlans'),
        make_option('-Z', '--zone',
                    action='store_true',
                    dest='zone',
                    default=False,
                    help='Migrate zones to ctnrs'),
        make_option('-w', '--workgroup',
                    action='store_true',
                    dest='workgroup',
                    default=False,
                    help='Migrate workgroups'),
        make_option('-s', '--subnet',
                    action='store_true',
                    dest='subnet',
                    default=False,
                    help='Migrate subnets'),
        make_option('-r', '--range',
                    action='store_true',
                    dest='range',
                    default=False,
                    help='Migrate ranges'),
        make_option('-d', '--dynamic',
                    action='store_true',
                    dest='dynamic',
                    default=False,
                    help='Migrate dynamic interfaces'),
        make_option('-R', '--zone-range',
                    action='store_true',
                    dest='zone-range',
                    default=False,
                    help='Migrate zone/range relationship'),
        make_option('-W', '--zone-workgroup',
                    action='store_true',
                    dest='zone-workgroup',
                    default=False,
                    help='Migrate zone/workgroup relationship'),
        make_option('-z', '--zone-domain',
                    action='store_true',
                    dest='zone-domain',
                    default=False,
                    help='Migrate zone/domain relationship'),
        make_option('-e', '--zone-reverse',
                    action='store_true',
                    dest='zone-reverse',
                    default=False,
                    help='Migrate zone/reverse domain relationship'),
        make_option('-u', '--user',
                    action='store_true',
                    dest='user',
                    default=False,
                    help='Migrate users'),
        make_option('-U', '--zone-user',
                    action='store_true',
                    dest='zone-user',
                    default=False,
                    help='Migrate zone/user relationship'))

    def handle(self, **options):
        # NOTE: several dests contain hyphens ('zone-range' etc.), so they
        # are only reachable through the options dict, never as keywords.
        if options['delete']:
            delete_all()
        if options['all']:
            migrate_all(skip=options['skip'])
        else:
            if options['vlan']:
                migrate_vlans()
            if options['zone']:
                migrate_zones()
            if options['workgroup']:
                migrate_workgroups()
            if options['subnet']:
                migrate_subnets()
            if options['range']:
                migrate_ranges()
            if options['dynamic']:
                migrate_dynamic_hosts()
            if options['zone-range']:
                migrate_zone_range()
            if options['zone-workgroup']:
                migrate_zone_workgroup()
            if options['zone-domain']:
                migrate_zone_domain()
            if options['zone-reverse']:
                migrate_zone_reverse()
            if options['user']:
                migrate_user()
            if options['zone-user']:
                migrate_zone_user()
Don't access non-existent fields
from django.core.management.base import BaseCommand
from django.conf import settings
from django.contrib.auth.models import User
from django.db import transaction
from sys import stderr
from cyder.core.ctnr.models import Ctnr, CtnrUser
from cyder.core.system.models import System, SystemKeyValue
from cyder.cydns.domain.models import Domain
from cyder.cydhcp.constants import (ALLOW_ANY, ALLOW_KNOWN, ALLOW_VRF,
ALLOW_LEGACY, ALLOW_LEGACY_AND_VRF)
from cyder.cydhcp.interface.dynamic_intr.models import (DynamicInterface,
DynamicIntrKeyValue)
from cyder.cydhcp.network.models import Network, NetworkKeyValue
from cyder.cydhcp.range.models import Range, RangeKeyValue
from cyder.cydhcp.site.models import Site
from cyder.cydhcp.vlan.models import Vlan
from cyder.cydhcp.vrf.models import Vrf
from cyder.cydhcp.workgroup.models import Workgroup, WorkgroupKeyValue
import ipaddr
import MySQLdb
from optparse import make_option
from lib.utilities import long2ip
# (table, columns) -> {id: tuple_of_values}; populated by maintain_get_cached.
cached = {}
# Lazy cache: Maintain host id -> set of (option name, value); see
# get_host_option_values.
host_option_values = None
# Subnets whose ranges get an ALLOW_ANY policy during migration.
allow_all_subnets = [
    '10.192.76.2', '10.192.103.150', '10.192.15.2',
    '10.197.32.0', '10.192.148.32', '10.192.144.32', '10.192.140.32',
    '10.196.0.32', '10.196.4.32', '10.192.136.63', '10.196.8.8',
    '10.196.16.8', '10.196.24.8', '10.196.32.8', '10.196.40.8',
    '10.162.128.32', '10.162.136.32', '10.162.144.32', '10.198.0.80',
    '10.198.0.140', '10.192.131.9', '10.255.255.255']
class NotInMaintain(Exception):
    """Raised when a referenced record cannot be found in the Maintain DB."""
def calc_prefixlen(netmask):
    """Return the number of set bits in *netmask*, i.e. its CIDR prefix
    length when given a contiguous network mask."""
    count = 0
    while netmask:
        netmask &= netmask - 1  # Kernighan's trick: clear the lowest set bit
        count += 1
    return count
# Connection to the legacy Maintain MySQL database; the settings module
# must define the MIGRATION_* values.
connection = MySQLdb.connect(host=settings.MIGRATION_HOST,
                             user=settings.MIGRATION_USER,
                             passwd=settings.MIGRATION_PASSWD,
                             db=settings.MIGRATION_DB,
                             charset='utf8')
# Module-level cursor shared by every migrator in this file.
cursor = connection.cursor()
def clean_zone_name(name):
    """Normalise a Maintain zone name: drop embedded spaces and strip a
    leading "zone." prefix if present."""
    cleaned = name.replace(' ', '')
    if cleaned.startswith("zone."):
        cleaned = cleaned[5:]
    return cleaned
def create_subnet(subnet_id, name, subnet, netmask, status, vlan):
    """
    Takes a row from the Maintain subnet table
    returns a new network object and creates the vlan it is associated with
    """
    prefixlen = str(calc_prefixlen(netmask))
    # Network address of the subnet (subnet AND netmask).
    network = str(ipaddr.IPv4Address(subnet & netmask))
    s, _ = Site.objects.get_or_create(name='Campus')
    v = None
    # cursor.execute returns the affected row count, so this tests whether
    # Maintain knows the vlan before attaching it.
    if cursor.execute("SELECT * "
                      "FROM vlan "
                      "WHERE vlan_id = %s" % vlan):
        vlan_id, vlan_name, vlan_number = cursor.fetchone()
        v = Vlan.objects.get(name=vlan_name)
    n, created = Network.objects.get_or_create(
        network_str=network + '/' + prefixlen, ip_type='4',
        site=s, vlan=v)
    # Copy the subnet's DHCP options across as NetworkKeyValue pairs.
    cursor.execute("SELECT dhcp_option, value "
                   "FROM object_option "
                   "WHERE object_id = {0} "
                   "AND type = 'subnet'".format(subnet_id))
    results = cursor.fetchall()
    for dhcp_option, value in results:
        cursor.execute("SELECT name, type "
                       "FROM dhcp_options "
                       "WHERE id = {0}".format(dhcp_option))
        name, type = cursor.fetchone()
        kv, _ = NetworkKeyValue.objects.get_or_create(
            value=str(value), key=name, network=n)
    return (n, created)
def create_range(range_id, start, end, range_type, subnet_id, comment, en, known):
    """
    Takes a row from the Maintain range table
    returns a range which is saved in cyder
    """
    # Set the allow statement
    n = None
    r = None
    r_type = 'st' if range_type == 'static' else 'dy'
    allow = ALLOW_LEGACY
    # cursor.execute returns the row count; non-zero means the subnet exists.
    if cursor.execute("SELECT * FROM subnet WHERE id = {0}".format(subnet_id)):
        id, name, subnet, netmask, status, vlan = cursor.fetchone()
        n = Network.objects.get(ip_lower=subnet,
                                prefixlen=str(calc_prefixlen(netmask)))
        n.update_network()
        # Special-case allow policies for well-known subnets / ranges.
        if str(ipaddr.IPv4Address(start)) in allow_all_subnets:
            allow = ALLOW_ANY
        elif known:
            allow = ALLOW_KNOWN
        elif '128.193.177.71' == str(ipaddr.IPv4Address(start)):
            allow = ALLOW_LEGACY_AND_VRF
            v, _ = Vrf.objects.get_or_create(name="ip-phones-hack")
            n.vrf = v
            n.save()
        elif '128.193.166.81' == str(ipaddr.IPv4Address(start)):
            allow = ALLOW_LEGACY_AND_VRF
            v, _ = Vrf.objects.get_or_create(name="avaya-hack")
            n.vrf = v
            n.save()
        # Only attach the range to the network when it fits strictly
        # between the network and broadcast addresses.
        if int(n.network.network) < start < end < int(n.network.broadcast):
            r, created = Range.objects.get_or_create(
                start_lower=start, start_str=ipaddr.IPv4Address(start),
                end_lower=end, end_str=ipaddr.IPv4Address(end),
                range_type=r_type, allow=allow, ip_type='4',
                network=n)
    if not r:
        # No containing network was found: store the range as reserved.
        r, created = Range.objects.get_or_create(
            start_lower=start, start_str=ipaddr.IPv4Address(start),
            end_lower=end, end_str=ipaddr.IPv4Address(end),
            is_reserved=True, range_type=r_type, allow=allow, ip_type='4')
    if '128.193.166.81' == str(ipaddr.IPv4Address(start)):
        rk, _ = RangeKeyValue.objects.get_or_create(
            range=r, value='L2Q=1,L2QVLAN=503', key='ipphone242',
            is_option=True, is_quoted=True)
    return (r, created)
def migrate_subnets():
print "Migrating subnets."
migrated = []
cursor.execute("SELECT * FROM subnet")
results = cursor.fetchall()
for row in results:
migrated.append(create_subnet(*row))
print ("Records in Maintain {0}\n"
"Records Migrated {1}\n"
"Records created {2}".format(
len(results),
len(migrated),
len([y for x, y in migrated if y])))
def migrate_ranges():
print "Migrating ranges."
cursor.execute("SELECT id, start, end, type, subnet, comment, enabled, "
"allow_all_hosts "
"FROM `ranges`")
results = cursor.fetchall()
migrated = []
for row in results:
migrated.append(create_range(*row))
print ("Records in Maintain {0}\n"
"Records Migrated {1}\n"
"Records created {2}".format(
len(results),
len(migrated),
len([y for x, y in migrated if y])))
def migrate_vlans():
print "Migrating VLANs."
cursor.execute("SELECT * FROM vlan")
results = cursor.fetchall()
migrated = []
for _, name, number in results:
migrated.append(Vlan.objects.get_or_create(name=name, number=number))
print ("Records in Maintain {0}\n"
"Records Migrated {1}\n"
"Records created {2}".format(
len(results),
len(migrated),
len([y for x, y in migrated if y])))
def migrate_workgroups():
    """Copy Maintain workgroups and their DHCP options into cyder."""
    print "Migrating workgroups."
    cursor.execute("SELECT * FROM workgroup")
    results = cursor.fetchall()
    migrated = []
    for id, name in results:
        w, created = Workgroup.objects.get_or_create(name=name)
        # Pull this workgroup's DHCP options across as key/value pairs.
        cursor.execute("SELECT dhcp_option, value "
                       "FROM object_option "
                       "WHERE object_id = {0} "
                       "AND type = 'workgroup'".format(id))
        _results = cursor.fetchall()
        for dhcp_option, value in _results:
            cursor.execute("SELECT name, type "
                           "FROM dhcp_options "
                           "WHERE id = {0}".format(dhcp_option))
            # NOTE: rebinds the outer loop's `name` to the option name.
            name, type = cursor.fetchone()
            kv, _ = WorkgroupKeyValue.objects.get_or_create(
                value=value, key=name, workgroup=w)
        migrated.append((w, created))
    print ("Records in Maintain {0}\n"
           "Records Migrated {1}\n"
           "Records created {2}".format(
               len(results),
               len(migrated),
               len([y for x, y in migrated if y])))
def migrate_zones():
    """Turn Maintain zones into cyder containers (Ctnr)."""
    print "Migrating containers."
    cursor.execute("SELECT name, description, comment, "
                   "support_mail, allow_blank_ha "
                   "FROM zone")
    migrated = []
    results = cursor.fetchall()
    for name, desc, comment, email_contact, allow_blank_mac in results:
        # Zone names may carry spaces and a "zone." prefix; normalise them.
        name = clean_zone_name(name)
        migrated.append(
            Ctnr.objects.get_or_create(
                name=name,
                description=comment or desc,
                email_contact=email_contact or ''))
    print ("Records in Maintain {0}\n"
           "Records Migrated {1}\n"
           "Records created {2}".format(
               len(results),
               len(migrated),
               len([y for x, y in migrated if y])))
@transaction.commit_on_success
def migrate_dynamic_hosts():
    """Migrate Maintain hosts with ip = 0 into DynamicInterface objects.

    Runs inside a single transaction.  Hosts whose range, zone or domain
    cannot be resolved are reported on stderr and skipped.
    """
    print "Migrating dynamic hosts."
    # NOTE(review): the results of this first query are never fetched; the
    # real query is built from `keys` below.  Looks redundant — confirm.
    cursor.execute("SELECT dynamic_range, name, domain, ha, location, "
                   "workgroup, zone, enabled, last_seen "
                   "FROM host WHERE ip = 0")
    # Maintain column -> human-readable SystemKeyValue key.
    sys_value_keys = {"type": "Hardware Type",
                      "os": "Operating System",
                      "location": "Location",
                      "department": "Department",
                      "serial": "Serial Number",
                      "other_id": "Other ID",
                      "purchase_date": "Purchase Date",
                      "po_number": "PO Number",
                      "warranty_date": "Warranty Date",
                      "owning_unit": "Owning Unit",
                      "user_id": "User ID"}
    keys = ("id", "dynamic_range", "name", "workgroup", "enabled", "ha",
            "type", "os", "location", "department", "serial", "other_id",
            "purchase_date", "po_number", "warranty_date", "owning_unit",
            "user_id", "last_seen", "expire", "ttl", "last_update", "domain",
            "zone")
    sql = "SELECT %s FROM host WHERE ip = 0" % ", ".join(keys)
    count = 0
    cursor.execute(sql)
    for values in cursor.fetchall():
        items = dict(zip(keys, values))
        enabled = items['enabled']
        mac = items['ha']
        # Malformed or all-zero MACs are treated as missing; hosts without
        # a MAC are migrated disabled.
        if len(mac) != 12 or mac == '0' * 12:
            mac = ""
        if mac == "":
            enabled = False
        r = maintain_find_range(items['dynamic_range'])
        c = maintain_find_zone(items['zone'])
        d = maintain_find_domain(items['domain'])
        w = maintain_find_workgroup(items['workgroup'])
        # Workgroup (w) is optional; range, container and domain are not.
        if not all([r, c, d]):
            stderr.write("Trouble migrating host with mac {0}\n"
                         .format(items['ha']))
            continue
        s = System(name=items['name'])
        s.save()
        for key in sys_value_keys.keys():
            value = items[key]
            if not value or value == '0':
                continue
            kv = SystemKeyValue(system=s, key=sys_value_keys[key],
                                value=value)
            kv.clean()
            kv.save()
        intr, _ = DynamicInterface.objects.get_or_create(
            range=r, workgroup=w, ctnr=c, domain=d, mac=mac, system=s,
            dhcp_enabled=enabled, last_seen=items['last_seen'])
        for key, value in get_host_option_values(items['id']):
            kv = DynamicIntrKeyValue(dynamic_interface=intr,
                                     key=key, value=value)
            kv.clean()
            kv.save()
        count += 1
        if not count % 1000:
            print "%s valid hosts found so far." % count
    print "%s valid hosts found. Committing transaction." % count
def migrate_user():
print "Migrating users."
cursor.execute("SELECT username FROM user "
"WHERE username IN ( "
"SELECT DISTINCT username FROM zone_user )")
result = cursor.fetchall()
for username, in result:
username = username.lower()
user, _ = User.objects.get_or_create(username=username)
def migrate_zone_user():
    """Copy Maintain zone/user permissions into CtnrUser relations.

    Maintain privilege levels (5/25/50/100) are collapsed onto cyder's
    0-2 scale; a zone id of 0 marks a global superuser.
    """
    print "Migrating user-container relationship."
    NEW_LEVEL = {5: 0, 25: 1, 50: 2, 100: 2}  # Maintain level -> cyder level
    cursor.execute("SELECT * FROM zone_user")
    result = cursor.fetchall()
    for _, username, zone_id, level in result:
        username = username.lower()
        level = NEW_LEVEL[level]
        user, _ = User.objects.get_or_create(username=username)
        if zone_id == 0:
            # Zone 0 means "all zones": attach to the global Ctnr fixture.
            ctnr = Ctnr.objects.get(pk=1)
            user.is_superuser = True
            user.save()
        else:
            ctnr = maintain_find_zone(zone_id)
        if not ctnr:
            continue
        CtnrUser.objects.get_or_create(user=user, ctnr=ctnr, level=level)
def migrate_zone_range():
print "Migrating container-range relationship."
cursor.execute("SELECT * FROM zone_range")
result = cursor.fetchall()
for _, zone_id, range_id, _, comment, _ in result:
c = maintain_find_zone(zone_id)
r = maintain_find_range(range_id)
if not (c and r):
continue
c.ranges.add(r)
c.save()
def migrate_zone_domain():
print "Migrating container-domain relationship."
cursor.execute("SELECT zone, domain FROM zone_domain")
results = cursor.fetchall()
for zone_id, domain_id in results:
ctnr = maintain_find_zone(zone_id)
domain = maintain_find_domain(domain_id)
if not ctnr or not domain:
continue
ctnr.domains.add(domain)
ctnr.save()
def migrate_zone_reverse():
print "Migrating container-reverse_domain relationship."
cursor.execute("SELECT ip,zone FROM pointer WHERE type='reverse'")
results = cursor.fetchall()
for ip, zone_id in results:
ctnr = maintain_find_zone(zone_id)
if not ctnr:
continue
doctets = []
octets = long2ip(ip).split(".")
for octet in octets:
doctets = [octet] + doctets
dname = ".".join(doctets) + ".in-addr.arpa"
domain, _ = Domain.objects.get_or_create(name=dname,
is_reverse=True)
ctnr.domains.add(domain)
ctnr.save()
def migrate_zone_workgroup():
print "Migrating container-workgroup relationship."
cursor.execute("SELECT * FROM zone_workgroup")
result = cursor.fetchall()
for _, workgroup_id, zone_id, _ in result:
c = maintain_find_zone(zone_id)
w = maintain_find_workgroup(workgroup_id)
if not (c and w):
continue
c.workgroups.add(w)
c.save()
def maintain_find_range(range_id):
    """Return the cyder Range matching a Maintain range id, or None."""
    start, end = maintain_get_cached('ranges', ['start', 'end'], range_id)
    if start and end:
        return Range.objects.get(start_lower=start, end_lower=end)
def maintain_find_domain(domain_id):
    """Return the cyder Domain matching a Maintain domain id, or None."""
    (name,) = maintain_get_cached('domain', ['name'], domain_id)
    if name:
        return Domain.objects.get(name=name)
def maintain_find_workgroup(workgroup_id):
    """Return the cyder Workgroup matching a Maintain workgroup id, or None."""
    (name,) = maintain_get_cached('workgroup', ['name'], workgroup_id)
    if name:
        return Workgroup.objects.get(name=name)
def maintain_find_zone(zone_id):
    """Return the cyder Ctnr matching a Maintain zone id, or None."""
    (name,) = maintain_get_cached('zone', ['name'], zone_id)
    if name:
        name = clean_zone_name(name)
        try:
            return Ctnr.objects.get(name=name)
        except Ctnr.DoesNotExist:
            return None
def maintain_get_cached(table, columns, object_id):
global cached
columns = tuple(columns)
if (table, columns) not in cached:
sql = "SELECT id, %s FROM %s" % (", ".join(columns), table)
print "Caching: %s" % sql
cursor.execute(sql)
results = cursor.fetchall()
results = [(r[0], tuple(r[1:])) for r in results]
cached[(table, columns)] = dict(results)
if object_id in cached[(table, columns)]:
return cached[(table, columns)][object_id]
else:
return (None for _ in columns)
def get_host_option_values(host_id):
    """Return the set of (option name, value) pairs for a Maintain host.

    The whole host/dhcp_options join is fetched once and memoised in the
    module-level ``host_option_values`` dict; later calls are lookups.
    Returns an empty list for hosts with no options.
    """
    global host_option_values
    if host_option_values is None:
        host_option_values = {}
        sql = ("SELECT {0}.id, {1}.name, {2}.value FROM {0} "
               "INNER JOIN {2} ON {2}.object_id = {0}.id "
               "INNER JOIN {1} ON {1}.id = {2}.dhcp_option "
               "WHERE {2}.type = '{3}'")
        sql = sql.format("host", "dhcp_options", "object_option", "host")
        print "Caching: %s" % sql
        cursor.execute(sql)
        results = cursor.fetchall()
        for h_id, name, value in results:
            if h_id not in host_option_values:
                host_option_values[h_id] = set([])
            host_option_values[h_id].add((name, value))
    if host_id in host_option_values:
        return host_option_values[host_id]
    else:
        return []
def migrate_all(skip=False):
    """Run every migration step in dependency order.

    :param skip: when True, skip the (slow) dynamic host migration.
    """
    migrate_vlans()
    migrate_zones()
    migrate_workgroups()
    migrate_subnets()
    migrate_ranges()
    if not skip:
        migrate_dynamic_hosts()
    migrate_zone_range()
    migrate_zone_workgroup()
    migrate_zone_domain()
    migrate_zone_reverse()
    migrate_user()
    migrate_zone_user()
def delete_all():
    """Remove previously migrated objects, preserving fixture rows."""
    Range.objects.all().delete()
    Vlan.objects.all().delete()
    Network.objects.all().delete()
    Vrf.objects.all().delete()
    Ctnr.objects.filter(id__gt=2).delete()  # First 2 are fixtures
    DynamicInterface.objects.all().delete()
    Workgroup.objects.all().delete()
    User.objects.filter(id__gt=1).delete()  # First user is a fixture
    CtnrUser.objects.filter(id__gt=2).delete()  # First 2 are fixtures
def do_everything(skip=False):
    """Wipe previously migrated data, then re-run the full migration."""
    delete_all()
    migrate_all(skip)
class Command(BaseCommand):
    """Management command: migrate data from the legacy Maintain MySQL
    database into cyder's models.  Individual steps can be selected via
    flags, or everything run at once with -a."""
    option_list = BaseCommand.option_list + (
        make_option('-D', '--delete',
                    action='store_true',
                    dest='delete',
                    default=False,
                    help='Delete things'),
        make_option('-a', '--all',
                    action='store_true',
                    dest='all',
                    default=False,
                    help='Migrate everything'),
        make_option('-S', '--skip',
                    action='store_true',
                    dest='skip',
                    default=False,
                    help='Ignore dynamic hosts when using -a option'),
        make_option('-n', '--vlan',
                    action='store_true',
                    dest='vlan',
                    default=False,
                    help='Migrate vlans'),
        make_option('-Z', '--zone',
                    action='store_true',
                    dest='zone',
                    default=False,
                    help='Migrate zones to ctnrs'),
        make_option('-w', '--workgroup',
                    action='store_true',
                    dest='workgroup',
                    default=False,
                    help='Migrate workgroups'),
        make_option('-s', '--subnet',
                    action='store_true',
                    dest='subnet',
                    default=False,
                    help='Migrate subnets'),
        make_option('-r', '--range',
                    action='store_true',
                    dest='range',
                    default=False,
                    help='Migrate ranges'),
        make_option('-d', '--dynamic',
                    action='store_true',
                    dest='dynamic',
                    default=False,
                    help='Migrate dynamic interfaces'),
        make_option('-R', '--zone-range',
                    action='store_true',
                    dest='zone-range',
                    default=False,
                    help='Migrate zone/range relationship'),
        make_option('-W', '--zone-workgroup',
                    action='store_true',
                    dest='zone-workgroup',
                    default=False,
                    help='Migrate zone/workgroup relationship'),
        make_option('-z', '--zone-domain',
                    action='store_true',
                    dest='zone-domain',
                    default=False,
                    help='Migrate zone/domain relationship'),
        make_option('-e', '--zone-reverse',
                    action='store_true',
                    dest='zone-reverse',
                    default=False,
                    help='Migrate zone/reverse domain relationship'),
        make_option('-u', '--user',
                    action='store_true',
                    dest='user',
                    default=False,
                    help='Migrate users'),
        make_option('-U', '--zone-user',
                    action='store_true',
                    dest='zone-user',
                    default=False,
                    help='Migrate zone/user relationship'))

    def handle(self, **options):
        # NOTE: several dests contain hyphens ('zone-range' etc.), so they
        # are only reachable through the options dict, never as keywords.
        if options['delete']:
            delete_all()
        if options['all']:
            migrate_all(skip=options['skip'])
        else:
            if options['vlan']:
                migrate_vlans()
            if options['zone']:
                migrate_zones()
            if options['workgroup']:
                migrate_workgroups()
            if options['subnet']:
                migrate_subnets()
            if options['range']:
                migrate_ranges()
            if options['dynamic']:
                migrate_dynamic_hosts()
            if options['zone-range']:
                migrate_zone_range()
            if options['zone-workgroup']:
                migrate_zone_workgroup()
            if options['zone-domain']:
                migrate_zone_domain()
            if options['zone-reverse']:
                migrate_zone_reverse()
            if options['user']:
                migrate_user()
            if options['zone-user']:
                migrate_zone_user()
|
from mimetypes import guess_type
from django.http import HttpResponse
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from jmbo.generic.views import GenericObjectList
from category.models import Category
from downloads.models import Download
def download_request(request, slug):
    """Serve a download through nginx (X-Accel-Redirect) and count the view.

    The view counter is incremented with an F() expression so that
    concurrent requests do not lose updates; the old read-modify-write
    (``download.view_count += 1``) raced with itself under load.
    """
    from django.db.models import F

    download = Download.permitted.get(slug=slug).as_leaf_class()
    # increment view count atomically in the database
    download.view_count = F('view_count') + 1
    download.save()
    f, file_name = download.get_file(request)
    mime = guess_type(f.name)
    response = HttpResponse(content_type=mime[0])
    # check if it has encoding
    if mime[1]:
        response['Content-Encoding'] = mime[1]
    response['Content-Disposition'] = 'attachment; \
filename="%s"' % smart_str(file_name)
    response['Cache-Control'] = 'no-cache, no-store, must-revalidate'
    response['Expires'] = '0'
    response['Pragma'] = 'no-store, no-cache'
    # Hand the actual file transfer off to nginx.
    response['X-Accel-Redirect'] = smart_str(f.url)
    return response
# traverse up to parent and create absolute category name
def get_full_category(category_id, parent_id, cat_dict):
    # Each cat_dict entry is [parent_id, own_title, absolute_name, depth].
    # Walk up the tree, prefixing each ancestor's title and counting depth.
    if parent_id is None:
        return
    entry = cat_dict[category_id]
    parent_entry = cat_dict[parent_id]
    entry[2] = parent_entry[1] + entry[2]
    entry[3] += 1
    get_full_category(category_id, parent_entry[0], cat_dict)
class ObjectList(GenericObjectList):
    """Download listing sorted by absolute (ancestor-prefixed) category name."""

    def get_extra_context(self, *args, **kwargs):
        dls = list(Download.permitted.filter(do_not_list=False))
        # calculate all absolute category names
        # cat_dict entry: [parent_id, own_title, absolute_name, depth]
        cat_dict = dict((id, [parent, title, title, 1]) for (id, parent, title)
                        in Category.objects.values_list('id', 'parent', 'title'))
        for key in cat_dict.keys():
            get_full_category(key, cat_dict[key][0], cat_dict)
        # add None key for downloads without a category
        cat_dict[None] = (None, '', '', 0)
        # perform insertion sort on absolute category name
        for i in range(1, len(dls)):
            val = dls[i]
            j = i - 1
            while j >= 0 and cat_dict[dls[j].primary_category_id][2] > cat_dict[val.primary_category_id][2]:
                dls[j + 1] = dls[j]
                j -= 1
            dls[j + 1] = val
        # construct [(dl_object, depth), ...]
        sorted_list = [(val,
                        cat_dict[val.primary_category_id][3]) for val in dls]
        return {'title': _('Downloads'), 'sorted_list': sorted_list}

    def get_queryset(self, *args, **kwargs):
        # The listing is built entirely in get_extra_context.
        return Download.permitted.none()

    def get_paginate_by(self, *args, **kwargs):
        return 20


object_list = ObjectList()
Eliminate race condition when incrementing view count
from mimetypes import guess_type
from django.http import HttpResponse
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.db.models import F
from jmbo.generic.views import GenericObjectList
from category.models import Category
from downloads.models import Download
def download_request(request, slug):
    """Serve a download through nginx (X-Accel-Redirect) and count the view."""
    download = Download.permitted.get(slug=slug).as_leaf_class()
    # increment view count
    # contains race condition: download.view_count += 1
    # F() pushes the increment into the database, so it is atomic; note
    # that after save() the in-memory view_count holds the F expression,
    # not an integer.
    download.view_count = F('view_count') + 1
    download.save()
    f, file_name = download.get_file(request)
    mime = guess_type(f.name)
    response = HttpResponse(content_type=mime[0])
    # check if it has encoding
    if mime[1]:
        response['Content-Encoding'] = mime[1]
    response['Content-Disposition'] = 'attachment; \
filename="%s"' % smart_str(file_name)
    response['Cache-Control'] = 'no-cache, no-store, must-revalidate'
    response['Expires'] = '0'
    response['Pragma'] = 'no-store, no-cache'
    # Hand the actual file transfer off to nginx.
    response['X-Accel-Redirect'] = smart_str(f.url)
    return response
# traverse up to parent and create absolute category name
def get_full_category(category_id, parent_id, cat_dict):
    # Each cat_dict entry is [parent_id, own_title, absolute_name, depth].
    # Walk up the tree, prefixing each ancestor's title and counting depth.
    if parent_id is None:
        return
    entry = cat_dict[category_id]
    parent_entry = cat_dict[parent_id]
    entry[2] = parent_entry[1] + entry[2]
    entry[3] += 1
    get_full_category(category_id, parent_entry[0], cat_dict)
class ObjectList(GenericObjectList):
    """Download listing sorted by absolute (ancestor-prefixed) category name."""

    def get_extra_context(self, *args, **kwargs):
        dls = list(Download.permitted.filter(do_not_list=False))
        # calculate all absolute category names
        # cat_dict entry: [parent_id, own_title, absolute_name, depth]
        cat_dict = dict((id, [parent, title, title, 1]) for (id, parent, title)
                        in Category.objects.values_list('id', 'parent', 'title'))
        for key in cat_dict.keys():
            get_full_category(key, cat_dict[key][0], cat_dict)
        # add None key for downloads without a category
        cat_dict[None] = (None, '', '', 0)
        # perform insertion sort on absolute category name
        for i in range(1, len(dls)):
            val = dls[i]
            j = i - 1
            while j >= 0 and cat_dict[dls[j].primary_category_id][2] > cat_dict[val.primary_category_id][2]:
                dls[j + 1] = dls[j]
                j -= 1
            dls[j + 1] = val
        # construct [(dl_object, depth), ...]
        sorted_list = [(val,
                        cat_dict[val.primary_category_id][3]) for val in dls]
        return {'title': _('Downloads'), 'sorted_list': sorted_list}

    def get_queryset(self, *args, **kwargs):
        # The listing is built entirely in get_extra_context.
        return Download.permitted.none()

    def get_paginate_by(self, *args, **kwargs):
        return 20


object_list = ObjectList()
|
# noinspection PyUnresolvedReferences
from globalvars import GlobalVars
from findspam import FindSpam
# noinspection PyUnresolvedReferences
from datetime import datetime
from utcdate import UtcDate
from apigetpost import api_get_post
from datahandling import *
from metasmoke import Metasmoke
from parsing import *
from spamhandling import handle_spam
from spamhandling import handle_user_with_all_spam
from gitmanager import GitManager
import threading
from threading import Thread
import random
import requests
import os
import time
import datahandling
import regex
from helpers import Response
# TODO: pull out code block to get user_id, chat_site, room_id into function
# TODO: Return result for all functions should be similar (tuple/named tuple?)
# TODO: Do we need uid == -2 check? Turn into "is_user_valid" check
# TODO: Consistant return structure
# if return...else return vs if return...return
def check_permissions(function):
    """Decorator: run *function* only for privileged chat users.

    The wrapped command receives ev_room, ev_user_id and wrap2 forwarded
    as keyword arguments; unprivileged users get a refusal Response
    instead of the command running.
    """
    def run_command(ev_room, ev_user_id, wrap2, *args, **kwargs):
        if datahandling.is_privileged(ev_room, ev_user_id, wrap2):
            kwargs['ev_room'] = ev_room
            kwargs['ev_user_id'] = ev_user_id
            kwargs['wrap2'] = wrap2
            return function(*args, **kwargs)
        else:
            return Response(command_status=False,
                            message="You are not a privileged user. Please see [the privileges wiki page](" +
                                    GlobalVars.bot_repository + "/wiki/Privileges) for information on what privileges"
                                    " are and what is expected of privileged users.")
    return run_command
# Functions go before the final dictionaries of command to function mappings
def post_message_in_room(room_id_str, msg, length_check=True):
    """
    Send *msg* to the chat room identified by *room_id_str*.

    Only the three known rooms (Charcoal HQ, Tavern on the Meta, SOCVR)
    are recognised; any other room id is silently ignored.
    """
    if room_id_str == GlobalVars.charcoal_room_id:
        target_room = GlobalVars.charcoal_hq
    elif room_id_str == GlobalVars.meta_tavern_room_id:
        target_room = GlobalVars.tavern_on_the_meta
    elif room_id_str == GlobalVars.socvr_room_id:
        target_room = GlobalVars.socvr
    else:
        return
    target_room.send_message(msg, length_check)
def is_report(post_site_id):
    """
    Check whether a post identifier represents a report.

    :param post_site_id: Report to check; ``None`` means "no report"
    :return: ``True`` if *post_site_id* refers to a report, else ``False``
    """
    # Any non-None value counts as a report (falsy values like 0 or ""
    # are still reports); the old if/return-bool pair collapses to a
    # single identity test.
    return post_site_id is not None
# noinspection PyIncorrectDocstring,PyUnusedLocal
def send_metasmoke_feedback(post_url, second_part_lower, ev_user_name, ev_user_id, ev_chat_host):
    """
    Forward feedback on a post to metasmoke on a background thread.

    :param post_url: The post url the feedback is about
    :param second_part_lower: The feedback string itself
    :param ev_user_name: User name supplying the feedback
    :param ev_user_id: User ID supplying the feedback
    :param ev_chat_host: Chat host the feedback came from
    :return: None (fire-and-forget thread)
    """
    thread_name = "metasmoke feedback send on #{url}".format(url=post_url)
    feedback_args = (post_url, second_part_lower, ev_user_name, ev_user_id, ev_chat_host)
    Thread(name=thread_name,
           target=Metasmoke.send_feedback_for_post,
           args=feedback_args).start()
def single_random_user(ev_room):
    """
    Pick one random user tuple from the users seen chatting in a room.

    :param ev_room: Room to select users from
    :return: A single user tuple
    """
    candidates = GlobalVars.users_chatting[ev_room]
    return random.choice(candidates)
#
#
# System command functions below here
# Each of these should take the *args and **kwargs parameters. This allows us to create functions that
# don't accept any parameters but still use the `command_dict` mappings
# --- Blacklist Functions --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_add_blacklist_user(message_parts, content_lower, message_url, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Add a user to the site blacklist.

    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param message_url: Link to the chat message, recorded with the entry
    :param content_lower: Lower-cased message text holding the user spec
    :param message_parts: Tokenised message; a trailing '-' requests quiet mode
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    quiet_action = any(part.endswith('-') for part in message_parts)
    uid, val = get_user_from_list_command(content_lower)
    # uid == -2 is the parser's error sentinel, with the error text in val.
    if uid == -2:
        return Response(command_status=True, message="Error: {}".format(val))
    if uid > -1 and val != "":
        add_blacklisted_user((uid, val), message_url, "")
        if quiet_action:
            return Response(command_status=True, message=None)
        return Response(command_status=True, message="User blacklisted (`{}` on `{}`).".format(uid, val))
    return Response(command_status=False,
                    message="Invalid format. Valid format: `!!/addblu profileurl` "
                            "*or* `!!/addblu userid sitename`.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_check_blacklist(content_lower, *args, **kwargs):
    """
    Report whether a user is on the site blacklist.

    :param content_lower: Lower-cased message text holding the user spec
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    uid, val = get_user_from_list_command(content_lower)
    # uid == -2 signals a parse error, with the message in val.
    if uid == -2:
        return Response(command_status=True, message="Error: {}".format(val))
    if uid > -1 and val != "":
        if is_blacklisted_user((uid, val)):
            template = "User is blacklisted (`{}` on `{}`)."
        else:
            template = "User is not blacklisted (`{}` on `{}`)."
        return Response(command_status=True, message=template.format(uid, val))
    return Response(command_status=False,
                    message="Invalid format. Valid format: `!!/isblu profileurl` *or* `!!/isblu userid sitename`.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_remove_blacklist_user(message_parts, content_lower, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Remove a user from the site blacklist.

    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param content_lower: Lower-cased message text holding the user spec
    :param message_parts: Tokenised message; a trailing '-' requests quiet mode
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    quiet_action = any(part.endswith('-') for part in message_parts)
    uid, val = get_user_from_list_command(content_lower)
    # uid == -2 signals a parse error, with the message in val.
    if uid == -2:
        return Response(command_status=True, message="Error: {}".format(val))
    if uid > -1 and val != "":
        if not remove_blacklisted_user((uid, val)):
            return Response(command_status=True, message="User is not blacklisted.")
        if quiet_action:
            return Response(command_status=True, message=None)
        return Response(command_status=True,
                        message="User removed from blacklist (`{}` on `{}`).".format(uid, val))
    return Response(command_status=False,
                    message="Invalid format. Valid format: `!!/rmblu profileurl` *or* `!!/rmblu userid sitename`.")
# --- Whitelist functions --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_add_whitelist_user(message_parts, content_lower, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Add a user to the site whitelist.

    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param content_lower: Lower-cased message text holding the user spec
    :param message_parts: Tokenised message; a trailing '-' requests quiet mode
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    quiet_action = any(part.endswith('-') for part in message_parts)
    uid, val = get_user_from_list_command(content_lower)
    # uid == -2 signals a parse error, with the message in val.
    if uid == -2:
        return Response(command_status=True, message="Error: {}".format(val))
    if uid > -1 and val != "":
        add_whitelisted_user((uid, val))
        if quiet_action:
            return Response(command_status=True, message=None)
        return Response(command_status=True, message="User whitelisted (`{}` on `{}`).".format(uid, val))
    return Response(command_status=False,
                    message="Invalid format. Valid format: `!!/addwlu profileurl` *or* "
                            "`!!/addwlu userid sitename`.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_check_whitelist(content_lower, *args, **kwargs):
    """
    Report whether a user is on the site whitelist.

    :param content_lower: Lower-cased message text holding the user spec
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    uid, val = get_user_from_list_command(content_lower)
    # uid == -2 signals a parse error, with the message in val.
    if uid == -2:
        return Response(command_status=True, message="Error: {}".format(val))
    if uid > -1 and val != "":
        if is_whitelisted_user((uid, val)):
            template = "User is whitelisted (`{}` on `{}`)."
        else:
            template = "User is not whitelisted (`{}` on `{}`)."
        return Response(command_status=True, message=template.format(uid, val))
    return Response(command_status=False,
                    message="Invalid format. Valid format: `!!/iswlu profileurl` *or* `!!/iswlu userid sitename`.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_remove_whitelist_user(message_parts, content_lower, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Removes a user from site whitelist
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param content_lower: Lower-cased message text holding the user spec
    :param message_parts: Tokenised message; a trailing '-' requests quiet mode
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    quiet_action = any(part.endswith('-') for part in message_parts)
    uid, val = get_user_from_list_command(content_lower)
    # Bug fix: this used `uid != -1`, which let the uid == -2 error
    # sentinel (val holds the error text, so val != "") fall into the
    # removal branch and made the `elif uid == -2` report unreachable.
    # Use the same `uid > -1` validity test as every sibling command.
    if uid > -1 and val != "":
        if remove_whitelisted_user((uid, val)):
            return Response(command_status=True, message=None) if quiet_action \
                else Response(command_status=True,
                              message="User removed from whitelist (`{}` on `{}`).".format(uid, val))
        else:
            return Response(command_status=True, message="User is not whitelisted.")
    elif uid == -2:
        return Response(command_status=True, message="Error: {}".format(val))
    else:
        return Response(command_status=False,
                        message="Invalid format. Valid format: `!!/rmwlu profileurl` *or* `!!/rmwlu userid sitename`.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_blacklist_help(*args, **kwargs):
    """
    Explain that !!/blacklist was replaced by the specific blacklist commands.

    :return: A Response carrying the deprecation notice
    """
    deprecation_notice = ("The !!/blacklist command has been deprecated. "
                          "Please use !!/blacklist-website, !!/blacklist-username "
                          "or !!/blacklist-keyword. Remember to escape dots "
                          "in URLs using \\.")
    return Response(command_status=True, message=deprecation_notice)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_blacklist_website(message_parts, ev_user_name, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Add a pattern to the website blacklist and commit/push it to GitHub.

    :param message_parts: Command tokens; everything after the command is the pattern
    :param ev_user_name: Chat user requesting the addition
    :param ev_room:
    :param ev_user_id:
    :param wrap2:
    :return: A Response mirroring GitManager's (status, message) result
    """
    website_pattern = " ".join(message_parts[1:])
    # Validate the regex before touching the blacklist.
    # noinspection PyProtectedMember
    try:
        regex.compile(website_pattern)
    except regex._regex_core.error:
        return Response(command_status=False, message="An invalid website pattern was provided, not blacklisting.")
    chat_user_profile_link = "http://chat.{host}/users/{id}".format(host=wrap2.host, id=str(ev_user_id))
    result = GitManager.add_to_blacklist(
        blacklist="website",
        item_to_blacklist=website_pattern,
        username=ev_user_name,
        chat_profile_link=chat_user_profile_link,
        code_permissions=datahandling.is_code_privileged(ev_room, ev_user_id, wrap2)
    )
    return Response(command_status=result[0], message=result[1])
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_blacklist_keyword(message_parts, ev_user_name, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Add a pattern to the keyword blacklist and commit/push it to GitHub.

    :param message_parts: Command tokens; everything after the command is the pattern
    :param ev_user_name: Chat user requesting the addition
    :param ev_room:
    :param ev_user_id:
    :param wrap2:
    :return: A Response mirroring GitManager's (status, message) result
    """
    keyword_pattern = " ".join(message_parts[1:])
    # Validate the regex before touching the blacklist.
    # noinspection PyProtectedMember
    try:
        regex.compile(keyword_pattern)
    except regex._regex_core.error:
        return Response(command_status=False, message="An invalid keyword pattern was provided, not blacklisting.")
    chat_user_profile_link = "http://chat.{host}/users/{id}".format(host=wrap2.host, id=str(ev_user_id))
    result = GitManager.add_to_blacklist(
        blacklist="keyword",
        item_to_blacklist=keyword_pattern,
        username=ev_user_name,
        chat_profile_link=chat_user_profile_link,
        code_permissions=datahandling.is_code_privileged(ev_room, ev_user_id, wrap2)
    )
    return Response(command_status=result[0], message=result[1])
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_blacklist_username(message_parts, ev_user_name, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Add a pattern to the username blacklist and commit/push it to GitHub.

    :param message_parts: Command tokens; everything after the command is the pattern
    :param ev_user_name: Chat user requesting the addition
    :param ev_room:
    :param ev_user_id:
    :param wrap2:
    :return: A Response mirroring GitManager's (status, message) result
    """
    username_pattern = " ".join(message_parts[1:])
    # Validate the regex before touching the blacklist.
    # noinspection PyProtectedMember
    try:
        regex.compile(username_pattern)
    except regex._regex_core.error:
        return Response(command_status=False, message="An invalid username pattern was provided, not blacklisting.")
    chat_user_profile_link = "http://chat.{host}/users/{id}".format(host=wrap2.host, id=str(ev_user_id))
    result = GitManager.add_to_blacklist(
        blacklist="username",
        item_to_blacklist=username_pattern,
        username=ev_user_name,
        chat_profile_link=chat_user_profile_link,
        code_permissions=datahandling.is_code_privileged(ev_room, ev_user_id, wrap2)
    )
    return Response(command_status=result[0], message=result[1])
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_gitstatus(wrap2, *args, **kwargs):
    """Return a Response carrying GitManager's current git status text (privileged)."""
    return Response(command_status=True, message=GitManager.current_git_status())
# --- Joke Commands --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_blame(ev_room, *args, **kwargs):
    """
    Blame a random user in the room (joke command).

    :param ev_room: Room whose chatting users are the candidates
    :param kwargs: No additional arguments expected
    :return: A Response naming the culprit
    """
    # De-duplicate the room's user list in place before picking.
    GlobalVars.users_chatting[ev_room] = list(set(GlobalVars.users_chatting[ev_room]))
    culprit = single_random_user(ev_room)
    return Response(command_status=True, message=u"It's [{}]({})'s fault.".format(culprit[0], culprit[1]))
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_brownie(*args, **kwargs):
    """
    Reply "Brown!" (joke command).

    :return: A Response with the canned reply
    """
    return Response(message="Brown!", command_status=True)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_coffee(ev_user_name, *args, **kwargs):
    """
    Brew virtual coffee for the requesting user (joke command).

    :param ev_user_name: Name of the requesting user (spaces removed for the ping)
    :param kwargs: No additional arguments expected
    :return: A Response with the coffee message
    """
    ping_name = ev_user_name.replace(" ", "")
    return Response(command_status=True, message=u"*brews coffee for @{}*".format(ping_name))
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_lick(*args, **kwargs):
    """
    Respond to 'lick' (joke command).

    :return: A Response with the canned reply
    """
    return Response(message="*licks ice cream cone*", command_status=True)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_tea(ev_user_name, *args, **kwargs):
    """
    Brew a random virtual tea for the requesting user (joke command).

    :param ev_user_name: Name of the requesting user (spaces removed for the ping)
    :param kwargs: No additional arguments expected
    :return: A Response with the tea message
    """
    tea_types = ['earl grey', 'green', 'chamomile',
                 'lemon', 'darjeeling', 'mint', 'jasmine']
    return Response(command_status=True,
                    message=u"*brews a cup of {choice} tea for @{user}*".format(
                        choice=random.choice(tea_types),
                        user=ev_user_name.replace(" ", "")))
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_wut(*args, **kwargs):
    """
    Respond to 'wut' (joke command).

    :return: A Response with the canned reply
    """
    return Response(message="Whaddya mean, 'wut'? Humans...", command_status=True)
""" Uncomment when Winterbash comes back
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_hats(*args, **kwargs):
wb_start = datetime(2016, 12, 19, 0, 0, 0)
wb_end = datetime(2017, 1, 9, 0, 0, 0)
now = datetime.utcnow()
return_string = ""
if wb_start > now:
diff = wb_start - now
hours, remainder = divmod(diff.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
daystr = "days" if diff.days != 1 else "day"
hourstr = "hours" if hours != 1 else "hour"
minutestr = "minutes" if minutes != 1 else "minute"
secondstr = "seconds" if seconds != 1 else "second"
return_string = "WE LOVE HATS! Winter Bash will begin in {} {}, {} {}, {} {}, and {} {}.".format(
diff.days, daystr, hours, hourstr, minutes, minutestr, seconds, secondstr)
elif wb_end > now:
diff = wb_end - now
hours, remainder = divmod(diff.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
daystr = "days" if diff.days != 1 else "day"
hourstr = "hours" if hours != 1 else "hour"
minutestr = "minutes" if minutes != 1 else "minute"
secondstr = "seconds" if seconds != 1 else "second"
return_string = "Winter Bash won't end for {} {}, {} {}, {} {}, and {} {}. GO EARN SOME HATS!".format(
diff.days, daystr, hours, hourstr, minutes, minutestr, seconds, secondstr)
return Response(command_status=True, message=return_string)
"""
# --- Block application from posting functions --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_block(message_parts, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Block the bot from posting reports, globally or per room, for a while.

    :param ev_room:
    :param wrap2:
    :param ev_user_id:
    :param message_parts: [command, seconds, room-id]; both arguments optional
    :param kwargs: No additional arguments expected
    :return: A Response with the confirmation text
    """
    room_id = message_parts[2] if len(message_parts) > 2 else "all"
    raw_duration = message_parts[1] if len(message_parts) > 1 else "0"
    if not raw_duration.isdigit():
        return Response(command_status=False, message="Invalid duration.")
    duration = int(raw_duration)
    if not 0 < duration < 14400:
        # Clamp zero/oversized requests to a 15-minute block.
        duration = 900
    GlobalVars.blockedTime[room_id] = time.time() + duration
    scope = "globally" if room_id == "all" else "in room " + room_id
    report = "Reports blocked for {} seconds {}.".format(duration, scope)
    if room_id != GlobalVars.charcoal_room_id:
        GlobalVars.charcoal_hq.send_message(report)
    return Response(command_status=True, message=report)
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_unblock(message_parts, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Unblocks posting to a room
    :param ev_room:
    :param wrap2:
    :param ev_user_id:
    :param message_parts: [command, room-id]; room id defaults to "all"
    :param kwargs: No additional arguments expected
    :return: A Response with the confirmation text
    """
    room_id = message_parts[2] if len(message_parts) > 2 else "all"
    # Expire the block immediately.
    GlobalVars.blockedTime[room_id] = time.time()
    which_room = "globally" if room_id == "all" else "in room " + room_id
    # Bug fix: this formatted "Reports unblocked {}." with
    # (GlobalVars.blockedTime - time.time(), which_room) — blockedTime is a
    # dict, so the subtraction raised TypeError, and the single placeholder
    # would have received the wrong argument anyway. Only the scope belongs
    # in the message.
    report = "Reports unblocked {}.".format(which_room)
    if room_id != GlobalVars.charcoal_room_id:
        GlobalVars.charcoal_hq.send_message(report)
    return Response(command_status=True, message=report)
# --- Administration Commands --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_alive(ev_room, *args, **kwargs):
    """
    Confirm the bot is still running.

    :param ev_room: Room the query came from; the joke replies are reserved
        for the Tavern and SOCVR
    :param kwargs: No additional arguments expected
    :return: A Response with a liveness message
    """
    if ev_room not in (GlobalVars.meta_tavern_room_id, GlobalVars.socvr_room_id):
        return Response(command_status=True, message='Of course')
    quips = ['Yup', 'You doubt me?', 'Of course',
             '... did I miss something?', 'plz send teh coffee',
             'Watching this endless list of new questions *never* gets boring',
             'Kinda sorta']
    return Response(command_status=True, message=random.choice(quips))
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_allspam(message_parts, ev_room, ev_user_id, wrap2, ev_user_name, ev_room_name, *args, **kwargs):
    """
    Report every post by a user as spam.

    :param ev_room_name: Room name, recorded in the "why" text
    :param ev_user_name: Reporting user, recorded in the "why" text
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param message_parts: [command, user-profile-url]
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    if len(message_parts) != 2:
        return Response(command_status=False, message="1 argument expected")
    user = get_user_from_url(message_parts[1])
    if user is None:
        return Response(command_status=True, message="That doesn't look like a valid user URL.")
    why = u"User manually reported by *{}* in room *{}*.\n".format(ev_user_name, ev_room_name.decode('utf-8'))
    handle_user_with_all_spam(user, why)
    return Response(command_status=True, message=None)
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_errorlogs(ev_room, ev_user_id, wrap2, message_parts, *args, **kwargs):
    """
    Post the most recent lines of the error logs into the room.

    :param message_parts: Optional single argument: number of lines to show
    :param wrap2:
    :param ev_user_id:
    :param ev_room: Room to post the log excerpt into
    :param kwargs: No additional arguments expected
    :return: A Response (the log text is posted as a side effect)
    """
    if len(message_parts) > 2:
        return Response(command_status=False, message="The !!/errorlogs command requires either 0 or 1 arguments.")
    # Removed the dead `count = -1` initializer: count is always assigned
    # here, defaulting to 50 when the argument is absent or non-numeric.
    try:
        count = int(message_parts[1])
    except (ValueError, IndexError):
        count = 50
    logs_part = fetch_lines_from_error_log(count)
    post_message_in_room(room_id_str=ev_room, msg=logs_part, length_check=False)
    return Response(command_status=True, message=None)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_help(*args, **kwargs):
    """
    Return the bot's short self-description with a link to the command list.

    :param kwargs: No additional arguments expected
    :return: A Response with the help text
    """
    help_text = ("I'm " + GlobalVars.chatmessage_prefix +
                 ", a bot that detects spam and offensive posts on the network and "
                 "posts alerts to chat. "
                 "[A command list is available here](" + GlobalVars.bot_repository +
                 "/wiki/Commands).")
    return Response(command_status=True, message=help_text)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_location(*args, **kwargs):
    """
    Report where this instance is running.

    :return: A Response with GlobalVars.location
    """
    return Response(message=GlobalVars.location, command_status=True)
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyProtectedMember
@check_permissions
def command_master(ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Forces a system exit with exit code = 8
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: None
    """
    # Hard process exit; code 8 is interpreted by whatever supervises the
    # process — presumably "switch to the master branch and restart";
    # confirm against the launcher script.
    os._exit(8)
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyProtectedMember
@check_permissions
def command_pull(ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Pull an update from GitHub if CI for the latest deploy commit passed.

    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: A Response on failure/pending; on success the process exits
        with code 3 (handled by the outer launcher — confirm there)
    """
    refs_response = requests.get('https://api.github.com/repos/Charcoal-SE/SmokeDetector/git/refs/heads/deploy')
    latest_sha = refs_response.json()["object"]["sha"]
    statuses_response = requests.get(
        'https://api.github.com/repos/Charcoal-SE/SmokeDetector/commits/{commit_code}/statuses'.format(
            commit_code=latest_sha))
    states = [status["state"] for status in statuses_response.json()]
    if "success" in states:
        os._exit(3)
    if "error" in states or "failure" in states:
        return Response(command_status=True, message="CI build failed! :( Please check your commit.")
    if "pending" in states or not states:
        return Response(command_status=True,
                        message="CI build is still pending, wait until the build has finished and then pull again.")
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyProtectedMember
@check_permissions
def command_reboot(ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Announce a farewell in the room, then force a process exit with code 5.

    :param wrap2:
    :param ev_user_id:
    :param ev_room: Room to post the farewell into
    :param kwargs: No additional arguments expected
    :return: None (the process exits)
    """
    post_message_in_room(ev_room, "Goodbye, cruel world")
    os._exit(5)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_privileged(ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Tell the user whether they hold privileges in this room.

    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    if not is_privileged(ev_room, ev_user_id, wrap2):
        return Response(command_status=True,
                        message="No, you are not a privileged user. Please see [the privileges wiki page](" +
                                GlobalVars.bot_repository + "/wiki/Privileges) for information on what privileges "
                                "are and what is expected of privileged users.")
    return Response(command_status=True, message="Yes, you are a privileged user.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_code_privileged(ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Tell the user whether they hold code privileges.

    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    if not is_code_privileged(ev_room, ev_user_id, wrap2):
        return Response(command_status=True,
                        message="No, you are not a code-privileged user.")
    return Response(command_status=True, message="Yes, you have code privileges.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_quota(*args, **kwargs):
    """
    Report how many API hits remain for the day.

    :return: A Response with the current quota
    """
    quota_message = "The current API quota remaining is {}.".format(GlobalVars.apiquota)
    return Response(command_status=True, message=quota_message)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_queuestatus(*args, **kwargs):
    """
    Report the body-fetcher's current API request queue.

    :return: A Response with the printed queue
    """
    return Response(message=GlobalVars.bodyfetcher.print_queue(), command_status=True)
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyProtectedMember
@check_permissions
def command_stappit(message_parts, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Shut this instance down (exit code 6) when no argument is given or the
    argument matches this instance's location.

    :param message_parts: [command] or [command, location-substring]
    :param wrap2:
    :param ev_user_id:
    :param ev_room: Room to post the farewell into
    :param kwargs: No additional arguments expected
    :return: A Response when this instance is not the target; otherwise exits
    """
    target = " ".join(message_parts[1:]).lower()
    # An empty target (bare "!!/stappit") always matches this instance.
    if not target or target in GlobalVars.location.lower():
        post_message_in_room(ev_room, "Goodbye, cruel world")
        os._exit(6)
    return Response(command_status=True, message=None)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_status(*args, **kwargs):
    """
    Returns the amount of time the application has been running
    :return: A Response with the uptime message
    """
    now = datetime.utcnow()
    diff = now - UtcDate.startup_utc_date
    # Bug fix: timedelta.seconds only holds the sub-day remainder, so the
    # reported minute count wrapped every 24 hours; total_seconds() covers
    # the whole uptime.
    minutes = int(diff.total_seconds()) // 60
    minute_str = "minutes" if minutes != 1 else "minute"
    return Response(command_status=True,
                    message='Running since {time} UTC ({minute_count} {plurality})'.format(
                        time=GlobalVars.startup_utc,
                        minute_count=minutes, plurality=minute_str))
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_stop_flagging(*args, **kwargs):
    """
    Ask metasmoke to stop autoflagging, on a background thread.

    :return: A Response acknowledging the request was sent
    """
    Thread(name="stop_autoflagging",
           target=Metasmoke.stop_autoflagging,
           args=()).start()
    return Response(command_status=True, message="Request sent...")
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyProtectedMember
@check_permissions
def command_standby(message_parts, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Switch this instance to standby (exit code 7) when the argument matches
    this instance's location.

    :param message_parts: [command, location-substring]
    :param wrap2:
    :param ev_user_id:
    :param ev_room: Room to post the announcement into
    :param kwargs: No additional arguments expected
    :return: A Response when this instance is not the target; otherwise exits
    """
    target = " ".join(message_parts[1:]).lower()
    if target in GlobalVars.location.lower():
        announcement = "{location} is switching to standby".format(location=GlobalVars.location)
        post_message_in_room(ev_room, announcement)
        os._exit(7)
    return Response(command_status=True, message=None)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_test(content, content_lower, *args, **kwargs):
    """
    Run FindSpam over arbitrary text as if it were a whole post.

    :param content: Raw message; text after the command prefix is tested
    :param content_lower:
    :param kwargs: No additional arguments expected
    :return: A Response listing the matched reasons, if any
    """
    string_to_test = content[8:]  # text after the command prefix
    if not string_to_test:
        return Response(command_status=True, message="Nothing to test")
    # The same text stands in for title, body and username at once.
    reasons, why = FindSpam.test_post(string_to_test, string_to_test, string_to_test, "", False, False, 1, 0)
    if not reasons:
        return Response(command_status=True, message="> Would not be caught for title, body, and username.")
    result = "> " + ", ".join(reasons).capitalize()
    if why:
        result += "\n----------\n" + why
    return Response(command_status=True, message=result)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_test_answer(content, content_lower, *args, **kwargs):
    """
    Run FindSpam over arbitrary text as if it were an answer body.

    :param content: Raw message; text after the command prefix is tested
    :param content_lower:
    :param kwargs: No additional arguments expected
    :return: A Response listing the matched reasons, if any
    """
    string_to_test = content[10:]  # text after the command prefix
    if not string_to_test:
        return Response(command_status=True, message="Nothing to test")
    # Placeholder title/username; the tested text is the body, as an answer.
    reasons, why = FindSpam.test_post("Valid title", string_to_test, "Valid username", "", True, False, 1, 0)
    if not reasons:
        return Response(command_status=True, message="> Would not be caught as an answer.")
    result = "> " + ", ".join(reasons).capitalize()
    if why:
        result += "\n----------\n" + why
    return Response(command_status=True, message=result)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_test_question(content, content_lower, *args, **kwargs):
    """
    Run FindSpam over arbitrary text as if it were a question body.

    :param content: Raw message; text after the command prefix is tested
    :param content_lower:
    :param kwargs: No additional arguments expected
    :return: A Response listing the matched reasons, if any
    """
    string_to_test = content[10:]  # text after the command prefix
    if not string_to_test:
        return Response(command_status=True, message="Nothing to test")
    # Placeholder title/username; the tested text is the body, as a question.
    reasons, why = FindSpam.test_post("Valid title", string_to_test, "Valid username", "", False, False, 1, 0)
    if not reasons:
        return Response(command_status=True, message="> Would not be caught as a question.")
    result = "> " + ", ".join(reasons).capitalize()
    if why:
        result += "\n----------\n" + why
    return Response(command_status=True, message=result)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_test_title(content, content_lower, *args, **kwargs):
    """
    Run FindSpam over arbitrary text as if it were a post title.

    :param content: Raw message; text after the command prefix is tested
    :param content_lower:
    :param kwargs: No additional arguments expected
    :return: A Response listing the matched reasons, if any
    """
    string_to_test = content[10:]  # text after the command prefix
    if not string_to_test:
        return Response(command_status=True, message="Nothing to test")
    # Placeholder body/username; the tested text is the title.
    reasons, why = FindSpam.test_post(string_to_test, "Valid question body", "Valid username", "",
                                      False, False, 1, 0)
    if not reasons:
        return Response(command_status=True, message="> Would not be caught as a title.")
    result = "> " + ", ".join(reasons).capitalize()
    if why:
        result += "\n----------\n" + why
    return Response(command_status=True, message=result)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_test_username(content, content_lower, *args, **kwargs):
    """
    Run FindSpam over arbitrary text as if it were a username.

    :param content: Raw message; text after the command prefix is tested
    :param content_lower:
    :param kwargs: No additional arguments expected
    :return: A Response listing the matched reasons, if any
    """
    string_to_test = content[10:]  # text after the command prefix
    if not string_to_test:
        return Response(command_status=True, message="Nothing to test")
    # Placeholder title/body; the tested text is the username.
    reasons, why = FindSpam.test_post("Valid title", "Valid post body", string_to_test, "",
                                      False, False, 1, 0)
    if not reasons:
        return Response(command_status=True, message="> Would not be caught as a username.")
    result = "> " + ", ".join(reasons).capitalize()
    if why:
        result += "\n----------\n" + why
    return Response(command_status=True, message=result)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_thread_descriptions(*args, **kwargs):
    """
    List ident and name of every live thread (debugging aid).

    :return: A Response with one "ident: name" line per thread
    """
    lines = ["{ident}: {name}".format(ident=t.ident, name=t.name)
             for t in threading.enumerate()]
    return Response(command_status=True, message="\n".join(lines))
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_version(*args, **kwargs):
    """
    Report the commit this instance is running, linked to GitHub.

    :return: A Response with the commit link
    """
    version_message = '[{commit_name}]({repository}/commit/{commit_code})'.format(
        commit_name=GlobalVars.commit_with_author,
        commit_code=GlobalVars.commit,
        repository=GlobalVars.bot_repository)
    return Response(command_status=True, message=version_message)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_whoami(ev_room, *args, **kwargs):
    """
    Report the bot's own chat user id for this room.

    :param ev_room: Room the query came from
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    if ev_room not in GlobalVars.smokeDetector_user_id:
        return Response(command_status=True,
                        message="I don't know my user ID for this room. (Something is wrong, and it's apnorton's fault.)")
    return Response(command_status=True,
                    message="My id for this room is {}.".format(GlobalVars.smokeDetector_user_id[ev_room]))
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyUnboundLocalVariable
def command_pending(content, content_lower, *args, **kwargs):
    """
    List TP-fed posts that have not been deleted yet, one metasmoke page
    at a time.

    :param content: Raw message; text after the command prefix is the page number
    :param content_lower:
    :param kwargs: No additional arguments expected
    :return: A Response with one link pair per undeleted post
    """
    raw_page = content[11:]  # text after the command prefix
    try:
        page = int(raw_page)
    except ValueError:
        return Response(
            command_status=False,
            message="Expected an integer page number and got '{0!r}'"
                    " instead (ValueError).".format(raw_page))
    posts = requests.get("https://metasmoke.erwaysoftware.com/api/undeleted?pagesize=2&page={}&key={}".format(
        page, GlobalVars.metasmoke_key)).json()
    messages = ["[{0}]({1}) ([MS](https://m.erwaysoftware.com/post/{0}))".format(post['id'], post['link'])
                for post in posts['items']]
    return Response(command_status=True,
                    message=", ".join(messages))
# --- Notification functions --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_allnotifications(message_parts, ev_user_id, wrap2, *args, **kwargs):
    """
    List the sites a user will be notified about in a given room.

    :param wrap2: Chat wrapper; its host identifies the chat site
    :param ev_user_id: Chat user asking for their notification list
    :param message_parts: [command, room-id]
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    if len(message_parts) != 2:
        return Response(command_status=False, message="1 argument expected")
    user_id = int(ev_user_id)
    chat_site = wrap2.host
    room_id = message_parts[1]
    if not room_id.isdigit():
        return Response(command_status=False, message="Room ID is invalid.")
    sites = get_all_notification_sites(user_id, chat_site, room_id)
    if not sites:
        return Response(command_status=True, message="You won't get notified for any sites in that room.")
    return Response(command_status=True, message="You will get notified for these sites:\r\n" + ", ".join(sites))
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_notify(message_parts, ev_user_id, wrap2, *args, **kwargs):
    """
    Subscribe a user to events on a site in a single room.
    :param message_parts: Command tokens; [1] is the room id, [2] the SE site.
    :param ev_user_id: Chat id of the requesting user.
    :param wrap2: Chat wrapper; its .host identifies the chat domain.
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    if len(message_parts) != 3:
        return Response(command_status=False, message="2 arguments expected")
    user_id = int(ev_user_id)
    chat_site = wrap2.host
    raw_room = message_parts[1]
    if not raw_room.isdigit():
        return Response(command_status=False, message="Room ID is invalid.")
    room_id = int(raw_room)
    # A trailing "-" anywhere in the command requests the quiet (no reply) variant.
    quiet_action = any(part.endswith('-') for part in message_parts)
    se_site = message_parts[2].replace('-', '')
    response, full_site = add_to_notification_list(user_id, chat_site, room_id, se_site)
    if response == 0:
        if quiet_action:
            return Response(command_status=True, message=None)
        return Response(command_status=True,
                        message="You'll now get pings from me if I report a post on `{site_name}`, in room "
                                "`{room_id}` on `chat.{chat_domain}`".format(site_name=se_site,
                                                                             room_id=room_id,
                                                                             chat_domain=chat_site))
    if response == -1:
        return Response(command_status=True, message="That notification configuration is already registered.")
    if response == -2:
        return Response(command_status=False, message="The given SE site does not exist.")
    return Response(command_status=False, message="Unrecognized code returned when adding notification.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_unnotify(message_parts, ev_user_id, wrap2, *args, **kwargs):
    """
    Unsubscribes a user from specific events.
    :param message_parts: Command tokens; [1] is the room id, [2] the SE site.
    :param ev_user_id: Chat id of the requesting user.
    :param wrap2: Chat wrapper; its .host identifies the chat domain.
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    if len(message_parts) != 3:
        return Response(command_status=False, message="2 arguments expected")
    chat_site = wrap2.host
    raw_room = message_parts[1]
    if not raw_room.isdigit():
        return Response(command_status=False, message="Room ID is invalid.")
    room_id = int(raw_room)
    quiet_action = any(part.endswith('-') for part in message_parts)
    se_site = message_parts[2].replace('-', '')
    removed = remove_from_notification_list(int(ev_user_id), chat_site, room_id, se_site)
    if not removed:
        return Response(command_status=True, message="That configuration doesn't exist.")
    if quiet_action:
        return Response(command_status=True, message=None)
    return Response(command_status=True,
                    message="I will no longer ping you if I report a post on `{site_name}`, in room `{room_id}` "
                            "on `chat.{chat_domain}`".format(site_name=se_site,
                                                             room_id=room_id,
                                                             chat_domain=chat_site))
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_willbenotified(message_parts, ev_user_id, wrap2, *args, **kwargs):
    """
    States whether a user will be notified for a site in a room.
    :param message_parts: Command tokens; [1] is the room id, [2] the SE site.
    :param ev_user_id: Chat id of the requesting user.
    :param wrap2: Chat wrapper; its .host identifies the chat domain.
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    if len(message_parts) != 3:
        return Response(command_status=False, message="2 arguments expected")
    raw_room = message_parts[1]
    if not raw_room.isdigit():
        return Response(command_status=False, message="Room ID is invalid")
    notified = will_i_be_notified(int(ev_user_id), wrap2.host, int(raw_room), message_parts[2])
    if notified:
        return Response(command_status=True, message="Yes, you will be notified for that site in that room.")
    return Response(command_status=True, message="No, you won't be notified for that site in that room.")
# --- Post Responses --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_report_post(ev_room, ev_user_id, wrap2, message_parts, message_url,
                        ev_user_name, ev_room_name, *args, **kwargs):
    """
    Report a post (or posts)
    :param ev_room_name:
    :param ev_user_name:
    :param message_url:
    :param message_parts:
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: A string (or None)
    """
    # Rate limiting: can_report_now returns (allowed, seconds_until_allowed) per user/host.
    crn, wait = can_report_now(ev_user_id, wrap2.host)
    if not crn:
        return Response(command_status=False, message="You can execute the !!/report command again in {} seconds. "
                                                      "To avoid one user sending lots of reports in a few commands and "
                                                      "slowing SmokeDetector down due to rate-limiting, you have to "
                                                      "wait 30 seconds after you've reported multiple posts using "
                                                      "!!/report, even if your current command just has one URL. (Note "
                                                      "that this timeout won't be applied if you only used !!/report "
                                                      "for one post)".format(wait))
    if len(message_parts) < 2:
        return Response(command_status=False, message="Not enough arguments.")
    output = []
    index = 0
    # Deduplicate the URL arguments; note set() does not preserve the user's ordering.
    urls = list(set(message_parts[1:]))
    if len(urls) > 5:
        return Response(command_status=False, message="To avoid SmokeDetector reporting posts too slowly, you can "
                                                      "report at most 5 posts at a time. This is to avoid "
                                                      "SmokeDetector's chat messages getting rate-limited too much, "
                                                      "which would slow down reports.")
    for url in urls:
        index += 1
        # api_get_post: None => URL didn't parse; False => API has no data (likely deleted).
        post_data = api_get_post(url)
        if post_data is None:
            output.append("Post {}: That does not look like a valid post URL.".format(index))
            continue
        if post_data is False:
            output.append("Post {}: Could not find data for this post in the API. "
                          "It may already have been deleted.".format(index))
            continue
        if has_already_been_posted(post_data.site, post_data.post_id, post_data.title) and not is_false_positive(
                (post_data.post_id, post_data.site)):
            # Don't re-report if the post wasn't marked as a false positive. If it was marked as a false positive,
            # this re-report might be attempting to correct that/fix a mistake/etc.
            output.append("Post {}: Already recently reported".format(index))
            continue
        # Manual reports also blacklist the post's author, when one can be resolved.
        user = get_user_from_url(post_data.owner_url)
        if user is not None:
            add_blacklisted_user(user, message_url, post_data.post_url)
        # NOTE(review): ev_room_name is decoded here, so it presumably arrives as a UTF-8
        # byte string (Python 2) — confirm against the caller.
        why = u"Post manually reported by user *{}* in room *{}*.\n".format(ev_user_name,
                                                                           ev_room_name.decode('utf-8'))
        batch = ""
        if len(urls) > 1:
            batch = " (batch report: post {} out of {})".format(index, len(urls))
        handle_spam(title=post_data.title,
                    body=post_data.body,
                    poster=post_data.owner_name,
                    site=post_data.site,
                    post_url=post_data.post_url,
                    poster_url=post_data.owner_url,
                    post_id=post_data.post_id,
                    reasons=["Manually reported " + post_data.post_type + batch],
                    is_answer=post_data.post_type == "answer",
                    why=why,
                    owner_rep=post_data.owner_rep,
                    post_score=post_data.score,
                    up_vote_count=post_data.up_vote_count,
                    down_vote_count=post_data.down_vote_count,
                    question_id=post_data.question_id)
    # Chained comparison: len(urls) > 1 AND len(urls) > len(output), i.e. this was a
    # multi-URL report and at least one post was actually reported (not all skipped).
    if 1 < len(urls) > len(output):
        add_or_update_multiple_reporter(ev_user_id, wrap2.host, time.time())
    if len(output) > 0:
        return Response(command_status=True, message=os.linesep.join(output))
    return Response(command_status=True, message=None)
#
#
# Subcommands go below here
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyBroadException
@check_permissions
def subcommand_delete(ev_room, ev_user_id, wrap2, msg, *args, **kwargs):
    """
    Attempts to delete a post from room
    :param msg: Chat message object to delete
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: None
    """
    try:
        msg.delete()
    except Exception:
        # Best-effort: deletion can fail (message too old, missing rights, network).
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still propagate.
        pass
    return Response(command_status=True, message=None)
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def subcommand_editlink(ev_room, ev_user_id, wrap2, msg_content, msg, *args, **kwargs):
    """
    Removes link from a marked report message.
    :param msg: Chat message object to edit in place
    :param msg_content: Current text of the report message
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: None
    """
    replacement = edited_message_after_postgone_command(msg_content)
    if replacement is not None:
        msg.edit(replacement)
        return Response(command_status=True, message=None)
    return Response(command_status=True, message="That's not a report.")
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyBroadException
@check_permissions
def subcommand_falsepositive(ev_room, ev_user_id, wrap2, post_site_id, post_url,
                             quiet_action, post_type, msg, second_part_lower, ev_user_name,
                             msg_content, *args, **kwargs):
    """
    Marks a post as a false positive
    :param msg_content:
    :param ev_user_name:
    :param second_part_lower:
    :param msg:
    :param post_type:
    :param quiet_action:
    :param post_url:
    :param post_site_id:
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: None or a string
    """
    if not is_report(post_site_id):
        return Response(command_status=True, message="That message is not a report.")
    send_metasmoke_feedback(post_url=post_url,
                            second_part_lower=second_part_lower,
                            ev_user_name=ev_user_name,
                            ev_user_id=ev_user_id,
                            ev_chat_host=wrap2.host)
    add_false_positive((post_site_id[0], post_site_id[1]))
    user_added = False
    user_removed = False
    url_from_msg = fetch_owner_url_from_msg_content(msg_content)
    user = None
    if url_from_msg is not None:
        user = get_user_from_url(url_from_msg)
    # The "falseu"/"fpu" variants additionally whitelist the post's author.
    if second_part_lower.startswith("falseu") or second_part_lower.startswith("fpu"):
        if user is not None:
            add_whitelisted_user(user)
            user_added = True
    # If this report blacklisted a user, undo that as part of the false-positive feedback.
    if "Blacklisted user:" in msg_content:
        if user is not None:
            remove_blacklisted_user(user)
            user_removed = True
    if post_type == "question":
        if user_added and not quiet_action:
            return Response(command_status=True, message="Registered question as false positive and whitelisted user.")
        elif user_removed and not quiet_action:
            return Response(command_status=True,
                            message="Registered question as false positive and removed user from the blacklist.")
        elif not quiet_action:
            return Response(command_status=True, message="Registered question as false positive.")
    elif post_type == "answer":
        if user_added and not quiet_action:
            return Response(command_status=True, message="Registered answer as false positive and whitelisted user.")
        elif user_removed and not quiet_action:
            return Response(command_status=True,
                            message="Registered answer as false positive and removed user from the blacklist.")
        elif not quiet_action:
            return Response(command_status=True, message="Registered answer as false positive.")
    # Only reached for quiet actions (or an unknown post_type): delete the report message
    # unless we are in Charcoal HQ. Deletion is best-effort.
    try:
        if int(msg.room.id) != int(GlobalVars.charcoal_hq.id):
            # Removed leftover debug print; narrowed from a bare `except:`.
            msg.delete()
    except Exception:
        pass
    return Response(command_status=True, message=None)
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def subcommand_ignore(ev_room, ev_user_id, wrap2, post_site_id, post_url, quiet_action, second_part_lower, ev_user_name,
                      *args, **kwargs):
    """
    Marks a post to be ignored.
    :param quiet_action: When True, suppress the confirmation reply.
    :param post_url:
    :param post_site_id:
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: String or None
    """
    if not is_report(post_site_id):
        return Response(command_status=True, message="That message is not a report.")
    send_metasmoke_feedback(post_url=post_url,
                            second_part_lower=second_part_lower,
                            ev_user_name=ev_user_name,
                            ev_user_id=ev_user_id,
                            ev_chat_host=wrap2.host)
    add_ignored_post(post_site_id[0:2])
    if quiet_action:
        return Response(command_status=True, message=None)
    return Response(command_status=True, message="Post ignored; alerts about it will no longer be posted.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def subcommand_naa(ev_room, ev_user_id, wrap2, post_site_id, post_url, quiet_action,
                   second_part_lower, ev_user_name, post_type, *args, **kwargs):
    """
    Marks a post as NAA (not an answer).
    :param post_type: Must be "answer"; questions cannot be NAAs.
    :param ev_user_name:
    :param second_part_lower:
    :param quiet_action:
    :param post_url:
    :param post_site_id:
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: String or None
    """
    if not is_report(post_site_id):
        return Response(command_status=True, message="That message is not a report.")
    if post_type != "answer":
        return Response(command_status=True, message="That report was a question; questions cannot be marked as NAAs.")
    send_metasmoke_feedback(post_url=post_url,
                            second_part_lower=second_part_lower,
                            ev_user_name=ev_user_name,
                            ev_user_id=ev_user_id,
                            ev_chat_host=wrap2.host)
    add_ignored_post(post_site_id[0:2])
    if not quiet_action:
        return Response(command_status=True, message="Recorded answer as an NAA in metasmoke.")
    return Response(command_status=True, message=None)
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def subcommand_truepositive(ev_room, ev_user_id, wrap2, post_site_id, post_url, quiet_action,
                            post_type, message_url, msg, second_part_lower, ev_user_name,
                            msg_content, *args, **kwargs):
    """
    Marks a post as a true positive
    :param msg_content:
    :param ev_user_name:
    :param second_part_lower:
    :param msg:
    :param message_url:
    :param post_type:
    :param quiet_action:
    :param post_url:
    :param post_site_id:
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: None or a string
    """
    if not is_report(post_site_id):
        return Response(command_status=True, message="That message is not a report.")
    # Forward the feedback to metasmoke (asynchronously, on a background thread).
    send_metasmoke_feedback(post_url=post_url,
                            second_part_lower=second_part_lower,
                            ev_user_name=ev_user_name,
                            ev_user_id=ev_user_id,
                            ev_chat_host=wrap2.host)
    user_added = False
    # The "trueu"/"tpu" variants additionally blacklist the post's author.
    if second_part_lower.startswith("trueu") or second_part_lower.startswith("tpu"):
        url_from_msg = fetch_owner_url_from_msg_content(msg_content)
        if url_from_msg is not None:
            user = get_user_from_url(url_from_msg)
            if user is not None:
                add_blacklisted_user(user, message_url, "http:" + post_url)
                user_added = True
    if post_type == "question":
        if quiet_action:
            return Response(command_status=True, message=None)
        if user_added:
            return Response(command_status=True, message="Blacklisted user and registered question as true positive.")
        return Response(command_status=True,
                        message="Recorded question as true positive in metasmoke. Use `tpu` or `trueu` if you want "
                                "to blacklist a user.")
    elif post_type == "answer":
        if quiet_action:
            return Response(command_status=True, message=None)
        if user_added:
            return Response(command_status=True, message="Blacklisted user.")
        return Response(command_status=True, message="Recorded answer as true positive in metasmoke. If you want to "
                                                     "blacklist the poster of the answer, use `trueu` or `tpu`.")
    else:
        # Defensive: post_type should always be "question" or "answer" upstream.
        return Response(command_status=False, message="Post type was not recognized (not `question` or `answer`) - "
                                                      "call a developer! "
                                                      "No action was taken.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
def subcommand_why(msg_content, *args, **kwargs):
    """
    Returns reasons a post was reported
    :param msg_content: Text of the report message to look up
    :param kwargs: No additional arguments expected
    :return: A string
    """
    post_info = fetch_post_id_and_site_from_msg_content(msg_content)
    if post_info is None:
        # Not a regular report; it may be an "allspam" (report-user) report.
        post_info = fetch_user_from_allspam_report(msg_content)
        if post_info is None:
            return Response(command_status=True, message="That's not a report.")
        why = get_why_allspam(post_info)
        # BUGFIX: was `why is not None or why != ""`, which is always true and could
        # return an empty/None message while making the fallback below unreachable.
        if why is not None and why != "":
            return Response(command_status=True, message=why)
    else:
        post_id, site, _ = post_info
        why = get_why(site, post_id)
        if why is not None and why != "":
            return Response(command_status=True, message=why)
    return Response(command_status=True, message="There is no `why` data for that user (anymore).")
# noinspection PyIncorrectDocstring,PyUnusedLocal
def subcommand_autoflagged(msg_content, post_url, *args, **kwargs):
    """
    Determines whether a post was automatically flagged by Metasmoke.
    :param msg_content:
    :param post_url: URL of the post to check
    :param kwargs: No additional arguments expected
    :return: A string
    """
    was_flagged, flagger_names = Metasmoke.determine_if_autoflagged(post_url)
    if not was_flagged:
        return Response(command_status=True, message="That post was **not** automatically flagged by metasmoke.")
    return Response(command_status=True,
                    message="That post was automatically flagged, using flags from: {}.".format(
                        ", ".join(flagger_names)))
# This dictionary defines our commands and the associated function to call
# To use this your calling code will look like this
# command_dict['command'](paramer1, parameter2, ...)
# Each key can have a different set of parameters so 'command1' could look like this
# command_dict['command1'](paramer1)
# Triggering input:
# !!/alive
# Hardcoded key example of above input:
# command_dict["!!/alive"]()
command_dict = {
    # Maps the full "!!/..." command string to its handler. Several keys are
    # aliases for the same handler; a trailing "-" key is the quiet variant
    # (the handler inspects message_parts for the "-" suffix itself).
    "!!/addblu": command_add_blacklist_user,
    "!!/addblu-": command_add_blacklist_user,
    "!!/addwlu": command_add_whitelist_user,
    "!!/addwlu-": command_add_whitelist_user,
    "!!/alive": command_alive,
    "!!/allnotificationsites": command_allnotifications,
    "!!/allspam": command_allspam,
    "!!/amiprivileged": command_privileged,
    "!!/amicodeprivileged": command_code_privileged,
    "!!/apiquota": command_quota,
    "!!/blame": command_blame,
    "!!/block": command_block,
    "!!/brownie": command_brownie,
    "!!/blacklist": command_blacklist_help,
    "!!/blacklist-website": command_blacklist_website,
    "!!/blacklist-keyword": command_blacklist_keyword,
    "!!/blacklist-username": command_blacklist_username,
    "!!/commands": command_help,
    "!!/coffee": command_coffee,
    "!!/errorlogs": command_errorlogs,
    "!!/gitstatus": command_gitstatus,
    "!!/help": command_help,
    # "!!/hats": command_hats, (uncomment when Winterbash begins)
    "!!/info": command_help,
    "!!/isblu": command_check_blacklist,
    "!!/iswlu": command_check_whitelist,
    "!!/lick": command_lick,
    "!!/location": command_location,
    "!!/master": command_master,
    "!!/notify": command_notify,
    "!!/notify-": command_notify,
    "!!/pull": command_pull,
    "!!/pending": command_pending,
    "!!/reboot": command_reboot,
    "!!/reportuser": command_allspam,
    "!!/rmblu": command_remove_blacklist_user,
    "!!/rmblu-": command_remove_blacklist_user,
    "!!/rmwlu": command_remove_whitelist_user,
    "!!/rmwlu-": command_remove_whitelist_user,
    "!!/report": command_report_post,
    "!!/restart": command_reboot,
    "!!/rev": command_version,
    "!!/stappit": command_stappit,
    "!!/status": command_status,
    "!!/stopflagging": command_stop_flagging,
    "!!/standby": command_standby,
    "!!/tea": command_tea,
    "!!/test": command_test,
    "!!/testanswer": command_test_answer,
    "!!/test-a": command_test_answer,
    "!!/testquestion": command_test_question,
    "!!/test-q": command_test_question,
    "!!/testtitle": command_test_title,
    "!!/test-t": command_test_title,
    "!!/testusername": command_test_username,
    "!!/testuser": command_test_username,
    "!!/test-u": command_test_username,
    "!!/threads": command_thread_descriptions,
    "!!/unblock": command_unblock,
    "!!/unnotify": command_unnotify,
    "!!/unnotify-": command_unnotify,
    "!!/ver": command_version,
    "!!/willibenotified": command_willbenotified,
    "!!/whoami": command_whoami,
    "!!/wut": command_wut,
    "!!/queuestatus": command_queuestatus
}
# This dictionary defines our subcommands and the associated function to call
# To use this your calling code will look like this
# second_part_dict['command'](paramer1, parameter2, ...)
# Each key can have a different set of parameters so 'command1' could look like this
# second_part_dict['command1'](paramer1)
# Triggering input:
#   sd false
# Hardcoded key example of above input:
#   subcommand_dict["false"](parameter1, parameter2, ...)
subcommand_dict = {
    # Maps the feedback word given in reply to a report ("sd <word>") to its
    # handler. A trailing "-" is the quiet variant; the "...u" forms also act
    # on the post's author (whitelist/blacklist).
    "false": subcommand_falsepositive,
    "fp": subcommand_falsepositive,
    "falseu": subcommand_falsepositive,
    "fpu": subcommand_falsepositive,
    "false-": subcommand_falsepositive,
    "fp-": subcommand_falsepositive,
    "falseu-": subcommand_falsepositive,
    "fpu-": subcommand_falsepositive,
    "true": subcommand_truepositive,
    "tp": subcommand_truepositive,
    "trueu": subcommand_truepositive,
    "tpu": subcommand_truepositive,
    "true-": subcommand_truepositive,
    "tp-": subcommand_truepositive,
    "trueu-": subcommand_truepositive,
    "tpu-": subcommand_truepositive,
    "ignore": subcommand_ignore,
    "ignore-": subcommand_ignore,
    "naa": subcommand_naa,
    "naa-": subcommand_naa,
    "delete": subcommand_delete,
    "remove": subcommand_delete,
    "gone": subcommand_delete,
    "poof": subcommand_delete,
    "del": subcommand_delete,
    "postgone": subcommand_editlink,
    "why": subcommand_why,
    "why?": subcommand_why,
    "autoflagged?": subcommand_autoflagged,
    "autoflagged": subcommand_autoflagged,
}
# NOTE(review): the two lines below were a pasted commit message, not Python code
# (a syntax error as bare text); preserved here as comments.
# Remove a debugging output line.
# Whoops, I left this in. My bad!
# noinspection PyUnresolvedReferences
from globalvars import GlobalVars
from findspam import FindSpam
# noinspection PyUnresolvedReferences
from datetime import datetime
from utcdate import UtcDate
from apigetpost import api_get_post
from datahandling import *
from metasmoke import Metasmoke
from parsing import *
from spamhandling import handle_spam
from spamhandling import handle_user_with_all_spam
from gitmanager import GitManager
import threading
from threading import Thread
import random
import requests
import os
import time
import datahandling
import regex
from helpers import Response
# TODO: pull out code block to get user_id, chat_site, room_id into function
# TODO: Return result for all functions should be similar (tuple/named tuple?)
# TODO: Do we need uid == -2 check? Turn into "is_user_valid" check
# TODO: Consistant return structure
# if return...else return vs if return...return
def check_permissions(function):
    """
    Decorator: only invoke *function* when the triggering user is privileged
    in the room (per datahandling.is_privileged); otherwise return an
    explanatory Response.
    """
    def run_command(ev_room, ev_user_id, wrap2, *args, **kwargs):
        if not datahandling.is_privileged(ev_room, ev_user_id, wrap2):
            return Response(command_status=False,
                            message="You are not a privileged user. Please see [the privileges wiki page](" +
                                    GlobalVars.bot_repository + "/wiki/Privileges) for information on what privileges"
                                    " are and what is expected of privileged users.")
        # Forward the event context as keyword arguments to the wrapped command.
        kwargs['ev_room'] = ev_room
        kwargs['ev_user_id'] = ev_user_id
        kwargs['wrap2'] = wrap2
        return function(*args, **kwargs)
    return run_command
# Functions go before the final dictionaries of command to function mappings
def post_message_in_room(room_id_str, msg, length_check=True):
    """Send *msg* to the known chat room whose id matches *room_id_str*; unknown ids are ignored."""
    target = None
    if room_id_str == GlobalVars.charcoal_room_id:
        target = GlobalVars.charcoal_hq
    elif room_id_str == GlobalVars.meta_tavern_room_id:
        target = GlobalVars.tavern_on_the_meta
    elif room_id_str == GlobalVars.socvr_room_id:
        target = GlobalVars.socvr
    if target is not None:
        target.send_message(msg, length_check)
def is_report(post_site_id):
    """
    Checks if a post is a report
    :param post_site_id: Report to check
    :return: Boolean stating if it is a report
    """
    # Any non-None value counts as a report (idiomatic form of the old
    # `if x is None: return False / return True`).
    return post_site_id is not None
# noinspection PyIncorrectDocstring,PyUnusedLocal
def send_metasmoke_feedback(post_url, second_part_lower, ev_user_name, ev_user_id, ev_chat_host):
    """
    Sends feedback to metasmoke on a background thread (fire-and-forget).
    :param post_url: The post url we are sending feedback for
    :param second_part_lower: Feedback string (e.g. "tp", "fp", "naa")
    :param ev_user_name: User name supplying the feedback
    :param ev_user_id: User ID supplying the feedback
    :param ev_chat_host: Chat host the feedback was given on
    :return: None
    """
    # The thread is not joined: feedback delivery must not block command handling.
    t_metasmoke = Thread(name="metasmoke feedback send on #{url}".format(url=post_url),
                         target=Metasmoke.send_feedback_for_post,
                         args=(post_url, second_part_lower, ev_user_name, ev_user_id, ev_chat_host,))
    t_metasmoke.start()
def single_random_user(ev_room):
    """
    Returns a single user name from users in a room
    :param ev_room: Room to select users from
    :return: A single user tuple
    """
    chatting_users = GlobalVars.users_chatting[ev_room]
    return random.choice(chatting_users)
#
#
# System command functions below here
# Each of these should take the *args and **kwargs parameters. This allows us to create functions that
# don't accept any parameters but still use the `command_dict` mappings
# --- Blacklist Functions --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_add_blacklist_user(message_parts, content_lower, message_url, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Adds a user to the site blacklist.
    :param message_url: URL of the chat message issuing the command.
    :param content_lower: Lower-cased message content, parsed for the user spec.
    :param message_parts:
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    quiet_action = any(part.endswith('-') for part in message_parts)
    # uid == -2 signals a parse error; val then holds the error message.
    uid, val = get_user_from_list_command(content_lower)
    if uid == -2:
        return Response(command_status=True, message="Error: {}".format(val))
    if uid > -1 and val != "":
        add_blacklisted_user((uid, val), message_url, "")
        if quiet_action:
            return Response(command_status=True, message=None)
        return Response(command_status=True, message="User blacklisted (`{}` on `{}`).".format(uid, val))
    return Response(command_status=False,
                    message="Invalid format. Valid format: `!!/addblu profileurl` "
                            "*or* `!!/addblu userid sitename`.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_check_blacklist(content_lower, *args, **kwargs):
    """
    Checks if a user is blacklisted.
    :param content_lower: Lower-cased message content, parsed for the user spec.
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    uid, val = get_user_from_list_command(content_lower)
    if uid == -2:
        return Response(command_status=True, message="Error: {}".format(val))
    if uid > -1 and val != "":
        if is_blacklisted_user((uid, val)):
            return Response(command_status=True, message="User is blacklisted (`{}` on `{}`).".format(uid, val))
        return Response(command_status=True, message="User is not blacklisted (`{}` on `{}`).".format(uid, val))
    return Response(command_status=False,
                    message="Invalid format. Valid format: `!!/isblu profileurl` *or* `!!/isblu userid sitename`.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_remove_blacklist_user(message_parts, content_lower, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Removes user from site blacklist.
    :param content_lower: Lower-cased message content, parsed for the user spec.
    :param message_parts:
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    quiet_action = any(part.endswith('-') for part in message_parts)
    uid, val = get_user_from_list_command(content_lower)
    if uid == -2:
        return Response(command_status=True, message="Error: {}".format(val))
    if uid > -1 and val != "":
        if not remove_blacklisted_user((uid, val)):
            return Response(command_status=True, message="User is not blacklisted.")
        if quiet_action:
            return Response(command_status=True, message=None)
        return Response(command_status=True,
                        message="User removed from blacklist (`{}` on `{}`).".format(uid, val))
    return Response(command_status=False,
                    message="Invalid format. Valid format: `!!/rmblu profileurl` *or* `!!/rmblu userid sitename`.")
# --- Whitelist functions --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_add_whitelist_user(message_parts, content_lower, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Adds a user to site whitelist.
    :param content_lower: Lower-cased message content, parsed for the user spec.
    :param message_parts:
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    quiet_action = any(part.endswith('-') for part in message_parts)
    uid, val = get_user_from_list_command(content_lower)
    if uid == -2:
        return Response(command_status=True, message="Error: {}".format(val))
    if uid > -1 and val != "":
        add_whitelisted_user((uid, val))
        if quiet_action:
            return Response(command_status=True, message=None)
        return Response(command_status=True, message="User whitelisted (`{}` on `{}`).".format(uid, val))
    return Response(command_status=False,
                    message="Invalid format. Valid format: `!!/addwlu profileurl` *or* "
                            "`!!/addwlu userid sitename`.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_check_whitelist(content_lower, *args, **kwargs):
    """
    Checks if a user is whitelisted.
    :param content_lower: Lower-cased message content, parsed for the user spec.
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    uid, val = get_user_from_list_command(content_lower)
    if uid == -2:
        return Response(command_status=True, message="Error: {}".format(val))
    if uid > -1 and val != "":
        if is_whitelisted_user((uid, val)):
            return Response(command_status=True, message="User is whitelisted (`{}` on `{}`).".format(uid, val))
        return Response(command_status=True, message="User is not whitelisted (`{}` on `{}`).".format(uid, val))
    return Response(command_status=False,
                    message="Invalid format. Valid format: `!!/iswlu profileurl` *or* `!!/iswlu userid sitename`.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_remove_whitelist_user(message_parts, content_lower, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Removes a user from site whitelist
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param content_lower: Lower-cased message content, parsed for the user spec.
    :param message_parts:
    :param kwargs: No additional arguments expected
    :return: A string
    """
    quiet_action = any([part.endswith('-') for part in message_parts])
    uid, val = get_user_from_list_command(content_lower)
    # BUGFIX: was `uid != -1`, which let the uid == -2 parse-error case fall into this
    # success branch (calling remove_whitelisted_user with the error message as `val`)
    # instead of reaching the error report below. `uid > -1` matches every sibling
    # command (addblu/rmblu/addwlu/isblu/iswlu).
    if uid > -1 and val != "":
        if remove_whitelisted_user((uid, val)):
            return Response(command_status=True, message=None) if quiet_action \
                else Response(command_status=True,
                              message="User removed from whitelist (`{}` on `{}`).".format(uid, val))
        else:
            return Response(command_status=True, message="User is not whitelisted.")
    elif uid == -2:
        return Response(command_status=True, message="Error: {}".format(val))
    else:
        return Response(command_status=False,
                        message="Invalid format. Valid format: `!!/rmwlu profileurl` *or* `!!/rmwlu userid sitename`.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_blacklist_help(*args, **kwargs):
    """
    Returns a string which explains the usage of the new blacklist commands.
    :return: A Response
    """
    deprecation_notice = ("The !!/blacklist command has been deprecated. "
                          "Please use !!/blacklist-website, !!/blacklist-username "
                          "or !!/blacklist-keyword. Remember to escape dots "
                          "in URLs using \\.")
    return Response(command_status=True, message=deprecation_notice)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_blacklist_website(message_parts, ev_user_name, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Adds a string to the website blacklist and commits/pushes to GitHub.
    :param message_parts: Command tokens; everything after the command is the pattern.
    :param ev_user_name:
    :param ev_room:
    :param ev_user_id:
    :return: A Response
    """
    profile_link = "http://chat.{host}/users/{id}".format(host=wrap2.host, id=str(ev_user_id))
    pattern = " ".join(message_parts[1:])
    # Validate the pattern compiles before it is committed to the blacklist.
    # noinspection PyProtectedMember
    try:
        regex.compile(pattern)
    except regex._regex_core.error:
        return Response(command_status=False, message="An invalid website pattern was provided, not blacklisting.")
    outcome = GitManager.add_to_blacklist(
        blacklist="website",
        item_to_blacklist=pattern,
        username=ev_user_name,
        chat_profile_link=profile_link,
        code_permissions=datahandling.is_code_privileged(ev_room, ev_user_id, wrap2)
    )
    return Response(command_status=outcome[0], message=outcome[1])
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_blacklist_keyword(message_parts, ev_user_name, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Adds a string to the keyword blacklist and commits/pushes to GitHub.
    :param message_parts: Command tokens; everything after the command is the pattern.
    :param ev_user_name:
    :param ev_room:
    :param ev_user_id:
    :return: A Response
    """
    profile_link = "http://chat.{host}/users/{id}".format(host=wrap2.host, id=str(ev_user_id))
    pattern = " ".join(message_parts[1:])
    # Validate the pattern compiles before it is committed to the blacklist.
    # noinspection PyProtectedMember
    try:
        regex.compile(pattern)
    except regex._regex_core.error:
        return Response(command_status=False, message="An invalid keyword pattern was provided, not blacklisting.")
    outcome = GitManager.add_to_blacklist(
        blacklist="keyword",
        item_to_blacklist=pattern,
        username=ev_user_name,
        chat_profile_link=profile_link,
        code_permissions=datahandling.is_code_privileged(ev_room, ev_user_id, wrap2)
    )
    return Response(command_status=outcome[0], message=outcome[1])
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_blacklist_username(message_parts, ev_user_name, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Adds a string to the username blacklist and commits/pushes to GitHub.
    :param message_parts: Command tokens; everything after the command is the pattern.
    :param ev_user_name:
    :param ev_room:
    :param ev_user_id:
    :return: A Response
    """
    profile_link = "http://chat.{host}/users/{id}".format(host=wrap2.host, id=str(ev_user_id))
    pattern = " ".join(message_parts[1:])
    # Validate the pattern compiles before it is committed to the blacklist.
    # noinspection PyProtectedMember
    try:
        regex.compile(pattern)
    except regex._regex_core.error:
        return Response(command_status=False, message="An invalid username pattern was provided, not blacklisting.")
    outcome = GitManager.add_to_blacklist(
        blacklist="username",
        item_to_blacklist=pattern,
        username=ev_user_name,
        chat_profile_link=profile_link,
        code_permissions=datahandling.is_code_privileged(ev_room, ev_user_id, wrap2)
    )
    return Response(command_status=outcome[0], message=outcome[1])
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_gitstatus(wrap2, *args, **kwargs):
    """
    Reports the current git status of the SmokeDetector checkout.
    :param wrap2:
    :param kwargs: No additional arguments expected
    :return: A Response with the git status text
    """
    return Response(command_status=True, message=GitManager.current_git_status())
# --- Joke Commands --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_blame(ev_room, *args, **kwargs):
    """
    Picks a random user in the room to blame (joke command)
    :param ev_room:
    :return: A Response
    """
    # De-duplicate the room's chatting-users list before picking a victim.
    GlobalVars.users_chatting[ev_room] = list(set(GlobalVars.users_chatting[ev_room]))
    victim = single_random_user(ev_room)
    return Response(command_status=True, message=u"It's [{}]({})'s fault.".format(victim[0], victim[1]))
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_brownie(*args, **kwargs):
    """
    Responds with "Brown!" (joke command)
    :return: A Response
    """
    return Response(command_status=True, message="Brown!")
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_coffee(ev_user_name, *args, **kwargs):
    """
    Brews a virtual coffee for the requesting user (joke command)
    :param ev_user_name:
    :return: A Response
    """
    # Strip spaces so the name works as a chat @-ping.
    ping_name = ev_user_name.replace(" ", "")
    return Response(command_status=True, message=u"*brews coffee for @" + ping_name + "*")
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_lick(*args, **kwargs):
    """
    Responds to 'lick' with a fixed action message (joke command)
    :return: A Response
    """
    return Response(command_status=True, message="*licks ice cream cone*")
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_tea(ev_user_name, *args, **kwargs):
    """
    Brews a random flavour of virtual tea for the requesting user (joke command)
    :param ev_user_name:
    :return: A Response
    """
    flavour = random.choice(['earl grey', 'green', 'chamomile',
                             'lemon', 'darjeeling', 'mint', 'jasmine'])
    # Strip spaces so the name works as a chat @-ping.
    ping_name = ev_user_name.replace(" ", "")
    return Response(command_status=True,
                    message=u"*brews a cup of {choice} tea for @{user}*".format(choice=flavour, user=ping_name))
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_wut(*args, **kwargs):
    """
    Responds to 'wut' with a fixed reply (joke command)
    :return: A Response
    """
    return Response(command_status=True, message="Whaddya mean, 'wut'? Humans...")
""" Uncomment when Winterbash comes back
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_hats(*args, **kwargs):
wb_start = datetime(2016, 12, 19, 0, 0, 0)
wb_end = datetime(2017, 1, 9, 0, 0, 0)
now = datetime.utcnow()
return_string = ""
if wb_start > now:
diff = wb_start - now
hours, remainder = divmod(diff.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
daystr = "days" if diff.days != 1 else "day"
hourstr = "hours" if hours != 1 else "hour"
minutestr = "minutes" if minutes != 1 else "minute"
secondstr = "seconds" if seconds != 1 else "second"
return_string = "WE LOVE HATS! Winter Bash will begin in {} {}, {} {}, {} {}, and {} {}.".format(
diff.days, daystr, hours, hourstr, minutes, minutestr, seconds, secondstr)
elif wb_end > now:
diff = wb_end - now
hours, remainder = divmod(diff.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
daystr = "days" if diff.days != 1 else "day"
hourstr = "hours" if hours != 1 else "hour"
minutestr = "minutes" if minutes != 1 else "minute"
secondstr = "seconds" if seconds != 1 else "second"
return_string = "Winter Bash won't end for {} {}, {} {}, {} {}, and {} {}. GO EARN SOME HATS!".format(
diff.days, daystr, hours, hourstr, minutes, minutestr, seconds, secondstr)
return Response(command_status=True, message=return_string)
"""
# --- Block application from posting functions --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_block(message_parts, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Blocks reports from being posted, globally or in one room, for a duration
    :param ev_room:
    :param wrap2:
    :param ev_user_id:
    :param message_parts: [command, seconds, optional room id]
    :return: A Response
    """
    room_id = "all" if len(message_parts) <= 2 else message_parts[2]
    duration = message_parts[1] if len(message_parts) > 1 else "0"
    if not duration.isdigit():
        return Response(command_status=False, message="Invalid duration.")
    duration = int(duration)
    # Out-of-range (or zero) requests fall back to a 15-minute block.
    if not 0 < duration < 14400:
        duration = 900
    GlobalVars.blockedTime[room_id] = time.time() + duration
    which_room = "globally" if room_id == "all" else "in room " + room_id
    report = "Reports blocked for {} seconds {}.".format(duration, which_room)
    # Let Charcoal HQ know, unless the block was issued from there.
    if room_id != GlobalVars.charcoal_room_id:
        GlobalVars.charcoal_hq.send_message(report)
    return Response(command_status=True, message=report)
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_unblock(message_parts, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Unblocks posting of reports to a room (or globally)
    :param ev_room:
    :param wrap2:
    :param ev_user_id:
    :param message_parts: [command, unused, optional room id]
    :param kwargs: No additional arguments expected
    :return: A Response
    """
    room_id = message_parts[2] if len(message_parts) > 2 else "all"
    # Setting the blocked-until timestamp to "now" lifts the block immediately.
    GlobalVars.blockedTime[room_id] = time.time()
    which_room = "globally" if room_id == "all" else "in room " + room_id
    # Bug fix: the original passed `GlobalVars.blockedTime - time.time()` (a dict
    # minus a float -> TypeError at runtime) plus `which_room` to a format string
    # with a single placeholder. The message only needs the room description.
    report = "Reports unblocked {}.".format(which_room)
    # Let Charcoal HQ know, unless the unblock was issued from there.
    if room_id != GlobalVars.charcoal_room_id:
        GlobalVars.charcoal_hq.send_message(report)
    return Response(command_status=True, message=report)
# --- Administration Commands --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_alive(ev_room, *args, **kwargs):
    """
    Confirms the process is still active
    :param ev_room:
    :return: A Response
    """
    # Only the Tavern and SOCVR get the jokey variants.
    if ev_room not in (GlobalVars.meta_tavern_room_id, GlobalVars.socvr_room_id):
        return Response(command_status=True, message='Of course')
    return Response(command_status=True,
                    message=random.choice(['Yup', 'You doubt me?', 'Of course',
                                           '... did I miss something?', 'plz send teh coffee',
                                           'Watching this endless list of new questions *never* gets boring',
                                           'Kinda sorta']))
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_allspam(message_parts, ev_room, ev_user_id, wrap2, ev_user_name, ev_room_name, *args, **kwargs):
    """
    Reports all of a user's posts as spam
    :param ev_room_name:
    :param ev_user_name:
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param message_parts: [command, user profile URL]
    :return: A Response
    """
    if len(message_parts) != 2:
        return Response(command_status=False, message="1 argument expected")
    user = get_user_from_url(message_parts[1])
    if user is None:
        return Response(command_status=True, message="That doesn't look like a valid user URL.")
    why = u"User manually reported by *{}* in room *{}*.\n".format(ev_user_name, ev_room_name.decode('utf-8'))
    handle_user_with_all_spam(user, why)
    return Response(command_status=True, message=None)
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_errorlogs(ev_room, ev_user_id, wrap2, message_parts, *args, **kwargs):
    """
    Posts the most recent lines of the error log into the room
    :param message_parts: [command, optional line count]
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: A Response with no message (the log excerpt is posted directly)
    """
    if len(message_parts) > 2:
        return Response(command_status=False, message="The !!/errorlogs command requires either 0 or 1 arguments.")
    # Default to 50 lines when no count (or a non-numeric one) is given.
    # (Removed a dead `count = -1` initialization that was always overwritten.)
    try:
        count = int(message_parts[1])
    except (ValueError, IndexError):
        count = 50
    logs_part = fetch_lines_from_error_log(count)
    # length_check=False: log excerpts may exceed normal chat message limits.
    post_message_in_room(room_id_str=ev_room, msg=logs_part, length_check=False)
    return Response(command_status=True, message=None)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_help(*args, **kwargs):
    """
    Returns the bot's self-description and a link to the command list
    :return: A Response
    """
    help_text = ("I'm " + GlobalVars.chatmessage_prefix +
                 ", a bot that detects spam and offensive posts on the network and "
                 "posts alerts to chat. "
                 "[A command list is available here](" + GlobalVars.bot_repository +
                 "/wiki/Commands).")
    return Response(command_status=True, message=help_text)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_location(*args, **kwargs):
    """
    Reports where this instance is running
    :return: A Response with the current location
    """
    return Response(command_status=True, message=GlobalVars.location)
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyProtectedMember
@check_permissions
def command_master(ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Terminates the process immediately with exit code 8
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :return: None (the process exits)
    """
    # Exit code 8 signals the supervisor to switch to the master branch.
    os._exit(8)
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyProtectedMember
@check_permissions
def command_pull(ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Pulls an update from GitHub if the deploy branch's CI build passed
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :return: A Response on failure/pending, exits with code 3 on success
    """
    ref = requests.get('https://api.github.com/repos/Charcoal-SE/SmokeDetector/git/refs/heads/deploy')
    latest_sha = ref.json()["object"]["sha"]
    statuses = requests.get(
        'https://api.github.com/repos/Charcoal-SE/SmokeDetector/commits/{commit_code}/statuses'.format(
            commit_code=latest_sha))
    states = [status["state"] for status in statuses.json()]
    if "success" in states:
        # Exit code 3 signals the supervisor to pull and restart.
        os._exit(3)
    elif "error" in states or "failure" in states:
        return Response(command_status=True, message="CI build failed! :( Please check your commit.")
    elif "pending" in states or not states:
        return Response(command_status=True,
                        message="CI build is still pending, wait until the build has finished and then pull again.")
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyProtectedMember
@check_permissions
def command_reboot(ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Announces a reboot, then terminates with exit code 5
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :return: None (the process exits)
    """
    post_message_in_room(room_id_str=ev_room, msg="Goodbye, cruel world")
    # Exit code 5 signals the supervisor to restart the process.
    os._exit(5)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_privileged(ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Tells the requesting user whether they hold privileges in this room
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :return: A Response
    """
    if not is_privileged(ev_room, ev_user_id, wrap2):
        return Response(command_status=True,
                        message="No, you are not a privileged user. Please see [the privileges wiki page](" +
                                GlobalVars.bot_repository + "/wiki/Privileges) for information on what privileges "
                                "are and what is expected of privileged users.")
    return Response(command_status=True, message="Yes, you are a privileged user.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_code_privileged(ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Tells the requesting user whether they hold code privileges
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :return: A Response
    """
    if not is_code_privileged(ev_room, ev_user_id, wrap2):
        return Response(command_status=True,
                        message="No, you are not a code-privileged user.")
    return Response(command_status=True, message="Yes, you have code privileges.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_quota(*args, **kwargs):
    """
    Reports how many API requests remain for the day
    :return: A Response
    """
    quota_message = "The current API quota remaining is {}.".format(GlobalVars.apiquota)
    return Response(command_status=True, message=quota_message)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_queuestatus(*args, **kwargs):
    """
    Reports the current state of the body-fetcher API queue
    :return: A Response
    """
    return Response(command_status=True, message=GlobalVars.bodyfetcher.print_queue())
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyProtectedMember
@check_permissions
def command_stappit(message_parts, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Shuts down this instance (exit code 6) if no location is given or the
    given location matches this instance's location
    :param message_parts:
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :return: A Response, or no return (the process exits)
    """
    target = " ".join(message_parts[1:]).lower()
    if len(message_parts) == 1 or target in GlobalVars.location.lower():
        post_message_in_room(room_id_str=ev_room, msg="Goodbye, cruel world")
        # Exit code 6 signals a deliberate, permanent shutdown.
        os._exit(6)
    return Response(command_status=True, message=None)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_status(*args, **kwargs):
    """
    Reports how long this instance has been running
    :return: A Response
    """
    elapsed = datetime.utcnow() - UtcDate.startup_utc_date
    # Note: .seconds is the sub-day remainder, so this rolls over every 24h.
    minutes = elapsed.seconds // 60
    plurality = "minute" if minutes == 1 else "minutes"
    return Response(command_status=True,
                    message='Running since {time} UTC ({minute_count} {plurality})'.format(
                        time=GlobalVars.startup_utc,
                        minute_count=minutes, plurality=plurality))
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_stop_flagging(*args, **kwargs):
    """
    Asks metasmoke to stop autoflagging, without blocking the chat thread
    :return: A Response
    """
    worker = Thread(name="stop_autoflagging", target=Metasmoke.stop_autoflagging,
                    args=())
    worker.start()
    return Response(command_status=True, message="Request sent...")
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyProtectedMember
@check_permissions
def command_standby(message_parts, ev_room, ev_user_id, wrap2, *args, **kwargs):
    """
    Switches this instance to standby (exit code 7) if the given location
    matches this instance's location
    :param message_parts:
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :return: A Response, or no return (the process exits)
    """
    target = " ".join(message_parts[1:]).lower()
    if target in GlobalVars.location.lower():
        m = "{location} is switching to standby".format(location=GlobalVars.location)
        post_message_in_room(room_id_str=ev_room, msg=m)
        # Exit code 7 signals the supervisor to restart in standby mode.
        os._exit(7)
    return Response(command_status=True, message=None)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_test(content, content_lower, *args, **kwargs):
    """
    Runs a string through the spam filters as title, body, and username
    :param content_lower:
    :param content: full message; everything after the 8-char command prefix is tested
    :return: A Response
    """
    string_to_test = content[8:]
    if not string_to_test:
        return Response(command_status=True, message="Nothing to test")
    reasons, why = FindSpam.test_post(string_to_test, string_to_test, string_to_test, "", False, False, 1, 0)
    if not reasons:
        return Response(command_status=True, message="> Would not be caught for title, body, and username.")
    result = "> " + ", ".join(reasons).capitalize()
    if why:
        result += "\n----------\n" + why
    return Response(command_status=True, message=result)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_test_answer(content, content_lower, *args, **kwargs):
    """
    Runs a string through the spam filters as an answer body
    :param content_lower:
    :param content: full message; everything after the 10-char command prefix is tested
    :return: A Response
    """
    string_to_test = content[10:]
    if not string_to_test:
        return Response(command_status=True, message="Nothing to test")
    # Placeholder title/username so only the answer-body checks can fire.
    reasons, why = FindSpam.test_post("Valid title", string_to_test, "Valid username", "", True, False, 1, 0)
    if not reasons:
        return Response(command_status=True, message="> Would not be caught as an answer.")
    result = "> " + ", ".join(reasons).capitalize()
    if why:
        result += "\n----------\n" + why
    return Response(command_status=True, message=result)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_test_question(content, content_lower, *args, **kwargs):
    """
    Runs a string through the spam filters as a question body
    :param content_lower:
    :param content: full message; everything after the 10-char command prefix is tested
    :return: A Response
    """
    string_to_test = content[10:]
    if not string_to_test:
        return Response(command_status=True, message="Nothing to test")
    # Placeholder title/username so only the question-body checks can fire.
    reasons, why = FindSpam.test_post("Valid title", string_to_test, "Valid username", "", False, False, 1, 0)
    if not reasons:
        return Response(command_status=True, message="> Would not be caught as a question.")
    result = "> " + ", ".join(reasons).capitalize()
    if why:
        result += "\n----------\n" + why
    return Response(command_status=True, message=result)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_test_title(content, content_lower, *args, **kwargs):
    """
    Runs a string through the spam filters as a question title
    :param content_lower:
    :param content: full message; everything after the 10-char command prefix is tested
    :return: A Response
    """
    string_to_test = content[10:]
    if not string_to_test:
        return Response(command_status=True, message="Nothing to test")
    # Placeholder body/username so only the title checks can fire.
    reasons, why = FindSpam.test_post(string_to_test, "Valid question body", "Valid username", "",
                                      False, False, 1, 0)
    if not reasons:
        return Response(command_status=True, message="> Would not be caught as a title.")
    result = "> " + ", ".join(reasons).capitalize()
    if why:
        result += "\n----------\n" + why
    return Response(command_status=True, message=result)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_test_username(content, content_lower, *args, **kwargs):
    """
    Runs a string through the spam filters as a username
    :param content_lower:
    :param content: full message; everything after the 10-char command prefix is tested
    :return: A Response
    """
    string_to_test = content[10:]
    if not string_to_test:
        return Response(command_status=True, message="Nothing to test")
    # Placeholder title/body so only the username checks can fire.
    reasons, why = FindSpam.test_post("Valid title", "Valid post body", string_to_test, "",
                                      False, False, 1, 0)
    if not reasons:
        return Response(command_status=True, message="> Would not be caught as a username.")
    result = "> " + ", ".join(reasons).capitalize()
    if why:
        result += "\n----------\n" + why
    return Response(command_status=True, message=result)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_thread_descriptions(*args, **kwargs):
    """
    Lists all live threads (ident and name), one per line, for debugging
    :return: A Response
    """
    descriptions = ["{ident}: {name}".format(ident=t.ident, name=t.name)
                    for t in threading.enumerate()]
    return Response(command_status=True, message="\n".join(descriptions))
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_version(*args, **kwargs):
    """
    Reports the currently running commit as a markdown link
    :return: A Response
    """
    commit_link = '[{commit_name}]({repository}/commit/{commit_code})'.format(
        commit_name=GlobalVars.commit_with_author,
        commit_code=GlobalVars.commit,
        repository=GlobalVars.bot_repository)
    return Response(command_status=True, message=commit_link)
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_whoami(ev_room, *args, **kwargs):
    """
    Reports SmokeDetector's own user id in the current room
    :param ev_room:
    :return: A Response
    """
    my_ids = GlobalVars.smokeDetector_user_id
    if ev_room in my_ids:
        return Response(command_status=True,
                        message="My id for this room is {}.".format(my_ids[ev_room]))
    return Response(command_status=True,
                    message="I don't know my user ID for this room. (Something is wrong, and it's apnorton's fault.)")
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyUnboundLocalVariable
def command_pending(content, content_lower, *args, **kwargs):
    """
    Lists posts with TP feedback that have not yet been deleted, paginated.
    :param content: full message; everything after the 11-char command prefix is the page number
    :return: A Response
    """
    raw_page = content[11:]
    try:
        page = int(raw_page)
    except ValueError:
        return Response(
            command_status=False,
            message="Expected an integer page number and got '{0!r}'"
                    " instead (ValueError).".format(raw_page))
    posts = requests.get("https://metasmoke.erwaysoftware.com/api/undeleted?pagesize=2&page={}&key={}".format(
        page, GlobalVars.metasmoke_key)).json()
    messages = ["[{0}]({1}) ([MS](https://m.erwaysoftware.com/post/{0}))".format(post['id'], post['link'])
                for post in posts['items']]
    return Response(command_status=True,
                    message=", ".join(messages))
# --- Notification functions --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_allnotifications(message_parts, ev_user_id, wrap2, *args, **kwargs):
    """
    Lists the sites the requesting user will be notified about in a room
    :param wrap2:
    :param ev_user_id:
    :param message_parts: [command, room id]
    :return: A Response
    """
    if len(message_parts) != 2:
        return Response(command_status=False, message="1 argument expected")
    room_id = message_parts[1]
    if not room_id.isdigit():
        return Response(command_status=False, message="Room ID is invalid.")
    sites = get_all_notification_sites(int(ev_user_id), wrap2.host, room_id)
    if not sites:
        return Response(command_status=True, message="You won't get notified for any sites in that room.")
    return Response(command_status=True, message="You will get notified for these sites:\r\n" + ", ".join(sites))
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_notify(message_parts, ev_user_id, wrap2, *args, **kwargs):
    """
    Subscribes the requesting user to report pings for one site in one room
    :param wrap2:
    :param ev_user_id:
    :param message_parts: [command, room id, site name (trailing '-' = quiet)]
    :return: A Response
    """
    if len(message_parts) != 3:
        return Response(command_status=False, message="2 arguments expected")
    room_id = message_parts[1]
    if not room_id.isdigit():
        return Response(command_status=False, message="Room ID is invalid.")
    room_id = int(room_id)
    user_id = int(ev_user_id)
    chat_site = wrap2.host
    # A trailing '-' on any part suppresses the confirmation message.
    quiet_action = any(part.endswith('-') for part in message_parts)
    se_site = message_parts[2].replace('-', '')
    response, full_site = add_to_notification_list(user_id, chat_site, room_id, se_site)
    if response == -1:
        return Response(command_status=True, message="That notification configuration is already registered.")
    if response == -2:
        return Response(command_status=False, message="The given SE site does not exist.")
    if response != 0:
        return Response(command_status=False, message="Unrecognized code returned when adding notification.")
    if quiet_action:
        return Response(command_status=True, message=None)
    return Response(command_status=True,
                    message="You'll now get pings from me if I report a post on `{site_name}`, in room "
                            "`{room_id}` on `chat.{chat_domain}`".format(site_name=se_site,
                                                                         room_id=room_id,
                                                                         chat_domain=chat_site))
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_unnotify(message_parts, ev_user_id, wrap2, *args, **kwargs):
    """
    Unsubscribes the requesting user from report pings for one site in one room
    :param wrap2:
    :param ev_user_id:
    :param message_parts: [command, room id, site name (trailing '-' = quiet)]
    :return: A Response
    """
    if len(message_parts) != 3:
        return Response(command_status=False, message="2 arguments expected")
    room_id = message_parts[1]
    if not room_id.isdigit():
        return Response(command_status=False, message="Room ID is invalid.")
    room_id = int(room_id)
    user_id = int(ev_user_id)
    chat_site = wrap2.host
    # A trailing '-' on any part suppresses the confirmation message.
    quiet_action = any(part.endswith('-') for part in message_parts)
    se_site = message_parts[2].replace('-', '')
    removed = remove_from_notification_list(user_id, chat_site, room_id, se_site)
    if not removed:
        return Response(command_status=True, message="That configuration doesn't exist.")
    if quiet_action:
        return Response(command_status=True, message=None)
    return Response(command_status=True,
                    message="I will no longer ping you if I report a post on `{site_name}`, in room `{room_id}` "
                            "on `chat.{chat_domain}`".format(site_name=se_site,
                                                             room_id=room_id,
                                                             chat_domain=chat_site))
# noinspection PyIncorrectDocstring,PyUnusedLocal
def command_willbenotified(message_parts, ev_user_id, wrap2, *args, **kwargs):
    """
    Tells the requesting user whether they would be notified for a site/room pair
    :param wrap2:
    :param ev_user_id:
    :param message_parts: [command, room id, site name]
    :return: A Response
    """
    if len(message_parts) != 3:
        return Response(command_status=False, message="2 arguments expected")
    room_id = message_parts[1]
    if not room_id.isdigit():
        return Response(command_status=False, message="Room ID is invalid")
    se_site = message_parts[2]
    notified = will_i_be_notified(int(ev_user_id), wrap2.host, int(room_id), se_site)
    if notified:
        return Response(command_status=True, message="Yes, you will be notified for that site in that room.")
    return Response(command_status=True, message="No, you won't be notified for that site in that room.")
# --- Post Responses --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def command_report_post(ev_room, ev_user_id, wrap2, message_parts, message_url,
                        ev_user_name, ev_room_name, *args, **kwargs):
    """
    Report a post (or posts)

    Fetches each given URL through the SE API, blacklists the post's author,
    and pushes the post through the normal spam-handling pipeline. At most 5
    URLs per command; multi-post reports trigger a 30-second rate limit.
    :param ev_room_name:
    :param ev_user_name:
    :param message_url:
    :param message_parts: the command word followed by one or more post URLs
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: A string (or None)
    """
    # Rate-limit check: users who recently batch-reported must wait `wait` seconds.
    crn, wait = can_report_now(ev_user_id, wrap2.host)
    if not crn:
        return Response(command_status=False, message="You can execute the !!/report command again in {} seconds. "
                                                      "To avoid one user sending lots of reports in a few commands and "
                                                      "slowing SmokeDetector down due to rate-limiting, you have to "
                                                      "wait 30 seconds after you've reported multiple posts using "
                                                      "!!/report, even if your current command just has one URL. (Note "
                                                      "that this timeout won't be applied if you only used !!/report "
                                                      "for one post)".format(wait))
    if len(message_parts) < 2:
        return Response(command_status=False, message="Not enough arguments.")
    # `output` collects one error line per URL that could NOT be reported.
    output = []
    index = 0
    # De-duplicate the URLs; note this loses the original ordering.
    urls = list(set(message_parts[1:]))
    if len(urls) > 5:
        return Response(command_status=False, message="To avoid SmokeDetector reporting posts too slowly, you can "
                                                      "report at most 5 posts at a time. This is to avoid "
                                                      "SmokeDetector's chat messages getting rate-limited too much, "
                                                      "which would slow down reports.")
    for url in urls:
        index += 1
        # api_get_post returns None for an unparsable URL and False when the
        # API has no data for the post (e.g. it was already deleted).
        post_data = api_get_post(url)
        if post_data is None:
            output.append("Post {}: That does not look like a valid post URL.".format(index))
            continue
        if post_data is False:
            output.append("Post {}: Could not find data for this post in the API. "
                          "It may already have been deleted.".format(index))
            continue
        if has_already_been_posted(post_data.site, post_data.post_id, post_data.title) and not is_false_positive(
                (post_data.post_id, post_data.site)):
            # Don't re-report if the post wasn't marked as a false positive. If it was marked as a false positive,
            # this re-report might be attempting to correct that/fix a mistake/etc.
            output.append("Post {}: Already recently reported".format(index))
            continue
        # Blacklist the post's author so their future posts are auto-reported.
        user = get_user_from_url(post_data.owner_url)
        if user is not None:
            add_blacklisted_user(user, message_url, post_data.post_url)
        why = u"Post manually reported by user *{}* in room *{}*.\n".format(ev_user_name,
                                                                            ev_room_name.decode('utf-8'))
        batch = ""
        if len(urls) > 1:
            batch = " (batch report: post {} out of {})".format(index, len(urls))
        # Hand the post to the regular spam pipeline (chat alert, metasmoke, etc.).
        handle_spam(title=post_data.title,
                    body=post_data.body,
                    poster=post_data.owner_name,
                    site=post_data.site,
                    post_url=post_data.post_url,
                    poster_url=post_data.owner_url,
                    post_id=post_data.post_id,
                    reasons=["Manually reported " + post_data.post_type + batch],
                    is_answer=post_data.post_type == "answer",
                    why=why,
                    owner_rep=post_data.owner_rep,
                    post_score=post_data.score,
                    up_vote_count=post_data.up_vote_count,
                    down_vote_count=post_data.down_vote_count,
                    question_id=post_data.question_id)
    # Chained comparison: more than one URL was given AND at least one of them
    # was actually reported (fewer error lines than URLs) -> start the
    # 30-second multi-report rate limit for this user.
    if 1 < len(urls) > len(output):
        add_or_update_multiple_reporter(ev_user_id, wrap2.host, time.time())
    if len(output) > 0:
        return Response(command_status=True, message=os.linesep.join(output))
    return Response(command_status=True, message=None)
#
#
# Subcommands go below here
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyBroadException
@check_permissions
def subcommand_delete(ev_room, ev_user_id, wrap2, msg, *args, **kwargs):
    """
    Deletes a chat message, silently ignoring any failure
    :param msg: the chat message to delete
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :return: A Response
    """
    # Deletion is best-effort: the deliberately-bare except swallows any error
    # (message too old, insufficient rights, network hiccup, ...).
    try:
        msg.delete()
    except:
        pass
    return Response(command_status=True, message=None)
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def subcommand_editlink(ev_room, ev_user_id, wrap2, msg_content, msg, *args, **kwargs):
    """
    Removes the link from a report message whose post is gone
    :param msg: the report message to edit in place
    :param msg_content: current text of the report message
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :return: A Response
    """
    new_content = edited_message_after_postgone_command(msg_content)
    # None means the message did not look like a report at all.
    if new_content is None:
        return Response(command_status=True, message="That's not a report.")
    msg.edit(new_content)
    return Response(command_status=True, message=None)
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyBroadException
@check_permissions
def subcommand_falsepositive(ev_room, ev_user_id, wrap2, post_site_id, post_url,
                             quiet_action, post_type, msg, second_part_lower, ev_user_name,
                             msg_content, *args, **kwargs):
    """
    Marks a post as a false positive

    Sends fp feedback to metasmoke, records the post locally as a false
    positive, and — depending on the command variant — whitelists the post's
    author and/or removes them from the blacklist. Outside Charcoal HQ the
    report message itself is deleted (best-effort).
    :param msg_content:
    :param ev_user_name:
    :param second_part_lower: the feedback word, e.g. "fp", "fpu", "falseu"
    :param msg:
    :param post_type: "question" or "answer"
    :param quiet_action: suppress the confirmation message when True
    :param post_url:
    :param post_site_id: tuple-like of (post id, site, ...) parsed from the report
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :param kwargs: No additional arguments expected
    :return: None or a string
    """
    if not is_report(post_site_id):
        return Response(command_status=True, message="That message is not a report.")
    # Forward the feedback to metasmoke first.
    send_metasmoke_feedback(post_url=post_url,
                            second_part_lower=second_part_lower,
                            ev_user_name=ev_user_name,
                            ev_user_id=ev_user_id,
                            ev_chat_host=wrap2.host)
    add_false_positive((post_site_id[0], post_site_id[1]))
    user_added = False
    user_removed = False
    # Try to identify the post's author from the report message text.
    url_from_msg = fetch_owner_url_from_msg_content(msg_content)
    user = None
    if url_from_msg is not None:
        user = get_user_from_url(url_from_msg)
    # "fpu"/"falseu" variants additionally whitelist the author.
    if second_part_lower.startswith("falseu") or second_part_lower.startswith("fpu"):
        if user is not None:
            add_whitelisted_user(user)
            user_added = True
    # If the report flagged a blacklisted author, un-blacklist them too.
    if "Blacklisted user:" in msg_content:
        if user is not None:
            remove_blacklisted_user(user)
            user_removed = True
    if post_type == "question":
        if user_added and not quiet_action:
            return Response(command_status=True, message="Registered question as false positive and whitelisted user.")
        elif user_removed and not quiet_action:
            return Response(command_status=True,
                            message="Registered question as false positive and removed user from the blacklist.")
        elif not quiet_action:
            return Response(command_status=True, message="Registered question as false positive.")
    elif post_type == "answer":
        if user_added and not quiet_action:
            return Response(command_status=True, message="Registered answer as false positive and whitelisted user.")
        elif user_removed and not quiet_action:
            return Response(command_status=True,
                            message="Registered answer as false positive and removed user from the blacklist.")
        elif not quiet_action:
            return Response(command_status=True, message="Registered answer as false positive.")
    # Quiet path: delete the report message itself, except in Charcoal HQ.
    # Deletion is best-effort; the bare except deliberately swallows failures.
    try:
        if int(msg.room.id) != int(GlobalVars.charcoal_hq.id):
            msg.delete()
    except:
        pass
    return Response(command_status=True, message=None)
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def subcommand_ignore(ev_room, ev_user_id, wrap2, post_site_id, post_url, quiet_action, second_part_lower, ev_user_name,
                      *args, **kwargs):
    """
    Marks a post as ignored so no further alerts are posted about it
    :param quiet_action: suppress the confirmation message when True
    :param post_url:
    :param post_site_id: tuple-like of (post id, site, ...) parsed from the report
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :return: A Response
    """
    if not is_report(post_site_id):
        return Response(command_status=True, message="That message is not a report.")
    # Forward the feedback to metasmoke first.
    send_metasmoke_feedback(post_url=post_url,
                            second_part_lower=second_part_lower,
                            ev_user_name=ev_user_name,
                            ev_user_id=ev_user_id,
                            ev_chat_host=wrap2.host)
    add_ignored_post(post_site_id[0:2])
    if quiet_action:
        return Response(command_status=True, message=None)
    return Response(command_status=True, message="Post ignored; alerts about it will no longer be posted.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def subcommand_naa(ev_room, ev_user_id, wrap2, post_site_id, post_url, quiet_action,
                   second_part_lower, ev_user_name, post_type, *args, **kwargs):
    """
    Marks an answer as NAA (not-an-answer); questions are rejected
    :param post_type: "question" or "answer" — only answers can be NAAs
    :param ev_user_name:
    :param second_part_lower:
    :param quiet_action: suppress the confirmation message when True
    :param post_url:
    :param post_site_id: tuple-like of (post id, site, ...) parsed from the report
    :param wrap2:
    :param ev_user_id:
    :param ev_room:
    :return: A Response
    """
    if not is_report(post_site_id):
        return Response(command_status=True, message="That message is not a report.")
    if post_type != "answer":
        return Response(command_status=True, message="That report was a question; questions cannot be marked as NAAs.")
    # Forward the feedback to metasmoke, then stop further alerts for this post.
    send_metasmoke_feedback(post_url=post_url,
                            second_part_lower=second_part_lower,
                            ev_user_name=ev_user_name,
                            ev_user_id=ev_user_id,
                            ev_chat_host=wrap2.host)
    add_ignored_post(post_site_id[0:2])
    if quiet_action:
        return Response(command_status=True, message=None)
    return Response(command_status=True, message="Recorded answer as an NAA in metasmoke.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
@check_permissions
def subcommand_truepositive(ev_room, ev_user_id, wrap2, post_site_id, post_url, quiet_action,
                            post_type, message_url, msg, second_part_lower, ev_user_name,
                            msg_content, *args, **kwargs):
    """
    Marks a post as a true positive; the "u" variants also blacklist the author.
    :param msg_content: content of the report message (used to find the owner URL)
    :param ev_user_name: name of the user issuing the command
    :param second_part_lower: the feedback keyword, lowercased
    :param msg: the chat message being replied to
    :param message_url: URL of the chat message
    :param post_type: "question" or "answer"
    :param quiet_action: suppress the confirmation message when truthy
    :param post_url: URL of the reported post
    :param post_site_id: identifier of the reported post
    :param wrap2: chat wrapper, provides the chat host for feedback
    :param ev_user_id: id of the user issuing the command
    :param ev_room: room the command was issued in
    :param kwargs: No additional arguments expected
    :return: None or a Response
    """
    if not is_report(post_site_id):
        return Response(command_status=True, message="That message is not a report.")
    send_metasmoke_feedback(post_url=post_url, second_part_lower=second_part_lower,
                            ev_user_name=ev_user_name, ev_user_id=ev_user_id,
                            ev_chat_host=wrap2.host)
    # trueu/tpu additionally blacklist the post's author
    user_added = False
    if second_part_lower.startswith("trueu") or second_part_lower.startswith("tpu"):
        owner_url = fetch_owner_url_from_msg_content(msg_content)
        if owner_url is not None:
            user = get_user_from_url(owner_url)
            if user is not None:
                add_blacklisted_user(user, message_url, "http:" + post_url)
                user_added = True
    if post_type == "question":
        if quiet_action:
            return Response(command_status=True, message=None)
        if user_added:
            return Response(command_status=True, message="Blacklisted user and registered question as true positive.")
        return Response(command_status=True,
                        message="Recorded question as true positive in metasmoke. Use `tpu` or `trueu` if you want "
                                "to blacklist a user.")
    if post_type == "answer":
        if quiet_action:
            return Response(command_status=True, message=None)
        if user_added:
            return Response(command_status=True, message="Blacklisted user.")
        return Response(command_status=True, message="Recorded answer as true positive in metasmoke. If you want to "
                                                     "blacklist the poster of the answer, use `trueu` or `tpu`.")
    return Response(command_status=False, message="Post type was not recognized (not `question` or `answer`) - "
                                                  "call a developer! "
                                                  "No action was taken.")
# noinspection PyIncorrectDocstring,PyUnusedLocal
def subcommand_why(msg_content, *args, **kwargs):
    """
    Returns reasons a post (or an allspam-reported user) was reported
    :param msg_content: content of the report message being replied to
    :param kwargs: No additional arguments expected
    :return: A string
    """
    post_info = fetch_post_id_and_site_from_msg_content(msg_content)
    if post_info is None:
        post_info = fetch_user_from_allspam_report(msg_content)
        if post_info is None:
            return Response(command_status=True, message="That's not a report.")
        why = get_why_allspam(post_info)
        # BUG FIX: condition was `why is not None or why != ""`, which is always
        # True (None != "" holds too), so empty/None "why" data could be returned
        # and the fallback message below was unreachable. Both must hold.
        if why is not None and why != "":
            return Response(command_status=True, message=why)
    else:
        post_id, site, _ = post_info
        why = get_why(site, post_id)
        # BUG FIX: same always-True `or` condition corrected to `and`
        if why is not None and why != "":
            return Response(command_status=True, message=why)
    return Response(command_status=True, message="There is no `why` data for that user (anymore).")
# noinspection PyIncorrectDocstring,PyUnusedLocal
def subcommand_autoflagged(msg_content, post_url, *args, **kwargs):
    """
    Determines whether a post was automatically flagged by Metasmoke
    :param msg_content: content of the report message (unused)
    :param post_url: URL of the reported post
    :param kwargs: No additional arguments expected
    :return: A string
    """
    autoflagged, names = Metasmoke.determine_if_autoflagged(post_url)
    # guard clause instead of the original if/else
    if not autoflagged:
        return Response(command_status=True, message="That post was **not** automatically flagged by metasmoke.")
    flaggers = ", ".join(names)
    return Response(command_status=True,
                    message="That post was automatically flagged, using flags from: {}.".format(flaggers))
# This dictionary defines our commands and the associated function to call
# To use this your calling code will look like this
#   command_dict['command'](parameter1, parameter2, ...)
# Each key can have a different set of parameters so 'command1' could look like this
#   command_dict['command1'](parameter1)
# Triggering input:
#   !!/alive
# Hardcoded key example of above input:
#   command_dict["!!/alive"]()
command_dict = {
    "!!/addblu": command_add_blacklist_user,
    "!!/addblu-": command_add_blacklist_user,
    "!!/addwlu": command_add_whitelist_user,
    "!!/addwlu-": command_add_whitelist_user,
    "!!/alive": command_alive,
    "!!/allnotificationsites": command_allnotifications,
    "!!/allspam": command_allspam,
    "!!/amiprivileged": command_privileged,
    "!!/amicodeprivileged": command_code_privileged,
    "!!/apiquota": command_quota,
    "!!/blame": command_blame,
    "!!/block": command_block,
    "!!/brownie": command_brownie,
    "!!/blacklist": command_blacklist_help,
    "!!/blacklist-website": command_blacklist_website,
    "!!/blacklist-keyword": command_blacklist_keyword,
    "!!/blacklist-username": command_blacklist_username,
    "!!/commands": command_help,
    "!!/coffee": command_coffee,
    "!!/errorlogs": command_errorlogs,
    "!!/gitstatus": command_gitstatus,
    "!!/help": command_help,
    # "!!/hats": command_hats, (uncomment when Winterbash begins)
    "!!/info": command_help,
    "!!/isblu": command_check_blacklist,
    "!!/iswlu": command_check_whitelist,
    "!!/lick": command_lick,
    "!!/location": command_location,
    "!!/master": command_master,
    "!!/notify": command_notify,
    "!!/notify-": command_notify,
    "!!/pull": command_pull,
    "!!/pending": command_pending,
    "!!/reboot": command_reboot,
    "!!/reportuser": command_allspam,
    "!!/rmblu": command_remove_blacklist_user,
    "!!/rmblu-": command_remove_blacklist_user,
    "!!/rmwlu": command_remove_whitelist_user,
    "!!/rmwlu-": command_remove_whitelist_user,
    "!!/report": command_report_post,
    "!!/restart": command_reboot,
    "!!/rev": command_version,
    "!!/stappit": command_stappit,
    "!!/status": command_status,
    "!!/stopflagging": command_stop_flagging,
    "!!/standby": command_standby,
    "!!/tea": command_tea,
    "!!/test": command_test,
    "!!/testanswer": command_test_answer,
    "!!/test-a": command_test_answer,
    "!!/testquestion": command_test_question,
    "!!/test-q": command_test_question,
    "!!/testtitle": command_test_title,
    "!!/test-t": command_test_title,
    "!!/testusername": command_test_username,
    "!!/testuser": command_test_username,
    "!!/test-u": command_test_username,
    "!!/threads": command_thread_descriptions,
    "!!/unblock": command_unblock,
    "!!/unnotify": command_unnotify,
    "!!/unnotify-": command_unnotify,
    "!!/ver": command_version,
    "!!/willibenotified": command_willbenotified,
    "!!/whoami": command_whoami,
    "!!/wut": command_wut,
    "!!/queuestatus": command_queuestatus
}
# This dictionary defines our subcommands and the associated function to call
# To use this your calling code will look like this
#   subcommand_dict['command'](parameter1, parameter2, ...)
# Each key can have a different set of parameters so 'command1' could look like this
#   subcommand_dict['command1'](parameter1)
# Triggering input:
#   sd false
# Hardcoded key example of above input:
#   subcommand_dict["false"](...)
subcommand_dict = {
    "false": subcommand_falsepositive,
    "fp": subcommand_falsepositive,
    "falseu": subcommand_falsepositive,
    "fpu": subcommand_falsepositive,
    "false-": subcommand_falsepositive,
    "fp-": subcommand_falsepositive,
    "falseu-": subcommand_falsepositive,
    "fpu-": subcommand_falsepositive,
    "true": subcommand_truepositive,
    "tp": subcommand_truepositive,
    "trueu": subcommand_truepositive,
    "tpu": subcommand_truepositive,
    "true-": subcommand_truepositive,
    "tp-": subcommand_truepositive,
    "trueu-": subcommand_truepositive,
    "tpu-": subcommand_truepositive,
    "ignore": subcommand_ignore,
    "ignore-": subcommand_ignore,
    "naa": subcommand_naa,
    "naa-": subcommand_naa,
    "delete": subcommand_delete,
    "remove": subcommand_delete,
    "gone": subcommand_delete,
    "poof": subcommand_delete,
    "del": subcommand_delete,
    "postgone": subcommand_editlink,
    "why": subcommand_why,
    "why?": subcommand_why,
    "autoflagged?": subcommand_autoflagged,
    "autoflagged": subcommand_autoflagged,
}
|
# -*- coding: utf-8 -*-
"""Wrapper to run RS4PI from the command line.
:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from dicompylercore import dicomparser, dvhcalc
from pykern import pkio
from pykern import pksubprocess
from pykern.pkdebug import pkdp, pkdc
from sirepo import feature_config
from sirepo import simulation_db
from sirepo.template import template_common
import numpy as np
import py.path
import sirepo.template.rs4pi as template
import struct
import time
def run(cfg_dir):
    """Read the simulation input and dispatch to the matching report runner.

    Raises RuntimeError for an unknown report name.
    """
    cfg_dir = pkio.py_path(cfg_dir)
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    report = data['report']
    if report == 'doseCalculation':
        _run_dose_calculation(data, cfg_dir)
    elif report == 'dvhReport':
        _run_dvh(data, cfg_dir)
    else:
        raise RuntimeError('unknown report: {}'.format(report))
def run_background(cfg_dir):
    """No background processing for rs4pi; just record an empty result."""
    with pkio.save_chdir(cfg_dir):
        simulation_db.write_result({})
def _parent_file(cfg_dir, filename):
    """Return the path of *filename* located in the parent directory of cfg_dir."""
    parent = pkio.py_path(cfg_dir.dirname)
    return str(parent.join(filename))
def _run_dose_calculation(data, cfg_dir):
    """Run the dose-calculation shell script and record the generated dose.

    Falls back to the fake runner when the rs4pi_dose_calc feature is off.
    """
    if not feature_config.cfg.rs4pi_dose_calc:
        return _run_dose_calculation_fake(data, cfg_dir)
    with pkio.save_chdir(cfg_dir):
        dose_calc_cmd = ['bash', str(cfg_dir.join(template.DOSE_CALC_SH))]
        pksubprocess.check_call_with_signals(dose_calc_cmd)
        dicom_dose = template.generate_rtdose_file(data, cfg_dir)
        data['models']['dicomDose'] = dicom_dose
        # save results into simulation input data file, this is needed for further calls to get_simulation_frame()
        simulation_db.write_json(template_common.INPUT_BASE_NAME, data)
        simulation_db.write_result({
            'dicomDose': dicom_dose,
        })
def _run_dose_calculation_fake(data, cfg_dir):
    """Simulate a dose calculation: wait briefly, then write an empty result."""
    time.sleep(5)
    simulation_db.write_result({})
def _run_dvh(data, cfg_dir):
    """Compute cumulative, relative-volume dose-volume histograms for the
    selected ROIs and write the combined plot data via write_result().

    Reads RTSTRUCT/RTDOSE DICOM files from the parent of cfg_dir.
    """
    if not len(data['models']['dvhReport']['roiNumbers']):
        simulation_db.write_result({
            'error': 'No selection',
        })
        # BUG FIX: must stop after reporting the error; previously execution
        # fell through to the empty loop and crashed with a NameError on
        # `bins` when building `res` below.
        return
    y_range = None
    plots = []
    for roi_number in data['models']['dvhReport']['roiNumbers']:
        roi_number = int(roi_number)
        dp = dicomparser.DicomParser(_parent_file(cfg_dir, template.RTSTRUCT_EXPORT_FILENAME))
        for roi in dp.ds.ROIContourSequence:
            if roi.ReferencedROINumber == roi_number:
                # ensure each contour has a ContourImageSequence attribute,
                # which the DVH calculation expects to be present
                for c in roi.ContourSequence:
                    if 'ContourImageSequence' not in c:
                        c.ContourImageSequence = []
        s = dp.GetStructures()[roi_number]
        s['planes'] = dp.GetStructureCoordinates(roi_number)
        s['thickness'] = dp.CalculatePlaneThickness(s['planes'])
        rtdose = dicomparser.DicomParser(_parent_file(cfg_dir, template._DOSE_DICOM_FILE))
        calcdvh = dvhcalc.calculate_dvh(s, rtdose, None, True, None)
        counts = calcdvh.histogram
        # cumulative
        counts = counts[::-1].cumsum()[::-1]
        # relative volume
        if len(counts) and counts.max() > 0:
            counts = 100 * counts / counts.max()
        bins = np.arange(0, calcdvh.histogram.size + 1.0) / 100.0
        min_y = np.min(counts)
        max_y = np.max(counts)
        # widen the shared y extent to cover every plot
        if y_range:
            if min_y < y_range[0]:
                y_range[0] = min_y
            if max_y > y_range[1]:
                y_range[1] = max_y
        else:
            y_range = [min_y, max_y]
        plots.append({
            'points': counts.tolist(),
            'color': '#{}'.format(struct.pack('BBB', *s['color']).encode('hex')),
            'label': s['name'],
        })
    res = {
        'title': '',
        'x_range': [bins[0], bins[-1], 100],
        'y_label': 'Volume [%]',
        'x_label': 'Dose [gy]',
        'y_range': y_range,
        'plots': sorted(plots, key=lambda v: v['label'].lower()),
    }
    simulation_db.write_result(res)
rs4pi: get max x range for DVH report
# -*- coding: utf-8 -*-
"""Wrapper to run RS4PI from the command line.
:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from dicompylercore import dicomparser, dvhcalc
from pykern import pkio
from pykern import pksubprocess
from pykern.pkdebug import pkdp, pkdc
from sirepo import feature_config
from sirepo import simulation_db
from sirepo.template import template_common
import numpy as np
import py.path
import sirepo.template.rs4pi as template
import struct
import time
def run(cfg_dir):
    """Dispatch the report named in the simulation input to its runner.

    Raises RuntimeError for an unknown report name.
    """
    cfg_dir = pkio.py_path(cfg_dir)
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    report = data['report']
    if report not in ('doseCalculation', 'dvhReport'):
        raise RuntimeError('unknown report: {}'.format(report))
    runner = _run_dose_calculation if report == 'doseCalculation' else _run_dvh
    runner(data, cfg_dir)
def run_background(cfg_dir):
    """rs4pi has no background phase here; record an empty result."""
    with pkio.save_chdir(cfg_dir):
        simulation_db.write_result({})
def _parent_file(cfg_dir, filename):
    """Resolve *filename* inside the parent directory of cfg_dir."""
    parent_dir = pkio.py_path(cfg_dir.dirname)
    return str(parent_dir.join(filename))
def _run_dose_calculation(data, cfg_dir):
    """Produce a DICOM dose (real script or fake) and record it in the results."""
    if feature_config.cfg.rs4pi_dose_calc:
        with pkio.save_chdir(cfg_dir):
            pksubprocess.check_call_with_signals(
                ['bash', str(cfg_dir.join(template.DOSE_CALC_SH))])
            dicom_dose = template.generate_rtdose_file(data, cfg_dir)
    else:
        dicom_dose = _run_dose_calculation_fake(data, cfg_dir)
    data['models']['dicomDose'] = dicom_dose
    # save results into simulation input data file, this is needed for further calls to get_simulation_frame()
    simulation_db.write_json(template_common.INPUT_BASE_NAME, data)
    simulation_db.write_result({
        'dicomDose': dicom_dose,
    })
def _run_dose_calculation_fake(data, cfg_dir):
    """Pretend to run a dose calculation: stamp the start time on the existing
    dicomDose model, wait briefly, and return that model unchanged otherwise.
    """
    dose = data['models']['dicomDose']
    dose['startTime'] = int(time.time())
    time.sleep(5)
    return dose
def _run_dvh(data, cfg_dir):
    """Compute dose-volume histograms (cumulative or differential; relative or
    absolute volume) for the selected ROIs and write the combined plot data.

    Reads RTSTRUCT/RTDOSE DICOM files from the parent of cfg_dir.
    """
    dvh_report = data['models']['dvhReport']
    if not len(dvh_report['roiNumbers']):
        simulation_db.write_result({
            'error': 'No selection',
        })
        # BUG FIX: must stop after reporting the error; previously execution
        # fell through and overwrote the error with a bogus empty result.
        return
    y_range = None
    plots = []
    max_x = 0
    for roi_number in data['models']['dvhReport']['roiNumbers']:
        roi_number = int(roi_number)
        dp = dicomparser.DicomParser(_parent_file(cfg_dir, template.RTSTRUCT_EXPORT_FILENAME))
        for roi in dp.ds.ROIContourSequence:
            if roi.ReferencedROINumber == roi_number:
                # ensure each contour has a ContourImageSequence attribute,
                # which the DVH calculation expects to be present
                for c in roi.ContourSequence:
                    if 'ContourImageSequence' not in c:
                        c.ContourImageSequence = []
        s = dp.GetStructures()[roi_number]
        s['planes'] = dp.GetStructureCoordinates(roi_number)
        s['thickness'] = dp.CalculatePlaneThickness(s['planes'])
        rtdose = dicomparser.DicomParser(_parent_file(cfg_dir, template._DOSE_DICOM_FILE))
        calcdvh = dvhcalc.calculate_dvh(s, rtdose, None, True, None)
        counts = np.append(calcdvh.histogram, 0.0)
        if dvh_report['dvhType'] == 'cumulative':
            counts = counts[::-1].cumsum()[::-1]
        else:
            # differential DVH: negated first difference of the histogram
            counts = np.append(abs(np.diff(counts) * -1), [0])
        if dvh_report['dvhVolume'] == 'relative':
            # normalize against the cumulative maximum, then restore the
            # differential form if that was requested
            if dvh_report['dvhType'] == 'differential':
                counts = counts[::-1].cumsum()[::-1]
            if len(counts) and counts.max() > 0:
                counts = 100 * counts / counts.max()
            if dvh_report['dvhType'] == 'differential':
                counts = np.append(abs(np.diff(counts) * -1), [0])
        else:
            # absolute volume: scale raw counts by 1/10
            # NOTE(review): presumably a bin-unit conversion - confirm factor
            counts /= 10
        max_x = max(max_x, counts.size)
        min_y = np.min(counts)
        max_y = np.max(counts)
        # widen the shared y extent to cover every plot
        if y_range:
            if min_y < y_range[0]:
                y_range[0] = min_y
            if max_y > y_range[1]:
                y_range[1] = max_y
        else:
            y_range = [min_y, max_y]
        plots.append({
            'points': counts.tolist(),
            'color': '#{}'.format(struct.pack('BBB', *s['color']).encode('hex')),
            'label': s['name'],
        })
    res = {
        'title': '',
        'x_range': [0, max_x / 100.0, max_x],
        'y_label': 'Volume [{}]'.format('%' if dvh_report['dvhVolume'] == 'relative' else 'm³'),
        'x_label': 'Dose [gy]',
        'y_range': y_range,
        'plots': sorted(plots, key=lambda v: v['label'].lower()),
    }
    simulation_db.write_result(res)
|
import getopt
import sys
import pickle
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import word_tokenize
from collections import Counter
import math
import string
import operator
import heapq
from pprint import pprint
# Module-level state: populated in __main__, read by getPosting()/handleQuery().
dictionary = {}  # term -> {'index': ..., 'doc_freq': ...} (see handleQuery)
postings_file = None  # open binary handle to the postings file
postings_sizes = []  # per-term byte offsets used by getPosting() seeks
starting_byte_offset = 0  # file position where the postings data begins
all_doc_ids = []  # not used in the visible code
def usage():
    """Print the expected command-line invocation of this script."""
    print("usage: {} -d dictionary-file -p postings-file -q file-of-queries -l lengths-file -o output-file-of-results".format(sys.argv[0]))
def getPosting(index_of_term):
    """Seek into the postings file and unpickle the postings for one term."""
    prev = index_of_term - 1
    relative_offset = postings_sizes[prev] if prev >= 0 else 0
    postings_file.seek(starting_byte_offset + relative_offset, 0)
    return pickle.load(postings_file)
def preprocess_query(query):
    """Lowercase, tokenize, drop punctuation tokens, stem, and count terms."""
    stemmer = PorterStemmer()
    punctuation_set = set(string.punctuation)
    tokens = word_tokenize(query.lower())
    stemmed = (stemmer.stem(tok) for tok in tokens if tok not in punctuation_set)
    return Counter(stemmed)
def handleQuery(query):
    """Score documents against *query* with tf-idf cosine similarity and
    return up to the ten highest-scoring doc ids.
    """
    term_counts = preprocess_query(query)
    scores = {}
    query_weights = []
    for term, query_tf in term_counts.items():
        if term not in dictionary:
            continue
        dict_entry = dictionary.get(term)
        postings_entry = getPosting(dict_entry['index'])
        idf = math.log10(len(lengths) / dict_entry['doc_freq'])
        query_tf_weight = 1 + math.log10(query_tf)
        for doc_id, doc_tf in postings_entry:
            doc_tf_weight = 1 + math.log10(doc_tf)
            scores[doc_id] = scores.get(doc_id, 0) + doc_tf_weight * query_tf_weight * idf
        query_weights.append(query_tf_weight * idf)
    # normalize each accumulated score by document length and query norm
    query_l2_norm = math.sqrt(sum(math.pow(1 + math.log10(w), 2) for w in query_weights))
    for doc_id in list(scores):
        scores[doc_id] /= lengths[doc_id] * query_l2_norm
    # heapq is a min-heap, so negate scores to pop the largest first
    scores_heap = [(-score, doc_id) for doc_id, score in scores.items()]
    heapq.heapify(scores_heap)
    return [heapq.heappop(scores_heap)[1] for _ in range(min(10, len(scores_heap)))]
if __name__ == '__main__':
    dict_path = postings_path = query_path = output_path = lengths_path = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'd:p:q:o:l:')
    except getopt.GetoptError as err:
        usage()
        sys.exit(2)
    # map the command-line flags onto the expected file paths
    for o, a in opts:
        if o == '-d':
            dict_path = a
        elif o == '-p':
            postings_path = a
        elif o == '-q':
            query_path = a
        elif o == '-o':
            output_path = a
        elif o == '-l':
            lengths_path = a
        else:
            assert False, "unhandled option"
    # all five paths are mandatory
    if dict_path == None or postings_path == None or query_path == None or output_path == None or lengths_path == None:
        usage()
        sys.exit(2)
    with open(dict_path, 'rb') as f:
        dictionary = pickle.load(f)
    with open(lengths_path, 'rb') as f:
        lengths = pickle.load(f)
    # load postings object sizes to calculate seek offset from current position of file
    postings_file = open(postings_path, 'rb')
    postings_sizes = pickle.load(postings_file)
    starting_byte_offset = postings_file.tell()
    output_file = open(output_path, 'w')
    # answer each non-blank query line with up to ten ranked doc ids
    with open(query_path, 'r') as f:
        for line in f:
            line = line.strip()
            if line != '':
                result = handleQuery(line)
                output = ' '.join(result)
                print('OUTPUT', output)
                output_file.write(output + '\n')
    output_file.close()
    postings_file.close()
Added a custom class to handle comparison of doc ids when two documents have the same score.
import getopt
import sys
import pickle
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import word_tokenize
from collections import Counter
import math
import string
import operator
import heapq
from pprint import pprint
# Module-level state: populated in __main__, read by getPosting()/handleQuery().
dictionary = {}  # term -> {'index': ..., 'doc_freq': ...} (see handleQuery)
postings_file = None  # open binary handle to the postings file
postings_sizes = []  # per-term byte offsets used by getPosting() seeks
starting_byte_offset = 0  # file position where the postings data begins
all_doc_ids = []  # not used in the visible code
class ScoreDocIDPair(object):
    """Heap entry pairing a (negated) score with a document id.

    Orders primarily by score; ties are broken by ascending doc id so that
    equally-scored documents pop from the heap in deterministic order.
    """

    def __init__(self, score, doc_id):
        self.score = score
        self.doc_id = doc_id

    def __lt__(self, other):
        # compare doc ids only when the scores tie
        return self.doc_id < other.doc_id if self.score == other.score else self.score < other.score

    def __repr__(self):
        return '%6s : %.10f' % (self.doc_id, self.score)

    # __str__ previously duplicated __repr__'s body verbatim; delegate instead
    __str__ = __repr__
def usage():
    """Print how to invoke this script from the command line."""
    flags = " -d dictionary-file -p postings-file -q file-of-queries -l lengths-file -o output-file-of-results"
    print("usage: " + sys.argv[0] + flags)
def getPosting(index_of_term):
    """Seek to and unpickle the postings list for the term at *index_of_term*."""
    if index_of_term - 1 < 0:
        offset = 0
    else:
        offset = postings_sizes[index_of_term - 1]
    postings_file.seek(starting_byte_offset + offset, 0)
    return pickle.load(postings_file)
def preprocess_query(query):
    """Tokenize the lowercased query, drop punctuation, stem, and count terms."""
    stemmer = PorterStemmer()
    punct = set(string.punctuation)
    terms = [stemmer.stem(t) for t in word_tokenize(query.lower()) if t not in punct]
    return Counter(terms)
def handleQuery(query):
    """Rank documents for *query* by tf-idf cosine similarity.

    Returns up to the ten best doc ids; score ties break by ascending doc id
    via ScoreDocIDPair.
    """
    term_counts = preprocess_query(query)
    scores = {}
    query_weights = []
    for term, query_tf in term_counts.items():
        if term not in dictionary:
            continue
        dict_entry = dictionary.get(term)
        postings_entry = getPosting(dict_entry['index'])
        idf = math.log10(len(lengths) / dict_entry['doc_freq'])
        query_tf_weight = 1 + math.log10(query_tf)
        for doc_id, doc_tf in postings_entry:
            doc_tf_weight = 1 + math.log10(doc_tf)
            scores[doc_id] = scores.get(doc_id, 0) + doc_tf_weight * query_tf_weight * idf
        query_weights.append(query_tf_weight * idf)
    # normalize by document length and the query's L2 norm
    query_l2_norm = math.sqrt(sum(math.pow(1 + math.log10(w), 2) for w in query_weights))
    for doc_id in list(scores):
        scores[doc_id] /= lengths[doc_id] * query_l2_norm
    # heapq is a min-heap, so scores are negated to pop the largest first
    scores_heap = [ScoreDocIDPair(-score, doc_id) for doc_id, score in scores.items()]
    heapq.heapify(scores_heap)
    return [heapq.heappop(scores_heap).doc_id for _ in range(min(len(scores_heap), 10))]
if __name__ == '__main__':
    dict_path = postings_path = query_path = output_path = lengths_path = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'd:p:q:o:l:')
    except getopt.GetoptError as err:
        usage()
        sys.exit(2)
    # map the command-line flags onto the expected file paths
    for o, a in opts:
        if o == '-d':
            dict_path = a
        elif o == '-p':
            postings_path = a
        elif o == '-q':
            query_path = a
        elif o == '-o':
            output_path = a
        elif o == '-l':
            lengths_path = a
        else:
            assert False, "unhandled option"
    # all five paths are mandatory
    if dict_path == None or postings_path == None or query_path == None or output_path == None or lengths_path == None:
        usage()
        sys.exit(2)
    with open(dict_path, 'rb') as f:
        dictionary = pickle.load(f)
    with open(lengths_path, 'rb') as f:
        lengths = pickle.load(f)
    # load postings object sizes to calculate seek offset from current position of file
    postings_file = open(postings_path, 'rb')
    postings_sizes = pickle.load(postings_file)
    starting_byte_offset = postings_file.tell()
    output_file = open(output_path, 'w')
    # answer each non-blank query line with up to ten ranked doc ids
    with open(query_path, 'r') as f:
        for line in f:
            line = line.strip()
            if line != '':
                result = handleQuery(line)
                output = ' '.join(result)
                print('OUTPUT', output)
                output_file.write(output + '\n')
    output_file.close()
    postings_file.close()
|
# calibratormodel.py
#
# David J. Lampert (djlampert@gmail.com)
#
# contains the CalibratorModel class, a child of the HSPFModel class to use
# for running simulations during a hydrology calibration that minimizes
# memory requirements
from pyhspf.core import HSPFModel, WDMUtil
class CalibratorModel(HSPFModel):
"""
Child of the HSPFModel class that strips down the external targets block.
"""
def __init__(self,
units = 'Metric',
):
HSPFModel.__init__(self, units = units)
    def build_uci(self,
                  reach,
                  start,
                  end,
                  states = None,
                  atemp = False,
                  snow = False,
                  hydrology = False,
                  verbose = False,
                  ):
        """
        Builds the User Control Input (UCI) file for an HSPF Simulation for
        a hydrology calibration.

        reach     -- identifier of the calibration gage reach, forwarded to
                     the EXT TARGETS block
        start/end -- simulation start and end (start.year is used below)
        states    -- optional initial states (dictionary form from the
                     Postprocessor class)
        atemp, snow, hydrology -- flags enabling the corresponding modules
        verbose   -- print progress information
        """
        if verbose: print('generating the UCI file from the watershed data\n')
        # file types and name; see HSPF documentation for more explanation
        # ftypes are the different file classes for HSPF I/O (echo, WDM, etc)
        # funits are the Fortran numbers assigned to the files (10-98)
        # fnames are the names or paths to the files
        self.ucifile = '{}.uci'.format(self.filename)
        self.wdmoutfile = '{}_out.wdm'.format(self.filename)
        # echo file for input file processing name (assumed same as uci)
        self.echofile = self.ucifile[:-4] + '.ech'
        # external target names
        self.ftypes = ['MESSU', 'WDM1', 'WDM2']
        self.fnames = [self.echofile, self.wdminfile, self.wdmoutfile]
        self.funits = [10, 11, 12]
        # set the initial states if provided (in dictionary form from the
        # Postprocessor class)
        if states is not None: self.set_states(states)
        # "RUN" marks the start of the execution
        lines = ['RUN', '']
        # add the GLOBAL block
        lines = lines + self.global_block(1, start, end)
        # add the FILES block
        lines = lines + self.files_block()
        # add the OPN SEQUENCE block
        lines = lines + self.opn_sequence_block()
        # add SPEC-ACTIONS block
        lines = lines + self.spec_actions_block()
        # add the PERLND block
        lines += self.perlnd_block(hydrology = hydrology,
                                   atemp = atemp, snow = snow)
        # add the IMPLND block
        lines += self.implnd_block(hydrology = hydrology, atemp = atemp,
                                   snow = snow)
        # add the RCHRES block if needed
        if hydrology: lines += self.rchres_block(hydrology = hydrology)
        # add the EXT SOURCES block
        lines = lines + self.ext_sources_block()
        # add the SCHEMATIC and MASS-LINK blocks if needed
        if hydrology:
            lines = lines + self.schematic_block()
            lines = lines + self.masslink_block(hydrology = hydrology)
        # add the EXT TARGETS block
        lines += self.ext_targets_block(reach, start.year, verbose = verbose)
        # add the FTABLES block for the RCHRESES if needed
        if hydrology: lines += self.ftables_block()
        # "close" the simulation info
        lines.append('END RUN')
        # write the lines to the uci file
        with open(self.ucifile, 'w') as f:
            for line in lines: f.write(line + '\n')
    def ext_targets_block(self,
                          comid,
                          year,
                          tcode = 4,
                          tsstep = 1,
                          verbose = False,
                          ):
        """
        Adds the EXT TARGETS block to a UCI file and creates the output WDM
        file.

        comid is the identifier of the gage reach; only that reach's outflow
        volume (ROVOL) is targeted

        tcode is the time code: 2 = minutes, 3 = hours, 4 = days
        tsstep is the time step in tcode units
        e.g., tcode = 3, tsstep = 4 is a 4-hour time step

        this method enables a single external target with aggregation that
        isn't possible using the HSPFModel in the core.

        returns the list of UCI lines for the block
        """
        lines = ['EXT TARGETS',
                 '<-Volume-> <-Grp> <-Member-><--Mult-->Tran <-Volume->' +
                 ' <Member> Tsys Aggr Amd ***',
                 '<Name> x <Name> x x<-factor->strg <Name>' +
                 ' x <Name>qf tem strg strg***']
        wdm = WDMUtil(verbose = verbose, messagepath = self.messagepath)
        wdm.open(self.wdmoutfile, 'w')
        # dataset numbers are assigned by reach in order (subject to revision)
        # keep track of dsns in a dictionary
        n = 1
        # since this class is just for calibration of a single gage, only need
        # to keep up with reach outflow volume
        otype = 'RCHRES'
        group = 'HYDR'
        var = 'ROVOL'
        tstype = 'VOL'
        tsform = 1
        idcons = 'ROVOL'
        func = 'SUM '
        # this overwrites all the rchreses with just the comid for the gage
        reaches = [r for r in self.rchreses if r.subbasin == comid]
        new = self.add_ext_targets(reaches, wdm, year, n, otype,
                                   group, var, tsform, tstype, idcons, func,
                                   tcode, tsstep)
        lines = lines + new
        n += len(new)
        # close the wdmeditor
        wdm.close(self.wdmoutfile)
        wdm.close_message()
        # finish up
        lines = lines + ['END EXT TARGETS', '']
        return lines
def build_submodel(self,
hspfmodel,
comid,
upcomids = [],
name = None,
verbose = True,
):
"""
Builds a submodel from an existing HSPFModel "model" by removing
any subbasins downstream from "comid" and (optionally) any subbasins
upstream from "upcomids." Removing upstream subbasins necessitates
external time series representing mass inflows.
"""
if name is None: name = comid
self.build_from_existing(hspfmodel, name)
# turn on the modules
#if self.atemp: model.add_atemp()
#if self.snow: model.add_snow()
#if self.hydrology: model.add_hydrology()
# add the time series
#for f in self.hspfmodel.flowgages:
# start, tstep, data = self.hspfmodel.flowgages[f]
# model.add_timeseries('flowgage', f, start, data, tstep = tstep)
#for p in self.hspfmodel.precipitations:
# start, tstep, data = self.hspfmodel.precipitations[p]
# model.add_timeseries('precipitation', p, start, data, tstep = tstep)
#for e in self.hspfmodel.evaporations:
# start, tstep, data = self.hspfmodel.evaporations[e]
# model.add_timeseries('evaporation', e, start, data, tstep = tstep)
#for t in self.hspfmodel.temperatures:
# start, tstep, data = self.hspfmodel.temperatures[t]
# model.add_timeseries('temperature', t, start, data, tstep = tstep)
#for t in self.hspfmodel.dewpoints:
# start, tstep, data = self.hspfmodel.dewpoints[t]
# model.add_timeseries('dewpoint', t, start, data, tstep = tstep)
#for t in self.hspfmodel.windspeeds:
# start, tstep, data = self.hspfmodel.windspeeds[t]
# model.add_timeseries('wind', t, start, data, tstep = tstep)
#for t in self.hspfmodel.solars:
# start, tstep, data = self.hspfmodel.solars[t]
# model.add_timeseries('solar', t, start, data, tstep = tstep)
#for t in self.hspfmodel.snowfalls:
# start, tstep, data = self.hspfmodel.snowfalls[t]
# model.add_timeseries('snowfall', t, start, data, tstep = tstep)
#for t in self.hspfmodel.snowdepths:
# start, tstep, data = self.hspfmodel.snowdepths[t]
# model.add_timeseries('snowdepth', t, start, data, tstep = tstep)
#for tstype, identifier in self.hspfmodel.watershed_timeseries.items():
# model.assign_watershed_timeseries(tstype, identifier)
#for tstype, d in self.hspfmodel.subbasin_timeseries.items():
# for subbasin, identifier in d.items():
# if subbasin in model.subbasins:
# model.assign_subbasin_timeseries(tstype, subbasin,
# identifier)
#for tstype, d in self.hspfmodel.landuse_timeseries.items():
# for luc, identifier in d.items():
# model.assign_landuse_timeseries(tstype, luc, identifier)
#return model
# find the subbasins between the outlet and the upstream comids and
# store in an updown dictionary
updown = {up:down
for up, down in hspfmodel.updown.items()
if down == comid}
current = 0
while current != len(updown):
# see if the current length changes to check if done
current = len(updown)
# iterate throught the subbasins and see if any need to be added
for up, down in hspfmodel.updown.items():
if (up not in updown and # not already there
up not in upcomids and # between the boundaries
down in updown): # downstream is there
updown[up] = down
# overwrite the old updown dictionary
self.updown = updown
# overwrite the inlets and outlets
self.inlets = [hspfmodel.updown[c] for c in upcomids]
self.outlets = [comid]
# overwrite the old subbasin dictionary
self.subbasins = {c: subbasin
for c, subbasin in self.subbasins.items()
if c in updown or c == comid}
# overwrite the perlnd, implnd, rchres lists
self.perlnds = [p for p in self.perlnds
if p.subbasin in self.subbasins]
self.implnds = [i for i in self.implnds
if i.subbasin in self.subbasins]
self.rchreses = [r for r in self.rchreses
if r.subbasin in self.subbasins]
# build with the updated model subbasin info
#submodel.build()
# add in the modules
#if self.temp: submodel.add_temp()
#if self.snow:
#
# densities = [o.RDENPF
# for o in hspfmodel.perlnds + hspfmodel.implnds]
# depths = [o.packsnow / o.RDENPF
# for o in hspfmodel.perlnds + hspfmodel.implnds]
#
# depth = sum(depths) / len(depths)
# density = sum(densities) / len(densities)
#
# submodel.add_snow(depth = depth, density = density)
#
#if self.hydrology: submodel.add_hydrology()
# add the flowgage data to the model
for identifier in hspfmodel.flowgages:
if identifier == comid:
start_date, tstep, data = hspfmodel.flowgages[identifier]
self.add_timeseries('flowgage', identifier, start_date,
data, tstep = tstep)
# add the watershed time series dictionaries for the model
timeseries = {'inflow': hspfmodel.inflows,
'temperature': hspfmodel.temperatures,
'dewpoint': hspfmodel.dewpoints,
'wind': hspfmodel.windspeeds,
'solar': hspfmodel.solars,
'snowfall': hspfmodel.snowfalls,
'snowdepth': hspfmodel.snowdepths,
'precipitation': hspfmodel.precipitations,
'evaporation': hspfmodel.evaporations,
'flowgage': hspfmodel.flowgages,
}
#for tstype, d in timeseries.items():
# for identifier in d:
# start_date, tstep, data = d[identifier]
# self.add_timeseries(tstype, identifier, start_date, data,
# tstep = tstep)
# add and assign all the watershed timeseries
for tstype, identifier in hspfmodel.watershed_timeseries.items():
ts = timeseries[tstype]
s, t, data = ts[identifier]
self.add_timeseries(tstype, identifier, s, data, tstep = t)
self.assign_watershed_timeseries(tstype, identifier)
# add and assign all the land use time series
for tstype, d in hspfmodel.landuse_timeseries.items():
ts = timeseries[tstype]
for landuse, identifier in d.items():
s, t, data = ts[identifier]
self.add_timeseries(tstype, identifier, s, data, tstep = t)
self.assign_landuse_timeseries(tstype, landuse, identifier)
# add and assign subbasin land use time series inside the submodel
for tstype, d in hspfmodel.subbasin_timeseries.items():
ts = timeseries[tstype]
for c, identifier in d.items():
if c in updown or c == comid:
s, t, l = ts[identifier]
self.add_timeseries(tstype, identifier, s, l, tstep = t)
self.assign_subbasin_timeseries(tstype, c, identifier)
# add and assign operation time series inside the submodel
for tstype, d1 in hspfmodel.operation_timeseries.items():
ts = timeseries[tstype]
for subbasin, d2 in d1.items():
for otype, id in d2.items():
if subbasin in self.subbasins:
s, t, l = ts[id]
self.add_timeseries(tstype, id, s, l, tstep = t)
self.assign_operation_timeseries(tstype, subbasin,
otype, id)
# add the influent flows as needed
for upcomid in upcomids:
print('warning: input flow time series for subbasin ' +
'{} must be specified'.format(upcomid))
# add the subbasin timeseries as needed
#for identifier in hspfmodel.subbasin_timeseries:
# if identifier in submodel.subbasins.keys():
# start_date, tstep, data = hspfmodel.precipitations[identifier]
# submodel.add_timeseries('precipitation', identifier, start_date,
# data, tstep = tstep)
# add the landuse timeseries as needed
#landuse_keys = {'Corn': 'cereals',
# 'Soybeans': 'legumes',
# 'Pasture/grass': 'pasture',
# 'Other grain': 'cereals',
# 'Hay/alfalfa': 'alfalfa',
# 'Water/wetland': 'wetlands',
# 'Fallow land': 'fallow',
# 'Forest': 'others',
# 'Developed': 'others',
# 'Impervious': 'others',
# 'Other': 'others',
# }
#ltypes = [landuse_keys[i] for i in hspfmodel.landuse]
#for identifier in hspfmodel.evaporations:
# if identifier in ltypes:
# start_date, tstep, data = hspfmodel.evaporations[identifier]
# submodel.add_timeseries('evaporation', identifier, start_date,
# data, tstep = tstep)
# add the influent flows as needed
#for upcomid in upcomids:
# find the upstream gage number
# upgage = [v for k, v in
# hspfmodel.subbasin_timeseries['flowgage'].items()
# if k == upcomid][0]
# incomid = hspfmodel.updown[upcomid]
# find the outlet flows from the previous upstream calibration
# t = (self.directory, self.HUC8, upgage)
# flowfile = '{}/{}/calibrations/{}/outletflows'.format(*t)
# get the time series and add it to the model
# if not os.path.isfile(flowfile):
# raise RuntimeError('warning: upstream calibration of gage ' +
# '{} does not exist\n'.format(upgage))
# with open(flowfile, 'rb') as f: times, data = pickle.load(f)
# tstep = math.ceil((times[1] - times[0]).total_seconds() / 60)
# submodel.add_timeseries('inflow', '{}'.format(incomid), times[0],
# data, tstep = tstep)
# assign the inflows from upstream to any subbasins
# otype = 'Reach'
# submodel.assign_operation_timeseries('inflow', incomid, 'Reach',
# '{}'.format(incomid))
# assign as needed
#for tstype, identifier in hspfmodel.watershed_timeseries.items():
#
# submodel.assign_watershed_timeseries(tstype, identifier)
#for tstype, d in hspfmodel.subbasin_timeseries.items():
# for subbasin, identifier in d.items():
# if subbasin in submodel.subbasins:
# submodel.assign_subbasin_timeseries(tstype, subbasin,
# identifier)
#for tstype, d in hspfmodel.landuse_timeseries.items():
# for landtype, identifier in d.items():
# if landtype in submodel.landuse:
# submodel.assign_landuse_timeseries(tstype, landtype,
# identifier)
#for tstype, d1 in hspfmodel.operation_timeseries.items():
# for subbasin, d2 in d1.items():
# for otype, identifier in d2.items():
# if subbasin in submodel.subbasins:
# submodel.assign_operation_timeseries(tstype, subbasin,
# otype, identifier)
#with open(picklefile, 'wb') as f: pickle.dump(submodel, f)
removed frivolous comments from calibratormodel
# calibratormodel.py
#
# David J. Lampert (djlampert@gmail.com)
#
# contains the CalibratorModel class, a child of the HSPFModel class to use
# for running simulations during a hydrology calibration that minimizes
# memory requirements
from pyhspf.core import HSPFModel, WDMUtil
class CalibratorModel(HSPFModel):
    """
    Child of the HSPFModel class that strips down the external targets block.

    Intended for repeated simulations during a hydrology calibration: only
    the outflow volume (ROVOL) of the calibration gage reach is written to
    the output WDM file, which minimizes memory and disk use per run.
    """

    def __init__(self,
                 units = 'Metric',
                 ):

        HSPFModel.__init__(self, units = units)

    def build_uci(self,
                  reach,
                  start,
                  end,
                  states = None,
                  atemp = False,
                  snow = False,
                  hydrology = False,
                  verbose = False,
                  ):
        """
        Builds the User Control Input (UCI) file for an HSPF Simulation for
        a hydrology calibration.

        reach                  -- comid of the calibration gage reach
        start, end             -- simulation period (datetime-likes; only
                                  start.year is read here)
        states                 -- optional initial states (dictionary form
                                  from the Postprocessor class)
        atemp, snow, hydrology -- flags enabling the corresponding modules
        verbose                -- print progress information
        """

        if verbose: print('generating the UCI file from the watershed data\n')

        # file types and name; see HSPF documentation for more explanation
        # ftypes are the different file classes for HSPF I/O (echo, WDM, etc)
        # funits are the Fortran numbers assigned to the files (10-98)
        # fnames are the names or paths to the files

        self.ucifile = '{}.uci'.format(self.filename)
        self.wdmoutfile = '{}_out.wdm'.format(self.filename)

        # echo file for input file processing name (assumed same as uci)

        self.echofile = self.ucifile[:-4] + '.ech'

        # external target names

        self.ftypes = ['MESSU', 'WDM1', 'WDM2']
        self.fnames = [self.echofile, self.wdminfile, self.wdmoutfile]
        self.funits = [10, 11, 12]

        # set the initial states if provided (in dictionary form from the
        # Postprocessor class)

        if states is not None: self.set_states(states)

        # "RUN" marks the start of the execution

        lines = ['RUN', '']

        # add the GLOBAL block

        lines = lines + self.global_block(1, start, end)

        # add the FILES block

        lines = lines + self.files_block()

        # add the OPN SEQUENCE block

        lines = lines + self.opn_sequence_block()

        # add SPEC-ACTIONS block

        lines = lines + self.spec_actions_block()

        # add the PERLND block

        lines += self.perlnd_block(hydrology = hydrology,
                                   atemp = atemp, snow = snow)

        # add the IMPLND block

        lines += self.implnd_block(hydrology = hydrology, atemp = atemp,
                                   snow = snow)

        # add the RCHRES block if needed

        if hydrology: lines += self.rchres_block(hydrology = hydrology)

        # add the EXT SOURCES block

        lines = lines + self.ext_sources_block()

        # add the SCHEMATIC and MASS-LINK blocks if needed

        if hydrology:

            lines = lines + self.schematic_block()
            lines = lines + self.masslink_block(hydrology = hydrology)

        # add the EXT TARGETS block

        lines += self.ext_targets_block(reach, start.year, verbose = verbose)

        # add the FTABLES block for the RCHRESES if needed

        if hydrology: lines += self.ftables_block()

        # "close" the simulation info

        lines.append('END RUN')

        # write the lines to the uci file

        with open(self.ucifile, 'w') as f:

            for line in lines: f.write(line + '\n')

    def ext_targets_block(self,
                          comid,
                          year,
                          tcode = 4,
                          tsstep = 1,
                          verbose = False,
                          ):
        """
        Adds the EXT TARGETS block to a UCI file and creates the output WDM
        file.

        comid  -- reach id of the calibration gage
        year   -- start year for the output WDM datasets
        tcode  -- time code: 2 = minutes, 3 = hours, 4 = days
        tsstep -- the time step in tcode units
                  e.g., tcode = 3, tsstep = 4 is a 4-hour time step

        This method enables a single external target with aggregation that
        isn't possible using the HSPFModel in the core.
        """

        lines = ['EXT TARGETS',
                 '<-Volume-> <-Grp> <-Member-><--Mult-->Tran <-Volume->' +
                 ' <Member> Tsys Aggr Amd ***',
                 '<Name> x <Name> x x<-factor->strg <Name>' +
                 ' x <Name>qf tem strg strg***']

        wdm = WDMUtil(verbose = verbose, messagepath = self.messagepath)
        wdm.open(self.wdmoutfile, 'w')

        # dataset numbers are assigned by reach in order (subject to revision)

        n = 1

        # since this class is just for calibration of a single gage, only need
        # to keep up with reach outflow volume

        otype = 'RCHRES'
        group = 'HYDR'
        var = 'ROVOL'
        tstype = 'VOL'
        tsform = 1
        idcons = 'ROVOL'
        func = 'SUM '

        # this overwrites all the rchreses with just the comid for the gage

        reaches = [r for r in self.rchreses if r.subbasin == comid]

        new = self.add_ext_targets(reaches, wdm, year, n, otype,
                                   group, var, tsform, tstype, idcons, func,
                                   tcode, tsstep)
        lines = lines + new
        n += len(new)

        # close the wdmeditor

        wdm.close(self.wdmoutfile)
        wdm.close_message()

        # finish up

        lines = lines + ['END EXT TARGETS', '']

        return lines

    def build_submodel(self,
                       hspfmodel,
                       comid,
                       upcomids = None,
                       name = None,
                       verbose = True,
                       ):
        """
        Builds a submodel from an existing HSPFModel "model" by removing
        any subbasins downstream from "comid" and (optionally) any subbasins
        upstream from "upcomids." Removing upstream subbasins necessitates
        external time series representing mass inflows.

        hspfmodel -- the source HSPFModel to carve the submodel from
        comid     -- outlet reach id of the submodel
        upcomids  -- optional iterable of upstream boundary reach ids
        name      -- submodel name (defaults to comid)
        """

        # bug fix: the previous mutable [] default argument is replaced by
        # the None sentinel idiom
        if upcomids is None: upcomids = []

        if name is None: name = comid

        self.build_from_existing(hspfmodel, name)

        # find the subbasins between the outlet and the upstream comids and
        # store in an updown dictionary

        updown = {up:down
                  for up, down in hspfmodel.updown.items()
                  if down == comid}

        current = 0
        while current != len(updown):

            # see if the current length changes to check if done

            current = len(updown)

            # iterate through the subbasins and see if any need to be added

            for up, down in hspfmodel.updown.items():
                if (up not in updown and      # not already there
                    up not in upcomids and    # between the boundaries
                    down in updown):          # downstream is there
                    updown[up] = down

        # overwrite the old updown dictionary

        self.updown = updown

        # overwrite the inlets and outlets

        self.inlets = [hspfmodel.updown[c] for c in upcomids]
        self.outlets = [comid]

        # overwrite the old subbasin dictionary

        self.subbasins = {c: subbasin
                          for c, subbasin in self.subbasins.items()
                          if c in updown or c == comid}

        # overwrite the perlnd, implnd, rchres lists

        self.perlnds = [p for p in self.perlnds
                        if p.subbasin in self.subbasins]
        self.implnds = [i for i in self.implnds
                        if i.subbasin in self.subbasins]
        self.rchreses = [r for r in self.rchreses
                         if r.subbasin in self.subbasins]

        # add the flowgage data to the model

        for identifier in hspfmodel.flowgages:
            if identifier == comid:
                start_date, tstep, data = hspfmodel.flowgages[identifier]
                self.add_timeseries('flowgage', identifier, start_date,
                                    data, tstep = tstep)

        # add the watershed time series dictionaries for the model

        timeseries = {'inflow': hspfmodel.inflows,
                      'temperature': hspfmodel.temperatures,
                      'dewpoint': hspfmodel.dewpoints,
                      'wind': hspfmodel.windspeeds,
                      'solar': hspfmodel.solars,
                      'snowfall': hspfmodel.snowfalls,
                      'snowdepth': hspfmodel.snowdepths,
                      'precipitation': hspfmodel.precipitations,
                      'evaporation': hspfmodel.evaporations,
                      'flowgage': hspfmodel.flowgages,
                      }

        # add and assign all the watershed timeseries

        for tstype, identifier in hspfmodel.watershed_timeseries.items():

            ts = timeseries[tstype]

            s, t, data = ts[identifier]
            self.add_timeseries(tstype, identifier, s, data, tstep = t)
            self.assign_watershed_timeseries(tstype, identifier)

        # add and assign all the land use time series

        for tstype, d in hspfmodel.landuse_timeseries.items():

            ts = timeseries[tstype]

            for landuse, identifier in d.items():

                s, t, data = ts[identifier]
                self.add_timeseries(tstype, identifier, s, data, tstep = t)
                self.assign_landuse_timeseries(tstype, landuse, identifier)

        # add and assign subbasin land use time series inside the submodel

        for tstype, d in hspfmodel.subbasin_timeseries.items():

            ts = timeseries[tstype]

            for c, identifier in d.items():

                if c in updown or c == comid:

                    s, t, l = ts[identifier]
                    self.add_timeseries(tstype, identifier, s, l, tstep = t)
                    self.assign_subbasin_timeseries(tstype, c, identifier)

        # add and assign operation time series inside the submodel

        for tstype, d1 in hspfmodel.operation_timeseries.items():

            ts = timeseries[tstype]

            for subbasin, d2 in d1.items():
                # renamed loop variable: "id" shadowed the builtin
                for otype, tsid in d2.items():

                    if subbasin in self.subbasins:

                        s, t, l = ts[tsid]
                        self.add_timeseries(tstype, tsid, s, l, tstep = t)
                        self.assign_operation_timeseries(tstype, subbasin,
                                                         otype, tsid)

        # add the influent flows as needed

        for upcomid in upcomids:
            print('warning: input flow time series for subbasin ' +
                  '{} must be specified'.format(upcomid))
|
#!/usr/bin/env python3
import os
import sys
import getopt
import ntpath
import xmltodict # Need to be installed
from PIL import Image # Need to install "pillow"
from PIL.ExifTags import TAGS, GPSTAGS
_usage = 'gpx_report -g track.gpx\ngpx_report -p photo_folder'
opt_force = False
opt_dir = ''
opt_gpx = ''
def errexit(errmsg):
    """Print *errmsg* to stderr (prefixed with 'ERROR!') and exit with status 1."""
    print('ERROR! ' + errmsg, file=sys.stderr)
    sys.exit(1)
# Stolen: V
def path_leaf(path):
    """Return the final component of *path*, tolerating a trailing separator."""
    parent, leaf = ntpath.split(path)
    if leaf:
        return leaf
    # path ended with a separator: fall back to the parent's last component
    return ntpath.basename(parent)
# def rreplace(s, old, new, occurrence):
# li = s.rsplit(old, occurrence)
# return new.join(li)
def get_exif_data(image):
    """Returns a dictionary from the exif data of an PIL Image item. Also
    converts the GPS Tags.

    GPS tags are decoded into a nested dict under 'GPSInfo'; EXIF tag 36867
    (DateTimeOriginal), when present, is additionally exposed as 'time'.
    Returns an empty dict when the image carries no EXIF data.
    """
    exif_data = {}
    info = image._getexif()
    if info:
        for tag, value in info.items():
            decoded = TAGS.get(tag, tag)
            if decoded == "GPSInfo":
                gps_data = {}
                for t in value:
                    sub_decoded = GPSTAGS.get(t, t)
                    gps_data[sub_decoded] = value[t]
                exif_data[decoded] = gps_data
            else:
                exif_data[decoded] = value
        # bug fix: the membership test is guarded by `if info:` so that a
        # None result from _getexif() cannot raise `TypeError: argument of
        # type 'NoneType' is not iterable`
        if 36867 in info:
            exif_data['time'] = info[36867]
    return exif_data
def _get_if_exist(data, key):
if key in data:
return data[key]
return None
def _convert_to_degress(value):
"""Helper function to convert the GPS coordinates stored in the EXIF to degress in float format"""
d0 = value[0][0]
d1 = value[0][1]
d = float(d0) / float(d1)
m0 = value[1][0]
m1 = value[1][1]
m = float(m0) / float(m1)
s0 = value[2][0]
s1 = value[2][1]
s = float(s0) / float(s1)
return d + (m / 60.0) + (s / 3600.0)
def get_lat_lon(exif_data):
    """Return (latitude, longitude) in signed decimal degrees from EXIF data
    produced by get_exif_data, or (None, None) when GPS tags are missing."""
    lat = None
    lon = None
    if "GPSInfo" in exif_data:
        gps_info = exif_data["GPSInfo"]
        latitude = _get_if_exist(gps_info, "GPSLatitude")
        latitude_ref = _get_if_exist(gps_info, 'GPSLatitudeRef')
        longitude = _get_if_exist(gps_info, 'GPSLongitude')
        longitude_ref = _get_if_exist(gps_info, 'GPSLongitudeRef')
        if latitude and latitude_ref and longitude and longitude_ref:
            lat = _convert_to_degress(latitude)
            # southern hemisphere is negative
            if latitude_ref != "N":
                lat = -lat
            lon = _convert_to_degress(longitude)
            # western hemisphere is negative
            if longitude_ref != "E":
                lon = -lon
    return lat, lon
# Stolen ^
def process_GPX(filename):
    """
    Render the Photo / Voice-recording waypoints of GPX file *filename* into
    an index.html report written next to it.

    Voice recordings are transcoded to mp3 with ffmpeg so browsers can play
    them inline. Exits via errexit() on any fatal problem; exits 0 when the
    track has no suitable waypoints.
    """
    import subprocess  # local import: only needed for the ffmpeg call

    wpts = []
    dirname = os.path.dirname(filename)

    def makeRow(id, wpt):
        # one table row per waypoint; photos get a thumbnail link, voice
        # notes an embedded <audio> player
        rv = '<tr><td>' + str(id) + '</td><td>' + wpt['lat'] + '</td><td>' + wpt['lon'] + '</td>' + '<td>' + wpt['ele'] + '</td>'
        if wpt['name'] == 'Photo':
            rv += '<td><a href="' + wpt['link'] + '"><img src="' + wpt['link'] + '" height="32"></td></a>'
        else:
            auoname = os.path.join(dirname, wpt['link'])  # Audio Old Name
            aunname = auoname[:-4] + 'mp3'  # Audio New Name
            if not os.path.isfile(auoname):
                errexit('Audio file not found: "' + auoname + '"')
            # security fix: pass an argument vector instead of a shell string;
            # os.system() allowed shell injection through crafted filenames
            subprocess.run(['ffmpeg', '-y', '-i', auoname, aunname])
            rv += '<td><audio controls><source src="' + wpt['link'][:-4] + \
                  'mp3" type="audio/mpeg">Your browser does not support the audio element.</audio></td>'
        return rv + '</td>'

    if not filename.endswith('.gpx'):
        errexit('File with non-GPX extension given: "' + filename + '"')
    with open(filename, 'r') as pFile:
        od1 = xmltodict.parse(pFile.read())
    print(od1)
    if 'gpx' in od1 and 'wpt' in od1['gpx']:
        print('Found some waypoints...')
        for wp in od1['gpx']['wpt']:
            if wp['name'] in ('Voice recording', 'Photo'):
                wpts.append({'lat': wp['@lat'], 'lon': wp['@lon'], 'ele': wp['ele'],
                             'name': wp['name'], 'link': wp['link']['text']})
    if len(wpts) < 1:
        # message typo fixed: "where" -> "were"
        print('Looks like there were no suitable waypoints :(')
        sys.exit(0)
    ofname = os.path.join(os.path.dirname(filename), 'index.html')
    print('Will try to write index.html ...')
    # os.path.exists() + os.path.isfile() was redundant; isfile suffices
    if os.path.isfile(ofname):
        if opt_force:
            print('Overwriting...')
        else:
            errexit('Destination HTML file exists! -f to overwrite!')
    try:
        pFile = open(ofname, 'w')
    except OSError:
        errexit('Could not create file for writing: "' + ofname + '"')
    head = """
<html><head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<!-- Latest compiled and minified CSS -->
<title>GPX</title>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css"
integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u"
crossorigin="anonymous"></head>"""
    head += '<body>'
    head += '<div class="panel panel-default"><div class="panel-heading">Generated from file "' + \
            path_leaf(filename) + '"</div><table class="table">'
    # bug fix: close the output file deterministically instead of leaking the
    # handle (the original never called close())
    with pFile:
        pFile.write(head)
        pFile.write('<tr><td>No</td><td>Latitude</td><td>Longitude</td><td>Elevation</td><td>Media</td></tr>')
        for i in range(len(wpts)):
            pFile.write(makeRow(i, wpts[i]))
        pFile.write('</div></table></body></html>')
def process_folder(folder):
    """
    Scan *folder* for geotagged JPEG photos and write a <folder>.gpx file
    with one Photo waypoint per image carrying GPS EXIF data.

    Exits via errexit() on fatal problems (bad folder, no geotagged photos,
    existing target, write errors).
    """
    wpts = []
    exts = ['jpg', 'JPG', 'jpeg', 'JPEG']

    def format_waypoint(wpt):
        # '\n\t\t<ele>227.0</ele>' + \ #TODO FIXME elevation not extracted yet
        rv = '\t<wpt lat="' + str(wpt['lat']) + '" lon="' + str(wpt['lon']) + '">' + \
             '\n\t\t<time>' + wpt['time'] + '</time>' + \
             '\n\t\t<name><![CDATA[Photo]]></name>' + \
             '\n\t\t<link href="' + wpt['fname'] + '">' + \
             '\n\t\t\t<text>' + wpt['fname'] + '</text>' + \
             '\n\t\t</link>' + \
             '\n\t\t<sat>0</sat>' + \
             '\n\t</wpt>'
        return rv

    if not os.path.isdir(folder):
        errexit('Invalid foldername given: "' + folder + '"')
    for dirname, dirnames, filenames in os.walk(folder):
        for fname in filenames:
            # bug fix: fname.split('.')[1] raised IndexError for names
            # without a dot and tested the wrong segment of multi-dot names;
            # compare the real extension instead
            if os.path.splitext(fname)[1][1:] in exts:
                img = Image.open(os.path.join(folder, fname))
                exif_data = get_exif_data(img)
                lat, lon = get_lat_lon(exif_data)
                # bug fix: skip photos without embedded GPS coordinates --
                # round(None) raised TypeError here
                if lat is None or lon is None:
                    continue
                # NOTE(review): assumes tag 36867 (DateTimeOriginal) exists
                # whenever GPS data does; KeyError otherwise -- confirm
                wpts.append({'lat': round(lat, 7), 'lon': round(lon, 7),
                             'time': exif_data['time'], 'fname': fname})
    if len(wpts) < 1:
        errexit('Could not found photos with GPS tags!')
    gpx_header = '<?xml version="1.0" encoding="UTF-8" ?>' + \
                 '\n<gpx xmlns="http://www.topografix.com/GPX/1/1" version="1.1" creator="gpx_report" ' + \
                 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' + \
                 'xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd ">'
    gpx_footer = '</gpx>'
    gpx_fname = os.path.join(folder, path_leaf(folder) + '.gpx')
    print('Destination filename: ' + gpx_fname)
    if os.path.exists(gpx_fname):
        if os.path.isfile(gpx_fname):
            if opt_force:
                print('Overwriting existing GPX file...')
            else:
                errexit('Target GPX file exists! -f to overwrite!')
        else:
            errexit('Will not create GPX file, probably folder with same name exists!')
    try:
        with open(gpx_fname, 'w') as pFile:
            pFile.write(gpx_header)
            for wpt in wpts:
                pFile.write(format_waypoint(wpt))
            pFile.write(gpx_footer)
    except OSError:
        errexit('Write error. Permission issue?')
    except Exception:
        # bug fix: narrowed from a bare except, which also swallowed
        # SystemExit and KeyboardInterrupt
        errexit('Something went wrong.')
# command-line entry point: parse options, validate the input path, and
# dispatch to the GPX-report or photo-folder workflow
if __name__ == '__main__':
    try:
        # -g/--gpx: GPX file to render; -d/--dir: photo folder to scan;
        # -f/--force: overwrite outputs; -h/--help: print usage
        opts, args = getopt.getopt(sys.argv[1:], 'hfg:d:', ['gpx=', 'dir=', 'help', 'force'])
    except getopt.GetoptError:
        print(_usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            print('Help:\n' + _usage)
            sys.exit(0)
        elif opt in ('-d', '--dir'):
            opt_dir = arg
        elif opt in ('-g', '--gpx'):
            opt_gpx = arg
        elif opt in ('-f', '--force'):
            opt_force = True
        else:
            print(_usage)
            errexit('Invalid option given: "' + opt + '"')
    # at least one input mode is required
    if opt_dir == '' and opt_gpx == '':
        errexit('use either -g or -d for input!')
    if opt_gpx != '':
        if not os.path.isfile(opt_gpx):
            errexit('Invalid parameter! Not a file "' + opt_gpx + '"')
        try:
            # probe readability before handing the path to process_GPX
            pFile = open(opt_gpx, 'r')
            pFile.close()
        except OSError:
            errexit('Invalid parameter! Can not open "' + opt_gpx + '"')
        process_GPX(opt_gpx)
    if opt_dir != '':
        if not os.path.isdir(opt_dir):
            errexit('Invalid parameter! Not a folder "' + opt_dir + '"')
        process_folder(opt_dir)
Fix for images with no GPS data
#!/usr/bin/env python3
import os
import sys
import getopt
import ntpath
import xmltodict # Need to be installed
from PIL import Image # Need to install "pillow"
from PIL.ExifTags import TAGS, GPSTAGS
_usage = 'gpx_report -g track.gpx\ngpx_report -p photo_folder'
opt_force = False
opt_dir = ''
opt_gpx = ''
def errexit(errmsg):
    """Print *errmsg* to stderr (prefixed with 'ERROR!') and exit with status 1."""
    print('ERROR! ' + errmsg, file=sys.stderr)
    sys.exit(1)
# Stolen: V
def path_leaf(path):
    """Return the final component of *path*, tolerating a trailing separator."""
    parent, leaf = ntpath.split(path)
    if leaf:
        return leaf
    # path ended with a separator: fall back to the parent's last component
    return ntpath.basename(parent)
# def rreplace(s, old, new, occurrence):
# li = s.rsplit(old, occurrence)
# return new.join(li)
def get_exif_data(image):
    """Returns a dictionary from the exif data of an PIL Image item. Also
    converts the GPS Tags.

    GPS tags are decoded into a nested dict under 'GPSInfo'; EXIF tag 36867
    (DateTimeOriginal), when present, is additionally exposed as 'time'.
    Returns an empty dict when the image carries no EXIF data.
    """
    exif_data = {}
    info = image._getexif()
    if info:
        for tag, value in info.items():
            decoded = TAGS.get(tag, tag)
            if decoded == "GPSInfo":
                gps_data = {}
                for t in value:
                    sub_decoded = GPSTAGS.get(t, t)
                    gps_data[sub_decoded] = value[t]
                exif_data[decoded] = gps_data
            else:
                exif_data[decoded] = value
        # bug fix: the membership test is guarded by `if info:` so that a
        # None result from _getexif() cannot raise `TypeError: argument of
        # type 'NoneType' is not iterable`
        if 36867 in info:
            exif_data['time'] = info[36867]
    return exif_data
def _get_if_exist(data, key):
if key in data:
return data[key]
return None
def _convert_to_degress(value):
"""Helper function to convert the GPS coordinates stored in the EXIF to degress in float format"""
d0 = value[0][0]
d1 = value[0][1]
d = float(d0) / float(d1)
m0 = value[1][0]
m1 = value[1][1]
m = float(m0) / float(m1)
s0 = value[2][0]
s1 = value[2][1]
s = float(s0) / float(s1)
return d + (m / 60.0) + (s / 3600.0)
def get_lat_lon(exif_data):
    """Return (latitude, longitude) in signed decimal degrees from EXIF data
    produced by get_exif_data, or (None, None) when GPS tags are missing."""
    lat = None
    lon = None
    if "GPSInfo" in exif_data:
        gps_info = exif_data["GPSInfo"]
        latitude = _get_if_exist(gps_info, "GPSLatitude")
        latitude_ref = _get_if_exist(gps_info, 'GPSLatitudeRef')
        longitude = _get_if_exist(gps_info, 'GPSLongitude')
        longitude_ref = _get_if_exist(gps_info, 'GPSLongitudeRef')
        if latitude and latitude_ref and longitude and longitude_ref:
            lat = _convert_to_degress(latitude)
            # southern hemisphere is negative
            if latitude_ref != "N":
                lat = -lat
            lon = _convert_to_degress(longitude)
            # western hemisphere is negative
            if longitude_ref != "E":
                lon = -lon
    return lat, lon
# Stolen ^
def process_GPX(filename):
    """
    Render the Photo / Voice-recording waypoints of GPX file *filename* into
    an index.html report written next to it.

    Voice recordings are transcoded to mp3 with ffmpeg so browsers can play
    them inline. Exits via errexit() on any fatal problem; exits 0 when the
    track has no suitable waypoints.
    """
    import subprocess  # local import: only needed for the ffmpeg call

    wpts = []
    dirname = os.path.dirname(filename)

    def makeRow(id, wpt):
        # one table row per waypoint; photos get a thumbnail link, voice
        # notes an embedded <audio> player
        rv = '<tr><td>' + str(id) + '</td><td>' + wpt['lat'] + '</td><td>' + wpt['lon'] + '</td>' + '<td>' + wpt['ele'] + '</td>'
        if wpt['name'] == 'Photo':
            rv += '<td><a href="' + wpt['link'] + '"><img src="' + wpt['link'] + '" height="32"></td></a>'
        else:
            auoname = os.path.join(dirname, wpt['link'])  # Audio Old Name
            aunname = auoname[:-4] + 'mp3'  # Audio New Name
            if not os.path.isfile(auoname):
                errexit('Audio file not found: "' + auoname + '"')
            # security fix: pass an argument vector instead of a shell string;
            # os.system() allowed shell injection through crafted filenames
            subprocess.run(['ffmpeg', '-y', '-i', auoname, aunname])
            rv += '<td><audio controls><source src="' + wpt['link'][:-4] + \
                  'mp3" type="audio/mpeg">Your browser does not support the audio element.</audio></td>'
        return rv + '</td>'

    if not filename.endswith('.gpx'):
        errexit('File with non-GPX extension given: "' + filename + '"')
    with open(filename, 'r') as pFile:
        od1 = xmltodict.parse(pFile.read())
    print(od1)
    if 'gpx' in od1 and 'wpt' in od1['gpx']:
        print('Found some waypoints...')
        for wp in od1['gpx']['wpt']:
            if wp['name'] in ('Voice recording', 'Photo'):
                wpts.append({'lat': wp['@lat'], 'lon': wp['@lon'], 'ele': wp['ele'],
                             'name': wp['name'], 'link': wp['link']['text']})
    if len(wpts) < 1:
        # message typo fixed: "where" -> "were"
        print('Looks like there were no suitable waypoints :(')
        sys.exit(0)
    ofname = os.path.join(os.path.dirname(filename), 'index.html')
    print('Will try to write index.html ...')
    # os.path.exists() + os.path.isfile() was redundant; isfile suffices
    if os.path.isfile(ofname):
        if opt_force:
            print('Overwriting...')
        else:
            errexit('Destination HTML file exists! -f to overwrite!')
    try:
        pFile = open(ofname, 'w')
    except OSError:
        errexit('Could not create file for writing: "' + ofname + '"')
    head = """
<html><head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<!-- Latest compiled and minified CSS -->
<title>GPX</title>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css"
integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u"
crossorigin="anonymous"></head>"""
    head += '<body>'
    head += '<div class="panel panel-default"><div class="panel-heading">Generated from file "' + \
            path_leaf(filename) + '"</div><table class="table">'
    # bug fix: close the output file deterministically instead of leaking the
    # handle (the original never called close())
    with pFile:
        pFile.write(head)
        pFile.write('<tr><td>No</td><td>Latitude</td><td>Longitude</td><td>Elevation</td><td>Media</td></tr>')
        for i in range(len(wpts)):
            pFile.write(makeRow(i, wpts[i]))
        pFile.write('</div></table></body></html>')
def process_folder(folder):
    """
    Scan *folder* for geotagged JPEG photos and write a <folder>.gpx file
    with one Photo waypoint per image carrying GPS EXIF data.

    Exits via errexit() on fatal problems (bad folder, no geotagged photos,
    existing target, write errors).
    """
    wpts = []
    exts = ['jpg', 'JPG', 'jpeg', 'JPEG']

    def format_waypoint(wpt):
        # '\n\t\t<ele>227.0</ele>' + \ #TODO FIXME elevation not extracted yet
        rv = '\t<wpt lat="' + str(wpt['lat']) + '" lon="' + str(wpt['lon']) + '">' + \
             '\n\t\t<time>' + wpt['time'] + '</time>' + \
             '\n\t\t<name><![CDATA[Photo]]></name>' + \
             '\n\t\t<link href="' + wpt['fname'] + '">' + \
             '\n\t\t\t<text>' + wpt['fname'] + '</text>' + \
             '\n\t\t</link>' + \
             '\n\t\t<sat>0</sat>' + \
             '\n\t</wpt>'
        return rv

    if not os.path.isdir(folder):
        errexit('Invalid foldername given: "' + folder + '"')
    for dirname, dirnames, filenames in os.walk(folder):
        for fname in filenames:
            # bug fix: fname.split('.')[1] raised IndexError for names
            # without a dot and tested the wrong segment of multi-dot names;
            # compare the real extension instead
            if os.path.splitext(fname)[1][1:] in exts:
                img = Image.open(os.path.join(folder, fname))
                exif_data = get_exif_data(img)
                lat, lon = get_lat_lon(exif_data)
                # skip photos without embedded GPS coordinates
                if lon is None or lat is None:
                    continue
                # NOTE(review): assumes tag 36867 (DateTimeOriginal) exists
                # whenever GPS data does; KeyError otherwise -- confirm
                wpts.append({'lat': round(lat, 7), 'lon': round(lon, 7),
                             'time': exif_data['time'], 'fname': fname})
    if len(wpts) < 1:
        errexit('Could not found photos with GPS tags!')
    gpx_header = '<?xml version="1.0" encoding="UTF-8" ?>' + \
                 '\n<gpx xmlns="http://www.topografix.com/GPX/1/1" version="1.1" creator="gpx_report" ' + \
                 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' + \
                 'xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd ">'
    gpx_footer = '</gpx>'
    gpx_fname = os.path.join(folder, path_leaf(folder) + '.gpx')
    print('Destination filename: ' + gpx_fname)
    if os.path.exists(gpx_fname):
        if os.path.isfile(gpx_fname):
            if opt_force:
                print('Overwriting existing GPX file...')
            else:
                errexit('Target GPX file exists! -f to overwrite!')
        else:
            errexit('Will not create GPX file, probably folder with same name exists!')
    try:
        with open(gpx_fname, 'w') as pFile:
            pFile.write(gpx_header)
            for wpt in wpts:
                pFile.write(format_waypoint(wpt))
            pFile.write(gpx_footer)
    except OSError:
        errexit('Write error. Permission issue?')
    except Exception:
        # bug fix: narrowed from a bare except, which also swallowed
        # SystemExit and KeyboardInterrupt
        errexit('Something went wrong.')
# command-line entry point: parse options, validate the input path, and
# dispatch to the GPX-report or photo-folder workflow
if __name__ == '__main__':
    try:
        # -g/--gpx: GPX file to render; -d/--dir: photo folder to scan;
        # -f/--force: overwrite outputs; -h/--help: print usage
        opts, args = getopt.getopt(sys.argv[1:], 'hfg:d:', ['gpx=', 'dir=', 'help', 'force'])
    except getopt.GetoptError:
        print(_usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            print('Help:\n' + _usage)
            sys.exit(0)
        elif opt in ('-d', '--dir'):
            opt_dir = arg
        elif opt in ('-g', '--gpx'):
            opt_gpx = arg
        elif opt in ('-f', '--force'):
            opt_force = True
        else:
            print(_usage)
            errexit('Invalid option given: "' + opt + '"')
    # at least one input mode is required
    if opt_dir == '' and opt_gpx == '':
        errexit('use either -g or -d for input!')
    if opt_gpx != '':
        if not os.path.isfile(opt_gpx):
            errexit('Invalid parameter! Not a file "' + opt_gpx + '"')
        try:
            # probe readability before handing the path to process_GPX
            pFile = open(opt_gpx, 'r')
            pFile.close()
        except OSError:
            errexit('Invalid parameter! Can not open "' + opt_gpx + '"')
        process_GPX(opt_gpx)
    if opt_dir != '':
        if not os.path.isdir(opt_dir):
            errexit('Invalid parameter! Not a folder "' + opt_dir + '"')
        process_folder(opt_dir)
|
"""
This file contains the database schema in SQLAlchemy format.
"""
from sqlalchemy import event
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.engine import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship as sqlalchemy_relationship
Base = declarative_base()
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
    """
    Auto enable foreign keys for SQLite.

    SQLite ships with foreign-key enforcement disabled by default; other
    backends enforce foreign keys natively and reject this PRAGMA, so the
    hook is a no-op for non-SQLite DBAPI connections.
    """
    import sqlite3  # local import: only used to recognize SQLite connections
    # play well with other DB backends
    if isinstance(dbapi_connection, sqlite3.Connection):
        cursor = dbapi_connection.cursor()
        cursor.execute("PRAGMA foreign_keys=ON")
        cursor.close()
class Association(Base):
    """
    Link table: one row per (left paper, relationship, right paper) triple,
    to be read "left RELATION right".
    """
    __tablename__ = "association"
    id = Column(Integer, primary_key=True)
    left_id = Column(Integer, ForeignKey("papers.id", ondelete="CASCADE"))
    right_id = Column(Integer, ForeignKey("papers.id", ondelete="CASCADE"))
    relationship_id = Column(Integer,
                             ForeignKey("relationships.id",
                                        ondelete="CASCADE"))
    # passive_deletes lets the database-level ON DELETE CASCADE run instead
    # of the ORM nulling out child foreign keys on parent delete
    right_paper = sqlalchemy_relationship("Paper",
                                          foreign_keys=right_id,
                                          back_populates="related_by",
                                          passive_deletes=True)
    relationship = sqlalchemy_relationship("Relationship",
                                           passive_deletes=True)
    left_paper = sqlalchemy_relationship("Paper",
                                         foreign_keys=left_id,
                                         back_populates="related_to",
                                         passive_deletes=True)
class Paper(Base):
    """
    A paper identified by DOI and/or arXiv id, linked to other papers
    through Association rows.
    """
    __tablename__ = "papers"
    id = Column(Integer, primary_key=True)
    doi = Column(String(), nullable=True, unique=True)
    arxiv_id = Column(String(25), nullable=True, unique=True)
    # related_to are papers related to this paper (this_paper R …)
    # passive_deletes lets the database-level ON DELETE CASCADE run instead
    # of the ORM nulling out child foreign keys on parent delete
    related_to = sqlalchemy_relationship("Association",
                                         foreign_keys="Association.left_id",
                                         back_populates="left_paper",
                                         passive_deletes=True)
    # related_by are papers referenced by this paper (… R this_paper)
    related_by = sqlalchemy_relationship("Association",
                                         foreign_keys="Association.right_id",
                                         back_populates="right_paper",
                                         passive_deletes=True)

    def __repr__(self):
        return "<Paper(id='%d', doi='%s', arxiv_id='%s')>" % (
            self.id,
            self.doi,
            self.arxiv_id,
        )

    def json_api_repr(self):
        """
        Dict to dump for the JSON API.
        """
        relationships = [a.relationship.name for a in self.related_to]
        return {
            "types": self.__tablename__,
            "id": self.id,
            "attributes": {
                "doi": self.doi,
                "arxiv_id": self.arxiv_id,
            },
            "links": {
                "self": "/papers/%d" % (self.id,)
            },
            "relationships": {
                k: {
                    "links": {
                        "related": (
                            "/papers/%d/relationships/%s?reverse={reverse}" %
                            (self.id, k)
                        )
                    }
                }
                for k in relationships
            }
        }
class Relationship(Base):
    """Lookup table naming the kinds of paper-to-paper relations."""
    __tablename__ = "relationships"
    id = Column(Integer, primary_key=True)
    name = Column(String(), unique=True)
    # passive_deletes lets the database-level ON DELETE CASCADE remove the
    # dependent Association rows
    associations = sqlalchemy_relationship("Association",
                                           back_populates="relationship",
                                           passive_deletes=True)
Fix ON DELETE CASCADE issue on relationships
"""
This file contains the database schema in SQLAlchemy format.
"""
import sqlite3
from sqlalchemy import event
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.engine import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship as sqlalchemy_relationship
Base = declarative_base()
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
    """
    Auto enable foreign keys for SQLite.

    Other backends enforce foreign keys natively and reject this PRAGMA,
    so only act on SQLite DBAPI connections.
    """
    # Play well with other DB backends; isinstance is the idiomatic check
    # (and, unlike `type(...) is`, also matches Connection subclasses)
    if isinstance(dbapi_connection, sqlite3.Connection):
        cursor = dbapi_connection.cursor()
        cursor.execute("PRAGMA foreign_keys=ON")
        cursor.close()
class Association(Base):
    """
    Link table: one row per (left paper, relationship, right paper) triple.
    """
    # Relationships are to be read "left RELATION right"
    __tablename__ = "association"
    id = Column(Integer, primary_key=True)
    left_id = Column(Integer, ForeignKey("papers.id", ondelete="CASCADE"))
    right_id = Column(Integer, ForeignKey("papers.id", ondelete="CASCADE"))
    relationship_id = Column(Integer,
                             ForeignKey("relationships.id",
                                        ondelete="CASCADE"))
    # passive_deletes lets the database-level ON DELETE CASCADE run instead
    # of the ORM nulling out child foreign keys on parent delete
    right_paper = sqlalchemy_relationship("Paper",
                                          foreign_keys=right_id,
                                          back_populates="related_by",
                                          passive_deletes=True)
    relationship = sqlalchemy_relationship("Relationship",
                                           passive_deletes=True)
    left_paper = sqlalchemy_relationship("Paper",
                                         foreign_keys=left_id,
                                         back_populates="related_to",
                                         passive_deletes=True)
class Paper(Base):
    """A paper, identified by DOI and/or arXiv id, linked to other papers
    through Association rows."""
    __tablename__ = "papers"
    id = Column(Integer, primary_key=True)
    # Both identifiers are optional, but unique when present.
    doi = Column(String(), nullable=True, unique=True)
    arxiv_id = Column(String(25), nullable=True, unique=True)
    # related_to are papers related to this paper (this_paper R …)
    related_to = sqlalchemy_relationship("Association",
                                         foreign_keys="Association.left_id",
                                         back_populates="left_paper",
                                         passive_deletes=True)
    # related_by are papers referenced by this paper (… R this_paper)
    related_by = sqlalchemy_relationship("Association",
                                         foreign_keys="Association.right_id",
                                         back_populates="right_paper",
                                         passive_deletes=True)
    def __repr__(self):
        """Debug representation. Uses %d, so `id` must be set (persisted row)."""
        return "<Paper(id='%d', doi='%s', arxiv_id='%s')>" % (
            self.id,
            self.doi,
            self.arxiv_id,
        )
    def json_api_repr(self):
        """
        Dict to dump for the JSON API.
        """
        # Relation-type names this paper participates in as the left side.
        relationships = [a.relationship.name for a in self.related_to]
        return {
            "types": self.__tablename__,
            "id": self.id,
            "attributes": {
                "doi": self.doi,
                "arxiv_id": self.arxiv_id,
            },
            "links": {
                "self": "/papers/%d" % (self.id,)
            },
            "relationships": {
                k: {
                    "links": {
                        # "{reverse}" is left as a literal URI-template
                        # placeholder for the client to fill in.
                        "related": (
                            "/papers/%d/relationships/%s?reverse={reverse}" %
                            (self.id, k)
                        )
                    }
                }
                for k in relationships
            }
        }
class Relationship(Base):
    """A named relation type; Association rows reference it via relationship_id."""
    __tablename__ = "relationships"
    id = Column(Integer, primary_key=True)
    # Relation-type name, unique across the table.
    name = Column(String(), unique=True)
    # All Association rows carrying this relation type; passive_deletes
    # defers cascade handling to the database's ON DELETE CASCADE.
    associations = sqlalchemy_relationship("Association",
                                           back_populates="relationship",
                                           passive_deletes=True)
|
import datetime
import logging
import hashlib
import hmac
import json
import traceback
from app.models import SocialNetworkApp, SocialNetworkAppUser, Initiative
from app.sync import save_sn_post, publish_idea_cp, save_sn_comment, publish_comment_cp, save_sn_vote, \
delete_post, delete_comment, delete_vote
from app.utils import get_timezone_aware_datetime, calculate_token_expiration_time
from connectors.social_network import Facebook
from django.conf import settings
from django.http import HttpResponse, HttpResponseForbidden
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from django.utils.translation import activate, ugettext as _
logger = logging.getLogger(__name__)
def _process_post(post_id, update, fb_app, u_datetime):
template_url_post = 'https://www.facebook.com/{}/posts/{}'
if not 'message' in update.keys() or not update['message'].strip():
# Posts without text are ignored
return None
url = template_url_post.format(post_id.split('_')[0],post_id.split('_')[1])
post = {'id': post_id, 'text': update['message'], 'title': '',
'user_info': {'name': update['sender_name'], 'id': str(update['sender_id'])},
'url': url, 'datetime': u_datetime, 'positive_votes': 0, 'negative_votes': 0,
'comments': 0}
ret_data = save_sn_post(fb_app, post)
if ret_data: publish_idea_cp(ret_data['idea'])
def _process_comment(comment_id, comment_raw, fb_app, c_datetime):
if not comment_raw['message'].strip():
# Comments without text are ignored
return None
if comment_raw['post_id'] == comment_raw['parent_id']:
parent_type = 'post'
else:
parent_type = 'comment'
comment = {'id': comment_id, 'text': comment_raw['message'],
'user_info': {'name': comment_raw['sender_name'], 'id': str(comment_raw['sender_id'])},
'datetime': c_datetime, 'positive_votes': 0, 'negative_votes': 0, 'url': None,
'parent_type': parent_type, 'parent_id': comment_raw['parent_id'], 'comments': 0}
ret_data = save_sn_comment(fb_app, comment)
if ret_data: publish_comment_cp(ret_data['comment'])
def _generate_like_id(like_raw):
return like_raw['parent_id'].split('_')[1]+'_'+str(like_raw['sender_id'])
def _process_like(like_raw, fb_app, l_datetime):
if like_raw['post_id'] == like_raw['parent_id']:
parent_type = 'post'
else:
parent_type = 'comment'
like = {'id': _generate_like_id(like_raw),
'user_info': {'id': str(like_raw['sender_id']), 'name': like_raw['sender_name']},
'parent_type': parent_type, 'parent_id': like_raw['parent_id'], 'value': 1,
'datetime': l_datetime}
save_sn_vote(fb_app, like)
def _process_update(fb_app, update, u_datetime):
if update['item'] == 'post' or update['item'] == 'share' or update['item'] == 'status':
post_id = str(update['post_id'])
if update['verb'] == 'add':
_process_post(post_id, update, fb_app, u_datetime)
elif update['verb'] == 'remove':
delete_post(post_id)
else:
logger.info('Action type {} are ignored'.format(update['verb']))
elif update['item'] == 'comment':
comment_id = str(update['comment_id'])
if update['verb'] == 'add':
_process_comment(comment_id, update, fb_app, u_datetime)
elif update['verb'] == 'remove':
delete_comment(comment_id)
else:
logger.info('Action type {} are ignored'.format(update['verb']))
elif update['item'] == 'like':
if update['verb'] == 'add':
_process_like(update, fb_app, u_datetime)
elif update['verb'] == 'remove':
delete_vote(_generate_like_id(update))
else:
logger.info('Action type {} are ignored'.format(update['verb']))
else:
# Ignore the rest
logger.info('Updates of type {} are ignored'.format(update['item']))
def _get_datetime(raw_datetime):
try:
dt = datetime.datetime.fromtimestamp(raw_datetime)
if timezone.is_naive(dt):
return get_timezone_aware_datetime(dt).isoformat()
else:
return dt.isoformat()
except Exception as e:
logger.warning('Error when trying to calculate the update datetime. Reason: {}'.format(e))
logger.warning(traceback.format_exc())
return None
def _encode_payload(payload):
try:
if type(payload) == type(' '.decode()):
return payload.encode()
else:
return payload
except Exception as e:
logger.warning('Error when trying to encode a payload. Reason: {}'.format(e))
logger.warning(traceback.format_exc())
return None
def _process_post_request(fb_app, exp_signature, payload):
# Save the current signature
fb_app.last_real_time_update_sig = str(exp_signature)
fb_app.save()
req_json = json.loads(payload)
req_json = _encode_payload(req_json)
if req_json['object'] == fb_app.object_real_time_updates:
entries = req_json['entry']
for entry in entries:
if entry['id'] == fb_app.page_id:
e_datetime = _get_datetime(entry['time'])
if e_datetime:
changes = entry['changes']
for change in changes:
if change['field'] == fb_app.field_real_time_updates:
_process_update(fb_app, change['value'], e_datetime)
else:
logger.info('Unknown update field. Expected: {}, received: {}'.
format(fb_app.field_real_time_updates, change['field']))
else:
logger.info('Unknown page id {}. Update will be ignored'.format(entry['id']))
else:
logger.info('Unknown update objects. Expected: {}, received: {}'.
format(fb_app.object_real_time_updates, req_json['object']))
def _calculate_signature(app_secret, payload):
try:
return 'sha1=' + hmac.new(str(app_secret), msg=unicode(str(payload)), digestmod=hashlib.sha1).hexdigest()
except Exception as e:
logger.warning('Signature could not be generated. Reason: {}'.format(e))
logger.warning(traceback.format_exc())
return None
def _get_facebook_app():
    """Return the first SocialNetworkApp whose connector is Facebook, else None."""
    for snapp in SocialNetworkApp.objects.all():
        if snapp.connector.name.lower() == 'facebook':
            return snapp
    return None
@csrf_exempt
def fb_real_time_updates(request):
fb_app = _get_facebook_app()
if fb_app:
if request.method == 'GET':
challenge = request.GET.get('hub.challenge')
token = request.GET.get('hub.verify_token')
if fb_app.token_real_time_updates == token:
return HttpResponse(challenge)
elif request.method == 'POST':
req_signature = request.META.get('HTTP_X_HUB_SIGNATURE')
exp_signature = _calculate_signature(fb_app.app_secret, request.body)
if req_signature == exp_signature and \
not exp_signature == fb_app.last_real_time_update_sig:
# I'm comparing the current signature against the last one
# to discard duplicates that seem to arrive consecutively
_process_post_request(fb_app, exp_signature, request.body)
return HttpResponse()
else:
logger.info('The received signature does not correspond to the expected one or '
'the request is a duplicate')
return HttpResponseForbidden()
def is_supported_language(language_code):
    """Return True when `language_code` is one of the codes in settings.LANGUAGES."""
    return language_code in dict(settings.LANGUAGES)
def index(request):
    """Render the landing page, activated in the visitor's preferred language.

    Parses the Accept-Language header; the first supported language code
    wins, otherwise English is activated.
    """
    # Detect the default language to show the page
    # If the preferred language is supported, the page will be presented in that language
    # Otherwise english will be chosen
    language_to_render = None
    browser_language_code = request.META.get('HTTP_ACCEPT_LANGUAGE', None)
    # Bug fix: the previous debug log concatenated the header value with a
    # string, raising TypeError whenever the header was absent (None).
    # Logging it lazily (and only when present) is safe.
    if browser_language_code:
        logger.info('Languages info: %s', browser_language_code)
        # Keep plain language ranges; entries carrying "q=" weights are dropped.
        languages = [language for language in browser_language_code.split(',') if
                     '=' not in language]
        for language in languages:
            language_code = language.split('-')[0]
            if is_supported_language(language_code):
                language_to_render = language_code
                break
    if not language_to_render:
        activate('en')
    else:
        activate(language_to_render)
    return render(request, 'app/index.html')
def login_fb(request):
fb_app = _get_facebook_app()
access_token = request.GET.get('access_token')
user_id = request.GET.get('user_id')
ret_token = Facebook.get_long_lived_access_token(fb_app.app_id, fb_app.app_secret,
access_token)
try:
user = SocialNetworkAppUser.objects.get(external_id=user_id)
user.access_token = ret_token['access_token']
user.access_token_exp = calculate_token_expiration_time(ret_token['expiration'])
user.save()
except SocialNetworkAppUser.DoesNotExist:
user_fb = Facebook.get_info_user(fb_app, user_id, access_token)
new_app_user = {'email': user_fb['email'], 'snapp': fb_app, 'access_token': ret_token['access_token'],
'access_token_exp': calculate_token_expiration_time(ret_token['expiration']),
'external_id': user_id}
if 'name' in user_fb.keys():
new_app_user.update({'name': user_fb['name']})
if 'url' in user_fb.keys():
new_app_user.update({'url': user_fb['url']})
user = SocialNetworkAppUser(**new_app_user)
user.save()
return redirect('/')
def check_user(request):
user_id = request.GET.get('user_id')
try:
msg_logged = _('Congrats!, You are already logged into')
msg_group = _('{}group{}').format('<a href="{}" target="_blank"><u>','</u></a>')
msg_join = _('Join the ')
msg_ini = _('of the initiative to start participate from Facebook')
user = SocialNetworkAppUser.objects.get(external_id=user_id)
# Taking (hardcoded) the first active initiative where the user participate in
fb_app = user.snapp
for initiative in fb_app.initiative_set.all():
if initiative.active:
msg_group = msg_group.format(initiative.social_network.all()[0].community.url)
return HttpResponse(msg_logged + ' <b>Social Ideation App</b>. ' + msg_join + msg_group + ' ' + msg_ini)
return HttpResponse(msg_logged)
except SocialNetworkAppUser.DoesNotExist:
return HttpResponse()
Remove debug statements
import datetime
import logging
import hashlib
import hmac
import json
import traceback
from app.models import SocialNetworkApp, SocialNetworkAppUser, Initiative
from app.sync import save_sn_post, publish_idea_cp, save_sn_comment, publish_comment_cp, save_sn_vote, \
delete_post, delete_comment, delete_vote
from app.utils import get_timezone_aware_datetime, calculate_token_expiration_time
from connectors.social_network import Facebook
from django.conf import settings
from django.http import HttpResponse, HttpResponseForbidden
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from django.utils.translation import activate, ugettext as _
logger = logging.getLogger(__name__)
def _process_post(post_id, update, fb_app, u_datetime):
    """Save a Facebook post update and publish the resulting idea.

    :param post_id: composite Facebook id '<page_id>_<post_id>'
    :param update: raw change payload of the real-time update
    :param fb_app: the SocialNetworkApp the update belongs to
    :param u_datetime: ISO datetime string of the update
    """
    template_url_post = 'https://www.facebook.com/{}/posts/{}'
    # Idiomatic membership test (was: `not 'message' in update.keys()`).
    if 'message' not in update or not update['message'].strip():
        # Posts without text are ignored
        return None
    # Split once instead of twice; [0] is the page id, [1] the post id.
    id_parts = post_id.split('_')
    url = template_url_post.format(id_parts[0], id_parts[1])
    post = {'id': post_id, 'text': update['message'], 'title': '',
            'user_info': {'name': update['sender_name'], 'id': str(update['sender_id'])},
            'url': url, 'datetime': u_datetime, 'positive_votes': 0, 'negative_votes': 0,
            'comments': 0}
    ret_data = save_sn_post(fb_app, post)
    if ret_data:
        publish_idea_cp(ret_data['idea'])
def _process_comment(comment_id, comment_raw, fb_app, c_datetime):
    """Save a Facebook comment update and propagate it to the platform.

    Comments whose message is empty or whitespace-only are ignored.
    """
    text = comment_raw['message']
    if not text.strip():
        return None
    # A comment whose parent is the post itself is top-level; otherwise
    # it is a reply to another comment.
    if comment_raw['post_id'] == comment_raw['parent_id']:
        target_kind = 'post'
    else:
        target_kind = 'comment'
    comment = {
        'id': comment_id,
        'text': text,
        'user_info': {'name': comment_raw['sender_name'],
                      'id': str(comment_raw['sender_id'])},
        'datetime': c_datetime,
        'positive_votes': 0,
        'negative_votes': 0,
        'url': None,
        'parent_type': target_kind,
        'parent_id': comment_raw['parent_id'],
        'comments': 0,
    }
    ret_data = save_sn_comment(fb_app, comment)
    if ret_data:
        publish_comment_cp(ret_data['comment'])
def _generate_like_id(like_raw):
return like_raw['parent_id'].split('_')[1]+'_'+str(like_raw['sender_id'])
def _process_like(like_raw, fb_app, l_datetime):
    """Record a Facebook like as a positive vote on its parent post or comment."""
    # Parent equal to the post id means the like targets the post itself.
    if like_raw['post_id'] == like_raw['parent_id']:
        target_kind = 'post'
    else:
        target_kind = 'comment'
    vote = {
        'id': _generate_like_id(like_raw),
        'user_info': {'id': str(like_raw['sender_id']),
                      'name': like_raw['sender_name']},
        'parent_type': target_kind,
        'parent_id': like_raw['parent_id'],
        'value': 1,
        'datetime': l_datetime,
    }
    save_sn_vote(fb_app, vote)
def _process_update(fb_app, update, u_datetime):
    """Dispatch a single real-time change to the handler matching its item/verb."""
    item = update['item']
    verb = update['verb']
    if item in ('post', 'share', 'status'):
        post_id = str(update['post_id'])
        if verb == 'add':
            _process_post(post_id, update, fb_app, u_datetime)
        elif verb == 'remove':
            delete_post(post_id)
        else:
            logger.info('Action type {} are ignored'.format(verb))
    elif item == 'comment':
        comment_id = str(update['comment_id'])
        if verb == 'add':
            _process_comment(comment_id, update, fb_app, u_datetime)
        elif verb == 'remove':
            delete_comment(comment_id)
        else:
            logger.info('Action type {} are ignored'.format(verb))
    elif item == 'like':
        if verb == 'add':
            _process_like(update, fb_app, u_datetime)
        elif verb == 'remove':
            delete_vote(_generate_like_id(update))
        else:
            logger.info('Action type {} are ignored'.format(verb))
    else:
        # Ignore the rest
        logger.info('Updates of type {} are ignored'.format(item))
def _get_datetime(raw_datetime):
    """Convert a unix timestamp into a timezone-aware ISO 8601 string.

    Returns None (after logging a warning) if conversion fails.
    """
    try:
        # fromtimestamp yields a naive local datetime on CPython; attach
        # the configured timezone before serialising in that case.
        dt = datetime.datetime.fromtimestamp(raw_datetime)
        if timezone.is_naive(dt):
            return get_timezone_aware_datetime(dt).isoformat()
        else:
            return dt.isoformat()
    except Exception as e:
        logger.warning('Error when trying to calculate the update datetime. Reason: {}'.format(e))
        logger.warning(traceback.format_exc())
        return None
def _encode_payload(payload):
    """Encode a unicode payload to a byte string (Python 2 idiom).

    Under Python 2, `' '.decode()` produces a `unicode` object, so the
    type check is effectively "is `payload` unicode?". Anything else
    (e.g. the dict produced by json.loads) is returned unchanged.
    Returns None (after logging) on any failure.
    """
    try:
        # NOTE(review): exact-type comparison; isinstance(payload, unicode)
        # would also accept unicode subclasses -- confirm intent. Under
        # Python 3, str has no .decode, so this path raises and returns None.
        if type(payload) == type(' '.decode()):
            return payload.encode()
        else:
            return payload
    except Exception as e:
        logger.warning('Error when trying to encode a payload. Reason: {}'.format(e))
        logger.warning(traceback.format_exc())
        return None
def _process_post_request(fb_app, exp_signature, payload):
    """Handle the body of a signature-verified real-time update POST.

    Stores `exp_signature` on the app (used by the caller to discard
    consecutive duplicates), then walks every entry/change in the payload
    and hands matching changes to _process_update.
    """
    # Save the current signature
    fb_app.last_real_time_update_sig = str(exp_signature)
    fb_app.save()
    req_json = json.loads(payload)
    # NOTE(review): _encode_payload returns non-unicode objects unchanged,
    # so for the dict produced by json.loads this looks like a no-op --
    # confirm whether it is still needed.
    req_json = _encode_payload(req_json)
    if req_json['object'] == fb_app.object_real_time_updates:
        entries = req_json['entry']
        for entry in entries:
            # Only process updates for the configured page.
            if entry['id'] == fb_app.page_id:
                e_datetime = _get_datetime(entry['time'])
                if e_datetime:
                    changes = entry['changes']
                    for change in changes:
                        if change['field'] == fb_app.field_real_time_updates:
                            _process_update(fb_app, change['value'], e_datetime)
                        else:
                            logger.info('Unknown update field. Expected: {}, received: {}'.
                                        format(fb_app.field_real_time_updates, change['field']))
            else:
                logger.info('Unknown page id {}. Update will be ignored'.format(entry['id']))
    else:
        logger.info('Unknown update objects. Expected: {}, received: {}'.
                    format(fb_app.object_real_time_updates, req_json['object']))
def _calculate_signature(app_secret, payload):
    """Compute the expected Facebook 'X-Hub-Signature' header value.

    Returns 'sha1=<hexdigest>' of the HMAC-SHA1 of `payload` keyed with
    the app secret, or None (after logging) if the computation fails.
    Python 2 only: relies on the builtin `unicode`.
    """
    try:
        return 'sha1=' + hmac.new(str(app_secret), msg=unicode(str(payload)), digestmod=hashlib.sha1).hexdigest()
    except Exception as e:
        logger.warning('Signature could not be generated. Reason: {}'.format(e))
        logger.warning(traceback.format_exc())
        return None
def _get_facebook_app():
    """Return the first SocialNetworkApp whose connector is Facebook, else None."""
    for snapp in SocialNetworkApp.objects.all():
        if snapp.connector.name.lower() == 'facebook':
            return snapp
    return None
@csrf_exempt
def fb_real_time_updates(request):
    """Webhook endpoint for Facebook real-time updates.

    GET: subscription verification -- echoes `hub.challenge` when the
    verify token matches the configured one.
    POST: processes the update after validating its X-Hub-Signature and
    discarding a request whose signature equals the previous one
    (consecutive duplicates).
    Every unhandled case falls through to 403.
    """
    fb_app = _get_facebook_app()
    if fb_app:
        if request.method == 'GET':
            challenge = request.GET.get('hub.challenge')
            token = request.GET.get('hub.verify_token')
            if fb_app.token_real_time_updates == token:
                return HttpResponse(challenge)
        elif request.method == 'POST':
            req_signature = request.META.get('HTTP_X_HUB_SIGNATURE')
            exp_signature = _calculate_signature(fb_app.app_secret, request.body)
            if req_signature == exp_signature and \
               not exp_signature == fb_app.last_real_time_update_sig:
                # I'm comparing the current signature against the last one
                # to discard duplicates that seem to arrive consecutively
                _process_post_request(fb_app, exp_signature, request.body)
                return HttpResponse()
            else:
                logger.info('The received signature does not correspond to the expected one or '
                            'the request is a duplicate')
    return HttpResponseForbidden()
def is_supported_language(language_code):
    """Return True when `language_code` is one of the codes in settings.LANGUAGES."""
    return language_code in dict(settings.LANGUAGES)
def index(request):
    """Render the landing page, activated in the visitor's preferred language.

    Parses the Accept-Language header; the first supported language code
    wins, otherwise English is activated.
    """
    # Detect the default language to show the page
    # If the preferred language is supported, the page will be presented in that language
    # Otherwise english will be chosen
    language_to_render = None
    browser_language_code = request.META.get('HTTP_ACCEPT_LANGUAGE', None)
    if browser_language_code:
        # Keep plain language ranges; entries carrying "q=" weights are dropped.
        languages = [language for language in browser_language_code.split(',') if
                     '=' not in language]
        for language in languages:
            language_code = language.split('-')[0]
            if is_supported_language(language_code):
                language_to_render = language_code
                break
    if not language_to_render:
        activate('en')
    else:
        activate(language_to_render)
    return render(request, 'app/index.html')
def login_fb(request):
    """Exchange a short-lived FB access token for a long-lived one and store it.

    Updates the existing SocialNetworkAppUser if the external id is known;
    otherwise fetches the user's profile from Facebook and creates one.
    Always redirects back to the landing page.
    """
    fb_app = _get_facebook_app()
    access_token = request.GET.get('access_token')
    user_id = request.GET.get('user_id')
    ret_token = Facebook.get_long_lived_access_token(fb_app.app_id, fb_app.app_secret,
                                                     access_token)
    try:
        user = SocialNetworkAppUser.objects.get(external_id=user_id)
        user.access_token = ret_token['access_token']
        user.access_token_exp = calculate_token_expiration_time(ret_token['expiration'])
        user.save()
    except SocialNetworkAppUser.DoesNotExist:
        user_fb = Facebook.get_info_user(fb_app, user_id, access_token)
        new_app_user = {'email': user_fb['email'], 'snapp': fb_app, 'access_token': ret_token['access_token'],
                        'access_token_exp': calculate_token_expiration_time(ret_token['expiration']),
                        'external_id': user_id}
        # Name and profile URL are optional in the Facebook response.
        if 'name' in user_fb.keys():
            new_app_user.update({'name': user_fb['name']})
        if 'url' in user_fb.keys():
            new_app_user.update({'url': user_fb['url']})
        user = SocialNetworkAppUser(**new_app_user)
        user.save()
    return redirect('/')
def check_user(request):
    """Tell the caller whether `user_id` is already logged into the app.

    Known users get a message linking the community group of the first
    active initiative of their app; unknown users get an empty response.
    """
    user_id = request.GET.get('user_id')
    try:
        msg_logged = _('Congrats!, You are already logged into')
        msg_group = _('{}group{}').format('<a href="{}" target="_blank"><u>','</u></a>')
        msg_join = _('Join the ')
        msg_ini = _('of the initiative to start participate from Facebook')
        user = SocialNetworkAppUser.objects.get(external_id=user_id)
        # Taking (hardcoded) the first active initiative where the user participate in
        fb_app = user.snapp
        for initiative in fb_app.initiative_set.all():
            if initiative.active:
                msg_group = msg_group.format(initiative.social_network.all()[0].community.url)
                return HttpResponse(msg_logged + ' <b>Social Ideation App</b>. ' + msg_join + msg_group + ' ' + msg_ini)
        return HttpResponse(msg_logged)
    except SocialNetworkAppUser.DoesNotExist:
        return HttpResponse()
# Perfect Tic Tac Toe AI, it will never lose.
# Created and programmed by Jordan Oberstein.
import random
X = "X"
O = "O"
empty = " "
S = [" ", " ", " ", " ", " ", " ", " ", " ", " "]
turn = 0
def Board(): # prints board
print "\n ",S[0],"|",S[1],"|",S[2]
print " ","---------"
print " ",S[3],"|",S[4],"|",S[5]
print " ","---------"
print " ",S[6],"|",S[7],"|",S[8],"\n"
def Instructions():
print "This is a game of Tic Tac Toe, the computer will never lose."
print "Fill in spaces on the board according to the board printed below.\n"
print " 1 | 2 | 3"
print " ---------"
print " 4 | 5 | 6"
print " ---------"
print " 7 | 8 | 9\n"
print "If you get 3 of your leters in a row (horizontally, vertically, or diagonally), then you win!"
print "Good luck!\n"
def Lines(): # all win conditions
global WinConditions, row1, row2, row3, col1, col2, col3, dia1, dia2
row1 = (S[0], S[1], S[2])
row2 = (S[3], S[4], S[5])
row3 = (S[6], S[7], S[8])
col1 = (S[0], S[3], S[6])
col2 = (S[1], S[4], S[7])
col3 = (S[2], S[5], S[8])
dia1 = (S[0], S[4], S[8])
dia2 = (S[2], S[4], S[6])
WinConditions = [row1, row2, row3, col1, col2, col3, dia1, dia2]
def Letter(): # assigns chosen letter to player
global player, cpu
cpu = empty
player = raw_input('What letter would you like to be: ')
while not (player == "X" or player == "O" or player == "x" or player == "o"):
player = raw_input('What letter would you like to be: ')
if player == X or player == 'x':
player = X
cpu = O
if player == O or player == "o":
player = O
cpu = X
def WhoGoesFirst(): # randomly chooses order of turns
global order
Letter()
choice = random.choice('XO')
print choice + " will go first"
if choice == X:
order = [X, O, X, O, X, O, X, O, X]
if choice == O:
order = [O, X, O, X, O, X, O, X, O]
print order,"is the order."
print player + " is the player."
print cpu + " is the cpu."
def CheckWin():
global winner
winner = empty
for i in range(0, 8):
if WinConditions[i] == (X, X, X):
winner = "X"
print winner + " wins using WinCondition",WinConditions[i]
if WinConditions[i] == (O, O, O):
winner = "O"
print winner + " wins using WinCondition",WinConditions[i]
i += 1
def Process(): # process of following 3 functions used at end of each turn
Board()
Lines()
CheckWin()
def MovePlayer(turn): # function for player's move
global moveP
moveP = raw_input('Choose a Space from 1-9 for ' + player + ' to Go: ')
while not moveP.isdigit() or int(moveP) not in range (1, 10) or S[int(moveP) - 1] is not empty:
moveP = raw_input('Choose a Space from 1-9 for ' + player + ' to Go: ')
S[int(moveP) - 1] = order[turn]
print "The Player has gone on space",moveP,"index",int(moveP) - 1
Process()
def CWin(): # checks if cpu can win
global moveC
print moveC
if row1 == (empty, cpu, cpu) or col1 == (empty, cpu, cpu) or dia1 == (empty, cpu, cpu):
moveC = 0
if row1 == (cpu, empty, cpu) or col2 == (empty, cpu, cpu):
moveC = 1
if row1 == (cpu, cpu, empty) or col3 == (empty, cpu, cpu) or dia2 == (empty, cpu, cpu):
moveC = 2
if row2 == (empty, cpu, cpu) or col1 == (cpu, empty, cpu):
moveC = 3
if row2 == (cpu, empty, cpu) or col2 == (cpu, empty, cpu) or dia1 == (cpu, empty, cpu) or dia2 == (cpu, empty, cpu):
moveC = 4
if row2 == (cpu, cpu, empty) or col3 == (cpu, empty, cpu):
moveC = 5
if row3 == (empty, cpu, cpu) or col1 == (cpu, cpu, empty) or dia2 == (cpu, cpu, empty):
moveC = 6
if row3 == (cpu, empty, cpu) or col2 == (cpu, cpu, empty):
moveC = 7
if row3 == (cpu, cpu, empty) or col3 == (cpu, cpu, empty) or dia1 == (cpu, cpu, empty):
moveC = 8
def CBlock(): # checks if player can win (blocks forks)
global moveC, BlockFork
print moveC
if row1 == (empty, player, player) or col1 == (empty, player, player) or dia1 == (empty, player, player):
moveC = 0
BlockFork.append(moveC)
if row1 == (player, empty, player) or col2 == (empty, player, player):
moveC = 1
BlockFork.append(moveC)
if row1 == (player, player, empty) or col3 == (empty, player, player) or dia2 == (empty, player, player):
moveC = 2
BlockFork.append(moveC)
if row2 == (empty, player, player) or col1 == (player, empty, player):
moveC = 3
BlockFork.append(moveC)
if row2 == (player, empty, player) or col2 == (player, empty, player) or dia1 == (player, empty, player) or dia2 == (player, empty, player):
moveC = 4
BlockFork.append(moveC)
if row2 == (player, player, empty) or col3 == (player, empty, player):
moveC = 5
BlockFork.append(moveC)
if row3 == (empty, player, player) or col1 == (player, player, empty) or dia2 == (player, player, empty):
moveC = 6
BlockFork.append(moveC)
if row3 == (player, empty, player) or col2 == (player, player, empty):
moveC = 7
BlockFork.append(moveC)
if row3 == (player, player, empty) or col3 == (player, player, empty) or dia1 == (player, player, empty):
moveC = 8
BlockFork.append(moveC)
print "LIST",BlockFork
def Restrict(): # combines previous 2 restrictions into 1 function
print "Before CBlock"
CBlock()
print "Between CWin and CBlock"
CWin()
print "After CWin"
def ZEROMoveCPU(turn): # cpu move for turn 0
moveC = random.randint(0, 4)
while S[moveC * 2] is not empty or moveC == 2:
moveC = random.randint(0, 4)
print moveC,"is random intiger"
S[moveC * 2] = order[turn]
print "The Computer will go on space",(moveC * 2) + 1,"index",moveC * 2
Process()
def ONEMoveCPU(turn): # cpu move for turn 1
moveC = 2
while S[moveC * 2] is not empty:
moveC = random.randint(0, 4)
while S[moveC * 2] is not empty:
moveC = random.randint(0, 4)
print moveC,"is random intiger"
S[moveC * 2] = order[turn]
print "The Computer will go on space",(moveC * 2) + 1,"index",moveC * 2
Process()
def TWOMoveCPU(turn): # cpu move for turn 2
if (S[1] or S[3] or S[5] or S[7]) == player:
S[4] = order[turn]
print "The Computer will go on space: 4 index 5"
if (S[0] or S[2] or S[6] or S[8]) == player:
moveC = random.randint(0, 4)
while S[moveC * 2] is not empty or moveC == 2:
moveC = random.randint(0, 4)
S[moveC * 2] = order[turn]
print "The Computer will go on space",(moveC * 2) + 1,"index",moveC * 2
if S[4] == player:
if S[0] == cpu:
moveC = 0
S[8] = order[turn]
if S[2] == cpu:
moveC = 2
S[6] = order[turn]
if S[6] == cpu:
moveC = 6
S[2] = order[turn]
if S[8] == cpu:
moveC = 8
S[0] = order[turn]
print "The Computer will go on space",moveC + 1,"index",moveC
Process()
def MoveCPU(turn): # cpu move for turns > 2
global moveC, BlockFork
BlockFork = []
moveC = random.randint(0, 8)
while S[moveC] is not empty:
moveC = random.randint(0, 8)
print moveC,"random move, placeholder"
Restrict()
print moveC,"Final Option for Move C"
S[moveC] = order[turn]
print "The Computer will go on space",moveC + 1,"index",moveC
Process()
def Main(turn): # combines function into complete game
Instructions()
WhoGoesFirst()
Process()
while turn < 9: # gameplay in this loop
if order[turn] == player:
print "turn:",turn
MovePlayer(turn)
if order[turn] == cpu:
print "turn:",turn
if turn == 0:
ZEROMoveCPU(turn)
if turn == 1:
ONEMoveCPU(turn)
if turn == 2:
TWOMoveCPU(turn)
if turn > 2:
MoveCPU(turn)
turn += 1
if winner is not empty:
turn = 9
print winner + " Is tne Winner!\n"
if winner is empty and turn == 9:
print "The Game Is a Tie.\n"
Main(turn)
Remove Trailing Spaces
# Perfect Tic Tac Toe AI, it will never lose.
# Created and programmed by Jordan Oberstein.
import random
# Player markers.
X = "X"
O = "O"
# Value of a board cell that has not been played yet. Note: it is a
# truthy single space, not an empty string.
empty = " "
# The 3x3 board, stored as a flat row-major list of 9 cells.
S = [" ", " ", " ", " ", " ", " ", " ", " ", " "]
# Number of moves played so far (0-8).
turn = 0
def Board(): # prints board
    """Print the current 3x3 board to stdout."""
    print "\n ",S[0],"|",S[1],"|",S[2]
    print " ","---------"
    print " ",S[3],"|",S[4],"|",S[5]
    print " ","---------"
    print " ",S[6],"|",S[7],"|",S[8],"\n"
def Instructions():
print "This is a game of Tic Tac Toe, the computer will never lose."
print "Fill in spaces on the board according to the board printed below.\n"
print " 1 | 2 | 3"
print " ---------"
print " 4 | 5 | 6"
print " ---------"
print " 7 | 8 | 9\n"
print "If you get 3 of your leters in a row (horizontally, vertically, or diagonally), then you win!"
print "Good luck!\n"
def Lines(): # all win conditions
    """Rebuild the eight winning-line tuples from the current board.

    Must be re-run after every move (tuples snapshot cell values).
    """
    global WinConditions, row1, row2, row3, col1, col2, col3, dia1, dia2
    row1 = (S[0], S[1], S[2])
    row2 = (S[3], S[4], S[5])
    row3 = (S[6], S[7], S[8])
    col1 = (S[0], S[3], S[6])
    col2 = (S[1], S[4], S[7])
    col3 = (S[2], S[5], S[8])
    dia1 = (S[0], S[4], S[8])
    dia2 = (S[2], S[4], S[6])
    WinConditions = [row1, row2, row3, col1, col2, col3, dia1, dia2]
def Letter(): # assigns chosen letter to player
    """Ask the human which letter to play and give the cpu the other one.

    Accepts X/O in either case; re-prompts until valid.
    """
    global player, cpu
    cpu = empty
    player = raw_input('What letter would you like to be: ')
    while not (player == "X" or player == "O" or player == "x" or player == "o"):
        player = raw_input('What letter would you like to be: ')
    if player == X or player == 'x':
        player = X
        cpu = O
    if player == O or player == "o":
        player = O
        cpu = X
def WhoGoesFirst(): # randomly chooses order of turns
    """Pick letters, then randomly choose who moves first and build the
    9-entry turn order list (alternating letters)."""
    global order
    Letter()
    choice = random.choice('XO')
    print choice + " will go first"
    if choice == X:
        order = [X, O, X, O, X, O, X, O, X]
    if choice == O:
        order = [O, X, O, X, O, X, O, X, O]
    # Debug output of the chosen setup.
    print order,"is the order."
    print player + " is the player."
    print cpu + " is the cpu."
def CheckWin():
global winner
winner = empty
for i in range(0, 8):
if WinConditions[i] == (X, X, X):
winner = "X"
print winner + " wins using WinCondition",WinConditions[i]
if WinConditions[i] == (O, O, O):
winner = "O"
print winner + " wins using WinCondition",WinConditions[i]
i += 1
def Process(): # process of following 3 functions used at end of each turn
    """Redraw the board, refresh the win-condition lines, and test for a winner."""
    Board()
    Lines()
    CheckWin()
def MovePlayer(turn): # function for player's move
    """Prompt the human for a space 1-9, validate it, and place their letter.

    Re-prompts on non-numeric input, out-of-range values, or taken cells.
    """
    global moveP
    moveP = raw_input('Choose a Space from 1-9 for ' + player + ' to Go: ')
    while not moveP.isdigit() or int(moveP) not in range (1, 10) or S[int(moveP) - 1] is not empty:
        moveP = raw_input('Choose a Space from 1-9 for ' + player + ' to Go: ')
    S[int(moveP) - 1] = order[turn]
    print "The Player has gone on space",moveP,"index",int(moveP) - 1
    Process()
def CWin(): # checks if cpu can win
    """If the cpu has two cells of a line with the third empty, set the
    global moveC to the empty cell's index so the cpu completes the win.

    Called after CBlock so a winning move overrides a blocking move.
    """
    global moveC
    # Debug output of the current placeholder move.
    print moveC
    if row1 == (empty, cpu, cpu) or col1 == (empty, cpu, cpu) or dia1 == (empty, cpu, cpu):
        moveC = 0
    if row1 == (cpu, empty, cpu) or col2 == (empty, cpu, cpu):
        moveC = 1
    if row1 == (cpu, cpu, empty) or col3 == (empty, cpu, cpu) or dia2 == (empty, cpu, cpu):
        moveC = 2
    if row2 == (empty, cpu, cpu) or col1 == (cpu, empty, cpu):
        moveC = 3
    if row2 == (cpu, empty, cpu) or col2 == (cpu, empty, cpu) or dia1 == (cpu, empty, cpu) or dia2 == (cpu, empty, cpu):
        moveC = 4
    if row2 == (cpu, cpu, empty) or col3 == (cpu, empty, cpu):
        moveC = 5
    if row3 == (empty, cpu, cpu) or col1 == (cpu, cpu, empty) or dia2 == (cpu, cpu, empty):
        moveC = 6
    if row3 == (cpu, empty, cpu) or col2 == (cpu, cpu, empty):
        moveC = 7
    if row3 == (cpu, cpu, empty) or col3 == (cpu, cpu, empty) or dia1 == (cpu, cpu, empty):
        moveC = 8
def CBlock(): # checks if player can win (blocks forks)
    """If the player threatens to complete a line, set the global moveC to
    the empty cell of that line and record every such cell in BlockFork.

    Multiple threats (a fork) leave all candidate blocks in BlockFork.
    """
    global moveC, BlockFork
    # Debug output of the current placeholder move.
    print moveC
    if row1 == (empty, player, player) or col1 == (empty, player, player) or dia1 == (empty, player, player):
        moveC = 0
        BlockFork.append(moveC)
    if row1 == (player, empty, player) or col2 == (empty, player, player):
        moveC = 1
        BlockFork.append(moveC)
    if row1 == (player, player, empty) or col3 == (empty, player, player) or dia2 == (empty, player, player):
        moveC = 2
        BlockFork.append(moveC)
    if row2 == (empty, player, player) or col1 == (player, empty, player):
        moveC = 3
        BlockFork.append(moveC)
    if row2 == (player, empty, player) or col2 == (player, empty, player) or dia1 == (player, empty, player) or dia2 == (player, empty, player):
        moveC = 4
        BlockFork.append(moveC)
    if row2 == (player, player, empty) or col3 == (player, empty, player):
        moveC = 5
        BlockFork.append(moveC)
    if row3 == (empty, player, player) or col1 == (player, player, empty) or dia2 == (player, player, empty):
        moveC = 6
        BlockFork.append(moveC)
    if row3 == (player, empty, player) or col2 == (player, player, empty):
        moveC = 7
        BlockFork.append(moveC)
    if row3 == (player, player, empty) or col3 == (player, player, empty) or dia1 == (player, player, empty):
        moveC = 8
        BlockFork.append(moveC)
    print "LIST",BlockFork
def Restrict(): # combines previous 2 restrictions into 1 function
    """Apply CBlock then CWin; since both assign moveC, calling CWin last
    lets a winning move take precedence over a blocking move."""
    # Debug trace markers around the two phases.
    print "Before CBlock"
    CBlock()
    print "Between CWin and CBlock"
    CWin()
    print "After CWin"
def ZEROMoveCPU(turn): # cpu move for turn 0
    """CPU opening move: pick a random corner.

    moveC in 0-4 maps to even board indices via moveC*2; moveC == 2
    (the centre, index 4) is excluded, leaving only corners.
    """
    moveC = random.randint(0, 4)
    while S[moveC * 2] is not empty or moveC == 2:
        moveC = random.randint(0, 4)
    print moveC,"is random intiger"
    S[moveC * 2] = order[turn]
    print "The Computer will go on space",(moveC * 2) + 1,"index",moveC * 2
    Process()
def ONEMoveCPU(turn): # cpu move for turn 1
    """CPU reply on turn 1: take the centre if free, otherwise a random
    free even-indexed square (corner or centre).

    NOTE(review): the nested while duplicates the outer condition; a
    single loop would behave identically.
    """
    moveC = 2  # centre candidate (board index 4 after doubling)
    while S[moveC * 2] is not empty:
        moveC = random.randint(0, 4)
        while S[moveC * 2] is not empty:
            moveC = random.randint(0, 4)
    print moveC,"is random intiger"
    S[moveC * 2] = order[turn]
    print "The Computer will go on space",(moveC * 2) + 1,"index",moveC * 2
    Process()
def TWOMoveCPU(turn): # cpu move for turn 2
    """CPU move for turn 2, branching on where the player went first:
    centre answer to an edge, random corner answer to a corner, and the
    corner opposite the CPU's own corner when the player took the centre.
    """
    # NOTE(review): `(S[1] or S[3] or S[5] or S[7]) == player` compares the
    # first truthy element with player; it is NOT a membership test.  It
    # only behaves like "player is on an edge" if empty squares are falsy —
    # confirm the value used for `empty`.
    if (S[1] or S[3] or S[5] or S[7]) == player:
        S[4] = order[turn]
        # NOTE(review): message transposes space and index (S[4] is
        # space 5, index 4).
        print "The Computer will go on space: 4 index 5"
    if (S[0] or S[2] or S[6] or S[8]) == player:
        # Random free corner (moveC == 2, the centre, is excluded).
        moveC = random.randint(0, 4)
        while S[moveC * 2] is not empty or moveC == 2:
            moveC = random.randint(0, 4)
        S[moveC * 2] = order[turn]
        print "The Computer will go on space",(moveC * 2) + 1,"index",moveC * 2
    if S[4] == player:
        # Player holds the centre: take the corner diagonally opposite the
        # CPU's existing corner.
        if S[0] == cpu:
            moveC = 0
            S[8] = order[turn]
        if S[2] == cpu:
            moveC = 2
            S[6] = order[turn]
        if S[6] == cpu:
            moveC = 6
            S[2] = order[turn]
        if S[8] == cpu:
            moveC = 8
            S[0] = order[turn]
        # NOTE(review): moveC here is the CPU's old corner, so the printed
        # square is not the square that was just taken.
        print "The Computer will go on space",moveC + 1,"index",moveC
    Process()
def MoveCPU(turn): # cpu move for turns > 2
    """Generic CPU move: pick a random free square as a fallback, then let
    Restrict() overwrite moveC with a blocking square and — with highest
    priority — a winning square.
    """
    global moveC, BlockFork
    BlockFork = []  # cleared so CBlock collects this turn's threats only
    moveC = random.randint(0, 8)
    while S[moveC] is not empty:
        moveC = random.randint(0, 8)
    print moveC,"random move, placeholder"
    Restrict()
    print moveC,"Final Option for Move C"
    S[moveC] = order[turn]
    print "The Computer will go on space",moveC + 1,"index",moveC
    Process()
def Main(turn): # combines function into complete game
    """Play one complete game: show instructions, decide who starts, then
    alternate player/CPU moves until somebody wins or all 9 turns pass.

    NOTE(review): the tail call Main(turn) restarts with turn == 9, which
    skips the game loop on replay unless state is reset elsewhere — TODO
    confirm; the recursion also deepens one frame per game.
    """
    Instructions()
    WhoGoesFirst()
    Process()
    while turn < 9: # gameplay in this loop
        if order[turn] == player:
            print "turn:",turn
            MovePlayer(turn)
        if order[turn] == cpu:
            print "turn:",turn
            # Dedicated openings for the first three turns, generic
            # win/block logic afterwards.
            if turn == 0:
                ZEROMoveCPU(turn)
            if turn == 1:
                ONEMoveCPU(turn)
            if turn == 2:
                TWOMoveCPU(turn)
            if turn > 2:
                MoveCPU(turn)
        turn += 1
        if winner is not empty:
            turn = 9  # force the loop to end once somebody has won
            # NOTE(review): "tne" typo in the winner message.
            print winner + " Is tne Winner!\n"
    if winner is empty and turn == 9:
        print "The Game Is a Tie.\n"
    Main(turn)
|
# coding=utf-8
# Filename: k40.py
# pylint: disable=locally-disabled
"""
A collection of k40 related functions and modules.
"""
from __future__ import division, absolute_import, print_function
import os
from itertools import combinations
from collections import defaultdict
from functools import partial
from datetime import datetime
import io
from scipy import optimize
import numpy as np
import h5py
import pickle
import numba as nb
import km3pipe as kp
from km3pipe.io.daq import TMCHData
__author__ = "Jonas Reubelt"
__email__ = "jreubelt@km3net.de"
__status__ = "Development"
log = kp.logger.logging.getLogger(__name__) # pylint: disable=C0103
# log.setLevel(logging.DEBUG)
TIMESLICE_LENGTH = 0.1 # [s]
class K40BackgroundSubtractor(kp.Module):
    """Subtracts random coincidence background from K40 data

    Required Services
    -----------------
    'TwofoldCounts': dict (key=dom_id, value=count matrix)
    'GetMedianPMTRates()': dict (key=dom_id, value=list of pmt rates)
    'GetLivetime()': float

    Output Keys
    -----------
    'CorrectedTwofoldCounts': dict, background corrected K40 counts
    """
    def configure(self):
        # All 465 unordered pairs of the 31 PMT channels.
        self.combs = list(combinations(range(31), 2))

    def process(self, blob):
        print('Subtracting random background calculated from single rates')
        counts = self.services['TwofoldCounts']
        dom_ids = list(counts.keys())
        mean_rates = self.services['GetMedianPMTRates']()
        corrected_counts = {}
        livetime = self.services['GetLivetime']()
        for dom_id in dom_ids:
            try:
                pmt_rates = mean_rates[dom_id]
            except KeyError:
                # No single rates for this DOM: pass the counts through.
                log.warning("Skipping BG correction for DOM {}."
                            .format(dom_id))
                corrected_counts[dom_id] = counts[dom_id]
                continue
            k40_rates = counts[dom_id] / livetime
            # Accidental pair rate estimated as r1 * r2 * 1e-9; presumably
            # the 1e-9 encodes the ns coincidence window — TODO confirm.
            bg_rates = []
            for c in self.combs:
                bg_rates.append(pmt_rates[c[0]] * pmt_rates[c[1]] * 1e-9)
            corrected_counts[dom_id] = (k40_rates.T -
                                        np.array(bg_rates)).T * livetime
        blob["CorrectedTwofoldCounts"] = corrected_counts
        # BUG FIX: close the dump files deterministically instead of
        # leaving open() handles to the garbage collector.
        with open("k40_counts_bg_sub.p", "wb") as f:
            pickle.dump({'data': corrected_counts,
                         'livetime': livetime}, f)
        with open('mean_rates.p', 'wb') as f:
            pickle.dump(mean_rates, f)
        return blob
class IntraDOMCalibrator(kp.Module):
    """Intra DOM calibrator which performs the calibration from K40Counts.

    Parameters
    ----------
    det_id: int
        Detector ID [default: 14]
    ctmin: float
        Minimum cos(angle)
    mode: str ('offline' | 'online')
        Calibration mode [default: 'online']

    Input Keys
    ----------
    'TwofoldCounts': dict (key=dom_id,
                           value=matrix of k40 counts 465x(dt*2+1))
    'CorrectedTwofoldCounts': dict (key=dom_id,
                                    value=matrix of k40 counts 465x(dt*2+1))

    Output Keys
    -----------
    'IntraDOMCalibration': dict (key=dom_id, value=calibration)
    """
    def configure(self):
        det_id = self.get("det_id") or 14
        self.detector = kp.hardware.Detector(det_id=det_id)
        self.ctmin = self.require("ctmin")
        self.mode = self.get("mode", default="online")

    def process(self, blob):
        # Offline mode defers all work to finish().
        if self.mode != 'online':
            return blob
        if 'CorrectedTwofoldCounts' in blob:
            log.info("Using corrected twofold counts")
            fit_background = False
            twofold_counts = blob['CorrectedTwofoldCounts']
        else:
            log.info("No corrected twofold counts found, fitting background.")
            twofold_counts = self.services['TwofoldCounts']
            fit_background = True
        blob['IntraDOMCalibration'] = self.calibrate(twofold_counts,
                                                     fit_background)
        return blob

    def calibrate(self, twofold_counts, fit_background=False):
        """Run calibrate_dom for every DOM and collect the results.

        Parameters
        ----------
        twofold_counts: dict (key=dom_id, value=coincidence count matrix)
        fit_background: bool, fit a flat background under the gaussians

        Returns
        -------
        dict (key=dom_id, value=calibration result)
        """
        print("Starting calibration:")
        calibration = {}
        # BUG FIX: iterate the parameter `twofold_counts` instead of the
        # undefined name `counts`, which raised a NameError.
        for dom_id, data in twofold_counts.items():
            print(" calibrating DOM '{0}'".format(dom_id))
            try:
                calib = calibrate_dom(dom_id, data,
                                      self.detector,
                                      livetime=self.services['GetLivetime'](),
                                      fit_background=fit_background,
                                      ad_fit_shape='exp', ctmin=self.ctmin)
            except RuntimeError:
                log.error(" skipping DOM '{0}'.".format(dom_id))
            else:
                calibration[dom_id] = calib
        return calibration

    def finish(self):
        if self.mode == 'offline':
            print("Starting offline calibration")
            twofold_counts = self.services['TwofoldCounts']
            self.calibrate(twofold_counts, fit_background=True)
class TwofoldCounter(kp.Module):
    """Counts twofold coincidences in timeslice hits per PMT combination.

    Parameters
    ----------
    'tmax': int
        time window of twofold coincidences [ns]
    'dump_filename': str
        name for the dump file

    Input Keys
    ----------
    'TSHits': RawHitSeries

    Services
    --------
    'TwofoldCounts': dict (key=dom_id, value=matrix (465,(dt*2+1)))
    'ResetTwofoldCounts': reset the TwofoldCounts dict
    'GetLivetime()': float
    'DumpTwofoldCounts': Writes twofold counts into 'dump_filename'
    """
    def configure(self):
        self.tmax = self.get("tmax") or 20
        self.dump_filename = self.get("dump_filename")
        self.counts = None
        self.n_timeslices = None
        self.start_time = datetime.utcnow()
        self.reset()
        # NOTE(review): the dict object itself is exposed; a later reset()
        # rebinds self.counts, leaving the exposed service pointing at the
        # old dict — confirm consumers re-fetch the service each time.
        self.expose(self.counts, 'TwofoldCounts')
        self.expose(self.get_livetime, 'GetLivetime')
        self.expose(self.reset, 'ResetTwofoldCounts')
        if self.dump_filename is not None:
            self.expose(self.dump, 'DumpTwofoldCounts')

    def reset(self):
        """Reset coincidence counter"""
        # One (465, 2*tmax+1) matrix per DOM, created lazily on access.
        self.counts = defaultdict(partial(np.zeros, (465, self.tmax * 2 + 1)))
        self.n_timeslices = 0

    def get_livetime(self):
        """Return the accumulated livetime in seconds."""
        return self.n_timeslices * TIMESLICE_LENGTH

    def process(self, blob):
        log.debug("Processing timeslice")
        self.n_timeslices += 1
        hits = blob['TSHits']
        dom_ids = np.unique(hits.dom_id)
        for dom_id in dom_ids:
            mask = hits.dom_id == dom_id
            times = hits.time[mask]
            channel_ids = hits.channel_id[mask]
            # add_to_twofold_matrix expects time-sorted hits.
            sort_idc = np.argsort(times, kind='quicksort')
            add_to_twofold_matrix(times[sort_idc],
                                  channel_ids[sort_idc],
                                  self.counts[dom_id],
                                  tmax=self.tmax)
        return blob

    def dump(self):
        """Write coincidence counts into a Python pickle"""
        print("Dumping data to {}".format(self.dump_filename))
        # BUG FIX: use a context manager instead of an unclosed open();
        # the original relied on the GC to flush and close the handle.
        with open(self.dump_filename, "wb") as f:
            pickle.dump({'data': self.counts,
                         'livetime': self.get_livetime()}, f)
class MedianPMTRatesService(kp.Module):
    """Collect per-channel PMT rates from monitoring (TMCH) data and serve
    their medians via the 'GetMedianPMTRates' service."""
    def configure(self):
        # rates[dom_id][channel_id] -> list of observed rates
        self.rates = defaultdict(lambda: defaultdict(list))
        self.expose(self.get_median_rates, 'GetMedianPMTRates')
    def process(self, blob):
        tmch_data = TMCHData(io.BytesIO(blob['CHData']))
        dom_id = tmch_data.dom_id
        for channel_id, rate in enumerate(tmch_data.pmt_rates):
            self.rates[dom_id][channel_id].append(rate)
        # NOTE(review): no `return blob` here, so this module hands None
        # downstream — confirm this is intended in the pipeline.
    def get_median_rates(self):
        """Return per-DOM medians of the 31 channel rates, then reset the
        collected samples."""
        print("Calculating median PMT rates.")
        median_rates = {}
        for dom_id in self.rates.keys():
            median_rates[dom_id] = [np.median(self.rates[dom_id][c])
                                    for c in range(31)]
        self.rates = defaultdict(lambda: defaultdict(list))
        return median_rates
class ResetTwofoldCounts(kp.Module):
    """Request a twofold-counts dump (when available) and reset the counter."""
    def process(self, blob):
        services = self.services
        if 'DumpTwofoldCounts' in services:
            print("Request twofold dump...")
            services['DumpTwofoldCounts']()
        print("Resetting twofold counts")
        services['ResetTwofoldCounts']()
        return blob
def calibrate_dom(dom_id, data, detector, livetime=None, fixed_ang_dist=None,
                  auto_scale=False, ad_fit_shape='pexp', fit_background=True,
                  ctmin=-1.):
    """Calibrate intra DOM PMT time offsets, efficiencies and sigmas

    Parameters
    ----------
    dom_id: DOM ID
    data: dict of coincidences or root or hdf5 file
    detector: instance of detector class
    livetime: data-taking duration [s]
    fixed_ang_dist: fixing angular distribution e.g. for data mc comparison
    auto_scale: auto scales the fixed angular distribution to the data
    ad_fit_shape: angular distribution fit shape, 'exp' or 'pexp'
    fit_background: fit a flat background under the delta-t gaussians
    ctmin: keep only PMT pairs with cos(opening angle) >= ctmin

    Returns
    -------
    return_data: dictionary with fit results (0 if the delta-t fit fails)
    """
    if isinstance(data, str):
        # A string is treated as a filename; pick the loader by extension.
        filename = data
        loaders = {'.h5': load_k40_coincidences_from_hdf5,
                   '.root': load_k40_coincidences_from_rootfile}
        try:
            loader = loaders[os.path.splitext(filename)[1]]
        except KeyError:
            log.critical('File format not supported.')
            raise IOError
        else:
            data, livetime = loader(filename, dom_id)
    combs = np.array(list(combinations(range(31), 2)))
    angles = calculate_angles(detector, combs)
    cos_angles = np.cos(angles)
    # Restrict everything to PMT pairs passing the opening-angle cut.
    angles = angles[cos_angles >= ctmin]
    data = data[cos_angles >= ctmin]
    combs = combs[cos_angles >= ctmin]
    try:
        fit_res = fit_delta_ts(data, livetime, fit_background=fit_background)
        rates, means, sigmas, popts, pcovs = fit_res
    except:
        # NOTE(review): bare except silently maps ANY failure (including
        # KeyboardInterrupt and programming errors) to the sentinel 0 —
        # consider narrowing the exception type.
        return 0
    # Rate variance is the third diagonal element of each covariance matrix.
    rate_errors = np.array([np.diag(pc)[2] for pc in pcovs])
    # mean_errors = np.array([np.diag(pc)[0] for pc in pcovs])
    scale_factor = None
    if type(fixed_ang_dist) in (list, np.ndarray):
        # Use an externally supplied angular distribution instead of fitting.
        if auto_scale:
            # Scale the fixed distribution to the measured rates at small
            # opening angles (< 1.5 rad).
            scale_factor = np.mean(rates[angles < 1.5]) / \
                np.mean(fixed_ang_dist[angles < 1.5])
            fitted_rates = fixed_ang_dist * scale_factor
        else:
            fitted_rates = fixed_ang_dist
        exp_popts = []
        exp_pcov = []
        print('Using fixed angular distribution')
    else:
        fit_res = fit_angular_distribution(angles,
                                           rates,
                                           rate_errors,
                                           shape=ad_fit_shape)
        fitted_rates, exp_popts, exp_pcov = fit_res
    # t0_weights = np.array([0. if a>1. else 1. for a in angles])
    if not fit_background:
        # Background was subtracted beforehand: zero the weight of pairs
        # without counts so they do not constrain the minimisation.
        minimize_weights = calculate_weights(fitted_rates, data)
    else:
        minimize_weights = fitted_rates
    opt_t0s = minimize_t0s(means, minimize_weights, combs)
    opt_sigmas = minimize_sigmas(sigmas, minimize_weights, combs)
    opt_qes = minimize_qes(fitted_rates, rates, minimize_weights, combs)
    corrected_means = correct_means(means, opt_t0s.x, combs)
    corrected_rates = correct_rates(rates, opt_qes.x, combs)
    rms_means, rms_corrected_means = calculate_rms_means(means,
                                                         corrected_means)
    rms_rates, rms_corrected_rates = calculate_rms_rates(rates, fitted_rates,
                                                         corrected_rates)
    cos_angles = np.cos(angles)
    return_data = {'opt_t0s': opt_t0s,
                   'opt_qes': opt_qes,
                   'data': data,
                   'means': means,
                   'rates': rates,
                   'fitted_rates': fitted_rates,
                   'angles': angles,
                   'corrected_means': corrected_means,
                   'corrected_rates': corrected_rates,
                   'rms_means': rms_means,
                   'rms_corrected_means': rms_corrected_means,
                   'rms_rates': rms_rates,
                   'rms_corrected_rates': rms_corrected_rates,
                   'gaussian_popts': popts,
                   'livetime': livetime,
                   'exp_popts': exp_popts,
                   'exp_pcov': exp_pcov,
                   'scale_factor': scale_factor,
                   'opt_sigmas': opt_sigmas,
                   'sigmas': sigmas,
                   'combs': combs}
    return return_data
def calculate_weights(fitted_rates, data):
    """Zero out the fitted rate of every PMT pair without any counts.

    A pair whose mean count over all time bins is not positive gets
    weight 0; all other pairs keep their fitted rate as weight.
    """
    active_pairs = np.array(np.mean(data, axis=1) > 0, dtype=int)
    return fitted_rates * active_pairs
def load_k40_coincidences_from_hdf5(filename, dom_id):
    """Load k40 coincidences from hdf5 file

    Parameters
    ----------
    filename: filename of hdf5 file
    dom_id: DOM ID

    Returns
    -------
    data: numpy array of coincidences
    livetime: duration of data-taking
    """
    with h5py.File(filename, 'r') as h5f:
        dataset = h5f['/k40counts/{0}'.format(dom_id)]
        livetime = dataset.attrs['livetime']
        counts = np.array(dataset)
    return counts, livetime
def load_k40_coincidences_from_rootfile(filename, dom_id):
    """Load k40 coincidences from JMonitorK40 ROOT file

    Parameters
    ----------
    filename: root file produced by JMonitorK40
    dom_id: DOM ID

    Returns
    -------
    data: numpy array of coincidences
    dom_weight: weight to apply to coincidences to get rate in Hz
    """
    from ROOT import TFile
    root_file_monitor = TFile(filename, "READ")
    # The 2D histogram for a DOM is stored under "<dom_id>.2S".
    dom_name = str(dom_id) + ".2S"
    histo_2d_monitor = root_file_monitor.Get(dom_name)
    # ROOT bins are 1-based; collect one row per x-bin (PMT combination).
    data = []
    for c in range(1, histo_2d_monitor.GetNbinsX() + 1):
        combination = []
        for b in range(1, histo_2d_monitor.GetNbinsY() + 1):
            combination.append(histo_2d_monitor.GetBinContent(c, b))
        data.append(combination)
    weights = {}
    weights_histo = root_file_monitor.Get('weights_hist')
    try:
        for i in range(1, weights_histo.GetNbinsX() + 1):
            # we have to read all the entries, unfortunately
            weight = weights_histo.GetBinContent(i)
            # Bin labels carry a 3-character prefix before the DOM id.
            label = weights_histo.GetXaxis().GetBinLabel(i)
            weights[label[3:]] = weight
        dom_weight = weights[str(dom_id)]
    except AttributeError:
        # weights_histo is None (Get returned nothing) or malformed.
        log.info("Weights histogram broken or not found, setting weight to 1.")
        dom_weight = 1.
    return np.array(data), dom_weight
def gaussian(x, mean, sigma, rate, offset):
    """Gaussian of area `rate` centred at `mean`, on a flat `offset`."""
    norm = rate / (np.sqrt(2 * np.pi) * sigma)
    return norm * np.exp(-0.5 * ((x - mean) / sigma) ** 2) + offset
def gaussian_wo_offset(x, mean, sigma, rate):
    """Gaussian of area `rate` centred at `mean`, without a background."""
    norm = rate / (np.sqrt(2 * np.pi) * sigma)
    return norm * np.exp(-0.5 * ((x - mean) / sigma) ** 2)
def fit_delta_ts(data, livetime, fit_background=True):
    """Fits gaussians to delta t for each PMT pair.

    Parameters
    ----------
    data: 2d np.array: x = PMT combinations (465), y = time, entry = frequency
    livetime: length of data taking in seconds
    fit_background: if True: fits gaussian with offset, else without offset

    Returns
    -------
    numpy arrays with rates, means, sigmas, popts and pcovs for all
    PMT combinations
    """
    data = data / livetime  # counts -> rates
    # Symmetric time axis centred on dt = 0.
    start = -(data.shape[1] - 1) / 2
    end = -start + 1
    xs = np.arange(start, end)
    rates = []
    sigmas = []
    means = []
    popts = []
    pcovs = []
    n_params = 4 if fit_background else 3
    for combination in data:
        mean0 = np.argmax(combination) + start  # seed the fit at the peak
        try:
            if fit_background:
                popt, pcov = optimize.curve_fit(gaussian,
                                                xs,
                                                combination,
                                                p0=[mean0, 4., 5., 0.1],
                                                bounds=([start, 0, 0, 0],
                                                        [end, 10, 10, 1]))
            else:
                popt, pcov = optimize.curve_fit(gaussian_wo_offset,
                                                xs,
                                                combination,
                                                p0=[mean0, 4., 5.],
                                                bounds=([start, 0, 0],
                                                        [end, 10, 10]))
        except RuntimeError:
            # BUG FIX: the original only reset popt here, so pcov kept the
            # value of the previous iteration (or was unbound when the very
            # first fit failed).  Use zero placeholders of consistent size.
            popt = np.zeros(n_params)
            pcov = np.zeros((n_params, n_params))
        rates.append(popt[2])
        means.append(popt[0])
        sigmas.append(popt[1])
        popts.append(popt)
        pcovs.append(pcov)
    return (np.array(rates),
            np.array(means),
            np.array(sigmas),
            np.array(popts),
            np.array(pcovs))
def calculate_angles(detector, combs):
    """Calculates angles between PMT combinations according to the PMT
    directions of the detector.

    Parameters
    ----------
    detector: detector instance providing `pmt_angles`
    combs: pmt combinations

    Returns
    -------
    angles: numpy array of angles between all PMT combinations
    """
    pmt_dirs = detector.pmt_angles
    return np.array([kp.math.angle_between(np.array(pmt_dirs[i]),
                                           np.array(pmt_dirs[j]))
                     for i, j in combs])
def exponential_polinomial(x, p1, p2, p3, p4):
    """Exponential of a cubic polynomial in x (Horner form)."""
    poly = p1 + x * (p2 + x * (p3 + x * p4))
    return np.exp(poly)
def exponential(x, a, b):
    """Plain exponential a * e**(b * x)."""
    return a * np.exp(x * b)
def fit_angular_distribution(angles, rates, rate_errors, shape='pexp'):
    """Fits angular distribution of rates.

    Parameters
    ----------
    angles: numpy array
        with angles for all PMT combinations
    rates: numpy array
        with rates for all PMT combinations
    rate_errors: numpy array
        with rate errors (currently not used by the fit)
    shape:
        which function to fit; 'exp' for exponential or 'pexp' for
        exponential_polinomial

    Returns
    -------
    fitted_rates: numpy array of fitted rates (fit_function(cos(angles), *popt))
    popt, pcov: fit parameters and covariance matrix

    Raises
    ------
    ValueError: if `shape` is neither 'exp' nor 'pexp'
    """
    if shape == 'exp':
        fit_function = exponential
        # p0 = [-0.91871169, 2.72224241, -1.19065965, 1.48054122]
    elif shape == 'pexp':
        fit_function = exponential_polinomial
        # p0 = [0.34921202, 2.8629577]
    else:
        # BUG FIX: unknown shapes previously fell through and raised a
        # confusing NameError on `fit_function`.
        raise ValueError("Unknown fit shape '{}'".format(shape))
    # The distribution is fitted against cos(angle), not the angle itself.
    cos_angles = np.cos(angles)
    popt, pcov = optimize.curve_fit(fit_function, cos_angles, rates)
    fitted_rates = fit_function(cos_angles, *popt)
    return fitted_rates, popt, pcov
def minimize_t0s(means, weights, combs):
    """Varies t0s to minimize the deviation of the gaussian means from zero.

    Parameters
    ----------
    means: numpy array of means of all PMT combinations
    weights: numpy array of weights for the squared sum
    combs: pmt combinations to use for minimization

    Returns
    -------
    opt_t0s: scipy OptimizeResult with optimal t0 values for all PMTs
    """
    def make_quality_function(means, weights, combs):
        # Weighted squared deviation of each pair mean from the t0
        # difference of its two PMTs.
        def quality_function(t0s):
            sq_sum = 0
            for mean, comb, weight in zip(means, combs, weights):
                sq_sum += ((mean - (t0s[comb[1]] - t0s[comb[0]])) * weight)**2
            return sq_sum
        return quality_function
    qfunc = make_quality_function(means, weights, combs)
    # t0s = np.zeros(31)
    # NOTE(review): random starting point makes results non-deterministic
    # between runs; a fixed seed or the zero start would be reproducible.
    t0s = np.random.rand(31)
    # PMT 0 is pinned to t0 = 0 as the reference channel.
    bounds = [(0, 0)]+[(-10., 10.)] * 30
    opt_t0s = optimize.minimize(qfunc, t0s, bounds=bounds)
    return opt_t0s
def minimize_sigmas(sigmas, weights, combs):
    """Varies sigmas to minimize gaussian sigma12 - sqrt(sigma1² + sigma2²).

    Parameters
    ----------
    sigmas: numpy array of fitted sigmas of gaussians
    weights: numpy array of weights for the squared sum
    combs: pmt combinations to use for minimization

    Returns
    -------
    opt_sigmas: scipy OptimizeResult with optimal sigma values for all PMTs
    """
    def quality(s):
        # Weighted squared deviation of each pair sigma from the quadrature
        # sum of its two single-PMT sigmas.
        total = 0
        for pair_sigma, (i, j), weight in zip(sigmas, combs, weights):
            expected = np.sqrt(s[j] ** 2 + s[i] ** 2)
            total += ((pair_sigma - expected) * weight) ** 2
        return total
    # s = np.random.rand(31)
    initial = np.ones(31) * 2.5
    return optimize.minimize(quality, initial, bounds=[(0., 5.)] * 31)
def minimize_qes(fitted_rates, rates, weights, combs):
    """Varies QEs to minimize the deviation of the rates from the fitted_rates.

    Parameters
    ----------
    fitted_rates: numpy array of fitted rates from fit_angular_distribution
    rates: numpy array of rates of all PMT combinations
    weights: numpy array of weights for the squared sum
    combs: pmt combinations to use for minimization

    Returns
    -------
    opt_qes: scipy OptimizeResult with optimal qe values for all PMTs
    """
    def quality(qes):
        # Weighted squared residual between the QE-corrected pair rate and
        # the rate expected from the angular fit.
        total = 0
        for expected, (i, j), measured, weight \
                in zip(fitted_rates, combs, rates, weights):
            total += ((measured / qes[i] / qes[j] - expected) * weight) ** 2
        return total
    initial = np.ones(31)
    return optimize.minimize(quality, initial, bounds=[(0.1, 2.)] * 31)
def correct_means(means, opt_t0s, combs):
    """Applies optimal t0s to gaussians means.

    Should be around zero afterwards.

    Parameters
    ----------
    means: numpy array of means of gaussians of all PMT combinations
    opt_t0s: numpy array of optimal t0 values for all PMTs
    combs: pmt combinations used to correct

    Returns
    -------
    corrected_means: numpy array of corrected gaussian means for all PMT combs
    """
    shifted = [(opt_t0s[j] - opt_t0s[i]) - mean
               for mean, (i, j) in zip(means, combs)]
    return np.array(shifted)
def correct_rates(rates, opt_qes, combs):
    """Applies optimal qes to rates.

    Should be closer to fitted_rates afterwards.

    Parameters
    ----------
    rates: numpy array of rates of all PMT combinations
    opt_qes: numpy array of optimal qe values for all PMTs
    combs: pmt combinations used to correct

    Returns
    -------
    corrected_rates: numpy array of corrected rates for all PMT combinations
    """
    scaled = [measured / opt_qes[i] / opt_qes[j]
              for measured, (i, j) in zip(rates, combs)]
    return np.array(scaled)
def calculate_rms_means(means, corrected_means):
    """Calculates RMS of means from zero before and after correction

    Parameters
    ----------
    means: numpy array of means of gaussians of all PMT combinations
    corrected_means: numpy array of corrected gaussian means for all PMT combs

    Returns
    -------
    rms_means: RMS of means from zero
    rms_corrected_means: RMS of corrected_means from zero
    """
    def rms(values):
        return np.sqrt(np.mean(np.square(values)))
    return rms(means), rms(corrected_means)
def calculate_rms_rates(rates, fitted_rates, corrected_rates):
    """Calculates RMS of rates from fitted_rates before and after correction

    Parameters
    ----------
    rates: numpy array of rates of all PMT combinations
    fitted_rates: numpy array of fitted rates used as the reference
    corrected_rates: numpy array of corrected rates for all PMT combinations

    Returns
    -------
    rms_rates: RMS of rates from fitted_rates
    rms_corrected_rates: RMS of corrected_rates from fitted_rates
    """
    def rms_from_fit(values):
        return np.sqrt(np.mean((values - fitted_rates) ** 2))
    return rms_from_fit(rates), rms_from_fit(corrected_rates)
@nb.jit
def get_comb_index(i, j):
    """Return the index of PMT pair combinations"""
    # Maps an ordered channel pair (i < j) to its position in the
    # lexicographic list of the 465 combinations of 31 channels.
    return i*30-i*(i+1)//2 + j-1
@nb.jit
def add_to_twofold_matrix(times, tdcs, mat, tmax=10):
    """Add counts to twofold coincidences for a given `tmax`.

    Hits are expected to be sorted in time.  Only clean twofold clusters
    (exactly two hits within `tmax`) on two different channels are counted.

    Parameters
    ----------
    times: np.ndarray of hit times (int32)
    tdcs: np.ndarray of channel_ids (uint8)
    mat: ref to a np.array((465, tmax * 2 + 1))
    tmax: int (time window)

    Returns
    -------
    mat: coincidence matrix (np.array((465, tmax * 2 + 1)))
    """
    h_idx = 0  # index of initial hit
    c_idx = 0  # index of coincident candidate hit
    n_hits = len(times)
    multiplicity = 0
    # NOTE(review): `multiplicity` is only reset inside the branch below,
    # so a stale value of 2 can survive into the next iteration; combined
    # with the `h_idx <= n_hits` bound this looks like it can stall without
    # progress or index one element past the arrays — verify against the
    # upstream km3pipe implementation before changing (numba hot path).
    while h_idx <= n_hits:
        c_idx = h_idx + 1
        if (c_idx < n_hits) and (times[c_idx] - times[h_idx] <= tmax):
            multiplicity = 2
            c_idx += 1
            # Walk forward to count every hit within tmax of the first.
            while (c_idx < n_hits) and (times[c_idx] - times[h_idx] <= tmax):
                c_idx += 1
                multiplicity += 1
        if multiplicity != 2:
            # Not a clean twofold cluster: skip past it.
            h_idx = c_idx
            continue
        c_idx -= 1
        h_tdc = tdcs[h_idx]
        c_tdc = tdcs[c_idx]
        h_time = times[h_idx]
        c_time = times[c_idx]
        if h_tdc != c_tdc:
            dt = int(c_time - h_time)
            # Order the channel pair (smaller id first) and mirror the
            # time difference accordingly around the tmax-centred axis.
            if h_tdc > c_tdc:
                mat[get_comb_index(c_tdc, h_tdc), -dt+tmax] += 1
            else:
                mat[get_comb_index(h_tdc, c_tdc), dt+tmax] += 1
        h_idx = c_idx
# jmonitork40_comb_indices = \
# np.array((254, 423, 424, 391, 392, 255, 204, 205, 126, 120, 121, 0,
# 22, 12, 80, 81, 23, 48, 49, 148, 150, 96, 296, 221, 190, 191, 297, 312,
# 313, 386, 355, 132, 110, 431, 42, 433, 113, 256, 134, 358, 192, 74,
# 176, 36, 402, 301, 270, 69, 384, 2, 156, 38, 178, 70, 273, 404, 302,
# 77, 202, 351, 246, 440, 133, 262, 103, 118, 44, 141, 34, 4, 64, 30,
# 196, 91, 172, 61, 292, 84, 157, 198, 276, 182, 281, 410, 381, 289,
# 405, 439, 247, 356, 102, 263, 119, 140, 45, 35, 88, 65, 194, 31,
# 7, 60, 173, 82, 294, 158, 409, 277, 280, 183, 200, 288, 382, 406,
# 212, 432, 128, 388, 206, 264, 105, 72, 144, 52, 283, 6, 19, 14,
# 169, 24, 310, 97, 379, 186, 218, 59, 93, 152, 317, 304, 111, 387,
# 129, 207, 104, 265, 73, 18, 53, 5, 284, 146, 168, 15, 308, 26,
# 98, 92, 187, 58, 219, 380, 316, 154, 305, 112, 434, 257, 357, 135,
# 193, 300, 177, 401, 37, 75, 68, 271, 1, 385, 159, 403, 179, 272,
# 71, 39, 76, 303, 203, 213, 393, 248, 442, 298, 145, 184, 89, 377,
# 315, 216, 57, 309, 27, 99, 8, 54, 16, 171, 287, 153, 21, 78,
# 394, 441, 249, 299, 314, 185, 376, 90, 147, 56, 217, 25, 311, 100,
# 286, 55, 170, 17, 9, 20, 155, 79, 425, 426, 383, 306, 220, 290,
# 291, 307, 188, 189, 149, 151, 101, 86, 13, 50, 51, 87, 28, 29,
# 3, 352, 399, 375, 274, 407, 197, 285, 180, 279, 83, 295, 160, 199,
# 66, 174, 63, 33, 10, 95, 40, 400, 282, 275, 195, 408, 378, 278,
# 181, 293, 85, 161, 32, 67, 62, 175, 201, 94, 11, 41, 435, 415,
# 359, 360, 436, 347, 348, 258, 259, 318, 136, 162, 222, 223, 137, 114,
# 115, 43, 451, 443, 266, 389, 335, 456, 208, 396, 363, 250, 238, 327,
# 235, 107, 130, 215, 116, 343, 344, 452, 461, 462, 331, 332, 417, 226,
# 324, 371, 372, 229, 240, 241, 163, 142, 267, 230, 412, 122, 428, 319,
# 353, 227, 340, 166, 47, 108, 253, 138, 444, 411, 231, 427, 123, 320,
# 46, 228, 165, 341, 354, 252, 109, 139, 455, 336, 395, 209, 364, 106,
# 239, 234, 328, 251, 214, 131, 117, 373, 447, 243, 418, 164, 369, 325,
# 460, 342, 329, 237, 224, 242, 448, 419, 339, 370, 459, 326, 167, 236,
# 330, 225, 127, 365, 124, 333, 244, 450, 430, 397, 211, 260, 366, 429,
# 334, 449, 245, 125, 210, 398, 261, 321, 420, 421, 422, 322, 367, 368,
# 323, 345, 413, 232, 143, 268, 446, 361, 463, 464, 346, 453, 454, 416,
# 374, 233, 337, 458, 349, 414, 457, 338, 350, 445, 269, 362, 390, 437,
# 438))
#
"""
jmonitork40_comb_indices = \
np.array((417, 418, 419, 420, 421, 422, 363, 364, 365, 366, 367, 368,
318, 319, 320, 321, 322, 323, 156, 157, 158, 159, 160, 161, 96, 97, 98,
99, 100, 101, 461, 369, 324, 371, 464, 427, 331, 237, 238, 333, 434, 415,
339, 231, 175, 232, 342, 278, 184, 61, 62, 186, 281, 220, 162, 54, 13,
56, 168, 459, 370, 325, 374, 423, 428, 328, 244, 239, 338, 343, 411, 346,
226, 178, 229, 270, 271, 181, 68, 69, 191, 170, 216, 164, 50, 16, 58,
462, 373, 326, 327, 429, 424, 337, 240, 245, 222, 345, 412, 347, 228, 179,
180, 272, 273, 190, 70, 71, 48, 163, 217, 172, 57, 17, 463, 372, 234,
332, 430, 431, 334, 241, 174, 230, 340, 416, 341, 233, 60, 185, 279, 280,
187, 63, 12, 52, 165, 221, 166, 59, 460, 242, 235, 336, 425, 432, 330,
223, 176, 225, 348, 413, 350, 64, 65, 189, 274, 275, 183, 49, 14, 55,
173, 218, 169, 335, 236, 243, 329, 433, 426, 344, 224, 177, 227, 349, 414,
188, 66, 67, 182, 276, 277, 171, 53, 15, 51, 167, 219, 387, 204, 128,
209, 396, 443, 435, 263, 112, 120, 249, 375, 283, 73, 6, 85, 301, 310,
306, 148, 22, 28, 154, 388, 208, 126, 211, 254, 451, 452, 256, 104, 105,
282, 383, 285, 84, 1, 87, 144, 312, 313, 150, 24, 25, 395, 210, 129,
110, 262, 436, 445, 248, 121, 72, 284, 376, 300, 86, 7, 18, 146, 307,
314, 152, 29, 389, 205, 111, 118, 247, 446, 437, 265, 4, 81, 299, 377,
287, 75, 19, 26, 149, 315, 308, 155, 390, 255, 102, 103, 257, 453, 454,
80, 0, 83, 286, 384, 289, 145, 20, 21, 151, 316, 317, 444, 246, 119,
113, 264, 438, 298, 82, 5, 74, 288, 378, 311, 147, 27, 23, 153, 309,
351, 136, 42, 138, 354, 403, 194, 33, 34, 200, 410, 385, 292, 91, 3,
94, 297, 359, 137, 44, 133, 399, 404, 196, 40, 35, 202, 290, 379, 303,
92, 10, 79, 352, 132, 45, 192, 405, 400, 198, 36, 41, 88, 302, 380,
294, 78, 11, 353, 139, 30, 195, 406, 407, 201, 37, 2, 90, 293, 386,
296, 95, 360, 38, 31, 197, 401, 408, 203, 89, 8, 77, 295, 381, 305,
193, 32, 39, 199, 409, 402, 291, 76, 9, 93, 304, 382, 355, 134, 46,
141, 362, 455, 439, 251, 108, 124, 269, 356, 140, 43, 143, 258, 447, 448,
260, 116, 117, 361, 142, 47, 106, 250, 440, 457, 268, 125, 357, 135, 107,
122, 267, 458, 441, 253, 358, 259, 114, 115, 261, 449, 450, 456, 266, 123,
109, 252, 442, 391, 212, 127, 214, 394, 397, 213, 130, 207, 392, 206, 131,
393, 215, 398))
"""
Fix typo: iterate `twofold_counts` instead of the undefined name `counts` in `IntraDOMCalibrator.calibrate`
# coding=utf-8
# Filename: k40.py
# pylint: disable=locally-disabled
"""
A collection of k40 related functions and modules.
"""
from __future__ import division, absolute_import, print_function
import os
from itertools import combinations
from collections import defaultdict
from functools import partial
from datetime import datetime
import io
from scipy import optimize
import numpy as np
import h5py
import pickle
import numba as nb
import km3pipe as kp
from km3pipe.io.daq import TMCHData
__author__ = "Jonas Reubelt"
__email__ = "jreubelt@km3net.de"
__status__ = "Development"
log = kp.logger.logging.getLogger(__name__) # pylint: disable=C0103
# log.setLevel(logging.DEBUG)
TIMESLICE_LENGTH = 0.1 # [s]
class K40BackgroundSubtractor(kp.Module):
    """Subtracts random coincidence background from K40 data

    Required Services
    -----------------
    'TwofoldCounts': dict (key=dom_id, value=count matrix)
    'GetMedianPMTRates()': dict (key=dom_id, value=list of pmt rates)
    'GetLivetime()': float

    Output Keys
    -----------
    'CorrectedTwofoldCounts': dict, background corrected K40 counts
    """
    def configure(self):
        # All 465 unordered pairs of the 31 PMT channels.
        self.combs = list(combinations(range(31), 2))

    def process(self, blob):
        print('Subtracting random background calculated from single rates')
        counts = self.services['TwofoldCounts']
        dom_ids = list(counts.keys())
        mean_rates = self.services['GetMedianPMTRates']()
        corrected_counts = {}
        livetime = self.services['GetLivetime']()
        for dom_id in dom_ids:
            try:
                pmt_rates = mean_rates[dom_id]
            except KeyError:
                # No single rates for this DOM: pass the counts through.
                log.warning("Skipping BG correction for DOM {}."
                            .format(dom_id))
                corrected_counts[dom_id] = counts[dom_id]
                continue
            k40_rates = counts[dom_id] / livetime
            # Accidental pair rate estimated as r1 * r2 * 1e-9; presumably
            # the 1e-9 encodes the ns coincidence window — TODO confirm.
            bg_rates = []
            for c in self.combs:
                bg_rates.append(pmt_rates[c[0]] * pmt_rates[c[1]] * 1e-9)
            corrected_counts[dom_id] = (k40_rates.T -
                                        np.array(bg_rates)).T * livetime
        blob["CorrectedTwofoldCounts"] = corrected_counts
        # BUG FIX: close the dump files deterministically instead of
        # leaving open() handles to the garbage collector.
        with open("k40_counts_bg_sub.p", "wb") as f:
            pickle.dump({'data': corrected_counts,
                         'livetime': livetime}, f)
        with open('mean_rates.p', 'wb') as f:
            pickle.dump(mean_rates, f)
        return blob
class IntraDOMCalibrator(kp.Module):
    """Intra DOM calibrator which performs the calibration from K40Counts.

    Parameters
    ----------
    det_id: int
        Detector ID [default: 14]
    ctmin: float
        Minimum cos(angle)
    mode: str ('offline' | 'online')
        Calibration mode [default: 'online']

    Input Keys
    ----------
    'TwofoldCounts': dict (key=dom_id,
                           value=matrix of k40 counts 465x(dt*2+1))
    'CorrectedTwofoldCounts': dict (key=dom_id,
                                    value=matrix of k40 counts 465x(dt*2+1))

    Output Keys
    -----------
    'IntraDOMCalibration': dict (key=dom_id, value=calibration)
    """
    def configure(self):
        det_id = self.get("det_id") or 14
        self.detector = kp.hardware.Detector(det_id=det_id)
        self.ctmin = self.require("ctmin")
        self.mode = self.get("mode", default="online")
    def process(self, blob):
        # Offline mode defers all work to finish().
        if self.mode != 'online':
            return blob
        if 'CorrectedTwofoldCounts' in blob:
            log.info("Using corrected twofold counts")
            fit_background = False
            twofold_counts = blob['CorrectedTwofoldCounts']
        else:
            log.info("No corrected twofold counts found, fitting background.")
            twofold_counts = self.services['TwofoldCounts']
            fit_background = True
        blob['IntraDOMCalibration'] = self.calibrate(twofold_counts,
                                                     fit_background)
        return blob
    def calibrate(self, twofold_counts, fit_background=False):
        """Run calibrate_dom for every DOM and collect the results,
        skipping DOMs whose calibration raises a RuntimeError."""
        print("Starting calibration:")
        calibration = {}
        for dom_id, data in twofold_counts.items():
            print(" calibrating DOM '{0}'".format(dom_id))
            try:
                calib = calibrate_dom(dom_id, data,
                                      self.detector,
                                      livetime=self.services['GetLivetime'](),
                                      fit_background=fit_background,
                                      ad_fit_shape='exp', ctmin=self.ctmin)
            except RuntimeError:
                log.error(" skipping DOM '{0}'.".format(dom_id))
            else:
                calibration[dom_id] = calib
        return calibration
    def finish(self):
        if self.mode == 'offline':
            print("Starting offline calibration")
            twofold_counts = self.services['TwofoldCounts']
            self.calibrate(twofold_counts, fit_background=True)
class TwofoldCounter(kp.Module):
    """Counts twofold coincidences in timeslice hits per PMT combination.

    Parameters
    ----------
    'tmax': int
        time window of twofold coincidences [ns]
    'dump_filename': str
        name for the dump file

    Input Keys
    ----------
    'TSHits': RawHitSeries

    Services
    --------
    'TwofoldCounts': dict (key=dom_id, value=matrix (465,(dt*2+1)))
    'ResetTwofoldCounts': reset the TwofoldCounts dict
    'GetLivetime()': float
    'DumpTwofoldCounts': Writes twofold counts into 'dump_filename'
    """
    def configure(self):
        self.tmax = self.get("tmax") or 20
        self.dump_filename = self.get("dump_filename")
        self.counts = None
        self.n_timeslices = None
        self.start_time = datetime.utcnow()
        self.reset()
        # NOTE(review): the dict object itself is exposed; a later reset()
        # rebinds self.counts, leaving the exposed service pointing at the
        # old dict — confirm consumers re-fetch the service each time.
        self.expose(self.counts, 'TwofoldCounts')
        self.expose(self.get_livetime, 'GetLivetime')
        self.expose(self.reset, 'ResetTwofoldCounts')
        if self.dump_filename is not None:
            self.expose(self.dump, 'DumpTwofoldCounts')

    def reset(self):
        """Reset coincidence counter"""
        # One (465, 2*tmax+1) matrix per DOM, created lazily on access.
        self.counts = defaultdict(partial(np.zeros, (465, self.tmax * 2 + 1)))
        self.n_timeslices = 0

    def get_livetime(self):
        """Return the accumulated livetime in seconds."""
        return self.n_timeslices * TIMESLICE_LENGTH

    def process(self, blob):
        log.debug("Processing timeslice")
        self.n_timeslices += 1
        hits = blob['TSHits']
        dom_ids = np.unique(hits.dom_id)
        for dom_id in dom_ids:
            mask = hits.dom_id == dom_id
            times = hits.time[mask]
            channel_ids = hits.channel_id[mask]
            # add_to_twofold_matrix expects time-sorted hits.
            sort_idc = np.argsort(times, kind='quicksort')
            add_to_twofold_matrix(times[sort_idc],
                                  channel_ids[sort_idc],
                                  self.counts[dom_id],
                                  tmax=self.tmax)
        return blob

    def dump(self):
        """Write coincidence counts into a Python pickle"""
        print("Dumping data to {}".format(self.dump_filename))
        # BUG FIX: use a context manager instead of an unclosed open();
        # the original relied on the GC to flush and close the handle.
        with open(self.dump_filename, "wb") as f:
            pickle.dump({'data': self.counts,
                         'livetime': self.get_livetime()}, f)
class MedianPMTRatesService(kp.Module):
    """Collect per-channel PMT rates from monitoring (TMCH) data and serve
    their medians via the 'GetMedianPMTRates' service."""
    def configure(self):
        # rates[dom_id][channel_id] -> list of observed rates
        self.rates = defaultdict(lambda: defaultdict(list))
        self.expose(self.get_median_rates, 'GetMedianPMTRates')
    def process(self, blob):
        tmch_data = TMCHData(io.BytesIO(blob['CHData']))
        dom_id = tmch_data.dom_id
        for channel_id, rate in enumerate(tmch_data.pmt_rates):
            self.rates[dom_id][channel_id].append(rate)
        # NOTE(review): no `return blob` here, so this module hands None
        # downstream — confirm this is intended in the pipeline.
    def get_median_rates(self):
        """Return per-DOM medians of the 31 channel rates, then reset the
        collected samples."""
        print("Calculating median PMT rates.")
        median_rates = {}
        for dom_id in self.rates.keys():
            median_rates[dom_id] = [np.median(self.rates[dom_id][c])
                                    for c in range(31)]
        self.rates = defaultdict(lambda: defaultdict(list))
        return median_rates
class ResetTwofoldCounts(kp.Module):
    """Dumps the twofold counts (if a dumper is registered), then resets."""
    def process(self, blob):
        dumper_available = 'DumpTwofoldCounts' in self.services
        if dumper_available:
            print("Request twofold dump...")
            self.services['DumpTwofoldCounts']()
        print("Resetting twofold counts")
        self.services['ResetTwofoldCounts']()
        return blob
def calibrate_dom(dom_id, data, detector, livetime=None, fixed_ang_dist=None,
                  auto_scale=False, ad_fit_shape='pexp', fit_background=True,
                  ctmin=-1.):
    """Calibrate intra DOM PMT time offsets, efficiencies and sigmas
    Parameters
    ----------
    dom_id: DOM ID
    data: dict of coincidences or root or hdf5 file
    detector: instance of detector class
    livetime: data-taking duration [s]
    fixed_ang_dist: fixing angular distribution e.g. for data mc comparison
    auto_scale: auto scales the fixed angular distribution to the data
    ad_fit_shape: 'pexp' or 'exp', shape of the angular distribution fit
    fit_background: fit gaussians with (True) or without (False) an offset
    ctmin: minimum cos(angle) between PMT pairs to include
    Returns
    -------
    return_data: dictionary with fit results, or 0 if the delta-t fit failed
    Raises
    ------
    IOError: if a filename with an unsupported extension is passed
    """
    if isinstance(data, str):
        # A filename was passed: pick the loader by extension.
        filename = data
        loaders = {'.h5': load_k40_coincidences_from_hdf5,
                   '.root': load_k40_coincidences_from_rootfile}
        try:
            loader = loaders[os.path.splitext(filename)[1]]
        except KeyError:
            log.critical('File format not supported.')
            raise IOError('File format not supported.')
        else:
            data, livetime = loader(filename, dom_id)
    combs = np.array(list(combinations(range(31), 2)))
    angles = calculate_angles(detector, combs)
    cos_angles = np.cos(angles)
    # Keep only PMT pairs above the opening-angle threshold.
    angles = angles[cos_angles >= ctmin]
    data = data[cos_angles >= ctmin]
    combs = combs[cos_angles >= ctmin]
    try:
        fit_res = fit_delta_ts(data, livetime, fit_background=fit_background)
        rates, means, sigmas, popts, pcovs = fit_res
    except Exception:
        # Best-effort calibration: signal failure with 0 instead of
        # aborting the whole run, but leave a trace in the log (the
        # former bare `except:` silently swallowed everything, including
        # KeyboardInterrupt).
        log.error('Delta-t fit failed for DOM {0}'.format(dom_id))
        return 0
    rate_errors = np.array([np.diag(pc)[2] for pc in pcovs])
    scale_factor = None
    if isinstance(fixed_ang_dist, (list, np.ndarray)):
        # Use the externally supplied angular distribution instead of
        # fitting one (e.g. for data/MC comparisons).
        if auto_scale:
            scale_factor = np.mean(rates[angles < 1.5]) / \
                np.mean(fixed_ang_dist[angles < 1.5])
            fitted_rates = fixed_ang_dist * scale_factor
        else:
            fitted_rates = fixed_ang_dist
        exp_popts = []
        exp_pcov = []
        print('Using fixed angular distribution')
    else:
        fit_res = fit_angular_distribution(angles,
                                           rates,
                                           rate_errors,
                                           shape=ad_fit_shape)
        fitted_rates, exp_popts, exp_pcov = fit_res
    if not fit_background:
        minimize_weights = calculate_weights(fitted_rates, data)
    else:
        minimize_weights = fitted_rates
    opt_t0s = minimize_t0s(means, minimize_weights, combs)
    opt_sigmas = minimize_sigmas(sigmas, minimize_weights, combs)
    opt_qes = minimize_qes(fitted_rates, rates, minimize_weights, combs)
    corrected_means = correct_means(means, opt_t0s.x, combs)
    corrected_rates = correct_rates(rates, opt_qes.x, combs)
    rms_means, rms_corrected_means = calculate_rms_means(means,
                                                         corrected_means)
    rms_rates, rms_corrected_rates = calculate_rms_rates(rates, fitted_rates,
                                                         corrected_rates)
    return_data = {'opt_t0s': opt_t0s,
                   'opt_qes': opt_qes,
                   'data': data,
                   'means': means,
                   'rates': rates,
                   'fitted_rates': fitted_rates,
                   'angles': angles,
                   'corrected_means': corrected_means,
                   'corrected_rates': corrected_rates,
                   'rms_means': rms_means,
                   'rms_corrected_means': rms_corrected_means,
                   'rms_rates': rms_rates,
                   'rms_corrected_rates': rms_corrected_rates,
                   'gaussian_popts': popts,
                   'livetime': livetime,
                   'exp_popts': exp_popts,
                   'exp_pcov': exp_pcov,
                   'scale_factor': scale_factor,
                   'opt_sigmas': opt_sigmas,
                   'sigmas': sigmas,
                   'combs': combs}
    return return_data
def calculate_weights(fitted_rates, data):
    """Zero out fitted rates for PMT pairs without any observed counts.

    Returns the fitted rate per combination when that combination's mean
    count is positive, otherwise 0.
    """
    has_counts = np.array(np.mean(data, axis=1) > 0, dtype=int)
    return fitted_rates * has_counts
def load_k40_coincidences_from_hdf5(filename, dom_id):
    """Load k40 coincidences from hdf5 file
    Parameters
    ----------
    filename: filename of hdf5 file
    dom_id: DOM ID
    Returns
    -------
    data: numpy array of coincidences
    livetime: duration of data-taking
    """
    with h5py.File(filename, 'r') as h5f:
        dataset = h5f['/k40counts/{0}'.format(dom_id)]
        # Read the livetime attribute before materializing the data,
        # since both live on the HDF5 dataset object.
        livetime = dataset.attrs['livetime']
        counts = np.array(dataset)
    return counts, livetime
def load_k40_coincidences_from_rootfile(filename, dom_id):
    """Load k40 coincidences from JMonitorK40 ROOT file
    Parameters
    ----------
    filename: root file produced by JMonitorK40
    dom_id: DOM ID
    Returns
    -------
    data: numpy array of coincidences
    dom_weight: weight to apply to coincidences to get rate in Hz
    """
    from ROOT import TFile
    root_file_monitor = TFile(filename, "READ")
    histo_2d_monitor = root_file_monitor.Get(str(dom_id) + ".2S")
    n_x = histo_2d_monitor.GetNbinsX()
    n_y = histo_2d_monitor.GetNbinsY()
    # ROOT bins are 1-based; bin 0 is the underflow bin.
    data = [[histo_2d_monitor.GetBinContent(c, b)
             for b in range(1, n_y + 1)]
            for c in range(1, n_x + 1)]
    weights = {}
    weights_histo = root_file_monitor.Get('weights_hist')
    try:
        axis = weights_histo.GetXaxis()
        for i in range(1, weights_histo.GetNbinsX() + 1):
            # we have to read all the entries, unfortunately
            label = axis.GetBinLabel(i)
            weights[label[3:]] = weights_histo.GetBinContent(i)
        dom_weight = weights[str(dom_id)]
    except AttributeError:
        log.info("Weights histogram broken or not found, setting weight to 1.")
        dom_weight = 1.
    return np.array(data), dom_weight
def gaussian(x, mean, sigma, rate, offset):
    """Gaussian peak scaled by `rate`, sitting on a constant `offset`."""
    norm = rate / np.sqrt(2 * np.pi) / sigma
    return norm * np.exp(-0.5 * (x - mean)**2 / sigma**2) + offset
def gaussian_wo_offset(x, mean, sigma, rate):
    """Gaussian peak scaled by `rate`, without a background offset."""
    norm = rate / np.sqrt(2 * np.pi) / sigma
    return norm * np.exp(-0.5 * (x - mean)**2 / sigma**2)
def fit_delta_ts(data, livetime, fit_background=True):
    """Fits gaussians to delta t for each PMT pair.
    Parameters
    ----------
    data: 2d np.array: x = PMT combinations (465), y = time, entry = frequency
    livetime: length of data taking in seconds
    fit_background: if True: fits gaussian with offset, else without offset
    Returns
    -------
    numpy arrays with rates, means, sigmas, popts and pcovs for all
    PMT combinations
    """
    data = data / livetime
    start = -(data.shape[1] - 1) / 2
    end = -start + 1
    xs = np.arange(start, end)
    # Parameter count depends on whether a background offset is fitted.
    # Failed fits get zero-filled placeholders of MATCHING shape, so
    # np.array(popts) / np.array(pcovs) stay homogeneous (the old code
    # appended a 4-tuple even for 3-parameter fits, and left `pcov`
    # undefined/stale when curve_fit failed).
    n_params = 4 if fit_background else 3
    rates = []
    sigmas = []
    means = []
    popts = []
    pcovs = []
    for combination in data:
        mean0 = np.argmax(combination) + start
        try:
            if fit_background:
                popt, pcov = optimize.curve_fit(gaussian,
                                                xs,
                                                combination,
                                                p0=[mean0, 4., 5., 0.1],
                                                bounds=([start, 0, 0, 0],
                                                        [end, 10, 10, 1]))
            else:
                popt, pcov = optimize.curve_fit(gaussian_wo_offset,
                                                xs,
                                                combination,
                                                p0=[mean0, 4., 5.],
                                                bounds=([start, 0, 0],
                                                        [end, 10, 10]))
        except RuntimeError:
            # Fit did not converge: record zeros instead of reusing a
            # stale (or undefined) popt/pcov from the previous iteration.
            popt = np.zeros(n_params)
            pcov = np.zeros((n_params, n_params))
        rates.append(popt[2])
        means.append(popt[0])
        sigmas.append(popt[1])
        popts.append(popt)
        pcovs.append(pcov)
    return (np.array(rates),
            np.array(means),
            np.array(sigmas),
            np.array(popts),
            np.array(pcovs))
def calculate_angles(detector, combs):
    """Calculates angles between PMT combinations according to positions in
    detector_file
    Parameters
    ----------
    detector: detector instance providing `pmt_angles`
    combs: pmt combinations
    Returns
    -------
    angles: numpy array of angles between all PMT combinations
    """
    pmt_dirs = detector.pmt_angles
    return np.array([
        kp.math.angle_between(np.array(pmt_dirs[first]),
                              np.array(pmt_dirs[second]))
        for first, second in combs
    ])
def exponential_polinomial(x, p1, p2, p3, p4):
    """Exponential of a cubic polynomial in x (Horner form)."""
    poly = p1 + x * (p2 + x * (p3 + x * p4))
    return 1 * np.exp(poly)
def exponential(x, a, b):
    """Plain exponential model: a * e**(b * x)."""
    return a * np.exp(b * x)
def fit_angular_distribution(angles, rates, rate_errors, shape='pexp'):
    """Fits angular distribution of rates.
    Parameters
    ----------
    rates: numpy array
        with rates for all PMT combinations
    angles: numpy array
        with angles for all PMT combinations
    rate_errors: numpy array
        rate uncertainties (currently not used by the fit)
    shape:
        which function to fit; exp for exponential or pexp for
        exponential_polinomial
    Returns
    -------
    fitted_rates: numpy array of fitted rates (fit_function(angles, *popt))
    popt, pcov: best-fit parameters and their covariance matrix
    Raises
    ------
    ValueError: if `shape` is neither 'exp' nor 'pexp'
    """
    if shape == 'exp':
        fit_function = exponential
    elif shape == 'pexp':
        fit_function = exponential_polinomial
    else:
        # Previously an unknown shape crashed later with a NameError on
        # `fit_function`; fail fast with a clear message instead.
        raise ValueError(
            "Unknown shape '{0}': use 'exp' or 'pexp'.".format(shape))
    # The fit is performed against cos(angle), not the angle itself.
    cos_angles = np.cos(angles)
    popt, pcov = optimize.curve_fit(fit_function, cos_angles, rates)
    fitted_rates = fit_function(cos_angles, *popt)
    return fitted_rates, popt, pcov
def minimize_t0s(means, weights, combs):
    """Varies t0s to minimize the deviation of the gaussian means from zero.
    Parameters
    ----------
    means: numpy array of means of all PMT combinations
    weights: numpy array of weights for the squared sum
    combs: pmt combinations to use for minimization
    Returns
    -------
    opt_t0s: scipy OptimizeResult; optimal t0 values for all PMTs in `.x`
    """
    def quality_function(t0s):
        # Weighted squared deviation of each pair's mean from its
        # t0 difference.
        sq_sum = 0
        for mean, comb, weight in zip(means, combs, weights):
            sq_sum += ((mean - (t0s[comb[1]] - t0s[comb[0]])) * weight)**2
        return sq_sum

    initial_t0s = np.random.rand(31)
    # PMT 0 is pinned to t0 = 0 as the common reference.
    bounds = [(0, 0)] + [(-10., 10.)] * 30
    return optimize.minimize(quality_function, initial_t0s, bounds=bounds)
def minimize_sigmas(sigmas, weights, combs):
    """Varies sigmas to minimize gaussian sigma12 - sqrt(sigma1² + sigma2²).
    Parameters
    ----------
    sigmas: numpy array of fitted sigmas of gaussians
    weights: numpy array of weights for the squared sum
    combs: pmt combinations to use for minimization
    Returns
    -------
    opt_sigmas: scipy OptimizeResult; optimal sigma per PMT in `.x`
    """
    def quality_function(s):
        # Expected pair sigma is the quadratic sum of the PMT sigmas.
        sq_sum = 0
        for sigma, comb, weight in zip(sigmas, combs, weights):
            sigma_sqsum = np.sqrt(s[comb[1]]**2 + s[comb[0]]**2)
            sq_sum += ((sigma - sigma_sqsum) * weight)**2
        return sq_sum

    start_values = np.ones(31) * 2.5
    bounds = [(0., 5.)] * 31
    return optimize.minimize(quality_function, start_values, bounds=bounds)
def minimize_qes(fitted_rates, rates, weights, combs):
    """Varies QEs to minimize the deviation of the rates from the fitted_rates.
    Parameters
    ----------
    fitted_rates: numpy array of fitted rates from fit_angular_distribution
    rates: numpy array of rates of all PMT combinations
    weights: numpy array of weights for the squared sum
    combs: pmt combinations to use for minimization
    Returns
    -------
    opt_qes: scipy OptimizeResult; optimal qe per PMT in `.x`
    """
    def quality_function(qes):
        # QE-corrected rate should match the fitted angular distribution.
        sq_sum = 0
        for fitted_rate, comb, rate, weight \
                in zip(fitted_rates, combs, rates, weights):
            sq_sum += ((rate / qes[comb[0]] / qes[comb[1]]
                        - fitted_rate) * weight)**2
        return sq_sum

    start_qes = np.ones(31)
    bounds = [(0.1, 2.)] * 31
    return optimize.minimize(quality_function, start_qes, bounds=bounds)
def correct_means(means, opt_t0s, combs):
    """Applies optimal t0s to gaussians means.
    Should be around zero afterwards.
    Parameters
    ----------
    means: numpy array of means of gaussians of all PMT combinations
    opt_t0s: numpy array of optimal t0 values for all PMTs
    combs: pmt combinations used to correct
    Returns
    -------
    corrected_means: numpy array of corrected gaussian means for all PMT combs
    """
    residuals = [(opt_t0s[comb[1]] - opt_t0s[comb[0]]) - mean
                 for mean, comb in zip(means, combs)]
    return np.array(residuals)
def correct_rates(rates, opt_qes, combs):
    """Applies optimal qes to rates.
    Should be closer to fitted_rates afterwards.
    Parameters
    ----------
    rates: numpy array of rates of all PMT combinations
    opt_qes: numpy array of optimal qe values for all PMTs
    combs: pmt combinations used to correct
    Returns
    -------
    corrected_rates: numpy array of corrected rates for all PMT combinations
    """
    scaled = [rate / opt_qes[comb[0]] / opt_qes[comb[1]]
              for rate, comb in zip(rates, combs)]
    return np.array(scaled)
def calculate_rms_means(means, corrected_means):
    """Calculates RMS of means from zero before and after correction
    Parameters
    ----------
    means: numpy array of means of gaussians of all PMT combinations
    corrected_means: numpy array of corrected gaussian means for all PMT combs
    Returns
    -------
    rms_means: RMS of means from zero
    rms_corrected_means: RMS of corrected_means from zero
    """
    rms_means = np.sqrt(np.mean(np.square(means)))
    rms_corrected_means = np.sqrt(np.mean(np.square(corrected_means)))
    return rms_means, rms_corrected_means
def calculate_rms_rates(rates, fitted_rates, corrected_rates):
    """Calculates RMS of rates from fitted_rates before and after correction
    Parameters
    ----------
    rates: numpy array of rates of all PMT combinations
    fitted_rates: numpy array of fitted rates (reference values)
    corrected_rates: numpy array of corrected rates for all PMT combinations
    Returns
    -------
    rms_rates: RMS of rates from fitted_rates
    rms_corrected_rates: RMS of corrected_rates from fitted_rates
    """
    rms_rates = np.sqrt(np.mean(np.square(rates - fitted_rates)))
    rms_corrected_rates = np.sqrt(np.mean(np.square(corrected_rates
                                                    - fitted_rates)))
    return rms_rates, rms_corrected_rates
@nb.jit
def get_comb_index(i, j):
    """Return the flat index of PMT pair (i, j), i < j, in the
    lexicographically ordered list of the 31-choose-2 combinations.

    Algebraically identical to i*30 - i*(i+1)//2 + j - 1; the product
    i*(59 - i) is always even, so integer division is exact.
    """
    return (i * (59 - i)) // 2 + (j - 1)
@nb.jit
def add_to_twofold_matrix(times, tdcs, mat, tmax=10):
    """Add counts to twofold coincidences for a given `tmax`.
    Parameters
    ----------
    times: np.ndarray of hit times (int32), sorted ascending
    tdcs: np.ndarray of channel_ids (uint8)
    mat: ref to a np.array((465, tmax * 2 + 1))
    tmax: int (time window)
    Returns
    -------
    mat: coincidence matrix (np.array((465, tmax * 2 + 1)))
    """
    h_idx = 0  # index of initial (seed) hit
    c_idx = 0  # index of coincident candidate hit
    n_hits = len(times)
    while h_idx < n_hits:
        # BUGFIX: multiplicity must be recomputed for every seed hit.
        # The previous code carried it over between iterations, so after
        # any twofold the next (non-coincident) seed re-entered the pair
        # branch with c_idx == h_idx and the loop spun forever.
        multiplicity = 1
        c_idx = h_idx + 1
        if (c_idx < n_hits) and (times[c_idx] - times[h_idx] <= tmax):
            multiplicity = 2
            c_idx += 1
            # Extend the window to count the full burst multiplicity.
            while (c_idx < n_hits) and (times[c_idx] - times[h_idx] <= tmax):
                c_idx += 1
                multiplicity += 1
        if multiplicity != 2:
            # Not a clean twofold (single hit or burst of >2): skip it.
            h_idx = c_idx
            continue
        c_idx -= 1  # step back onto the coincident hit
        h_tdc = tdcs[h_idx]
        c_tdc = tdcs[c_idx]
        h_time = times[h_idx]
        c_time = times[c_idx]
        if h_tdc != c_tdc:
            dt = int(c_time - h_time)
            # Store with the lower channel first; mirror dt accordingly.
            if h_tdc > c_tdc:
                mat[get_comb_index(c_tdc, h_tdc), -dt + tmax] += 1
            else:
                mat[get_comb_index(h_tdc, c_tdc), dt + tmax] += 1
        h_idx = c_idx
# jmonitork40_comb_indices = \
# np.array((254, 423, 424, 391, 392, 255, 204, 205, 126, 120, 121, 0,
# 22, 12, 80, 81, 23, 48, 49, 148, 150, 96, 296, 221, 190, 191, 297, 312,
# 313, 386, 355, 132, 110, 431, 42, 433, 113, 256, 134, 358, 192, 74,
# 176, 36, 402, 301, 270, 69, 384, 2, 156, 38, 178, 70, 273, 404, 302,
# 77, 202, 351, 246, 440, 133, 262, 103, 118, 44, 141, 34, 4, 64, 30,
# 196, 91, 172, 61, 292, 84, 157, 198, 276, 182, 281, 410, 381, 289,
# 405, 439, 247, 356, 102, 263, 119, 140, 45, 35, 88, 65, 194, 31,
# 7, 60, 173, 82, 294, 158, 409, 277, 280, 183, 200, 288, 382, 406,
# 212, 432, 128, 388, 206, 264, 105, 72, 144, 52, 283, 6, 19, 14,
# 169, 24, 310, 97, 379, 186, 218, 59, 93, 152, 317, 304, 111, 387,
# 129, 207, 104, 265, 73, 18, 53, 5, 284, 146, 168, 15, 308, 26,
# 98, 92, 187, 58, 219, 380, 316, 154, 305, 112, 434, 257, 357, 135,
# 193, 300, 177, 401, 37, 75, 68, 271, 1, 385, 159, 403, 179, 272,
# 71, 39, 76, 303, 203, 213, 393, 248, 442, 298, 145, 184, 89, 377,
# 315, 216, 57, 309, 27, 99, 8, 54, 16, 171, 287, 153, 21, 78,
# 394, 441, 249, 299, 314, 185, 376, 90, 147, 56, 217, 25, 311, 100,
# 286, 55, 170, 17, 9, 20, 155, 79, 425, 426, 383, 306, 220, 290,
# 291, 307, 188, 189, 149, 151, 101, 86, 13, 50, 51, 87, 28, 29,
# 3, 352, 399, 375, 274, 407, 197, 285, 180, 279, 83, 295, 160, 199,
# 66, 174, 63, 33, 10, 95, 40, 400, 282, 275, 195, 408, 378, 278,
# 181, 293, 85, 161, 32, 67, 62, 175, 201, 94, 11, 41, 435, 415,
# 359, 360, 436, 347, 348, 258, 259, 318, 136, 162, 222, 223, 137, 114,
# 115, 43, 451, 443, 266, 389, 335, 456, 208, 396, 363, 250, 238, 327,
# 235, 107, 130, 215, 116, 343, 344, 452, 461, 462, 331, 332, 417, 226,
# 324, 371, 372, 229, 240, 241, 163, 142, 267, 230, 412, 122, 428, 319,
# 353, 227, 340, 166, 47, 108, 253, 138, 444, 411, 231, 427, 123, 320,
# 46, 228, 165, 341, 354, 252, 109, 139, 455, 336, 395, 209, 364, 106,
# 239, 234, 328, 251, 214, 131, 117, 373, 447, 243, 418, 164, 369, 325,
# 460, 342, 329, 237, 224, 242, 448, 419, 339, 370, 459, 326, 167, 236,
# 330, 225, 127, 365, 124, 333, 244, 450, 430, 397, 211, 260, 366, 429,
# 334, 449, 245, 125, 210, 398, 261, 321, 420, 421, 422, 322, 367, 368,
# 323, 345, 413, 232, 143, 268, 446, 361, 463, 464, 346, 453, 454, 416,
# 374, 233, 337, 458, 349, 414, 457, 338, 350, 445, 269, 362, 390, 437,
# 438))
#
"""
jmonitork40_comb_indices = \
np.array((417, 418, 419, 420, 421, 422, 363, 364, 365, 366, 367, 368,
318, 319, 320, 321, 322, 323, 156, 157, 158, 159, 160, 161, 96, 97, 98,
99, 100, 101, 461, 369, 324, 371, 464, 427, 331, 237, 238, 333, 434, 415,
339, 231, 175, 232, 342, 278, 184, 61, 62, 186, 281, 220, 162, 54, 13,
56, 168, 459, 370, 325, 374, 423, 428, 328, 244, 239, 338, 343, 411, 346,
226, 178, 229, 270, 271, 181, 68, 69, 191, 170, 216, 164, 50, 16, 58,
462, 373, 326, 327, 429, 424, 337, 240, 245, 222, 345, 412, 347, 228, 179,
180, 272, 273, 190, 70, 71, 48, 163, 217, 172, 57, 17, 463, 372, 234,
332, 430, 431, 334, 241, 174, 230, 340, 416, 341, 233, 60, 185, 279, 280,
187, 63, 12, 52, 165, 221, 166, 59, 460, 242, 235, 336, 425, 432, 330,
223, 176, 225, 348, 413, 350, 64, 65, 189, 274, 275, 183, 49, 14, 55,
173, 218, 169, 335, 236, 243, 329, 433, 426, 344, 224, 177, 227, 349, 414,
188, 66, 67, 182, 276, 277, 171, 53, 15, 51, 167, 219, 387, 204, 128,
209, 396, 443, 435, 263, 112, 120, 249, 375, 283, 73, 6, 85, 301, 310,
306, 148, 22, 28, 154, 388, 208, 126, 211, 254, 451, 452, 256, 104, 105,
282, 383, 285, 84, 1, 87, 144, 312, 313, 150, 24, 25, 395, 210, 129,
110, 262, 436, 445, 248, 121, 72, 284, 376, 300, 86, 7, 18, 146, 307,
314, 152, 29, 389, 205, 111, 118, 247, 446, 437, 265, 4, 81, 299, 377,
287, 75, 19, 26, 149, 315, 308, 155, 390, 255, 102, 103, 257, 453, 454,
80, 0, 83, 286, 384, 289, 145, 20, 21, 151, 316, 317, 444, 246, 119,
113, 264, 438, 298, 82, 5, 74, 288, 378, 311, 147, 27, 23, 153, 309,
351, 136, 42, 138, 354, 403, 194, 33, 34, 200, 410, 385, 292, 91, 3,
94, 297, 359, 137, 44, 133, 399, 404, 196, 40, 35, 202, 290, 379, 303,
92, 10, 79, 352, 132, 45, 192, 405, 400, 198, 36, 41, 88, 302, 380,
294, 78, 11, 353, 139, 30, 195, 406, 407, 201, 37, 2, 90, 293, 386,
296, 95, 360, 38, 31, 197, 401, 408, 203, 89, 8, 77, 295, 381, 305,
193, 32, 39, 199, 409, 402, 291, 76, 9, 93, 304, 382, 355, 134, 46,
141, 362, 455, 439, 251, 108, 124, 269, 356, 140, 43, 143, 258, 447, 448,
260, 116, 117, 361, 142, 47, 106, 250, 440, 457, 268, 125, 357, 135, 107,
122, 267, 458, 441, 253, 358, 259, 114, 115, 261, 449, 450, 456, 266, 123,
109, 252, 442, 391, 212, 127, 214, 394, 397, 213, 130, 207, 392, 206, 131,
393, 215, 398))
"""
|
#!/usr/bin/env python
"""
Cuckoo search.
Usage:
cs.py [options]
Options:
-h, --help Show this message and exit.
-n N Number of generations in CS. [default: 20]
--print-level LEVEL
Print verbose level. [default: 1]
"""
from __future__ import print_function
import os,sys
from docopt import docopt
import numpy as np
from numpy import exp, sin, cos
import random
import copy
from multiprocessing import Process, Queue
from time import time
from scipy.special import gamma
__author__ = "RYO KOBAYASHI"
__version__ = "rev190920"
_fname_gen = 'out.cs.generations'
_fname_ind = 'out.cs.individuals'
def test_func(var, vranges, **kwargs):
x,y= var
res= x**2 +y**2 +100.0*exp(-x**2 -y**2)*sin(2.0*(x+y))*cos(2*(x-y)) \
+80.0*exp(-(x-1)**2 -(y-1)**2)*cos(x+4*y)*sin(2*x-y) \
+200.0*sin(x+y)*exp(-(x-3)**2-(y-1)**2)
return res
def test_write_func(vs,vrs,fname,**kwargs):
with open(fname,'w') as f:
for i,v in enumerate(vs):
vr = vrs[i]
f.write(' {0:10.3f} {1:10.3f} {2:10.3f}\n'.format(v,*vr))
return None
def wrap(vs, vrs):
    """Clamp every variable of *vs* into its (min, max) range from *vrs*.

    Returns a shallow copy; the input sequence is left untouched.
    """
    clamped = copy.copy(vs)
    for idx, value in enumerate(clamped):
        lo, hi = vrs[idx]
        clamped[idx] = min(max(value, lo), hi)
    return clamped
def update_vrange(vrs,all_indivisuals):
    """
    Update variable ranges adaptively using all the individuals information.

    Keeps the best `ntops` individuals (sorted ascending by loss value)
    and shrinks each variable's (min, max) range to the span covered by
    them; the result never widens beyond the ranges given in `vrs`.
    Returns a new (ndim, 2) numpy array.
    """
    #...Extract top NTOPS individuals from all
    ntops = 100
    tops = []  # best individuals so far, kept sorted ascending by .val
    # print('len(all_indivisuals)=',len(all_indivisuals))
    for i,ind in enumerate(all_indivisuals):
        if len(tops) < ntops: # add the individual
            # print(' i (< ntops)=',i)
            # Insert before the first entry that is worse; if the scan
            # finishes without inserting, append at the end below.
            for it,t in enumerate(tops):
                if ind.val < t.val:
                    tops.insert(it,ind)
                    break
            if not ind in tops:
                tops.append(ind)
        else: # insert the individual and pop out the worst one
            # print(' i (>=ntops)=',i)
            for it,t in enumerate(tops):
                if ind.val < t.val:
                    tops.insert(it,ind)
                    break
            if len(tops) > ntops:
                # keep the top-list capped at ntops entries
                del tops[ntops:len(tops)]
    # print('len(tops)=',len(tops))
    # print('iids= ',[t.iid for t in tops])
    #...Get new ranges
    new_vrs = np.array(vrs)
    # Collect the top individuals' vectors into one (ntops, ndim) matrix.
    vss = np.zeros((len(tops),len(vrs)))
    for i,ind in enumerate(tops):
        vi = ind.vector
        vss[i,:] = vi[:]
        # print('i,vi=',i,vi)
    for j in range(len(new_vrs)):
        # print('j,min,max=',i,min(vss[:,j]),max(vss[:,j]))
        # Tighten only: raise the lower bound to the tops' minimum and
        # lower the upper bound to the tops' maximum.
        new_vrs[j,0] = max(new_vrs[j,0],min(vss[:,j]))
        new_vrs[j,1] = min(new_vrs[j,1],max(vss[:,j]))
    return new_vrs
class Individual:
    """
    Individual class that consists of variables as vector elements.
    """
    def __init__(self, iid, ndim, vranges, loss_func):
        self.iid = iid              # unique individual ID
        self.ndim = ndim            # number of variables
        self.loss_func = loss_func  # callable(vector, vranges, **kwargs)
        self.vector = np.zeros(self.ndim)
        self.vranges = vranges      # (ndim, 2) array of (min, max) per variable
        self.val = None             # loss value; None until evaluated
    def set_variable(self, variables):
        """Adopt *variables* as this individual's vector (clamped to range)."""
        if len(variables) != len(self.vector):
            raise ValueError()
        self.vector = variables
        self.wrap_range()
        self.val = None
        return None
    def init_random(self):
        """Draw every variable uniformly from its allowed range."""
        for idx in range(self.ndim):
            lo, hi = self.vranges[idx]
            self.vector[idx] = random.random()*(hi - lo) + lo
        self.wrap_range()
        self.val = None
        return None
    def wrap_range(self):
        """Clamp the vector into the variable ranges."""
        self.vector = wrap(self.vector, self.vranges)
    def calc_loss_func(self,kwargs,q):
        """
        Compute loss function value using self.loss_func function given in the constructor.
        In order to return a result in multiprocessing.Process, it also takes an argument q.
        """
        result = self.loss_func(self.vector, self.vranges, **kwargs)
        q.put(result)
        return None
class CS:
    """
    Cuckoo search class.
    """
    def __init__(self, N, F, variables, vranges, loss_func, write_func, **kwargs):
        """
        Conctructor of CS class.

        N: Number of individuals.
        F: Fraction of worse individuals to be abondoned.
        variables: initial variable vector.
        vranges: (ndim, 2) array of (min, max) per variable.
        loss_func:
            Loss function to be minimized with variables and **kwargs.
        write_func:
            Callable(vs, vrs, fname, **kwargs) writing variables to a file.
        Recognized kwargs: 'print_level' (int), 'update_vrange' (update
        ranges every that many generations; 0 disables), 'start' (t0 for
        timing output).
        """
        if N < 2:
            raise ValueError('N must be greater than 1 in CS!')
        self.N = N   # Number of individuals in a generation
        self.F = F   # Fraction of worse individuals to be abondoned
        self.ndim = len(variables)
        self.vs = variables
        self.vrs0 = vranges
        self.vrs = copy.copy(self.vrs0)
        self.vws = np.zeros(self.ndim)   # per-variable range widths
        for i in range(self.ndim):
            self.vws[i] = max(self.vrs[i,1] -self.vrs[i,0], 0.0)
        self.loss_func = loss_func
        self.write_func = write_func
        self.kwargs = kwargs
        self.bestind = None
        self.print_level = 0
        if 'print_level' in kwargs.keys():
            self.print_level = int(kwargs['print_level'])
        # BUGFIX: default to 0 (= never update ranges) so run() does not
        # crash with AttributeError when 'update_vrange' is not supplied.
        self.update_vrs_per = kwargs.get('update_vrange', 0)

        # Mantegna's algorithm constants for Levy-flight step sizes.
        self.beta = 1.5
        self.betai = 1.0 /self.beta
        self.usgm = (gamma(1+self.beta)*np.sin(np.pi*self.beta/2)/ \
                     gamma((1+self.beta)/2)*self.beta*2.0**((self.beta-1)/2))**self.betai
        self.vsgm = 1.0

        #...initialize population: first individual from the given
        #   variables, the rest drawn randomly within the ranges
        self.population = []
        self.all_indivisuals = []
        self.iidmax = 0
        for i in range(N):
            self.iidmax += 1
            ind = Individual(self.iidmax, self.ndim, self.vrs, self.loss_func)
            if i == 0:
                ind.set_variable(self.vs)
            else:
                ind.init_random()
            self.population.append(ind)

        self._evaluate(self.population)
        self.keep_best()
        self.all_indivisuals.extend(self.population)
        if self.print_level > 2:
            for pi in self.population:
                self.write_variables(pi,
                                     fname='in.vars.fitpot.{0:d}'.format(pi.iid),
                                     **self.kwargs)
        else:
            self.write_variables(self.bestind,
                                 fname='in.vars.fitpot.{0:d}'.format(self.bestind.iid),
                                 **self.kwargs)
        return None

    def _evaluate(self, individuals):
        """Evaluate the loss of *individuals* in parallel worker processes.

        Results come back through one Queue per individual and are stored
        on each individual's `.val`.
        """
        qs = [ Queue() for i in range(len(individuals)) ]
        prcs = []
        for ip, pi in enumerate(individuals):
            kwtmp = copy.copy(self.kwargs)
            kwtmp['index'] = ip
            kwtmp['iid'] = pi.iid
            prcs.append(Process(target=pi.calc_loss_func, args=(kwtmp, qs[ip])))
        for p in prcs:
            p.start()
        for p in prcs:
            p.join()
        for ip, pi in enumerate(individuals):
            pi.val = qs[ip].get()
        return None

    def keep_best(self):
        """Store a deep copy of the population's best individual if it
        improves on the best seen so far."""
        vals = []
        for i,pi in enumerate(self.population):
            if pi.val is None:
                raise ValueError('Something went wrong.')
            vals.append(pi.val)
        minval = min(vals)
        if self.bestind is None or minval < self.bestind.val:
            idx = vals.index(minval)
            self.bestind = copy.deepcopy(self.population[idx])
        return None

    def sort_individuals(self):
        """Sort the population in ascending order of loss value."""
        jtop = self.N
        for i in range(self.N):
            jtop -= 1
            for j in range(jtop):
                pj = self.population[j]
                pjp = self.population[j+1]
                if pj.val > pjp.val:
                    self.population[j] = pjp
                    self.population[j+1] = pj

    def run(self,maxiter=100):
        """
        Perfom CS.

        maxiter: number of generations to evolve.
        """
        if 'start' in self.kwargs.keys():
            start = self.kwargs['start']
        else:
            start = time()
        # Log files: per-generation values and every adopted individual.
        fgen = open(_fname_gen,'w')
        find = open(_fname_ind,'w')
        for i,ind in enumerate(self.population):
            fgen.write(' 0 {0:8d} {1:12.4e}\n'.format(ind.iid, ind.val))
            find.write(' {0:8d} {1:12.4e}'.format(ind.iid, ind.val))
            for j,vj in enumerate(ind.vector):
                find.write(' {0:11.3e}'.format(vj))
            find.write('\n')
        if self.print_level > 0:
            self._report(0, start)
        for it in range(maxiter):
            candidates = []
            self.sort_individuals()
            #...Create candidates by Levy flight
            vbest = self.bestind.vector
            for ip,pi in enumerate(self.population):
                vi = pi.vector
                vnew = np.array(vi)
                for iv in range(self.ndim):
                    # Levy-distributed step (Mantegna): u/|v|^(1/beta)
                    u = np.random.normal()*self.usgm
                    v = abs(np.random.normal()*self.vsgm)
                    v = max(v,1.0e-8)
                    w = u/v**self.betai
                    zeta = self.vws[iv] *0.01 *w
                    vnew[iv] = vnew[iv] +zeta*np.random.normal()
                #...create new individual for trial
                self.iidmax += 1
                newind = Individual(self.iidmax, self.ndim, self.vrs, self.loss_func)
                newind.set_variable(vnew)
                candidates.append(newind)
            #...Evaluate loss function values
            self._evaluate(candidates)
            self.all_indivisuals.extend(candidates)
            #...Pick j that is to be compared with i
            js = random.sample(range(self.N),k=self.N)
            #...Decide whether or not to adopt new one
            for jc,jv in enumerate(js):
                pj = self.population[jv]
                cj = candidates[jc]
                dval = cj.val -pj.val
                if dval < 0.0: # replace with new individual
                    self.population[jv] = cj
                    find.write(' {0:8d} {1:12.4e}'.format(cj.iid, cj.val))
                    for k,vk in enumerate(cj.vector):
                        find.write(' {0:11.3e}'.format(vk))
                    find.write('\n')
            #...Rank individuals
            self.sort_individuals()
            #...Abandon bad ones and replace with random ones
            iab = int((1.0 -self.F)*self.N)
            candidates = []
            for iv in range(iab,self.N):
                self.iidmax += 1
                newind = Individual(self.iidmax, self.ndim, self.vrs, self.loss_func)
                newind.init_random()
                candidates.append(newind)
            #...Evaluate loss function values of new random ones
            self._evaluate(candidates)
            self.all_indivisuals.extend(candidates)
            #...Replace them with old ones
            ic = 0
            for iv in range(iab,self.N):
                ci = candidates[ic]
                ic += 1
                self.population[iv] = ci
            #...Check best
            for ic,ci in enumerate(self.population):
                if ci.val < self.bestind.val:
                    self.bestind = ci
                    self.write_variables(ci,
                                         fname='in.vars.fitpot.{0:d}'.format(ci.iid),
                                         **self.kwargs)
            #...Update variable ranges if needed
            if self.update_vrs_per > 0 and (it+1) % self.update_vrs_per == 0:
                self.vrs = update_vrange(self.vrs,self.all_indivisuals)
                print(' Update variable ranges')
                for i in range(len(self.vrs)):
                    print(' {0:2d}: {1:7.3f} {2:7.3f}'.format(i+1,self.vrs[i,0],self.vrs[i,1]))
                #...Set variable ranges of all individuals in the population
                for iv in range(len(self.population)):
                    self.population[iv].vranges = self.vrs
            if self.print_level > 0:
                self._report(it+1, start)
            for i,ind in enumerate(self.population):
                fgen.write(' {0:5d} {1:8d} {2:12.4e}\n'.format(it+1, ind.iid, ind.val))
        fgen.close()
        find.close()
        #...Finaly write out the best one
        self.write_variables(self.bestind,fname='in.vars.fitpot.best',**self.kwargs)
        return None

    def _report(self, step, start):
        """Print one progress line: step, wall time, best value, best vars."""
        print(' step,time,best,vars= {0:6d} {1:8.1f} {2:8.4f}'.format(step, time()-start,
                                                                      self.bestind.val),end="")
        for i in range(min(16,self.ndim)):
            print(' {0:6.3f}'.format(self.bestind.vector[i]),end="")
        print('', flush=True)

    def write_variables(self,ind,fname='in.vars.fitpot',**kwargs):
        """Write an individual's variables and ranges via self.write_func."""
        vs = ind.vector
        vrs = ind.vranges
        self.write_func(vs,vrs,fname,**kwargs)
        return None
if __name__ == "__main__":
    # Parse command-line options (usage pattern in the module docstring).
    args = docopt(__doc__)
    n = int(args['-n'])  # number of CS generations to run
    kwargs = {}
    kwargs['print_level'] = int(args['--print-level'])
    # Two-variable demo: minimize test_func starting from (1.0, -0.5)
    # within the box x in [-1, 2], y in [-1, 1].
    vs = np.array([1.0, -0.5])
    vrs = np.array([[-1.0, 2.0],[-1.0, 1.0]])
    # 10 individuals, abandon the worst 25% each generation.
    cs = CS(10, 0.25, vs, vrs, test_func, test_write_func, **kwargs)
    cs.run(n)
implemented the adaptive update of vrange in CS.
#!/usr/bin/env python
"""
Cuckoo search.
Usage:
cs.py [options]
Options:
-h, --help Show this message and exit.
-n N Number of generations in CS. [default: 20]
--print-level LEVEL
Print verbose level. [default: 1]
"""
from __future__ import print_function
import os,sys
from docopt import docopt
import numpy as np
from numpy import exp, sin, cos
import random
import copy
from multiprocessing import Process, Queue
from time import time
from scipy.special import gamma
__author__ = "RYO KOBAYASHI"
__version__ = "rev190920"
_fname_gen = 'out.cs.generations'
_fname_ind = 'out.cs.individuals'
def test_func(var, vranges, **kwargs):
x,y= var
res= x**2 +y**2 +100.0*exp(-x**2 -y**2)*sin(2.0*(x+y))*cos(2*(x-y)) \
+80.0*exp(-(x-1)**2 -(y-1)**2)*cos(x+4*y)*sin(2*x-y) \
+200.0*sin(x+y)*exp(-(x-3)**2-(y-1)**2)
return res
def test_write_func(vs,vrs,fname,**kwargs):
with open(fname,'w') as f:
for i,v in enumerate(vs):
vr = vrs[i]
f.write(' {0:10.3f} {1:10.3f} {2:10.3f}\n'.format(v,*vr))
return None
def wrap(vs, vrs):
    """Clamp every variable of *vs* into its (min, max) range from *vrs*.

    Returns a shallow copy; the input sequence is left untouched.
    """
    clamped = copy.copy(vs)
    for idx, value in enumerate(clamped):
        lo, hi = vrs[idx]
        clamped[idx] = min(max(value, lo), hi)
    return clamped
def update_vrange(vrs,all_indivisuals):
    """
    Update variable ranges adaptively using all the individuals information.

    Keeps the best `ntops` individuals (sorted ascending by loss value),
    shrinks each variable's (min, max) range to the span covered by them
    (never widening beyond `vrs`), then re-expands each range so it is
    symmetric around the best individual's value. Returns a new
    (ndim, 2) numpy array.
    """
    #...Extract top NTOPS individuals from all
    ntops = 100
    tops = []  # best individuals so far, kept sorted ascending by .val
    # print('len(all_indivisuals)=',len(all_indivisuals))
    for i,ind in enumerate(all_indivisuals):
        if len(tops) < ntops: # add the individual
            # print(' i (< ntops)=',i)
            # Insert before the first entry that is worse; if the scan
            # finishes without inserting, append at the end below.
            for it,t in enumerate(tops):
                if ind.val < t.val:
                    tops.insert(it,ind)
                    break
            if not ind in tops:
                tops.append(ind)
        else: # insert the individual and pop out the worst one
            # print(' i (>=ntops)=',i)
            for it,t in enumerate(tops):
                if ind.val < t.val:
                    tops.insert(it,ind)
                    break
            if len(tops) > ntops:
                # keep the top-list capped at ntops entries
                del tops[ntops:len(tops)]
    # print('len(tops)=',len(tops))
    # print('iids= ',[t.iid for t in tops])
    #...Get new ranges
    new_vrs = np.array(vrs)
    # Collect the top individuals' vectors into one (ntops, ndim) matrix.
    vss = np.zeros((len(tops),len(vrs)))
    for i,ind in enumerate(tops):
        vi = ind.vector
        vss[i,:] = vi[:]
        # print('i,vi=',i,vi)
    for j in range(len(new_vrs)):
        # print('j,min,max=',i,min(vss[:,j]),max(vss[:,j]))
        # Tighten only: raise the lower bound to the tops' minimum and
        # lower the upper bound to the tops' maximum.
        new_vrs[j,0] = max(new_vrs[j,0],min(vss[:,j]))
        new_vrs[j,1] = min(new_vrs[j,1],max(vss[:,j]))
    #...Set best variables center in the ranges
    # NOTE(review): fbest is assigned but never used — confirm intent.
    fbest = tops[0].val
    vbest = tops[0].vector
    for j in range(len(vbest)):
        vjmin = new_vrs[j,0]
        vjmax = new_vrs[j,1]
        # Widen symmetrically so the best value sits at the range center.
        wmax = max(abs(vjmin-vbest[j]),abs(vjmax-vbest[j]))
        new_vrs[j,0] = min(vjmin,vbest[j]-wmax)
        new_vrs[j,1] = max(vjmax,vbest[j]+wmax)
    return new_vrs
class Individual:
    """
    A single candidate solution: a variable vector plus its cached loss.
    """

    def __init__(self, iid, ndim, vranges, loss_func):
        self.iid = iid              # unique individual id
        self.ndim = ndim            # number of variables
        self.loss_func = loss_func  # callable(vector, vranges, **kwargs) -> loss
        self.vector = np.zeros(self.ndim)
        self.vranges = vranges      # per-variable (min, max) ranges
        self.val = None             # loss value; None means "not yet evaluated"

    def set_variable(self, variables):
        """Adopt *variables* as this individual's vector, clamped to ranges."""
        if len(variables) != len(self.vector):
            raise ValueError()
        self.vector = variables
        self.wrap_range()
        self.val = None
        return None

    def init_random(self):
        """Draw every variable uniformly at random within its range."""
        for idx in range(self.ndim):
            lo, hi = self.vranges[idx]
            self.vector[idx] = random.random()*(hi - lo) + lo
        self.wrap_range()
        self.val = None
        return None

    def wrap_range(self):
        """Clamp the vector back into the allowed variable ranges."""
        self.vector = wrap(self.vector, self.vranges)

    def calc_loss_func(self, kwargs, q):
        """
        Evaluate self.loss_func and put the result on queue *q*.

        The queue-based return value lets this run as the target of a
        multiprocessing.Process.
        """
        q.put(self.loss_func(self.vector, self.vranges, **kwargs))
        return None
class CS:
    """
    Cuckoo search class.

    The population is evolved by Levy-flight moves; each generation a
    fraction F of the worst individuals is abandoned and replaced with
    random ones.  Loss evaluations run in parallel, one
    multiprocessing.Process per individual.
    """

    def __init__(self, N, F, variables, vranges, loss_func, write_func, **kwargs):
        """
        Conctructor of CS class.

        N: Number of individuals.
        F: Fraction of worse individuals to be abondoned.
        variables: Initial variable vector (kept as the first individual).
        vranges: Array of (min, max) pairs, one per variable.
        loss_func:
            Loss function to be minimized with variables and **kwargs.
        write_func: Function used to write an individual's variables to file.

        Recognized kwargs:
            print_level: verbosity (int, default 0).
            update_vrange: update variable ranges every this many steps;
                0 (the default) disables the update.
        """
        if N < 2:
            raise ValueError('N must be greater than 1 in CS!')
        self.N = N  # Number of individuals in a generation
        self.F = F  # Fraction of worse individuals to be abondoned
        self.ndim = len(variables)
        self.vs = variables
        self.vrs0 = vranges
        self.vrs = copy.copy(self.vrs0)
        self.vws = np.zeros(self.ndim)  # width of each variable range
        for i in range(self.ndim):
            self.vws[i] = max(self.vrs[i,1] -self.vrs[i,0], 0.0)
        self.loss_func = loss_func
        self.write_func = write_func
        self.kwargs = kwargs
        self.bestind = None
        self.print_level = 0
        if 'print_level' in kwargs.keys():
            self.print_level = int(kwargs['print_level'])
        # BUGFIX: run() always reads self.update_vrs_per, but it used to be
        # set only when the 'update_vrange' kwarg was given; default to 0
        # (never update) so run() works without that kwarg.
        self.update_vrs_per = 0
        if 'update_vrange' in kwargs.keys():
            self.update_vrs_per = kwargs['update_vrange']
        #...Constants for the Levy-flight step length (Mantegna's algorithm)
        self.beta = 1.5
        self.betai = 1.0 /self.beta
        self.usgm = (gamma(1+self.beta)*np.sin(np.pi*self.beta/2)/ \
                     gamma((1+self.beta)/2)*self.beta*2.0**((self.beta-1)/2))**self.betai
        self.vsgm = 1.0
        #...initialize population: individual 1 keeps the given variables,
        #   the others are drawn at random
        self.population = []
        self.all_indivisuals = []
        self.iidmax = 0
        for i in range(N):
            self.iidmax += 1
            ind = Individual(self.iidmax, self.ndim, self.vrs, self.loss_func)
            if i == 0:
                ind.set_variable(self.vs)
            else:
                ind.init_random()
            self.population.append(ind)
        #...Evaluate loss function values of the initial population
        self._evaluate(self.population)
        self.keep_best()
        self.all_indivisuals.extend(self.population)
        if self.print_level > 2:
            for pi in self.population:
                self.write_variables(pi,
                                     fname='in.vars.fitpot.{0:d}'.format(pi.iid),
                                     **self.kwargs)
        else:
            self.write_variables(self.bestind,
                                 fname='in.vars.fitpot.{0:d}'.format(self.bestind.iid),
                                 **self.kwargs)
        return None

    def _evaluate(self, individuals):
        """
        Evaluate the loss of *individuals* in parallel and store the
        results in their .val attributes.

        One multiprocessing.Process per individual; each result comes back
        through a dedicated Queue.  (Shared helper for __init__ and run.)
        """
        qs = [ Queue() for _ in individuals ]
        prcs = []
        for ip,pi in enumerate(individuals):
            kwtmp = copy.copy(self.kwargs)
            kwtmp['index'] = ip
            kwtmp['iid'] = pi.iid
            prcs.append(Process(target=pi.calc_loss_func, args=(kwtmp,qs[ip])))
        for p in prcs:
            p.start()
        for p in prcs:
            p.join()
        for ip,pi in enumerate(individuals):
            pi.val = qs[ip].get()
        return None

    def keep_best(self):
        """Remember the best (lowest-loss) individual of the population."""
        vals = []
        for i,pi in enumerate(self.population):
            if pi.val is None:
                raise ValueError('Something went wrong.')
            vals.append(pi.val)
        minval = min(vals)
        if self.bestind is None or minval < self.bestind.val:
            idx = vals.index(minval)
            # deep copy so later changes to the population cannot
            # corrupt the stored best individual
            self.bestind = copy.deepcopy(self.population[idx])
        return None

    def sort_individuals(self):
        """Sort the population in place by ascending loss (bubble sort)."""
        jtop = self.N
        for i in range(self.N):
            jtop -= 1
            for j in range(jtop):
                pj = self.population[j]
                pjp = self.population[j+1]
                if pj.val > pjp.val:
                    self.population[j] = pjp
                    self.population[j+1] = pj

    def run(self,maxiter=100):
        """
        Perfom CS.

        maxiter: Number of generations to evolve.

        Logs every generation to out.cs.generations, every accepted
        individual to out.cs.individuals, and finally writes the best
        variables to in.vars.fitpot.best.
        """
        if 'start' in self.kwargs.keys():
            start = self.kwargs['start']
        else:
            start = time()
        fgen = open(_fname_gen,'w')
        find = open(_fname_ind,'w')
        #...Log the initial population
        for i,ind in enumerate(self.population):
            fgen.write(' 0 {0:8d} {1:12.4e}\n'.format(ind.iid, ind.val))
            find.write(' {0:8d} {1:12.4e}'.format(ind.iid, ind.val))
            for j,vj in enumerate(ind.vector):
                find.write(' {0:11.3e}'.format(vj))
            find.write('\n')
        if self.print_level > 0:
            print(' step,time,best,vars= {0:6d} {1:8.1f} {2:8.4f}'.format(0, time()-start,
                                                                          self.bestind.val),end="")
            for i in range(min(16,self.ndim)):
                print(' {0:6.3f}'.format(self.bestind.vector[i]),end="")
            print('', flush=True)
        for it in range(maxiter):
            candidates = []
            self.sort_individuals()
            #...Create candidates by Levy flight
            vbest = self.bestind.vector
            for ip,pi in enumerate(self.population):
                vi = pi.vector
                vnew = np.array(vi)
                for iv in range(self.ndim):
                    #...Mantegna's algorithm: heavy-tailed step length w
                    u = np.random.normal()*self.usgm
                    v = abs(np.random.normal()*self.vsgm)
                    v = max(v,1.0e-8)
                    w = u/v**self.betai
                    zeta = self.vws[iv] *0.01 *w
                    vnew[iv] = vnew[iv] +zeta*np.random.normal()
                #...create new individual for trial
                self.iidmax += 1
                newind = Individual(self.iidmax, self.ndim, self.vrs, self.loss_func)
                newind.set_variable(vnew)
                candidates.append(newind)
            #...Evaluate loss function values
            self._evaluate(candidates)
            self.all_indivisuals.extend(candidates)
            #...Pick j that is to be compared with i
            js = random.sample(range(self.N),k=self.N)
            #...Decide whether or not to adopt new one
            for jc,jv in enumerate(js):
                pj = self.population[jv]
                cj = candidates[jc]
                dval = cj.val -pj.val
                if dval < 0.0: # replace with new individual
                    self.population[jv] = cj
                    find.write(' {0:8d} {1:12.4e}'.format(cj.iid, cj.val))
                    for k,vk in enumerate(cj.vector):
                        find.write(' {0:11.3e}'.format(vk))
                    find.write('\n')
            #...Rank individuals
            self.sort_individuals()
            #...Abandon bad ones and replace with random ones
            iab = int((1.0 -self.F)*self.N)
            candidates = []
            for iv in range(iab,self.N):
                self.iidmax += 1
                newind = Individual(self.iidmax, self.ndim, self.vrs, self.loss_func)
                newind.init_random()
                candidates.append(newind)
            #...Evaluate loss function values of new random ones
            self._evaluate(candidates)
            self.all_indivisuals.extend(candidates)
            #...Replace the worst individuals with the new random ones
            ic = 0
            for iv in range(iab,self.N):
                ci = candidates[ic]
                ic += 1
                self.population[iv] = ci
            #...Check best
            for ic,ci in enumerate(self.population):
                if ci.val < self.bestind.val:
                    self.bestind = ci
                    self.write_variables(ci,
                                         fname='in.vars.fitpot.{0:d}'.format(ci.iid),
                                         **self.kwargs)
            #...Update variable ranges if needed
            if self.update_vrs_per > 0 and (it+1) % self.update_vrs_per == 0:
                self.vrs = update_vrange(self.vrs,self.all_indivisuals)
                print(' Update variable ranges')
                for i in range(len(self.vrs)):
                    print(' {0:2d}: {1:7.3f} {2:7.3f}'.format(i+1,self.vrs[i,0],self.vrs[i,1]))
                #...Set variable ranges of all individuals in the population
                for iv in range(len(self.population)):
                    self.population[iv].vranges = self.vrs
            if self.print_level > 0:
                print(' step,time,best,vars= {0:6d} {1:8.1f} {2:8.4f}'.format(it+1, time()-start,
                                                                              self.bestind.val),end="")
                for i in range(min(16,self.ndim)):
                    print(' {0:6.3f}'.format(self.bestind.vector[i]),end="")
                print('', flush=True)
            for i,ind in enumerate(self.population):
                fgen.write(' {0:5d} {1:8d} {2:12.4e}\n'.format(it+1, ind.iid, ind.val))
        fgen.close()
        find.close()
        #...Finaly write out the best one
        self.write_variables(self.bestind,fname='in.vars.fitpot.best',**self.kwargs)
        return None

    def write_variables(self,ind,fname='in.vars.fitpot',**kwargs):
        """Write the variables and ranges of *ind* to *fname* via write_func."""
        vs = ind.vector
        vrs = ind.vranges
        self.write_func(vs,vrs,fname,**kwargs)
        return None
if __name__ == "__main__":
    # Self-test: minimize test_func over a 2-variable box with cuckoo search.
    args = docopt(__doc__)
    n = int(args['-n'])                        # number of CS iterations
    kwargs = {}
    kwargs['print_level'] = int(args['--print-level'])
    vs = np.array([1.0, -0.5])                 # initial guess
    vrs = np.array([[-1.0, 2.0],[-1.0, 1.0]])  # search ranges per variable
    cs = CS(10, 0.25, vs, vrs, test_func, test_write_func, **kwargs)
    cs.run(n)
|
# -*- coding: utf-8 -*-
#
# this program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# this program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# The Graphics Interchange Format(c) is the Copyright property of
# CompuServe Incorporated. GIF(sm) is a Service Mark property of
# CompuServe Incorporated.
#
# The unisys/lzw patent has expired, yes. If anyone puts another patent
# over this code, you must *burn* this file.
'''pygif: gif implementation in python
http://www.java2s.com/Open-Source/Python/Network/emesene/emesene-1.6.2/pygif/pygif.py.htm'''
#TODO issues to fix
#optimize for speed #partially done# a lot of room for improvement
# its just SLOW.... CRAWLING
import struct
from array import array
import math
KNOWN_FORMATS = ('GIF87a', 'GIF89a')
from kivy.logger import Logger
from . import ImageLoaderBase, ImageData, ImageLoader
Debug = False
class ImageLoaderGIF(ImageLoaderBase):
    '''Image loader for gif

    Decodes every GIF frame into an RGBA ImageData.  All frames share one
    pixel buffer, so each frame is composed on top of the previous one.'''
    @staticmethod
    def extensions():
        '''Return accepted extension for this loader'''
        return ('gif', )
    def load(self, filename):
        '''Decode *filename* and return a list of ImageData, one per frame.'''
        Logger.debug('Image_GIF: Load <%s>' % filename)
        try:
            try:
                im = GifDecoder(open(filename, 'rb').read())
            except UnicodeEncodeError:
                # retry with a byte-encoded path for non-ascii filenames
                im = GifDecoder(open(filename.encode('utf8'), 'rb').read())
        except:
            Logger.warning('Image: Unable to load Image <%s>' % filename)
            raise
        if Debug:
            print im.print_info()
        img_data = []
        ls_width = im.ls_width
        ls_height = im.ls_height
        # one shared RGBA buffer covering the whole logical screen
        pixel_map = array('B', [0]*(ls_width*ls_height*4))
        for img in im.images:
            # a frame-local color table overrides the global one
            pallete = img.pallete if img.local_color_table_flag\
                else im.pallete
            have_transparent_color = img.transparent_color > -1
            transparent_color = img.transparent_color
            # NOTE(review): draw_method_restore_previous is computed but
            # never used below.
            draw_method_restore_previous = 1 \
                if img.draw_method == 'restore previous' else 0
            draw_method_replace = 1 \
                if ((img.draw_method == 'replace') or\
                (img.draw_method == 'restore background')) else 0
            pixels = img.pixels
            img_height = img.height
            img_width = img.width
            left = img.left
            top = img.top
            #reverse top to bottom and left to right
            tmp_top = (ls_height - (img_height+ top))
            img_width_plus_left = (img_width+ left)
            # hoisted loop invariants (speed)
            ls_width_multiply_4 = ls_width * 4
            left_multiply_4 = left * 4
            while img_height > 0:
                i = left
                img_height -= 1
                # x indexes the frame's own pixel rows, bottom-up
                x = (img_height * img_width) - left
                rgba_pos = (tmp_top * ls_width_multiply_4) + (left_multiply_4)
                tmp_top += 1
                while i < img_width_plus_left:
                    (r, g, b) = pallete[pixels[x + i]]
                    # when not magic pink
                    if (r, g, b) != (255,0,255):
                        if have_transparent_color:
                            if transparent_color == pixels[x + i] :
                                if draw_method_replace:
                                    #transparent pixel draw method replace
                                    pixel_map[rgba_pos + 3] = 0
                                    rgba_pos += 4
                                    i += 1
                                    continue
                                #transparent pixel draw method combine
                                rgba_pos += 4
                                i += 1
                                continue
                            # this pixel isn't transparent
                        #doesn't have transparent color
                        (pixel_map[rgba_pos], pixel_map[rgba_pos + 1],\
                            pixel_map[rgba_pos + 2]) = (r, g, b)
                        pixel_map[rgba_pos + 3] = 255
                    # if magic pink move to next pixel
                    rgba_pos += 4
                    i += 1
            img_data.append(ImageData(ls_width, ls_height, \
                'rgba', pixel_map.tostring()))
        self.filename = filename
        return img_data
class Gif(object):
    '''Base class to decoder

    Holds the logical-screen description, the global color table and the
    list of decoded frames; GifDecoder fills these in.'''
    # struct format strings
    #17,18: header + logical screen descriptor (sig, w, h, flags, bg, aspect)
    FMT_HEADER = '<6sHHBBB'
    #20: image descriptor (left, top, width, height, flags)
    FMT_IMGDESC = '<HHHHB'
    # block-introducer byte values from the GIF spec
    IMAGE_SEPARATOR = 0x2C
    EXTENSION_INTRODUCER = 0x21
    GIF_TRAILER = 0x3b
    # extension labels
    LABEL_GRAPHIC_CONTROL = 0xF9
    LABEL_COMMENT = 0xFE
    LABEL_PLAINTEXT = 0x01
    FMT_EXT_GRAPHIC_CONTROL = '<BBHB' #89a
    def __init__( self, data, debug ):
        # NOTE(review): *debug* is accepted but never stored;
        # debug_enabled is hard-coded to False below.
        self.data = data
        self.pointer = 0   # read cursor into self.data, advanced by pop()
        # default data for an empty file
        self.header = 'GIF87a'
        self.ls_width = 0
        self.ls_height = 0
        self.flags = 0
        self.color_resolution = 0
        self.sort_flag = 0
        self.color_table_flag = 0
        self.global_color_table_size = 0
        self.background_color = 0
        self.aspect_ratio = 0
        # greyscale pallete by default
        self.pallete = [(x, x, x) for x in range(0, 256)]
        self.images = []
        self.debug_enabled = False
        return
    def pop( self, data, length=1 ):
        '''gets the next $len chars from the data stack import
        and increment the pointer'''
        start = self.pointer
        end = self.pointer + length
        self.pointer += length
        return data[start:end]
    def pops( self, format, data ):
        '''pop struct: get size, pop(), unpack()'''
        size = struct.calcsize(format)
        return struct.unpack( format, self.pop(data, size) )
    def print_info( self ):
        '''prints out some useful info (..debug?)'''
        print "Version: %s" % self.header
        print "Logical screen width: %d" % self.ls_width
        print "Logical screen height: %d" % self.ls_height
        print "Flags: %s" % repr(self.flags)
        print " "*6,"Color resolution: %d" % self.color_resolution
        print " "*6,"Sort flag: %s" % str(self.sort_flag)
        print " "*6,"Global color table flag: %s" % str(self.color_table_flag)
        print " "*22,"...size: %d (%d bytes)" % \
            (self.global_color_table_size, self.global_color_table_size * 3)
        print "Background color: %d" % self.background_color
        print "Aspect ratio info: %d" % self.aspect_ratio
    def new_image( self, header=None):
        '''adds a new image descriptor'''
        image = ImageDescriptor(self, header)
        self.images.append(image)
        return image
class ImageDescriptor(object):
    '''A class that represents a single image (one GIF frame)'''
    def __init__( self, parent, header=None ):
        self.parent = parent   # the owning Gif/GifDecoder
        # this will be set when needed
        self.codesize = 0
        # compressed output codes
        self.lzwcode = ''
        # uncompressed pixels (decoded)
        self.pixels = []
        # we assume a "fullscreen" image
        self.left = self.top = 0
        self.width = parent.ls_width
        self.height = parent.ls_height
        # yes, these default flags work...
        self.flags = [False for x in range(8)]
        self.local_color_table_flag = False
        self.interlace_flag = False
        self.sort_flag = False
        self.local_color_table_size = 0
        self.draw_method = 'overwrite'
        self.transparent_color = -1   # -1 means "no transparent color"
        self.pallete = []
        if header:
            self.setup_header(header)
    def setup_header( self, header ):
        '''takes a header tuple and fills the attributes'''
        self.left = header[0]
        self.top = header[1]
        self.width = header[2]
        self.height = header[3]
        # flags come back LSB-first from get_bits, so index 7 is bit 7
        self.flags = get_bits( header[4] )
        self.local_color_table_flag = self.flags[7]
        self.interlace_flag = self.flags[6]
        self.sort_flag = self.flags[5]
        #-- flags 4 and 3 are reserved
        self.local_color_table_size = 2 ** (pack_bits(self.flags[:3]) + 1)
        if self.local_color_table_flag:
            if Debug: print 'local color table true'
            # NOTE(review): the parent's *global* table size and palette are
            # overwritten with the local ones — confirm this is intended.
            self.parent.global_color_table_size = self.local_color_table_size
            size = (self.local_color_table_size) * 3
            self.pallete = self.parent.get_color_table(size)
            self.parent.pallete = self.pallete
    def get_header(self):
        '''builds a header dynamically'''
        flags = [False for x in range(8)]
        flags[7] = self.local_color_table_flag
        flags[6] = self.interlace_flag
        flags[5] = self.sort_flag
        # useless!
        flags[2], flags[1], flags[0] = get_bits(len(self.pallete), bits=3)
        return (self.left, self.top, self.width, self.height, pack_bits(flags))
    header = property(fget=get_header)
class GifDecoder( Gif ):
    '''decodes a gif file into.. something.. else..

    Walks the GIF block stream and fills self.images with one
    ImageDescriptor (including LZW-decoded pixels) per frame.'''
    def __init__( self, data, debug=False ):
        Gif.__init__( self, data, debug )
        self.fill()
    def fill( self ):
        '''reads the data and fills each field of the file'''
        # start reading from the beggining of the file
        self.pointer = 0
        #17. Header.
        #18. Logical Screen Descriptor.
        data = self.pops( Gif.FMT_HEADER, self.data )
        self.header = data[0]
        self.ls_width = data[1]
        self.ls_height = data[2]
        self.background_color = data[4]
        self.aspect_ratio = data[5]
        # flags field (LSB-first bit list)
        self.flags = get_bits( data[3] )
        #1 bit
        self.color_table_flag = self.flags[7]
        self.sort_flag = self.flags[3]
        #3 bit
        self.color_resolution = pack_bits(self.flags[4:7]) # 7 not included
        #3 bit
        self.global_color_table_size = 2 ** (pack_bits(self.flags[:3]) + 1)
        #19. Global Color Table.
        if self.color_table_flag:
            size = (self.global_color_table_size) * 3
            self.pallete = self.get_color_table(size)
        else:
            # generate a greyscale pallete
            self.pallete = [(x, x, x) for x in range(256)]
        # blocks
        image = None
        # methods/constants bound to locals once, for speed in the loop
        self_data = self.data
        self_pops = self.pops
        Gif_IMAGE_SEPARATOR = Gif.IMAGE_SEPARATOR
        Gif_FMT_IMGDESC = Gif.FMT_IMGDESC
        self_new_image = self.new_image
        self_pop = self.pop
        self_debug_enabled = self.debug_enabled
        self_lzw_decode = self.lzw_decode
        self_global_color_table_size = self.global_color_table_size
        Gif_EXTENSION_INTRODUCER = Gif.EXTENSION_INTRODUCER
        Gif_GIF_TRAILER = Gif.GIF_TRAILER
        Gif_LABEL_GRAPHIC_CONTROL = Gif.LABEL_GRAPHIC_CONTROL
        # NOTE(review): trans_color / drw_method are only assigned by the
        # graphic-control branch below; an image descriptor that is not
        # preceded by one would raise NameError here — confirm.
        while True:
            try:
                nextbyte = self_pops('<B', self_data)[0]
            except:
                nextbyte = 0x3b # force end
            #20. Image Descriptor
            if nextbyte == Gif_IMAGE_SEPARATOR:
                descriptor = self_pops(Gif_FMT_IMGDESC, self_data)
                image = self_new_image(descriptor)
                image.transparent_color = trans_color
                image.draw_method = drw_method
                image.codesize = self_pops('<B', self_data)[0]
                image.lzwcode = ''
                image_lzwcode = image.lzwcode
                # concatenate data sub-blocks until a zero-size terminator
                while True:
                    try:
                        blocksize = self_pops('<B', self_data)[0]
                    except:
                        break
                    if blocksize == 0:
                        break # no more image data
                    lzwdata = self_pop(self_data, blocksize)
                    image_lzwcode = ''.join((image_lzwcode, lzwdata))
                if self_debug_enabled:
                    print 'LZW length:', len(image_lzwcode)
                image.lzwcode = image_lzwcode
                image.pixels = self_lzw_decode(image.lzwcode, image.codesize, \
                    self_global_color_table_size)
            # Extensions
            elif nextbyte == Gif_EXTENSION_INTRODUCER:
                pass
            # Gif trailer
            elif nextbyte == Gif_GIF_TRAILER:
                return
            elif nextbyte == Gif_LABEL_GRAPHIC_CONTROL:
                #if self_debug_enabled: print 'LABEL_GRAPHIC_CONTROL'
                nextbyte = self_pops('<B', self_data)[0]
                #if self_debug_enabled: print 'block size:%d' %nextbyte
                # disposal method lives in bits 2-4 of the packed field
                drw_bits = (get_bits(self_pops('<B', self_data)[0]))
                if drw_bits[2:5] == array('B', [0,0,1]):
                    drw_method = 'replace'
                elif (drw_bits[2:5]) == array('B', [0,1,0]):
                    drw_method = 'restore background'
                else:
                    drw_method = 'restore previous'
                #if self_debug_enabled:
                #    print 'draw_method :'+ drw_method
                nextbyte = self_pops('<B', self_data)[0]
                #if self_debug_enabled: print 'fields:%d' %nextbyte
                nextbyte = self_pops('<B', self_data)[0]
                #if self_debug_enabled: print 'duration:%d' %nextbyte # delay?
                nextbyte = self_pops('<B', self_data)[0]
                trans_color = nextbyte
                #if Debug: print 'transparent color index :%d' %trans_color
                pass
            # "No Idea What Is This"
            else:
                pass
    def string_to_bits(self, string):
        '''high level string unpacker'''
        ordarray = array('B', string)
        bits = array('B')
        bits_append = bits.append
        for byte in ordarray:
            map (bits_append, get_bits(byte))
        return bits
    def bits_to_string(bits):
        '''high level bit list packer'''
        # NOTE(review): missing *self* parameter — only usable as an
        # unbound helper; appears unused inside this class.
        string = ''
        while len(bits)>0:
            code = pack_bits(bits[:8])
            bits = bits[8:]
            string = ''.join((string, chr(code)))
        return string
    def readable(bool_list):
        '''Converts a list of booleans to a readable list of ints
        Useful for debug only'''
        # NOTE(review): also missing *self*; debug-only helper.
        return [int(x) for x in bool_list]
    def bits_to_int(self, bits):
        '''high level bit list packer'''
        # interprets *bits* MSB-first; reverses the array in place
        i = 0
        c = 0
        bits.reverse()
        while c < len(bits):
            if bits[c]:
                i += 2 ** (len(bits) - c - 1)
            c += 1
        return i
    def get_color_table( self, size ):
        '''Returns a color table in the format [(r,g,b),(r,g,b), ...]'''
        raw_color_table = self.pops("<%dB" % size, self.data)
        pos = 0
        pallete = []
        pallete_append = pallete.append
        while pos + 3 < (size+1):
            red = raw_color_table[pos]
            green = raw_color_table[pos+1]
            blue = raw_color_table[pos+2]
            pallete_append((red, green, blue))
            pos += 3
        return pallete
    def lzw_decode(self, input, initial_codesize, color_table_size):
        '''Decodes a lzw stream from input import
        Returns list of ints (pixel values)'''
        string_table = {}
        output = array('B')
        output_append = output.append
        output_extend = output.extend
        old = ''
        index = 0
        codesize = initial_codesize + 1
        # the two reserved codes right after the color-table entries
        clearcode, end_of_info = color_table_size, color_table_size + 1
        if Debug:
            print 'codesize: %d' %codesize
            print 'clearcode %d, end_of_info: %d' % (clearcode, end_of_info)
        bits = self.string_to_bits(input)
        def pop(size):
            #NOTE:OPTIMISE THIS, according to pycallgraph
            #this function takes exponentially more time
            #for a bigger file size
            # 7+secs for opening a (37.7kb and 71.8kb) files
            # and 72+ secs for (37.7, 164.2kb) files
            '''Pops <size> bits from <bits>'''
            out = array('B')
            out_append = out.append
            bits_pop = bits.pop
            for i in range(size):
                out_append(bits_pop(0))
            return out
        def clear():
            '''Called on clear code'''
            # reset the string table to the bare color-table entries
            string_table.clear()
            for index in range(color_table_size):
                string_table[index] = chr(index)
            index = end_of_info + 1
            return index
        index = clear()
        # skip first (clear)code
        bits = bits[codesize:]
        # read first code, append to output
        self_bits_to_int = self.bits_to_int
        code = self_bits_to_int(pop(codesize))
        output_append(ord(string_table[code]))
        old = string_table[code]
        while len(bits) > 0:
            # read next code
            #if Debug:
            # print 'length to decode :%d' %len(bits)
            code = self_bits_to_int(pop(codesize))
            # special code?
            if code == clearcode:
                index = clear()
                codesize = initial_codesize + 1
                code = self_bits_to_int(pop(codesize))
                output_append(ord(string_table[code]))
                old = string_table[code]
                continue
            elif code == end_of_info:
                break
            # code in stringtable?
            if code in string_table:
                c = string_table[code]
                string_table[index] = ''.join((old, c[0]))
            else:
                c = ''.join((old, old[0]))
                string_table[code] = c
            index += 1
            old = c
            output_extend(map(ord, c))
            # grow the code width when the table fills the current range
            if index == 2 ** codesize:
                codesize += 1
                if codesize == 13:
                    codesize = 12
                    print 'decoding error, missed a clearcode?'
                    print 'index:', index
                    #exit()
        if self.debug_enabled:
            print 'Output stream len: %d' % len(output)
        return output
def get_bits( flags, reverse=False, bits=8 ):
    '''Unpack the low *bits* bits of *flags* into an array('B') of 0/1
    entries, least-significant bit first (most-significant first when
    *reverse* is set).'''
    masks = (1, 2, 4, 8, 16, 32, 64, 128)[:bits]
    if reverse:
        masks = masks[::-1]
    return array('B', [int(flags & mask != 0) for mask in masks])
def pack_bits( bits ):
    '''Pack a sequence of bit flags (bools or 0/1 ints, least-significant
    bit first) back into a single integer; inverse of get_bits.'''
    return sum(1 << position for position, flag in enumerate(bits) if flag)
# register this GIF loader with kivy's global image-loader registry
ImageLoader.register(ImageLoaderGIF)
Optimisations: major bottleneck out of the way :)
a few small optimisations are still left
#-*- coding: utf-8 -*-
#
# this program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# this program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# The Graphics Interchange Format(c) is the Copyright property of
# CompuServe Incorporated. GIF(sm) is a Service Mark property of
# CompuServe Incorporated.
#
# The unisys/lzw patent has expired, yes. If anyone puts another patent
# over this code, you must *burn* this file.
'''pygif: gif implementation in python
http://www.java2s.com/Open-Source/Python/Network/emesene/emesene-1.6.2/pygif/pygif.py.htm'''
#TODO issues to fix
#optimize for speed #partially done# a lot of room for improvement
# its just SLOW.... CRAWLING
import struct
from array import array
import math
KNOWN_FORMATS = ('GIF87a', 'GIF89a')
from kivy.logger import Logger
from . import ImageLoaderBase, ImageData, ImageLoader
Debug = False
class ImageLoaderGIF(ImageLoaderBase):
    '''Image loader for gif

    Decodes every GIF frame into an RGBA ImageData.  All frames share one
    pixel buffer, so each frame is composed on top of the previous one.'''
    @staticmethod
    def extensions():
        '''Return accepted extension for this loader'''
        return ('gif', )
    def load(self, filename):
        '''Decode *filename* and return a list of ImageData, one per frame.'''
        Logger.debug('Image_GIF: Load <%s>' % filename)
        try:
            try:
                im = GifDecoder(open(filename, 'rb').read())
            except UnicodeEncodeError:
                # retry with a byte-encoded path for non-ascii filenames
                im = GifDecoder(open(filename.encode('utf8'), 'rb').read())
        except:
            Logger.warning('Image: Unable to load Image <%s>' % filename)
            raise
        if Debug:
            print im.print_info()
        img_data = []
        ls_width = im.ls_width
        ls_height = im.ls_height
        # one shared RGBA buffer covering the whole logical screen
        pixel_map = array('B', [0]*(ls_width*ls_height*4))
        for img in im.images:
            # a frame-local color table overrides the global one
            pallete = img.pallete if img.local_color_table_flag\
                else im.pallete
            have_transparent_color = img.transparent_color > -1
            transparent_color = img.transparent_color
            # NOTE(review): draw_method_restore_previous is computed but
            # never used below.
            draw_method_restore_previous = 1 \
                if img.draw_method == 'restore previous' else 0
            draw_method_replace = 1 \
                if ((img.draw_method == 'replace') or\
                (img.draw_method == 'restore background')) else 0
            pixels = img.pixels
            img_height = img.height
            img_width = img.width
            left = img.left
            top = img.top
            #reverse top to bottom and left to right
            tmp_top = (ls_height - (img_height+ top))
            img_width_plus_left = (img_width+ left)
            # hoisted loop invariants (speed)
            ls_width_multiply_4 = ls_width * 4
            left_multiply_4 = left * 4
            while img_height > 0:
                i = left
                img_height -= 1
                # x indexes the frame's own pixel rows, bottom-up
                x = (img_height * img_width) - left
                rgba_pos = (tmp_top * ls_width_multiply_4) + (left_multiply_4)
                tmp_top += 1
                while i < img_width_plus_left:
                    (r, g, b) = pallete[pixels[x + i]]
                    # when not magic pink
                    if (r, g, b) != (255,0,255):
                        if have_transparent_color:
                            if transparent_color == pixels[x + i] :
                                if draw_method_replace:
                                    #transparent pixel draw method replace
                                    pixel_map[rgba_pos + 3] = 0
                                    rgba_pos += 4
                                    i += 1
                                    continue
                                #transparent pixel draw method combine
                                rgba_pos += 4
                                i += 1
                                continue
                            # this pixel isn't transparent
                        #doesn't have transparent color
                        (pixel_map[rgba_pos], pixel_map[rgba_pos + 1],\
                            pixel_map[rgba_pos + 2]) = (r, g, b)
                        pixel_map[rgba_pos + 3] = 255
                    # if magic pink move to next pixel
                    rgba_pos += 4
                    i += 1
            img_data.append(ImageData(ls_width, ls_height, \
                'rgba', pixel_map.tostring()))
        self.filename = filename
        return img_data
class Gif(object):
    '''Base class to decoder

    Holds the logical-screen description, the global color table and the
    list of decoded frames; GifDecoder fills these in.'''
    # struct format strings
    #17,18: header + logical screen descriptor (sig, w, h, flags, bg, aspect)
    FMT_HEADER = '<6sHHBBB'
    #20: image descriptor (left, top, width, height, flags)
    FMT_IMGDESC = '<HHHHB'
    # block-introducer byte values from the GIF spec
    IMAGE_SEPARATOR = 0x2C
    EXTENSION_INTRODUCER = 0x21
    GIF_TRAILER = 0x3b
    # extension labels
    LABEL_GRAPHIC_CONTROL = 0xF9
    LABEL_COMMENT = 0xFE
    LABEL_PLAINTEXT = 0x01
    FMT_EXT_GRAPHIC_CONTROL = '<BBHB' #89a
    def __init__( self, data, debug ):
        # NOTE(review): *debug* is accepted but never stored;
        # debug_enabled is hard-coded to False below.
        self.data = data
        self.pointer = 0   # read cursor into self.data, advanced by pop()
        # default data for an empty file
        self.header = 'GIF87a'
        self.ls_width = 0
        self.ls_height = 0
        self.flags = 0
        self.color_resolution = 0
        self.sort_flag = 0
        self.color_table_flag = 0
        self.global_color_table_size = 0
        self.background_color = 0
        self.aspect_ratio = 0
        # greyscale pallete by default
        self.pallete = [(x, x, x) for x in range(0, 256)]
        self.images = []
        self.debug_enabled = False
        return
    def pop( self, data, length=1 ):
        '''gets the next $len chars from the data stack import
        and increment the pointer'''
        start = self.pointer
        end = self.pointer + length
        self.pointer += length
        return data[start:end]
    def pops( self, format, data ):
        '''pop struct: get size, pop(), unpack()'''
        size = struct.calcsize(format)
        return struct.unpack( format, self.pop(data, size) )
    def print_info( self ):
        '''prints out some useful info (..debug?)'''
        print "Version: %s" % self.header
        print "Logical screen width: %d" % self.ls_width
        print "Logical screen height: %d" % self.ls_height
        print "Flags: %s" % repr(self.flags)
        print " "*6,"Color resolution: %d" % self.color_resolution
        print " "*6,"Sort flag: %s" % str(self.sort_flag)
        print " "*6,"Global color table flag: %s" % str(self.color_table_flag)
        print " "*22,"...size: %d (%d bytes)" % \
            (self.global_color_table_size, self.global_color_table_size * 3)
        print "Background color: %d" % self.background_color
        print "Aspect ratio info: %d" % self.aspect_ratio
    def new_image( self, header=None):
        '''adds a new image descriptor'''
        image = ImageDescriptor(self, header)
        self.images.append(image)
        return image
class ImageDescriptor(object):
    '''A class that represents a single image (one GIF frame)'''
    def __init__( self, parent, header=None ):
        self.parent = parent   # the owning Gif/GifDecoder
        # this will be set when needed
        self.codesize = 0
        # compressed output codes
        self.lzwcode = ''
        # uncompressed pixels (decoded)
        self.pixels = []
        # we assume a "fullscreen" image
        self.left = self.top = 0
        self.width = parent.ls_width
        self.height = parent.ls_height
        # yes, these default flags work...
        self.flags = [False for x in range(8)]
        self.local_color_table_flag = False
        self.interlace_flag = False
        self.sort_flag = False
        self.local_color_table_size = 0
        self.draw_method = 'overwrite'
        self.transparent_color = -1   # -1 means "no transparent color"
        self.pallete = []
        if header:
            self.setup_header(header)
    def setup_header( self, header ):
        '''takes a header tuple and fills the attributes'''
        self.left = header[0]
        self.top = header[1]
        self.width = header[2]
        self.height = header[3]
        # flags come back LSB-first from get_bits, so index 7 is bit 7
        self.flags = get_bits( header[4] )
        self.local_color_table_flag = self.flags[7]
        self.interlace_flag = self.flags[6]
        self.sort_flag = self.flags[5]
        #-- flags 4 and 3 are reserved
        self.local_color_table_size = 2 ** (pack_bits(self.flags[:3]) + 1)
        if self.local_color_table_flag:
            if Debug: print 'local color table true'
            # NOTE(review): the parent's *global* table size and palette are
            # overwritten with the local ones — confirm this is intended.
            self.parent.global_color_table_size = self.local_color_table_size
            size = (self.local_color_table_size) * 3
            self.pallete = self.parent.get_color_table(size)
            self.parent.pallete = self.pallete
    def get_header(self):
        '''builds a header dynamically'''
        flags = [False for x in range(8)]
        flags[7] = self.local_color_table_flag
        flags[6] = self.interlace_flag
        flags[5] = self.sort_flag
        # useless!
        flags[2], flags[1], flags[0] = get_bits(len(self.pallete), bits=3)
        return (self.left, self.top, self.width, self.height, pack_bits(flags))
    header = property(fget=get_header)
class GifDecoder( Gif ):
'''decodes a gif file into.. something.. else..'''
def __init__( self, data, debug=False ):
Gif.__init__( self, data, debug )
self.fill()
def fill( self ):
'''reads the data and fills each field of the file'''
# start reading from the beggining of the file
self.pointer = 0
#17. Header.
#18. Logical Screen Descriptor.
data = self.pops( Gif.FMT_HEADER, self.data )
self.header = data[0]
self.ls_width = data[1]
self.ls_height = data[2]
self.background_color = data[4]
self.aspect_ratio = data[5]
# flags field
self.flags = get_bits( data[3] )
#1 bit
self.color_table_flag = self.flags[7]
self.sort_flag = self.flags[3]
#3 bit
self.color_resolution = pack_bits(self.flags[4:7]) # 7 not included
#3 bit
self.global_color_table_size = 2 ** (pack_bits(self.flags[:3]) + 1)
#19. Global Color Table.
if self.color_table_flag:
size = (self.global_color_table_size) * 3
self.pallete = self.get_color_table(size)
else:
# generate a greyscale pallete
self.pallete = [(x, x, x) for x in range(256)]
# blocks
image = None
self_data = self.data
self_pops = self.pops
Gif_IMAGE_SEPARATOR = Gif.IMAGE_SEPARATOR
Gif_FMT_IMGDESC = Gif.FMT_IMGDESC
self_new_image = self.new_image
self_pop = self.pop
self_debug_enabled = self.debug_enabled
self_lzw_decode = self.lzw_decode
self_global_color_table_size = self.global_color_table_size
Gif_EXTENSION_INTRODUCER = Gif.EXTENSION_INTRODUCER
Gif_GIF_TRAILER = Gif.GIF_TRAILER
Gif_LABEL_GRAPHIC_CONTROL = Gif.LABEL_GRAPHIC_CONTROL
while True:
try:
nextbyte = self_pops('<B', self_data)[0]
except:
nextbyte = 0x3b # force end
#20. Image Descriptor
if nextbyte == Gif_IMAGE_SEPARATOR:
descriptor = self_pops(Gif_FMT_IMGDESC, self_data)
image = self_new_image(descriptor)
image.transparent_color = trans_color
image.draw_method = drw_method
image.codesize = self_pops('<B', self_data)[0]
image.lzwcode = ''
image_lzwcode = image.lzwcode
while True:
try:
blocksize = self_pops('<B', self_data)[0]
except:
break
if blocksize == 0:
break # no more image data
lzwdata = self_pop(self_data, blocksize)
image_lzwcode = ''.join((image_lzwcode, lzwdata))
if self_debug_enabled:
print 'LZW length:', len(image_lzwcode)
image.lzwcode = image_lzwcode
image.pixels = self_lzw_decode(image.lzwcode, image.codesize, \
self_global_color_table_size)
# Extensions
elif nextbyte == Gif_EXTENSION_INTRODUCER:
pass
# Gif trailer
elif nextbyte == Gif_GIF_TRAILER:
return
elif nextbyte == Gif_LABEL_GRAPHIC_CONTROL:
#if self_debug_enabled: print 'LABEL_GRAPHIC_CONTROL'
nextbyte = self_pops('<B', self_data)[0]
#if self_debug_enabled: print 'block size:%d' %nextbyte
drw_bits = (get_bits(self_pops('<B', self_data)[0]))
if drw_bits[2:5] == array('B', [0,0,1]):
drw_method = 'replace'
elif (drw_bits[2:5]) == array('B', [0,1,0]):
drw_method = 'restore background'
else:
drw_method = 'restore previous'
#if self_debug_enabled:
# print 'draw_method :'+ drw_method
nextbyte = self_pops('<B', self_data)[0]
#if self_debug_enabled: print 'fields:%d' %nextbyte
nextbyte = self_pops('<B', self_data)[0]
#if self_debug_enabled: print 'duration:%d' %nextbyte # delay?
nextbyte = self_pops('<B', self_data)[0]
trans_color = nextbyte
#if Debug: print 'transparent color index :%d' %trans_color
pass
# "No Idea What Is This"
else:
pass
def string_to_bits(self, string):
'''high level string unpacker'''
ordarray = array('B', string)
bits = array('B')
bits_append = bits.append
#map(lambda byte: map (bits_append, get_bits(byte)), ordarray) slower:(
_get_bits = get_bits
for byte in ordarray:
map (bits_append, _get_bits(byte))
return bits
def bits_to_string(bits):
'''high level bit list packer'''
string = ''
while len(bits)>0:
code = pack_bits(bits[:8])
bits = bits[8:]
string = ''.join((string, chr(code)))
return string
def readable(bool_list):
'''Converts a list of booleans to a readable list of ints
Useful for debug only'''
return [int(x) for x in bool_list]
def bits_to_int(self, bits):
'''high level bit list packer'''
i = 0
c = 0
bits.reverse()
while c < len(bits):
if bits[c]:
i += 2 ** (len(bits) - c - 1)
c += 1
return i
def get_color_table( self, size ):
'''Returns a color table in the format [(r,g,b),(r,g,b), ...]'''
raw_color_table = self.pops("<%dB" % size, self.data)
pos = 0
pallete = []
pallete_append = pallete.append
while pos + 3 < (size+1):
red = raw_color_table[pos]
green = raw_color_table[pos+1]
blue = raw_color_table[pos+2]
pallete_append((red, green, blue))
pos += 3
return pallete
def lzw_decode(self, input, initial_codesize, color_table_size):
'''Decodes a lzw stream from input import
Returns list of ints (pixel values)'''
string_table = {}
output = array('B')
output_append = output.append
output_extend = output.extend
old = ''
index = 0
codesize = initial_codesize + 1
clearcode, end_of_info = color_table_size, color_table_size + 1
if Debug:
print 'codesize: %d' %codesize
print 'clearcode %d, end_of_info: %d' % (clearcode, end_of_info)
bits = self.string_to_bits(input)
self.bitpointer = 0
def pop(size, _bits ):
start = self.bitpointer
end = self.bitpointer = start + size
return _bits[start: end]
def clear():
'''Called on clear code'''
string_table.clear()
for index in range(color_table_size):
string_table[index] = chr(index)
index = end_of_info + 1
return index
index = clear()
# skip first (clear)code
bits = bits[codesize:]
# read first code, append to output
self_bits_to_int = self.bits_to_int
code = self_bits_to_int(pop(codesize, bits))
output_append(ord(string_table[code]))
old = string_table[code]
while self.bitpointer < len(bits):
# read next code
#if Debug:
# print 'length to decode :%d' %len(bits)
code = self_bits_to_int(pop(codesize, bits))
# special code?
if code == clearcode:
index = clear()
codesize = initial_codesize + 1
code = self_bits_to_int(pop(codesize, bits))
output_append(ord(string_table[code]))
old = string_table[code]
continue
elif code == end_of_info:
break
# code in stringtable?
if code in string_table:
c = string_table[code]
string_table[index] = ''.join((old, c[0]))
else:
c = ''.join((old, old[0]))
string_table[code] = c
index += 1
old = c
output_extend(map(ord, c))
if index == 2 ** codesize:
codesize += 1
if codesize == 13:
codesize = 12
print 'decoding error, missed a clearcode?'
print 'index:', index
#exit()
if self.debug_enabled:
print 'Output stream len: %d' % len(output)
return output
def get_bits( flags, reverse=False, bits=8 ):
    '''Return an array('B') with one 0/1 entry per requested bit of 'flags'.

    Bits are emitted LSB first, or MSB first when 'reverse' is set.'''
    # The original mask table (1, 2, 4, ... 128) covers at most 8 positions.
    positions = range(min(bits, 8))
    if reverse:
        positions = positions[::-1]
    return array('B', [(flags >> pos) & 1 for pos in positions])
def pack_bits( bits ):
    '''Pack a sequence of bit flags (bools/ints, LSB first) into an integer.'''
    return sum(1 << position for position, flag in enumerate(bits) if flag)
# Register the GIF decoder with the generic ImageLoader dispatcher so it is
# considered when loading image data.
ImageLoader.register(ImageLoaderGIF)
# Copyright (c) 2013 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import redis
from redis.exceptions import BusyLoadingError, ConnectionError
from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common.stream_codecs import PropertiesCodec, StringConverter
from trove.common import utils
from trove.guestagent.common.configuration import ConfigurationManager
from trove.guestagent.common.configuration import OneFileOverrideStrategy
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.redis import system
from trove.guestagent.datastore import service
from trove.guestagent import pkg
LOG = logging.getLogger(__name__)
TIME_OUT = 1200
CONF = cfg.CONF
CLUSTER_CFG = 'clustering'
packager = pkg.Package()
class RedisAppStatus(service.BaseDbStatus):
    """
    Handles all of the status updating for the redis guest agent.
    """

    def __init__(self, client):
        """:param client: connected Redis admin client used for pings."""
        super(RedisAppStatus, self).__init__()
        self.__client = client

    def set_client(self, client):
        # Swap in a new client (e.g. after the CONFIG command was renamed).
        self.__client = client

    def _get_actual_db_status(self):
        """Ping the server and map the outcome to a ServiceStatus."""
        try:
            if self.__client.ping():
                return rd_instance.ServiceStatuses.RUNNING
        except BusyLoadingError:
            # FIX: must be matched before ConnectionError. In redis-py,
            # BusyLoadingError is a ConnectionError subclass, so with the
            # previous ordering this branch was unreachable and a loading
            # server was misreported as SHUTDOWN.
            return rd_instance.ServiceStatuses.BLOCKED
        except ConnectionError:
            return rd_instance.ServiceStatuses.SHUTDOWN
        except Exception:
            LOG.exception(_("Error getting Redis status."))
        return rd_instance.ServiceStatuses.CRASHED

    def cleanup_stalled_db_services(self):
        # Last resort: forcibly kill any lingering redis-server process.
        utils.execute_with_timeout('pkill', '-9',
                                   'redis-server',
                                   run_as_root=True,
                                   root_helper='sudo')
class RedisApp(object):
    """
    Handles installation and configuration of redis
    on a trove instance.
    """

    def __init__(self, state_change_wait_time=None):
        """
        Sets default status and state_change_wait_time
        """
        if state_change_wait_time:
            self.state_change_wait_time = state_change_wait_time
        else:
            self.state_change_wait_time = CONF.state_change_wait_time
        # Configuration overrides live in a revision dir next to the config.
        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(system.REDIS_CONFIG),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        config_value_mappings = {'yes': True, 'no': False, "''": None}
        self._value_converter = StringConverter(config_value_mappings)
        self.configuration_manager = ConfigurationManager(
            system.REDIS_CONFIG,
            system.REDIS_OWNER, system.REDIS_OWNER,
            PropertiesCodec(
                unpack_singletons=False,
                string_mappings=config_value_mappings
            ), requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir))
        self.admin = self._build_admin_client()
        self.status = RedisAppStatus(self.admin)

    def _build_admin_client(self):
        """Build a RedisAdmin client from the current configuration."""
        password = self.get_configuration_property('requirepass')
        socket = self.get_configuration_property('unixsocket')
        cmd = self.get_config_command_name()
        return RedisAdmin(password=password, unix_socket_path=socket,
                          config_cmd=cmd)

    def install_if_needed(self, packages):
        """
        Install redis if needed do nothing if it is already installed.
        """
        LOG.info(_('Preparing Guest as Redis Server.'))
        if not packager.pkg_is_installed(packages):
            LOG.info(_('Installing Redis.'))
            self._install_redis(packages)
        LOG.info(_('Redis installed completely.'))

    def _install_redis(self, packages):
        """
        Install the redis server.
        """
        LOG.debug('Installing redis server.')
        LOG.debug("Creating %s.", system.REDIS_CONF_DIR)
        operating_system.create_directory(system.REDIS_CONF_DIR, as_root=True)
        pkg_opts = {}
        packager.pkg_install(packages, pkg_opts, TIME_OUT)
        self.start_db()
        LOG.debug('Finished installing redis server.')

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        self.status.stop_db_service(
            system.SERVICE_CANDIDATES, self.state_change_wait_time,
            disable_on_boot=do_not_start_on_reboot, update_db=update_db)

    def restart(self):
        self.status.restart_db_service(
            system.SERVICE_CANDIDATES, self.state_change_wait_time)

    def update_overrides(self, context, overrides, remove=False):
        if overrides:
            self.configuration_manager.apply_user_override(overrides)

    def apply_overrides(self, client, overrides):
        """Use the 'CONFIG SET' command to apply configuration at runtime.
        Commands that appear multiple times have values separated by a
        white space. For instance, the following two 'save' directives from the
        configuration file...
        save 900 1
        save 300 10
        ... would be applied in a single command as:
        CONFIG SET save "900 1 300 10"
        Note that the 'CONFIG' command has been renamed to prevent
        users from using it to bypass configuration groups.
        """
        for prop_name, prop_args in overrides.items():
            args_string = self._join_lists(
                self._value_converter.to_strings(prop_args), ' ')
            client.config_set(prop_name, args_string)
        # The overrides may have renamed the CONFIG command itself, so
        # rebuild the admin client against the new configuration.
        # (A dead trailing 'client = self.admin' rebind was removed; it had
        # no effect outside this method.)
        self.admin = self._build_admin_client()
        self.status = RedisAppStatus(self.admin)

    def _join_lists(self, items, sep):
        """Join list items (including items from sub-lists) into a string.
        Non-list inputs are returned unchanged.
        _join_lists('1234', ' ') = "1234"
        _join_lists(['1','2','3','4'], ' ') = "1 2 3 4"
        _join_lists([['1','2'], ['3','4']], ' ') = "1 2 3 4"
        """
        if isinstance(items, list):
            return sep.join([sep.join(e) if isinstance(e, list) else e
                             for e in items])
        return items

    def remove_overrides(self):
        self.configuration_manager.remove_user_override()

    def make_read_only(self, read_only):
        # Redis has no mechanism to make an instance read-only at present
        pass

    def start_db_with_conf_changes(self, config_contents):
        LOG.info(_('Starting redis with conf changes.'))
        if self.status.is_running:
            # renamed local from 'format' to avoid shadowing the builtin
            msg = 'Cannot start_db_with_conf_changes because status is %s.'
            LOG.debug(msg, self.status)
            raise RuntimeError(msg % self.status)
        LOG.info(_("Initiating config."))
        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration()
        self.start_db(True)

    def start_db(self, update_db=False):
        self.status.start_db_service(
            system.SERVICE_CANDIDATES, self.state_change_wait_time,
            enable_on_boot=True, update_db=update_db)

    def apply_initial_guestagent_configuration(self):
        """Update guestagent-controlled configuration properties.
        """
        # Hide the 'CONFIG' command from end users by mangling its name.
        self.admin.set_config_command_name(self._mangle_config_command_name())
        self.configuration_manager.apply_system_override(
            {'daemonize': 'yes',
             'protected-mode': 'no',
             'supervised': 'systemd',
             'pidfile': system.REDIS_PID_FILE,
             'logfile': system.REDIS_LOG_FILE,
             'dir': system.REDIS_DATA_DIR})

    def get_config_command_name(self):
        """Get current name of the 'CONFIG' command.

        Returns None if the command has not (yet) been renamed.
        """
        renamed_cmds = self.configuration_manager.get_value('rename-command')
        # FIX: on a fresh instance no rename has been applied yet and
        # get_value() returns None, which is not iterable.
        if renamed_cmds:
            for name_pair in renamed_cmds:
                if name_pair[0] == 'CONFIG':
                    return name_pair[1]
        return None

    def _mangle_config_command_name(self):
        """Hide the 'CONFIG' command from the clients by renaming it to a
        random string known only to the guestagent.
        Return the mangled name.
        """
        mangled = utils.generate_random_password()
        self._rename_command('CONFIG', mangled)
        return mangled

    def _rename_command(self, old_name, new_name):
        """It is possible to completely disable a command by renaming it
        to an empty string.
        """
        self.configuration_manager.apply_system_override(
            {'rename-command': [old_name, new_name]})

    def get_logfile(self):
        """Specify the log file name. Also the empty string can be used to
        force Redis to log on the standard output.
        Note that if you use standard output for logging but daemonize,
        logs will be sent to /dev/null
        """
        return self.get_configuration_property('logfile')

    def get_db_filename(self):
        """The filename where to dump the DB.
        """
        return self.get_configuration_property('dbfilename')

    def get_working_dir(self):
        """The DB will be written inside this directory,
        with the filename specified the 'dbfilename' configuration directive.
        The Append Only File will also be created inside this directory.
        """
        return self.get_configuration_property('dir')

    def get_persistence_filepath(self):
        """Returns the full path to the persistence file."""
        return guestagent_utils.build_file_path(
            self.get_working_dir(), self.get_db_filename())

    def get_port(self):
        """Port for this instance or default if not set."""
        return self.get_configuration_property('port', system.REDIS_PORT)

    def get_auth_password(self):
        """Client authentication password for this instance or None if not set.
        """
        return self.get_configuration_property('requirepass')

    def is_appendonly_enabled(self):
        """True if the Append Only File (AOF) persistence mode is enabled.
        """
        return self.get_configuration_property('appendonly', False)

    def get_append_file_name(self):
        """The name of the append only file (AOF).
        """
        return self.get_configuration_property('appendfilename')

    def is_cluster_enabled(self):
        """Only nodes that are started as cluster nodes can be part of a
        Redis Cluster.
        """
        return self.get_configuration_property('cluster-enabled', False)

    def enable_cluster(self):
        """In order to start a Redis instance as a cluster node enable the
        cluster support
        """
        self.configuration_manager.apply_system_override(
            {'cluster-enabled': 'yes'}, CLUSTER_CFG)

    def get_cluster_config_filename(self):
        """Cluster node configuration file.
        """
        return self.get_configuration_property('cluster-config-file')

    def set_cluster_config_filename(self, name):
        """Make sure that instances running in the same system do not have
        overlapping cluster configuration file names.
        """
        self.configuration_manager.apply_system_override(
            {'cluster-config-file': name}, CLUSTER_CFG)

    def get_cluster_node_timeout(self):
        """Cluster node timeout is the amount of milliseconds a node must be
        unreachable for it to be considered in failure state.
        """
        return self.get_configuration_property('cluster-node-timeout')

    def get_configuration_property(self, name, default=None):
        """Return the value of a Redis configuration property.
        Returns a single value for single-argument properties or
        a list otherwise.
        """
        return utils.unpack_singleton(
            self.configuration_manager.get_value(name, default))

    def cluster_meet(self, ip, port):
        try:
            utils.execute_with_timeout('redis-cli', 'cluster', 'meet',
                                       ip, port)
        except exception.ProcessExecutionError:
            LOG.exception(_('Error joining node to cluster at %s.'), ip)
            raise

    def cluster_addslots(self, first_slot, last_slot):
        try:
            # list() keeps the slicing/'del' below working even if map()
            # returns an iterator (Python 3); identical behavior on Python 2.
            slots = list(map(str, range(first_slot, last_slot + 1)))
            group_size = 200
            while slots:
                cmd = (['redis-cli', 'cluster', 'addslots']
                       + slots[0:group_size])
                out, err = utils.execute_with_timeout(*cmd, run_as_root=True,
                                                      root_helper='sudo')
                if 'OK' not in out:
                    raise RuntimeError(_('Error executing addslots: %s')
                                       % out)
                del slots[0:group_size]
        except exception.ProcessExecutionError:
            LOG.exception(_('Error adding slots %(first_slot)s-%(last_slot)s'
                            ' to cluster.'),
                          {'first_slot': first_slot, 'last_slot': last_slot})
            raise

    def _get_node_info(self):
        try:
            # FIX: unpacking into '_' made the gettext alias '_' a local,
            # so the _() call in the except clause raised UnboundLocalError
            # whenever the command failed.
            out, _err = utils.execute_with_timeout('redis-cli', '--csv',
                                                   'cluster', 'nodes')
            return [line.split(' ') for line in out.splitlines()]
        except exception.ProcessExecutionError:
            LOG.exception(_('Error getting node info.'))
            raise

    def _get_node_details(self):
        for node_details in self._get_node_info():
            if 'myself' in node_details[2]:
                return node_details
        raise exception.TroveError(_("Unable to determine node details"))

    def get_node_ip(self):
        """Returns [ip, port] where both values are strings"""
        return self._get_node_details()[1].split(':')

    def get_node_id_for_removal(self):
        node_details = self._get_node_details()
        node_id = node_details[0]
        my_ip = node_details[1].split(':')[0]
        try:
            # FIX: see _get_node_info -- avoid shadowing the '_' alias.
            slots, _err = utils.execute_with_timeout('redis-cli', '--csv',
                                                     'cluster', 'slots')
            return node_id if my_ip not in slots else None
        except exception.ProcessExecutionError:
            LOG.exception(_('Error validating node to for removal.'))
            raise

    def remove_nodes(self, node_ids):
        try:
            for node_id in node_ids:
                utils.execute_with_timeout('redis-cli', 'cluster',
                                           'forget', node_id)
        except exception.ProcessExecutionError:
            LOG.exception(_('Error removing node from cluster.'))
            raise
class RedisAdmin(object):
    """Handles administrative tasks on the Redis database.
    """
    # Default name of the Redis CONFIG command (before any mangling/rename).
    DEFAULT_CONFIG_CMD = 'CONFIG'
    def __init__(self, password=None, unix_socket_path=None, config_cmd=None):
        """Create a client for the local Redis instance.

        :param password: optional AUTH password.
        :param unix_socket_path: optional unix socket to connect through.
        :param config_cmd: current (possibly renamed) CONFIG command name,
            or None to use the default.
        """
        self.__client = redis.StrictRedis(
            password=password, unix_socket_path=unix_socket_path)
        self.__config_cmd_name = config_cmd or self.DEFAULT_CONFIG_CMD
    def set_config_command_name(self, name):
        """Set name of the 'CONFIG' command or None for default.
        """
        self.__config_cmd_name = name or self.DEFAULT_CONFIG_CMD
    def ping(self):
        """Ping the Redis server and return True if a response is received.
        """
        return self.__client.ping()
    def get_info(self, section=None):
        # Parsed output of the INFO command, optionally one section only.
        return self.__client.info(section=section)
    def persist_data(self):
        """Persist the dataset to disk.

        Prefers a non-blocking BGSAVE (polling lastsave() until its
        timestamp changes); falls back to a blocking SAVE if the background
        save could not be started.
        """
        save_cmd = 'SAVE'
        last_save = self.__client.lastsave()
        LOG.debug("Starting Redis data persist")
        save_ok = True
        try:
            save_ok = self.__client.bgsave()
        except redis.exceptions.ResponseError as re:
            # If an auto-save is in progress just use it, since it must have
            # just happened
            if "Background save already in progress" in str(re):
                LOG.info(_("Waiting for existing background save to finish"))
            else:
                raise
        if save_ok:
            save_cmd = 'BGSAVE'
            # BGSAVE is asynchronous: completion is detected by the
            # lastsave() timestamp moving past the one captured above.
            def _timestamp_changed():
                return last_save != self.__client.lastsave()
            try:
                utils.poll_until(_timestamp_changed, sleep_time=2,
                                 time_out=TIME_OUT)
            except exception.PollTimeOut:
                raise RuntimeError(_("Timeout occurred waiting for Redis "
                                     "persist (%s) to complete.") % save_cmd)
        # If the background save fails for any reason, try doing a foreground
        # one. This blocks client connections, so we don't want it to be
        # the default.
        elif not self.__client.save():
            raise exception.BackupCreationError(_("Could not persist "
                                                  "Redis data (%s)") % save_cmd)
        LOG.debug("Redis data persist (%s) completed", save_cmd)
    def set_master(self, host=None, port=None):
        # SLAVEOF host port; with both None this promotes the node to master.
        self.__client.slaveof(host, port)
    def config_set(self, name, value):
        """Set a configuration property via the (possibly renamed) CONFIG
        command; raises UnprocessableEntity if the server does not ack OK.
        """
        response = self.execute(
            '%s %s' % (self.__config_cmd_name, 'SET'), name, value)
        if not self._is_ok_response(response):
            raise exception.UnprocessableEntity(
                _("Could not set configuration property '%(name)s' to "
                  "'%(value)s'.") % {'name': name, 'value': value})
    def _is_ok_response(self, response):
        """Return True if a given Redis response is 'OK'.
        """
        return response and redis.client.bool_ok(response)
    def execute(self, cmd_name, *cmd_args, **options):
        """Execute a command and return a parsed response.

        Any client-level failure is re-raised as TroveError.
        """
        try:
            return self.__client.execute_command(cmd_name, *cmd_args,
                                                 **options)
        except Exception as e:
            LOG.exception(e)
            raise exception.TroveError(
                _("Redis command '%(cmd_name)s %(cmd_args)s' failed.")
                % {'cmd_name': cmd_name, 'cmd_args': ' '.join(cmd_args)})
    def wait_until(self, key, wait_value, section=None, timeout=None):
        """Polls redis until the specified 'key' changes to 'wait_value'."""
        timeout = timeout or CONF.usage_timeout
        LOG.debug("Waiting for Redis '%(key)s' to be: %(value)s.",
                  {'key': key, 'value': wait_value})
        def _check_info():
            # One poll iteration: read INFO and compare the field's value.
            redis_info = self.get_info(section)
            if key in redis_info:
                current_value = redis_info[key]
                LOG.debug("Found '%(value)s' for field %(key)s.",
                          {'value': current_value, 'key': key})
            else:
                LOG.error(_('Output from Redis command: %s'), redis_info)
                raise RuntimeError(_("Field %(field)s not found "
                                     "(Section: '%(sec)s').") %
                                   ({'field': key, 'sec': section}))
            return current_value == wait_value
        try:
            utils.poll_until(_check_info, time_out=timeout)
        except exception.PollTimeOut:
            raise RuntimeError(_("Timeout occurred waiting for Redis field "
                                 "'%(field)s' to change to '%(val)s'.") %
                               {'field': key, 'val': wait_value})
Fix trove-guestagent startup for redis mangled config cmd
Initially, the mangled command name has not been set yet, so get_value()
returns None, which is not iterable.
Change-Id: Id705264a265c8758dd32b3f1c62e3b74e81138b1
# Copyright (c) 2013 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import redis
from redis.exceptions import BusyLoadingError, ConnectionError
from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common.stream_codecs import PropertiesCodec, StringConverter
from trove.common import utils
from trove.guestagent.common.configuration import ConfigurationManager
from trove.guestagent.common.configuration import OneFileOverrideStrategy
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.redis import system
from trove.guestagent.datastore import service
from trove.guestagent import pkg
LOG = logging.getLogger(__name__)
TIME_OUT = 1200
CONF = cfg.CONF
CLUSTER_CFG = 'clustering'
packager = pkg.Package()
class RedisAppStatus(service.BaseDbStatus):
    """
    Handles all of the status updating for the redis guest agent.
    """

    def __init__(self, client):
        """:param client: connected Redis admin client used for pings."""
        super(RedisAppStatus, self).__init__()
        self.__client = client

    def set_client(self, client):
        # Swap in a new client (e.g. after the CONFIG command was renamed).
        self.__client = client

    def _get_actual_db_status(self):
        """Ping the server and map the outcome to a ServiceStatus."""
        try:
            if self.__client.ping():
                return rd_instance.ServiceStatuses.RUNNING
        except BusyLoadingError:
            # FIX: must be matched before ConnectionError. In redis-py,
            # BusyLoadingError is a ConnectionError subclass, so with the
            # previous ordering this branch was unreachable and a loading
            # server was misreported as SHUTDOWN.
            return rd_instance.ServiceStatuses.BLOCKED
        except ConnectionError:
            return rd_instance.ServiceStatuses.SHUTDOWN
        except Exception:
            LOG.exception(_("Error getting Redis status."))
        return rd_instance.ServiceStatuses.CRASHED

    def cleanup_stalled_db_services(self):
        # Last resort: forcibly kill any lingering redis-server process.
        utils.execute_with_timeout('pkill', '-9',
                                   'redis-server',
                                   run_as_root=True,
                                   root_helper='sudo')
class RedisApp(object):
"""
Handles installation and configuration of redis
on a trove instance.
"""
def __init__(self, state_change_wait_time=None):
"""
Sets default status and state_change_wait_time
"""
if state_change_wait_time:
self.state_change_wait_time = state_change_wait_time
else:
self.state_change_wait_time = CONF.state_change_wait_time
revision_dir = guestagent_utils.build_file_path(
os.path.dirname(system.REDIS_CONFIG),
ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
config_value_mappings = {'yes': True, 'no': False, "''": None}
self._value_converter = StringConverter(config_value_mappings)
self.configuration_manager = ConfigurationManager(
system.REDIS_CONFIG,
system.REDIS_OWNER, system.REDIS_OWNER,
PropertiesCodec(
unpack_singletons=False,
string_mappings=config_value_mappings
), requires_root=True,
override_strategy=OneFileOverrideStrategy(revision_dir))
self.admin = self._build_admin_client()
self.status = RedisAppStatus(self.admin)
def _build_admin_client(self):
password = self.get_configuration_property('requirepass')
socket = self.get_configuration_property('unixsocket')
cmd = self.get_config_command_name()
return RedisAdmin(password=password, unix_socket_path=socket,
config_cmd=cmd)
def install_if_needed(self, packages):
"""
Install redis if needed do nothing if it is already installed.
"""
LOG.info(_('Preparing Guest as Redis Server.'))
if not packager.pkg_is_installed(packages):
LOG.info(_('Installing Redis.'))
self._install_redis(packages)
LOG.info(_('Redis installed completely.'))
def _install_redis(self, packages):
"""
Install the redis server.
"""
LOG.debug('Installing redis server.')
LOG.debug("Creating %s.", system.REDIS_CONF_DIR)
operating_system.create_directory(system.REDIS_CONF_DIR, as_root=True)
pkg_opts = {}
packager.pkg_install(packages, pkg_opts, TIME_OUT)
self.start_db()
LOG.debug('Finished installing redis server.')
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
self.status.stop_db_service(
system.SERVICE_CANDIDATES, self.state_change_wait_time,
disable_on_boot=do_not_start_on_reboot, update_db=update_db)
def restart(self):
self.status.restart_db_service(
system.SERVICE_CANDIDATES, self.state_change_wait_time)
def update_overrides(self, context, overrides, remove=False):
if overrides:
self.configuration_manager.apply_user_override(overrides)
def apply_overrides(self, client, overrides):
"""Use the 'CONFIG SET' command to apply configuration at runtime.
Commands that appear multiple times have values separated by a
white space. For instance, the following two 'save' directives from the
configuration file...
save 900 1
save 300 10
... would be applied in a single command as:
CONFIG SET save "900 1 300 10"
Note that the 'CONFIG' command has been renamed to prevent
users from using it to bypass configuration groups.
"""
for prop_name, prop_args in overrides.items():
args_string = self._join_lists(
self._value_converter.to_strings(prop_args), ' ')
client.config_set(prop_name, args_string)
self.admin = self._build_admin_client()
self.status = RedisAppStatus(self.admin)
client = self.admin
def _join_lists(self, items, sep):
"""Join list items (including items from sub-lists) into a string.
Non-list inputs are returned unchanged.
_join_lists('1234', ' ') = "1234"
_join_lists(['1','2','3','4'], ' ') = "1 2 3 4"
_join_lists([['1','2'], ['3','4']], ' ') = "1 2 3 4"
"""
if isinstance(items, list):
return sep.join([sep.join(e) if isinstance(e, list) else e
for e in items])
return items
def remove_overrides(self):
self.configuration_manager.remove_user_override()
def make_read_only(self, read_only):
# Redis has no mechanism to make an instance read-only at present
pass
def start_db_with_conf_changes(self, config_contents):
LOG.info(_('Starting redis with conf changes.'))
if self.status.is_running:
format = 'Cannot start_db_with_conf_changes because status is %s.'
LOG.debug(format, self.status)
raise RuntimeError(format % self.status)
LOG.info(_("Initiating config."))
self.configuration_manager.save_configuration(config_contents)
# The configuration template has to be updated with
# guestagent-controlled settings.
self.apply_initial_guestagent_configuration()
self.start_db(True)
def start_db(self, update_db=False):
self.status.start_db_service(
system.SERVICE_CANDIDATES, self.state_change_wait_time,
enable_on_boot=True, update_db=update_db)
def apply_initial_guestagent_configuration(self):
"""Update guestagent-controlled configuration properties.
"""
# Hide the 'CONFIG' command from end users by mangling its name.
self.admin.set_config_command_name(self._mangle_config_command_name())
self.configuration_manager.apply_system_override(
{'daemonize': 'yes',
'protected-mode': 'no',
'supervised': 'systemd',
'pidfile': system.REDIS_PID_FILE,
'logfile': system.REDIS_LOG_FILE,
'dir': system.REDIS_DATA_DIR})
def get_config_command_name(self):
"""Get current name of the 'CONFIG' command.
"""
renamed_cmds = self.configuration_manager.get_value('rename-command')
if renamed_cmds:
for name_pair in renamed_cmds:
if name_pair[0] == 'CONFIG':
return name_pair[1]
return None
def _mangle_config_command_name(self):
"""Hide the 'CONFIG' command from the clients by renaming it to a
random string known only to the guestagent.
Return the mangled name.
"""
mangled = utils.generate_random_password()
self._rename_command('CONFIG', mangled)
return mangled
def _rename_command(self, old_name, new_name):
"""It is possible to completely disable a command by renaming it
to an empty string.
"""
self.configuration_manager.apply_system_override(
{'rename-command': [old_name, new_name]})
def get_logfile(self):
"""Specify the log file name. Also the empty string can be used to
force Redis to log on the standard output.
Note that if you use standard output for logging but daemonize,
logs will be sent to /dev/null
"""
return self.get_configuration_property('logfile')
def get_db_filename(self):
"""The filename where to dump the DB.
"""
return self.get_configuration_property('dbfilename')
def get_working_dir(self):
"""The DB will be written inside this directory,
with the filename specified the 'dbfilename' configuration directive.
The Append Only File will also be created inside this directory.
"""
return self.get_configuration_property('dir')
def get_persistence_filepath(self):
"""Returns the full path to the persistence file."""
return guestagent_utils.build_file_path(
self.get_working_dir(), self.get_db_filename())
def get_port(self):
"""Port for this instance or default if not set."""
return self.get_configuration_property('port', system.REDIS_PORT)
def get_auth_password(self):
"""Client authentication password for this instance or None if not set.
"""
return self.get_configuration_property('requirepass')
def is_appendonly_enabled(self):
"""True if the Append Only File (AOF) persistence mode is enabled.
"""
return self.get_configuration_property('appendonly', False)
def get_append_file_name(self):
"""The name of the append only file (AOF).
"""
return self.get_configuration_property('appendfilename')
def is_cluster_enabled(self):
"""Only nodes that are started as cluster nodes can be part of a
Redis Cluster.
"""
return self.get_configuration_property('cluster-enabled', False)
def enable_cluster(self):
"""In order to start a Redis instance as a cluster node enable the
cluster support
"""
self.configuration_manager.apply_system_override(
{'cluster-enabled': 'yes'}, CLUSTER_CFG)
def get_cluster_config_filename(self):
"""Cluster node configuration file.
"""
return self.get_configuration_property('cluster-config-file')
def set_cluster_config_filename(self, name):
"""Make sure that instances running in the same system do not have
overlapping cluster configuration file names.
"""
self.configuration_manager.apply_system_override(
{'cluster-config-file': name}, CLUSTER_CFG)
def get_cluster_node_timeout(self):
"""Cluster node timeout is the amount of milliseconds a node must be
unreachable for it to be considered in failure state.
"""
return self.get_configuration_property('cluster-node-timeout')
def get_configuration_property(self, name, default=None):
"""Return the value of a Redis configuration property.
Returns a single value for single-argument properties or
a list otherwise.
"""
return utils.unpack_singleton(
self.configuration_manager.get_value(name, default))
def cluster_meet(self, ip, port):
    """Introduce this node to the cluster member at ip:port via
    'redis-cli cluster meet'; re-raises on command failure."""
    meet_cmd = ('redis-cli', 'cluster', 'meet', ip, port)
    try:
        utils.execute_with_timeout(*meet_cmd)
    except exception.ProcessExecutionError:
        LOG.exception(_('Error joining node to cluster at %s.'), ip)
        raise
def cluster_addslots(self, first_slot, last_slot):
    """Assign the hash-slot range [first_slot, last_slot] to this node.

    Slots are fed to 'redis-cli cluster addslots' in batches of 200 so a
    single generated command line stays reasonably short.

    :raises RuntimeError: if redis-cli does not report 'OK' for a batch.
    """
    try:
        # Materialize as a list: the batching below slices and deletes
        # ranges, which an iterator (map() on Python 3) does not support.
        slots = [str(slot) for slot in range(first_slot, last_slot + 1)]
        group_size = 200
        while slots:
            cmd = (['redis-cli', 'cluster', 'addslots']
                   + slots[0:group_size])
            out, err = utils.execute_with_timeout(*cmd, run_as_root=True,
                                                  root_helper='sudo')
            if 'OK' not in out:
                raise RuntimeError(_('Error executing addslots: %s')
                                   % out)
            # Drop the batch that was just assigned and continue.
            del slots[0:group_size]
    except exception.ProcessExecutionError:
        LOG.exception(_('Error adding slots %(first_slot)s-%(last_slot)s'
                        ' to cluster.'),
                      {'first_slot': first_slot, 'last_slot': last_slot})
        raise
def _get_node_info(self):
    """Return 'redis-cli cluster nodes' output as a list of
    space-split rows (one list per node line)."""
    try:
        out, _ = utils.execute_with_timeout('redis-cli', '--csv',
                                            'cluster', 'nodes')
    except exception.ProcessExecutionError:
        LOG.exception(_('Error getting node info.'))
        raise
    return [row.split(' ') for row in out.splitlines()]
def _get_node_details(self):
    """Return the node-info row describing this node.

    The local node is marked with 'myself' in its flags field (index 2).
    """
    for row in self._get_node_info():
        flags = row[2]
        if 'myself' in flags:
            return row
    raise exception.TroveError(_("Unable to determine node details"))
def get_node_ip(self):
    """Returns [ip, port] where both values are strings"""
    address = self._get_node_details()[1]
    return address.split(':')
def get_node_id_for_removal(self):
    """Return this node's cluster id if it is safe to remove, else None.

    A node may be removed only when it no longer owns any hash slots,
    i.e. its IP does not appear in 'cluster slots' output.
    """
    node_details = self._get_node_details()
    node_id = node_details[0]
    my_ip = node_details[1].split(':')[0]
    try:
        slots, _ = utils.execute_with_timeout('redis-cli', '--csv',
                                              'cluster', 'slots')
        return node_id if my_ip not in slots else None
    except exception.ProcessExecutionError:
        # Fixed log message typo ("node to for removal").
        LOG.exception(_('Error validating node for removal.'))
        raise
def remove_nodes(self, node_ids):
    """Tell this node to forget each of the given cluster node ids."""
    try:
        for node_id in node_ids:
            forget_cmd = ('redis-cli', 'cluster', 'forget', node_id)
            utils.execute_with_timeout(*forget_cmd)
    except exception.ProcessExecutionError:
        LOG.exception(_('Error removing node from cluster.'))
        raise
class RedisAdmin(object):
    """Handles administrative tasks on the Redis database.

    All operations go through a private StrictRedis client.  The name of
    the CONFIG command is tracked separately because hardened deployments
    may rename it in redis.conf.
    """

    # Default name of the Redis CONFIG command.
    DEFAULT_CONFIG_CMD = 'CONFIG'

    def __init__(self, password=None, unix_socket_path=None, config_cmd=None):
        # Connects over the unix socket when a path is given.
        self.__client = redis.StrictRedis(
            password=password, unix_socket_path=unix_socket_path)
        self.__config_cmd_name = config_cmd or self.DEFAULT_CONFIG_CMD

    def set_config_command_name(self, name):
        """Set name of the 'CONFIG' command or None for default.
        """
        self.__config_cmd_name = name or self.DEFAULT_CONFIG_CMD

    def ping(self):
        """Ping the Redis server and return True if a response is received.
        """
        return self.__client.ping()

    def get_info(self, section=None):
        """Return the parsed output of the INFO command (optionally a
        single section)."""
        return self.__client.info(section=section)

    def persist_data(self):
        """Persist in-memory data to disk.

        Prefers a background save (BGSAVE) and waits for the 'lastsave'
        timestamp to advance; falls back to a blocking foreground SAVE
        only if the background save could not be started.

        Raises RuntimeError on poll timeout, BackupCreationError if the
        foreground SAVE fails.
        """
        save_cmd = 'SAVE'
        last_save = self.__client.lastsave()
        LOG.debug("Starting Redis data persist")
        save_ok = True
        try:
            save_ok = self.__client.bgsave()
        except redis.exceptions.ResponseError as re:
            # If an auto-save is in progress just use it, since it must have
            # just happened
            if "Background save already in progress" in str(re):
                LOG.info(_("Waiting for existing background save to finish"))
            else:
                raise
        if save_ok:
            save_cmd = 'BGSAVE'

            def _timestamp_changed():
                # lastsave only moves forward once a save has completed.
                return last_save != self.__client.lastsave()

            try:
                utils.poll_until(_timestamp_changed, sleep_time=2,
                                 time_out=TIME_OUT)
            except exception.PollTimeOut:
                raise RuntimeError(_("Timeout occurred waiting for Redis "
                                     "persist (%s) to complete.") % save_cmd)

        # If the background save fails for any reason, try doing a foreground
        # one. This blocks client connections, so we don't want it to be
        # the default.
        elif not self.__client.save():
            raise exception.BackupCreationError(_("Could not persist "
                                                  "Redis data (%s)") % save_cmd)
        LOG.debug("Redis data persist (%s) completed", save_cmd)

    def set_master(self, host=None, port=None):
        """Make this instance a replica of host:port; with no arguments
        (None, None) it is promoted back to master."""
        self.__client.slaveof(host, port)

    def config_set(self, name, value):
        """Set a runtime configuration property via the (possibly renamed)
        CONFIG SET command; raises UnprocessableEntity on failure."""
        response = self.execute(
            '%s %s' % (self.__config_cmd_name, 'SET'), name, value)
        if not self._is_ok_response(response):
            raise exception.UnprocessableEntity(
                _("Could not set configuration property '%(name)s' to "
                  "'%(value)s'.") % {'name': name, 'value': value})

    def _is_ok_response(self, response):
        """Return True if a given Redis response is 'OK'.
        """
        return response and redis.client.bool_ok(response)

    def execute(self, cmd_name, *cmd_args, **options):
        """Execute a command and return a parsed response.

        Any client-level failure is logged and re-raised as TroveError.
        """
        try:
            return self.__client.execute_command(cmd_name, *cmd_args,
                                                 **options)
        except Exception as e:
            LOG.exception(e)
            raise exception.TroveError(
                _("Redis command '%(cmd_name)s %(cmd_args)s' failed.")
                % {'cmd_name': cmd_name, 'cmd_args': ' '.join(cmd_args)})

    def wait_until(self, key, wait_value, section=None, timeout=None):
        """Polls redis until the specified 'key' changes to 'wait_value'."""
        timeout = timeout or CONF.usage_timeout
        LOG.debug("Waiting for Redis '%(key)s' to be: %(value)s.",
                  {'key': key, 'value': wait_value})

        def _check_info():
            # Re-read INFO on every poll; missing keys are treated as a
            # hard error rather than "not yet".
            redis_info = self.get_info(section)
            if key in redis_info:
                current_value = redis_info[key]
                LOG.debug("Found '%(value)s' for field %(key)s.",
                          {'value': current_value, 'key': key})
            else:
                LOG.error(_('Output from Redis command: %s'), redis_info)
                raise RuntimeError(_("Field %(field)s not found "
                                     "(Section: '%(sec)s').") %
                                   ({'field': key, 'sec': section}))
            return current_value == wait_value

        try:
            utils.poll_until(_check_info, time_out=timeout)
        except exception.PollTimeOut:
            raise RuntimeError(_("Timeout occurred waiting for Redis field "
                                 "'%(field)s' to change to '%(val)s'.") %
                               {'field': key, 'val': wait_value})
|
# -*- coding: utf8 -*-
''' classes for database access '''
import math
from injector import Module, provides, inject, singleton
import datetime
import pytz
import shapely.wkb
import shapely.geometry
import pandas as pd
try:
import psycopg2
import psycopg2.extras
import psycopg2.extensions
except ImportError:
pass
from abc import ABCMeta, abstractmethod
import blitzortung
class BaseInterval(object):
    """Generic interval with optional start and end bounds."""

    def __init__(self, start=None, end=None):
        self.start = start
        self.end = end

    def get_start(self):
        """Lower bound of the interval (or None)."""
        return self.start

    def get_end(self):
        """Upper bound of the interval (or None)."""
        return self.end

    def __str__(self):
        return '[%s - %s]' % (self.start, self.end)
class IdInterval(BaseInterval):
    """Interval of integer ids; both bounds are validated when truthy."""

    def __init__(self, start=None, end=None):
        for bound_name, bound in (('start', start), ('end', end)):
            if bound and not isinstance(bound, int):
                raise ValueError("%s should be an integer value" % bound_name)
        super(IdInterval, self).__init__(start, end)
class TimeInterval(BaseInterval):
    """Interval of datetimes; both bounds are validated when truthy."""

    def __init__(self, start=None, end=None):
        for bound_name, bound in (('start', start), ('end', end)):
            if bound and not isinstance(bound, datetime.datetime):
                raise ValueError("%s should be a datetime value" % bound_name)
        super(TimeInterval, self).__init__(start, end)
class Query(object):
    '''
    simple class for building of complex queries

    Collects table name, columns, WHERE conditions (ANDed together),
    ORDER BY terms and an optional LIMIT, and renders them into one SQL
    string via __str__().  Parameter values are collected separately as
    psycopg2 named parameters (%(name)s placeholders).
    '''

    def __init__(self):
        self.sql = ''
        self.conditions = []
        self.parameters = {}
        self.table_name = None
        self.columns = None
        self.limit = None
        self.order = []

    def set_table_name(self, table_name):
        self.table_name = table_name

    def set_columns(self, columns):
        self.columns = columns

    def add_order(self, order):
        # order is an Order instance; repeated calls append further terms.
        self.order.append(order)

    def set_limit(self, limit):
        # Only one Limit may be set per query.
        if self.limit != None:
            raise RuntimeError("overriding Query.limit")
        self.limit = limit

    def add_condition(self, condition, parameters = None):
        # Conditions are combined with AND when the SQL is rendered.
        self.conditions.append(condition)
        if parameters != None:
            self.parameters.update(parameters)

    def add_parameters(self, parameters):
        self.parameters.update(parameters)

    def __str__(self):
        # Render the collected parts into a single SELECT statement.
        sql = 'SELECT '
        if self.columns:
            for index, column in enumerate(self.columns):
                if index != 0:
                    sql += ', '
                sql += column
            sql += ' '
        sql += 'FROM ' + self.table_name + ' '
        for index, condition in enumerate(self.conditions):
            if index == 0:
                sql += 'WHERE '
            else:
                sql += 'AND '
            sql += condition + ' '
        if len(self.order) > 0:
            sql += 'ORDER BY '
            for index, order in enumerate(self.order):
                if index != 0:
                    sql += ', '
                sql += order.get_column()
                if order.is_desc():
                    sql += ' DESC'
            sql += ' '
        if self.limit:
            sql += 'LIMIT ' + str(self.limit.get_number()) + ' '
        return sql.strip()

    def get_parameters(self):
        return self.parameters

    def parse_args(self, args):
        # Translate positional arguments into conditions/order/limit by
        # type; falsy args are ignored.
        for arg in args:
            if arg:
                if isinstance(arg, TimeInterval):
                    if arg.get_start() != None:
                        self.add_condition('timestamp >= %(starttime)s', {'starttime': arg.get_start()})
                        #self.add_condition('timestamp >= :starttime', {'starttime': arg.get_start().astimezone(pytz.UTC).replace(tzinfo=None)})
                    if arg.get_end() != None:
                        self.add_condition('timestamp < %(endtime)s', {'endtime': arg.get_end()})
                        #self.add_condition('timestamp < :endtime', {'endtime': arg.get_end().astimezone(pytz.UTC).replace(tzinfo=None)})
                elif isinstance(arg, IdInterval):
                    if arg.get_start() != None:
                        self.add_condition('id >= %(startid)s', {'startid': arg.get_start()})
                    if arg.get_end() != None:
                        self.add_condition('id < %(endid)s', {'endid': arg.get_end()})
                elif isinstance(arg, shapely.geometry.base.BaseGeometry):
                    # Geometry arguments are passed as hex-encoded WKB
                    # (Python 2 str.encode('hex')).
                    if arg.is_valid:
                        self.add_condition('ST_SetSRID(CAST(%(envelope)s AS geometry), %(srid)s) && geog', {'envelope': shapely.wkb.dumps(arg.envelope).encode('hex')})
                        #self.add_condition('ST_SetSRID(CAST(:envelope AS geometry), :srid) && Transform(geog, :srid)', {'envelope': shapely.wkb.dumps(arg.envelope).encode('hex')})
                        if not arg.equals(arg.envelope):
                            # Envelope test above is a cheap prefilter; add the
                            # exact intersection test for non-rectangular shapes.
                            self.add_condition('Intersects(ST_SetSRID(CAST(%(geometry)s AS geometry), %(srid)s), st_transform(geog, %(srid)s))', {'geometry': shapely.wkb.dumps(arg).encode('hex')})
                            #self.add_condition('Intersects(ST_SetSRID(CAST(:geometry AS geometry), :srid), Transform(geog, :srid))', {'geometry': shapely.wkb.dumps(arg).encode('hex')})
                    else:
                        raise ValueError("invalid geometry in db.Stroke.select()")
                elif isinstance(arg, Order):
                    self.add_order(arg)
                elif isinstance(arg, Limit):
                    self.set_limit(arg)
                else:
                    print 'WARNING: ' + __name__ + ' unhandled object ' + str(type(arg))

    def get_results(self, db):
        # Map each fetched row through db.create(); empty result sets
        # yield an empty list.
        resulting_strokes = []
        if db.cur.rowcount > 0:
            for result in db.cur.fetchall():
                resulting_strokes.append(db.create(result))
        return resulting_strokes
class RasterQuery(Query):
    """Query variant that buckets strokes into a 2D raster grid.

    Wraps the base SELECT in an outer query that truncates transformed
    coordinates into raster cell indices and aggregates per cell.
    """

    def __init__(self, raster):
        super(RasterQuery, self).__init__()
        self.raster = raster

        # Restrict results to the raster's envelope (hex-encoded WKB).
        env = self.raster.get_env()
        if env.is_valid:
            self.add_condition('ST_SetSRID(CAST(%(envelope)s AS geometry), %(srid)s) && geog', {'envelope': shapely.wkb.dumps(env).encode('hex')})
            #self.add_condition('ST_SetSRID(CAST(:envelope AS geometry), :srid) && Transform(geog, :srid)', {'envelope': shapely.wkb.dumps(env).encode('hex')})
        else:
            raise ValueError("invalid Raster geometry in db.Stroke.select()")

    def __str__(self):
        # Outer query: cell index (rx, ry), per-cell count and latest
        # timestamp, computed over the base query as a subselect.
        sql = 'SELECT '
        sql += 'TRUNC((ST_X(ST_TRANSFORM(geog, %(srid)s)) - ' + str(self.raster.get_x_min()) + ') /' + str(self.raster.get_x_div()) + ') AS rx, '
        sql += 'TRUNC((ST_Y(ST_TRANSFORM(geog, %(srid)s)) - ' + str(self.raster.get_y_min()) + ') /' + str(self.raster.get_y_div()) + ') AS ry, '
        sql += 'count(*) AS count, max(timestamp) as timestamp FROM ('
        sql += Query.__str__(self)
        sql += ') AS ' + self.table_name + ' GROUP BY rx, ry'
        return sql

    def get_results(self, db):
        # Fill the raster in place and return it (not a list of strokes).
        if db.cur.rowcount > 0:
            for result in db.cur.fetchall():
                self.raster.set(result['rx'], result['ry'], blitzortung.geom.RasterElement(result['count'], result['timestamp']))
        return self.raster
class Order(object):
    """Describes one ORDER BY term: a column name and its direction."""

    def __init__(self, column, desc=False):
        self.column = column
        self.desc = desc

    def get_column(self):
        """Name of the column to order by."""
        return self.column

    def is_desc(self):
        """True when descending order was requested."""
        return self.desc
class Limit(object):
    """Wraps the maximum number of rows a query may return."""

    def __init__(self, limit):
        self.limit = limit

    def get_number(self):
        """The row limit value."""
        return self.limit
class Center(object):
    """Wraps the center point of a proximity query."""

    def __init__(self, center):
        self.center = center

    def get_point(self):
        """The wrapped center point object."""
        return self.center
class Connection(Module):
    """Injector module providing the shared psycopg2 database connection."""

    @singleton
    @provides(psycopg2._psycopg.connection)
    @inject(configuration=blitzortung.config.configuration)
    def provide_psycopg2_connection(self, configuration):
        # Connection string is read from the application configuration
        # under the key 'db_connection_string'.
        return psycopg2.connect(configuration['db_connection_string'])
class Base(object):
    '''
    abstract base class for database access objects

    creation of database

    as user postgres:

    createuser -i -D -R -S -W -E -P blitzortung
    createdb -E utf8 -O blitzortung blitzortung
    createlang plpgsql blitzortung
    psql -f /usr/share/postgresql/9.1/contrib/postgis-1.5/postgis.sql -d blitzortung
    psql -f /usr/share/postgresql/9.1/contrib/postgis-1.5/spatial_ref_sys.sql -d blitzortung

    (< pg 9.0)
    psql -f /usr/share/postgresql/8.4/contrib/btree_gist.sql blitzortung

    psql blitzortung

    GRANT SELECT ON spatial_ref_sys TO blitzortung;
    GRANT SELECT ON geometry_columns TO blitzortung;
    GRANT INSERT, DELETE ON geometry_columns TO blitzortung;

    (>= pg 9.0)
    CREATE EXTENSION "btree_gist";
    '''
    __metaclass__ = ABCMeta

    # All timestamps are handled in UTC unless a timezone is set explicitly.
    DefaultTimezone = pytz.UTC

    def __init__(self, db_connection):
        # schema/table names are filled in by subclasses.
        self.schema_name = None
        self.table_name = None
        self.cur = None
        self.conn = db_connection
        # 'initialized' defers the per-session SET TIME ZONE until the
        # first execute() call.
        self.initialized = False
        self.srid = blitzortung.geom.Geometry.DefaultSrid
        self.tz = Base.DefaultTimezone
        try:
            self.cur = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
            psycopg2.extensions.register_type(psycopg2.extensions.UNICODE, self.cur)
        except psycopg2.DatabaseError, e:
            # Best-effort cleanup on connection failure; errors are only
            # printed, the object is left unconnected.
            print e
            if self.cur != None:
                try:
                    self.cur.close()
                except NameError:
                    pass
            if self.conn != None:
                try:
                    self.conn.close()
                except NameError:
                    pass
        #if not self.has_table('geometry_columns'):
        #    self.execute('SELECT InitSpatialMetadata()')
        #    self.execute("INSERT INTO spatial_ref_sys (srid, auth_name, auth_srid, ref_sys_name, proj4text) VALUES (4326, 'epsg', 4326, 'WGS 84', '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')")

    #  def has_table(self, table_name):
    #    result = self.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='%s'" % table_name)
    #    return result.fetchone() != None

    def is_connected(self):
        """True while the underlying connection exists and is open."""
        if self.conn != None:
            return not self.conn.closed
        else:
            return False

    def set_table_name(self, table_name):
        self.table_name = table_name

    def get_table_name(self):
        return self.table_name

    def get_full_table_name(self):
        """Return the quoted "schema"."table" name, or just the table name
        when no schema is set."""
        if self.get_schema_name() != None:
            return '"' + self.get_schema_name() + '"."' + self.get_table_name() + '"'
        else:
            return self.get_table_name()

    def set_schema_name(self, schema_name):
        self.schema_name = schema_name

    def get_schema_name(self):
        return self.schema_name

    def get_srid(self):
        return self.srid

    def set_srid(self, srid):
        self.srid = srid

    def get_timezone(self):
        return self.tz

    def set_timezone(self, tz):
        self.tz = tz

    def fix_timezone(self, timestamp):
        # Convert a tz-aware timestamp into this DAO's timezone.
        return timestamp.astimezone(self.tz) if timestamp else None

    def from_bare_utc_to_timezone(self, utc_time):
        # Interpret a naive datetime as UTC, then convert to local tz.
        return utc_time.replace(tzinfo=pytz.UTC).astimezone(self.tz)

    def from_timezone_to_bare_utc(self, time_with_tz):
        # Convert an aware datetime to naive UTC.
        return time_with_tz.astimezone(pytz.UTC).replace(tzinfo=None)

    def commit(self):
        ''' commit pending database transaction '''
        self.conn.commit()

    def rollback(self):
        ''' rollback pending database transaction '''
        self.conn.rollback()

    @abstractmethod
    def insert(self, object_to_insert):
        pass

    @abstractmethod
    def select(self, args):
        pass

    def execute(self, sql_string, parameters=None):
        """Run a statement, setting the session time zone on first use."""
        if not self.initialized:
            self.cur.execute('SET TIME ZONE \'%s\'' %(str(self.tz)))
            self.initialized = True
        self.cur.execute(sql_string, parameters)
@singleton
class Stroke(Base):
    '''
    stroke db access class

    database table creation (as db user blitzortung, database blitzortung):

    CREATE TABLE strokes (id bigserial, timestamp timestamptz, nanoseconds SMALLINT, geog GEOGRAPHY(Point), PRIMARY KEY(id));
    ALTER TABLE strokes ADD COLUMN region SMALLINT;
    ALTER TABLE strokes ADD COLUMN amplitude REAL;
    ALTER TABLE strokes ADD COLUMN error2d SMALLINT;
    ALTER TABLE strokes ADD COLUMN type SMALLINT;
    ALTER TABLE strokes ADD COLUMN stationcount SMALLINT;

    CREATE INDEX strokes_timestamp ON strokes USING btree("timestamp");
    CREATE INDEX strokes_region_timestamp ON strokes USING btree(region, "timestamp");
    CREATE INDEX strokes_id_timestamp ON strokes USING btree(id, "timestamp");
    CREATE INDEX strokes_geog ON strokes USING gist(geog);
    CREATE INDEX strokes_timestamp_geog ON strokes USING gist("timestamp", geog);
    CREATE INDEX strokes_id_timestamp_geog ON strokes USING gist(id, "timestamp", geog);

    empty the table with the following commands:

    DELETE FROM strokes;
    ALTER SEQUENCE strokes_id_seq RESTART 1;
    '''

    @inject(db_connection=psycopg2._psycopg.connection)
    def __init__(self, db_connection):
        super(Stroke, self).__init__(db_connection)
        self.set_table_name('strokes')
        #    if not self.has_table(self.get_table_name()):
        #      self.execute("CREATE TABLE strokes (id INTEGER PRIMARY KEY, timestamp timestamp, nanoseconds INTEGER)")
        #      self.execute("SELECT AddGeometryColumn('strokes','geog',4326,'POINT',2)")
        #      self.execute("ALTER TABLE strokes ADD COLUMN amplitude REAL")
        #      self.execute("ALTER TABLE strokes ADD COLUMN error2d INTEGER")
        #      self.execute("ALTER TABLE strokes ADD COLUMN type INTEGER")
        #      self.execute("ALTER TABLE strokes ADD COLUMN stationcount INTEGER")
        #      self.execute("ALTER TABLE strokes ADD COLUMN detected BOOLEAN")

    def insert(self, stroke, region=1):
        """Insert one stroke record; the transaction is NOT committed here."""
        sql = 'INSERT INTO ' + self.get_full_table_name() + \
              ' ("timestamp", nanoseconds, geog, region, amplitude, error2d, type, stationcount) ' + \
              'VALUES (%(timestamp)s, %(nanoseconds)s, ST_MakePoint(%(longitude)s, %(latitude)s), %(region)s, %(amplitude)s, %(error2d)s, %(type)s, %(stationcount)s)'
        parameters = {
            'timestamp': stroke.get_timestamp(),
            'nanoseconds': stroke.get_timestamp().nanosecond,
            'longitude': stroke.get_x(),
            'latitude': stroke.get_y(),
            'region': region,
            'amplitude': stroke.get_amplitude(),
            'error2d': stroke.get_lateral_error(),
            'type': stroke.get_type(),
            'stationcount': stroke.get_station_count()
        }
        self.execute(sql, parameters)

    def get_latest_time(self, region=1):
        """Return the timestamp of the newest stroke in the given region
        as a pandas Timestamp, or None when the region has no strokes."""
        sql = 'SELECT timestamp FROM ' + self.get_full_table_name() + \
              ' WHERE region=%(region)s' + \
              ' ORDER BY timestamp DESC LIMIT 1'
        self.execute(sql, {'region': region})
        if self.cur.rowcount == 1:
            result = self.cur.fetchone()
            return pd.Timestamp(self.fix_timezone(result['timestamp']))
        else:
            return None

    def create(self, result):
        """Build a Stroke domain object from one result row."""
        stroke_builder = blitzortung.builder.Stroke()

        stroke_builder.set_id(result['id'])
        stroke_builder.set_timestamp(self.fix_timezone(result['timestamp']), result['nanoseconds'])
        # geog comes back as hex-encoded WKB (Python 2 str.decode('hex')).
        location = shapely.wkb.loads(result['geog'].decode('hex'))
        stroke_builder.set_x(location.x)
        stroke_builder.set_y(location.y)
        stroke_builder.set_amplitude(result['amplitude'])
        stroke_builder.set_type(result['type'])
        stroke_builder.set_station_count(result['stationcount'])
        stroke_builder.set_lateral_error(result['error2d'])

        return stroke_builder.build()

    def select_query(self, args, query = None):
        ' build up query object for select statement '
        if not query:
            query = Query()

        query.set_table_name(self.get_full_table_name())
        query.set_columns(['id', '"timestamp"', 'nanoseconds', 'st_transform(geog::geometry, %(srid)s) AS geog', 'amplitude', 'type', 'error2d', 'stationcount'])
        query.add_parameters({'srid': self.srid})
        #query.add_condition('geog IS NOT NULL')
        query.parse_args(args)
        return query

    def select(self, *args):
        ' build up query '
        query = self.select_query(args)
        return self.select_execute(query)

    def select_raster(self, raster, *args):
        ' build up query '
        query = self.select_query(args, RasterQuery(raster))
        return self.select_execute(query)

    def select_histogram(self, minutes, region=1, binsize=5):
        """Return a list of stroke counts per 'binsize'-minute bin over the
        last 'minutes' minutes, oldest bin first.

        NOTE(review): relies on Python 2 integer division for
        minutes/binsize and on the interval index arithmetic matching the
        SQL bucket values — confirm before porting to Python 3.
        """
        query = "select -extract(epoch from current_timestamp - timestamp)::int/60/%(binsize)s as interval, count(*) from strokes where timestamp > current_timestamp - interval '%(minutes)s minutes' and region = %(region)s group by interval order by interval;"
        self.cur.execute(query, {'minutes': minutes, 'binsize':binsize, 'region':region})
        value_count = minutes/binsize
        result = [0] * value_count
        raw_result = self.cur.fetchall()
        for bin_data in raw_result:
            try:
                result[bin_data[0] + value_count - 1] = bin_data[1]
            except IndexError:
                # Out-of-range bins are reported but otherwise ignored.
                print "Index %d/%d out of range (%s)" % ( bin_data[0] + value_count - 1, value_count, str(bin_data))
        return result

    def select_execute(self, query):
        """Run the prepared query and delegate row mapping to it."""
        self.execute(str(query), query.get_parameters())
        return query.get_results(self)
def stroke():
    """Return the singleton Stroke DAO from the application injector."""
    from __init__ import INJECTOR

    return INJECTOR.get(Stroke)
@singleton
class Station(Base):
    '''
    database table creation (as db user blitzortung, database blitzortung):

    CREATE TABLE stations (id bigserial, number int, geog GEOGRAPHY(Point), PRIMARY KEY(id));
    ALTER TABLE stations ADD COLUMN short_name CHARACTER VARYING;
    ALTER TABLE stations ADD COLUMN name CHARACTER VARYING;
    ALTER TABLE stations ADD COLUMN location_name CHARACTER VARYING;
    ALTER TABLE stations ADD COLUMN country CHARACTER VARYING;
    ALTER TABLE stations ADD COLUMN timestamp TIMESTAMPTZ;

    CREATE INDEX stations_timestamp ON stations USING btree("timestamp");
    CREATE INDEX stations_number_timestamp ON stations USING btree(number, "timestamp");
    CREATE INDEX stations_geog ON stations USING gist(geog);

    empty the table with the following commands:

    DELETE FROM stations;
    ALTER SEQUENCE stations_id_seq RESTART 1;
    '''

    @inject(db_connection=psycopg2._psycopg.connection)
    def __init__(self, db_connection):
        super(Station, self).__init__(db_connection)
        self.set_table_name('stations')

    def insert(self, station):
        """Insert one station record (positional %s parameters)."""
        self.execute('INSERT INTO ' + self.get_full_table_name() + \
                     ' (number, short_name, "name", location_name, country, timestamp, geog) ' + \
                     'VALUES (%s, %s, %s, %s, %s, %s, ST_MakePoint(%s, %s))',
                     (station.get_number(), station.get_short_name(), station.get_name(), station.get_location_name(), station.get_country(), station.get_timestamp(), station.get_x(), station.get_y()))

    def select(self, timestamp=None):
        """Return the latest record for every station, joined with its
        current offline interval (if any).

        NOTE(review): the 'timestamp' parameter is accepted but not used
        by the query — confirm whether filtering by it was intended.
        """
        sql = ''' select
             o.begin, s.number, s.short_name, s.name, s.location_name, s.country, s.geog
        from stations as s
        inner join
           (select b.number, max(b.timestamp) as timestamp
            from stations as b
            group by number
            order by number) as c
        on s.number = c.number and s.timestamp = c.timestamp
        left join stations_offline as o
        on o.number = s.number and o."end" is null
        order by s.number'''
        self.execute(sql)

        resulting_stations = []
        if self.cur.rowcount > 0:
            for result in self.cur.fetchall():
                resulting_stations.append(self.create(result))

        return resulting_stations

    def create(self, result):
        """Build a Station domain object from one result row."""
        station_builder = blitzortung.builder.Station()

        station_builder.set_number(result['number'])
        station_builder.set_short_name(result['short_name'])
        station_builder.set_name(result['name'])
        station_builder.set_location_name(result['location_name'])
        station_builder.set_country(result['country'])
        # geog is hex-encoded WKB (Python 2 str.decode('hex')).
        location = shapely.wkb.loads(result['geog'].decode('hex'))
        station_builder.set_x(location.x)
        station_builder.set_y(location.y)
        # 'begin' is the start of the current offline interval (or None).
        station_builder.set_timestamp(self.fix_timezone(result['begin']))

        return station_builder.build()
def station():
    """Return the singleton Station DAO from the application injector."""
    from __init__ import INJECTOR

    return INJECTOR.get(Station)
@singleton
class StationOffline(Base):
    '''
    database table creation (as db user blitzortung, database blitzortung):

    CREATE TABLE stations_offline (id bigserial, number int, PRIMARY KEY(id));
    ALTER TABLE stations_offline ADD COLUMN begin TIMESTAMPTZ;
    ALTER TABLE stations_offline ADD COLUMN "end" TIMESTAMPTZ;

    CREATE INDEX stations_offline_begin ON stations_offline USING btree(begin);
    CREATE INDEX stations_offline_end ON stations_offline USING btree("end");
    CREATE INDEX stations_offline_end_number ON stations_offline USING btree("end", number);
    CREATE INDEX stations_offline_begin_end ON stations_offline USING btree(begin, "end");

    empty the table with the following commands:

    DELETE FROM stations_offline;
    ALTER SEQUENCE stations_offline_id_seq RESTART 1;
    '''

    @inject(db_connection=psycopg2._psycopg.connection)
    def __init__(self, db_connection):
        super(StationOffline, self).__init__(db_connection)
        self.set_table_name('stations_offline')

    def insert(self, station_offline):
        """Insert one offline-interval record."""
        self.execute('INSERT INTO ' + self.get_full_table_name() + \
                     ' (number, begin, "end") ' + \
                     'VALUES (%s, %s, %s)',
                     (station_offline.get_number(), station_offline.get_begin(), station_offline.get_end()))

    def update(self, station_offline):
        """Close an open offline interval by setting its "end" timestamp."""
        self.execute('UPDATE ' + self.get_full_table_name() + ' SET "end"=%s WHERE id=%s',
                     (station_offline.get_end(), station_offline.get_id()))

    def select(self, timestamp=None):
        """Return all currently open offline intervals ("end" IS NULL).

        NOTE(review): the 'timestamp' parameter is accepted but unused by
        the query — confirm intent.
        """
        sql = '''select id, number, begin, "end" from stations_offline where "end" is null order by number;'''
        self.execute(sql)

        resulting_stations = []
        if self.cur.rowcount > 0:
            for result in self.cur.fetchall():
                resulting_stations.append(self.create(result))

        return resulting_stations

    def create(self, result):
        """Build a StationOffline domain object from one result row."""
        stationOfflineBuilder = blitzortung.builder.StationOffline()

        stationOfflineBuilder.set_id(result['id'])
        stationOfflineBuilder.set_number(result['number'])
        stationOfflineBuilder.set_begin(result['begin'])
        stationOfflineBuilder.set_end(result['end'])

        return stationOfflineBuilder.build()
def station_offline():
    """Return the singleton StationOffline DAO from the injector."""
    from __init__ import INJECTOR

    return INJECTOR.get(StationOffline)
@singleton
class Location(Base):
    '''
    geonames db access class

    CREATE SCHEMA geo;

    CREATE TABLE geo.geonames (id bigserial, "name" character varying, geog Geography(Point), PRIMARY KEY(id));

    ALTER TABLE geo.geonames ADD COLUMN "class" INTEGER;
    ALTER TABLE geo.geonames ADD COLUMN feature_class CHARACTER(1);
    ALTER TABLE geo.geonames ADD COLUMN feature_code VARCHAR;
    ALTER TABLE geo.geonames ADD COLUMN country_code VARCHAR;
    ALTER TABLE geo.geonames ADD COLUMN admin_code_1 VARCHAR;
    ALTER TABLE geo.geonames ADD COLUMN admin_code_2 VARCHAR;
    ALTER TABLE geo.geonames ADD COLUMN population INTEGER;
    ALTER TABLE geo.geonames ADD COLUMN elevation SMALLINT;

    CREATE INDEX geonames_geog ON geo.geonames USING gist(geog);
    '''

    @inject(db_connection=psycopg2._psycopg.connection)
    def __init__(self, db_connection):
        super(Location, self).__init__(db_connection)
        self.set_schema_name('geo')
        self.set_table_name('geonames')
        # Query state filled in by select().
        self.center = None
        self.min_population = None
        self.limit = None
        self.max_distance = None

    def delete_all(self):
        """Remove every geonames record."""
        self.execute('DELETE FROM ' + self.get_full_table_name())

    def insert(self, line):
        """Insert one location parsed from a tab-separated geonames
        dump line; rows below the population threshold are skipped."""
        fields = line.strip().split('\t')
        name = fields[1]
        y = float(fields[4])
        x = float(fields[5])
        feature_class = fields[6]
        feature_code = fields[7]
        country_code = fields[8]
        admin_code_1 = fields[10]
        admin_code_2 = fields[11]
        # NOTE(review): admin_code_3/4 are parsed but never used — kept
        # here as documentation of the dump's field layout.
        admin_code_3 = fields[12]
        admin_code_4 = fields[13]
        population = int(fields[14])
        if fields[15] != '':
            elevation = int(fields[15])
        else:
            # -1 marks "elevation unknown".
            elevation = -1

        name = name.replace("'", "''")

        classification = self.size_class(population)

        if classification is not None:
            self.execute('INSERT INTO ' + self.get_full_table_name() + '''
            (geog, name, class, feature_class, feature_code, country_code, admin_code_1, admin_code_2, population, elevation)
            VALUES(
            ST_GeomFromText('POINT(%s %s)', 4326), %s, %s, %s, %s, %s, %s, %s, %s, %s)''',
                         (x, y, name, classification, feature_class, feature_code, country_code, admin_code_1, admin_code_2, population, elevation))

    def size_class(self, n):
        """Map a population count to a size class in [0, 15].

        Returns None for populations below 1.  Each power of ten spans
        three classes, subdivided at relative values 25 and 50.
        """
        if n < 1:
            return None
        base = math.floor(math.log(n)/math.log(10)) - 1
        relative = n / math.pow(10, base)
        order = min(2, math.floor(relative/25))
        if base < 0:
            base = 0
        return min(15, base * 3 + order)

    def select(self, *args):
        """Return up to 'limit' populated places near a Center argument,
        each as a dict with keys 'name', 'distance' and 'azimuth'.

        NOTE(review): a Center argument appears to be required — without
        one, self.center stays None and building the parameters fails.
        """
        # Defaults, overridable via Center/Limit arguments.
        self.center = None
        self.min_population = 1000
        self.max_distance = 10000
        self.limit = 10

        for arg in args:
            if arg != None:
                if isinstance(arg, Center):
                    self.center = arg
                elif isinstance(arg, Limit):
                    self.limit = arg

        if self.is_connected():
            queryString = '''SELECT
                name,
                country_code,
                admin_code_1,
                admin_code_2,
                feature_class,
                feature_code,
                elevation,
                ST_Transform(geog, %(srid)s) AS geog,
                population,
                ST_Distance_Sphere(geog, c.center) AS distance,
                ST_Azimuth(geog, c.center) AS azimuth
            FROM
                (SELECT ST_SetSRID(ST_MakePoint(%(center_x)s, %(center_y)s), %(srid)s) as center ) as c,''' + \
                self.get_full_table_name() + '''
            WHERE
                feature_class='P'
                AND population >= %(min_population)s
                AND ST_Transform(geog, %(srid)s) && st_expand(c.center, %(max_distance)s) order by distance limit %(limit)s'''

            params = {
                'srid': self.get_srid(),
                'center_x': self.center.get_point().x,
                'center_y': self.center.get_point().y,
                'min_population': self.min_population,
                'max_distance': self.max_distance,
                'limit': self.limit
            }

            self.execute(queryString, params)

            locations = []
            if self.cur.rowcount > 0:
                for result in self.cur.fetchall():
                    location = {}
                    location['name'] = result['name']
                    location['distance'] = result['distance']
                    location['azimuth'] = result['azimuth']
                    locations.append(location)

            return locations
def location():
    """Return the singleton Location DAO from the application injector."""
    from __init__ import INJECTOR

    return INJECTOR.get(Location)
# NOTE: stray commit-message text pasted between file copies: "better error message when histogram fails"
# -*- coding: utf8 -*-
''' classes for database access '''
import math
from injector import Module, provides, inject, singleton
import datetime
import pytz
import shapely.wkb
import shapely.geometry
import pandas as pd
try:
import psycopg2
import psycopg2.extras
import psycopg2.extensions
except ImportError:
pass
from abc import ABCMeta, abstractmethod
import blitzortung
class BaseInterval(object):
    """Generic interval with optional start and end bounds."""

    def __init__(self, start=None, end=None):
        self.start = start
        self.end = end

    def get_start(self):
        """Lower bound of the interval (or None)."""
        return self.start

    def get_end(self):
        """Upper bound of the interval (or None)."""
        return self.end

    def __str__(self):
        return '[%s - %s]' % (self.start, self.end)
class IdInterval(BaseInterval):
    """Interval of integer ids; both bounds are validated when truthy."""

    def __init__(self, start=None, end=None):
        for bound_name, bound in (('start', start), ('end', end)):
            if bound and not isinstance(bound, int):
                raise ValueError("%s should be an integer value" % bound_name)
        super(IdInterval, self).__init__(start, end)
class TimeInterval(BaseInterval):
    """Interval of datetimes; both bounds are validated when truthy."""

    def __init__(self, start=None, end=None):
        for bound_name, bound in (('start', start), ('end', end)):
            if bound and not isinstance(bound, datetime.datetime):
                raise ValueError("%s should be a datetime value" % bound_name)
        super(TimeInterval, self).__init__(start, end)
class Query(object):
'''
simple class for building of complex queries
'''
def __init__(self):
self.sql = ''
self.conditions = []
self.parameters = {}
self.table_name = None
self.columns = None
self.limit = None
self.order = []
def set_table_name(self, table_name):
self.table_name = table_name
def set_columns(self, columns):
self.columns = columns
def add_order(self, order):
self.order.append(order)
def set_limit(self, limit):
if self.limit != None:
raise RuntimeError("overriding Query.limit")
self.limit = limit
def add_condition(self, condition, parameters = None):
self.conditions.append(condition)
if parameters != None:
self.parameters.update(parameters)
def add_parameters(self, parameters):
self.parameters.update(parameters)
def __str__(self):
sql = 'SELECT '
if self.columns:
for index, column in enumerate(self.columns):
if index != 0:
sql += ', '
sql += column
sql += ' '
sql += 'FROM ' + self.table_name + ' '
for index, condition in enumerate(self.conditions):
if index == 0:
sql += 'WHERE '
else:
sql += 'AND '
sql += condition + ' '
if len(self.order) > 0:
sql += 'ORDER BY '
for index, order in enumerate(self.order):
if index != 0:
sql += ', '
sql += order.get_column()
if order.is_desc():
sql += ' DESC'
sql += ' '
if self.limit:
sql += 'LIMIT ' + str(self.limit.get_number()) + ' '
return sql.strip()
def get_parameters(self):
return self.parameters
def parse_args(self, args):
    """Translate a heterogeneous argument list into query clauses.

    Supported argument types: TimeInterval, IdInterval, shapely
    geometries, Order, Limit.  Falsy entries are skipped; unknown
    types only produce a warning.
    """
    for arg in args:
        if arg:
            if isinstance(arg, TimeInterval):
                # half-open interval [start, end)
                if arg.get_start() != None:
                    self.add_condition('timestamp >= %(starttime)s', {'starttime': arg.get_start()})
                #self.add_condition('timestamp >= :starttime', {'starttime': arg.get_start().astimezone(pytz.UTC).replace(tzinfo=None)})
                if arg.get_end() != None:
                    self.add_condition('timestamp < %(endtime)s', {'endtime': arg.get_end()})
                #self.add_condition('timestamp < :endtime', {'endtime': arg.get_end().astimezone(pytz.UTC).replace(tzinfo=None)})
            elif isinstance(arg, IdInterval):
                # half-open id interval [start, end)
                if arg.get_start() != None:
                    self.add_condition('id >= %(startid)s', {'startid': arg.get_start()})
                if arg.get_end() != None:
                    self.add_condition('id < %(endid)s', {'endid': arg.get_end()})
            elif isinstance(arg, shapely.geometry.base.BaseGeometry):
                if arg.is_valid:
                    # bounding-box prefilter; WKB is hex-encoded (py2 str.encode('hex'))
                    self.add_condition('ST_SetSRID(CAST(%(envelope)s AS geometry), %(srid)s) && geog', {'envelope': shapely.wkb.dumps(arg.envelope).encode('hex')})
                    #self.add_condition('ST_SetSRID(CAST(:envelope AS geometry), :srid) && Transform(geog, :srid)', {'envelope': shapely.wkb.dumps(arg.envelope).encode('hex')})
                    if not arg.equals(arg.envelope):
                        # exact intersection test only needed for non-rectangular shapes
                        self.add_condition('Intersects(ST_SetSRID(CAST(%(geometry)s AS geometry), %(srid)s), st_transform(geog, %(srid)s))', {'geometry': shapely.wkb.dumps(arg).encode('hex')})
                        #self.add_condition('Intersects(ST_SetSRID(CAST(:geometry AS geometry), :srid), Transform(geog, :srid))', {'geometry': shapely.wkb.dumps(arg).encode('hex')})
                else:
                    raise ValueError("invalid geometry in db.Stroke.select()")
            elif isinstance(arg, Order):
                self.add_order(arg)
            elif isinstance(arg, Limit):
                self.set_limit(arg)
            else:
                print 'WARNING: ' + __name__ + ' unhandled object ' + str(type(arg))
def get_results(self, db):
    """Fetch all rows from db's cursor and map each through db.create()."""
    if db.cur.rowcount > 0:
        return [db.create(row) for row in db.cur.fetchall()]
    return []
class RasterQuery(Query):
    """Query variant that aggregates strokes into a 2D raster grid."""

    def __init__(self, raster):
        super(RasterQuery, self).__init__()
        self.raster = raster
        env = self.raster.get_env()
        if env.is_valid:
            # bounding-box prefilter on the raster envelope; WKB hex-encoded (py2)
            self.add_condition('ST_SetSRID(CAST(%(envelope)s AS geometry), %(srid)s) && geog', {'envelope': shapely.wkb.dumps(env).encode('hex')})
            #self.add_condition('ST_SetSRID(CAST(:envelope AS geometry), :srid) && Transform(geog, :srid)', {'envelope': shapely.wkb.dumps(env).encode('hex')})
        else:
            raise ValueError("invalid Raster geometry in db.Stroke.select()")

    def __str__(self):
        """Wrap the base SELECT in a per-grid-cell count aggregation."""
        sql = 'SELECT '
        sql += 'TRUNC((ST_X(ST_TRANSFORM(geog, %(srid)s)) - ' + str(self.raster.get_x_min()) + ') /' + str(self.raster.get_x_div()) + ') AS rx, '
        sql += 'TRUNC((ST_Y(ST_TRANSFORM(geog, %(srid)s)) - ' + str(self.raster.get_y_min()) + ') /' + str(self.raster.get_y_div()) + ') AS ry, '
        sql += 'count(*) AS count, max(timestamp) as timestamp FROM ('
        sql += Query.__str__(self)
        sql += ') AS ' + self.table_name + ' GROUP BY rx, ry'
        return sql

    def get_results(self, db):
        """Fill the raster with per-cell counts; returns the raster itself."""
        if db.cur.rowcount > 0:
            for result in db.cur.fetchall():
                self.raster.set(result['rx'], result['ry'], blitzortung.geom.RasterElement(result['count'], result['timestamp']))
        return self.raster
class Order(object):
    """Represents a single ORDER BY term of a query."""

    def __init__(self, column, desc=False):
        self.column = column
        self.desc = desc

    def get_column(self):
        """Name of the column to order by."""
        return self.column

    def is_desc(self):
        """True when descending order was requested."""
        return self.desc
class Limit(object):
    """Represents the LIMIT clause of a query result."""

    def __init__(self, limit):
        self.limit = limit

    def get_number(self):
        """Maximum number of rows to return."""
        return self.limit
class Center(object):
    """Represents the center point of a proximity query."""

    def __init__(self, center):
        self.center = center

    def get_point(self):
        """The wrapped center point object."""
        return self.center
class Connection(Module):
    """Dependency-injection module providing the shared psycopg2 connection."""

    @singleton
    @provides(psycopg2._psycopg.connection)
    @inject(configuration=blitzortung.config.configuration)
    def provide_psycopg2_connection(self, configuration):
        # single application-wide connection, built from the configured DSN
        return psycopg2.connect(configuration['db_connection_string'])
class Base(object):
    '''
    abstract base class for database access objects

    creation of database
    as user postgres:
    createuser -i -D -R -S -W -E -P blitzortung
    createdb -E utf8 -O blitzortung blitzortung
    createlang plpgsql blitzortung
    psql -f /usr/share/postgresql/9.1/contrib/postgis-1.5/postgis.sql -d blitzortung
    psql -f /usr/share/postgresql/9.1/contrib/postgis-1.5/spatial_ref_sys.sql -d blitzortung
    (< pg 9.0)
    psql -f /usr/share/postgresql/8.4/contrib/btree_gist.sql blitzortung
    psql blitzortung
    GRANT SELECT ON spatial_ref_sys TO blitzortung;
    GRANT SELECT ON geometry_columns TO blitzortung;
    GRANT INSERT, DELETE ON geometry_columns TO blitzortung;
    (>= pg 9.0)
    CREATE EXTENSION "btree_gist";
    '''
    # Python 2 style abstract-base-class marker
    __metaclass__ = ABCMeta

    # timestamps are handled in UTC unless a timezone is set explicitly
    DefaultTimezone = pytz.UTC

    def __init__(self, db_connection):
        """Store the connection and open a DictCursor with unicode support."""
        self.schema_name = None
        self.table_name = None
        self.cur = None
        self.conn = db_connection
        # deferred one-time session setup flag, see execute()
        self.initialized = False
        self.srid = blitzortung.geom.Geometry.DefaultSrid
        self.tz = Base.DefaultTimezone
        try:
            self.cur = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
            psycopg2.extensions.register_type(psycopg2.extensions.UNICODE, self.cur)
        except psycopg2.DatabaseError, e:
            # NOTE(review): errors are only printed, not re-raised; cleanup is
            # best-effort (the except NameError guards look like leftovers from
            # a time when cur/conn were plain locals — confirm).
            print e
            if self.cur != None:
                try:
                    self.cur.close()
                except NameError:
                    pass
            if self.conn != None:
                try:
                    self.conn.close()
                except NameError:
                    pass
        # commented-out spatialite-era bootstrap kept for reference:
        #if not self.has_table('geometry_columns'):
        #    self.execute('SELECT InitSpatialMetadata()')
        #    self.execute("INSERT INTO spatial_ref_sys (srid, auth_name, auth_srid, ref_sys_name, proj4text) VALUES (4326, 'epsg', 4326, 'WGS 84', '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')")
        #    def has_table(self, table_name):
        #        result = self.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='%s'" % table_name)
        #        return result.fetchone() != None

    def is_connected(self):
        """True when a connection is present and not closed."""
        if self.conn != None:
            return not self.conn.closed
        else:
            return False

    def set_table_name(self, table_name):
        self.table_name = table_name

    def get_table_name(self):
        return self.table_name

    def get_full_table_name(self):
        """Return the table name, schema-qualified and quoted when a schema is set."""
        if self.get_schema_name() != None:
            return '"' + self.get_schema_name() + '"."' + self.get_table_name() + '"'
        else:
            return self.get_table_name()

    def set_schema_name(self, schema_name):
        self.schema_name = schema_name

    def get_schema_name(self):
        return self.schema_name

    def get_srid(self):
        return self.srid

    def set_srid(self, srid):
        self.srid = srid

    def get_timezone(self):
        return self.tz

    def set_timezone(self, tz):
        self.tz = tz

    def fix_timezone(self, timestamp):
        """Convert *timestamp* to this DAO's timezone; passes None through."""
        return timestamp.astimezone(self.tz) if timestamp else None

    def from_bare_utc_to_timezone(self, utc_time):
        """Attach UTC to a naive datetime and convert to the DAO timezone."""
        return utc_time.replace(tzinfo=pytz.UTC).astimezone(self.tz)

    def from_timezone_to_bare_utc(self, time_with_tz):
        """Convert an aware datetime to UTC and strip the tzinfo."""
        return time_with_tz.astimezone(pytz.UTC).replace(tzinfo=None)

    def commit(self):
        ''' commit pending database transaction '''
        self.conn.commit()

    def rollback(self):
        ''' rollback pending database transaction '''
        self.conn.rollback()

    @abstractmethod
    def insert(self, object_to_insert):
        pass

    @abstractmethod
    def select(self, args):
        pass

    def execute(self, sql_string, parameters=None):
        """Run a statement, setting the session time zone on first use."""
        if not self.initialized:
            self.cur.execute('SET TIME ZONE \'%s\'' %(str(self.tz)))
            self.initialized = True
        self.cur.execute(sql_string, parameters)
@singleton
class Stroke(Base):
    '''
    stroke db access class

    database table creation (as db user blitzortung, database blitzortung):
    CREATE TABLE strokes (id bigserial, timestamp timestamptz, nanoseconds SMALLINT, geog GEOGRAPHY(Point), PRIMARY KEY(id));
    ALTER TABLE strokes ADD COLUMN region SMALLINT;
    ALTER TABLE strokes ADD COLUMN amplitude REAL;
    ALTER TABLE strokes ADD COLUMN error2d SMALLINT;
    ALTER TABLE strokes ADD COLUMN type SMALLINT;
    ALTER TABLE strokes ADD COLUMN stationcount SMALLINT;
    CREATE INDEX strokes_timestamp ON strokes USING btree("timestamp");
    CREATE INDEX strokes_region_timestamp ON strokes USING btree(region, "timestamp");
    CREATE INDEX strokes_id_timestamp ON strokes USING btree(id, "timestamp");
    CREATE INDEX strokes_geog ON strokes USING gist(geog);
    CREATE INDEX strokes_timestamp_geog ON strokes USING gist("timestamp", geog);
    CREATE INDEX strokes_id_timestamp_geog ON strokes USING gist(id, "timestamp", geog);
    empty the table with the following commands:
    DELETE FROM strokes;
    ALTER SEQUENCE strokes_id_seq RESTART 1;
    '''

    @inject(db_connection=psycopg2._psycopg.connection)
    def __init__(self, db_connection):
        super(Stroke, self).__init__(db_connection)
        self.set_table_name('strokes')
        # commented-out sqlite-era bootstrap kept for reference:
        #    if not self.has_table(self.get_table_name()):
        #        self.execute("CREATE TABLE strokes (id INTEGER PRIMARY KEY, timestamp timestamp, nanoseconds INTEGER)")
        #        self.execute("SELECT AddGeometryColumn('strokes','geog',4326,'POINT',2)")
        #        self.execute("ALTER TABLE strokes ADD COLUMN amplitude REAL")
        #        self.execute("ALTER TABLE strokes ADD COLUMN error2d INTEGER")
        #        self.execute("ALTER TABLE strokes ADD COLUMN type INTEGER")
        #        self.execute("ALTER TABLE strokes ADD COLUMN stationcount INTEGER")
        #        self.execute("ALTER TABLE strokes ADD COLUMN detected BOOLEAN")

    def insert(self, stroke, region=1):
        """Insert one stroke record; geog is built via ST_MakePoint(x, y)."""
        sql = 'INSERT INTO ' + self.get_full_table_name() + \
              ' ("timestamp", nanoseconds, geog, region, amplitude, error2d, type, stationcount) ' + \
              'VALUES (%(timestamp)s, %(nanoseconds)s, ST_MakePoint(%(longitude)s, %(latitude)s), %(region)s, %(amplitude)s, %(error2d)s, %(type)s, %(stationcount)s)'
        parameters = {
            'timestamp': stroke.get_timestamp(),
            'nanoseconds': stroke.get_timestamp().nanosecond,
            'longitude': stroke.get_x(),
            'latitude': stroke.get_y(),
            'region': region,
            'amplitude': stroke.get_amplitude(),
            'error2d': stroke.get_lateral_error(),
            'type': stroke.get_type(),
            'stationcount': stroke.get_station_count()
        }
        self.execute(sql, parameters)

    def get_latest_time(self, region=1):
        """Return the newest stroke timestamp for *region*, or None if empty."""
        sql = 'SELECT timestamp FROM ' + self.get_full_table_name() + \
              ' WHERE region=%(region)s' + \
              ' ORDER BY timestamp DESC LIMIT 1'
        self.execute(sql, {'region': region})
        if self.cur.rowcount == 1:
            result = self.cur.fetchone()
            return pd.Timestamp(self.fix_timezone(result['timestamp']))
        else:
            return None

    def create(self, result):
        """Build a blitzortung.builder.Stroke from a DictCursor row."""
        stroke_builder = blitzortung.builder.Stroke()
        stroke_builder.set_id(result['id'])
        stroke_builder.set_timestamp(self.fix_timezone(result['timestamp']), result['nanoseconds'])
        # geog column arrives as hex-encoded WKB (py2 str.decode('hex'))
        location = shapely.wkb.loads(result['geog'].decode('hex'))
        stroke_builder.set_x(location.x)
        stroke_builder.set_y(location.y)
        stroke_builder.set_amplitude(result['amplitude'])
        stroke_builder.set_type(result['type'])
        stroke_builder.set_station_count(result['stationcount'])
        stroke_builder.set_lateral_error(result['error2d'])
        return stroke_builder.build()

    def select_query(self, args, query = None):
        ' build up query object for select statement '
        if not query:
            query = Query()
        query.set_table_name(self.get_full_table_name())
        query.set_columns(['id', '"timestamp"', 'nanoseconds', 'st_transform(geog::geometry, %(srid)s) AS geog', 'amplitude', 'type', 'error2d', 'stationcount'])
        query.add_parameters({'srid': self.srid})
        #query.add_condition('geog IS NOT NULL')
        query.parse_args(args)
        return query

    def select(self, *args):
        ' build up query '
        query = self.select_query(args)
        return self.select_execute(query)

    def select_raster(self, raster, *args):
        ' build up query '
        query = self.select_query(args, RasterQuery(raster))
        return self.select_execute(query)

    def select_histogram(self, minutes, region=1, binsize=5):
        """Return per-bin stroke counts over the trailing *minutes* window."""
        query = "select -extract(epoch from current_timestamp - timestamp)::int/60/%(binsize)s as interval, count(*) from strokes where timestamp > current_timestamp - interval '%(minutes)s minutes' and region = %(region)s group by interval order by interval;"
        # executed on the raw cursor, bypassing Base.execute's timezone setup
        self.cur.execute(query, {'minutes': minutes, 'binsize':binsize, 'region':region})
        # NOTE(review): py2 integer division assumed; minutes is presumably a
        # multiple of binsize — confirm with callers.
        value_count = minutes/binsize
        result = [0] * value_count
        raw_result = self.cur.fetchall()
        for bin_data in raw_result:
            try:
                # intervals are negative offsets from "now"; shift into [0, value_count)
                result[bin_data[0] + value_count - 1] = bin_data[1]
            except IndexError:
                print "Index %d/%d out of range (%s)" % ( bin_data[0] + value_count - 1, value_count, str(bin_data))
                print "Query:",query % {'minutes': str(minutes), 'binsize': str(binsize), 'region': str(region)}
        return result

    def select_execute(self, query):
        """Run the assembled query and delegate row mapping to it."""
        self.execute(str(query), query.get_parameters())
        return query.get_results(self)
def stroke():
    """Return the injector-managed singleton Stroke DAO."""
    from __init__ import INJECTOR as injector
    return injector.get(Stroke)
@singleton
class Station(Base):
    '''
    database table creation (as db user blitzortung, database blitzortung):
    CREATE TABLE stations (id bigserial, number int, geog GEOGRAPHY(Point), PRIMARY KEY(id));
    ALTER TABLE stations ADD COLUMN short_name CHARACTER VARYING;
    ALTER TABLE stations ADD COLUMN name CHARACTER VARYING;
    ALTER TABLE stations ADD COLUMN location_name CHARACTER VARYING;
    ALTER TABLE stations ADD COLUMN country CHARACTER VARYING;
    ALTER TABLE stations ADD COLUMN timestamp TIMESTAMPTZ;
    CREATE INDEX stations_timestamp ON stations USING btree("timestamp");
    CREATE INDEX stations_number_timestamp ON stations USING btree(number, "timestamp");
    CREATE INDEX stations_geog ON stations USING gist(geog);
    empty the table with the following commands:
    DELETE FROM stations;
    ALTER SEQUENCE stations_id_seq RESTART 1;
    '''

    @inject(db_connection=psycopg2._psycopg.connection)
    def __init__(self, db_connection):
        super(Station, self).__init__(db_connection)
        self.set_table_name('stations')

    def insert(self, station):
        """Insert one station snapshot; geog is built via ST_MakePoint(x, y)."""
        self.execute('INSERT INTO ' + self.get_full_table_name() + \
                     ' (number, short_name, "name", location_name, country, timestamp, geog) ' + \
                     'VALUES (%s, %s, %s, %s, %s, %s, ST_MakePoint(%s, %s))',
                     (station.get_number(), station.get_short_name(), station.get_name(), station.get_location_name(), station.get_country(), station.get_timestamp(), station.get_x(), station.get_y()))

    def select(self, timestamp=None):
        """Return the latest snapshot per station joined with any open offline period.

        NOTE(review): the *timestamp* parameter is currently unused.
        """
        sql = ''' select
        o.begin, s.number, s.short_name, s.name, s.location_name, s.country, s.geog
        from stations as s
        inner join
        (select b.number, max(b.timestamp) as timestamp
        from stations as b
        group by number
        order by number) as c
        on s.number = c.number and s.timestamp = c.timestamp
        left join stations_offline as o
        on o.number = s.number and o."end" is null
        order by s.number'''
        self.execute(sql)
        resulting_stations = []
        if self.cur.rowcount > 0:
            for result in self.cur.fetchall():
                resulting_stations.append(self.create(result))
        return resulting_stations

    def create(self, result):
        """Build a blitzortung.builder.Station from a DictCursor row."""
        station_builder = blitzortung.builder.Station()
        station_builder.set_number(result['number'])
        station_builder.set_short_name(result['short_name'])
        station_builder.set_name(result['name'])
        station_builder.set_location_name(result['location_name'])
        station_builder.set_country(result['country'])
        # geog arrives as hex-encoded WKB (py2 str.decode('hex'))
        location = shapely.wkb.loads(result['geog'].decode('hex'))
        station_builder.set_x(location.x)
        station_builder.set_y(location.y)
        # 'begin' comes from the joined stations_offline row (may be NULL)
        station_builder.set_timestamp(self.fix_timezone(result['begin']))
        return station_builder.build()
def station():
    """Return the injector-managed singleton Station DAO."""
    from __init__ import INJECTOR as injector
    return injector.get(Station)
@singleton
class StationOffline(Base):
    '''
    database table creation (as db user blitzortung, database blitzortung):
    CREATE TABLE stations_offline (id bigserial, number int, PRIMARY KEY(id));
    ALTER TABLE stations_offline ADD COLUMN begin TIMESTAMPTZ;
    ALTER TABLE stations_offline ADD COLUMN "end" TIMESTAMPTZ;
    CREATE INDEX stations_offline_begin ON stations_offline USING btree(begin);
    CREATE INDEX stations_offline_end ON stations_offline USING btree("end");
    CREATE INDEX stations_offline_end_number ON stations_offline USING btree("end", number);
    CREATE INDEX stations_offline_begin_end ON stations_offline USING btree(begin, "end");
    empty the table with the following commands:
    DELETE FROM stations_offline;
    ALTER SEQUENCE stations_offline_id_seq RESTART 1;
    '''

    @inject(db_connection=psycopg2._psycopg.connection)
    def __init__(self, db_connection):
        super(StationOffline, self).__init__(db_connection)
        self.set_table_name('stations_offline')

    def insert(self, station_offline):
        """Insert a new offline period ("end" is NULL while still offline)."""
        self.execute('INSERT INTO ' + self.get_full_table_name() + \
                     ' (number, begin, "end") ' + \
                     'VALUES (%s, %s, %s)',
                     (station_offline.get_number(), station_offline.get_begin(), station_offline.get_end()))

    def update(self, station_offline):
        """Close an offline period by setting its "end" timestamp."""
        self.execute('UPDATE ' + self.get_full_table_name() + ' SET "end"=%s WHERE id=%s',
                     (station_offline.get_end(), station_offline.get_id()))

    def select(self, timestamp=None):
        """Return all currently open offline periods.

        NOTE(review): the *timestamp* parameter is currently unused.
        """
        sql = '''select id, number, begin, "end" from stations_offline where "end" is null order by number;'''
        self.execute(sql)
        resulting_stations = []
        if self.cur.rowcount > 0:
            for result in self.cur.fetchall():
                resulting_stations.append(self.create(result))
        return resulting_stations

    def create(self, result):
        """Build a blitzortung.builder.StationOffline from a DictCursor row."""
        stationOfflineBuilder = blitzortung.builder.StationOffline()
        stationOfflineBuilder.set_id(result['id'])
        stationOfflineBuilder.set_number(result['number'])
        stationOfflineBuilder.set_begin(result['begin'])
        stationOfflineBuilder.set_end(result['end'])
        return stationOfflineBuilder.build()
def station_offline():
    """Return the injector-managed singleton StationOffline DAO."""
    from __init__ import INJECTOR as injector
    return injector.get(StationOffline)
@singleton
class Location(Base):
    '''
    geonames db access class
    CREATE SCHEMA geo;
    CREATE TABLE geo.geonames (id bigserial, "name" character varying, geog Geography(Point), PRIMARY KEY(id));
    ALTER TABLE geo.geonames ADD COLUMN "class" INTEGER;
    ALTER TABLE geo.geonames ADD COLUMN feature_class CHARACTER(1);
    ALTER TABLE geo.geonames ADD COLUMN feature_code VARCHAR;
    ALTER TABLE geo.geonames ADD COLUMN country_code VARCHAR;
    ALTER TABLE geo.geonames ADD COLUMN admin_code_1 VARCHAR;
    ALTER TABLE geo.geonames ADD COLUMN admin_code_2 VARCHAR;
    ALTER TABLE geo.geonames ADD COLUMN population INTEGER;
    ALTER TABLE geo.geonames ADD COLUMN elevation SMALLINT;
    CREATE INDEX geonames_geog ON geo.geonames USING gist(geog);
    '''

    @inject(db_connection=psycopg2._psycopg.connection)
    def __init__(self, db_connection):
        super(Location, self).__init__(db_connection)
        self.set_schema_name('geo')
        self.set_table_name('geonames')
        # per-select() search state, (re)initialized on each select() call
        self.center = None
        self.min_population = None
        self.limit = None
        self.max_distance = None

    def delete_all(self):
        """Remove every geonames row (used before a fresh import)."""
        self.execute('DELETE FROM ' + self.get_full_table_name())

    def insert(self, line):
        """Insert one tab-separated geonames dump line.

        Rows whose population yields no size classification are skipped.
        """
        fields = line.strip().split('\t')
        name = fields[1]
        y = float(fields[4])   # latitude
        x = float(fields[5])   # longitude
        feature_class = fields[6]
        feature_code = fields[7]
        country_code = fields[8]
        admin_code_1 = fields[10]
        admin_code_2 = fields[11]
        admin_code_3 = fields[12]  # NOTE(review): parsed but never stored
        admin_code_4 = fields[13]  # NOTE(review): parsed but never stored
        population = int(fields[14])
        if fields[15] != '':
            elevation = int(fields[15])
        else:
            elevation = -1  # sentinel for "elevation unknown"
        name = name.replace("'", "''")
        classification = self.size_class(population)
        if classification is not None:
            self.execute('INSERT INTO ' + self.get_full_table_name() + '''
            (geog, name, class, feature_class, feature_code, country_code, admin_code_1, admin_code_2, population, elevation)
            VALUES(
            ST_GeomFromText('POINT(%s %s)', 4326), %s, %s, %s, %s, %s, %s, %s, %s, %s)''',
                         (x, y, name, classification, feature_class, feature_code, country_code, admin_code_1, admin_code_2, population, elevation))

    def size_class(self, n):
        """Map a population figure to a 0..15 size class (None for n < 1)."""
        if n < 1:
            return None
        # order of magnitude above ten, refined by 25/50/75 sub-thresholds
        base = math.floor(math.log(n)/math.log(10)) - 1
        relative = n / math.pow(10, base)
        order = min(2, math.floor(relative/25))
        if base < 0:
            base = 0
        return min(15, base * 3 + order)

    def select(self, *args):
        """Return nearby populated places for a Center argument.

        Accepts Center and Limit arguments; other values are ignored.
        """
        # defaults, possibly overridden by the supplied arguments
        self.center = None
        self.min_population = 1000
        self.max_distance = 10000
        self.limit = 10
        for arg in args:
            if arg != None:
                if isinstance(arg, Center):
                    self.center = arg
                elif isinstance(arg, Limit):
                    # NOTE(review): stores the Limit object itself rather than
                    # arg.get_number(); confirm psycopg2 can adapt it as the
                    # LIMIT bind parameter below.
                    self.limit = arg
        if self.is_connected():
            queryString = '''SELECT
            name,
            country_code,
            admin_code_1,
            admin_code_2,
            feature_class,
            feature_code,
            elevation,
            ST_Transform(geog, %(srid)s) AS geog,
            population,
            ST_Distance_Sphere(geog, c.center) AS distance,
            ST_Azimuth(geog, c.center) AS azimuth
            FROM
            (SELECT ST_SetSRID(ST_MakePoint(%(center_x)s, %(center_y)s), %(srid)s) as center ) as c,''' + \
            self.get_full_table_name() + '''
            WHERE
            feature_class='P'
            AND population >= %(min_population)s
            AND ST_Transform(geog, %(srid)s) && st_expand(c.center, %(max_distance)s) order by distance limit %(limit)s'''
            params = {
                'srid': self.get_srid(),
                'center_x': self.center.get_point().x,
                'center_y': self.center.get_point().y,
                'min_population': self.min_population,
                'max_distance': self.max_distance,
                'limit': self.limit
            }
            self.execute(queryString, params)
            locations = []
            if self.cur.rowcount > 0:
                for result in self.cur.fetchall():
                    location = {}
                    location['name'] = result['name']
                    location['distance'] = result['distance']
                    location['azimuth'] = result['azimuth']
                    locations.append(location)
            return locations
def location():
    """Return the injector-managed singleton Location DAO."""
    from __init__ import INJECTOR as injector
    return injector.get(Location)
|
#!/usr/bin/env python3
"""
Use URL to EUPS candidate tag file to git tag repos with official version
"""
# Technical Debt
# --------------
# - completely hide eups-specifics from this file
# - skips non-github repos - can add repos.yaml knowhow to address this
# - worth doing the smart thing for externals? (yes for Sims)
# - deal with authentication version
# Known Bugs
# ----------
# Yeah, the candidate logic is broken, will fix
import logging
import os
import sys
import argparse
import textwrap
from datetime import datetime
from getpass import getuser
import certifi
import urllib3
from .. import codetools
from .. import eprint
eupspkg_site = 'https://eups.lsst.codes/stack/src/'
def lookup_email(args):
    """Return the tagger email: --email flag first, then the gitconfig value."""
    email = args.email
    if email is None:
        email = codetools.gituseremail()
        if email is None:
            sys.exit("Specify --email option")
    if args.debug:
        print("email is " + email)
    return email
def lookup_tagger(args):
    """Return the tagger name: --tagger flag first, then the gitconfig value."""
    tagger = args.tagger
    if tagger is None:
        tagger = codetools.gitusername()
        if tagger is None:
            sys.exit("Specify --tagger option")
    if args.debug:
        print("tagger name is " + tagger)
    return tagger
def current_timestamp(args):
    """Return the current UTC time formatted for the github API (…Z)."""
    stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
    if args.debug:
        print(stamp)
    return stamp
def fetch_eups_tag(args, eups_candidate):
    """Download the EUPS tag list for *eups_candidate* and return its lines.

    Exits the process on any HTTP status >= 300.  The returned lines are
    whatever ``HTTPResponse.data.splitlines()`` yields (bytes on py3).
    """
    # construct url
    eupspkg_taglist = '/'.join((eupspkg_site, 'tags',
                                eups_candidate + '.list'))
    if args.debug:
        print(eupspkg_taglist)
    # verified HTTPS using the certifi CA bundle
    http = urllib3.PoolManager(
        cert_reqs='CERT_REQUIRED',
        ca_certs=certifi.where()
    )
    if args.debug:
        # NOTE(review): the return value of this getLogger() call is discarded;
        # presumably the stream handler below was meant for it too — confirm.
        logging.getLogger('requests.packages.urllib3')
        stream_handler = logging.StreamHandler()
        logger = logging.getLogger('github3')
        logger.addHandler(stream_handler)
        logger.setLevel(logging.DEBUG)
    manifest = http.request('GET', eupspkg_taglist)
    if manifest.status >= 300:
        sys.exit("Failed GET")
    return manifest.data.splitlines()
def parse_args():
    """Parse command-line arguments.

    Positional: tag (git tag to create) and manifest (EUPS build id).
    Returns the populated argparse Namespace.
    """
    user = getuser()
    parser = argparse.ArgumentParser(
        prog='github-tag-version',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent("""
        Tag all repositories in a GitHub org using a team-based scheme
        Examples:
        github-tag-version --org lsst --team 'Data Management' w.2015.33 b1630
        github-tag-version --org lsst --team 'Data Management' \
        --team 'External' --candidate v11_0_rc2 11.0.rc2 b1679
        Note that the access token must have access to these oauth scopes:
        * read:org
        * repo
        The token generated by `github-auth --user` should have sufficient
        permissions.
        """),
        epilog='Part of codekit: https://github.com/lsst-sqre/sqre-codekit'
    )
    # for safety, default to dummy org <user>-shadow
    # will fail for most people but see github_fork_repos in this module
    # on how to get your own
    parser.add_argument('tag')
    parser.add_argument('manifest')
    parser.add_argument(
        '--org',
        default=user + '-shadow')
    parser.add_argument(
        '--team',
        action='append',
        required=True,
        # fixed: help text was missing its closing parenthesis
        help="team whose repos may be tagged (can specify several times)")
    parser.add_argument('--candidate')
    parser.add_argument('--dry-run', action='store_true')
    parser.add_argument(
        '--tagger',
        help='Name of person making the tag - defaults to gitconfig value')
    parser.add_argument(
        '--email',
        help='Email address of tagger - defaults to gitconfig value')
    parser.add_argument(
        '--token-path',
        default='~/.sq_github_token_delete',
        help='Use a token (made with github-auth) in a non-standard location')
    parser.add_argument(
        '--token',
        default=None,
        help='Literal github personal access token string')
    parser.add_argument(
        '--force-tag',
        action='store_true',
        help='Force moving pre-existing annotated git tags.')
    parser.add_argument(
        '--fail-fast',
        action='store_true',
        help='Fail immediately on github API errors.')
    parser.add_argument(
        '-d', '--debug',
        action='store_true',
        default=os.getenv('DM_SQUARE_DEBUG'),
        help='Debug mode')
    parser.add_argument('-v', '--version',
                        action='version', version='%(prog)s 0.5')
    return parser.parse_args()
def main():
    """Create the tag"""
    # pylint: disable=too-many-locals,too-many-nested-blocks,too-many-branches
    # pylint: disable=too-many-statements
    # Although maybe that is a hint that we should break this up...
    args = parse_args()
    orgname = args.org
    version = args.tag
    # if email not specified, try getting it from the gitconfig
    email = lookup_email(args)
    # ditto for the name of the tagger
    tagger = lookup_tagger(args)
    # The candidate is assumed to be the requested EUPS tag unless
    # otherwise specified with the --candidate option The reason to
    # currently do this is that for weeklies and other internal builds,
    # it's okay to eups publish the weekly and git tag post-facto. However
    # for official releases, we don't want to publish until the git tag
    # goes down, because we want to eups publish the build that has the
    # official versions in the eups ref.
    candidate = args.candidate if args.candidate else args.tag
    eupsbuild = args.manifest  # sadly we need to "just" know this
    message_template = 'Version {v} release from {c}/{b}'
    message = message_template.format(v=version, c=candidate, b=eupsbuild)
    # generate timestamp for github API
    timestamp = current_timestamp(args)
    tagstuff = dict(name=tagger,
                    email=email,
                    date=timestamp)
    if args.debug:
        print(tagstuff)
    ghb = codetools.login_github(token_path=args.token_path, token=args.token)
    if args.debug:
        print(type(ghb))
    if args.debug:
        print("Tagging repos in ", orgname)
    # generate eups-style version
    # eups no likey semantic versioning markup, wants underscores
    cmap = str.maketrans('.-', '__')  # pylint: disable=no-member
    # eups_version = version.translate(map)
    eups_candidate = candidate.translate(cmap)
    # collect tagging failures so all repos are attempted before exiting
    tag_exceptions = []
    for entry in fetch_eups_tag(args, eups_candidate):
        # tag list lines arrive as bytes; normalize to str
        if not isinstance(entry, str):
            entry = str(entry, 'utf-8')
        # skip commented out and blank lines
        if entry.startswith('#'):
            continue
        if entry.startswith('EUPS'):
            continue
        if entry == '':
            continue
        # extract the repo and eups tag from the entry
        (upstream, _, eups_tag) = entry.split()
        if args.debug:
            print(upstream, eups_tag)
        repo = ghb.repository(orgname, upstream)
        # if the repo is not in github skip it for now
        # see TD
        if not hasattr(repo, 'name'):
            print('!!! SKIPPING', upstream, (60 - len(upstream)) * '-')
            continue
        if not sum(1 for _ in repo.teams()):
            print('!!! repo has NO teams -- SKIPPING', upstream,
                  (45 - len(upstream)) * '-')
            continue
        # only tag repos belonging to at least one of the requested teams
        for team in repo.teams():
            if team.name not in args.team:
                if args.debug:
                    print('No action for', repo.name,
                          'belonging to', team.name)
                continue
            # resolve the eups tag to the git sha it was built from
            sha = codetools.eups2git_ref(eups_ref=eups_tag,
                                         repo=repo.name,
                                         eupsbuild=eupsbuild,
                                         debug=args.debug)
            if args.debug or args.dry_run:
                print(repo.name.ljust(40), 'found in', team.name)
                print('Will tag sha: {sha} as {v} (was {t})'.format(
                    sha=sha, v=version, t=eups_tag))
            if args.dry_run:
                continue
            try:
                # create_tag() returns a Tag object on success or None
                # on failure
                tag = repo.create_tag(tag=version,
                                      message=message,
                                      sha=sha,
                                      obj_type='commit',
                                      tagger=tagstuff,
                                      lightweight=False,
                                      update=args.force_tag)
                if tag is None:
                    raise RuntimeError('failed to create git tag')
            except Exception as exc:  # pylint: disable=broad-except
                # record and report, but keep tagging the remaining repos
                tag_exceptions.append(exc)
                eprint('OOPS: -------------------')
                eprint(str(exc))
                eprint('OOPS: -------------------')
                if args.fail_fast:
                    raise
    lp_fires = len(tag_exceptions)
    if lp_fires:
        eprint("ERROR: {failed} tag failures".format(failed=str(lp_fires)))
        if args.debug:
            for e in tag_exceptions:
                eprint(str(e))
        # exit status is the failure count, capped to fit in one byte
        sys.exit(lp_fires if lp_fires < 256 else 255)
# script entry point: run only when executed directly, not on import
if __name__ == '__main__':
    main()
improve clarity of github org name debug output
#!/usr/bin/env python3
"""
Use URL to EUPS candidate tag file to git tag repos with official version
"""
# Technical Debt
# --------------
# - completely hide eups-specifics from this file
# - skips non-github repos - can add repos.yaml knowhow to address this
# - worth doing the smart thing for externals? (yes for Sims)
# - deal with authentication version
# Known Bugs
# ----------
# Yeah, the candidate logic is broken, will fix
import logging
import os
import sys
import argparse
import textwrap
from datetime import datetime
from getpass import getuser
import certifi
import urllib3
from .. import codetools
from .. import eprint
eupspkg_site = 'https://eups.lsst.codes/stack/src/'
def lookup_email(args):
    """Return the tagger email: --email flag first, then the gitconfig value."""
    email = args.email
    if email is None:
        email = codetools.gituseremail()
        if email is None:
            sys.exit("Specify --email option")
    if args.debug:
        print("email is " + email)
    return email
def lookup_tagger(args):
    """Return the tagger name: --tagger flag first, then the gitconfig value."""
    tagger = args.tagger
    if tagger is None:
        tagger = codetools.gitusername()
        if tagger is None:
            sys.exit("Specify --tagger option")
    if args.debug:
        print("tagger name is " + tagger)
    return tagger
def current_timestamp(args):
    """Return the current UTC time formatted for the github API (…Z)."""
    stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
    if args.debug:
        print(stamp)
    return stamp
def fetch_eups_tag(args, eups_candidate):
    """Download the EUPS tag list for *eups_candidate* and return its lines.

    Exits the process on any HTTP status >= 300.  The returned lines are
    whatever ``HTTPResponse.data.splitlines()`` yields (bytes on py3).
    """
    # construct url
    eupspkg_taglist = '/'.join((eupspkg_site, 'tags',
                                eups_candidate + '.list'))
    if args.debug:
        print(eupspkg_taglist)
    # verified HTTPS using the certifi CA bundle
    http = urllib3.PoolManager(
        cert_reqs='CERT_REQUIRED',
        ca_certs=certifi.where()
    )
    if args.debug:
        # NOTE(review): the return value of this getLogger() call is discarded;
        # presumably the stream handler below was meant for it too — confirm.
        logging.getLogger('requests.packages.urllib3')
        stream_handler = logging.StreamHandler()
        logger = logging.getLogger('github3')
        logger.addHandler(stream_handler)
        logger.setLevel(logging.DEBUG)
    manifest = http.request('GET', eupspkg_taglist)
    if manifest.status >= 300:
        sys.exit("Failed GET")
    return manifest.data.splitlines()
def parse_args():
    """Parse command-line arguments.

    Positional: tag (git tag to create) and manifest (EUPS build id).
    Returns the populated argparse Namespace.
    """
    user = getuser()
    parser = argparse.ArgumentParser(
        prog='github-tag-version',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent("""
        Tag all repositories in a GitHub org using a team-based scheme
        Examples:
        github-tag-version --org lsst --team 'Data Management' w.2015.33 b1630
        github-tag-version --org lsst --team 'Data Management' \
        --team 'External' --candidate v11_0_rc2 11.0.rc2 b1679
        Note that the access token must have access to these oauth scopes:
        * read:org
        * repo
        The token generated by `github-auth --user` should have sufficient
        permissions.
        """),
        epilog='Part of codekit: https://github.com/lsst-sqre/sqre-codekit'
    )
    # for safety, default to dummy org <user>-shadow
    # will fail for most people but see github_fork_repos in this module
    # on how to get your own
    parser.add_argument('tag')
    parser.add_argument('manifest')
    parser.add_argument(
        '--org',
        default=user + '-shadow')
    parser.add_argument(
        '--team',
        action='append',
        required=True,
        # fixed: help text was missing its closing parenthesis
        help="team whose repos may be tagged (can specify several times)")
    parser.add_argument('--candidate')
    parser.add_argument('--dry-run', action='store_true')
    parser.add_argument(
        '--tagger',
        help='Name of person making the tag - defaults to gitconfig value')
    parser.add_argument(
        '--email',
        help='Email address of tagger - defaults to gitconfig value')
    parser.add_argument(
        '--token-path',
        default='~/.sq_github_token_delete',
        help='Use a token (made with github-auth) in a non-standard location')
    parser.add_argument(
        '--token',
        default=None,
        help='Literal github personal access token string')
    parser.add_argument(
        '--force-tag',
        action='store_true',
        help='Force moving pre-existing annotated git tags.')
    parser.add_argument(
        '--fail-fast',
        action='store_true',
        help='Fail immediately on github API errors.')
    parser.add_argument(
        '-d', '--debug',
        action='store_true',
        default=os.getenv('DM_SQUARE_DEBUG'),
        help='Debug mode')
    parser.add_argument('-v', '--version',
                        action='version', version='%(prog)s 0.5')
    return parser.parse_args()
def main():
    """Create an annotated git tag across all repos named in an EUPS manifest.

    Reads the EUPS build manifest for the requested candidate tag, maps each
    entry's EUPS ref to a git sha, and creates an annotated tag in every repo
    that belongs to one of the requested teams.  Exits non-zero with the count
    of tagging failures (capped at 255).
    """
    # pylint: disable=too-many-locals,too-many-nested-blocks,too-many-branches
    # pylint: disable=too-many-statements
    # Although maybe that is a hint that we should break this up...
    args = parse_args()
    orgname = args.org
    version = args.tag
    # if email not specified, try getting it from the gitconfig
    email = lookup_email(args)
    # ditto for the name of the tagger
    tagger = lookup_tagger(args)
    # The candidate is assumed to be the requested EUPS tag unless
    # otherwise specified with the --candidate option. The reason to
    # currently do this is that for weeklies and other internal builds,
    # it's okay to eups publish the weekly and git tag post-facto. However
    # for official releases, we don't want to publish until the git tag
    # goes down, because we want to eups publish the build that has the
    # official versions in the eups ref.
    candidate = args.candidate if args.candidate else args.tag
    eupsbuild = args.manifest  # sadly we need to "just" know this
    message_template = 'Version {v} release from {c}/{b}'
    message = message_template.format(v=version, c=candidate, b=eupsbuild)
    # generate timestamp for github API
    timestamp = current_timestamp(args)
    tagstuff = dict(name=tagger,
                    email=email,
                    date=timestamp)
    if args.debug:
        print(tagstuff)
    ghb = codetools.login_github(token_path=args.token_path, token=args.token)
    if args.debug:
        print(type(ghb))
    if args.debug:
        print("Tagging repos in github org: {org}".format(org=orgname))
    # generate eups-style version
    # eups no likey semantic versioning markup, wants underscores
    cmap = str.maketrans('.-', '__')  # pylint: disable=no-member
    # eups_version = version.translate(map)
    eups_candidate = candidate.translate(cmap)
    # Collected (not raised) so one bad repo doesn't stop the whole run
    # unless --fail-fast was given.
    tag_exceptions = []
    for entry in fetch_eups_tag(args, eups_candidate):
        # manifest lines may arrive as bytes; normalize to str for parsing
        if not isinstance(entry, str):
            entry = str(entry, 'utf-8')
        # skip commented out and blank lines
        if entry.startswith('#'):
            continue
        if entry.startswith('EUPS'):
            continue
        if entry == '':
            continue
        # extract the repo and eups tag from the entry
        (upstream, _, eups_tag) = entry.split()
        if args.debug:
            print(upstream, eups_tag)
        repo = ghb.repository(orgname, upstream)
        # if the repo is not in github skip it for now
        # see TD
        if not hasattr(repo, 'name'):
            print('!!! SKIPPING', upstream, (60 - len(upstream)) * '-')
            continue
        # a repo with no teams can never match --team; skip it explicitly
        if not sum(1 for _ in repo.teams()):
            print('!!! repo has NO teams -- SKIPPING', upstream,
                  (45 - len(upstream)) * '-')
            continue
        for team in repo.teams():
            if team.name not in args.team:
                if args.debug:
                    print('No action for', repo.name,
                          'belonging to', team.name)
                continue
            # resolve the EUPS ref to the concrete git sha to be tagged
            sha = codetools.eups2git_ref(eups_ref=eups_tag,
                                         repo=repo.name,
                                         eupsbuild=eupsbuild,
                                         debug=args.debug)
            if args.debug or args.dry_run:
                print(repo.name.ljust(40), 'found in', team.name)
                print('Will tag sha: {sha} as {v} (was {t})'.format(
                    sha=sha, v=version, t=eups_tag))
            if args.dry_run:
                continue
            try:
                # create_tag() returns a Tag object on success or None
                # on failure
                tag = repo.create_tag(tag=version,
                                      message=message,
                                      sha=sha,
                                      obj_type='commit',
                                      tagger=tagstuff,
                                      lightweight=False,
                                      update=args.force_tag)
                if tag is None:
                    raise RuntimeError('failed to create git tag')
            except Exception as exc:  # pylint: disable=broad-except
                tag_exceptions.append(exc)
                eprint('OOPS: -------------------')
                eprint(str(exc))
                eprint('OOPS: -------------------')
                if args.fail_fast:
                    raise
    lp_fires = len(tag_exceptions)
    if lp_fires:
        eprint("ERROR: {failed} tag failures".format(failed=str(lp_fires)))
        if args.debug:
            for e in tag_exceptions:
                eprint(str(e))
        # exit status mirrors the failure count; shells cap status at 255
        sys.exit(lp_fires if lp_fires < 256 else 255)
# Script entry point: run the tagger only when executed directly, not on import.
if __name__ == '__main__':
    main()
|
'''
A REST API for Salt
===================
:depends: - CherryPy Python module
:configuration: All authentication is done through Salt's :ref:`external auth
<acl-eauth>` system. Be sure that it is enabled and the user you are
authenticating as has permissions for all the functions you will be
running.
The configuration options for this module resides in the Salt master config
file. All available options are detailed below.
port
**Required**
The port for the webserver to listen on.
debug : ``False``
Starts a for-development web server instead of the production-ready web
server.
Does not use SSL and ignores the certificate configuration options.
ssl_crt
Required when ``debug`` is ``False``
The path to a SSL certificate. (See below)
ssl_key
Required when ``debug`` is ``False``
The path to the private key for your SSL certificate. (See below)
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
If this directory contains a ``index.html`` file, it will be served at
the root URL when HTML is requested by a client via the ``Accept``
header.
This directory may point to a clone of the `salt-ui`_ project to
bootstrap a graphical interface for interacting with Salt.
.. _`salt-ui`: https://github.com/saltstack/salt-ui
Example production configuration block:
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
The REST interface requires a secure HTTPS connection. You must provide an
SSL certificate to use. If you don't already have a certificate and don't
wish to buy one, you can generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` function in Salt (note
the dependencies for this module):
.. code-block:: bash
% salt-call tls.create_self_signed_cert
Usage
-----
You access a running Salt master via this module by sending HTTP requests to
the URLs detailed below.
.. admonition:: Content negotiation
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded).
* Specify the format of data you are sending in a request by including the
:mailheader:`Content-Type` header.
* Specify your desired output format for the response with the
:mailheader:`Accept` header.
This REST interface expects data sent in :http:method:`post` and
:http:method:`put` requests to be in the format of a list of lowstate
dictionaries. This allows you to specify multiple commands in a single request.
.. glossary::
lowstate
A dictionary containing various keys that instruct Salt which command
to run, where that command lives, any parameters for that command, any
authentication credentials, what returner to use, etc.
Salt uses the lowstate data format internally in many places to pass
command data between functions. Salt also uses lowstate for the
:ref:`LocalClient() <python-api>` Python API interface.
For example (in JSON format)::
[{
'client': 'local',
'tgt': '*',
'fun': 'test.fib',
'arg': ['10'],
}]
.. admonition:: x-www-form-urlencoded
This REST interface accepts data in the x-www-form-urlencoded format. This
is the format used by HTML forms, the default format used by
:command:`curl`, the default format used by many JavaScript AJAX libraries
(such as jQuery), etc. This format will be converted to the
:term:`lowstate` format as best as possible with the caveats below. It is
always preferable to format data in the lowstate format directly in a more
capable format such as JSON or YAML.
* Only a single command may be sent in this format per HTTP request.
* Multiple ``arg`` params will be sent as a single list of params.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
parameters. E.g., arg=one, arg=two will be sent as arg[]=one, arg[]=two.
Again, it is preferable to send lowstate via JSON or YAML directly by
specifying the :mailheader:`Content-Type` header in the request.
URL reference
-------------
The main entry point is the root URL (``/``) and all functionality is available
at that URL. The other URLs are largely convenience URLs that wrap that main
entry point.
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
import os
import json
# Import third-party libs
import cherrypy
import yaml
# Import Salt libs
import salt.auth
import salt.log
import salt.output
# Import salt-api libs
import saltapi
logger = salt.log.logging.getLogger(__name__)
def salt_token_tool():
    '''
    Copy an explicit X-Auth-Token header into the session cookie so the
    normal session-based auth machinery picks it up transparently.
    '''
    token_header = cherrypy.request.headers.get('X-Auth-Token', None)
    # An explicit header always wins over whatever session cookie was sent.
    if token_header:
        cherrypy.request.cookie['session_id'] = token_header
def salt_auth_tool():
    '''
    Send any request that lacks an authenticated session to the login page.
    '''
    # These paths must stay reachable without a session.
    exempt = ('/login', '/logout', '/run')
    path = cherrypy.request.path_info
    if path.startswith(exempt):
        return
    # No session token means the client has not logged in yet.
    session_token = cherrypy.session.get('token', None)
    if not session_token:
        raise cherrypy.InternalRedirect('/login')
    # Authenticated responses are per-user; keep shared caches out of them.
    cherrypy.response.headers['Cache-Control'] = 'private'
def wants_html():
    '''
    Determine whether the client is specifically asking for HTML.

    Returns an empty string for vague or absent Accept headers, otherwise
    the media type chosen by cherrypy.lib.cptools.accept().
    '''
    headers = cherrypy.request.headers
    # A missing or wildcard Accept header is not a real HTML request.
    if 'Accept' not in headers or headers['Accept'] == '*/*':
        return ''
    candidates = ['text/html']
    for media_type, _ in ct_out_map:
        candidates.append(media_type)
    try:
        return cherrypy.lib.cptools.accept(candidates)
    except (AttributeError, cherrypy.CherryPyException):
        return ''
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.  JSON is listed first and therefore wins
# ties during content negotiation.
ct_out_map = (
    ('application/json', json.dumps),
    ('application/x-yaml', yaml.dump),
)
def hypermedia_handler(*args, **kwargs):
    '''
    Determine the best output format based on the Accept header, execute the
    regular handler, and transform the output to the request content type (even
    if it's an error).

    :param args: Pass args through to the main handler
    :param kwargs: Pass kwargs through to the main handler
    '''
    # If we're being asked for HTML, try to serve index.html from the 'static'
    # directory; this is useful (as a random, non-specific example) for
    # bootstrapping the salt-ui app
    if 'static' in cherrypy.config and 'html' in wants_html():
        index = os.path.join(cherrypy.config['static'], 'index.html')
        if os.path.exists(index):
            return cherrypy.lib.static.serve_file(index)
    # Execute the real handler. Handle or pass-through any errors we know how
    # to handle (auth & HTTP errors). Reformat any errors we don't know how to
    # handle as a data structure.
    try:
        cherrypy.response.processors = dict(ct_out_map)  # handlers may modify this
        ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
    except salt.exceptions.EauthAuthenticationError:
        raise cherrypy.InternalRedirect('/login')
    except cherrypy.CherryPyException:
        raise
    except Exception:  # pylint: disable=broad-except
        import traceback
        logger.debug("Error while processing request for: %s",
                     cherrypy.request.path_info,
                     exc_info=True)
        cherrypy.response.status = 500
        ret = {
            'status': cherrypy.response.status,
            # BUG FIX: traceback.format_exc() takes an optional traceback
            # *limit*, not an exception object; the old format_exc(exc) call
            # misused the API.  format_exc() already formats the exception
            # currently being handled.  Also use .get() so a missing 'debug'
            # config key cannot itself raise while building the error body.
            'return': '{0}'.format(traceback.format_exc())
            if cherrypy.config.get('debug', False)
            else "An unexpected error occurred"}
    # Raises 406 if requested content-type is not supported
    best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
    # Transform the output from the handler into the requested output format
    cherrypy.response.headers['Content-Type'] = best
    out = cherrypy.response.processors[best]
    return out(ret)
def hypermedia_out():
    '''
    Wrap the normal handler and transform the output from that handler into the
    requested content type
    '''
    request = cherrypy.serving.request
    # Stash the real page handler and interpose hypermedia_handler, which
    # invokes it and then serializes the result per the Accept header.
    request._hypermedia_inner_handler = request.handler
    request.handler = hypermedia_handler
    # cherrypy.response.headers['Alternates'] = self.ct_out_map.keys()
    # TODO: add 'negotiate' to Vary header and 'list' to TCN header
    # Alternates: {"paper.1" 0.9 {type text/html} {language en}},
    #             {"paper.2" 0.7 {type text/html} {language fr}},
    #             {"paper.3" 1.0 {type application/postscript} {language en}}
def urlencoded_processor(entity):
    '''
    Accept x-www-form-urlencoded data (run through CherryPy's formatter)
    and reformat it into a Low State datastructure.

    Since we can't easily represent complicated data structures with
    key-value pairs, any more complicated requirements (e.g. compound
    commands) must instead be delivered via JSON or YAML.

    For example::

        curl -si localhost:8000 -d client=local -d tgt='*' \\
                -d fun='test.kwarg' -d arg='one=1' -d arg='two=2'

    :param entity: raw POST data
    '''
    # Let CherryPy's stock processor parse the form fields first.
    cherrypy._cpreqbody.process_urlencoded(entity)
    low = entity.params
    # 'arg' must always be a list, even when only a single value was sent.
    if 'arg' in low:
        arg_value = low['arg']
        if not isinstance(arg_value, list):
            low['arg'] = [arg_value]
    # A single form submission maps to a one-element lowstate list.
    cherrypy.request.lowstate = [low]
def json_processor(entity):
    '''
    Unserialize raw POST data in JSON format to a Python datastructure.

    :param entity: raw POST data
    '''
    raw = entity.fp.read()
    try:
        lowstate = json.loads(raw)
    except ValueError:
        # json.loads signals malformed documents with ValueError
        raise cherrypy.HTTPError(400, 'Invalid JSON document')
    cherrypy.serving.request.lowstate = lowstate
def yaml_processor(entity):
    '''
    Unserialize raw POST data in YAML format to a Python datastructure.

    :param entity: raw POST data

    :raises cherrypy.HTTPError: 400 when the body is not a valid YAML document
    '''
    body = entity.fp.read()
    try:
        # SECURITY NOTE: yaml.load() can instantiate arbitrary Python objects
        # from an untrusted request body; yaml.safe_load() should be
        # considered here.
        cherrypy.serving.request.lowstate = yaml.load(body)
    except (ValueError, yaml.YAMLError):
        # BUG FIX: PyYAML raises yaml.YAMLError for malformed documents,
        # which is NOT a ValueError subclass; previously a bad document
        # escaped as an unhandled 500 instead of this clean 400.
        raise cherrypy.HTTPError(400, 'Invalid YAML document')
def hypermedia_in():
    '''
    Unserialize POST/PUT data of a specified Content-Type.

    The following custom processors all are intended to format Low State data
    and will place that datastructure into the request object.

    :raises HTTPError: if the request contains a Content-Type that we do not
        have a processor for
    '''
    body = cherrypy.request.body
    # Be liberal in what you accept: map each supported Content-Type to the
    # processor that turns it into lowstate.
    processors = {
        'application/x-www-form-urlencoded': urlencoded_processor,
        'application/json': json_processor,
    }
    for yaml_ct in ('application/x-yaml', 'text/yaml'):
        processors[yaml_ct] = yaml_processor
    # Any other Content-Type gets a 406 instead of CherryPy's default handling.
    body.processors.clear()
    body.default_proc = cherrypy.HTTPError(
        406, 'Content type not supported')
    body.processors = processors
class LowDataAdapter(object):
    '''
    The primary entry point to the REST API. All functionality is available
    through this URL. The other available URLs provide convenience wrappers
    around this URL.
    '''
    exposed = True

    # CherryPy per-class tool configuration: sessions plus the token/cookie
    # auth and hypermedia content-negotiation tools defined in this module.
    _cp_config = {
        'tools.sessions.on': True,
        'tools.sessions.timeout': 60 * 10,  # 10 hours
        # 'tools.autovary.on': True,
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
        'tools.hypermedia_out.on': True,
        'tools.hypermedia_in.on': True,
    }

    def __init__(self, opts):
        '''
        :param opts: A dictionary of options from Salt's master config (e.g.
            Salt's, ``__opts__``)
        '''
        self.opts = opts
        # APIClient routes each lowstate chunk to the right Salt client
        self.api = saltapi.APIClient(opts)

    def exec_lowstate(self):
        '''
        Pull a Low State datastructure from request and execute the low-data
        chunks through Salt. The low-data chunks will be updated to include the
        authorization token for the current session.

        :yields: the result of running each lowstate chunk through Salt
        '''
        lowstate = cherrypy.request.lowstate
        token = cherrypy.session.get('token', None)
        for chunk in lowstate:
            if token:
                chunk['token'] = token
            # yield lazily so callers may stream or collect results
            yield self.api.run(chunk)

    def GET(self):
        '''
        .. http:get:: /

            An explanation of the API with links of where to go next.

            **Example request**::

                % curl -i localhost:8000

            .. code-block:: http

                GET / HTTP/1.1
                Host: localhost:8000
                Accept: application/json

            **Example response**:

            .. code-block:: http

                HTTP/1.1 200 OK
                Content-Type: application/json

        :status 200: success
        :status 401: authentication required
        :status 406: requested Content-Type not available
        '''
        return {
            'status': cherrypy.response.status,
            'return': "Welcome",
        }

    def POST(self, **kwargs):
        '''
        The primary execution interface for the rest of the API

        .. http:post:: /

            **Example request**::

                % curl -si https://localhost:8000 \\
                    -H "Accept: application/x-yaml" \\
                    -H "X-Auth-Token: d40d1e1e" \\
                    -d client=local \\
                    -d tgt='*' \\
                    -d fun='test.ping' \\
                    -d arg

            .. code-block:: http

                POST / HTTP/1.1
                Host: localhost:8000
                Accept: application/x-yaml
                X-Auth-Token: d40d1e1e
                Content-Length: 36
                Content-Type: application/x-www-form-urlencoded

                fun=test.ping&arg&client=local&tgt=*

            **Example response**:

            .. code-block:: http

                HTTP/1.1 200 OK
                Content-Length: 200
                Allow: GET, HEAD, POST
                Content-Type: application/x-yaml

                return:
                - ms-0: true
                  ms-1: true
                  ms-2: true
                  ms-3: true
                  ms-4: true

        :form lowstate: A list of :term:`lowstate` data appropriate for the
            :ref:`client <client-apis>` interface you are calling.

            Lowstate may be supplied in any supported format by specifying the
            :mailheader:`Content-Type` header in the request. Supported formats
            are listed in the :mailheader:`Alternates` response header.

        :status 200: success
        :status 401: authentication required
        :status 406: requested Content-Type not available
        '''
        # Materialize the generator so the whole result set is serialized
        return {
            'return': list(self.exec_lowstate()),
        }
class Minions(LowDataAdapter):
    # Convenience wrapper around the root URL for minion-centric queries.

    def GET(self, mid=None):
        '''
        A convenience URL for getting lists of minions or getting minion
        details

        .. http:get:: /minions/(mid)

            Get grains, modules, functions, and inline function documentation
            for all minions or a single minion

            **Example request**::

                % curl -i localhost:8000/minions/ms-3

            .. code-block:: http

                GET /minions/ms-3 HTTP/1.1
                Host: localhost:8000
                Accept: application/x-yaml

            **Example response**:

            .. code-block:: http

                HTTP/1.1 200 OK
                Content-Length: 129005
                Content-Type: application/x-yaml

                return:
                - ms-3:
                    grains.items:
                        ...

        :param mid: (optional) a minion id
        :status 200: success
        :status 401: authentication required
        :status 406: requested Content-Type not available
        '''
        # Target the requested minion, or every minion when mid is None
        cherrypy.request.lowstate = [{
            'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
        }]
        return {
            'return': list(self.exec_lowstate()),
        }

    def POST(self, **kwargs):
        '''
        Start an execution command and immediately return the job id

        .. http:post:: /minions

            You must pass low-data in the request body either from an HTML form
            or as JSON or YAML. The ``client`` option is pre-set to
            ``local_async``.

            **Example request**::

                % curl -sSi localhost:8000/minions \\
                    -H "Accept: application/x-yaml" \\
                    -d tgt='*' \\
                    -d fun='status.diskusage'

            .. code-block:: http

                POST /minions HTTP/1.1
                Host: localhost:8000
                Accept: application/x-yaml
                Content-Length: 26
                Content-Type: application/x-www-form-urlencoded

                tgt=*&fun=status.diskusage

            **Example response**:

            .. code-block:: http

                HTTP/1.1 202 Accepted
                Content-Length: 86
                Content-Type: application/x-yaml

                - return:
                    jid: '20130118105423694155'
                    minions: [ms-4, ms-3, ms-2, ms-1, ms-0]

        :form lowstate: lowstate data for the
            :py:mod:`~salt.client.LocalClient`; the ``client`` parameter will
            be set to ``local_async``

            Lowstate may be supplied in any supported format by specifying the
            :mailheader:`Content-Type` header in the request. Supported formats
            are listed in the :mailheader:`Alternates` response header.

        :status 202: success
        :status 401: authentication required
        :status 406: requested :mailheader:`Content-Type` not available
        '''
        # Force the async local client so the call returns a jid immediately
        for chunk in cherrypy.request.lowstate:
            chunk['client'] = 'local_async'
        # Only the first chunk's job data is returned ({} when there is none)
        job_data = next(self.exec_lowstate(), {})
        # 202 Accepted: the job was queued but has not completed yet
        cherrypy.response.status = 202
        return [{
            'return': job_data,
        }]
class Jobs(LowDataAdapter):
    # Convenience wrapper for browsing the job cache via the runner client.

    def GET(self, jid=None):
        '''
        A convenience URL for getting lists of previously run jobs or getting
        the return from a single job

        .. http:get:: /jobs/(jid)

            Get grains, modules, functions, and inline function documentation
            for all minions or a single minion

            **Example request**::

                % curl -i localhost:8000/jobs

            .. code-block:: http

                GET /jobs HTTP/1.1
                Host: localhost:8000
                Accept: application/x-yaml

            **Example response**:

            .. code-block:: http

                HTTP/1.1 200 OK
                Content-Length: 165
                Content-Type: application/x-yaml

                return:
                - '20121130104633606931':
                    Arguments:
                    - '3'
                    Function: test.fib
                    Start Time: 2012, Nov 30 10:46:33.606931
                    Target: ms-3
                    Target-type: glob

            **Example request**::

                % curl -i localhost:8000/jobs/20121130104633606931

            .. code-block:: http

                GET /jobs/20121130104633606931 HTTP/1.1
                Host: localhost:8000
                Accept: application/x-yaml

            **Example response**:

            .. code-block:: http

                HTTP/1.1 200 OK
                Content-Length: 73
                Content-Type: application/x-yaml

                return:
                - ms-3:
                  - - 0
                    - 1
                    - 1
                    - 2
                  - 9.059906005859375e-06

        :param jid: (optional) a job id
        :status 200: success
        :status 401: authentication required
        :status 406: requested Content-Type not available
        '''
        # With a jid, look up that job's return; otherwise list all jobs
        cherrypy.request.lowstate = [{
            'client': 'runner',
            'fun': 'jobs.lookup_jid' if jid else 'jobs.list_jobs',
            'jid': jid,
        }]
        return {
            'return': list(self.exec_lowstate()),
        }
class Login(LowDataAdapter):
    '''
    All interactions with this REST API must be authenticated. Authentication
    is performed through Salt's eauth system. You must set the eauth backend
    and allowed users by editing the :conf_master:`external_auth` section in
    your master config.

    Authentication credentials are passed to the REST API via a session id in
    one of two ways:

    If the request is initiated from a browser it must pass a session id via a
    cookie and that session must be valid and active.

    If the request is initiated programmatically, the request must contain a
    :mailheader:`X-Auth-Token` header with valid and active session id.
    '''
    exposed = True

    def GET(self):
        '''
        Present the login interface

        .. http:get:: /login

            An explanation of how to log in.

            **Example request**::

                % curl -i localhost:8000/login

            .. code-block:: http

                GET /login HTTP/1.1
                Host: localhost:8000
                Accept: text/html

            **Example response**:

            .. code-block:: http

                HTTP/1.1 200 OK
                Content-Type: text/html

        :status 401: authentication required
        :status 406: requested Content-Type not available
        '''
        # Always answer 401 with a hint about the session-based auth scheme
        cherrypy.response.status = '401 Unauthorized'
        cherrypy.response.headers['WWW-Authenticate'] = 'Session'
        return {
            'status': cherrypy.response.status,
            'return': "Please log in",
        }

    def POST(self, **kwargs):
        '''
        Authenticate against Salt's eauth system

        .. versionchanged:: 0.8.0
            No longer returns a 302 redirect on success.

        .. http:post:: /login

            **Example request**::

                % curl -si localhost:8000/login \\
                    -H "Accept: application/json" \\
                    -d username='saltuser' \\
                    -d password='saltpass' \\
                    -d eauth='pam'

            .. code-block:: http

                POST / HTTP/1.1
                Host: localhost:8000
                Content-Length: 42
                Content-Type: application/x-www-form-urlencoded
                Accept: application/json

                username=saltuser&password=saltpass&eauth=pam

            **Example response**:

            .. code-block:: http

                HTTP/1.1 200 OK
                Content-Type: application/json
                Content-Length: 206
                X-Auth-Token: 6d1b722e
                Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/

                {"return": {
                    "token": "6d1b722e",
                    "start": 1363805943.776223,
                    "expire": 1363849143.776224,
                    "user": "saltuser",
                    "eauth": "pam",
                    "perms": [
                        "grains.*",
                        "status.*",
                        "sys.*",
                        "test.*"
                    ]
                }}

        :form eauth: the eauth backend configured in your master config
        :form username: username
        :form password: password
        :status 200: success
        :status 406: requested Content-Type not available
        '''
        auth = salt.auth.LoadAuth(self.opts)

        # the urlencoded_processor will wrap this in a list
        if isinstance(cherrypy.serving.request.lowstate, list):
            creds = cherrypy.serving.request.lowstate[0]
        else:
            creds = cherrypy.serving.request.lowstate

        token = auth.mk_token(creds)
        # Expose the CherryPy session id as the API token for clients
        cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
        cherrypy.session['token'] = token['token']
        # Align the session lifetime (minutes) with the eauth token lifetime
        cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60

        # Grab eauth config for the current backend for the current user
        # NOTE(review): this direct lookup assumes the user appears by name
        # under external_auth; confirm behavior for group/wildcard ACL entries
        perms = self.opts['external_auth'][token['eauth']][token['name']]

        return {'return': [{
            'token': cherrypy.session.id,
            'expire': token['expire'],
            'start': token['start'],
            'user': token['name'],
            'eauth': token['eauth'],
            'perms': perms,
        }]}
class Logout(LowDataAdapter):
    # Convenience URL for terminating the current session.

    def POST(self):
        '''
        Destroy the currently active session and expire the session cookie

        .. versionadded:: 0.8.0
        '''
        # Expire the cookie client-side, then drop the server-side session data
        cherrypy.lib.sessions.expire()
        cherrypy.session.clear()
        cherrypy.session.clean_up()
        return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
    def POST(self, **kwargs):
        '''
        Run commands bypassing the normal session handling

        .. versionadded:: 0.8.0

        This entry point is primarily for "one-off" commands. Each request must
        pass full Salt external authentication credentials. Otherwise this URL
        is identical to the root (``/``) execution URL.

        :form lowstate: A list of :term:`lowstate` data appropriate for the
            :ref:`client <client-apis>` specified client interface. Full
            external authentication credentials must be included.

        :status 200: success
        :status 401: authentication failed
        :status 406: requested Content-Type not available
        '''
        # Credentials travel inside the lowstate chunks themselves, so no
        # session token is needed; simply collect every chunk's result.
        results = [result for result in self.exec_lowstate()]
        return {'return': results}
class API(object):
    '''
    Collect configuration and URL map for building the CherryPy app
    '''
    # URL path component -> handler class; 'index' is mounted at the root.
    url_map = {
        'index': LowDataAdapter,
        'login': Login,
        'logout': Logout,
        'minions': Minions,
        'run': Run,
        'jobs': Jobs,
    }

    def __init__(self, opts):
        self.opts = opts
        # Instantiate one handler per URL and hang it off this object so
        # CherryPy's dispatcher can find it by attribute name.
        for path_name, handler_cls in self.url_map.items():
            setattr(self, path_name, handler_cls(self.opts))

    def get_conf(self, apiopts):
        '''
        Combine the CherryPy configuration with the rest_cherrypy config values
        pulled from the master config and return the CherryPy configuration
        '''
        global_conf = {
            'server.socket_host': '0.0.0.0',
            'server.socket_port': apiopts.get('port', 8000),
            'debug': apiopts.get('debug', False),
        }
        root_conf = {
            'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
            'tools.trailing_slash.on': True,
            'tools.gzip.on': True,
            'tools.staticdir.on': 'static' in apiopts,
            'tools.staticdir.dir': apiopts.get('static', ''),
        }
        conf = {
            'global': global_conf,
            '/': root_conf,
        }
        # Any rest_cherrypy master-config values override the defaults above.
        conf['global'].update(apiopts)
        # Add to global config
        cherrypy.config.update(conf['global'])
        return conf
def get_app(opts):
    '''
    Returns a WSGI app and a configuration dictionary

    :param opts: Salt master config dictionary
    :return: tuple of (root app object, rest_cherrypy opts, CherryPy config)
    '''
    root = API(opts)  # cherrypy app
    # Pull this module's own section (e.g. 'rest_cherrypy') out of the opts
    apiopts = opts.get(__name__.rsplit('.', 2)[-2], {})  # rest_cherrypy opts
    # get_conf also pushes the 'global' section into cherrypy.config;
    # the previously-assigned (and unused) local 'gconf' has been removed.
    cpyopts = root.get_conf(apiopts)  # cherrypy app opts

    # Register salt-specific hooks
    cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
                                              salt_token_tool, priority=55)
    cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
                                             salt_auth_tool, priority=60)
    cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
                                                  hypermedia_out)
    cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
                                                 hypermedia_in)

    return root, apiopts, cpyopts
Do not process the request body if Content-Length is empty or missing
'''
A REST API for Salt
===================
:depends: - CherryPy Python module
:configuration: All authentication is done through Salt's :ref:`external auth
<acl-eauth>` system. Be sure that it is enabled and the user you are
authenticating as has permissions for all the functions you will be
running.
The configuration options for this module resides in the Salt master config
file. All available options are detailed below.
port
**Required**
The port for the webserver to listen on.
debug : ``False``
Starts a for-development web server instead of the production-ready web
server.
Does not use SSL and ignores the certificate configuration options.
ssl_crt
Required when ``debug`` is ``False``
The path to a SSL certificate. (See below)
ssl_key
Required when ``debug`` is ``False``
The path to the private key for your SSL certificate. (See below)
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
If this directory contains a ``index.html`` file, it will be served at
the root URL when HTML is requested by a client via the ``Accept``
header.
This directory may point to a clone of the `salt-ui`_ project to
bootstrap a graphical interface for interacting with Salt.
.. _`salt-ui`: https://github.com/saltstack/salt-ui
Example production configuration block:
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
The REST interface requires a secure HTTPS connection. You must provide an
SSL certificate to use. If you don't already have a certificate and don't
wish to buy one, you can generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` function in Salt (note
the dependencies for this module):
.. code-block:: bash
% salt-call tls.create_self_signed_cert
Usage
-----
You access a running Salt master via this module by sending HTTP requests to
the URLs detailed below.
.. admonition:: Content negotiation
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded).
* Specify the format of data you are sending in a request by including the
:mailheader:`Content-Type` header.
* Specify your desired output format for the response with the
:mailheader:`Accept` header.
This REST interface expects data sent in :http:method:`post` and
:http:method:`put` requests to be in the format of a list of lowstate
dictionaries. This allows you to specify multiple commands in a single request.
.. glossary::
lowstate
A dictionary containing various keys that instruct Salt which command
to run, where that command lives, any parameters for that command, any
authentication credentials, what returner to use, etc.
Salt uses the lowstate data format internally in many places to pass
command data between functions. Salt also uses lowstate for the
:ref:`LocalClient() <python-api>` Python API interface.
For example (in JSON format)::
[{
'client': 'local',
'tgt': '*',
'fun': 'test.fib',
'arg': ['10'],
}]
.. admonition:: x-www-form-urlencoded
This REST interface accepts data in the x-www-form-urlencoded format. This
is the format used by HTML forms, the default format used by
:command:`curl`, the default format used by many JavaScript AJAX libraries
(such as jQuery), etc. This format will be converted to the
:term:`lowstate` format as best as possible with the caveats below. It is
always preferable to format data in the lowstate format directly in a more
capable format such as JSON or YAML.
* Only a single command may be sent in this format per HTTP request.
* Multiple ``arg`` params will be sent as a single list of params.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
parameters. E.g., arg=one, arg=two will be sent as arg[]=one, arg[]=two.
Again, it is preferable to send lowstate via JSON or YAML directly by
specifying the :mailheader:`Content-Type` header in the request.
URL reference
-------------
The main entry point is the root URL (``/``) and all functionality is available
at that URL. The other URLs are largely convenience URLs that wrap that main
entry point.
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
import os
import json
# Import third-party libs
import cherrypy
import yaml
# Import Salt libs
import salt.auth
import salt.log
import salt.output
# Import salt-api libs
import saltapi
logger = salt.log.logging.getLogger(__name__)
def salt_token_tool():
    '''
    If the custom authentication header is supplied, put it in the cookie dict
    so the rest of the session-based auth works as intended

    Registered in get_app() as an ``on_start_resource`` CherryPy tool.
    '''
    x_auth = cherrypy.request.headers.get('X-Auth-Token', None)

    # X-Auth-Token header trumps session cookie
    if x_auth:
        cherrypy.request.cookie['session_id'] = x_auth
def salt_auth_tool():
    '''
    Redirect all unauthenticated requests to the login page
    '''
    # Short-circuit for the login page; these paths must stay reachable
    # without an existing session
    ignore_urls = ('/login', '/logout', '/run')

    if cherrypy.request.path_info.startswith(ignore_urls):
        return

    # Otherwise redirect to the login page if the session hasn't been authed
    if not cherrypy.session.get('token', None):
        raise cherrypy.InternalRedirect('/login')

    # Session is authenticated; inform caches
    cherrypy.response.headers['Cache-Control'] = 'private'
def wants_html():
    '''
    Determine if the request is asking for HTML specifically.

    Returns an empty string or a string containing the output of the
    cherrypy.lib.cptools.accept() function.
    '''
    # Short-circuit if the request is vague or overly broad: a missing or
    # wildcard Accept header does not count as an HTML request
    if (not 'Accept' in cherrypy.request.headers
            or cherrypy.request.headers['Accept'] == '*/*'):
        return ''

    try:
        return cherrypy.lib.cptools.accept(
            ['text/html'] + [i for (i, _) in ct_out_map])
    except (AttributeError, cherrypy.CherryPyException):
        # accept() raises on no acceptable match; treat that as "no HTML"
        return ''
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.  JSON is listed first and therefore wins
# ties during content negotiation.
ct_out_map = (
    ('application/json', json.dumps),
    ('application/x-yaml', yaml.dump),
)
def hypermedia_handler(*args, **kwargs):
    '''
    Determine the best output format based on the Accept header, execute the
    regular handler, and transform the output to the request content type (even
    if it's an error).

    :param args: Pass args through to the main handler
    :param kwargs: Pass kwargs through to the main handler
    '''
    # If we're being asked for HTML, try to serve index.html from the 'static'
    # directory; this is useful (as a random, non-specific example) for
    # bootstrapping the salt-ui app
    if 'static' in cherrypy.config and 'html' in wants_html():
        index = os.path.join(cherrypy.config['static'], 'index.html')
        if os.path.exists(index):
            return cherrypy.lib.static.serve_file(index)

    # Execute the real handler. Handle or pass-through any errors we know how
    # to handle (auth & HTTP errors). Reformat any errors we don't know how to
    # handle as a data structure.
    try:
        cherrypy.response.processors = dict(ct_out_map)  # handlers may modify this
        ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
    except salt.exceptions.EauthAuthenticationError:
        raise cherrypy.InternalRedirect('/login')
    except cherrypy.CherryPyException:
        raise
    except Exception as exc:
        import traceback

        logger.debug("Error while processing request for: %s",
                cherrypy.request.path_info,
                exc_info=True)

        cherrypy.response.status = 500

        ret = {
            'status': cherrypy.response.status,
            # BUG FIX: traceback.format_exc() takes an optional *limit*
            # argument, not the exception instance; passing ``exc`` produced
            # useless output. Also use .get() so a missing 'debug' key cannot
            # itself raise while formatting an error.
            'return': '{0}'.format(traceback.format_exc())
                    if cherrypy.config.get('debug', False)
                    else "An unexpected error occurred"}

    # Raises 406 if requested content-type is not supported
    best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])

    # Transform the output from the handler into the requested output format
    cherrypy.response.headers['Content-Type'] = best
    out = cherrypy.response.processors[best]
    return out(ret)
def hypermedia_out():
    '''
    Install :func:`hypermedia_handler` in front of the normal page handler so
    the handler's output can be transformed into the requested content type.
    '''
    req = cherrypy.serving.request
    # Keep the real handler reachable for hypermedia_handler().
    req._hypermedia_inner_handler = req.handler
    req.handler = hypermedia_handler

    # cherrypy.response.headers['Alternates'] = self.ct_out_map.keys()
    # TODO: add 'negotiate' to Vary header and 'list' to TCN header
    # Alternates: {"paper.1" 0.9 {type text/html} {language en}},
    #          {"paper.2" 0.7 {type text/html} {language fr}},
    #          {"paper.3" 1.0 {type application/postscript} {language en}}
def urlencoded_processor(entity):
    '''
    Accept x-www-form-urlencoded data (run through CherryPy's formatter)
    and reformat it into a Low State datastructure.

    Since we can't easily represent complicated data structures with
    key-value pairs, any more complicated requirements (e.g. compound
    commands) must instead be delivered via JSON or YAML.

    For example::

        curl -si localhost:8000 -d client=local -d tgt='*' \\
                -d fun='test.kwarg' -d arg='one=1' -d arg='two=2'

    :param entity: raw POST data
    '''
    # Let CherryPy's stock processor parse the form fields first.
    cherrypy._cpreqbody.process_urlencoded(entity)
    fields = entity.params

    # A single 'arg' value arrives as a scalar; normalize it to a list.
    if 'arg' in fields and not isinstance(fields['arg'], list):
        fields['arg'] = [fields['arg']]

    # Wrap the chunk so downstream code always sees a Low State list.
    cherrypy.request.lowstate = [fields]
def json_processor(entity):
    '''
    Unserialize raw POST data in JSON format to a Python datastructure.

    :param entity: raw POST data
    :raises cherrypy.HTTPError: 400 when the body is not valid JSON
    '''
    raw = entity.fp.read()
    try:
        lowstate = json.loads(raw)
    except ValueError:
        raise cherrypy.HTTPError(400, 'Invalid JSON document')
    cherrypy.serving.request.lowstate = lowstate
def yaml_processor(entity):
    '''
    Unserialize raw POST data in YAML format to a Python datastructure.

    :param entity: raw POST data
    :raises cherrypy.HTTPError: 400 when the body is not parseable YAML
    '''
    body = entity.fp.read()
    try:
        # SECURITY NOTE: yaml.load() on untrusted request bodies can
        # instantiate arbitrary Python objects; yaml.safe_load() would be
        # the safer choice here.
        cherrypy.serving.request.lowstate = yaml.load(body)
    except (ValueError, yaml.YAMLError):
        # BUG FIX: PyYAML reports parse failures with yaml.YAMLError (and
        # subclasses), not ValueError, so malformed documents previously
        # escaped this handler and surfaced as 500s instead of 400s.
        raise cherrypy.HTTPError(400, 'Invalid YAML document')
def hypermedia_in():
    '''
    Unserialize POST/PUT data of a specified Content-Type.

    The following custom processors all are intended to format Low State data
    and will place that datastructure into the request object.

    :raises HTTPError: if the request contains a Content-Type that we do not
        have a processor for
    '''
    # Be liberal in what you accept
    ct_in_map = {
        'application/x-www-form-urlencoded': urlencoded_processor,
        'application/json': json_processor,
        'application/x-yaml': yaml_processor,
        'text/yaml': yaml_processor,
    }

    # Do not process the body for POST requests that have specified no content
    # or have not specified Content-Length
    # BUG FIX: header values are strings, so the previous comparison
    # ('0' == 0) never matched an explicit zero Content-Length; coerce the
    # header to an integer before testing it.
    try:
        content_length = int(
            cherrypy.request.headers.get('Content-Length', 0))
    except (TypeError, ValueError):
        content_length = 0
    if (cherrypy.request.method.upper() == 'POST'
            and content_length == 0):
        cherrypy.request.process_request_body = False

    cherrypy.request.body.processors.clear()
    cherrypy.request.body.default_proc = cherrypy.HTTPError(
            406, 'Content type not supported')
    cherrypy.request.body.processors = ct_in_map
class LowDataAdapter(object):
    '''
    The primary entry point to the REST API. All functionality is available
    through this URL. The other available URLs provide convenience wrappers
    around this URL.
    '''
    exposed = True

    _cp_config = {
        'tools.sessions.on': True,
        'tools.sessions.timeout': 60 * 10,  # 10 hours
        # 'tools.autovary.on': True,
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
        'tools.hypermedia_out.on': True,
        'tools.hypermedia_in.on': True,
    }

    def __init__(self, opts):
        '''
        :param opts: A dictionary of options from Salt's master config (e.g.
            Salt's, ``__opts__``)
        '''
        self.opts = opts
        self.api = saltapi.APIClient(opts)

    def exec_lowstate(self):
        '''
        Pull a Low State datastructure from the request and execute each
        low-data chunk through Salt, yielding the result of every chunk.
        Each chunk is tagged with the current session's auth token.
        '''
        token = cherrypy.session.get('token', None)
        for chunk in cherrypy.request.lowstate:
            if token:
                chunk['token'] = token
            yield self.api.run(chunk)

    def GET(self):
        '''
        .. http:get:: /

            An explanation of the API with links of where to go next.

            **Example request**::

                % curl -i localhost:8000

            .. code-block:: http

                GET / HTTP/1.1
                Host: localhost:8000
                Accept: application/json

            **Example response**:

            .. code-block:: http

                HTTP/1.1 200 OK
                Content-Type: application/json

        :status 200: success
        :status 401: authentication required
        :status 406: requested Content-Type not available
        '''
        ret = {
            'status': cherrypy.response.status,
            'return': "Welcome",
        }
        return ret

    def POST(self, **kwargs):
        '''
        The primary execution interface for the rest of the API

        .. http:post:: /

            **Example request**::

                % curl -si https://localhost:8000 \\
                        -H "Accept: application/x-yaml" \\
                        -H "X-Auth-Token: d40d1e1e" \\
                        -d client=local \\
                        -d tgt='*' \\
                        -d fun='test.ping' \\
                        -d arg

            .. code-block:: http

                POST / HTTP/1.1
                Host: localhost:8000
                Accept: application/x-yaml
                X-Auth-Token: d40d1e1e
                Content-Length: 36
                Content-Type: application/x-www-form-urlencoded

                fun=test.ping&arg&client=local&tgt=*

            **Example response**:

            .. code-block:: http

                HTTP/1.1 200 OK
                Content-Length: 200
                Allow: GET, HEAD, POST
                Content-Type: application/x-yaml

                return:
                - ms-0: true
                  ms-1: true
                  ms-2: true
                  ms-3: true
                  ms-4: true

        :form lowstate: A list of :term:`lowstate` data appropriate for the
            :ref:`client <client-apis>` interface you are calling.

            Lowstate may be supplied in any supported format by specifying the
            :mailheader:`Content-Type` header in the request. Supported formats
            are listed in the :mailheader:`Alternates` response header.
        :status 200: success
        :status 401: authentication required
        :status 406: requested Content-Type not available
        '''
        results = list(self.exec_lowstate())
        return {'return': results}
class Minions(LowDataAdapter):
    def GET(self, mid=None):
        '''
        A convenience URL for getting lists of minions or getting minion
        details

        .. http:get:: /minions/(mid)

            Get grains, modules, functions, and inline function documentation
            for all minions or a single minion

            **Example request**::

                % curl -i localhost:8000/minions/ms-3

            .. code-block:: http

                GET /minions/ms-3 HTTP/1.1
                Host: localhost:8000
                Accept: application/x-yaml

            **Example response**:

            .. code-block:: http

                HTTP/1.1 200 OK
                Content-Length: 129005
                Content-Type: application/x-yaml

                return:
                - ms-3:
                    grains.items:
                        ...

        :param mid: (optional) a minion id
        :status 200: success
        :status 401: authentication required
        :status 406: requested Content-Type not available
        '''
        # No minion id means "all minions".
        target = mid or '*'
        cherrypy.request.lowstate = [
            {'client': 'local', 'tgt': target, 'fun': 'grains.items'},
        ]
        return {'return': list(self.exec_lowstate())}

    def POST(self, **kwargs):
        '''
        Start an execution command and immediately return the job id

        .. http:post:: /minions

            You must pass low-data in the request body either from an HTML form
            or as JSON or YAML. The ``client`` option is pre-set to
            ``local_async``.

            **Example request**::

                % curl -sSi localhost:8000/minions \\
                    -H "Accept: application/x-yaml" \\
                    -d tgt='*' \\
                    -d fun='status.diskusage'

            .. code-block:: http

                POST /minions HTTP/1.1
                Host: localhost:8000
                Accept: application/x-yaml
                Content-Length: 26
                Content-Type: application/x-www-form-urlencoded

                tgt=*&fun=status.diskusage

            **Example response**:

            .. code-block:: http

                HTTP/1.1 202 Accepted
                Content-Length: 86
                Content-Type: application/x-yaml

                - return:
                    jid: '20130118105423694155'
                    minions: [ms-4, ms-3, ms-2, ms-1, ms-0]

        :form lowstate: lowstate data for the
            :py:mod:`~salt.client.LocalClient`; the ``client`` parameter will
            be set to ``local_async``

            Lowstate may be supplied in any supported format by specifying the
            :mailheader:`Content-Type` header in the request. Supported formats
            are listed in the :mailheader:`Alternates` response header.
        :status 202: success
        :status 401: authentication required
        :status 406: requested :mailheader:`Content-Type` not available
        '''
        # Force the asynchronous local client for every chunk.
        for chunk in cherrypy.request.lowstate:
            chunk['client'] = 'local_async'
        job_data = next(self.exec_lowstate(), {})

        cherrypy.response.status = 202
        return [{'return': job_data}]
class Jobs(LowDataAdapter):
    def GET(self, jid=None):
        '''
        A convenience URL for getting lists of previously run jobs or getting
        the return from a single job

        .. http:get:: /jobs/(jid)

            Get grains, modules, functions, and inline function documentation
            for all minions or a single minion

            **Example request**::

                % curl -i localhost:8000/jobs

            .. code-block:: http

                GET /jobs HTTP/1.1
                Host: localhost:8000
                Accept: application/x-yaml

            **Example response**:

            .. code-block:: http

                HTTP/1.1 200 OK
                Content-Length: 165
                Content-Type: application/x-yaml

                return:
                - '20121130104633606931':
                    Arguments:
                    - '3'
                    Function: test.fib
                    Start Time: 2012, Nov 30 10:46:33.606931
                    Target: ms-3
                    Target-type: glob

            **Example request**::

                % curl -i localhost:8000/jobs/20121130104633606931

            .. code-block:: http

                GET /jobs/20121130104633606931 HTTP/1.1
                Host: localhost:8000
                Accept: application/x-yaml

            **Example response**:

            .. code-block:: http

                HTTP/1.1 200 OK
                Content-Length: 73
                Content-Type: application/x-yaml

                return:
                - ms-3:
                  - - 0
                    - 1
                    - 1
                    - 2
                    - 9.059906005859375e-06

        :param jid: (optional) a job id
        :status 200: success
        :status 401: authentication required
        :status 406: requested Content-Type not available
        '''
        # DOC FIX: the parameter was previously documented as ':param mid:
        # a minion id' — it is a job id (jid).
        # With a jid, look up that job's return; without one, list all jobs.
        cherrypy.request.lowstate = [{
            'client': 'runner',
            'fun': 'jobs.lookup_jid' if jid else 'jobs.list_jobs',
            'jid': jid,
        }]
        return {
            'return': list(self.exec_lowstate()),
        }
class Login(LowDataAdapter):
    '''
    All interactions with this REST API must be authenticated. Authentication
    is performed through Salt's eauth system. You must set the eauth backend
    and allowed users by editing the :conf_master:`external_auth` section in
    your master config.

    Authentication credentials are passed to the REST API via a session id in
    one of two ways:

    If the request is initiated from a browser it must pass a session id via a
    cookie and that session must be valid and active.

    If the request is initiated programmatically, the request must contain a
    :mailheader:`X-Auth-Token` header with valid and active session id.
    '''
    exposed = True

    def GET(self):
        '''
        Present the login interface

        .. http:get:: /login

            An explanation of how to log in.

            **Example request**::

                % curl -i localhost:8000/login

            .. code-block:: http

                GET /login HTTP/1.1
                Host: localhost:8000
                Accept: text/html

            **Example response**:

            .. code-block:: http

                HTTP/1.1 200 OK
                Content-Type: text/html

        :status 401: authentication required
        :status 406: requested Content-Type not available
        '''
        # Advertise the session-based auth scheme alongside the 401.
        response = cherrypy.response
        response.status = '401 Unauthorized'
        response.headers['WWW-Authenticate'] = 'Session'

        return {
            'status': response.status,
            'return': "Please log in",
        }

    def POST(self, **kwargs):
        '''
        Authenticate against Salt's eauth system

        .. versionchanged:: 0.8.0
            No longer returns a 302 redirect on success.

        .. http:post:: /login

            **Example request**::

                % curl -si localhost:8000/login \\
                        -H "Accept: application/json" \\
                        -d username='saltuser' \\
                        -d password='saltpass' \\
                        -d eauth='pam'

            .. code-block:: http

                POST / HTTP/1.1
                Host: localhost:8000
                Content-Length: 42
                Content-Type: application/x-www-form-urlencoded
                Accept: application/json

                username=saltuser&password=saltpass&eauth=pam

            **Example response**:

            .. code-block:: http

                HTTP/1.1 200 OK
                Content-Type: application/json
                Content-Length: 206
                X-Auth-Token: 6d1b722e
                Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/

                {"return": {
                    "token": "6d1b722e",
                    "start": 1363805943.776223,
                    "expire": 1363849143.776224,
                    "user": "saltuser",
                    "eauth": "pam",
                    "perms": [
                        "grains.*",
                        "status.*",
                        "sys.*",
                        "test.*"
                    ]
                }}

        :form eauth: the eauth backend configured in your master config
        :form username: username
        :form password: password
        :status 200: success
        :status 406: requested Content-Type not available
        '''
        auth = salt.auth.LoadAuth(self.opts)

        # the urlencoded_processor will wrap this in a list
        lowstate = cherrypy.serving.request.lowstate
        creds = lowstate[0] if isinstance(lowstate, list) else lowstate

        token = auth.mk_token(creds)

        session = cherrypy.session
        cherrypy.response.headers['X-Auth-Token'] = session.id
        session['token'] = token['token']
        # CherryPy session timeout is expressed in minutes.
        session['timeout'] = (token['expire'] - token['start']) / 60

        # Grab eauth config for the current backend for the current user
        perms = self.opts['external_auth'][token['eauth']][token['name']]

        return {'return': [{
            'token': session.id,
            'expire': token['expire'],
            'start': token['start'],
            'user': token['name'],
            'eauth': token['eauth'],
            'perms': perms,
        }]}
class Logout(LowDataAdapter):
    def POST(self):
        '''
        Destroy the currently active session and expire the session cookie

        .. versionadded:: 0.8.0
        '''
        # Expire the client-side cookie first, then wipe the server session.
        cherrypy.lib.sessions.expire()

        session = cherrypy.session
        session.clear()
        session.clean_up()

        return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
    def POST(self, **kwargs):
        '''
        Run commands bypassing the normal session handling

        .. versionadded:: 0.8.0

        This entry point is primarily for "one-off" commands. Each request must
        pass full Salt external authentication credentials. Otherwise this URL
        is identical to the root (``/``) execution URL.

        :form lowstate: A list of :term:`lowstate` data appropriate for the
            :ref:`client <client-apis>` specified client interface. Full
            external authentication credentials must be included.
        :status 200: success
        :status 401: authentication failed
        :status 406: requested Content-Type not available
        '''
        results = list(self.exec_lowstate())
        return {'return': results}
class API(object):
    '''
    Collect configuration and URL map for building the CherryPy app
    '''
    url_map = {
        'index': LowDataAdapter,
        'login': Login,
        'logout': Logout,
        'minions': Minions,
        'run': Run,
        'jobs': Jobs,
    }

    def __init__(self, opts):
        self.opts = opts
        # Mount one controller instance per URL segment.
        for url, cls in self.url_map.items():
            setattr(self, url, cls(self.opts))

    def get_conf(self, apiopts):
        '''
        Combine the CherryPy configuration with the rest_cherrypy config values
        pulled from the master config and return the CherryPy configuration
        '''
        serve_static = 'static' in apiopts
        conf = {
            'global': {
                'server.socket_host': '0.0.0.0',
                'server.socket_port': apiopts.get('port', 8000),
                'debug': apiopts.get('debug', False),
            },
            '/': {
                'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
                'tools.trailing_slash.on': True,
                'tools.gzip.on': True,
                'tools.staticdir.on': serve_static,
                'tools.staticdir.dir': apiopts.get('static', ''),
            },
        }
        # Master-config values override the defaults above.
        conf['global'].update(apiopts)

        # Add to global config
        cherrypy.config.update(conf['global'])

        return conf
def get_app(opts):
    '''
    Build the CherryPy application root and its configuration.

    :param opts: Salt master config dictionary
    :return: a ``(root, apiopts, cpyopts)`` tuple: the mounted API object,
        the ``rest_cherrypy`` section of the master config, and the full
        CherryPy configuration
    '''
    root = API(opts)  # cherrypy app

    # Options for this module live under the module's package name
    # (e.g. 'rest_cherrypy') in the master config.
    apiopts = opts.get(__name__.rsplit('.', 2)[-2], {})
    cpyopts = root.get_conf(apiopts)  # cherrypy app opts
    # CLEANUP: removed the unused local ``gconf`` (the 'global' section was
    # extracted but never read).

    # Register salt-specific hooks
    cherrypy.tools.salt_token = cherrypy.Tool(
        'on_start_resource', salt_token_tool, priority=55)
    cherrypy.tools.salt_auth = cherrypy.Tool(
        'before_request_body', salt_auth_tool, priority=60)
    cherrypy.tools.hypermedia_out = cherrypy.Tool(
        'before_handler', hypermedia_out)
    cherrypy.tools.hypermedia_in = cherrypy.Tool(
        'before_request_body', hypermedia_in)

    return root, apiopts, cpyopts
|
"""
Form designer link plugin.
This plugin displays a form at the page, that was created with form_designer.
To customize the output, configure the ``django-form-designer`` application via the settings file.
For example, use:
* ``FORM_DESIGNER_DEFAULT_FORM_TEMPLATE`` or ``FORM_TEMPLATES`` to control the form output (e.g. render it with ``django-uni-form``).
* ``FORM_DESIGNER_FIELD_CLASSES`` to define which field types are allowed.
* ``FORM_DESIGNER_WIDGET_CLASSES`` to define which widgets are allowed.
"""
from django.contrib.messages.api import get_messages
from django.utils.translation import ugettext_lazy as _
from fluent_contents.extensions import ContentPlugin, plugin_pool
from fluent_contents.plugins.formdesignerlink.models import FormDesignerLink
from form_designer import settings as form_designer_settings
from form_designer.views import process_form
@plugin_pool.register
class FormDesignerLinkPlugin(ContentPlugin):
    model = FormDesignerLink
    category = _('Interactivity')
    cache_output = False

    def get_render_template(self, request, instance, **kwargs):
        """Pick the template, preferring the one set on the form definition."""
        return (
            instance.form_definition.form_template_name
            or self.render_template
            or form_designer_settings.DEFAULT_FORM_TEMPLATE
        )

    def render(self, request, instance, **kwargs):
        # process_form() was written with Django CMS in mind; it responds to
        # both the GET and the POST request itself, so rendering is done here
        # instead of via get_context().
        context = process_form(request, instance.form_definition, {}, is_cms_plugin=True)
        # The template unconditionally expects 'messages' in the context.
        context['messages'] = get_messages(request)

        template = self.get_render_template(request, instance, **kwargs)
        return self.render_to_string(request, template, context)
Support new versions of django-form-designer.
The ``is_cms_plugin`` argument of ``process_form()`` was renamed to ``disable_redirection``.
"""
Form designer link plugin.
This plugin displays a form at the page, that was created with form_designer.
To customize the output, configure the ``django-form-designer`` application via the settings file.
For example, use:
* ``FORM_DESIGNER_DEFAULT_FORM_TEMPLATE`` or ``FORM_TEMPLATES`` to control the form output (e.g. render it with ``django-uni-form``).
* ``FORM_DESIGNER_FIELD_CLASSES`` to define which field types are allowed.
* ``FORM_DESIGNER_WIDGET_CLASSES`` to define which widgets are allowed.
"""
from inspect import getargspec
from django.contrib.messages.api import get_messages
from django.utils.translation import ugettext_lazy as _
from fluent_contents.extensions import ContentPlugin, plugin_pool
from fluent_contents.plugins.formdesignerlink.models import FormDesignerLink
from form_designer import settings as form_designer_settings
from form_designer.views import process_form
# Find out which version of process_form is available.
# The 'is_cms_plugin' keyword was renamed to 'disable_redirection' at some
# revision of django-form-designer; detect which one this install accepts.
_keywords = getargspec(process_form).args
_disable_redirection = 'disable_redirection'
for _name in ('disable_redirection', 'is_cms_plugin'):
    if _name in _keywords:
        _disable_redirection = _name
        # BUG FIX: stop at the first (preferred, newest) match — without the
        # break the legacy name would win whenever both are present.
        break
@plugin_pool.register
class FormDesignerLinkPlugin(ContentPlugin):
    model = FormDesignerLink
    category = _('Interactivity')
    cache_output = False

    def get_render_template(self, request, instance, **kwargs):
        """Pick the template, preferring the one set on the form definition."""
        return (
            instance.form_definition.form_template_name
            or self.render_template
            or form_designer_settings.DEFAULT_FORM_TEMPLATE
        )

    def render(self, request, instance, **kwargs):
        # process_form() was written with Django CMS in mind; it responds to
        # both the GET and the POST request itself. The keyword that disables
        # redirection depends on the installed form_designer version, hence
        # the module-level name detection.
        form_kwargs = {_disable_redirection: True}
        context = process_form(request, instance.form_definition, {}, **form_kwargs)
        # The template unconditionally expects 'messages' in the context.
        context['messages'] = get_messages(request)

        template = self.get_render_template(request, instance, **kwargs)
        return self.render_to_string(request, template, context)
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on 2014-05-16
@author: Eugene Dvoretsky
Immunopy GUI primitives.
"""
import sys
import time
import numpy as np
from PySide import QtCore
from PySide import QtGui
from PySide import QtOpenGL
from OpenGL.GL import *
from OpenGL import ERROR_ON_COPY
import iptools
import lut
import statdata
# NOTE(review): this rebinds only the module-local name created by
# ``from OpenGL import ERROR_ON_COPY`` above; it does NOT change
# ``OpenGL.ERROR_ON_COPY``, which must be set on the OpenGL package *before*
# ``OpenGL.GL`` is imported to take effect. Confirm whether copy/cast errors
# are actually being raised as intended.
ERROR_ON_COPY = True  # Raise exception on array copy or casting
# http://pyopengl.sourceforge.net/documentation/opengl_diffs.html
class AdjustBar(QtGui.QWidget):
    """Slider and spinbox widget bound to one camera property.

    Builds either an integer or a floating-point editor depending on the
    Micro-Manager property type; the slider and the spinbox mirror each
    other, and changes are written back to the device.
    The spinboxes always contain real property value.

    BUG: precision sometimes is not enough.
    """
    def __init__(self, mmcore, prop, parent=None):
        # mmcore: Micro-Manager core object; prop: camera property name.
        super(AdjustBar, self).__init__(parent)
        self.parent = parent
        self.mmc = mmcore
        self.prop = prop
        # Scale factor that maps float property values onto integer slider
        # positions (sliders only support ints).
        self.mult = 1000.0
        self.camname = self.mmc.getCameraDevice()
        self.minlim=self.mmc.getPropertyLowerLimit(self.camname, prop)
        self.maxlim=self.mmc.getPropertyUpperLimit(self.camname, prop)
        self.vbox = QtGui.QVBoxLayout(self.parent)
        self.form = QtGui.QFormLayout()
        # self.hbox.setAlignment(QtCore.Qt.AlignTop)
        self.slid = QtGui.QSlider(QtCore.Qt.Horizontal)
        if self.mmc.getPropertyType(self.camname, prop) == 3:  # 3 is int
            # Integer property: slider and spinbox share the same scale, so
            # they can drive each other directly; the slider writes through
            # to the device.
            self.spin = QtGui.QSpinBox()
            self.slid.setRange(int(self.minlim), int(self.maxlim))
            self.spin.setRange(int(self.minlim), int(self.maxlim))
            self.spin.setValue(int(self.mmc.getProperty(self.camname, prop)))
            self.slid.valueChanged.connect(self.spin.setValue)
            self.spin.valueChanged.connect(self.slid.setValue)
            self.slid.valueChanged.connect(self.setDevProperty)
        else:
            # Float property: the slider works on a stretched integer scale
            # and the two widgets are cross-connected via the converting
            # slots setAsDouble()/setAsInt().
            self.spin = QtGui.QDoubleSpinBox()
            self.spin.setSingleStep(0.01)
            # Stretch slider
            self.slid.setRange(self.minlim,
                (self.maxlim - self.minlim) * self.mult + self.minlim)
            self.spin.setRange(self.minlim, self.maxlim)
            # Prevent comma on Linux.
            self.spin.setValue(float(
                self.mmc.getProperty(self.camname, prop).replace(',', '.')))
            self.slid.valueChanged.connect(self.setAsDouble)
            self.spin.valueChanged.connect(self.setAsInt)
        self.form.addRow(prop, self.spin)
        self.setLayout(self.vbox)
        self.vbox.addLayout(self.form)
        self.vbox.addWidget(self.slid)

    @QtCore.Slot(float)
    def setAsInt(self, value):
        # Mirror the float spinbox value onto the stretched integer slider
        # scale, then push the real value to the device.
        target = (value - self.minlim) * self.mult + self.minlim
        if self.slid.value() != target:
            self.slid.setValue(target)
        self.setDevProperty(value)

    @QtCore.Slot(int)
    def setAsDouble(self, value):
        # Mirror the integer slider position back onto the float spinbox.
        # Both sides are rounded to 2 decimals to avoid feedback loops from
        # float<->int conversion noise.
        current = round(self.spin.value(), 2)
        target = round((value - self.minlim) / self.mult + self.minlim, 2)
        if current != target:
            self.spin.setValue(target)

    @QtCore.Slot()
    def setDevProperty(self, value):
        # Micro-Manager expects the property value as a string.
        self.mmc.setProperty(self.camname, self.prop, str(value))
class MicroscopeControl(QtGui.QGroupBox):
    """Control microscope devices.

    Builds the acquisition, objective, camera and white-balance controls
    and wires them to the parent's Micro-Manager core (``parent.mmc``),
    calibration object (``parent.CMicro``) and video processor
    (``parent.VProc``).

    Aware hardcode for properties is better way.
    """
    # Acquisition requests emitted toward the video processor.
    willRunOnce = QtCore.Signal()
    willRunContinuously = QtCore.Signal()
    willStop = QtCore.Signal()
    # Request for an automatic white balance measurement.
    needAutoWb = QtCore.Signal()

    def __init__(self, parent=None):
        super(MicroscopeControl, self).__init__(parent)
        self.parent = parent
        self.setTitle('Microscope control')
        # Layout skeleton: a form for device controls, a vbox for the
        # histogram preview, and a horizontal row for white balance widgets.
        self.vbox = QtGui.QVBoxLayout()
        self.setLayout(self.vbox)
        self.form = QtGui.QFormLayout()
        self.in_vbox = QtGui.QVBoxLayout()
        self.horizontal = QtGui.QHBoxLayout()
        self.horizontal.setAlignment(QtCore.Qt.AlignLeft)
        self.vbox.addLayout(self.form)
        self.vbox.addLayout(self.in_vbox)
        self.vbox.addLayout(self.horizontal)
        # Start/stop button plus the "continuous acquisition" checkbox.
        self.streaming_btn = QtGui.QPushButton('&Start')
        self.form.addRow('Acquisition', self.streaming_btn)
        self.streaming_btn.pressed.connect(self.toggle_streaming)
        self.cont_cbx = QtGui.QCheckBox()
        self.form.addRow('Continuous', self.cont_cbx)
        # Get scales and set default.
        self.objective = QtGui.QComboBox()
        self.objective.addItems(self.parent.CMicro.get_all_scalenames())
        self.objective.setCurrentIndex(
            self.objective.findText(self.parent.CMicro.scalename))
        self.form.addRow('Objective', self.objective)
        self.objective.currentIndexChanged.connect(self.change_scalename)
        self.camname = self.parent.mmc.getCameraDevice()
        # Exposure control; limits come from the camera device itself.
        self.exposure = QtGui.QSpinBox()
        self.exposure.setSuffix(' ms')
        self.exposure.setRange(
            self.parent.mmc.getPropertyLowerLimit(self.camname, 'Exposure'),
            self.parent.mmc.getPropertyUpperLimit(self.camname, 'Exposure'))
        self.exposure.setValue(self.parent.mmc.getExposure())
        self.exposure.valueChanged.connect(self.parent.mmc.setExposure)
        self.form.addRow('Exposure', self.exposure)
        # Gain control, also limited by the camera.
        self.gain = QtGui.QDoubleSpinBox()
        self.gain.setSingleStep(0.1)
        self.gain.setRange(
            self.parent.mmc.getPropertyLowerLimit(self.camname, 'Gain'),
            self.parent.mmc.getPropertyUpperLimit(self.camname, 'Gain'))
        self.gain.setValue(float(self.parent.mmc.getProperty(self.camname, 'Gain')))
        self.gain.valueChanged.connect(self.set_gain)
        self.form.addRow('Gain', self.gain)
        # Binning selector, populated from the camera's allowed values.
        self.binning = QtGui.QComboBox()
        self.binning.addItems(self.parent.mmc.getAllowedPropertyValues(self.camname, 'Binning'))
        self.binning.setCurrentIndex(
            self.binning.findText(self.parent.mmc.getProperty(self.camname, 'Binning')))
        self.binning.currentIndexChanged.connect(self.set_binning)
        self.form.addRow('Binning', self.binning)
        # Histogram preview label; refreshed by setHistogram().
        self.histview = QtGui.QLabel('Histogram')
        self.histview.setAlignment(QtCore.Qt.AlignCenter)
        self.histview.setFixedSize(256, 100)
        self.in_vbox.addWidget(self.histview)
        # Per-channel white balance adjustment spinboxes (R, G, B).
        self.horizontal.addWidget(QtGui.QLabel('R'))
        self.sbx_adjust_r = QtGui.QDoubleSpinBox()
        self.sbx_adjust_r.setSingleStep(0.01)
        self.sbx_adjust_r.setRange(-2.0, 2.0)
        self.horizontal.addWidget(self.sbx_adjust_r)
        self.horizontal.addWidget(QtGui.QLabel('G'))
        self.sbx_adjust_g = QtGui.QDoubleSpinBox()
        self.sbx_adjust_g.setSingleStep(0.01)
        self.sbx_adjust_g.setRange(-2.0, 2.0)
        self.horizontal.addWidget(self.sbx_adjust_g)
        self.horizontal.addWidget(QtGui.QLabel('B'))
        self.sbx_adjust_b = QtGui.QDoubleSpinBox()
        self.sbx_adjust_b.setSingleStep(0.01)
        self.sbx_adjust_b.setRange(-2.0, 2.0)
        self.horizontal.addWidget(self.sbx_adjust_b)
        # Automatic / reset white balance buttons.
        self.btn_autowb = QtGui.QPushButton('Auto')
        self.btn_autowb.setToolTip(
            "Please remove slice and click the button")
        self.btn_autowb.setStyleSheet("padding: 3px;")
        self.horizontal.addWidget(self.btn_autowb)
        self.btn_resetwb = QtGui.QPushButton('Reset')
        self.btn_resetwb.setToolTip("Reset channels shifts to zero")
        self.btn_resetwb.setStyleSheet("padding: 3px;")
        self.btn_resetwb.clicked.connect(self.resetWbControls)
        self.horizontal.addWidget(self.btn_resetwb)
        self.btn_autowb.clicked.connect(self.autowb)
        self.updateWbControls()
        # Wire acquisition requests to the video processor.
        self.willRunOnce.connect(self.parent.VProc.runOnce)
        self.willRunContinuously.connect(self.parent.VProc.runContinuous)
        self.willStop.connect(self.parent.VProc.stop)

    @QtCore.Slot()
    def toggle_streaming(self):
        # Start acquisition if idle (continuous or one-shot depending on the
        # checkbox); otherwise stop and re-enable the controls that were
        # locked while streaming.
        if not self.parent.mmc.isSequenceRunning():
            if self.cont_cbx.checkState() == QtCore.Qt.Checked:
                self.willRunContinuously.emit()
                self.streaming_btn.setText('&Stop')
                self.cont_cbx.setEnabled(False)
                self.binning.setEnabled(False)
            else:
                self.willRunOnce.emit()
        else:
            self.willStop.emit()
            self.streaming_btn.setText('&Start')
            self.cont_cbx.setEnabled(True)
            self.binning.setEnabled(True)

    @QtCore.Slot(int)
    def change_scalename(self, index):
        # Propagate the objective selection to the microscope calibration.
        self.parent.CMicro.scalename = str(self.objective.currentText())

    @QtCore.Slot(float)
    def set_gain(self, value):
        # Micro-Manager expects property values as strings.
        self.parent.mmc.setProperty(self.camname, 'Gain', str(value))

    @QtCore.Slot(int)
    def set_binning(self, index):
        value = self.binning.itemText(index)
        self.parent.mmc.setProperty(self.camname, 'Binning', str(value))

    @QtCore.Slot()
    def setHistogram(self):
        # Display the latest histogram image produced by the video processor.
        img = self.parent.VProc.hist
        image = QtGui.QImage(img, img.shape[1], img.shape[0], QtGui.QImage.Format_ARGB32)
        self.histview.setPixmap(QtGui.QPixmap(image))

    @QtCore.Slot()
    def updateWbControls(self):
        # Pull the processor's current white point into the RGB spinboxes.
        r, g, b = self.parent.VProc.get_white_point()
        self.sbx_adjust_r.setValue(r)
        self.sbx_adjust_g.setValue(g)
        self.sbx_adjust_b.setValue(b)

    def resetWbControls(self):
        # Neutral white balance: unity value on every channel.
        self.sbx_adjust_r.setValue(1.0)
        self.sbx_adjust_g.setValue(1.0)
        self.sbx_adjust_b.setValue(1.0)
        self.btn_autowb.setEnabled(True)

    def autowb(self):
        # Lock the button until resetWbControls() re-enables it; the actual
        # measurement happens in whatever receives needAutoWb.
        self.btn_autowb.setEnabled(False)
        self.needAutoWb.emit()
class AnalysisControl(QtGui.QGroupBox):
    """Control image analysis workflow.

    Cell segmentation controls. Each widget below is initialized from the
    current state of the parent's cell processor
    (``parent.VProc.CProcessor``).
    """
    def __init__(self, parent=None):
        super(AnalysisControl, self).__init__(parent)
        self.parent = parent
        self.setTitle('Analysis control')
        self.vbox = QtGui.QVBoxLayout()
        self.vbox.setAlignment(QtCore.Qt.AlignTop)
        self.setLayout(self.vbox)
        self.in_vbox = QtGui.QVBoxLayout()
        self.form = QtGui.QFormLayout()
        self.vbox.addLayout(self.in_vbox)
        self.vbox.addLayout(self.form)
        # self.cont_cbx = QtGui.QCheckBox()
        # self.form.addRow('Analyze', self.cont_cbx)
        # Visualization mode selector.
        self.vtype = QtGui.QComboBox()
        self.vtype.addItems(self.parent.VProc.CProcessor.vtypes)
        self.vtype.setCurrentIndex(
            self.vtype.findText(self.parent.VProc.CProcessor.vtype))
        self.form.addRow('VizType', self.vtype)
        # Upper / lower cell size bounds (pixels) for segmentation filtering.
        self.sizemax = QtGui.QSpinBox()
        self.sizemax.setSuffix(' px')
        self.sizemax.setRange(0, 9999999)
        self.sizemax.setValue(self.parent.VProc.CProcessor.max_size)
        self.form.addRow('Max size', self.sizemax)
        self.sizemin = QtGui.QSpinBox()
        self.sizemin.setSuffix(' px')
        self.sizemin.setRange(0, 9999)
        self.sizemin.setValue(self.parent.VProc.CProcessor.min_size)
        self.form.addRow('Min size', self.sizemin)
        # Minimum distance between detected peaks (pixels).
        self.peak_dist = QtGui.QSpinBox()
        self.peak_dist.setSuffix(' px')
        self.peak_dist.setRange(0, 9999)
        self.peak_dist.setValue(self.parent.VProc.CProcessor.peak_distance)
        self.form.addRow('Peak distance', self.peak_dist)
        # Per-stain threshold shifts, in percent of the automatic threshold.
        self.dab_th_shift = QtGui.QSpinBox()
        self.dab_th_shift.setSuffix(' %')
        self.dab_th_shift.setRange(-100, 100)
        self.dab_th_shift.setValue(self.parent.VProc.CProcessor.th_dab_shift)
        self.form.addRow('DAB threshold shift', self.dab_th_shift)
        self.hem_th_shift = QtGui.QSpinBox()
        self.hem_th_shift.setSuffix(' %')
        self.hem_th_shift.setRange(-100, 100)
        self.hem_th_shift.setValue(self.parent.VProc.CProcessor.th_hem_shift)
        self.form.addRow('HEM threshold shift', self.hem_th_shift)
class GLFrame(QtOpenGL.QGLWidget):
"""OpenGL based video output Qt widget.
Put RGB image to texture and show it with OpenGL.
* Разрешение Viewport определяется размером окна
* Соотношение сторон фиксированное 4:3 (зависит от `setBaseSize()` при установке текстуры)
"""
    def __init__(self):
        super(GLFrame, self).__init__()
        # Last uploaded image array; None until setData() is first called.
        self._tex_data = None
        # OpenGL texture name; created lazily by create_texture().
        self._texture_id = None
        # Normalized device coordinates covering the whole widget.
        self.rect = QtCore.QRectF(-1, -1, 2, 2)  # x, y, w, h
    def initializeGL(self):
        # One-time GL state: background color matching Qt and 2D texturing.
        glClearColor(0.85, 0.85, 0.85, 1.0)  # Like Qt background
        glEnable(GL_TEXTURE_2D)
    def paintGL(self):
        """Replace old texture data and show it on screen.
        """
        # Nothing to draw until the first texture has been uploaded.
        if self._texture_id is not None:
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
            self.drawTexture(self.rect, self._texture_id)

        # Manual textured-quad alternative to drawTexture(), kept for
        # reference:
        # glBegin(GL_QUADS)
        # glTexCoord2f(0, 0); glVertex3f(-1.0, 1.0, 0.0);  # Top left (w,h,d)
        # glTexCoord2f(1, 0); glVertex3f( 1.0, 1.0, 0.0);  # Top right
        # glTexCoord2f(1, 1); glVertex3f( 1.0,-1.0, 0.0);  # Bottom right
        # glTexCoord2f(0, 1); glVertex3f(-1.0,-1.0, 0.0);  # Bottom left
        # glEnd()
    def resizeGL(self, width, height):
        """Keep aspect ratio in viewport.
        """
        # baseSize() holds the texture's pixel size (set by
        # update_widget_size()); scale it to fit the new window while keeping
        # the aspect ratio and use the result as the viewport.
        widget_size = self.baseSize()
        widget_size.scale(width, height, QtCore.Qt.KeepAspectRatio)
        glViewport(0, 0, widget_size.width(), widget_size.height())
        # self.resize(widget_size)
def setData(self, array):
"""Set numpy array as new texture to widget.
"""
# self.makeCurrent()
if self._tex_data is not None:
if (self._tex_data.shape == array.shape and self._tex_data.dtype == array.dtype):
self.update_texture(array)
else:
self.deleteTexture(self._texture_id)
self.create_texture(array)
self.update_widget_size()
else:
self.create_texture(array)
self.update_widget_size()
self.updateGL()
def create_texture(self, array):
"""Create texture object for given RGB or grayscale uint8 array.
"""
self.makeCurrent()
# Update texture properties
self._tex_data = array
if len(self._tex_data.shape) == 3:
self._tex_color = GL_RGB
elif len(self._tex_data.shape) == 2:
self._tex_color = GL_LUMINANCE
if self._tex_data.dtype == np.uint8:
self._tex_dtype = GL_UNSIGNED_BYTE
elif self._tex_data.dtype == np.float32:
self._tex_dtype = GL_FLOAT
else:
raise ValueError("{} dtype is not supported, "
"use uint8 or float32 instead".format(array.dtype))
# Prepare an empty texture
self._texture_id = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self._texture_id)
glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
# Установим параметры "оборачивания" текстуры
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP)
# Linear filtering (?)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexImage2D(
GL_TEXTURE_2D, 0, GL_RGB,
self._tex_data.shape[1], self._tex_data.shape[0],
0, self._tex_color, self._tex_dtype, self._tex_data)
def update_texture(self, array):
# Prevent segfault: glTexSubImage would not accept None.
self.makeCurrent()
self._tex_data = array
glTexSubImage2D(
GL_TEXTURE_2D, 0, 0, 0,
self._tex_data.shape[1], self._tex_data.shape[0],
self._tex_color, self._tex_dtype, self._tex_data)
def update_widget_size(self):
self.setBaseSize(self._tex_data.shape[1], self._tex_data.shape[0])
winsize = self.size()
self.resizeGL(winsize.width(), winsize.height())
class VideoWidget(QtGui.QWidget):
    """OpenGL video view plus a toolbar with zoom/fit controls."""

    def __init__(self, parent=None):
        super(VideoWidget, self).__init__()
        self.parent = parent
        self.vbox = QtGui.QVBoxLayout()
        self.setLayout(self.vbox)
        self.bar = QtGui.QToolBar('ToolBar')
        self.scrollableView = QtGui.QScrollArea()
        self.glWidget = GLFrame()
        self.glWidget.setSizePolicy(
            QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Ignored)
        self.scrollableView.setWidget(self.glWidget)
        self.vbox.addWidget(self.bar)
        self.vbox.addWidget(self.scrollableView)
        self.frameResNatural = self.bar.addAction('1:1', self.resNatural)
        self.frameResFit = self.bar.addAction('Fit', self.resFit)
        self.resFit()  # Fit viewport on start.

    @QtCore.Slot()
    def resNatural(self):
        """Show the frame at its native 1:1 pixel size."""
        self.scrollableView.setWidgetResizable(False)
        self.glWidget.resize(self.glWidget.baseSize())

    @QtCore.Slot()
    def resPlus(self):
        print("resPlus")

    @QtCore.Slot()
    def resMinus(self):
        print("resMinus")

    @QtCore.Slot()
    def resFit(self):
        """Scale the GL widget to fit inside the scroll area."""
        self.scrollableView.setWidgetResizable(True)
        fitted = self.glWidget.baseSize()
        fitted.scale(self.scrollableView.size(), QtCore.Qt.KeepAspectRatio)
        self.scrollableView.resize(fitted)

    def setData(self, array):
        """Forward a numpy frame to the GL widget."""
        self.glWidget.setData(array)
class VideoProcessor(QtCore.QObject):
    """Get frames and process it. Should live in separate thread."""
    # Emitted when a processed frame / histogram / assay / white point is ready.
    newframe = QtCore.Signal()
    histogramready = QtCore.Signal()
    modelGotAssay = QtCore.Signal()
    newwhitepoint = QtCore.Signal()

    def __init__(self, mmcore, parent=None):
        """
        :param mmcore: MMCore camera interface used to fetch frames.
        :param parent: Application object providing `CMicro.scale`.
        """
        super(VideoProcessor, self).__init__()
        self.parent = parent
        self.mmc = mmcore
        self.CProcessor = iptools.CellProcessor(
            scale=parent.CMicro.scale, colormap=lut.random_jet(), mp=True)
        self.HPlotter = iptools.HistogramPlotter(gradient=True)
        self.__model = statdata.StatDataModel()
        self.rgb32 = None  # Raw RGB32 frame from the camera.
        self.rgb = None    # White-balance corrected RGB frame.
        self._wb_gain = [1.0, 1.0, 1.0]  # Per-channel white-balance gains.
        self.out = None    # Last processed (visualised) frame.
        # Drives process_frame() during continuous acquisition.
        self.workTimer = QtCore.QTimer(parent=self)
        self.workTimer.setInterval(20)
        self.workTimer.timeout.connect(self.process_frame)
        self.__singleshot = False  # Snap one image flag
        self.__lock = QtCore.QMutex()

    @QtCore.Slot()
    def set_white_point(self):
        """Adopt the histogram's estimated white point as the WB gain."""
        rgb_gain = self.HPlotter.get_wp_gain(normalize=False)
        if rgb_gain is not None:
            self._wb_gain = rgb_gain
        self.newwhitepoint.emit()

    @QtCore.Slot()
    def get_white_point(self):
        """Return the current [R, G, B] white-balance gain list."""
        return self._wb_gain

    @QtCore.Slot()
    def process_frame(self):
        """Retrieve frame from buffer and process it."""
        start_time = time.time()
        with QtCore.QMutexLocker(self.__lock):
            if self.__singleshot:
                self.rgb32 = self.mmc.getImage()
                self.__singleshot = False
            else:
                if self.mmc.getRemainingImageCount() > 0:
                    self.rgb32 = self.mmc.getLastImage()
                else:
                    print('No frame')
            if self.rgb32 is not None:
                rgb = iptools.rgb32asrgb(self.rgb32)
                # WB correction before histogram calculation
                self.rgb = iptools.correct_wb(rgb, self._wb_gain)
                self.hist = self.HPlotter.plot(self.rgb)
                self.histogramready.emit()
                self.out = self.CProcessor.process(self.rgb)
                self.newframe.emit()
        delta_time = time.time() - start_time
        if delta_time != 0:
            # BUG FIX: was `print('FPS: %f') % (...)`, which printed the bare
            # format string and then raised TypeError (None % float). Also
            # reuse the already-computed delta instead of calling time.time()
            # a third time.
            print('FPS: %f' % (1. / delta_time))

    @QtCore.Slot()
    def runOnce(self):
        """Snap and process a single image (work timer must be stopped)."""
        print('Take one picture.')
        if self.workTimer.isActive():
            raise RuntimeWarning('Timer must be stopped before runOnce()!')
        self.mmc.snapImage()
        self.__singleshot = True
        self.process_frame()

    @QtCore.Slot()
    def runContinuous(self):
        """Start continuous acquisition driven by the work timer."""
        print('Start taking pictures continuously')
        if self.workTimer.isActive():
            raise RuntimeWarning('Timer must be stopped before runContinuous()!')
        self.mmc.snapImage()  # Avoid Baumer bug
        self.mmc.startContinuousSequenceAcquisition(1)
        self.workTimer.start()

    @QtCore.Slot()
    def pushAssay(self):
        """Safely save statistics and image to StatModel."""
        with QtCore.QMutexLocker(self.__lock):
            if self.__model.isSaveImage:
                self.__model.appendAssay(self.CProcessor.take_assay(),
                                         image=self.rgb)
            else:
                self.__model.appendAssay(self.CProcessor.take_assay())
            self.modelGotAssay.emit()

    def getModel(self):
        """Return the statistics data model."""
        return self.__model

    @QtCore.Slot()
    def stop(self):
        """Stop the work timer and the camera's sequence acquisition."""
        self.workTimer.stop()
        self.mmc.stopSequenceAcquisition()
        print('Video acquisition terminated.')

    @QtCore.Slot()
    def setVtype(self, value):
        print(value)
        print(self.CProcessor.vtypes[value])
        self.CProcessor.vtype = self.CProcessor.vtypes[value]

    @QtCore.Slot()
    def setScale(self, value):
        self.CProcessor.scale = value

    @QtCore.Slot()
    def setDabThresholdShift(self, value):
        self.CProcessor.th_dab_shift = value

    @QtCore.Slot()
    def setHemThresholdShift(self, value):
        self.CProcessor.th_hem_shift = value

    @QtCore.Slot()
    def setMinSize(self, value):
        self.CProcessor.min_size = value

    @QtCore.Slot()
    def setMaxSize(self, value):
        self.CProcessor.max_size = value

    @QtCore.Slot()
    def setPeakDistance(self, value):
        self.CProcessor.peak_distance = value

    @QtCore.Slot()
    def setRShift(self, value):
        self._wb_gain[0] = value

    @QtCore.Slot()
    def setGShift(self, value):
        self._wb_gain[1] = value

    @QtCore.Slot()
    def setBShift(self, value):
        self._wb_gain[2] = value
if __name__ == '__main__':
    # Manual smoke test: show a stand-alone AdjustBar widget.
    app = QtGui.QApplication(sys.argv)
    # NOTE(review): AdjustBar.__init__ takes (mmcore, prop, parent); the
    # minlim/maxlim/dtype keywords below look like a stale older signature
    # and would raise TypeError at runtime -- confirm and update this demo.
    window = AdjustBar(minlim=0, maxlim=2000, dtype=int)
    # window = AdjustBar(minlim=-1.0, maxlim=1.0, dtype=float)
    window.show()
    app.exec_()
Print the new white point to STDOUT.
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on 2014-05-16
@author: Eugene Dvoretsky
Immunopy GUI primitives.
"""
import sys
import time
import numpy as np
from PySide import QtCore
from PySide import QtGui
from PySide import QtOpenGL
from OpenGL.GL import *
from OpenGL import ERROR_ON_COPY
import iptools
import lut
import statdata
ERROR_ON_COPY = True # Raise exception on array copy or casting
# http://pyopengl.sourceforge.net/documentation/opengl_diffs.html
class AdjustBar(QtGui.QWidget):
    """Slider and spinbox widget bound to a single camera property.

    The spinboxes always contain the real property value; the slider mirrors
    it (scaled by `self.mult` for float properties).
    BUG: precision sometimes is not enough.
    """
    def __init__(self, mmcore, prop, parent=None):
        """
        :param mmcore: MMCore device interface.
        :param prop: Name of the camera property to bind to.
        """
        super(AdjustBar, self).__init__(parent)
        self.parent = parent
        self.mmc = mmcore
        self.prop = prop
        # Slider positions are ints: float properties are mapped onto the
        # slider with this multiplier.
        self.mult = 1000.0
        self.camname = self.mmc.getCameraDevice()
        self.minlim=self.mmc.getPropertyLowerLimit(self.camname, prop)
        self.maxlim=self.mmc.getPropertyUpperLimit(self.camname, prop)
        self.vbox = QtGui.QVBoxLayout(self.parent)
        self.form = QtGui.QFormLayout()
        # self.hbox.setAlignment(QtCore.Qt.AlignTop)
        self.slid = QtGui.QSlider(QtCore.Qt.Horizontal)
        if self.mmc.getPropertyType(self.camname, prop) == 3:  # 3 is int
            # Integer property: slider and spinbox share the same scale.
            self.spin = QtGui.QSpinBox()
            self.slid.setRange(int(self.minlim), int(self.maxlim))
            self.spin.setRange(int(self.minlim), int(self.maxlim))
            self.spin.setValue(int(self.mmc.getProperty(self.camname, prop)))
            self.slid.valueChanged.connect(self.spin.setValue)
            self.spin.valueChanged.connect(self.slid.setValue)
            self.slid.valueChanged.connect(self.setDevProperty)
        else:
            # Float property: slider works in scaled integer steps.
            self.spin = QtGui.QDoubleSpinBox()
            self.spin.setSingleStep(0.01)
            # Stretch slider
            self.slid.setRange(self.minlim,
                               (self.maxlim - self.minlim) * self.mult + self.minlim)
            self.spin.setRange(self.minlim, self.maxlim)
            # Prevent comma on Linux.
            self.spin.setValue(float(
                self.mmc.getProperty(self.camname, prop).replace(',', '.')))
            self.slid.valueChanged.connect(self.setAsDouble)
            self.spin.valueChanged.connect(self.setAsInt)
        self.form.addRow(prop, self.spin)
        self.setLayout(self.vbox)
        self.vbox.addLayout(self.form)
        self.vbox.addWidget(self.slid)

    @QtCore.Slot(float)
    def setAsInt(self, value):
        # NOTE(review): despite the name, this receives the spinbox's *float*
        # value, moves the slider to the matching scaled int position, and
        # pushes the value to the device.
        target = (value - self.minlim) * self.mult + self.minlim
        if self.slid.value() != target:
            self.slid.setValue(target)
        self.setDevProperty(value)

    @QtCore.Slot(int)
    def setAsDouble(self, value):
        # NOTE(review): receives the slider's *int* position and writes the
        # corresponding rounded float back into the spinbox (guarded to
        # avoid a feedback loop).
        current = round(self.spin.value(), 2)
        target = round((value - self.minlim) / self.mult + self.minlim, 2)
        if current != target:
            self.spin.setValue(target)

    @QtCore.Slot()
    def setDevProperty(self, value):
        # MMCore expects property values as strings.
        self.mmc.setProperty(self.camname, self.prop, str(value))
class MicroscopeControl(QtGui.QGroupBox):
    """Control microscope devices.

    Deliberately hard-codes camera property names ('Exposure', 'Gain',
    'Binning') instead of enumerating device properties.
    """
    # Emitted to ask the VideoProcessor (possibly in another thread) to act.
    willRunOnce = QtCore.Signal()
    willRunContinuously = QtCore.Signal()
    willStop = QtCore.Signal()
    needAutoWb = QtCore.Signal()

    def __init__(self, parent=None):
        """
        :param parent: Application object providing `mmc`, `CMicro`, `VProc`.
        """
        super(MicroscopeControl, self).__init__(parent)
        self.parent = parent
        self.setTitle('Microscope control')
        # Layouts: form (device rows), in_vbox (histogram), horizontal (WB row).
        self.vbox = QtGui.QVBoxLayout()
        self.setLayout(self.vbox)
        self.form = QtGui.QFormLayout()
        self.in_vbox = QtGui.QVBoxLayout()
        self.horizontal = QtGui.QHBoxLayout()
        self.horizontal.setAlignment(QtCore.Qt.AlignLeft)
        self.vbox.addLayout(self.form)
        self.vbox.addLayout(self.in_vbox)
        self.vbox.addLayout(self.horizontal)
        # Acquisition start/stop toggle button.
        self.streaming_btn = QtGui.QPushButton('&Start')
        self.form.addRow('Acquisition', self.streaming_btn)
        self.streaming_btn.pressed.connect(self.toggle_streaming)
        self.cont_cbx = QtGui.QCheckBox()
        self.form.addRow('Continuous', self.cont_cbx)
        # Get scales and set default.
        self.objective = QtGui.QComboBox()
        self.objective.addItems(self.parent.CMicro.get_all_scalenames())
        self.objective.setCurrentIndex(
            self.objective.findText(self.parent.CMicro.scalename))
        self.form.addRow('Objective', self.objective)
        self.objective.currentIndexChanged.connect(self.change_scalename)
        self.camname = self.parent.mmc.getCameraDevice()
        # Exposure control, limited by the camera's reported property range.
        self.exposure = QtGui.QSpinBox()
        self.exposure.setSuffix(' ms')
        self.exposure.setRange(
            self.parent.mmc.getPropertyLowerLimit(self.camname, 'Exposure'),
            self.parent.mmc.getPropertyUpperLimit(self.camname, 'Exposure'))
        self.exposure.setValue(self.parent.mmc.getExposure())
        self.exposure.valueChanged.connect(self.parent.mmc.setExposure)
        self.form.addRow('Exposure', self.exposure)
        # Gain control.
        self.gain = QtGui.QDoubleSpinBox()
        self.gain.setSingleStep(0.1)
        self.gain.setRange(
            self.parent.mmc.getPropertyLowerLimit(self.camname, 'Gain'),
            self.parent.mmc.getPropertyUpperLimit(self.camname, 'Gain'))
        self.gain.setValue(float(self.parent.mmc.getProperty(self.camname, 'Gain')))
        self.gain.valueChanged.connect(self.set_gain)
        self.form.addRow('Gain', self.gain)
        # Binning selector, populated from the camera's allowed values.
        self.binning = QtGui.QComboBox()
        self.binning.addItems(self.parent.mmc.getAllowedPropertyValues(self.camname, 'Binning'))
        self.binning.setCurrentIndex(
            self.binning.findText(self.parent.mmc.getProperty(self.camname, 'Binning')))
        self.binning.currentIndexChanged.connect(self.set_binning)
        self.form.addRow('Binning', self.binning)
        # Live histogram preview; pixmap is set by setHistogram().
        self.histview = QtGui.QLabel('Histogram')
        self.histview.setAlignment(QtCore.Qt.AlignCenter)
        self.histview.setFixedSize(256, 100)
        self.in_vbox.addWidget(self.histview)
        # Manual white-balance gain spinboxes for R, G, B channels.
        self.horizontal.addWidget(QtGui.QLabel('R'))
        self.sbx_adjust_r = QtGui.QDoubleSpinBox()
        self.sbx_adjust_r.setSingleStep(0.01)
        self.sbx_adjust_r.setRange(-2.0, 2.0)
        self.horizontal.addWidget(self.sbx_adjust_r)
        self.horizontal.addWidget(QtGui.QLabel('G'))
        self.sbx_adjust_g = QtGui.QDoubleSpinBox()
        self.sbx_adjust_g.setSingleStep(0.01)
        self.sbx_adjust_g.setRange(-2.0, 2.0)
        self.horizontal.addWidget(self.sbx_adjust_g)
        self.horizontal.addWidget(QtGui.QLabel('B'))
        self.sbx_adjust_b = QtGui.QDoubleSpinBox()
        self.sbx_adjust_b.setSingleStep(0.01)
        self.sbx_adjust_b.setRange(-2.0, 2.0)
        self.horizontal.addWidget(self.sbx_adjust_b)
        # Automatic white balance: expects an empty field of view.
        self.btn_autowb = QtGui.QPushButton('Auto')
        self.btn_autowb.setToolTip(
            "Please remove slice and click the button")
        self.btn_autowb.setStyleSheet("padding: 3px;")
        self.horizontal.addWidget(self.btn_autowb)
        self.btn_resetwb = QtGui.QPushButton('Reset')
        self.btn_resetwb.setToolTip("Reset channels shifts to zero")
        self.btn_resetwb.setStyleSheet("padding: 3px;")
        self.btn_resetwb.clicked.connect(self.resetWbControls)
        self.horizontal.addWidget(self.btn_resetwb)
        self.btn_autowb.clicked.connect(self.autowb)
        self.updateWbControls()
        # Wire acquisition requests through signals (queued across threads).
        self.willRunOnce.connect(self.parent.VProc.runOnce)
        self.willRunContinuously.connect(self.parent.VProc.runContinuous)
        self.willStop.connect(self.parent.VProc.stop)

    @QtCore.Slot()
    def toggle_streaming(self):
        """Start (single or continuous) or stop acquisition, updating the UI."""
        if not self.parent.mmc.isSequenceRunning():
            if self.cont_cbx.checkState() == QtCore.Qt.Checked:
                self.willRunContinuously.emit()
                self.streaming_btn.setText('&Stop')
                # Mode and binning must not change mid-stream.
                self.cont_cbx.setEnabled(False)
                self.binning.setEnabled(False)
            else:
                self.willRunOnce.emit()
        else:
            self.willStop.emit()
            self.streaming_btn.setText('&Start')
            self.cont_cbx.setEnabled(True)
            self.binning.setEnabled(True)

    @QtCore.Slot(int)
    def change_scalename(self, index):
        """Propagate objective selection to the microscope model."""
        self.parent.CMicro.scalename = str(self.objective.currentText())

    @QtCore.Slot(float)
    def set_gain(self, value):
        # MMCore expects property values as strings.
        self.parent.mmc.setProperty(self.camname, 'Gain', str(value))

    @QtCore.Slot(int)
    def set_binning(self, index):
        value = self.binning.itemText(index)
        self.parent.mmc.setProperty(self.camname, 'Binning', str(value))

    @QtCore.Slot()
    def setHistogram(self):
        """Show the latest histogram image produced by the VideoProcessor."""
        img = self.parent.VProc.hist
        image = QtGui.QImage(img, img.shape[1], img.shape[0], QtGui.QImage.Format_ARGB32)
        self.histview.setPixmap(QtGui.QPixmap(image))

    @QtCore.Slot()
    def updateWbControls(self):
        """Sync the R/G/B spinboxes with the processor's white point."""
        r, g, b = self.parent.VProc.get_white_point()
        self.sbx_adjust_r.setValue(r)
        self.sbx_adjust_g.setValue(g)
        self.sbx_adjust_b.setValue(b)

    def resetWbControls(self):
        """Reset white-balance gains to neutral (1.0) and re-enable Auto."""
        self.sbx_adjust_r.setValue(1.0)
        self.sbx_adjust_g.setValue(1.0)
        self.sbx_adjust_b.setValue(1.0)
        self.btn_autowb.setEnabled(True)

    def autowb(self):
        """Request automatic white balance; stays disabled until Reset."""
        self.btn_autowb.setEnabled(False)
        self.needAutoWb.emit()
class AnalysisControl(QtGui.QGroupBox):
    """Controls for the image-analysis workflow (cell segmentation)."""

    def __init__(self, parent=None):
        super(AnalysisControl, self).__init__(parent)
        self.parent = parent
        self.setTitle('Analysis control')
        self.vbox = QtGui.QVBoxLayout()
        self.vbox.setAlignment(QtCore.Qt.AlignTop)
        self.setLayout(self.vbox)
        self.in_vbox = QtGui.QVBoxLayout()
        self.form = QtGui.QFormLayout()
        self.vbox.addLayout(self.in_vbox)
        self.vbox.addLayout(self.form)
        processor = self.parent.VProc.CProcessor
        # Visualisation type selector, preset to the processor's current type.
        self.vtype = QtGui.QComboBox()
        self.vtype.addItems(processor.vtypes)
        self.vtype.setCurrentIndex(self.vtype.findText(processor.vtype))
        self.form.addRow('VizType', self.vtype)
        # Segmentation parameter spinboxes: (suffix, min, max, current value).
        self.sizemax = self._make_spinbox(' px', 0, 9999999, processor.max_size)
        self.form.addRow('Max size', self.sizemax)
        self.sizemin = self._make_spinbox(' px', 0, 9999, processor.min_size)
        self.form.addRow('Min size', self.sizemin)
        self.peak_dist = self._make_spinbox(' px', 0, 9999, processor.peak_distance)
        self.form.addRow('Peak distance', self.peak_dist)
        self.dab_th_shift = self._make_spinbox(' %', -100, 100, processor.th_dab_shift)
        self.form.addRow('DAB threshold shift', self.dab_th_shift)
        self.hem_th_shift = self._make_spinbox(' %', -100, 100, processor.th_hem_shift)
        self.form.addRow('HEM threshold shift', self.hem_th_shift)

    @staticmethod
    def _make_spinbox(suffix, minimum, maximum, value):
        """Return a QSpinBox configured with a suffix, range and value."""
        box = QtGui.QSpinBox()
        box.setSuffix(suffix)
        box.setRange(minimum, maximum)
        box.setValue(value)
        return box
class GLFrame(QtOpenGL.QGLWidget):
    """OpenGL based video output Qt widget.

    Put RGB image to texture and show it with OpenGL.
    * Viewport resolution is determined by the window size.
    * Aspect ratio is fixed at 4:3 (depends on `setBaseSize()` at the
      moment the texture is installed).
    """
    def __init__(self):
        super(GLFrame, self).__init__()
        self._tex_data = None    # Last numpy array uploaded as a texture.
        self._texture_id = None  # OpenGL texture name; None until first frame.
        self.rect = QtCore.QRectF(-1, -1, 2, 2)  # x, y, w, h

    def initializeGL(self):
        glClearColor(0.85, 0.85, 0.85, 1.0)  # Like Qt background
        glEnable(GL_TEXTURE_2D)

    def paintGL(self):
        """Replace old texture data and show it on screen."""
        # Nothing to draw until the first texture has been created.
        if self._texture_id is not None:
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
            self.drawTexture(self.rect, self._texture_id)
            # glBegin(GL_QUADS)
            # glTexCoord2f(0, 0); glVertex3f(-1.0, 1.0, 0.0); # Top left (w,h,d)
            # glTexCoord2f(1, 0); glVertex3f( 1.0, 1.0, 0.0); # Top right
            # glTexCoord2f(1, 1); glVertex3f( 1.0,-1.0, 0.0); # Bottom right
            # glTexCoord2f(0, 1); glVertex3f(-1.0,-1.0, 0.0); # Bottom left
            # glEnd()

    def resizeGL(self, width, height):
        """Keep aspect ratio in viewport."""
        # Scale the texture's base size into the new window, preserving aspect.
        widget_size = self.baseSize()
        widget_size.scale(width, height, QtCore.Qt.KeepAspectRatio)
        glViewport(0, 0, widget_size.width(), widget_size.height())
        # self.resize(widget_size)

    def setData(self, array):
        """Set numpy array as new texture to widget.

        Reuses the existing texture object when shape and dtype match;
        otherwise deletes it, recreates the texture and refits the widget.
        """
        # self.makeCurrent()
        if self._tex_data is not None:
            if (self._tex_data.shape == array.shape and self._tex_data.dtype == array.dtype):
                self.update_texture(array)
            else:
                self.deleteTexture(self._texture_id)
                self.create_texture(array)
                self.update_widget_size()
        else:
            self.create_texture(array)
            self.update_widget_size()
        self.updateGL()

    def create_texture(self, array):
        """Create texture object for given RGB or grayscale uint8/float32 array.

        Raises ValueError for any other dtype.
        """
        self.makeCurrent()
        # Update texture properties
        self._tex_data = array
        # Pixel format follows array rank: 3-D -> RGB, 2-D -> grayscale.
        if len(self._tex_data.shape) == 3:
            self._tex_color = GL_RGB
        elif len(self._tex_data.shape) == 2:
            self._tex_color = GL_LUMINANCE
        if self._tex_data.dtype == np.uint8:
            self._tex_dtype = GL_UNSIGNED_BYTE
        elif self._tex_data.dtype == np.float32:
            self._tex_dtype = GL_FLOAT
        else:
            raise ValueError("{} dtype is not supported, "
                             "use uint8 or float32 instead".format(array.dtype))
        # Prepare an empty texture
        self._texture_id = glGenTextures(1)
        glBindTexture(GL_TEXTURE_2D, self._texture_id)
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
        # Set texture wrapping parameters.
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP)
        # Linear filtering (?)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
        # NOTE(review): internal format is hard-coded to GL_RGB even when the
        # source data is GL_LUMINANCE -- confirm this conversion is intended.
        glTexImage2D(
            GL_TEXTURE_2D, 0, GL_RGB,
            self._tex_data.shape[1], self._tex_data.shape[0],
            0, self._tex_color, self._tex_dtype, self._tex_data)

    def update_texture(self, array):
        """Upload new pixels into the existing texture object (same shape/dtype)."""
        # Prevent segfault: glTexSubImage would not accept None.
        self.makeCurrent()
        self._tex_data = array
        glTexSubImage2D(
            GL_TEXTURE_2D, 0, 0, 0,
            self._tex_data.shape[1], self._tex_data.shape[0],
            self._tex_color, self._tex_dtype, self._tex_data)

    def update_widget_size(self):
        """Adopt the texture's pixel size as the widget base size and re-fit."""
        self.setBaseSize(self._tex_data.shape[1], self._tex_data.shape[0])
        winsize = self.size()
        self.resizeGL(winsize.width(), winsize.height())
class VideoWidget(QtGui.QWidget):
    """OpenGL video view plus a toolbar with zoom/fit controls."""

    def __init__(self, parent=None):
        super(VideoWidget, self).__init__()
        self.parent = parent
        self.vbox = QtGui.QVBoxLayout()
        self.setLayout(self.vbox)
        self.bar = QtGui.QToolBar('ToolBar')
        self.scrollableView = QtGui.QScrollArea()
        self.glWidget = GLFrame()
        self.glWidget.setSizePolicy(
            QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Ignored)
        self.scrollableView.setWidget(self.glWidget)
        self.vbox.addWidget(self.bar)
        self.vbox.addWidget(self.scrollableView)
        self.frameResNatural = self.bar.addAction('1:1', self.resNatural)
        self.frameResFit = self.bar.addAction('Fit', self.resFit)
        self.resFit()  # Fit viewport on start.

    @QtCore.Slot()
    def resNatural(self):
        """Show the frame at its native 1:1 pixel size."""
        self.scrollableView.setWidgetResizable(False)
        self.glWidget.resize(self.glWidget.baseSize())

    @QtCore.Slot()
    def resPlus(self):
        print("resPlus")

    @QtCore.Slot()
    def resMinus(self):
        print("resMinus")

    @QtCore.Slot()
    def resFit(self):
        """Scale the GL widget to fit inside the scroll area."""
        self.scrollableView.setWidgetResizable(True)
        fitted = self.glWidget.baseSize()
        fitted.scale(self.scrollableView.size(), QtCore.Qt.KeepAspectRatio)
        self.scrollableView.resize(fitted)

    def setData(self, array):
        """Forward a numpy frame to the GL widget."""
        self.glWidget.setData(array)
class VideoProcessor(QtCore.QObject):
    """Get frames and process it. Should live in separate thread."""
    # Emitted when a processed frame / histogram / assay / white point is ready.
    newframe = QtCore.Signal()
    histogramready = QtCore.Signal()
    modelGotAssay = QtCore.Signal()
    newwhitepoint = QtCore.Signal()

    def __init__(self, mmcore, parent=None):
        """
        :param mmcore: MMCore camera interface used to fetch frames.
        :param parent: Application object providing `CMicro.scale`.
        """
        super(VideoProcessor, self).__init__()
        self.parent = parent
        self.mmc = mmcore
        self.CProcessor = iptools.CellProcessor(
            scale=parent.CMicro.scale, colormap=lut.random_jet(), mp=True)
        self.HPlotter = iptools.HistogramPlotter(gradient=True)
        self.__model = statdata.StatDataModel()
        self.rgb32 = None  # Raw RGB32 frame from the camera.
        self.rgb = None    # White-balance corrected RGB frame.
        self._wb_gain = [1.0, 1.0, 1.0]  # Per-channel white-balance gains.
        self.out = None    # Last processed (visualised) frame.
        # Drives process_frame() during continuous acquisition.
        self.workTimer = QtCore.QTimer(parent=self)
        self.workTimer.setInterval(20)
        self.workTimer.timeout.connect(self.process_frame)
        self.__singleshot = False  # Snap one image flag
        self.__lock = QtCore.QMutex()

    @QtCore.Slot()
    def set_white_point(self):
        """Adopt the histogram's estimated white point as the WB gain."""
        rgb_gain = self.HPlotter.get_wp_gain(normalize=False)
        if rgb_gain is not None:
            self._wb_gain = rgb_gain
            print("New white point: {}".format(str(self._wb_gain)))
        self.newwhitepoint.emit()

    @QtCore.Slot()
    def get_white_point(self):
        """Return the current [R, G, B] white-balance gain list."""
        return self._wb_gain

    @QtCore.Slot()
    def process_frame(self):
        """Retrieve frame from buffer and process it."""
        start_time = time.time()
        with QtCore.QMutexLocker(self.__lock):
            if self.__singleshot:
                self.rgb32 = self.mmc.getImage()
                self.__singleshot = False
            else:
                if self.mmc.getRemainingImageCount() > 0:
                    self.rgb32 = self.mmc.getLastImage()
                else:
                    print('No frame')
            if self.rgb32 is not None:
                rgb = iptools.rgb32asrgb(self.rgb32)
                # WB correction before histogram calculation
                self.rgb = iptools.correct_wb(rgb, self._wb_gain)
                self.hist = self.HPlotter.plot(self.rgb)
                self.histogramready.emit()
                self.out = self.CProcessor.process(self.rgb)
                self.newframe.emit()
        delta_time = time.time() - start_time
        if delta_time != 0:
            # BUG FIX: was `print('FPS: %f') % (...)`, which printed the bare
            # format string and then raised TypeError (None % float). Also
            # reuse the already-computed delta instead of calling time.time()
            # a third time.
            print('FPS: %f' % (1. / delta_time))

    @QtCore.Slot()
    def runOnce(self):
        """Snap and process a single image (work timer must be stopped)."""
        print('Take one picture.')
        if self.workTimer.isActive():
            raise RuntimeWarning('Timer must be stopped before runOnce()!')
        self.mmc.snapImage()
        self.__singleshot = True
        self.process_frame()

    @QtCore.Slot()
    def runContinuous(self):
        """Start continuous acquisition driven by the work timer."""
        print('Start taking pictures continuously')
        if self.workTimer.isActive():
            raise RuntimeWarning('Timer must be stopped before runContinuous()!')
        self.mmc.snapImage()  # Avoid Baumer bug
        self.mmc.startContinuousSequenceAcquisition(1)
        self.workTimer.start()

    @QtCore.Slot()
    def pushAssay(self):
        """Safely save statistics and image to StatModel."""
        with QtCore.QMutexLocker(self.__lock):
            if self.__model.isSaveImage:
                self.__model.appendAssay(self.CProcessor.take_assay(),
                                         image=self.rgb)
            else:
                self.__model.appendAssay(self.CProcessor.take_assay())
            self.modelGotAssay.emit()

    def getModel(self):
        """Return the statistics data model."""
        return self.__model

    @QtCore.Slot()
    def stop(self):
        """Stop the work timer and the camera's sequence acquisition."""
        self.workTimer.stop()
        self.mmc.stopSequenceAcquisition()
        print('Video acquisition terminated.')

    @QtCore.Slot()
    def setVtype(self, value):
        print(value)
        print(self.CProcessor.vtypes[value])
        self.CProcessor.vtype = self.CProcessor.vtypes[value]

    @QtCore.Slot()
    def setScale(self, value):
        self.CProcessor.scale = value

    @QtCore.Slot()
    def setDabThresholdShift(self, value):
        self.CProcessor.th_dab_shift = value

    @QtCore.Slot()
    def setHemThresholdShift(self, value):
        self.CProcessor.th_hem_shift = value

    @QtCore.Slot()
    def setMinSize(self, value):
        self.CProcessor.min_size = value

    @QtCore.Slot()
    def setMaxSize(self, value):
        self.CProcessor.max_size = value

    @QtCore.Slot()
    def setPeakDistance(self, value):
        self.CProcessor.peak_distance = value

    @QtCore.Slot()
    def setRShift(self, value):
        self._wb_gain[0] = value

    @QtCore.Slot()
    def setGShift(self, value):
        self._wb_gain[1] = value

    @QtCore.Slot()
    def setBShift(self, value):
        self._wb_gain[2] = value
if __name__ == '__main__':
    # Manual smoke test: show a stand-alone AdjustBar widget.
    app = QtGui.QApplication(sys.argv)
    # NOTE(review): AdjustBar.__init__ takes (mmcore, prop, parent); the
    # minlim/maxlim/dtype keywords below look like a stale older signature
    # and would raise TypeError at runtime -- confirm and update this demo.
    window = AdjustBar(minlim=0, maxlim=2000, dtype=int)
    # window = AdjustBar(minlim=-1.0, maxlim=1.0, dtype=float)
    window.show()
    app.exec_()
|
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import numpy as np
import scipy.sparse as sparse
import ctypes as c
import multiprocessing as mp
def get_label_based_random_walk_matrix(adjacency_matrix, labelled_nodes, label_absorption_probability):
    """
    Returns the label-absorbing random walk transition probability matrix.

    Inputs:  - adjacency_matrix: A sparse matrix that contains the adjacency matrix of the graph.
             - labelled_nodes: Array-like of node indices that carry labels.
             - label_absorption_probability: Probability in [0, 1] that the walker
               is absorbed (stays in place) when it steps on a labelled node.
    Outputs: - rw_transition: Sparse CSR transition probability matrix.
             - out_degree: 1-D float64 array of node out-degrees.
             - in_degree: 1-D float64 array of node in-degrees.
    """
    # Turn to sparse.csr_matrix format for faster row access.  copy=True (as in
    # get_natural_random_walk_matrix) so the in-place row normalisation below
    # does not mutate the caller's matrix when it is already CSR float64.
    rw_transition = sparse.csr_matrix(adjacency_matrix, dtype=np.float64, copy=True)

    # Sum along the two axes to get out-degree and in-degree, respectively.
    out_degree = rw_transition.sum(axis=1)
    in_degree = rw_transition.sum(axis=0)

    # Row-normalise: divide each row's stored entries by the row's out-degree.
    for i in np.arange(rw_transition.shape[0]):
        row = slice(rw_transition.indptr[i], rw_transition.indptr[i + 1])
        rw_transition.data[row] = rw_transition.data[row]/out_degree[i]

    out_degree = np.array(out_degree).astype(np.float64).reshape(out_degree.size)
    in_degree = np.array(in_degree).astype(np.float64).reshape(in_degree.size)

    # When the walker reaches a labelled node, mix the natural transition row
    # with the identity row: with probability p the walker is absorbed.
    diag = np.zeros_like(out_degree)
    diag[labelled_nodes] = 1.0
    diag = sparse.dia_matrix((diag, [0]), shape=(in_degree.size, in_degree.size))
    diag = sparse.csr_matrix(diag)
    rw_transition[labelled_nodes, :] = (1-label_absorption_probability)*rw_transition[labelled_nodes, :] + label_absorption_probability*diag[labelled_nodes, :]

    return rw_transition, out_degree, in_degree
def get_natural_random_walk_matrix(adjacency_matrix, make_shared=False):
    """
    Returns the natural random walk transition probability matrix given the adjacency matrix.

    Inputs:  - adjacency_matrix: A sparse matrix that contains the adjacency matrix of the graph.
             - make_shared: If True, back the CSR arrays (and copy the degree
               vectors) with multiprocessing shared memory so worker processes
               can read them without pickling/copying.
    Outputs: - rw_transition: Sparse CSR transition probability matrix.
             - out_degree: 1-D float64 array of node out-degrees.
             - in_degree: 1-D float64 array of node in-degrees.
    """
    # Turn to sparse.csr_matrix format for faster row access.
    rw_transition = sparse.csr_matrix(adjacency_matrix, dtype=np.float64, copy=True)

    # Sum along the two axes to get out-degree and in-degree, respectively.
    out_degree = rw_transition.sum(axis=1)
    in_degree = rw_transition.sum(axis=0)

    # Row-normalise each row's stored entries by the row's out-degree.
    for i in np.arange(rw_transition.shape[0]):
        rw_transition.data[rw_transition.indptr[i]: rw_transition.indptr[i + 1]] =\
            rw_transition.data[rw_transition.indptr[i]: rw_transition.indptr[i + 1]]/out_degree[i]
    # Keep column indices in canonical sorted order; some scipy routines
    # assume it, and the shared-memory copy below preserves whatever we have.
    rw_transition.sort_indices()

    out_degree = np.array(out_degree).astype(np.float64).reshape(out_degree.size)
    in_degree = np.array(in_degree).astype(np.float64).reshape(in_degree.size)

    if make_shared:
        number_of_nodes = adjacency_matrix.shape[0]

        # BUG FIX: `mp.sharedctypes.Array` raises AttributeError because
        # `import multiprocessing` does not import the sharedctypes submodule;
        # `mp.Array` is the public synchronized equivalent.
        out_degree_c = mp.Array(c.c_double, number_of_nodes)
        in_degree_c = mp.Array(c.c_double, number_of_nodes)
        out_degree_shared = np.frombuffer(out_degree_c.get_obj(), dtype=np.float64, count=number_of_nodes)
        in_degree_shared = np.frombuffer(in_degree_c.get_obj(), dtype=np.float64, count=number_of_nodes)
        out_degree_shared[:] = out_degree[:]
        in_degree_shared[:] = in_degree[:]
        # NOTE(review): the shared degree buffers are filled but the function
        # still returns the non-shared arrays (values identical) -- confirm
        # whether the shared-backed arrays should be returned instead.

        indices_c = mp.Array(c.c_int64, rw_transition.indices.size)
        indptr_c = mp.Array(c.c_int64, rw_transition.indptr.size)
        data_c = mp.Array(c.c_double, rw_transition.data.size)
        indices_shared = np.frombuffer(indices_c.get_obj(), dtype=np.int64, count=rw_transition.indices.size)
        indptr_shared = np.frombuffer(indptr_c.get_obj(), dtype=np.int64, count=rw_transition.indptr.size)
        data_shared = np.frombuffer(data_c.get_obj(), dtype=np.float64, count=rw_transition.data.size)
        indices_shared[:] = rw_transition.indices[:]
        indptr_shared[:] = rw_transition.indptr[:]
        data_shared[:] = rw_transition.data[:]
        # Rebuild the CSR matrix on top of the shared buffers.
        rw_transition = sparse.csr_matrix((data_shared,
                                           indices_shared,
                                           indptr_shared),
                                          shape=rw_transition.shape)

    return rw_transition, out_degree, in_degree
Transition matrix fix: use `multiprocessing.Array` for the shared-memory buffers (the `sharedctypes` submodule is not imported by `import multiprocessing`) and sort the CSR indices after row normalisation.
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import numpy as np
import scipy.sparse as sparse
import ctypes as c
import multiprocessing as mp
def get_label_based_random_walk_matrix(adjacency_matrix, labelled_nodes, label_absorption_probability):
    """
    Returns the label-absorbing random walk transition probability matrix.

    Inputs:  - adjacency_matrix: A sparse matrix that contains the adjacency matrix of the graph.
             - labelled_nodes: Array-like of node indices that carry labels.
             - label_absorption_probability: Probability in [0, 1] that the walker
               is absorbed (stays in place) when it steps on a labelled node.
    Outputs: - rw_transition: Sparse CSR transition probability matrix.
             - out_degree: 1-D float64 array of node out-degrees.
             - in_degree: 1-D float64 array of node in-degrees.
    """
    # Turn to sparse.csr_matrix format for faster row access.  copy=True (as in
    # get_natural_random_walk_matrix) so the in-place row normalisation below
    # does not mutate the caller's matrix when it is already CSR float64.
    rw_transition = sparse.csr_matrix(adjacency_matrix, dtype=np.float64, copy=True)

    # Sum along the two axes to get out-degree and in-degree, respectively.
    out_degree = rw_transition.sum(axis=1)
    in_degree = rw_transition.sum(axis=0)

    # Row-normalise: divide each row's stored entries by the row's out-degree.
    for i in np.arange(rw_transition.shape[0]):
        row = slice(rw_transition.indptr[i], rw_transition.indptr[i + 1])
        rw_transition.data[row] = rw_transition.data[row]/out_degree[i]

    out_degree = np.array(out_degree).astype(np.float64).reshape(out_degree.size)
    in_degree = np.array(in_degree).astype(np.float64).reshape(in_degree.size)

    # When the walker reaches a labelled node, mix the natural transition row
    # with the identity row: with probability p the walker is absorbed.
    diag = np.zeros_like(out_degree)
    diag[labelled_nodes] = 1.0
    diag = sparse.dia_matrix((diag, [0]), shape=(in_degree.size, in_degree.size))
    diag = sparse.csr_matrix(diag)
    rw_transition[labelled_nodes, :] = (1-label_absorption_probability)*rw_transition[labelled_nodes, :] + label_absorption_probability*diag[labelled_nodes, :]

    return rw_transition, out_degree, in_degree
def get_natural_random_walk_matrix(adjacency_matrix, make_shared=False):
    """
    Returns the natural random walk transition probability matrix given the adjacency matrix.

    Input:  - A: A sparse matrix that contains the adjacency matrix of the graph.
    Output: - W: A sparse matrix that contains the natural random walk transition probability matrix.
    """
    # CSR layout gives cheap per-row slicing, which the normalization loop needs.
    transition = sparse.csr_matrix(adjacency_matrix, dtype=np.float64, copy=True)

    # Weighted degrees: summing rows gives out-degree, summing columns gives in-degree.
    out_degree = transition.sum(axis=1)
    in_degree = transition.sum(axis=0)

    # Divide every stored value of row i by that row's out-degree.
    for i in np.arange(transition.shape[0]):
        start, stop = transition.indptr[i], transition.indptr[i + 1]
        transition.data[start: stop] = transition.data[start: stop]/out_degree[i]
    transition.sort_indices()

    # Collapse the matrix-typed sums into flat float64 vectors.
    out_degree = np.array(out_degree).astype(np.float64).reshape(out_degree.size)
    in_degree = np.array(in_degree).astype(np.float64).reshape(in_degree.size)

    if make_shared:
        node_count = adjacency_matrix.shape[0]

        # Copy the degree vectors into multiprocessing-backed buffers.
        # NOTE(review): these shared degree copies are filled but not returned;
        # presumably callers re-attach to the mp.Array elsewhere — confirm.
        out_buffer = mp.Array(c.c_double, node_count)
        in_buffer = mp.Array(c.c_double, node_count)
        shared_out = np.frombuffer(out_buffer.get_obj(), dtype=np.float64, count=node_count)
        shared_in = np.frombuffer(in_buffer.get_obj(), dtype=np.float64, count=node_count)
        shared_out[:] = out_degree[:]
        shared_in[:] = in_degree[:]

        # Mirror the three CSR component arrays into shared memory.
        index_buffer = mp.Array(c.c_int64, transition.indices.size)
        pointer_buffer = mp.Array(c.c_int64, transition.indptr.size)
        value_buffer = mp.Array(c.c_double, transition.data.size)
        shared_indices = np.frombuffer(index_buffer.get_obj(), dtype=np.int64, count=transition.indices.size)
        shared_indptr = np.frombuffer(pointer_buffer.get_obj(), dtype=np.int64, count=transition.indptr.size)
        shared_data = np.frombuffer(value_buffer.get_obj(), dtype=np.float64, count=transition.data.size)
        shared_indices[:] = transition.indices[:]
        shared_indptr[:] = transition.indptr[:]
        shared_data[:] = transition.data[:]

        # Rebuild the transition matrix on top of the shared buffers.
        transition = sparse.csr_matrix((shared_data, shared_indices, shared_indptr),
                                       shape=transition.shape)

    return transition, out_degree, in_degree
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, ESS LLP and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
import json
from frappe.utils import getdate, add_days
from frappe import _
import datetime
from frappe.core.doctype.sms_settings.sms_settings import send_sms
from erpnext.hr.doctype.employee.employee import is_holiday
from erpnext.healthcare.doctype.healthcare_settings.healthcare_settings import get_receivable_account,get_income_account
from erpnext.healthcare.utils import validity_exists, service_item_and_practitioner_charge
class PatientAppointment(Document):
    def on_update(self):
        """Mark the appointment 'Open' when it is scheduled for today."""
        today = datetime.date.today()
        appointment_date = getdate(self.appointment_date)
        # If appointment created for today set as open
        if today == appointment_date:
            frappe.db.set_value("Patient Appointment", self.name, "status", "Open")
            self.reload()  # pick up the status written directly to the DB

    def after_insert(self):
        """Post-creation bookkeeping: flag the prescription as booked, consume a
        fee-validity visit when one applies, send a confirmation SMS and
        optionally auto-invoice the appointment."""
        if self.procedure_prescription:
            frappe.db.set_value("Procedure Prescription", self.procedure_prescription, "appointment_booked", True)
        # Check fee validity exists
        appointment = self
        validity_exist = validity_exists(appointment.practitioner, appointment.patient)
        if validity_exist:
            fee_validity = frappe.get_doc("Fee Validity", validity_exist[0][0])
            # Check if the validity is valid
            appointment_date = getdate(appointment.appointment_date)
            if (fee_validity.valid_till >= appointment_date) and (fee_validity.visited < fee_validity.max_visit):
                # Consume one visit from the validity window.
                visited = fee_validity.visited + 1
                frappe.db.set_value("Fee Validity", fee_validity.name, "visited", visited)
                if fee_validity.ref_invoice:
                    # The earlier invoice already covers this visit.
                    frappe.db.set_value("Patient Appointment", appointment.name, "invoiced", True)
                frappe.msgprint(_("{0} has fee validity till {1}").format(appointment.patient, fee_validity.valid_till))
        confirm_sms(self)
        # Auto-invoice only when enabled in settings and the visit is not already covered.
        if frappe.db.get_value("Healthcare Settings", None, "manage_appointment_invoice_automatically") == '1' and \
            frappe.db.get_value("Patient Appointment", self.name, "invoiced") != 1:
            invoice_appointment(self)
@frappe.whitelist()
def invoice_appointment(appointment_doc):
    """Create and submit a paid POS Sales Invoice for the appointment.

    Returns False when the appointment has not been saved yet; otherwise
    submits the invoice and shows a confirmation message.
    """
    if not appointment_doc.name:
        return False
    sales_invoice = frappe.new_doc("Sales Invoice")
    sales_invoice.customer = frappe.get_value("Patient", appointment_doc.patient, "customer")
    sales_invoice.appointment = appointment_doc.name
    sales_invoice.due_date = getdate()
    sales_invoice.is_pos = True
    sales_invoice.company = appointment_doc.company
    sales_invoice.debit_to = get_receivable_account(appointment_doc.company)

    # One invoice line for the practitioner's consulting charge, linked back
    # to this appointment so the invoice can be found from it later.
    item_line = sales_invoice.append("items")
    service_item, practitioner_charge = service_item_and_practitioner_charge(appointment_doc)
    item_line.item_code = service_item
    item_line.description = "Consulting Charges: " + appointment_doc.practitioner
    item_line.income_account = get_income_account(appointment_doc.practitioner, appointment_doc.company)
    item_line.rate = practitioner_charge
    item_line.amount = practitioner_charge
    item_line.qty = 1
    item_line.reference_dt = "Patient Appointment"
    item_line.reference_dn = appointment_doc.name

    payments_line = sales_invoice.append("payments")
    payments_line.mode_of_payment = appointment_doc.mode_of_payment
    payments_line.amount = appointment_doc.paid_amount

    sales_invoice.set_missing_values(for_validate = True)
    sales_invoice.save(ignore_permissions=True)
    sales_invoice.submit()
    # Translate the template first, THEN interpolate; _("...".format(...))
    # would look up the already-interpolated string and never match a translation.
    frappe.msgprint(_("Sales Invoice {0} created as paid").format(sales_invoice.name), alert=True)
def appointment_cancel(appointment_id):
    """Cancel an appointment and unwind its billing.

    If the appointment was invoiced, either cancel the linked Sales Invoice
    (when auto-invoicing manages it) or give back the consumed fee-validity
    visit and ask the user to cancel the invoice manually.
    """
    appointment = frappe.get_doc("Patient Appointment", appointment_id)
    # If invoiced --> fee_validity update with -1 visit
    if appointment.invoiced:
        sales_invoice = exists_sales_invoice(appointment)
        if sales_invoice and cancel_sales_invoice(sales_invoice):
            # Translate first, then interpolate, so the message stays translatable.
            frappe.msgprint(
                _("Appointment {0} and Sales Invoice {1} cancelled").format(appointment.name, sales_invoice.name)
            )
        else:
            validity = validity_exists(appointment.practitioner, appointment.patient)
            if validity:
                fee_validity = frappe.get_doc("Fee Validity", validity[0][0])
                if appointment_valid_in_fee_validity(appointment, fee_validity.valid_till, True, fee_validity.ref_invoice):
                    # Give the visit back to the fee validity this appointment consumed.
                    visited = fee_validity.visited - 1
                    frappe.db.set_value("Fee Validity", fee_validity.name, "visited", visited)
                    frappe.msgprint(
                        _("Appointment cancelled, Please review and cancel the invoice {0}").format(fee_validity.ref_invoice)
                    )
                else:
                    frappe.msgprint(_("Appointment cancelled"))
            else:
                frappe.msgprint(_("Appointment cancelled"))
    else:
        frappe.msgprint(_("Appointment cancelled"))
def appointment_valid_in_fee_validity(appointment, valid_end_date, invoiced, ref_invoice):
    """Return True when `appointment` falls inside the fee-validity window paid
    for by `ref_invoice`.

    The window runs `valid_days` (from Healthcare Settings) back from
    `valid_end_date`; at most `max_visit` appointments are considered.
    """
    valid_days = frappe.db.get_value("Healthcare Settings", None, "valid_days")
    max_visit = frappe.db.get_value("Healthcare Settings", None, "max_visit")
    valid_start_date = add_days(getdate(valid_end_date), -int(valid_days))
    # Appointments which share the fee validity window with this appointment.
    # BUG FIX: a dict literal cannot hold 'appointment_date' twice — the original
    # "<=" upper-bound filter was silently overwritten by the ">=" one; use a
    # single "between" filter so BOTH bounds apply.
    appointments = frappe.get_list("Patient Appointment", {
        'patient': appointment.patient,
        'invoiced': invoiced,
        'appointment_date': ("between", [getdate(valid_start_date), getdate(valid_end_date)]),
        'practitioner': appointment.practitioner
    }, order_by="appointment_date desc", limit=int(max_visit))
    if appointments and len(appointments) > 0:
        appointment_obj = appointments[len(appointments)-1]
        sales_invoice = exists_sales_invoice(appointment_obj)
        # exists_sales_invoice may return False; guard before reading .name.
        if sales_invoice and sales_invoice.name == ref_invoice:
            return True
    return False
def cancel_sales_invoice(sales_invoice):
    """Cancel the invoice when auto-invoicing manages it and it only carries the
    single appointment line; return whether the cancellation happened."""
    auto_invoicing = frappe.db.get_value(
        "Healthcare Settings", None, "manage_appointment_invoice_automatically")
    if auto_invoicing == '1' and len(sales_invoice.items) == 1:
        sales_invoice.cancel()
        return True
    return False
def exists_sales_invoice_item(appointment):
    """Return the Sales Invoice Item name referencing this appointment, if any."""
    invoice_item_filters = {
        "reference_dt": "Patient Appointment",
        "reference_dn": appointment.name,
    }
    return frappe.db.exists("Sales Invoice Item", invoice_item_filters)
def exists_sales_invoice(appointment):
    """Return the Sales Invoice document billing this appointment, or False."""
    invoice_item = exists_sales_invoice_item(appointment)
    if not invoice_item:
        return False
    parent_name = frappe.db.get_value("Sales Invoice Item", invoice_item, "parent")
    return frappe.get_doc("Sales Invoice", parent_name)
@frappe.whitelist()
def get_availability_data(date, practitioner):
    """
    Get availability data of 'practitioner' on 'date'
    :param date: Date to check in schedule
    :param practitioner: Name of the practitioner
    :return: dict containing a list of available slots, list of appointments and time of appointments
    """
    date = getdate(date)
    weekday = date.strftime("%A")
    available_slots = []
    slot_details = []
    practitioner_schedule = None
    employee = None
    practitioner_obj = frappe.get_doc("Healthcare Practitioner", practitioner)
    # Get practitioner employee relation (direct link first, then via user id).
    if practitioner_obj.employee:
        employee = practitioner_obj.employee
    elif practitioner_obj.user_id:
        if frappe.db.exists({
            "doctype": "Employee",
            "user_id": practitioner_obj.user_id
            }):
            employee = frappe.get_doc("Employee", {"user_id": practitioner_obj.user_id}).name
    if employee:
        # Check if it is Holiday
        if is_holiday(employee, date):
            frappe.throw(_("{0} is a company holiday".format(date)))
        # Check if He/She on Leave
        leave_record = frappe.db.sql("""select half_day from `tabLeave Application`
            where employee = %s and %s between from_date and to_date
            and docstatus = 1""", (employee, date), as_dict=True)
        if leave_record:
            if leave_record[0].half_day:
                frappe.throw(_("{0} on Half day Leave on {1}").format(practitioner, date))
            else:
                frappe.throw(_("{0} on Leave on {1}").format(practitioner, date))
    # get practitioners schedule: one slot-detail entry per schedule row.
    if practitioner_obj.practitioner_schedules:
        for schedule in practitioner_obj.practitioner_schedules:
            if schedule.schedule:
                practitioner_schedule = frappe.get_doc("Practitioner Schedule", schedule.schedule)
            else:
                frappe.throw(_("{0} does not have a Healthcare Practitioner Schedule. Add it in Healthcare Practitioner master".format(practitioner)))
            if practitioner_schedule:
                # Keep only the time slots for the requested weekday.
                available_slots = []
                for t in practitioner_schedule.time_slots:
                    if weekday == t.day:
                        available_slots.append(t)
                if available_slots:
                    appointments = []
                    if schedule.service_unit:
                        slot_name = schedule.schedule+" - "+schedule.service_unit
                        allow_overlap = frappe.get_value('Healthcare Service Unit', schedule.service_unit, 'overlap_appointments')
                        if allow_overlap:
                            # fetch all appointments to practitioner by service unit
                            appointments = frappe.get_all(
                                "Patient Appointment",
                                filters={"practitioner": practitioner, "service_unit": schedule.service_unit, "appointment_date": date, "status": ["not in",["Cancelled"]]},
                                fields=["name", "appointment_time", "duration", "status"])
                        else:
                            # fetch all appointments to service unit (any practitioner blocks the unit)
                            appointments = frappe.get_all(
                                "Patient Appointment",
                                filters={"service_unit": schedule.service_unit, "appointment_date": date, "status": ["not in",["Cancelled"]]},
                                fields=["name", "appointment_time", "duration", "status"])
                    else:
                        slot_name = schedule.schedule
                        # fetch all appointments to practitioner without service unit
                        appointments = frappe.get_all(
                            "Patient Appointment",
                            filters={"practitioner": practitioner, "service_unit": '', "appointment_date": date, "status": ["not in",["Cancelled"]]},
                            fields=["name", "appointment_time", "duration", "status"])
                    slot_details.append({"slot_name":slot_name, "service_unit":schedule.service_unit,
                        "avail_slot":available_slots, 'appointments': appointments})
    else:
        frappe.throw(_("{0} does not have a Healthcare Practitioner Schedule. Add it in Healthcare Practitioner master".format(practitioner)))
    if not available_slots and not slot_details:
        # TODO: return available slots in nearby dates
        frappe.throw(_("Healthcare Practitioner not available on {0}").format(weekday))
    return {
        "slot_details": slot_details
    }
@frappe.whitelist()
def update_status(appointment_id, status):
    """Set the appointment status; on cancellation also unwind billing and free the prescription."""
    frappe.db.set_value("Patient Appointment", appointment_id, "status", status)
    booked = status != "Cancelled"
    if not booked:
        appointment_cancel(appointment_id)
    prescription = frappe.db.get_value("Patient Appointment", appointment_id, "procedure_prescription")
    if prescription:
        frappe.db.set_value("Procedure Prescription", prescription, "appointment_booked", booked)
@frappe.whitelist()
def set_open_appointments():
    """Flip today's 'Scheduled' appointments to 'Open'."""
    frappe.db.sql(
        "update `tabPatient Appointment` set status='Open' where status = 'Scheduled'"
        " and appointment_date = %s",
        getdate())
@frappe.whitelist()
def set_pending_appointments():
    """Mark past 'Scheduled'/'Open' appointments as 'Pending'."""
    frappe.db.sql(
        "update `tabPatient Appointment` set status='Pending' where status in "
        "('Scheduled','Open') and appointment_date < %s",
        getdate())
def confirm_sms(doc):
    """Send the configured confirmation SMS when confirmations are enabled in settings."""
    enabled = frappe.db.get_value("Healthcare Settings", None, "app_con")
    if enabled == '1':
        template = frappe.db.get_value("Healthcare Settings", None, "app_con_msg")
        send_message(doc, template)
@frappe.whitelist()
def create_encounter(appointment):
    """Build (without saving) a Patient Encounter pre-filled from the appointment;
    return it as a dict for the client to edit further."""
    source = frappe.get_doc("Patient Appointment", appointment)
    encounter = frappe.new_doc("Patient Encounter")
    encounter.appointment = source.name
    encounter.patient = source.patient
    encounter.practitioner = source.practitioner
    encounter.visit_department = source.department
    encounter.patient_sex = source.patient_sex
    encounter.encounter_date = source.appointment_date
    if source.invoiced:
        encounter.invoiced = True
    return encounter.as_dict()
def remind_appointment():
    """Send a reminder SMS for unreminded appointments starting within the
    configured lead time ("rem_before" in Healthcare Settings)."""
    if frappe.db.get_value("Healthcare Settings", None, "app_rem") != '1':
        return
    rem_before = datetime.datetime.strptime(frappe.get_value("Healthcare Settings", None, "rem_before"), "%H:%M:%S")
    rem_dt = datetime.datetime.now() + datetime.timedelta(
        hours=rem_before.hour, minutes=rem_before.minute, seconds=rem_before.second)
    appointment_list = frappe.db.sql(
        "select name from `tabPatient Appointment` where start_dt between %s and %s and reminded = 0 ",
        (datetime.datetime.now(), rem_dt)
    )
    # The reminder template is the same for every appointment; fetch it once
    # instead of once per row (the original re-read it inside the loop).
    message = frappe.db.get_value("Healthcare Settings", None, "app_rem_msg")
    for (appointment_name,) in appointment_list:
        doc = frappe.get_doc("Patient Appointment", appointment_name)
        send_message(doc, message)
        frappe.db.set_value("Patient Appointment", doc.name, "reminded", 1)
def send_message(doc, message):
    """Render `message` as a Jinja template against the appointment and SMS it
    to the patient's mobile number (no-op when the patient has none)."""
    patient = frappe.get_doc("Patient", doc.patient)
    if not patient.mobile:
        return
    context = {"doc": doc, "alert": doc, "comments": None}
    if doc.get("_comments"):
        context["comments"] = json.loads(doc.get("_comments"))
    # jinja to string convertion happens here
    rendered = frappe.render_template(message, context)
    send_sms([patient.mobile], rendered)
@frappe.whitelist()
def get_events(start, end, filters=None):
    """Returns events for Gantt / Calendar view rendering.

    :param start: Start date-time.
    :param end: End date-time.
    :param filters: Filters (JSON).
    """
    from frappe.desk.calendar import get_event_conditions
    # Extra WHERE clauses derived from the calendar's filter JSON by frappe's helper.
    conditions = get_event_conditions("Patient Appointment", filters)
    data = frappe.db.sql("""
        select
        `tabPatient Appointment`.name, `tabPatient Appointment`.patient,
        `tabPatient Appointment`.practitioner, `tabPatient Appointment`.status,
        `tabPatient Appointment`.duration,
        timestamp(`tabPatient Appointment`.appointment_date, `tabPatient Appointment`.appointment_time) as 'start',
        `tabAppointment Type`.color
        from
        `tabPatient Appointment`
        left join `tabAppointment Type` on `tabPatient Appointment`.appointment_type=`tabAppointment Type`.name
        where
        (`tabPatient Appointment`.appointment_date between %(start)s and %(end)s)
        and `tabPatient Appointment`.docstatus < 2 {conditions}""".format(conditions=conditions),
        {"start": start, "end": end}, as_dict=True, update={"allDay": 0})
    # Derive each event's end time from its start plus its duration in minutes.
    for item in data:
        item.end = item.start + datetime.timedelta(minutes = item.duration)
    return data
@frappe.whitelist()
def get_procedure_prescribed(patient):
    """Return procedures prescribed to `patient` with no appointment booked yet.

    SECURITY FIX: the previous implementation interpolated `patient` straight
    into the SQL string via str.format, allowing SQL injection from this
    whitelisted endpoint; use a parameterized query instead.
    """
    return frappe.db.sql("""select pp.name, pp.procedure, pp.parent, ct.practitioner,
    ct.encounter_date, pp.practitioner, pp.date, pp.department
    from `tabPatient Encounter` ct, `tabProcedure Prescription` pp
    where ct.patient=%s and pp.parent=ct.name and pp.appointment_booked=0
    order by ct.creation desc""", (patient,))
fix: Carry the note over from the procedure prescription to the appointment
# -*- coding: utf-8 -*-
# Copyright (c) 2015, ESS LLP and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
import json
from frappe.utils import getdate, add_days
from frappe import _
import datetime
from frappe.core.doctype.sms_settings.sms_settings import send_sms
from erpnext.hr.doctype.employee.employee import is_holiday
from erpnext.healthcare.doctype.healthcare_settings.healthcare_settings import get_receivable_account,get_income_account
from erpnext.healthcare.utils import validity_exists, service_item_and_practitioner_charge
class PatientAppointment(Document):
    def on_update(self):
        """Mark the appointment 'Open' when it is scheduled for today."""
        today = datetime.date.today()
        appointment_date = getdate(self.appointment_date)
        # If appointment created for today set as open
        if today == appointment_date:
            frappe.db.set_value("Patient Appointment", self.name, "status", "Open")
            self.reload()  # pick up the status written directly to the DB

    def after_insert(self):
        """Post-creation bookkeeping: flag the prescription as booked, copy its
        note onto the appointment, consume a fee-validity visit when one
        applies, send a confirmation SMS and optionally auto-invoice."""
        if self.procedure_prescription:
            frappe.db.set_value("Procedure Prescription", self.procedure_prescription, "appointment_booked", True)
            if self.procedure_template:
                # Carry the prescriber's note over to the appointment.
                # NOTE(review): the guard tests procedure_template but the comment is
                # read from procedure_prescription — confirm this mix is intentional.
                comments = frappe.db.get_value("Procedure Prescription", self.procedure_prescription, "comments")
                if comments:
                    frappe.db.set_value("Patient Appointment", self.name, "notes", comments)
        # Check fee validity exists
        appointment = self
        validity_exist = validity_exists(appointment.practitioner, appointment.patient)
        if validity_exist:
            fee_validity = frappe.get_doc("Fee Validity", validity_exist[0][0])
            # Check if the validity is valid
            appointment_date = getdate(appointment.appointment_date)
            if (fee_validity.valid_till >= appointment_date) and (fee_validity.visited < fee_validity.max_visit):
                # Consume one visit from the validity window.
                visited = fee_validity.visited + 1
                frappe.db.set_value("Fee Validity", fee_validity.name, "visited", visited)
                if fee_validity.ref_invoice:
                    # The earlier invoice already covers this visit.
                    frappe.db.set_value("Patient Appointment", appointment.name, "invoiced", True)
                frappe.msgprint(_("{0} has fee validity till {1}").format(appointment.patient, fee_validity.valid_till))
        confirm_sms(self)
        # Auto-invoice only when enabled in settings and the visit is not already covered.
        if frappe.db.get_value("Healthcare Settings", None, "manage_appointment_invoice_automatically") == '1' and \
            frappe.db.get_value("Patient Appointment", self.name, "invoiced") != 1:
            invoice_appointment(self)
confirm_sms(self)
if frappe.db.get_value("Healthcare Settings", None, "manage_appointment_invoice_automatically") == '1' and \
frappe.db.get_value("Patient Appointment", self.name, "invoiced") != 1:
invoice_appointment(self)
@frappe.whitelist()
def invoice_appointment(appointment_doc):
    """Create and submit a paid POS Sales Invoice for the appointment.

    Returns False when the appointment has not been saved yet; otherwise
    submits the invoice and shows a confirmation message.
    """
    if not appointment_doc.name:
        return False
    sales_invoice = frappe.new_doc("Sales Invoice")
    sales_invoice.customer = frappe.get_value("Patient", appointment_doc.patient, "customer")
    sales_invoice.appointment = appointment_doc.name
    sales_invoice.due_date = getdate()
    sales_invoice.is_pos = True
    sales_invoice.company = appointment_doc.company
    sales_invoice.debit_to = get_receivable_account(appointment_doc.company)

    # One invoice line for the practitioner's consulting charge, linked back
    # to this appointment so the invoice can be found from it later.
    item_line = sales_invoice.append("items")
    service_item, practitioner_charge = service_item_and_practitioner_charge(appointment_doc)
    item_line.item_code = service_item
    item_line.description = "Consulting Charges: " + appointment_doc.practitioner
    item_line.income_account = get_income_account(appointment_doc.practitioner, appointment_doc.company)
    item_line.rate = practitioner_charge
    item_line.amount = practitioner_charge
    item_line.qty = 1
    item_line.reference_dt = "Patient Appointment"
    item_line.reference_dn = appointment_doc.name

    payments_line = sales_invoice.append("payments")
    payments_line.mode_of_payment = appointment_doc.mode_of_payment
    payments_line.amount = appointment_doc.paid_amount

    sales_invoice.set_missing_values(for_validate = True)
    sales_invoice.save(ignore_permissions=True)
    sales_invoice.submit()
    # Translate the template first, THEN interpolate; _("...".format(...))
    # would look up the already-interpolated string and never match a translation.
    frappe.msgprint(_("Sales Invoice {0} created as paid").format(sales_invoice.name), alert=True)
def appointment_cancel(appointment_id):
    """Cancel an appointment and unwind its billing.

    If the appointment was invoiced, either cancel the linked Sales Invoice
    (when auto-invoicing manages it) or give back the consumed fee-validity
    visit and ask the user to cancel the invoice manually.
    """
    appointment = frappe.get_doc("Patient Appointment", appointment_id)
    # If invoiced --> fee_validity update with -1 visit
    if appointment.invoiced:
        sales_invoice = exists_sales_invoice(appointment)
        if sales_invoice and cancel_sales_invoice(sales_invoice):
            # Translate first, then interpolate, so the message stays translatable.
            frappe.msgprint(
                _("Appointment {0} and Sales Invoice {1} cancelled").format(appointment.name, sales_invoice.name)
            )
        else:
            validity = validity_exists(appointment.practitioner, appointment.patient)
            if validity:
                fee_validity = frappe.get_doc("Fee Validity", validity[0][0])
                if appointment_valid_in_fee_validity(appointment, fee_validity.valid_till, True, fee_validity.ref_invoice):
                    # Give the visit back to the fee validity this appointment consumed.
                    visited = fee_validity.visited - 1
                    frappe.db.set_value("Fee Validity", fee_validity.name, "visited", visited)
                    frappe.msgprint(
                        _("Appointment cancelled, Please review and cancel the invoice {0}").format(fee_validity.ref_invoice)
                    )
                else:
                    frappe.msgprint(_("Appointment cancelled"))
            else:
                frappe.msgprint(_("Appointment cancelled"))
    else:
        frappe.msgprint(_("Appointment cancelled"))
def appointment_valid_in_fee_validity(appointment, valid_end_date, invoiced, ref_invoice):
    """Return True when `appointment` falls inside the fee-validity window paid
    for by `ref_invoice`.

    The window runs `valid_days` (from Healthcare Settings) back from
    `valid_end_date`; at most `max_visit` appointments are considered.
    """
    valid_days = frappe.db.get_value("Healthcare Settings", None, "valid_days")
    max_visit = frappe.db.get_value("Healthcare Settings", None, "max_visit")
    valid_start_date = add_days(getdate(valid_end_date), -int(valid_days))
    # Appointments which share the fee validity window with this appointment.
    # BUG FIX: a dict literal cannot hold 'appointment_date' twice — the original
    # "<=" upper-bound filter was silently overwritten by the ">=" one; use a
    # single "between" filter so BOTH bounds apply.
    appointments = frappe.get_list("Patient Appointment", {
        'patient': appointment.patient,
        'invoiced': invoiced,
        'appointment_date': ("between", [getdate(valid_start_date), getdate(valid_end_date)]),
        'practitioner': appointment.practitioner
    }, order_by="appointment_date desc", limit=int(max_visit))
    if appointments and len(appointments) > 0:
        appointment_obj = appointments[len(appointments)-1]
        sales_invoice = exists_sales_invoice(appointment_obj)
        # exists_sales_invoice may return False; guard before reading .name.
        if sales_invoice and sales_invoice.name == ref_invoice:
            return True
    return False
def cancel_sales_invoice(sales_invoice):
    """Cancel the invoice when auto-invoicing manages it and it only carries the
    single appointment line; return whether the cancellation happened."""
    auto_invoicing = frappe.db.get_value(
        "Healthcare Settings", None, "manage_appointment_invoice_automatically")
    if auto_invoicing == '1' and len(sales_invoice.items) == 1:
        sales_invoice.cancel()
        return True
    return False
def exists_sales_invoice_item(appointment):
    """Return the Sales Invoice Item name referencing this appointment, if any."""
    invoice_item_filters = {
        "reference_dt": "Patient Appointment",
        "reference_dn": appointment.name,
    }
    return frappe.db.exists("Sales Invoice Item", invoice_item_filters)
def exists_sales_invoice(appointment):
    """Return the Sales Invoice document billing this appointment, or False."""
    invoice_item = exists_sales_invoice_item(appointment)
    if not invoice_item:
        return False
    parent_name = frappe.db.get_value("Sales Invoice Item", invoice_item, "parent")
    return frappe.get_doc("Sales Invoice", parent_name)
@frappe.whitelist()
def get_availability_data(date, practitioner):
    """
    Get availability data of 'practitioner' on 'date'
    :param date: Date to check in schedule
    :param practitioner: Name of the practitioner
    :return: dict containing a list of available slots, list of appointments and time of appointments
    """
    date = getdate(date)
    weekday = date.strftime("%A")
    available_slots = []
    slot_details = []
    practitioner_schedule = None
    employee = None
    practitioner_obj = frappe.get_doc("Healthcare Practitioner", practitioner)
    # Get practitioner employee relation (direct link first, then via user id).
    if practitioner_obj.employee:
        employee = practitioner_obj.employee
    elif practitioner_obj.user_id:
        if frappe.db.exists({
            "doctype": "Employee",
            "user_id": practitioner_obj.user_id
            }):
            employee = frappe.get_doc("Employee", {"user_id": practitioner_obj.user_id}).name
    if employee:
        # Check if it is Holiday
        if is_holiday(employee, date):
            frappe.throw(_("{0} is a company holiday".format(date)))
        # Check if He/She on Leave
        leave_record = frappe.db.sql("""select half_day from `tabLeave Application`
            where employee = %s and %s between from_date and to_date
            and docstatus = 1""", (employee, date), as_dict=True)
        if leave_record:
            if leave_record[0].half_day:
                frappe.throw(_("{0} on Half day Leave on {1}").format(practitioner, date))
            else:
                frappe.throw(_("{0} on Leave on {1}").format(practitioner, date))
    # get practitioners schedule: one slot-detail entry per schedule row.
    if practitioner_obj.practitioner_schedules:
        for schedule in practitioner_obj.practitioner_schedules:
            if schedule.schedule:
                practitioner_schedule = frappe.get_doc("Practitioner Schedule", schedule.schedule)
            else:
                frappe.throw(_("{0} does not have a Healthcare Practitioner Schedule. Add it in Healthcare Practitioner master".format(practitioner)))
            if practitioner_schedule:
                # Keep only the time slots for the requested weekday.
                available_slots = []
                for t in practitioner_schedule.time_slots:
                    if weekday == t.day:
                        available_slots.append(t)
                if available_slots:
                    appointments = []
                    if schedule.service_unit:
                        slot_name = schedule.schedule+" - "+schedule.service_unit
                        allow_overlap = frappe.get_value('Healthcare Service Unit', schedule.service_unit, 'overlap_appointments')
                        if allow_overlap:
                            # fetch all appointments to practitioner by service unit
                            appointments = frappe.get_all(
                                "Patient Appointment",
                                filters={"practitioner": practitioner, "service_unit": schedule.service_unit, "appointment_date": date, "status": ["not in",["Cancelled"]]},
                                fields=["name", "appointment_time", "duration", "status"])
                        else:
                            # fetch all appointments to service unit (any practitioner blocks the unit)
                            appointments = frappe.get_all(
                                "Patient Appointment",
                                filters={"service_unit": schedule.service_unit, "appointment_date": date, "status": ["not in",["Cancelled"]]},
                                fields=["name", "appointment_time", "duration", "status"])
                    else:
                        slot_name = schedule.schedule
                        # fetch all appointments to practitioner without service unit
                        appointments = frappe.get_all(
                            "Patient Appointment",
                            filters={"practitioner": practitioner, "service_unit": '', "appointment_date": date, "status": ["not in",["Cancelled"]]},
                            fields=["name", "appointment_time", "duration", "status"])
                    slot_details.append({"slot_name":slot_name, "service_unit":schedule.service_unit,
                        "avail_slot":available_slots, 'appointments': appointments})
    else:
        frappe.throw(_("{0} does not have a Healthcare Practitioner Schedule. Add it in Healthcare Practitioner master".format(practitioner)))
    if not available_slots and not slot_details:
        # TODO: return available slots in nearby dates
        frappe.throw(_("Healthcare Practitioner not available on {0}").format(weekday))
    return {
        "slot_details": slot_details
    }
@frappe.whitelist()
def update_status(appointment_id, status):
    """Set the appointment status; on cancellation also unwind billing and free the prescription."""
    frappe.db.set_value("Patient Appointment", appointment_id, "status", status)
    booked = status != "Cancelled"
    if not booked:
        appointment_cancel(appointment_id)
    prescription = frappe.db.get_value("Patient Appointment", appointment_id, "procedure_prescription")
    if prescription:
        frappe.db.set_value("Procedure Prescription", prescription, "appointment_booked", booked)
@frappe.whitelist()
def set_open_appointments():
    """Flip today's 'Scheduled' appointments to 'Open'."""
    frappe.db.sql(
        "update `tabPatient Appointment` set status='Open' where status = 'Scheduled'"
        " and appointment_date = %s",
        getdate())
@frappe.whitelist()
def set_pending_appointments():
    """Mark past 'Scheduled'/'Open' appointments as 'Pending'."""
    frappe.db.sql(
        "update `tabPatient Appointment` set status='Pending' where status in "
        "('Scheduled','Open') and appointment_date < %s",
        getdate())
def confirm_sms(doc):
    """Send the configured confirmation SMS when confirmations are enabled in settings."""
    enabled = frappe.db.get_value("Healthcare Settings", None, "app_con")
    if enabled == '1':
        template = frappe.db.get_value("Healthcare Settings", None, "app_con_msg")
        send_message(doc, template)
@frappe.whitelist()
def create_encounter(appointment):
    """Build (without saving) a Patient Encounter pre-filled from the appointment;
    return it as a dict for the client to edit further."""
    source = frappe.get_doc("Patient Appointment", appointment)
    encounter = frappe.new_doc("Patient Encounter")
    encounter.appointment = source.name
    encounter.patient = source.patient
    encounter.practitioner = source.practitioner
    encounter.visit_department = source.department
    encounter.patient_sex = source.patient_sex
    encounter.encounter_date = source.appointment_date
    if source.invoiced:
        encounter.invoiced = True
    return encounter.as_dict()
def remind_appointment():
    """Send a reminder SMS for unreminded appointments starting within the
    configured lead time ("rem_before" in Healthcare Settings)."""
    if frappe.db.get_value("Healthcare Settings", None, "app_rem") != '1':
        return
    rem_before = datetime.datetime.strptime(frappe.get_value("Healthcare Settings", None, "rem_before"), "%H:%M:%S")
    rem_dt = datetime.datetime.now() + datetime.timedelta(
        hours=rem_before.hour, minutes=rem_before.minute, seconds=rem_before.second)
    appointment_list = frappe.db.sql(
        "select name from `tabPatient Appointment` where start_dt between %s and %s and reminded = 0 ",
        (datetime.datetime.now(), rem_dt)
    )
    # The reminder template is the same for every appointment; fetch it once
    # instead of once per row (the original re-read it inside the loop).
    message = frappe.db.get_value("Healthcare Settings", None, "app_rem_msg")
    for (appointment_name,) in appointment_list:
        doc = frappe.get_doc("Patient Appointment", appointment_name)
        send_message(doc, message)
        frappe.db.set_value("Patient Appointment", doc.name, "reminded", 1)
def send_message(doc, message):
    """Render `message` as a Jinja template against the appointment and SMS it
    to the patient's mobile number (no-op when the patient has none)."""
    patient = frappe.get_doc("Patient", doc.patient)
    if not patient.mobile:
        return
    context = {"doc": doc, "alert": doc, "comments": None}
    if doc.get("_comments"):
        context["comments"] = json.loads(doc.get("_comments"))
    # jinja to string convertion happens here
    rendered = frappe.render_template(message, context)
    send_sms([patient.mobile], rendered)
@frappe.whitelist()
def get_events(start, end, filters=None):
    """Returns events for Gantt / Calendar view rendering.

    :param start: Start date-time.
    :param end: End date-time.
    :param filters: Filters (JSON).
    """
    from frappe.desk.calendar import get_event_conditions
    # Extra WHERE clauses derived from the calendar's filter JSON by frappe's helper.
    conditions = get_event_conditions("Patient Appointment", filters)
    data = frappe.db.sql("""
        select
        `tabPatient Appointment`.name, `tabPatient Appointment`.patient,
        `tabPatient Appointment`.practitioner, `tabPatient Appointment`.status,
        `tabPatient Appointment`.duration,
        timestamp(`tabPatient Appointment`.appointment_date, `tabPatient Appointment`.appointment_time) as 'start',
        `tabAppointment Type`.color
        from
        `tabPatient Appointment`
        left join `tabAppointment Type` on `tabPatient Appointment`.appointment_type=`tabAppointment Type`.name
        where
        (`tabPatient Appointment`.appointment_date between %(start)s and %(end)s)
        and `tabPatient Appointment`.docstatus < 2 {conditions}""".format(conditions=conditions),
        {"start": start, "end": end}, as_dict=True, update={"allDay": 0})
    # Derive each event's end time from its start plus its duration in minutes.
    for item in data:
        item.end = item.start + datetime.timedelta(minutes = item.duration)
    return data
@frappe.whitelist()
def get_procedure_prescribed(patient):
    """Return procedures prescribed to `patient` with no appointment booked yet.

    SECURITY FIX: the previous implementation interpolated `patient` straight
    into the SQL string via str.format, allowing SQL injection from this
    whitelisted endpoint; use a parameterized query instead.
    """
    return frappe.db.sql("""select pp.name, pp.procedure, pp.parent, ct.practitioner,
    ct.encounter_date, pp.practitioner, pp.date, pp.department
    from `tabPatient Encounter` ct, `tabProcedure Prescription` pp
    where ct.patient=%s and pp.parent=ct.name and pp.appointment_booked=0
    order by ct.creation desc""", (patient,))
|
import sqlalchemy
from osmaxx.converters.gis_converter.bootstrap.bootstrap import BootStrapper
from tests.inside_worker_test.conftest import slow, cleanup_osmaxx_schemas
from tests.inside_worker_test.declarative_schema import osm_models
@slow
def test_osmaxx_data_model_processing_puts_amenity_grave_yard_with_religion_into_table_pow_a(
        osmaxx_functions, clean_osm_tables, monkeypatch):
    """A polygon tagged amenity=grave_yard with a religion must land in osmaxx.pow_a."""
    assert osmaxx_functions == clean_osm_tables  # same db-connection
    engine = osmaxx_functions
    engine.execute(
        osm_models.t_osm_polygon.insert().values(
            amenity='grave_yard',
            religion='any value will do, as long as one is present',
        ).execution_options(autocommit=True)
    )
    monkeypatch.setattr(
        'osmaxx.converters.gis_converter.helper.postgres_wrapper.create_engine', lambda *_, **__: engine)
    bootstrapper = BootStrapper(pbf_file_path=None)
    try:
        bootstrapper._harmonize_database()
        bootstrapper._filter_data()
        t_pow_a = sqlalchemy.sql.schema.Table('pow_a', osm_models.metadata, schema='osmaxx')
        result = engine.execute(sqlalchemy.select([t_pow_a]))
        assert result.rowcount == 1
    finally:
        try:
            # Close (not merely `del`) the result proxy: closing deterministically
            # releases its row and table locks, whereas dropping the reference only
            # frees them whenever garbage collection happens to run, which can
            # block dropping SCHEMA "osmaxx" in the cleanup below even if the
            # test failed. See http://stackoverflow.com/a/35888141/674064
            result.close()
        except NameError:
            pass  # `result` was never assigned because an earlier call raised.
        cleanup_osmaxx_schemas(engine)
Release the row and table locks so that dropping the SCHEMA won't block afterwards,
even if the test failed.
See http://stackoverflow.com/a/35888141/674064
import sqlalchemy
from osmaxx.converters.gis_converter.bootstrap.bootstrap import BootStrapper
from tests.inside_worker_test.conftest import slow, cleanup_osmaxx_schemas
from tests.inside_worker_test.declarative_schema import osm_models
@slow
def test_osmaxx_data_model_processing_puts_amenity_grave_yard_with_religion_into_table_pow_a(
        osmaxx_functions, clean_osm_tables, monkeypatch):
    """A polygon tagged amenity=grave_yard with a religion set is filtered into osmaxx table pow_a."""
    assert osmaxx_functions == clean_osm_tables  # same db-connection
    engine = osmaxx_functions
    # Insert one matching source polygon into the raw OSM tables.
    engine.execute(
        osm_models.t_osm_polygon.insert().values(
            amenity='grave_yard',
            religion='any value will do, as long as one is present',
        ).execution_options(autocommit=True)
    )
    # Make the converter reuse this test's engine instead of opening its own connection.
    monkeypatch.setattr(
        'osmaxx.converters.gis_converter.helper.postgres_wrapper.create_engine', lambda *_, **__: engine)
    bootstrapper = BootStrapper(pbf_file_path=None)
    try:
        bootstrapper._harmonize_database()
        bootstrapper._filter_data()
        t_pow_a = sqlalchemy.sql.schema.Table('pow_a', osm_models.metadata, schema='osmaxx')
        result = engine.execute(sqlalchemy.select([t_pow_a]))
        assert result.rowcount == 1
    finally:
        try:
            result.close()  # Release row and table locks so the SCHEMA drop below won't block.
        except NameError:
            # ``result`` was never bound because an earlier step raised.
            pass
        cleanup_osmaxx_schemas(engine)
|
import sys
import copy
import psutil
from PyQt5.QtWidgets import QWidget, QTabWidget
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtCore import Qt
from PDielec.GUI.MainTab import MainTab
from PDielec.GUI.SettingsTab import SettingsTab
from PDielec.GUI.PowderScenarioTab import PowderScenarioTab
from PDielec.GUI.SingleCrystalScenarioTab import SingleCrystalScenarioTab
from PDielec.GUI.PlottingTab import PlottingTab
from PDielec.GUI.AnalysisTab import AnalysisTab
from PDielec.GUI.ViewerTab import ViewerTab
from PDielec.GUI.FitterTab import FitterTab
from PDielec.Utilities import Debug
class NoteBook(QWidget):
def __init__(self, parent, program, filename, spreadsheet, debug=False, progressbar=None, scripting=False, default_scenario='powder',ncpus=0, threading=False):
super(QWidget, self).__init__(parent)
global debugger
debugger = Debug(debug,'NoteBook:')
debugger.print('Initialising')
self.reader = None
self.progressbars=[progressbar]
if progressbar is None:
self.progressbars = [ ]
self.progressbar_status = 0
self.progressbar_maximum = 0
self.spreadsheet = None
self.threading = threading
if default_scenario == 'powder':
self.currentScenarioTab = PowderScenarioTab
else:
self.currentScenarioTab = SingleCrystalScenarioTab
if ncpus == 0:
self.ncpus = psutil.cpu_count(logical=False)
else:
self.ncpus = ncpus
#
# Set threading for mkl
#
try:
import mkl
mkl.set_num_threads(self.ncpus)
except:
pass
self.scripting = scripting
# Overwriting of files is not allowed with a prompt
# If scripting is used then overwriting is allowed
self.overwriting = False
self.debug = debug
#jk self.old_tab_index = None
self.layout = QVBoxLayout()
# The number of tabs before we have scenarios
self.tabOffSet = 2
# Set the plotting tab to None in case a scenario tries to read it
self.plottingTab = None
self.settingsTab = None
self.analysisTab = None
self.viewerTab = None
self.fitterTab = None
self.scenarios = None
#
# Initialize tab screen
#
self.tabs = QTabWidget(self)
self.tabs.currentChanged.connect(self.on_tabs_currentChanged)
self.mainTab = MainTab(self, program, filename, spreadsheet, debug=debug)
self.settingsTab = SettingsTab(self, debug=debug)
if filename != '' and not self.scripting:
debugger.print('Refreshing settingsTab in notebook initialisation - filename',filename)
self.settingsTab.refresh()
#
# Open more windows
#
debugger.print('Initialising the first scenario')
self.scenarios = []
self.scenarios.append( self.currentScenarioTab(self, debug=debug ) )
self.scenarios[0].setScenarioIndex(0)
self.scenarios[0].settings['Legend'] = 'Scenario 1'
debugger.print('Finished adding the first scenario')
#
# Open the plotting tab
#
self.plottingTab = PlottingTab(self, debug=debug)
if filename != '' and not self.scripting:
debugger.print('Refreshing plotting because filename is set')
self.plottingTab.refresh()
#
# Open the Analysis tab
#
self.analysisTab = AnalysisTab(self, debug=debug)
if filename != '' and not self.scripting:
debugger.print('Refreshing analysis because filename is set')
self.analysisTab.refresh()
#
# Open the Viewer tab
#
debugger.print('Initialising the viewer tab')
self.viewerTab = ViewerTab(self, debug=debug)
#
# Open the Fitter tab
#
debugger.print('Initialising the fitter tab')
self.fitterTab = FitterTab(self, debug=debug)
#
# Add tabs
#
debugger.print('Adding all tabs to the notebook')
self.tabs.addTab(self.mainTab,'Main')
self.tabs.addTab(self.settingsTab,'Settings')
for i,tab in enumerate(self.scenarios):
#jk tab.refresh(force=True)
tab.requestRefresh()
self.tabs.addTab(tab,'Scenario '+str(i+1))
self.tabs.addTab(self.plottingTab,'Plotting')
self.tabs.addTab(self.analysisTab,'Analysis')
self.tabs.addTab(self.viewerTab,'3D Viewer')
self.tabs.addTab(self.fitterTab,'Fitter')
# Add the tab widget
self.layout.addWidget(self.tabs)
self.setLayout(self.layout)
debugger.print('Exiting initialisation ')
return
def requestRefresh(self):
self.refreshRequired = True
debugger.print('requestRefresh')
return
    def addScenario(self,scenarioType=None,copyFromIndex=-2):
        """Append a new scenario tab, copy settings where possible, and focus it.

        Parameters
        ----------
        scenarioType : str or None
            'Powder' selects PowderScenarioTab; any other resolved value
            selects SingleCrystalScenarioTab.
        copyFromIndex : int
            Index of an existing scenario whose type and settings to copy.
            The sentinel -2 means "not given".
        """
        debugger.print('addScenario for scenarioType', scenarioType,copyFromIndex)
        if copyFromIndex != -2:
            # If the copyFromIndex is not -2 then we override the scenarioType
            last = self.scenarios[copyFromIndex]
            scenarioType = last.scenarioType
        elif scenarioType == None:
            # The default behaviour with no parameters in the call, use the last scenario in the list
            last = self.scenarios[-1]
            scenarioType = last.scenarioType
        else:
            # copyFromIndex is default so we find the last scenario of scenarioType in the list
            last = None
            for scenario in self.scenarios:
                if scenarioType == scenario.scenarioType:
                    last = scenario
            # end for
        # Create a new scenario
        if scenarioType == 'Powder':
            self.currentScenarioTab = PowderScenarioTab
        else:
            self.currentScenarioTab = SingleCrystalScenarioTab
        # Add the scenario to the end of the list
        self.scenarios.append(self.currentScenarioTab(self, self.debug))
        # If we found a previous scenario of the same type, copy its settings
        # (deepcopy so later edits to one tab do not leak into the other).
        if last is not None:
            self.scenarios[-1].settings = copy.deepcopy(last.settings)
        self.scenarios[-1].refresh(force=True)
        n = len(self.scenarios)
        # Scenario tabs sit between the fixed leading tabs (tabOffSet) and the
        # trailing Plotting/Analysis/Viewer/Fitter tabs.
        self.tabs.insertTab(self.tabOffSet+n-1,self.scenarios[-1],'Scenario '+str(n))
        self.tabs.setCurrentIndex(self.tabOffSet+n-1)
        # Renumber every scenario tab so indices and labels stay consecutive.
        for i,scenario in enumerate(self.scenarios):
            scenario.setScenarioIndex(i)
            self.tabs.setTabText(self.tabOffSet+i,'Scenario '+str(i+1))
        return
    def print_settings(self, filename=None):
        """Save the current program settings as a replayable Python script.

        If *filename* is None a save-file dialog is shown; an empty
        selection cancels.  The script consists of ``tab.settings[...]``
        assignments that the scripting mode can execute.
        """
        # Print the settings of all the settings that have been used to a file settings.py
        debugger.print('print_settings, filename=',filename)
        qf = QFileDialog()
        qf.setWindowTitle('Save the program settings to a file')
        debugger.print('print_settings, directory=',self.mainTab.directory)
        qf.setDirectory(self.mainTab.directory)
        if filename == None:
            filename,selection = qf.getSaveFileName()
        if filename == '':
            return
        print('Current settings will be saved to '+filename)
        fd = open(filename,'w')
        # Handle the special case of the first scenario
        print('#',file=fd)
        print('# Handle the special case of the first scenario',file=fd)
        print('#',file=fd)
        print('self.notebook.switchScenario(0,scenarioType=\"'+self.scenarios[0].scenarioType+'\")',file=fd )
        print('#',file=fd)
        # Print settings of mainTab
        self.print_tab_settings(self.mainTab, 'mainTab',fd)
        print('tab.requestRefresh()',file=fd)
        # Print settings of settingsTab
        self.print_tab_settings(self.settingsTab, 'settingsTab',fd)
        print('tab.sigmas_cm1 =',self.settingsTab.sigmas_cm1,file=fd)
        print('tab.requestRefresh()',file=fd)
        # Print settings of scenarios
        for i,tab in enumerate(self.scenarios):
            if i == 0:
                # The first scenario already exists (created by the switchScenario call above).
                self.print_tab_settings(tab, 'scenarios[{}]'.format(i), fd, new_scenario = False)
            else:
                self.print_tab_settings(tab, 'scenarios[{}]'.format(i), fd, new_scenario = True)
            print('tab.requestRefresh()',file=fd)
        self.print_tab_settings(self.analysisTab, 'analysisTab',fd)
        print('tab.requestRefresh()',file=fd)
        self.print_tab_settings(self.viewerTab, 'viewerTab',fd)
        print('tab.requestRefresh()',file=fd)
        self.print_tab_settings(self.fitterTab, 'fitterTab',fd)
        print('tab.requestRefresh()',file=fd)
        self.print_tab_settings(self.plottingTab, 'plottingTab',fd)
        print('tab.requestRefresh()',file=fd)
        fd.close()
        return
def print_tab_settings(self,tab,title,fd,new_scenario = False):
print('#',file=fd)
print('#',file=fd)
if new_scenario:
print('self.notebook.addScenario(scenarioType=\"'+self.scenarios[0].scenarioType+'\")',file=fd )
print('tab = self.notebook.'+title,file=fd)
for item in tab.settings:
if item == 'Optical permittivity' and not tab.settings['Optical permittivity edited']:
pass
else:
value = tab.settings[item]
if 'str' in str(type(value)):
print('tab.settings[\''+item+'\'] = \'{}\''.format(tab.settings[item]),file=fd)
else:
print('tab.settings[\''+item+'\'] = ', tab.settings[item],file=fd)
    def deleteScenario(self,index):
        """Remove the scenario at *index*, renumber the rest, and select a neighbour."""
        # Don't delete the last scenario
        if len(self.scenarios) > 1:
            self.tabs.removeTab(self.tabOffSet+index)
            del self.scenarios[index]
            # Renumber the surviving scenario tabs so labels stay consecutive.
            for i,scenario in enumerate(self.scenarios):
                scenario.setScenarioIndex(i)
                self.tabs.setTabText(self.tabOffSet+i,'Scenario '+str(i+1))
            # Select the scenario before the deleted one; when scenario 0 was
            # deleted, bump the index so we select the new first scenario
            # rather than the preceding fixed tab.
            if index-1 < 0:
                index += 1
            self.tabs.setCurrentIndex(self.tabOffSet+index-1)
        return
    def switchScenario(self,index,scenarioType=None):
        """Replace the scenario at *index* with a scenario of another (or the given) type.

        If *scenarioType* is None the type is toggled (Powder <-> single
        crystal); otherwise the requested type is forced.  The new tab takes
        the old slot, is renumbered, selected and refreshed.  Note the old
        scenario's settings are NOT carried over.
        """
        debugger.print('switch for scenario', index+1)
        # Replace the scenario with the other scenario type
        scenario = self.scenarios[index]
        debugger.print('Current scenario type', scenario.scenarioType, scenarioType)
        #
        # If scenarioType is specified in the call then force that type
        # Otherwise switch type
        #
        if scenarioType == None:
            if scenario.scenarioType == 'Powder':
                self.currentScenarioTab = SingleCrystalScenarioTab
            else:
                self.currentScenarioTab = PowderScenarioTab
            # end if
        else:
            if scenarioType == 'Powder':
                self.currentScenarioTab = PowderScenarioTab
            else:
                self.currentScenarioTab = SingleCrystalScenarioTab
            # end if
        #end if
        self.scenarios[index] = self.currentScenarioTab(self, self.debug)
        scenario = self.scenarios[index]
        debugger.print('Current scenario type now', scenario.scenarioType)
        # Swap the widget in place: remove the old tab, insert the new one at
        # the same position, then renumber everything.
        self.tabs.removeTab(self.tabOffSet+index)
        self.tabs.insertTab(self.tabOffSet+index,scenario,'Scenario '+str(index+1) )
        for i,scenario in enumerate(self.scenarios):
            scenario.setScenarioIndex(i)
            self.tabs.setTabText(self.tabOffSet+i,'Scenario '+str(i+1))
        self.tabs.setCurrentIndex(self.tabOffSet+index)
        self.scenarios[index].refresh(force=True)
        return
    def refresh(self,force=False):
        """Immediately refresh every tab; no-op while a script is running.

        Each trailing tab is selected while the pass runs; the Plotting tab
        is left selected at the end.
        """
        if self.scripting:
            debugger.print('Notebook aborting refresh because of scripting')
            return
        debugger.print('Notebook refresh changed',force)
        # 2 fixed leading tabs + scenarios + Plotting/Analysis/Viewer/Fitter.
        ntabs = 2 + len(self.scenarios) + 4
        self.mainTab.refresh(force=force)
        self.settingsTab.refresh(force=force)
        for tab in self.scenarios:
            tab.refresh(force=force)
        self.tabs.setCurrentIndex(ntabs-5)
        self.plottingTab.refresh(force=force)
        self.tabs.setCurrentIndex(ntabs-4)
        self.analysisTab.refresh(force=force)
        self.tabs.setCurrentIndex(ntabs-3)
        self.viewerTab.refresh(force=force)
        self.tabs.setCurrentIndex(ntabs-2)
        self.fitterTab.refresh(force=force)
        self.tabs.setCurrentIndex(ntabs-1)
        # Sets the open tab to be the plotterTab
        self.tabs.setCurrentIndex(ntabs-4)
    def newrefresh(self,force=False):
        """Refresh mainTab now; merely flag the other tabs for a deferred refresh.

        Unlike refresh(), the other tabs only get requestRefresh(), so their
        work happens lazily when they are next shown.  No-op while scripting.
        """
        if self.scripting:
            debugger.print('Notebook aborting refresh because of scripting')
            return
        debugger.print('Notebook refresh changed',force)
        # 2 fixed leading tabs + scenarios + Plotting/Analysis/Viewer/Fitter.
        ntabs = 2 + len(self.scenarios) + 4
        self.mainTab.refresh(force=force)
        self.settingsTab.requestRefresh()
        for tab in self.scenarios:
            tab.requestRefresh()
        self.tabs.setCurrentIndex(ntabs-5)
        self.plottingTab.requestRefresh()
        self.tabs.setCurrentIndex(ntabs-4)
        self.analysisTab.requestRefresh()
        self.tabs.setCurrentIndex(ntabs-3)
        self.viewerTab.requestRefresh()
        self.tabs.setCurrentIndex(ntabs-2)
        self.fitterTab.requestRefresh()
        self.tabs.setCurrentIndex(ntabs-1)
        # Leave the Plotting tab selected.
        self.tabs.setCurrentIndex(ntabs-4)
def writeSpreadsheet(self):
debugger.print('Write spreadsheet')
self.mainTab.writeSpreadsheet()
self.settingsTab.writeSpreadsheet()
self.analysisTab.writeSpreadsheet()
self.plottingTab.writeSpreadsheet()
    def on_tabs_currentChanged(self, tabindex):
        """Slot for QTabWidget.currentChanged: refresh the trailing tab just selected.

        Only the Plotting/Analysis/Viewer/Fitter tabs are refreshed here;
        main, settings and scenario tabs are not.  No-op while scripting.
        """
        debugger.print('Tab index changed', tabindex)
        #
        # If scripting do not refresh tabs
        #
        if self.scripting:
            debugger.print('Exiting on_tabs_currentChanged without refreshing')
            return
        # Number of tabs
        ntabs = 2 + len(self.scenarios) + 4
        debugger.print('Number of tabs',ntabs)
        # The trailing tabs are identified by distance from the end.
        if tabindex == ntabs-1:
            # fitter tab
            debugger.print('Calling fitterTab refresh')
            self.fitterTab.refresh()
        elif tabindex == ntabs-2:
            # viewer tab
            debugger.print('Calling viewerTab refresh')
            self.viewerTab.refresh()
        elif tabindex == ntabs-3:
            # analysis tab
            debugger.print('Calling analysisTab refresh')
            self.analysisTab.refresh()
        elif tabindex == ntabs-4:
            # plottings tab
            debugger.print('Calling plottingTab refresh')
            self.plottingTab.refresh()
        debugger.print('Exiting on_tabs_currentChanged()')
        #jk self.old_tab_index = tabindex
def keyPressEvent(self, e):
if (e.key() == Qt.Key_S) and QApplication.keyboardModifiers() and Qt.ControlModifier:
print('Control S has been pressed')
self.print_settings()
elif (e.key() == Qt.Key_C) and QApplication.keyboardModifiers() and Qt.ControlModifier:
print('Control C has been pressed')
print('The program will close down')
sys.exit()
def progressbars_set_maximum( self, maximum ):
self.progressbar_status = 0
self.progressbar_maximum = maximum
for bar in self.progressbars:
bar.setMaximum(maximum)
bar.setValue(self.progressbar_status)
return
def progressbars_update( self, increment=1 ):
self.progressbar_status += increment
for bar in self.progressbars:
bar.setValue(self.progressbar_status)
return
def progressbars_add( self, bar ):
self.progressbars.append(bar)
bar.setMaximum(self.progressbar_maximum)
bar.setValue(self.progressbar_status)
return
Fixed a problem when writing single-crystal tab settings to the script file.
The error only occurred if the first tab was a powder-mode scenario.
The code now uses each tab's own scenario type when writing that scenario
to the script.
import sys
import copy
import psutil
from PyQt5.QtWidgets import QWidget, QTabWidget
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtCore import Qt
from PDielec.GUI.MainTab import MainTab
from PDielec.GUI.SettingsTab import SettingsTab
from PDielec.GUI.PowderScenarioTab import PowderScenarioTab
from PDielec.GUI.SingleCrystalScenarioTab import SingleCrystalScenarioTab
from PDielec.GUI.PlottingTab import PlottingTab
from PDielec.GUI.AnalysisTab import AnalysisTab
from PDielec.GUI.ViewerTab import ViewerTab
from PDielec.GUI.FitterTab import FitterTab
from PDielec.Utilities import Debug
class NoteBook(QWidget):
def __init__(self, parent, program, filename, spreadsheet, debug=False, progressbar=None, scripting=False, default_scenario='powder',ncpus=0, threading=False):
super(QWidget, self).__init__(parent)
global debugger
debugger = Debug(debug,'NoteBook:')
debugger.print('Initialising')
self.reader = None
self.progressbars=[progressbar]
if progressbar is None:
self.progressbars = [ ]
self.progressbar_status = 0
self.progressbar_maximum = 0
self.spreadsheet = None
self.threading = threading
if default_scenario == 'powder':
self.currentScenarioTab = PowderScenarioTab
else:
self.currentScenarioTab = SingleCrystalScenarioTab
if ncpus == 0:
self.ncpus = psutil.cpu_count(logical=False)
else:
self.ncpus = ncpus
#
# Set threading for mkl
#
try:
import mkl
mkl.set_num_threads(self.ncpus)
except:
pass
self.scripting = scripting
# Overwriting of files is not allowed with a prompt
# If scripting is used then overwriting is allowed
self.overwriting = False
self.debug = debug
#jk self.old_tab_index = None
self.layout = QVBoxLayout()
# The number of tabs before we have scenarios
self.tabOffSet = 2
# Set the plotting tab to None in case a scenario tries to read it
self.plottingTab = None
self.settingsTab = None
self.analysisTab = None
self.viewerTab = None
self.fitterTab = None
self.scenarios = None
#
# Initialize tab screen
#
self.tabs = QTabWidget(self)
self.tabs.currentChanged.connect(self.on_tabs_currentChanged)
self.mainTab = MainTab(self, program, filename, spreadsheet, debug=debug)
self.settingsTab = SettingsTab(self, debug=debug)
if filename != '' and not self.scripting:
debugger.print('Refreshing settingsTab in notebook initialisation - filename',filename)
self.settingsTab.refresh()
#
# Open more windows
#
debugger.print('Initialising the first scenario')
self.scenarios = []
self.scenarios.append( self.currentScenarioTab(self, debug=debug ) )
self.scenarios[0].setScenarioIndex(0)
self.scenarios[0].settings['Legend'] = 'Scenario 1'
debugger.print('Finished adding the first scenario')
#
# Open the plotting tab
#
self.plottingTab = PlottingTab(self, debug=debug)
if filename != '' and not self.scripting:
debugger.print('Refreshing plotting because filename is set')
self.plottingTab.refresh()
#
# Open the Analysis tab
#
self.analysisTab = AnalysisTab(self, debug=debug)
if filename != '' and not self.scripting:
debugger.print('Refreshing analysis because filename is set')
self.analysisTab.refresh()
#
# Open the Viewer tab
#
debugger.print('Initialising the viewer tab')
self.viewerTab = ViewerTab(self, debug=debug)
#
# Open the Fitter tab
#
debugger.print('Initialising the fitter tab')
self.fitterTab = FitterTab(self, debug=debug)
#
# Add tabs
#
debugger.print('Adding all tabs to the notebook')
self.tabs.addTab(self.mainTab,'Main')
self.tabs.addTab(self.settingsTab,'Settings')
for i,tab in enumerate(self.scenarios):
#jk tab.refresh(force=True)
tab.requestRefresh()
self.tabs.addTab(tab,'Scenario '+str(i+1))
self.tabs.addTab(self.plottingTab,'Plotting')
self.tabs.addTab(self.analysisTab,'Analysis')
self.tabs.addTab(self.viewerTab,'3D Viewer')
self.tabs.addTab(self.fitterTab,'Fitter')
# Add the tab widget
self.layout.addWidget(self.tabs)
self.setLayout(self.layout)
debugger.print('Exiting initialisation ')
return
def requestRefresh(self):
self.refreshRequired = True
debugger.print('requestRefresh')
return
def addScenario(self,scenarioType=None,copyFromIndex=-2):
"""Add Scenario is used by the script to add a new scenario"""
debugger.print('addScenario for scenarioType', scenarioType,copyFromIndex)
if copyFromIndex != -2:
# If the copyFromIndex is not -2 then we override the scenarioType
last = self.scenarios[copyFromIndex]
scenarioType = last.scenarioType
elif scenarioType == None:
# The default behaviour with no parameters in the call, use the last scenario in the list
last = self.scenarios[-1]
scenarioType = last.scenarioType
else:
# copyFromIndex is default so we find the last scenario of scenarioType in the list
last = None
for scenario in self.scenarios:
if scenarioType == scenario.scenarioType:
last = scenario
# end for
# Create a new scenario
if scenarioType == 'Powder':
self.currentScenarioTab = PowderScenarioTab
else:
self.currentScenarioTab = SingleCrystalScenarioTab
# Add the scenario to the end of the list
self.scenarios.append(self.currentScenarioTab(self, self.debug))
# If we have found a previous scenario of the same time set the settings to it
if last is not None:
self.scenarios[-1].settings = copy.deepcopy(last.settings)
self.scenarios[-1].refresh(force=True)
n = len(self.scenarios)
self.tabs.insertTab(self.tabOffSet+n-1,self.scenarios[-1],'Scenario '+str(n))
self.tabs.setCurrentIndex(self.tabOffSet+n-1)
for i,scenario in enumerate(self.scenarios):
scenario.setScenarioIndex(i)
self.tabs.setTabText(self.tabOffSet+i,'Scenario '+str(i+1))
return
def print_settings(self, filename=None):
# Print the settings of all the settings that have been used to a file settings.py
debugger.print('print_settings, filename=',filename)
qf = QFileDialog()
qf.setWindowTitle('Save the program settings to a file')
debugger.print('print_settings, directory=',self.mainTab.directory)
qf.setDirectory(self.mainTab.directory)
if filename == None:
filename,selection = qf.getSaveFileName()
if filename == '':
return
print('Current settings will be saved to '+filename)
fd = open(filename,'w')
# Handle the special case of the first scenario
print('#',file=fd)
print('# Handle the special case of the first scenario',file=fd)
print('#',file=fd)
print('self.notebook.switchScenario(0,scenarioType=\"'+self.scenarios[0].scenarioType+'\")',file=fd )
print('#',file=fd)
# Print settings of mainTab
self.print_tab_settings(self.mainTab, 'mainTab',fd)
print('tab.requestRefresh()',file=fd)
# Print settings of settingsTab
self.print_tab_settings(self.settingsTab, 'settingsTab',fd)
print('tab.sigmas_cm1 =',self.settingsTab.sigmas_cm1,file=fd)
print('tab.requestRefresh()',file=fd)
# Print settings of scenarios
for i,tab in enumerate(self.scenarios):
if i == 0:
self.print_tab_settings(tab, 'scenarios[{}]'.format(i), fd, new_scenario = False)
else:
self.print_tab_settings(tab, 'scenarios[{}]'.format(i), fd, new_scenario = True)
print('tab.requestRefresh()',file=fd)
self.print_tab_settings(self.analysisTab, 'analysisTab',fd)
print('tab.requestRefresh()',file=fd)
self.print_tab_settings(self.viewerTab, 'viewerTab',fd)
print('tab.requestRefresh()',file=fd)
self.print_tab_settings(self.fitterTab, 'fitterTab',fd)
print('tab.requestRefresh()',file=fd)
self.print_tab_settings(self.plottingTab, 'plottingTab',fd)
print('tab.requestRefresh()',file=fd)
fd.close()
return
def print_tab_settings(self,tab,title,fd,new_scenario = False):
print('#',file=fd)
print('#',file=fd)
if new_scenario:
print('self.notebook.addScenario(scenarioType=\"'+tab.scenarioType+'\")',file=fd )
print('tab = self.notebook.'+title,file=fd)
for item in tab.settings:
if item == 'Optical permittivity' and not tab.settings['Optical permittivity edited']:
pass
else:
value = tab.settings[item]
if 'str' in str(type(value)):
print('tab.settings[\''+item+'\'] = \'{}\''.format(tab.settings[item]),file=fd)
else:
print('tab.settings[\''+item+'\'] = ', tab.settings[item],file=fd)
def deleteScenario(self,index):
# Don't delete the last scenario
if len(self.scenarios) > 1:
self.tabs.removeTab(self.tabOffSet+index)
del self.scenarios[index]
for i,scenario in enumerate(self.scenarios):
scenario.setScenarioIndex(i)
self.tabs.setTabText(self.tabOffSet+i,'Scenario '+str(i+1))
if index-1 < 0:
index += 1
self.tabs.setCurrentIndex(self.tabOffSet+index-1)
return
def switchScenario(self,index,scenarioType=None):
debugger.print('switch for scenario', index+1)
# Replace the scenario with the other scenario type
scenario = self.scenarios[index]
debugger.print('Current scenario type', scenario.scenarioType, scenarioType)
#
# If scenarioType is specified in the call then force that type
# Otherwise switch type
#
if scenarioType == None:
if scenario.scenarioType == 'Powder':
self.currentScenarioTab = SingleCrystalScenarioTab
else:
self.currentScenarioTab = PowderScenarioTab
# end if
else:
if scenarioType == 'Powder':
self.currentScenarioTab = PowderScenarioTab
else:
self.currentScenarioTab = SingleCrystalScenarioTab
# end if
#end if
self.scenarios[index] = self.currentScenarioTab(self, self.debug)
scenario = self.scenarios[index]
debugger.print('Current scenario type now', scenario.scenarioType)
self.tabs.removeTab(self.tabOffSet+index)
self.tabs.insertTab(self.tabOffSet+index,scenario,'Scenario '+str(index+1) )
for i,scenario in enumerate(self.scenarios):
scenario.setScenarioIndex(i)
self.tabs.setTabText(self.tabOffSet+i,'Scenario '+str(i+1))
self.tabs.setCurrentIndex(self.tabOffSet+index)
self.scenarios[index].refresh(force=True)
return
def refresh(self,force=False):
if self.scripting:
debugger.print('Notebook aborting refresh because of scripting')
return
debugger.print('Notebook refresh changed',force)
ntabs = 2 + len(self.scenarios) + 4
self.mainTab.refresh(force=force)
self.settingsTab.refresh(force=force)
for tab in self.scenarios:
tab.refresh(force=force)
self.tabs.setCurrentIndex(ntabs-5)
self.plottingTab.refresh(force=force)
self.tabs.setCurrentIndex(ntabs-4)
self.analysisTab.refresh(force=force)
self.tabs.setCurrentIndex(ntabs-3)
self.viewerTab.refresh(force=force)
self.tabs.setCurrentIndex(ntabs-2)
self.fitterTab.refresh(force=force)
self.tabs.setCurrentIndex(ntabs-1)
# Sets the open tab to be the plotterTab
self.tabs.setCurrentIndex(ntabs-4)
def newrefresh(self,force=False):
if self.scripting:
debugger.print('Notebook aborting refresh because of scripting')
return
debugger.print('Notebook refresh changed',force)
ntabs = 2 + len(self.scenarios) + 4
self.mainTab.refresh(force=force)
self.settingsTab.requestRefresh()
for tab in self.scenarios:
tab.requestRefresh()
self.tabs.setCurrentIndex(ntabs-5)
self.plottingTab.requestRefresh()
self.tabs.setCurrentIndex(ntabs-4)
self.analysisTab.requestRefresh()
self.tabs.setCurrentIndex(ntabs-3)
self.viewerTab.requestRefresh()
self.tabs.setCurrentIndex(ntabs-2)
self.fitterTab.requestRefresh()
self.tabs.setCurrentIndex(ntabs-1)
self.tabs.setCurrentIndex(ntabs-4)
def writeSpreadsheet(self):
debugger.print('Write spreadsheet')
self.mainTab.writeSpreadsheet()
self.settingsTab.writeSpreadsheet()
self.analysisTab.writeSpreadsheet()
self.plottingTab.writeSpreadsheet()
def on_tabs_currentChanged(self, tabindex):
debugger.print('Tab index changed', tabindex)
#
# If scripting do not refresh tabs
#
if self.scripting:
debugger.print('Exiting on_tabs_currentChanged without refreshing')
return
# Number of tabs
ntabs = 2 + len(self.scenarios) + 4
debugger.print('Number of tabs',ntabs)
if tabindex == ntabs-1:
# fitter tab
debugger.print('Calling fitterTab refresh')
self.fitterTab.refresh()
elif tabindex == ntabs-2:
# viewer tab
debugger.print('Calling viewerTab refresh')
self.viewerTab.refresh()
elif tabindex == ntabs-3:
# analysis tab
debugger.print('Calling analysisTab refresh')
self.analysisTab.refresh()
elif tabindex == ntabs-4:
# plottings tab
debugger.print('Calling plottingTab refresh')
self.plottingTab.refresh()
debugger.print('Exiting on_tabs_currentChanged()')
#jk self.old_tab_index = tabindex
def keyPressEvent(self, e):
if (e.key() == Qt.Key_S) and QApplication.keyboardModifiers() and Qt.ControlModifier:
print('Control S has been pressed')
self.print_settings()
elif (e.key() == Qt.Key_C) and QApplication.keyboardModifiers() and Qt.ControlModifier:
print('Control C has been pressed')
print('The program will close down')
sys.exit()
def progressbars_set_maximum( self, maximum ):
self.progressbar_status = 0
self.progressbar_maximum = maximum
for bar in self.progressbars:
bar.setMaximum(maximum)
bar.setValue(self.progressbar_status)
return
def progressbars_update( self, increment=1 ):
self.progressbar_status += increment
for bar in self.progressbars:
bar.setValue(self.progressbar_status)
return
def progressbars_add( self, bar ):
self.progressbars.append(bar)
bar.setMaximum(self.progressbar_maximum)
bar.setValue(self.progressbar_status)
return
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import address_scope
from neutron_lib.api.definitions import agent as agent_def
from neutron_lib.api.definitions import allowedaddresspairs
from neutron_lib.api.definitions import auto_allocated_topology
from neutron_lib.api.definitions import availability_zone as az_def
from neutron_lib.api.definitions import default_subnetpools
from neutron_lib.api.definitions import dns
from neutron_lib.api.definitions import expose_port_forwarding_in_fip
from neutron_lib.api.definitions import external_net
from neutron_lib.api.definitions import extra_dhcp_opt
from neutron_lib.api.definitions import extraroute
from neutron_lib.api.definitions import filter_validation
from neutron_lib.api.definitions import fip_pf_description
from neutron_lib.api.definitions import fip_port_details
from neutron_lib.api.definitions import floating_ip_port_forwarding
from neutron_lib.api.definitions import l3
from neutron_lib.api.definitions import l3_ext_gw_mode
from neutron_lib.api.definitions import logging
from neutron_lib.api.definitions import multiprovidernet
from neutron_lib.api.definitions import network_availability_zone
from neutron_lib.api.definitions import network_ip_availability
from neutron_lib.api.definitions import network_mtu
from neutron_lib.api.definitions import network_mtu_writable
from neutron_lib.api.definitions import pagination
from neutron_lib.api.definitions import port_device_profile
from neutron_lib.api.definitions import port_numa_affinity_policy
from neutron_lib.api.definitions import port_resource_request
from neutron_lib.api.definitions import port_security
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import project_id
from neutron_lib.api.definitions import provider_net
from neutron_lib.api.definitions import qos
from neutron_lib.api.definitions import qos_bw_limit_direction
from neutron_lib.api.definitions import qos_default
from neutron_lib.api.definitions import qos_rule_type_details
from neutron_lib.api.definitions import qos_rules_alias
from neutron_lib.api.definitions import rbac_address_scope
from neutron_lib.api.definitions import router_availability_zone as raz_def
from neutron_lib.api.definitions import security_groups_normalized_cidr
from neutron_lib.api.definitions import security_groups_remote_address_group
from neutron_lib.api.definitions import segment as seg_def
from neutron_lib.api.definitions import sorting
from neutron_lib.api.definitions import trunk
from neutron_lib.api.definitions import vlantransparent
from neutron_lib import constants
# NOTE(russellb) This remains in its own file (vs constants.py) because we want
# to be able to easily import it and export the info without any dependencies
# on external imports.
# NOTE(russellb) If you update these lists, please also update
# doc/source/admin/ovn/features.rst and the current release note.
# API extensions supported by the OVN L3 router service plugin (in addition
# to the mechanism-driver list below).
ML2_SUPPORTED_API_EXTENSIONS_OVN_L3 = [
    l3.ALIAS,
    extraroute.ALIAS,
    l3_ext_gw_mode.ALIAS,
    fip_port_details.ALIAS,
    pagination.ALIAS,
    'qos-fip',
    sorting.ALIAS,
    project_id.ALIAS,
    dns.ALIAS,
    agent_def.ALIAS,
    az_def.ALIAS,
    raz_def.ALIAS,
    ]
# API extensions supported by the OVN ML2 mechanism driver.
ML2_SUPPORTED_API_EXTENSIONS = [
    address_scope.ALIAS,
    agent_def.ALIAS,
    allowedaddresspairs.ALIAS,
    auto_allocated_topology.ALIAS,
    # availability_zone is required by network_availability_zone (listed
    # below); previously it was only declared in the OVN L3 plugin list.
    az_def.ALIAS,
    portbindings.ALIAS,
    default_subnetpools.ALIAS,
    external_net.ALIAS,
    extra_dhcp_opt.ALIAS,
    filter_validation.ALIAS,
    multiprovidernet.ALIAS,
    network_mtu.ALIAS,
    network_mtu_writable.ALIAS,
    network_availability_zone.ALIAS,
    network_ip_availability.ALIAS,
    port_device_profile.ALIAS,
    port_numa_affinity_policy.ALIAS,
    port_security.ALIAS,
    provider_net.ALIAS,
    port_resource_request.ALIAS,
    qos.ALIAS,
    qos_bw_limit_direction.ALIAS,
    qos_default.ALIAS,
    qos_rule_type_details.ALIAS,
    qos_rules_alias.ALIAS,
    'quotas',
    rbac_address_scope.ALIAS,
    'rbac-policies',
    'standard-attr-revisions',
    'security-group',
    security_groups_normalized_cidr.ALIAS,
    security_groups_remote_address_group.ALIAS,
    'standard-attr-description',
    constants.SUBNET_ALLOCATION_EXT_ALIAS,
    'standard-attr-tag',
    'standard-attr-timestamp',
    trunk.ALIAS,
    'quota_details',
    seg_def.ALIAS,
    expose_port_forwarding_in_fip.ALIAS,
    fip_pf_description.ALIAS,
    floating_ip_port_forwarding.ALIAS,
    vlantransparent.ALIAS,
    logging.ALIAS,
]
[OVN] Add availability zone to the ML2_SUPPORTED_API_EXTENSIONS list
Currently ML2_SUPPORTED_API_EXTENSIONS, the list of extensions
supported by the OVN mechanism driver, contained the
network_availability_zone extension. But that extension requires the
availability_zone extension, which was listed only in
ML2_SUPPORTED_API_EXTENSIONS_OVN_L3, the list of extensions
supported by the OVN L3 plugin.
To fix that, this patch adds availability_zone extension to the
ML2_SUPPORTED_API_EXTENSIONS list also.
Related-Bug: #1929676
Change-Id: If815cff18f83962233f3d6a535b18070690ce948
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import address_scope
from neutron_lib.api.definitions import agent as agent_def
from neutron_lib.api.definitions import allowedaddresspairs
from neutron_lib.api.definitions import auto_allocated_topology
from neutron_lib.api.definitions import availability_zone as az_def
from neutron_lib.api.definitions import default_subnetpools
from neutron_lib.api.definitions import dns
from neutron_lib.api.definitions import expose_port_forwarding_in_fip
from neutron_lib.api.definitions import external_net
from neutron_lib.api.definitions import extra_dhcp_opt
from neutron_lib.api.definitions import extraroute
from neutron_lib.api.definitions import filter_validation
from neutron_lib.api.definitions import fip_pf_description
from neutron_lib.api.definitions import fip_port_details
from neutron_lib.api.definitions import floating_ip_port_forwarding
from neutron_lib.api.definitions import l3
from neutron_lib.api.definitions import l3_ext_gw_mode
from neutron_lib.api.definitions import logging
from neutron_lib.api.definitions import multiprovidernet
from neutron_lib.api.definitions import network_availability_zone
from neutron_lib.api.definitions import network_ip_availability
from neutron_lib.api.definitions import network_mtu
from neutron_lib.api.definitions import network_mtu_writable
from neutron_lib.api.definitions import pagination
from neutron_lib.api.definitions import port_device_profile
from neutron_lib.api.definitions import port_numa_affinity_policy
from neutron_lib.api.definitions import port_resource_request
from neutron_lib.api.definitions import port_security
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import project_id
from neutron_lib.api.definitions import provider_net
from neutron_lib.api.definitions import qos
from neutron_lib.api.definitions import qos_bw_limit_direction
from neutron_lib.api.definitions import qos_default
from neutron_lib.api.definitions import qos_rule_type_details
from neutron_lib.api.definitions import qos_rules_alias
from neutron_lib.api.definitions import rbac_address_scope
from neutron_lib.api.definitions import router_availability_zone as raz_def
from neutron_lib.api.definitions import security_groups_normalized_cidr
from neutron_lib.api.definitions import security_groups_remote_address_group
from neutron_lib.api.definitions import segment as seg_def
from neutron_lib.api.definitions import sorting
from neutron_lib.api.definitions import trunk
from neutron_lib.api.definitions import vlantransparent
from neutron_lib import constants
# NOTE(russellb) This remains in its own file (vs constants.py) because we want
# to be able to easily import it and export the info without any dependencies
# on external imports.
# NOTE(russellb) If you update these lists, please also update
# doc/source/admin/ovn/features.rst and the current release note.
# API extensions supported only when the OVN L3 router service plugin is
# enabled (in addition to ML2_SUPPORTED_API_EXTENSIONS below).
ML2_SUPPORTED_API_EXTENSIONS_OVN_L3 = [
    l3.ALIAS,
    extraroute.ALIAS,
    l3_ext_gw_mode.ALIAS,
    fip_port_details.ALIAS,
    pagination.ALIAS,
    'qos-fip',
    sorting.ALIAS,
    project_id.ALIAS,
    dns.ALIAS,
    agent_def.ALIAS,
    az_def.ALIAS,  # prerequisite of router_availability_zone (raz_def)
    raz_def.ALIAS,
]
# API extensions supported by the OVN ML2 mechanism driver itself.
ML2_SUPPORTED_API_EXTENSIONS = [
    address_scope.ALIAS,
    agent_def.ALIAS,
    allowedaddresspairs.ALIAS,
    auto_allocated_topology.ALIAS,
    # availability_zone is a prerequisite of network_availability_zone
    # below (Related-Bug: #1929676).
    az_def.ALIAS,
    portbindings.ALIAS,
    default_subnetpools.ALIAS,
    external_net.ALIAS,
    extra_dhcp_opt.ALIAS,
    filter_validation.ALIAS,
    multiprovidernet.ALIAS,
    network_mtu.ALIAS,
    network_mtu_writable.ALIAS,
    network_availability_zone.ALIAS,
    network_ip_availability.ALIAS,
    port_device_profile.ALIAS,
    port_numa_affinity_policy.ALIAS,
    port_security.ALIAS,
    provider_net.ALIAS,
    port_resource_request.ALIAS,
    qos.ALIAS,
    qos_bw_limit_direction.ALIAS,
    qos_default.ALIAS,
    qos_rule_type_details.ALIAS,
    qos_rules_alias.ALIAS,
    'quotas',
    rbac_address_scope.ALIAS,
    'rbac-policies',
    'standard-attr-revisions',
    'security-group',
    security_groups_normalized_cidr.ALIAS,
    security_groups_remote_address_group.ALIAS,
    'standard-attr-description',
    constants.SUBNET_ALLOCATION_EXT_ALIAS,
    'standard-attr-tag',
    'standard-attr-timestamp',
    trunk.ALIAS,
    'quota_details',
    seg_def.ALIAS,
    expose_port_forwarding_in_fip.ALIAS,
    fip_pf_description.ALIAS,
    floating_ip_port_forwarding.ALIAS,
    vlantransparent.ALIAS,
    logging.ALIAS,
]
|
import logging
from django.conf import settings
from django.urls import reverse
from astrobin.models import Image
from astrobin.stories import add_story
from astrobin_apps_notifications.services import NotificationsService
from astrobin_apps_notifications.utils import push_notification, build_notification_url
from astrobin_apps_users.services import UserService
from nested_comments.models import NestedComment
log = logging.getLogger('apps')
class CommentNotificationsService:
    """Sends notifications and home-page stories triggered by a NestedComment."""

    def __init__(self, comment):
        # type: (NestedComment) -> None
        self.comment = comment

    def send_notifications(self, force=False):
        """Notify interested users about this comment.

        Pending-moderation comments are skipped entirely unless ``force``
        is True (used right after a moderator approves the comment).
        """
        if self.comment.pending_moderation and not force:
            return

        instance = self.comment
        model_class = instance.content_type.model_class()
        obj = instance.content_type.get_object_for_this_type(id=instance.object_id)
        url = settings.BASE_URL + instance.get_absolute_url()

        if model_class == Image:
            # Respect shadow-bans: the banned author's activity must not
            # generate any notification towards the image owner.
            if UserService(obj.user).shadow_bans(instance.author):
                log.info("Skipping notification for comment because %d shadow-bans %d" % (
                    obj.user.pk, instance.author.pk))
                return

            # Reply: notify the parent comment's author (not for self-replies).
            if instance.parent and \
                    instance.parent.author != instance.author and \
                    not instance.pending_moderation:
                push_notification(
                    [instance.parent.author], instance.author, 'new_comment_reply',
                    {
                        'url': build_notification_url(url, instance.author),
                        'user': instance.author.userprofile.get_display_name(),
                        'user_url': settings.BASE_URL + reverse(
                            'user_page', kwargs={'username': instance.author.username}),
                    }
                )

            # Notify the image owner about a new comment, unless the owner
            # wrote it or was already notified above as the parent's author.
            if instance.author != obj.user and \
                    (instance.parent is None or instance.parent.author != obj.user) and \
                    not instance.pending_moderation:
                push_notification(
                    [obj.user], instance.author, 'new_comment',
                    {
                        'url': build_notification_url(url, instance.author),
                        'user': instance.author.userprofile.get_display_name(),
                        'user_url': settings.BASE_URL + reverse(
                            'user_page', kwargs={'username': instance.author.username}),
                    }
                )

            # BUG FIX: when a moderated comment has just been approved the
            # caller passes force=True while pending_moderation may still be
            # set on this instance; without the `force or` clause the
            # home-page story was silently skipped for approved comments.
            if (force or not instance.pending_moderation) and not obj.is_wip:
                add_story(instance.author,
                          verb='VERB_COMMENTED_IMAGE',
                          action_object=instance,
                          target=obj)

    def send_approval_notification(self):
        """Tell the author their comment passed moderation."""
        if not self.comment.pending_moderation:
            push_notification([self.comment.author], None, 'comment_approved', {
                'url': build_notification_url(settings.BASE_URL + self.comment.get_absolute_url())
            })

    @staticmethod
    def send_moderation_required_email():
        """E-mail superusers a link to the pending-moderation admin queue."""
        NotificationsService.email_superusers(
            'New comment needs moderation',
            '%s/admin/nested_comments/nestedcomment/?pending_moderation__exact=1' % settings.BASE_URL
        )
(comments) Force adding a home page story when a comment has just been approved (#2281)
import logging
from django.conf import settings
from django.urls import reverse
from astrobin.models import Image
from astrobin.stories import add_story
from astrobin_apps_notifications.services import NotificationsService
from astrobin_apps_notifications.utils import push_notification, build_notification_url
from astrobin_apps_users.services import UserService
from nested_comments.models import NestedComment
log = logging.getLogger('apps')
class CommentNotificationsService:
    """Sends notifications and home-page stories triggered by a NestedComment."""

    def __init__(self, comment):
        # type: (NestedComment) -> None
        self.comment = comment

    def send_notifications(self, force=False):
        """Notify interested users about this comment.

        Pending-moderation comments are skipped entirely unless ``force``
        is True (used right after a moderator approves the comment).
        """
        if self.comment.pending_moderation and not force:
            return
        instance = self.comment
        model_class = instance.content_type.model_class()
        obj = instance.content_type.get_object_for_this_type(id=instance.object_id)
        url = settings.BASE_URL + instance.get_absolute_url()
        if model_class == Image:
            # Respect shadow-bans: the banned author's activity must not
            # generate any notification towards the image owner.
            if UserService(obj.user).shadow_bans(instance.author):
                log.info("Skipping notification for comment because %d shadow-bans %d" % (
                    obj.user.pk, instance.author.pk))
                return
            # Reply: notify the parent comment's author (not for self-replies).
            if instance.parent and \
                    instance.parent.author != instance.author and \
                    not instance.pending_moderation:
                push_notification(
                    [instance.parent.author], instance.author, 'new_comment_reply',
                    {
                        'url': build_notification_url(url, instance.author),
                        'user': instance.author.userprofile.get_display_name(),
                        'user_url': settings.BASE_URL + reverse(
                            'user_page', kwargs={'username': instance.author.username}),
                    }
                )
            # Notify the image owner about a new comment, unless the owner
            # wrote it or was already notified above as the parent's author.
            if instance.author != obj.user and \
                    (instance.parent is None or instance.parent.author != obj.user) and \
                    not instance.pending_moderation:
                push_notification(
                    [obj.user], instance.author, 'new_comment',
                    {
                        'url': build_notification_url(url, instance.author),
                        'user': instance.author.userprofile.get_display_name(),
                        'user_url': settings.BASE_URL + reverse(
                            'user_page', kwargs={'username': instance.author.username}),
                    }
                )
            # `force` covers the just-approved case, where pending_moderation
            # may still be set on this instance but the story must be added.
            if (force or not instance.pending_moderation) and not obj.is_wip:
                add_story(instance.author,
                          verb='VERB_COMMENTED_IMAGE',
                          action_object=instance,
                          target=obj)

    def send_approval_notification(self):
        """Tell the author their comment passed moderation."""
        if not self.comment.pending_moderation:
            push_notification([self.comment.author], None, 'comment_approved', {
                'url': build_notification_url(settings.BASE_URL + self.comment.get_absolute_url())
            })

    @staticmethod
    def send_moderation_required_email():
        """E-mail superusers a link to the pending-moderation admin queue."""
        NotificationsService.email_superusers(
            'New comment needs moderation',
            '%s/admin/nested_comments/nestedcomment/?pending_moderation__exact=1' % settings.BASE_URL
        )
|
Update the PSNR calculation border
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
from tvcm import project as project_module
class TraceViewerProject(project_module.Project):
  """tvcm Project describing the trace-viewer source tree.

  Generalized: embedders may pass extra source directories via
  ``other_paths`` (default None keeps the original behavior).
  """

  trace_viewer_path = os.path.abspath(os.path.join(
      os.path.dirname(__file__), '..'))

  src_path = os.path.abspath(os.path.join(
      trace_viewer_path, 'trace_viewer'))

  trace_viewer_third_party_path = os.path.abspath(os.path.join(
      trace_viewer_path, 'third_party'))

  jszip_path = os.path.abspath(os.path.join(
      trace_viewer_third_party_path, 'jszip'))

  test_data_path = os.path.join(trace_viewer_path, 'test_data')
  skp_data_path = os.path.join(trace_viewer_path, 'skp_data')

  def __init__(self, other_paths=None):
    # Always include trace-viewer's own sources, then any embedder paths.
    paths = [self.src_path, self.jszip_path]
    if other_paths:
      paths.extend(other_paths)
    super(TraceViewerProject, self).__init__(paths)
Allow other_paths to be passed into TraceViewerProject
This allows external embedders to subclass TraceViewerProject and thus
use trace viewer.
git-svn-id: 3a56fcae908c7e16d23cb53443ea4795ac387cf2@1198 0e6d7f2b-9903-5b78-7403-59d27f066143
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
from tvcm import project as project_module
class TraceViewerProject(project_module.Project):
  """tvcm Project describing the trace-viewer source tree.

  Embedders may supply additional source directories via ``other_paths``.
  """

  trace_viewer_path = os.path.abspath(
      os.path.join(os.path.dirname(__file__), '..'))
  src_path = os.path.abspath(
      os.path.join(trace_viewer_path, 'trace_viewer'))
  trace_viewer_third_party_path = os.path.abspath(
      os.path.join(trace_viewer_path, 'third_party'))
  jszip_path = os.path.abspath(
      os.path.join(trace_viewer_third_party_path, 'jszip'))
  test_data_path = os.path.join(trace_viewer_path, 'test_data')
  skp_data_path = os.path.join(trace_viewer_path, 'skp_data')

  def __init__(self, other_paths=None):
    # trace-viewer's own sources first, then any embedder-supplied paths.
    paths = [self.src_path, self.jszip_path]
    paths.extend(other_paths or [])
    super(TraceViewerProject, self).__init__(paths)
|
import difflib
import re
import unicodecsv as csv
from ..edit import Edit
from pyutils import print_progress
from .tokenize import default_tokenize_func
MULTIPLE_PUNCT_REGEX = re.compile(r'([.!?]){2,}')
MULTIPLE_SPACE_REGEX = re.compile(r'([ ]){2,}')
GARBAGE_REGEX = re.compile(r'[^\w\s]')
def _get_line_num(edit_file):
line_n = 0
with open(edit_file) as input:
line_n = sum(1 for _ in input)
return line_n
def _prefilter_line(row):
edits = []
for edit in row:
edit = edit.strip()
if '\n' in edit:
print 'LOL'
return
# replace special formatting
edit = edit.replace('*', '')
edit = MULTIPLE_PUNCT_REGEX.sub('\g<1>', edit)
edit = MULTIPLE_SPACE_REGEX.sub('\g<1>', edit)
if not edit or edit == "null":
edit = None
edits.append(edit)
return edits
def _is_garbage(ngram1, ngram2):
"""Filter useless edits"""
if not ngram1 and not ngram2:
return True
elif ngram1.lower() == ngram2.lower():
return True
ngram1 = GARBAGE_REGEX.search(ngram1)
ngram2 = GARBAGE_REGEX.search(ngram2)
if ngram1 or ngram2:
return True
return False
def extract_edits(edit_file, substitutions=None, tokenize_func=default_tokenize_func):
    """
    Extracts contexts for all n-grams that were changed between two text versions.
    Uses the sequence matcher from difflib, which takes the longest subsequence.
    If ``substitutions'' argument is supplied, extract all n-grams matching substitutions,
    even if they were not changed.
    :returns: list of Edit objects
    """
    edit_n = 0
    edits = []
    line_n = _get_line_num(edit_file)
    with open(edit_file, 'r') as input:
        csvreader = csv.reader(input, delimiter='\t', encoding='utf-8')
        # HACK: patch __len__ onto the reader class so print_progress can
        # report a percentage; removed again after the loop.
        csvreader.__class__.__len__ = lambda x: line_n
        for row in print_progress(csvreader):
            row = _prefilter_line(row)
            # Robustness: _prefilter_line returns None for malformed rows.
            if not row:
                continue
            edit1, edit2 = row
            if edit1 is None or edit2 is None:
                continue
            # tokenize to words, since we want word diff
            edit1 = tokenize_func(edit1)
            edit2 = tokenize_func(edit2)
            context1 = ' '.join(edit1)
            context2 = ' '.join(edit2)
            for seq in difflib.SequenceMatcher(None, edit1, edit2).get_grouped_opcodes(0):
                for tag, i1, i2, j1, j2 in seq:
                    if tag == 'equal':
                        continue
                    ngram1, ngram2 = ' '.join(edit1[i1:i2]), ' '.join(edit2[j1:j2])
                    if _is_garbage(ngram1, ngram2):
                        continue
                    # TODO: works only for unigrams
                    # extract merged edits into unigrams if they match substitutions
                    if substitutions:
                        index1 = [i for i in range(i1, i2) if edit1[i] in substitutions]
                        index2 = [i for i in range(j1, j2) if edit2[i] in substitutions]
                        # BUG FIX: require exactly ONE matching unigram on
                        # EACH side. The previous sum-based check
                        # (len(index1) + len(index2) != 2) accepted 0+2
                        # splits and crashed with IndexError on index1[0].
                        if len(index1) != 1 or len(index2) != 1 or index1[0] != index2[0]:
                            continue
                        ngram1, ngram2 = edit1[index1[0]], edit2[index2[0]]
                        i1, i2 = index1[0], index1[0]+1
                        j1, j2 = index2[0], index2[0]+1
                    edits.append(Edit(ngram1, ngram2, context1, context2, (i1, i2), (j1, j2)))
                    edit_n += 1
            # Add all other substitutions if supplied
            # TODO: works only for unigrams
            if substitutions:
                for i, unigram in enumerate(edit2):
                    if unigram in substitutions:
                        edits.append(Edit(unigram, unigram, context1, context2, (i, i+1), (i, i+1)))
                        edit_n += 1
        del csvreader.__class__.__len__
    print('Total edits extracted: %d' % edit_n)
    return edits
def extract_filtered(edit_file, filter_func, tokenize_func=default_tokenize_func):
    """
    Extracts contexts for all words from edit_file that satisfy conditions in ``filter_func``.
    Only second text version is used.
    :returns: list of Edit objects
    """
    edit_n = 0
    edits = []
    line_n = _get_line_num(edit_file)
    with open(edit_file, 'r') as input:
        csvreader = csv.reader(input, delimiter='\t', encoding='utf-8')
        # HACK: patch __len__ onto the reader class so print_progress can
        # report a percentage; removed again after the loop.
        csvreader.__class__.__len__ = lambda x: line_n
        for row in print_progress(csvreader):
            row = _prefilter_line(row)
            _, edit2 = row
            if edit2 is None:
                continue
            # tokenize to words, since we want word diff
            edit2 = tokenize_func(edit2)
            context2 = ' '.join(edit2)
            for i1, word in enumerate(edit2):
                if filter_func(word):
                    # Identity edit: same word on both sides, same span.
                    edits.append(Edit(word, word, context2, context2, (i1, i1+1), (i1, i1+1)))
                    edit_n += 1
        del csvreader.__class__.__len__
    print 'Total edits extracted:', edit_n
    return edits
Fix edit extraction: require exactly one substitution match on each side
import difflib
import re
import unicodecsv as csv
from ..edit import Edit
from pyutils import print_progress
from .tokenize import default_tokenize_func
MULTIPLE_PUNCT_REGEX = re.compile(r'([.!?]){2,}')
MULTIPLE_SPACE_REGEX = re.compile(r'([ ]){2,}')
GARBAGE_REGEX = re.compile(r'[^\w\s]')
def _get_line_num(edit_file):
line_n = 0
with open(edit_file) as input:
line_n = sum(1 for _ in input)
return line_n
def _prefilter_line(row):
    """Normalize one TSV row of edits.

    Returns a list with one entry per column, where empty or "null"
    columns become None; returns None for rows with embedded newlines.
    """
    edits = []
    for edit in row:
        edit = edit.strip()
        if '\n' in edit:
            # NOTE(review): leftover debug output (Python-2-only syntax);
            # presumably should be removed or replaced with real logging.
            print 'LOL'
            return
        # replace special formatting; collapse repeated punctuation/spaces
        edit = edit.replace('*', '')
        edit = MULTIPLE_PUNCT_REGEX.sub('\g<1>', edit)
        edit = MULTIPLE_SPACE_REGEX.sub('\g<1>', edit)
        if not edit or edit == "null":
            edit = None
        edits.append(edit)
    return edits
def _is_garbage(ngram1, ngram2):
"""Filter useless edits"""
if not ngram1 and not ngram2:
return True
elif ngram1.lower() == ngram2.lower():
return True
ngram1 = GARBAGE_REGEX.search(ngram1)
ngram2 = GARBAGE_REGEX.search(ngram2)
if ngram1 or ngram2:
return True
return False
def extract_edits(edit_file, substitutions=None, tokenize_func=default_tokenize_func):
    """
    Extracts contexts for all n-grams that were changed between two text versions.
    Uses most sequence matcher from difflib, which takes most longest subsequence.
    If ``substitutions'' argument is supplied, extract all n-gram matching substitutions,
    even if they were not changed.
    :returns: list of Edit objects
    """
    edit_n = 0
    edits = []
    line_n = _get_line_num(edit_file)
    with open(edit_file, 'r') as input:
        csvreader = csv.reader(input, delimiter='\t', encoding='utf-8')
        # HACK: patch __len__ onto the reader class so print_progress can
        # report a percentage; removed again after the loop.
        csvreader.__class__.__len__ = lambda x: line_n
        for row in print_progress(csvreader):
            row = _prefilter_line(row)
            edit1, edit2 = row
            if edit1 is None or edit2 is None:
                continue
            # tokenize to words, since we want word diff
            edit1 = tokenize_func(edit1)
            edit2 = tokenize_func(edit2)
            context1 = ' '.join(edit1)
            context2 = ' '.join(edit2)
            for seq in difflib.SequenceMatcher(None, edit1, edit2).get_grouped_opcodes(0):
                for tag, i1, i2, j1, j2 in seq:
                    if tag == 'equal':
                        continue
                    ngram1, ngram2 = ' '.join(edit1[i1:i2]), ' '.join(edit2[j1:j2])
                    if _is_garbage(ngram1, ngram2):
                        continue
                    # TODO: works only for unigrams
                    # extract merged edits into unigrams is match substitutions
                    if substitutions:
                        index1 = [i for i in range(i1, i2) if edit1[i] in substitutions]
                        index2 = [i for i in range(j1, j2) if edit2[i] in substitutions]
                        # Keep only edits with exactly one substitution token
                        # on each side, at the same token position.
                        if len(index1) !=1 or len(index2) != 1 or index1[0] != index2[0]:
                            continue
                        ngram1, ngram2 = edit1[index1[0]], edit2[index2[0]]
                        i1, i2 = index1[0], index1[0]+1
                        j1, j2 = index2[0], index2[0]+1
                    edits.append(Edit(ngram1, ngram2, context1, context2, (i1, i2), (j1, j2)))
                    edit_n += 1
            # Add all other substitution if supplied
            # TODO: works only for unigrams
            if substitutions:
                for i, unigram in enumerate(edit2):
                    if unigram in substitutions:
                        edits.append(Edit(unigram, unigram, context1, context2, (i, i+1), (i, i+1)))
                        edit_n += 1
        del csvreader.__class__.__len__
    print 'Total edits extracted:', edit_n
    return edits
def extract_filtered(edit_file, filter_func, tokenize_func=default_tokenize_func):
    """
    Extracts contexts for all words from edit_file that satisfy conditions in ``filter_func``.
    Only second text version is used.
    :returns: list of Edit objects
    """
    edit_n = 0
    edits = []
    line_n = _get_line_num(edit_file)
    with open(edit_file, 'r') as input:
        csvreader = csv.reader(input, delimiter='\t', encoding='utf-8')
        # HACK: patch __len__ onto the reader class so print_progress can
        # report a percentage; removed again after the loop.
        csvreader.__class__.__len__ = lambda x: line_n
        for row in print_progress(csvreader):
            row = _prefilter_line(row)
            _, edit2 = row
            if edit2 is None:
                continue
            # tokenize to words, since we want word diff
            edit2 = tokenize_func(edit2)
            context2 = ' '.join(edit2)
            for i1, word in enumerate(edit2):
                if filter_func(word):
                    # Identity edit: same word on both sides, same span.
                    edits.append(Edit(word, word, context2, context2, (i1, i1+1), (i1, i1+1)))
                    edit_n += 1
        del csvreader.__class__.__len__
    print 'Total edits extracted:', edit_n
    return edits
|
# -*- coding: utf-8 -*-
import csv
import sys
import re
from django.contrib.gis.geos import Point
from django.core.exceptions import ObjectDoesNotExist
from django.db.utils import IntegrityError
from dplace_app.models import *
# ISO codes referenced by input rows but absent from the database;
# reported at the end of run().
MISSING_CODES = []
# Modes whose input file is processed one csv.DictReader row at a time
# (other modes, e.g. 'ea_codes', consume the whole open file).
LOAD_BY_ROW=('iso', 'env_vals',
             'langs', 'iso_lat_long',
             'ea_soc', 'ea_vars', 'ea_vals',
             'bf_soc', 'bf_vars', 'bf_vals')
def run(file_name=None, mode=None):
    """Load one D-PLACE CSV file into the database.

    *mode* selects the loader function; modes listed in LOAD_BY_ROW are
    dispatched per DictReader row, the code-book modes get the open file.
    """
    # read the csv file
    with open(file_name, 'rb') as csvfile:
        if mode in LOAD_BY_ROW:
            csv_reader = csv.DictReader(csvfile)
            for dict_row in csv_reader:
                if mode == 'iso':
                    load_isocode(dict_row)
                elif mode == 'iso_lat_long':
                    load_iso_lat_long(dict_row)
                elif mode == 'ea_soc':
                    load_ea_society(dict_row)
                elif mode == 'env_vals':
                    load_environmental(dict_row)
                elif mode == 'ea_vars':
                    load_ea_var(dict_row)
                elif mode == 'ea_vals':
                    load_ea_val(dict_row)
                elif mode == 'langs':
                    load_lang(dict_row)
                elif mode == 'bf_soc':
                    load_bf_society(dict_row)
                elif mode =='bf_vars':
                    load_bf_var(dict_row)
        elif mode == 'ea_codes':
            # Code-book parsing needs look-ahead state, so it gets the file.
            load_ea_codes(csvfile)
        elif mode == 'bf_codes':
            load_bf_codes(csvfile)
    # Report any ISO codes we could not resolve while loading.
    if len(MISSING_CODES) > 0:
        print "Missing ISO Codes:"
        print '\n'.join(MISSING_CODES)
    if mode == 'ea_vals':
        # after loading values, populate society-level data from variable values
        postprocess_ea_societies()
    elif mode == 'env_vars':
        create_environmental_variables()
# get a value from a dictionary, searching the possible keys
def get_value(dict, possible_keys):
    """Return the value for the first of *possible_keys* present in *dict*.

    Returns None when none of the keys is present.
    """
    # NOTE: the first parameter shadows the builtin `dict`; the name is
    # kept for backward compatibility.
    for key in possible_keys:
        # FIX: direct membership test instead of `key in dict.keys()`,
        # which builds a fresh key list per iteration on Python 2.
        if key in dict:
            return dict[key]
    return None

def get_isocode(dict):
    """Return the row's ISO code, wherever the edition put it."""
    # ISO Code may appear in 'ISO' column (17th Ed Missing ISO codes)
    # or the 'ISO 693-3 code' column (17th Ed - ISO 693-3 - current)
    return get_value(dict,('ISO','ISO 693-3 code'))
def load_isocode(iso_dict):
    """Create an ISOCode row for the code found in *iso_dict*, if valid."""
    code = get_isocode(iso_dict)
    if code is None:
        print "ISO Code not found in row, skipping"
        return
    # ISO 693-3 codes are at most three characters long.
    if len(code) > 3:
        print "ISO Code '%s' too long, skipping" % code
        return
    ISOCode.objects.get_or_create(iso_code=code)
def load_iso_lat_long(iso_dict):
    """Attach a location Point to an existing ISOCode row."""
    code = iso_dict['ISO']
    found_code = None
    try:
        found_code = ISOCode.objects.get(iso_code=code)
    except ObjectDoesNotExist:
        print "Tried to attach Lat/Long to ISO Code %s but code not found" % code
        return
    # GEOS Point takes (x, y) == (longitude, latitude).
    location = Point(float(iso_dict['LMP_LON']),float(iso_dict['LMP_LAT']))
    found_code.location = location
    found_code.save()
# Maps CSV column names to the EnvironmentalVariable name/units stored in
# the database. An empty 'units' string means no unit is recorded for the
# variable in the source data.
ENVIRONMENTAL_MAP = {
    'AnnualMeanTemperature': {
        'name': 'Annual Mean Temperature',
        'units': '℃',
    },
    'AnnualTemperatureVariance': {
        'name': 'Annual Temperature Variance',
        'units': '℃',
    },
    'TemperatureConstancy': {
        'name': 'Temperature Constancy',
        'units': '',
    },
    'TemperatureContingency': {
        'name': 'Temperature Contingency',
        'units': '',
    },
    'TemperaturePredictability': {
        'name': 'Temperature Predictability',
        'units': '',
    },
    'AnnualMeanPrecipitation': {
        'name': 'Annual Mean Precipitation',
        'units': 'mm',
    },
    'AnnualPrecipitationVariance': {
        'name': 'Annual Precipitation Variance',
        'units': '',
    },
    'PrecipitationConstancy': {
        'name': 'Precipitation Constancy',
        'units': '',
    },
    'PrecipitationContingency': {
        'name': 'Precipitation Contingency',
        'units': '',
    },
    'PrecipitationPredictability': {
        'name': 'Precipitation Predictability',
        'units': '',
    },
    'BirdRichness': {
        'name': 'Bird Richness',
        'units': '',
    },
    'MammalRichness': {
        'name': 'Mammal Richness',
        'units': '',
    },
    'AmphibianRichness': {
        'name': 'Amphibian Richness',
        'units': '',
    },
    'VascularPlantsRichness': {
        'name': 'Vascular Plants Richness',
        'units': '',
    },
    # TODO: EcoRegion! (text)
    'Elevation': {
        'name': 'Elevation',
        'units': '',
    },
    'Slope': {
        'name': 'Slope',
        'units': '',
    },
    # TODO: Coastal (Bool)
    'NetPrimaryProduction': {
        'name': 'Net Primary Production',
        'units': '',
    },
    'DurationOfGrowingSeason': {
        'name': 'Duration of Growing Season',
        'units': 'mo',
    },
    'MeanGrowingSeason.NPP': {
        'name': 'Mean Growing Season NPP',
        'units': '',
    },
    'InterYearVariance.GrowingSeason.NPP': {
        'name': 'Inter-Year Variance Growing Season NPP',
        'units': '',
    },
}
def iso_from_code(code):
    """Return the ISOCode row for *code*, or None for 'NA'/unknown codes."""
    if code == 'NA':
        return None
    try:
        return ISOCode.objects.get(iso_code=code)
    except ObjectDoesNotExist:
        return None
def create_environmental_variables():
    """Ensure an EnvironmentalVariable row exists for every mapped variable."""
    for var_dict in ENVIRONMENTAL_MAP.values():
        EnvironmentalVariable.objects.get_or_create(
            name=var_dict['name'], units=var_dict['units'])
def load_environmental(env_dict):
    """Create an Environmental record (plus its values) for one CSV row."""
    ext_id = env_dict['ID']
    source = env_dict['Source']
    # hack for B109 vs. 109
    if source == 'Binford' and ext_id.find('B') == -1:
        ext_id = 'B' + ext_id
    try:
        society = Society.objects.get(ext_id=ext_id, source=source)
    except ObjectDoesNotExist:
        print "Unable to find a Society object with ext_id %s and source %s, skipping..." % (ext_id, source)
        return
    # This limits the environmental data to one record per society record
    found_environmentals = Environmental.objects.filter(society=society)
    if len(found_environmentals) == 0:
        # GEOS Point takes (x, y) == (longitude, latitude).
        reported_latlon = Point(float(env_dict['Orig.longitude']),float(env_dict['Orig.latitude']))
        actual_latlon = Point(float(env_dict['longitude']), float(env_dict['latitude']))
        iso_code = iso_from_code(env_dict['iso'])
        # Create the base Environmental
        environmental = Environmental(society=society,
                                      reported_location=reported_latlon,
                                      actual_location=actual_latlon,
                                      iso_code=iso_code)
        environmental.save()
        for k in ENVIRONMENTAL_MAP:  # keys are the columns in the CSV file
            var_dict = ENVIRONMENTAL_MAP[k]
            try:
                # Get the variable
                variable = EnvironmentalVariable.objects.get(name=var_dict['name'])
            except ObjectDoesNotExist:
                print "Warning: Did not find an EnvironmentalVariable with name %s" % var_dict['name']
                continue
            # 'NA' marks a missing measurement in the source data.
            if env_dict[k] and env_dict[k] != 'NA':
                value = float(env_dict[k])
                EnvironmentalValue.objects.get_or_create(variable=variable,value=value,environmental=environmental)
def load_ea_society(society_dict):
    """Create an EA Society row (with linked language), if not yet present."""
    ext_id = society_dict['ID']
    source = 'EA'
    found_societies = Society.objects.filter(ext_id=ext_id,source=source)
    if len(found_societies) == 0:
        name = society_dict['Society_name_EA']
        iso_code = iso_from_code(society_dict['ISO693_3'])
        # Get the language
        language_name = society_dict['LangNam']
        try:
            language = Language.objects.get(name=language_name,iso_code=iso_code)
        except ObjectDoesNotExist:
            # A missing language is tolerated; the FK is simply left empty.
            language = None
            print "Warning: Creating society record for %s but no language found with name %s" % (ext_id, language_name)
        society = Society(ext_id=ext_id,
                          name=name,
                          source=source,
                          iso_code=iso_code,
                          language=language
                          )
        society.save()
def postprocess_ea_societies():
'''
Some of the EA Variable values represent data that is needed at the society level, e.g.
source and location
'''
try:
lon_var = VariableDescription.objects.get(name='Longitude')
lat_var = VariableDescription.objects.get(name='Latitude')
focal_year_var = VariableDescription.objects.get(name='Date: Year with Century')
except ObjectDoesNotExist:
print "Unable to find vars for Lon/Lat/Year. Have you loaded the ea_vars?"
for society in Society.objects.filter(source='EA'):
# Get location
try:
lon_val = society.variablecodedvalue_set.get(variable=lon_var)
lat_val = society.variablecodedvalue_set.get(variable=lat_var)
except ObjectDoesNotExist:
print "Unable to get lon/lat for society %s, skipping postprocessing" % society
continue
try:
location = Point(
float(lon_val.coded_value),
float(lat_val.coded_value)
)
society.location = location
except ValueError:
print "Unable to create Point from (%s,%s) for society %s" % (lon_val.coded_value, lat_val.coded_value, society)
# TODO: Get source, incl focal year
society.save()
def eavar_number_to_label(number):
    """Return the D-PLACE label for an EA variable number, e.g. 5 -> 'EA005'."""
    return 'EA' + str(number).zfill(3)
def load_ea_var(var_dict):
    """
    Variables are loaded from ea_variable_names+categories.csv for simplicity,
    but there is more detailed information in ea_codes.csv
    """
    try:
        number = int(var_dict['Variable number'])
    except ValueError:
        # Non-numeric rows (headers etc.) are silently skipped.
        return
    exclude = var_dict['Exclude from D-PLACE?']
    if exclude == '1':
        return
    label = eavar_number_to_label(number)
    name = var_dict['Variable'].strip()
    variable, created = VariableDescription.objects.get_or_create(label=label,name=name)
    index_categories = [clean_category(x) for x in var_dict['INDEXCATEGORY'].split(',')]
    # Currently max 1 niche category
    niche_categories = [clean_category(x) for x in var_dict['NICHECATEGORY'].split(',')]
    # when creating categories, ignore '?'
    for category_name in index_categories:
        index_category, created = VariableCategory.objects.get_or_create(name=category_name)
        variable.index_categories.add(index_category)
    for category_name in niche_categories:
        niche_category, created = VariableCategory.objects.get_or_create(name=category_name)
        variable.niche_categories.add(niche_category)
def clean_category(category):
    """Normalize a category cell: trim whitespace, capitalize first letter."""
    stripped = category.strip()
    return stripped.capitalize()
# Column layout of ea_codes.csv.
SORT_COLUMN = 0
VARIABLE_VNUMBER_COLUMN = 1
VARIABLE_NUMBER_COLUMN = 2
VARIABLE_NAME_COLUMN = 3
N_COLUMN = 4
CODE_COLUMN = 5
DESCRIPTION_COLUMN = 6

# e.g. N CODE DESCRIPTION
def row_is_headers(row):
    """True for the repeated header rows inside the code book."""
    return (row[CODE_COLUMN].strip() == 'CODE'
            and row[N_COLUMN].strip() == 'N'
            and row[DESCRIPTION_COLUMN].strip() == 'DESCRIPTION')

# e.g. 1 1 Gathering 1267
def row_is_def(row):
    """True for rows defining a new variable (numeric variable number)."""
    return row[VARIABLE_NUMBER_COLUMN].strip().isdigit()

# has a code value and a description text
# e.g. 706 0 0 - 5% Dependence
def row_is_data(row):
    """True for code/description rows: numeric N cell, empty variable number."""
    n_cell = row[N_COLUMN].strip()
    number_cell = row[VARIABLE_NUMBER_COLUMN].strip()
    # Code may be ., 0, or abc... so it's not a simple identifier
    return n_cell.isdigit() and not number_cell

# Junk rows
def row_is_skip(row):
    """True for junk rows (non-numeric sort column)."""
    return not row[SORT_COLUMN].strip().isdigit()
def load_ea_codes(csvfile=None):
    """Parse the EA code book, creating VariableCodeDescription rows.

    The file interleaves variable-definition rows with code/description
    rows; `variable` tracks the most recently seen definition so data
    rows can be attached to it.
    """
    number = None
    variable = None
    csv_reader = csv.reader(csvfile)
    for row in csv_reader:
        if row_is_skip(row):
            pass
        elif row_is_data(row):
            if variable is None:
                # Variable may have been excluded from D-PLACE, ignore this data row
                continue
            code = row[CODE_COLUMN].strip()
            n = row[N_COLUMN].strip()
            try:
                n = int(n)
            except ValueError:
                n = 0
            found_descriptions = VariableCodeDescription.objects.filter(variable=variable,code=code)
            if len(found_descriptions) == 0:
                # This won't help for things that specify a range or include the word or
                description = row[DESCRIPTION_COLUMN].strip()
                code_description = VariableCodeDescription(variable=variable,
                                                           code=code,
                                                           description=description,
                                                           n=n)
                code_description.save()
        elif row_is_headers(row):
            pass
        elif row_is_def(row):
            # get the variable number
            number = int(row[VARIABLE_NUMBER_COLUMN])
            try:
                # Some variables in the EA have been excluded from D-PLACE, so there
                # will be no VariableDescription object for them
                label = eavar_number_to_label(number)
                variable = VariableDescription.objects.get(label=label)
            except ObjectDoesNotExist:
                variable = None
        else:
            print "did not get anything from this row %s" % (','.join(row)).strip()
def load_ea_val(val_row):
    """Store one society's coded values for all EA variables (the v* columns)."""
    ext_id = val_row['ID'].strip()
    # find the existing society
    try:
        society = Society.objects.get(ext_id=ext_id)
    except ObjectDoesNotExist:
        print "Attempting to load EA values for %s but did not find an existing Society object" % ext_id
        return
    # get the keys that start with v
    for key in val_row.keys():
        if key.find('v') == 0:
            number = int(key[1:])
            label = eavar_number_to_label(number)
            value = val_row[key].strip()
            try:
                variable = VariableDescription.objects.get(label=label)
            except ObjectDoesNotExist:
                # Variable excluded from D-PLACE; skip its value.
                continue
            try:
                # Check for Code description if it exists.
                code = VariableCodeDescription.objects.get(variable=variable,code=value)
            except ObjectDoesNotExist:
                code = None
            try:
                variable_value = VariableCodedValue(variable=variable,
                                                   society=society,
                                                   coded_value=value,
                                                   code=code)
                variable_value.save()
            except IntegrityError:
                print "Unable to store value %s for var %s in society %s, already exists" % (value, variable, society)
def load_lang(lang_row):
    """Create Language, LanguageClass and LanguageClassification rows
    for one row of the languages CSV."""
    # Extract values from dictionary
    code = get_isocode(lang_row)
    if code is None:
        print "ISO Code not found in row, skipping"
        return
    language_name = get_value(lang_row,('Language name','NAM'))
    ethnologue_classification = get_value(lang_row,('Ethnologue Classification (unrevised)','Ethnologue classification (if applicable)'))
    # Class levels 1-3: revised family, then Class2/Class3 columns.
    family_names = [
        get_value(lang_row,('FAMILY-REVISED','FAMILY')),
        lang_row['Class2'],
        lang_row['Class3']
    ]
    # ISO Code
    isocode = iso_from_code(code) # Does not create new ISO Codes
    if isocode is None:
        print "No ISO Code found in database for %s, skipping language %s" % (code, language_name)
        add_missing_isocode(code)
        return
    # Language
    try:
        language = Language.objects.get(iso_code=isocode)
    except ObjectDoesNotExist:
        language = Language(name=language_name,
                            iso_code=isocode
                            )
        language.save()
    # Classes
    classes = []
    for i in range(3):
        level = i + 1
        name = family_names[i].strip()
        if len(name) == 0:
            # empty cell
            continue
        try:
            classes.append(LanguageClass.objects.get(level=level,name=name))
        except ObjectDoesNotExist:
            # New class node; its parent is the previous (shallower) class.
            if len(classes) > 0:
                parent = classes[-1]
            else:
                parent = None
            lang_class = LanguageClass(level=level, name=name, parent=parent)
            lang_class.save()
            classes.append(lang_class)
    # Finally, create the LanguageClassification
    classification_scheme = 'R' # Ethnologue17-Revised
    try:
        classification = LanguageClassification.objects.get(ethnologue_classification=ethnologue_classification)
    except ObjectDoesNotExist:
        # NOTE(review): classes[0] raises IndexError when all three family
        # cells are empty — confirm the input guarantees at least one class.
        class_family = classes[0]
        class_subfamily = classes[1] if len(classes) > 1 else None
        class_subsubfamily = classes[2] if len(classes) > 2 else None
        classification = LanguageClassification(scheme=classification_scheme,
                                                language=language,
                                                ethnologue_classification=ethnologue_classification,
                                                class_family=class_family,
                                                class_subfamily=class_subfamily,
                                                class_subsubfamily=class_subsubfamily,
                                                )
        classification.save()
def add_missing_isocode(isocode):
    # Record an ISO code that the data referenced but the database lacks;
    # run() prints the collected list when loading finishes.
    MISSING_CODES.append(isocode)
def load_bf_society(society_dict):
    """Create a Binford Society record from one CSV row, unless a society
    with the same external ID and source already exists."""
    ext_id = society_dict['ID']
    source = 'Binford'
    found_societies = Society.objects.filter(ext_id=ext_id,source=source)
    if len(found_societies) == 0:
        name = society_dict['STANDARD SOCIETY NAME Binford']
        iso_code = iso_from_code(society_dict['ISO693_3'])
        # Get the language
        language_name = society_dict['LangNam']
        try:
            language = Language.objects.get(name=language_name,iso_code=iso_code)
        except ObjectDoesNotExist:
            # society is still created, just without a language link
            language = None
            print "Warning: Creating society record for %s but no language found with name %s" % (ext_id, language_name)
        society = Society(ext_id=ext_id,
                          name=name,
                          source=source,
                          iso_code=iso_code,
                          language=language
                          )
        society.save()
def load_bf_var(var_dict):
    """
    Variables are loaded from binford_variable_names+categories.csv for simplicity,
    but there is more detailed information in bf_codebook.csv
    """
    label = var_dict['Field name'].strip()
    name = var_dict['Variable name'].strip()
    # NOTE(review): description is computed but never stored -- confirm
    # whether it should be passed to VariableDescription below.
    description = var_dict['Detailed description'].strip()
    variable, created = VariableDescription.objects.get_or_create(label=label,name=name)
    index_categories = [clean_category(x) for x in var_dict['IndexCategory'].split(',')]
    # Currently max 1 niche category
    niche_categories = [clean_category(x) for x in var_dict['NicheCategory'].split(',')]
    # when creating categories, ignore '?'
    for category_name in index_categories:
        index_category, created = VariableCategory.objects.get_or_create(name=category_name)
        variable.index_categories.add(index_category)
    for category_name in niche_categories:
        niche_category, created = VariableCategory.objects.get_or_create(name=category_name)
        variable.niche_categories.add(niche_category)
# Matches Binford codebook variable-definition cells such as 'B001_...'
VARIABLE_DEF_EXPRESSION = 'B[0-9]{3}_.*'
# Column indices into rows of bf_codebook.csv
BF_CODE_COLUMN_VARIABLE_DEF = 0
BF_CODE_COLUMN_VARIABLE_NAME = 6
BF_CODE_COLUMN_VARIABLE_DESC = 8
def read_binford_variable_def(csv_reader):
    '''
    Advances the CSV reader row-by-row until finding a row that starts with
    VARIABLE_DEF_EXPRESSION

    Returns a dict with 'field', 'name' and 'desc' keys, or None when the
    reader is exhausted.  (Python 2: relies on csv_reader.next().)
    '''
    row, matched = None, None
    while matched is None:
        try:
            row = csv_reader.next()
        except StopIteration:
            # no more variable definitions in the file
            return None
        matched = re.match(VARIABLE_DEF_EXPRESSION, row[BF_CODE_COLUMN_VARIABLE_DEF])
    variable_def = dict()
    variable_def['field'] = row[BF_CODE_COLUMN_VARIABLE_DEF]
    variable_def['name'] = row[BF_CODE_COLUMN_VARIABLE_NAME]
    variable_def['desc'] = row[BF_CODE_COLUMN_VARIABLE_DESC]
    return variable_def
def read_binford_header_row(csv_reader):
    '''
    Advances the CSV reader row-by-row until finding a row with CODE,DESCRIPTION,NOTES

    Returns that header row, or None when the reader is exhausted.
    (Python 2: relies on csv_reader.next().)
    '''
    row, matched = None, False
    while not matched:
        try:
            row = csv_reader.next()
        except StopIteration:
            # ran out of rows before finding a header
            return None
        matched = row[0].strip() == 'CODE'
    return row
def read_binford_code_rows(csv_reader):
    '''
    Advances the CSV reader row-by-row, collecting CODE / DESCRIPTION / NOTES / PAGE rows
    until a blank row is found

    Returns a list of dicts keyed 'code', 'description', 'notes', 'page'.
    '''
    codes, done = [], False
    while not done:
        try:
            row = csv_reader.next()
        except StopIteration:
            done = True
            break
        # a blank leading cell terminates this variable's code section
        if len(row[0].strip()) == 0:
            done = True
        else:
            # NOTE(review): assumes every code row has at least 4 columns --
            # confirm against bf_codebook.csv
            codes.append({'code': row[0].strip(),
                          'description': row[1].strip(),
                          'notes': row[2].strip(),
                          'page': row[3].strip()
                          })
    return codes
def load_bf_codes(csvfile=None):
    """Parse the Binford codebook CSV and create VariableCodeDescription rows.

    The file is a repeating sequence of: a variable-definition row, a header
    row (CODE/DESCRIPTION/NOTES), then one row per code until a blank row.
    """
    csv_reader = csv.reader(csvfile)
    # parse the file, looking for variable def, then header, then codes
    variable_def = read_binford_variable_def(csv_reader)
    while variable_def is not None:
        read_binford_header_row(csv_reader)
        codes = read_binford_code_rows(csv_reader)
        variable = VariableDescription.objects.get(label=variable_def['field'])
        for code in codes:
            # Consistency fix with the revised loader: codebook heading rows
            # like 'class: ...' are section headings, not real codes -- skip.
            if code['code'].startswith('class:'):
                print("Code %s starts with 'class:', skipping" % code['code'])
                continue
            code_description = VariableCodeDescription.objects.get_or_create(variable=variable,
                                                                             code=code['code'],
                                                                             description=code['description'])
        # Set up for next pass
        variable_def = read_binford_variable_def(csv_reader)
if __name__ == '__main__':
    # CLI usage: python <script> <csv file> <mode>; see LOAD_BY_ROW for modes
    run(sys.argv[1], sys.argv[2])
Skip CODE values that start with 'class:'
# -*- coding: utf-8 -*-
import csv
import sys
import re
from django.contrib.gis.geos import Point
from django.core.exceptions import ObjectDoesNotExist
from django.db.utils import IntegrityError
from dplace_app.models import *
# ISO codes referenced by the input data but absent from the database;
# reported by run() once loading finishes
MISSING_CODES = []
# Modes handled row-by-row via csv.DictReader inside run()
LOAD_BY_ROW=('iso', 'env_vals',
             'langs', 'iso_lat_long',
             'ea_soc', 'ea_vars', 'ea_vals',
             'bf_soc', 'bf_vars', 'bf_vals')
def run(file_name=None, mode=None):
    """Entry point: load file_name according to mode.

    Modes listed in LOAD_BY_ROW are processed one CSV row at a time;
    'ea_codes' and 'bf_codes' hand the whole file to a dedicated parser;
    'env_vars' reads nothing and only creates the environmental variables.
    """
    # read the csv file
    with open(file_name, 'rb') as csvfile:
        if mode in LOAD_BY_ROW:
            csv_reader = csv.DictReader(csvfile)
            for dict_row in csv_reader:
                if mode == 'iso':
                    load_isocode(dict_row)
                elif mode == 'iso_lat_long':
                    load_iso_lat_long(dict_row)
                elif mode == 'ea_soc':
                    load_ea_society(dict_row)
                elif mode == 'env_vals':
                    load_environmental(dict_row)
                elif mode == 'ea_vars':
                    load_ea_var(dict_row)
                elif mode == 'ea_vals':
                    load_ea_val(dict_row)
                elif mode == 'langs':
                    load_lang(dict_row)
                elif mode == 'bf_soc':
                    load_bf_society(dict_row)
                elif mode =='bf_vars':
                    load_bf_var(dict_row)
        elif mode == 'ea_codes':
            load_ea_codes(csvfile)
        elif mode == 'bf_codes':
            load_bf_codes(csvfile)
    # report any codes the loaders could not resolve
    if len(MISSING_CODES) > 0:
        print "Missing ISO Codes:"
        print '\n'.join(MISSING_CODES)
    if mode == 'ea_vals':
        # after loading values, populate society-level data from variable values
        postprocess_ea_societies()
    elif mode == 'env_vars':
        create_environmental_variables()
# get a value from a dictionary, searching the possible keys
def get_value(row, possible_keys):
    """Return the value stored under the first of possible_keys present in
    row, or None when none of the keys is present.

    Fix: the parameter previously shadowed the builtin 'dict', and the
    membership test materialized .keys() needlessly.
    """
    for key in possible_keys:
        if key in row:
            return row[key]
    return None
def get_isocode(row):
    """Return the ISO code from a CSV row dict, or None when absent.

    Fix: the parameter previously shadowed the builtin 'dict'.
    """
    # ISO Code may appear in 'ISO' column (17th Ed Missing ISO codes)
    # or the 'ISO 693-3 code' column (17th Ed - ISO 693-3 - current)
    return get_value(row,('ISO','ISO 693-3 code'))
def load_isocode(iso_dict):
    """Create an ISOCode record for this row's code, skipping rows whose
    code is missing or longer than 3 characters."""
    code = get_isocode(iso_dict)
    if code is None:
        print "ISO Code not found in row, skipping"
        return
    if len(code) > 3:
        print "ISO Code '%s' too long, skipping" % code
        return
    ISOCode.objects.get_or_create(iso_code=code)
def load_iso_lat_long(iso_dict):
    """Attach a location Point to an already-loaded ISOCode record."""
    code = iso_dict['ISO']
    found_code = None
    try:
        found_code = ISOCode.objects.get(iso_code=code)
    except ObjectDoesNotExist:
        print "Tried to attach Lat/Long to ISO Code %s but code not found" % code
        return
    # GEOS Point takes (x, y) == (longitude, latitude)
    location = Point(float(iso_dict['LMP_LON']),float(iso_dict['LMP_LAT']))
    found_code.location = location
    found_code.save()
# Maps environmental-CSV column names to EnvironmentalVariable metadata
# (display name and units).  load_environmental() reads values by these keys;
# create_environmental_variables() creates one variable per entry.
ENVIRONMENTAL_MAP = {
    'AnnualMeanTemperature': {
        'name': 'Annual Mean Temperature',
        'units': '℃',
    },
    'AnnualTemperatureVariance': {
        'name': 'Annual Temperature Variance',
        'units': '℃',
    },
    'TemperatureConstancy': {
        'name': 'Temperature Constancy',
        'units': '',
    },
    'TemperatureContingency': {
        'name': 'Temperature Contingency',
        'units': '',
    },
    'TemperaturePredictability': {
        'name': 'Temperature Predictability',
        'units': '',
    },
    'AnnualMeanPrecipitation': {
        'name': 'Annual Mean Precipitation',
        'units': 'mm',
    },
    'AnnualPrecipitationVariance': {
        'name': 'Annual Precipitation Variance',
        'units': '',
    },
    'PrecipitationConstancy': {
        'name': 'Precipitation Constancy',
        'units': '',
    },
    'PrecipitationContingency': {
        'name': 'Precipitation Contingency',
        'units': '',
    },
    'PrecipitationPredictability': {
        'name': 'Precipitation Predictability',
        'units': '',
    },
    'BirdRichness': {
        'name': 'Bird Richness',
        'units': '',
    },
    'MammalRichness': {
        'name': 'Mammal Richness',
        'units': '',
    },
    'AmphibianRichness': {
        'name': 'Amphibian Richness',
        'units': '',
    },
    'VascularPlantsRichness': {
        'name': 'Vascular Plants Richness',
        'units': '',
    },
    # TODO: EcoRegion! (text)
    'Elevation': {
        'name': 'Elevation',
        'units': '',
    },
    'Slope': {
        'name': 'Slope',
        'units': '',
    },
    # TODO: Coastal (Bool)
    'NetPrimaryProduction': {
        'name': 'Net Primary Production',
        'units': '',
    },
    'DurationOfGrowingSeason': {
        'name': 'Duration of Growing Season',
        'units': 'mo',
    },
    'MeanGrowingSeason.NPP': {
        'name': 'Mean Growing Season NPP',
        'units': '',
    },
    'InterYearVariance.GrowingSeason.NPP': {
        'name': 'Inter-Year Variance Growing Season NPP',
        'units': '',
    },
}
def iso_from_code(code):
    """Look up an existing ISOCode row; 'NA' and unknown codes yield None."""
    if code != 'NA':
        try:
            return ISOCode.objects.get(iso_code=code)
        except ObjectDoesNotExist:
            pass
    return None
def create_environmental_variables():
    """Ensure an EnvironmentalVariable row exists per ENVIRONMENTAL_MAP entry."""
    for spec in ENVIRONMENTAL_MAP.values():
        EnvironmentalVariable.objects.get_or_create(name=spec['name'],units=spec['units'])
def load_environmental(env_dict):
    """Create an Environmental record for the row's society plus one
    EnvironmentalValue per mapped CSV column (skipping blank/'NA' cells)."""
    ext_id = env_dict['ID']
    source = env_dict['Source']
    # hack for B109 vs. 109
    if source == 'Binford' and ext_id.find('B') == -1:
        ext_id = 'B' + ext_id
    try:
        society = Society.objects.get(ext_id=ext_id, source=source)
    except ObjectDoesNotExist:
        print "Unable to find a Society object with ext_id %s and source %s, skipping..." % (ext_id, source)
        return
    # This limits the environmental data to one record per society record
    found_environmentals = Environmental.objects.filter(society=society)
    if len(found_environmentals) == 0:
        # GEOS Point takes (x, y) == (longitude, latitude)
        reported_latlon = Point(float(env_dict['Orig.longitude']),float(env_dict['Orig.latitude']))
        actual_latlon = Point(float(env_dict['longitude']), float(env_dict['latitude']))
        iso_code = iso_from_code(env_dict['iso'])
        # Create the base Environmental
        environmental = Environmental(society=society,
                                      reported_location=reported_latlon,
                                      actual_location=actual_latlon,
                                      iso_code=iso_code)
        environmental.save()
        for k in ENVIRONMENTAL_MAP: # keys are the columns in the CSV file
            var_dict = ENVIRONMENTAL_MAP[k]
            try:
                # Get the variable
                variable = EnvironmentalVariable.objects.get(name=var_dict['name'])
            except ObjectDoesNotExist:
                print "Warning: Did not find an EnvironmentalVariable with name %s" % var_dict['name']
                continue
            # blank or 'NA' cells carry no measurement
            if env_dict[k] and env_dict[k] != 'NA':
                value = float(env_dict[k])
                EnvironmentalValue.objects.get_or_create(variable=variable,value=value,environmental=environmental)
def load_ea_society(society_dict):
    """Create an EA Society record from one CSV row, unless a society with
    the same external ID and source already exists."""
    ext_id = society_dict['ID']
    source = 'EA'
    found_societies = Society.objects.filter(ext_id=ext_id,source=source)
    if len(found_societies) == 0:
        name = society_dict['Society_name_EA']
        iso_code = iso_from_code(society_dict['ISO693_3'])
        # Get the language
        language_name = society_dict['LangNam']
        try:
            language = Language.objects.get(name=language_name,iso_code=iso_code)
        except ObjectDoesNotExist:
            # society is still created, just without a language link
            language = None
            print "Warning: Creating society record for %s but no language found with name %s" % (ext_id, language_name)
        society = Society(ext_id=ext_id,
                          name=name,
                          source=source,
                          iso_code=iso_code,
                          language=language
                          )
        society.save()
def postprocess_ea_societies():
'''
Some of the EA Variable values represent data that is needed at the society level, e.g.
source and location
'''
try:
lon_var = VariableDescription.objects.get(name='Longitude')
lat_var = VariableDescription.objects.get(name='Latitude')
focal_year_var = VariableDescription.objects.get(name='Date: Year with Century')
except ObjectDoesNotExist:
print "Unable to find vars for Lon/Lat/Year. Have you loaded the ea_vars?"
for society in Society.objects.filter(source='EA'):
# Get location
try:
lon_val = society.variablecodedvalue_set.get(variable=lon_var)
lat_val = society.variablecodedvalue_set.get(variable=lat_var)
except ObjectDoesNotExist:
print "Unable to get lon/lat for society %s, skipping postprocessing" % society
continue
try:
location = Point(
float(lon_val.coded_value),
float(lat_val.coded_value)
)
society.location = location
except ValueError:
print "Unable to create Point from (%s,%s) for society %s" % (lon_val.coded_value, lat_val.coded_value, society)
# TODO: Get source, incl focal year
society.save()
def eavar_number_to_label(number):
    """Format an EA variable number as its label, e.g. 5 -> 'EA005'."""
    return 'EA' + str(number).rjust(3, '0')
def load_ea_var(var_dict):
    """
    Variables are loaded from ea_variable_names+categories.csv for simplicity,
    but there is more detailed information in ea_codes.csv
    """
    try:
        number = int(var_dict['Variable number'])
    except ValueError:
        # header or malformed row
        return
    exclude = var_dict['Exclude from D-PLACE?']
    if exclude == '1':
        return
    label = eavar_number_to_label(number)
    name = var_dict['Variable'].strip()
    variable, created = VariableDescription.objects.get_or_create(label=label,name=name)
    index_categories = [clean_category(x) for x in var_dict['INDEXCATEGORY'].split(',')]
    # Currently max 1 niche category
    niche_categories = [clean_category(x) for x in var_dict['NICHECATEGORY'].split(',')]
    # when creating categories, ignore '?'
    for category_name in index_categories:
        index_category, created = VariableCategory.objects.get_or_create(name=category_name)
        variable.index_categories.add(index_category)
    for category_name in niche_categories:
        niche_category, created = VariableCategory.objects.get_or_create(name=category_name)
        variable.niche_categories.add(niche_category)
def clean_category(category):
    """Normalize a category cell: trim whitespace, then capitalize
    (first letter upper, rest lower), e.g. ' KINSHIP ' -> 'Kinship'."""
    stripped = category.strip()
    return stripped.capitalize()
# Column layout of ea_codes.csv
SORT_COLUMN = 0
VARIABLE_VNUMBER_COLUMN = 1
VARIABLE_NUMBER_COLUMN = 2
VARIABLE_NAME_COLUMN = 3
N_COLUMN = 4
CODE_COLUMN = 5
DESCRIPTION_COLUMN = 6

# e.g. N CODE DESCRIPTION
def row_is_headers(row):
    """True for a per-variable header row: N / CODE / DESCRIPTION."""
    return (row[N_COLUMN].strip() == 'N'
            and row[CODE_COLUMN].strip() == 'CODE'
            and row[DESCRIPTION_COLUMN].strip() == 'DESCRIPTION')

# e.g. 1 1 Gathering 1267
def row_is_def(row):
    """True when the row defines a variable (numeric variable-number cell)."""
    return row[VARIABLE_NUMBER_COLUMN].strip().isdigit()

# has a code value and a description text
# e.g. 706 0 0 - 5% Dependence
def row_is_data(row):
    """True for code/description data rows: numeric N cell and an empty
    variable-number cell.  (Code may be '.', 0, or text, so it is not a
    usable discriminator.)"""
    n_cell = row[N_COLUMN].strip()
    number_cell = row[VARIABLE_NUMBER_COLUMN].strip()
    return n_cell.isdigit() and not number_cell

# Junk rows
def row_is_skip(row):
    """True for junk rows: anything without a numeric sort cell."""
    return not row[SORT_COLUMN].strip().isdigit()
def load_ea_codes(csvfile=None):
    """Parse ea_codes.csv: rows alternate between variable definitions,
    header rows and code/description data rows; creates a
    VariableCodeDescription for each data row of a known variable."""
    number = None
    variable = None  # the VariableDescription the following data rows belong to
    csv_reader = csv.reader(csvfile)
    for row in csv_reader:
        if row_is_skip(row):
            pass
        elif row_is_data(row):
            if variable is None:
                # Variable may have been excluded from D-PLACE, ignore this data row
                continue
            code = row[CODE_COLUMN].strip()
            n = row[N_COLUMN].strip()
            try:
                n = int(n)
            except ValueError:
                n = 0
            found_descriptions = VariableCodeDescription.objects.filter(variable=variable,code=code)
            if len(found_descriptions) == 0:
                # This won't help for things that specify a range or include the word or
                description = row[DESCRIPTION_COLUMN].strip()
                code_description = VariableCodeDescription(variable=variable,
                                                           code=code,
                                                           description=description,
                                                           n=n)
                code_description.save()
        elif row_is_headers(row):
            pass
        elif row_is_def(row):
            # get the variable number
            number = int(row[VARIABLE_NUMBER_COLUMN])
            try:
                # Some variables in the EA have been excluded from D-PLACE, so there
                # will be no VariableDescription object for them
                label = eavar_number_to_label(number)
                variable = VariableDescription.objects.get(label=label)
            except ObjectDoesNotExist:
                variable = None
        else:
            print "did not get anything from this row %s" % (','.join(row)).strip()
def load_ea_val(val_row):
    """Store one society's coded values: every column whose name starts with
    'v' becomes a VariableCodedValue linked to the matching variable."""
    ext_id = val_row['ID'].strip()
    # find the existing society
    try:
        society = Society.objects.get(ext_id=ext_id)
    except ObjectDoesNotExist:
        print "Attempting to load EA values for %s but did not find an existing Society object" % ext_id
        return
    # get the keys that start with v
    for key in val_row.keys():
        if key.find('v') == 0:
            number = int(key[1:])
            label = eavar_number_to_label(number)
            value = val_row[key].strip()
            try:
                variable = VariableDescription.objects.get(label=label)
            except ObjectDoesNotExist:
                # variable was excluded from D-PLACE; skip its value
                continue
            try:
                # Check for Code description if it exists.
                code = VariableCodeDescription.objects.get(variable=variable,code=value)
            except ObjectDoesNotExist:
                code = None
            try:
                variable_value = VariableCodedValue(variable=variable,
                                                    society=society,
                                                    coded_value=value,
                                                    code=code)
                variable_value.save()
            except IntegrityError:
                # unique constraint hit: value already loaded on a prior run
                print "Unable to store value %s for var %s in society %s, already exists" % (value, variable, society)
def load_lang(lang_row):
    """Load one language row: ensure Language, LanguageClass hierarchy and
    LanguageClassification records exist for the row's ISO code.

    Skips the row (recording the code via add_missing_isocode) when the ISO
    code is not already present in the database.
    """
    # Extract values from dictionary
    code = get_isocode(lang_row)
    if code is None:
        print "ISO Code not found in row, skipping"
        return
    language_name = get_value(lang_row,('Language name','NAM'))
    ethnologue_classification = get_value(lang_row,('Ethnologue Classification (unrevised)','Ethnologue classification (if applicable)'))
    # Up to three classification levels: family, subfamily, sub-subfamily.
    # NOTE(review): get_value may return None here; .strip() below would then
    # raise AttributeError -- confirm the family column is always present.
    family_names = [
        get_value(lang_row,('FAMILY-REVISED','FAMILY')),
        lang_row['Class2'],
        lang_row['Class3']
    ]
    # ISO Code
    isocode = iso_from_code(code) # Does not create new ISO Codes
    if isocode is None:
        print "No ISO Code found in database for %s, skipping language %s" % (code, language_name)
        add_missing_isocode(code)
        return
    # Language
    try:
        language = Language.objects.get(iso_code=isocode)
    except ObjectDoesNotExist:
        language = Language(name=language_name,
                            iso_code=isocode
                            )
        language.save()
    # Classes
    classes = []
    for i in range(3):
        level = i + 1
        name = family_names[i].strip()
        if len(name) == 0:
            # empty cell
            continue
        try:
            classes.append(LanguageClass.objects.get(level=level,name=name))
        except ObjectDoesNotExist:
            # create the class, parented to the previously collected
            # (shallower) level, or as a root when this is the first level
            if len(classes) > 0:
                parent = classes[-1]
            else:
                parent = None
            lang_class = LanguageClass(level=level, name=name, parent=parent)
            lang_class.save()
            classes.append(lang_class)
    # Finally, create the LanguageClassification
    classification_scheme = 'R' # Ethnologue17-Revised
    try:
        classification = LanguageClassification.objects.get(ethnologue_classification=ethnologue_classification)
    except ObjectDoesNotExist:
        # NOTE(review): if all three family cells were blank, classes is empty
        # and classes[0] raises IndexError -- confirm inputs always carry at
        # least a family name.
        class_family = classes[0]
        class_subfamily = classes[1] if len(classes) > 1 else None
        class_subsubfamily = classes[2] if len(classes) > 2 else None
        classification = LanguageClassification(scheme=classification_scheme,
                                                language=language,
                                                ethnologue_classification=ethnologue_classification,
                                                class_family=class_family,
                                                class_subfamily=class_subfamily,
                                                class_subsubfamily=class_subsubfamily,
                                                )
        classification.save()
def add_missing_isocode(isocode):
    # Record an ISO code that the data referenced but the database lacks;
    # run() prints the collected list when loading finishes.
    MISSING_CODES.append(isocode)
def load_bf_society(society_dict):
    """Create a Binford Society record from one CSV row, unless a society
    with the same external ID and source already exists."""
    ext_id = society_dict['ID']
    source = 'Binford'
    found_societies = Society.objects.filter(ext_id=ext_id,source=source)
    if len(found_societies) == 0:
        name = society_dict['STANDARD SOCIETY NAME Binford']
        iso_code = iso_from_code(society_dict['ISO693_3'])
        # Get the language
        language_name = society_dict['LangNam']
        try:
            language = Language.objects.get(name=language_name,iso_code=iso_code)
        except ObjectDoesNotExist:
            # society is still created, just without a language link
            language = None
            print "Warning: Creating society record for %s but no language found with name %s" % (ext_id, language_name)
        society = Society(ext_id=ext_id,
                          name=name,
                          source=source,
                          iso_code=iso_code,
                          language=language
                          )
        society.save()
def load_bf_var(var_dict):
    """
    Variables are loaded from binford_variable_names+categories.csv for simplicity,
    but there is more detailed information in bf_codebook.csv
    """
    label = var_dict['Field name'].strip()
    name = var_dict['Variable name'].strip()
    # NOTE(review): description is computed but never stored -- confirm
    # whether it should be passed to VariableDescription below.
    description = var_dict['Detailed description'].strip()
    variable, created = VariableDescription.objects.get_or_create(label=label,name=name)
    index_categories = [clean_category(x) for x in var_dict['IndexCategory'].split(',')]
    # Currently max 1 niche category
    niche_categories = [clean_category(x) for x in var_dict['NicheCategory'].split(',')]
    # when creating categories, ignore '?'
    for category_name in index_categories:
        index_category, created = VariableCategory.objects.get_or_create(name=category_name)
        variable.index_categories.add(index_category)
    for category_name in niche_categories:
        niche_category, created = VariableCategory.objects.get_or_create(name=category_name)
        variable.niche_categories.add(niche_category)
# Matches Binford codebook variable-definition cells such as 'B001_...'
VARIABLE_DEF_EXPRESSION = 'B[0-9]{3}_.*'
# Column indices into rows of bf_codebook.csv
BF_CODE_COLUMN_VARIABLE_DEF = 0
BF_CODE_COLUMN_VARIABLE_NAME = 6
BF_CODE_COLUMN_VARIABLE_DESC = 8
def read_binford_variable_def(csv_reader):
    '''
    Advances the CSV reader row-by-row until finding a row that starts with
    VARIABLE_DEF_EXPRESSION

    Returns a dict with 'field', 'name' and 'desc' keys, or None when the
    reader is exhausted.  (Python 2: relies on csv_reader.next().)
    '''
    row, matched = None, None
    while matched is None:
        try:
            row = csv_reader.next()
        except StopIteration:
            # no more variable definitions in the file
            return None
        matched = re.match(VARIABLE_DEF_EXPRESSION, row[BF_CODE_COLUMN_VARIABLE_DEF])
    variable_def = dict()
    variable_def['field'] = row[BF_CODE_COLUMN_VARIABLE_DEF]
    variable_def['name'] = row[BF_CODE_COLUMN_VARIABLE_NAME]
    variable_def['desc'] = row[BF_CODE_COLUMN_VARIABLE_DESC]
    return variable_def
def read_binford_header_row(csv_reader):
    '''
    Advances the CSV reader row-by-row until finding a row with CODE,DESCRIPTION,NOTES

    Returns that header row, or None when the reader is exhausted.
    (Python 2: relies on csv_reader.next().)
    '''
    row, matched = None, False
    while not matched:
        try:
            row = csv_reader.next()
        except StopIteration:
            # ran out of rows before finding a header
            return None
        matched = row[0].strip() == 'CODE'
    return row
def read_binford_code_rows(csv_reader):
    '''
    Advances the CSV reader row-by-row, collecting CODE / DESCRIPTION / NOTES / PAGE rows
    until a blank row is found

    Returns a list of dicts keyed 'code', 'description', 'notes', 'page'.
    '''
    codes, done = [], False
    while not done:
        try:
            row = csv_reader.next()
        except StopIteration:
            done = True
            break
        # a blank leading cell terminates this variable's code section
        if len(row[0].strip()) == 0:
            done = True
        else:
            # NOTE(review): assumes every code row has at least 4 columns --
            # confirm against bf_codebook.csv
            codes.append({'code': row[0].strip(),
                          'description': row[1].strip(),
                          'notes': row[2].strip(),
                          'page': row[3].strip()
                          })
    return codes
def load_bf_codes(csvfile=None):
    """Parse the Binford codebook CSV and create VariableCodeDescription
    rows: for each variable definition, read its header row and then the
    code rows that follow."""
    csv_reader = csv.reader(csvfile)
    # parse the file, looking for variable def, then header, then codes
    variable_def = read_binford_variable_def(csv_reader)
    while variable_def is not None:
        read_binford_header_row(csv_reader)
        codes = read_binford_code_rows(csv_reader)
        variable = VariableDescription.objects.get(label=variable_def['field'])
        for code in codes:
            # Special cases
            # codebook heading rows like 'class: ...' are not real codes
            if code['code'].startswith('class:'):
                print "Code %s starts with 'class:', skipping" % code['code']
                continue
            code_description = VariableCodeDescription.objects.get_or_create(variable=variable,
                                                                             code=code['code'],
                                                                             description=code['description'])
        # Set up for next pass
        variable_def = read_binford_variable_def(csv_reader)
if __name__ == '__main__':
    # CLI usage: python <script> <csv file> <mode>; see LOAD_BY_ROW for modes
    run(sys.argv[1], sys.argv[2])
|
""" this here's the code responsible for executing 'bru install' commands, e.g.
'bru install package.json' or 'bru install protobug googlemock@1.7.0'.
"""
import os
import re
import glob
import shutil
import filecmp
import platform
import collections
import brulib.jsonc
import brulib.make
import brulib.module_downloader
class Installable:
    """A (module, version) pair requested for installation.

    version is None when the user gave no '@version' suffix.
    """
    def __init__(self, module, version):
        self.module = module
        self.version = version

    def __eq__(self, other):
        if not isinstance(other, Installable):
            return False
        return self.module == other.module and self.version == other.version

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Fix: defining __eq__ without __hash__ makes instances unhashable
        # in Python 3; hash on the same fields equality compares.
        return hash((self.module, self.version))

    def __repr__(self):
        return 'Installable({!r}, {!r})'.format(self.module, self.version)
def get_single_bru_file(dir):
    """Return the lone *.bru file in dir, or None when there is none.

    Raises when the directory holds more than one *.bru file.
    """
    found = glob.glob(os.path.join(dir, "*.bru"))
    if len(found) > 1:
        raise Exception("there are multiple *.bru files in {}: {}".format(
            dir, found))
    return found[0] if found else None
def get_or_create_single_bru_file(dir):
    """ returns single *.bru file from given dir or creates an empty
        package.bru file (corresponding to package.json for npm).
        So unlike get_single_bru_file() never returns None.
    """
    bru_file = get_single_bru_file(dir)
    if bru_file is None:  # idiom fix: compare with None via 'is'
        # no *.bru yet: create a minimal package.bru with empty dependencies
        bru_file = os.path.join(dir, 'package.bru')
        brulib.jsonc.savefile(bru_file, {'dependencies':{}})
        print('created ', bru_file)
    assert bru_file is not None
    return bru_file
def parse_module_at_version(installable):
    """Split 'module@version' into an Installable.

    The version is None when the input carries no '@version' suffix;
    more than one '@' is an error.
    """
    parts = installable.split('@')
    if len(parts) > 2:
        raise Exception("expected module@version but got {}".format(installable))
    version = parts[1] if len(parts) == 2 else None
    return Installable(parts[0], version)
def parse_existing_module_at_version(installable, library):
    """ like parse_module_at_version but returns the latest version if version
        was unspecified. Also verifies module at version actually exist
        in ./library.
        Param library is of type brulib.library.Library
    """
    installable = parse_module_at_version(installable)
    module = installable.module
    version = installable.version
    if not os.path.exists(library.get_module_dir(module)):
        raise Exception("no module {} in {}, may want to 'git pull'"\
            " if this module was added very recently".format(
            module, library.get_root_dir()))
    if version is None:  # idiom fix: 'is None' instead of '== None'
        # no @version given: default to the newest version in the library
        version = library.get_latest_version_of(module)
    if not library.has_formula(module, version):
        raise Exception("no version {} in {}/{}, may want to 'git pull'"\
            " if this version was added very recently".format(
            version, library.get_root_dir(), module))
    assert version is not None
    return Installable(module, version)
def add_dependencies_to_bru(bru_filename, installables):
    """Record each installable as module:version under 'dependencies' in
    the given *.bru file."""
    bru = brulib.jsonc.loadfile(bru_filename)
    deps = bru.setdefault('dependencies', {})
    for installable in installables:
        deps[installable.module] = installable.version
    brulib.jsonc.savefile(bru_filename, bru) # warning: this nukes comments atm
def add_dependencies_to_gyp(gyp_filename, installables):
    """Add a gyp dependency expression for each installable to the first
    target of the given gyp file."""
    gyp = brulib.jsonc.loadfile(gyp_filename)
    # typically a gyp file has multiple targets, e.g. a static_library and
    # one or more test executables. Here we add the new dep to only the first
    # target in the gyp file, which is somewhat arbitrary. TODO: revise.
    # Until then end user can always shuffle around dependencies as needed
    # between targets.
    if not 'targets' in gyp:
        gyp['targets'] = []
    targets = gyp['targets']
    if len(targets) == 0:
        # Bug fix: 'targets[0] = {}' on an empty list raised IndexError;
        # append the new (empty) first target instead.
        targets.append({})
    first_target = targets[0]
    if not 'dependencies' in first_target:
        first_target['dependencies'] = []
    deps = first_target['dependencies']
    for installable in installables:
        module = installable.module
        dep_gyp_path = "bru_modules/{}/{}.gyp".format(module, module)
        dep_expr = dep_gyp_path + ":*" # depend on all targets, incl tests
        if not dep_expr in deps:
            deps.append(dep_expr)
    brulib.jsonc.savefile(gyp_filename, gyp) # warning: this nukes comments atm
def create_gyp_file(gyp_filename):
    """ creates enough of a gyp file so that we can record dependencies """
    if os.path.exists(gyp_filename):
        raise Exception('{} already exists'.format(gyp_filename))
    gyp = collections.OrderedDict([
        ("includes", [
            # bru_common.gypi should in general not be edited, but stay a copy
            # of the original. If you want to override settings in this gypi
            # file then you're better off editing bru_overrides.gypi.
            # That way if bru_common.gyp gets improvements in git you don't
            # need to merge these external changes with your local ones.
            "bru_common.gypi",
            # This is the gypi file you're encourage to edit, bru will always
            # keep this empty and untouched.
            "bru_overrides.gypi"
        ]),
        ("targets", [
            collections.OrderedDict([
                ("target_name", "foo"), # just a guess, user should rename
                ("type", "none"), # more likely 'static_library' or 'executable'
                # these two props are going to have to be filled in by enduser
                ("sources", []),
                # Bug fix: gyp's key is 'include_dirs'; the misspelled
                # 'includes_dirs' would be silently ignored by gyp.
                ("include_dirs", []),
                ("dependencies", [])
            ])])
        ])
    brulib.jsonc.savefile(gyp_filename, gyp)
# from http://stackoverflow.com/questions/431684/how-do-i-cd-in-python
class Chdir:
    """Context manager that temporarily switches the working directory.

    Used together with os.system for executing $make_command, typically
    to run ./configure from the module's directory.
    """
    def __init__( self, newPath ):
        self.newPath = newPath

    def __enter__(self):
        # remember where we were so __exit__ can restore it
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        os.chdir(self.savedPath)
def touch(file_name, times=None):
    """Ensure file_name exists and set its (atime, mtime); None means now."""
    # http://stackoverflow.com/questions/1158076/implement-touch-using-python
    with open(file_name, 'a'):
        os.utime(file_name, times)
def exec_make_command(formula, bru_modules_root, system):
    """ note that few modules specify a make_command. The few that do usually
        don't execute a full make but only a ./configure.
        This part is kinda ugly atm: consistent use of gyp for building modules
        we depent on would be preferable. TODO: revisit.
        param system should be platform.system()
    """
    # make_command should only be used if we're too lazy to provide a
    # gyp file for a module.
    # A drawback of using ./configure make is that build are less reproducible
    # across machines, e.g. ./configure may enable some code paths on one
    # machine but not another depending on which libs are installed on both
    # machines.
    if 'make_command' in formula:
        module_dir = os.path.join(bru_modules_root, formula['module'], formula['version'])
        # marker file: its presence means make_command already ran successfully
        make_done_file = os.path.join(module_dir, "make_command.done")
        if not os.path.exists(make_done_file):
            # pick a make command depending on host OS
            make_commands = formula['make_command']
            if not system in make_commands:
                raise Exception("no key {} in make_command".format(system))
            make_command = make_commands[system]
            # On Windows msvs toolchain build tools are typically not in your
            # PATH, but are expected to be added to your PATH via
            # %VS110COMNTOOLS%\vsvars32.bat. Let's call this vsvars32.bat
            # script here automatically on Windows so that the command line
            # may call nmake for example without having to hardcode in *.bru
            # which (of often multiple installed) msvs toolchain to use.
            if system == 'Windows':
                # I had trouble with passing cmds like %VS110COMNTOOLS%\\vsvars32.bat
                # thru os.system to powershell or cmd.exe. So instead let's
                # write the make_command into a batch file and execute that:
                make_command_bat = 'make_command.bat'
                make_command_path = os.path.join(module_dir, make_command_bat)
                with open(make_command_path, 'w') as batch_file:
                    msvs_version = brulib.make.get_latest_installed_msvs_version()
                    if msvs_version != None:
                        vsvars = 'call "%VS{}COMNTOOLS%\\vsvars32.bat"'.format(
                            msvs_version)
                        print("prefixing make_command with " + vsvars)
                        batch_file.write(vsvars + '\r\n')
                    # ';' separates the individual commands of a make_command
                    for cmd in make_command.split(';'):
                        batch_file.write(cmd + '\r\n')
                make_command = make_command_bat
            # exec make_command with cwd being the module_dir (so the dir the
            # gyp file is in, not that the gyp file is used here, but using the
            # same base dir for the gyp & make_command probably makes sense)
            with Chdir(module_dir):
                print("building via '{}' ...".format(make_command))
                error_code = os.system(make_command)
                if error_code != 0:
                    raise ValueError("build failed with error code {}".format(error_code))
            # only mark done after a zero exit code, so failed builds re-run
            touch(make_done_file)
def download_module(library, module_name, module_version):
    """Download and unpack the module's sources into ./bru_modules, then run
    its make_command (if the formula declares one) for the current OS."""
    bru_modules_root = "./bru_modules"
    formula = library.load_formula(module_name, module_version)
    brulib.module_downloader.get_urls(library, formula, bru_modules_root)
    exec_make_command(formula, bru_modules_root, platform.system())
def verify_resolved_dependencies(formula, target, resolved_dependencies):
    """ param formula is the formula with a bunch of desired(!) dependencies
        which after conflict resolution across the whole set of diverse deps
        may be required to pull a different version for that module for not
        violate the ODR. But which of course risks not compiling the module
        (but which hopefully will compile & pass tests anyway).
        Param resolved_dependencies is this global modulename-to-version map
        computed across the whole bru.json dependency list.
        Returns the subset of deps for the formula, using the resolved_dependencies
    """
    # this here's the module we want to resolve deps for now:
    module = formula['module']
    version = formula['version']
    # iterate over all target and their deps, fill in resolved versions
    target_name = target['target_name']
    # NOTE(review): resolved_target_deps is never used below -- dead variable?
    resolved_target_deps = []
    def map_dependency(dep):
        """ param dep is a gyp file dependency, so either a local dep to a local
            target like 'zlib' or a cross-module dep like '../boost-regex/...'.
            There should be no other kinds of gyp deps in use """
        # detect the regexes as written by scan_deps.py: references into
        # a sibling module within ./bru_modules.
        bru_regex = "^../([^/]+)/([^/]+)\\.gyp:(.+)"
        match = re.match(bru_regex, dep)
        if match == None:
            # local (same-module) dep: pass through unchanged
            return dep
        upstream_module = match.group(1)
        upstream_targets = match.group(2)
        if not upstream_module in resolved_dependencies:
            raise Exception("module {} listed in {}/{}.gyp's target '{}'"
                " not found. Add it to {}/{}.bru:dependencies"
                .format(
                    upstream_module, module, version, target_name,
                    module, version
                ))
        # NOTE(review): this returns the raw resolved_dependencies entry
        # rather than a rebuilt '../module/module.gyp:targets' path, and
        # upstream_targets is unused -- confirm this is intended.
        return resolved_dependencies[upstream_module]
    return list(map(map_dependency, target['dependencies']))
def apply_glob_exprs(formula, sources):
    """ gyp does not support glob expression or wildcards in 'sources', this
        here turns these glob expressions into a list of source files.
        param sources is target['sources'] or target['sources!']

        Returns the expanded file list, sorted for deterministic gyp output.
    """
    def is_glob_expr(source):
        return '*' in source
    gyp_target_dir = os.path.join('bru_modules', formula['module']) # that is
        # the dir the gyp file for this module is being stored in, so paths
        # in the gyp file are interpreted relative to that
    result = []
    for source in sources:
        if source.startswith('ant:'):
            raise Exception('Ant-style glob exprs no longer supported: ' + source)
        if is_glob_expr(source):
            matching_sources = [os.path.relpath(filename, start=gyp_target_dir)
                .replace('\\', '/') # otherwise sources! on windows will not match
                for filename in
                glob.glob(os.path.join(gyp_target_dir, source))]
            # NOTE(review): validation via assert is stripped under python -O;
            # consider raising instead
            assert len(matching_sources) > 0, "no matches for glob " + source
            result += matching_sources
        else:
            # source os a flat file name (relative to gyp parent dir)
            result.append(source)
    return list(sorted(result))
def apply_recursive(dic, func):
    """ Walk a nested structure of dicts and lists (e.g. a 'target' node or
    a 'conditions' child) and invoke func on every dict encountered.
    param dic: a dict node, or a child dict/list belonging to one
    param func: callable applied to each dict, taking it as the only param
    """
    if isinstance(dic, dict):
        func(dic)
        for child in dic.values():
            if isinstance(child, (dict, list)):
                apply_recursive(child, func)
    elif isinstance(dic, list):
        for item in dic:
            apply_recursive(item, func)
def apply_glob_to_sources(dic, formula):
    """ Expand glob expressions in the 'sources'/'sources!' properties of
    dic, which is a 'target' dictionary or one of the childnodes in a
    'conditions' list. Mutates dic in place.
    """
    for key in ('sources', 'sources!'):
        if key in dic:
            dic[key] = apply_glob_exprs(formula, dic[key])
def copy_gyp(library, formula, resolved_dependencies):
    """ Copy the module's gyp file from ./library to ./bru_modules/<module>,
    expanding source globs and sanity-checking its cross-module deps.

    Param library is of type brulib.library.Library.
    Param formula is the module's parsed *.bru formula.
    Param resolved_dependencies is a superset of the deps in formula
    with recursively resolved module versions (after resolving conflicts).
    """
    # If the module has a gyp file then let's copy it into ./bru_modules/$module,
    # so next to the unpacked tar.gz, which is where the gyp file's relative
    # paths expect include_dirs and source files and such.
    # Not all modules need a gyp file, but a gyp file allows specifying upstream
    # module dependencies, whereas a ./configure; make might have easily overlooked
    # dependencies that result in harder-to-reproduce builds (unless you build
    # on only one single machine in your organization).
    # Actually even for modules build via make_command we need a gyp file to
    # specify include paths and module libs via all|direct_dependent_settings.
    #
    # Note that the gyp file in the ./library does not contain 'dependencies'
    # property yet, we add this property now (to not have the same redundant deps
    # both in *.bru and *.gyp in the ./library dir)
    module_name = formula['module']
    assert module_name in resolved_dependencies
    resolved_version = resolved_dependencies[module_name]
    gyp = library.load_gyp(formula)
    for target in gyp['targets']:
        if 'dependencies' in target:
            # Initially I thought there should be no such prop in the
            # library/.../*.gyp file because these deps will be filled in with
            # resolved deps from the *.bru file. But then I ran into two
            # problems:
            # a) I wanted for example zlib tests to build via gyp also
            #    (espcially since zlib is being built via gyp target alrdy
            #    anyway), so the gyp test target should depend on the lib
            #    target.
            # b) often test targets need to pull in additional module deps
            #    that the module (without its tests) does not depend on, for
            #    example tests often depend on googletest or googlemock,
            #    whereas the main module does not.
            # So now a *.bru file lists the union of dependencies for all
            # targets in a gyp file, while each target depends explicitly
            # lists dependencies as "bru:googletest". Could also support a
            # format like "bru:googletest:1.7.0" but then the *.gyp file
            # and *.bru file dependency lists would be redundant. Todo: move
            # dependency lists from *.bru to *.gyp file altogether? Maybe...
            # NOTE(review): the resolved deps returned by this call are
            # discarded, so it only validates that all cross-module deps
            # are known — confirm whether the result was meant to be stored.
            verify_resolved_dependencies(formula, target, resolved_dependencies)
        # Sanity check: verify the 'sources' prop doesn't contain glob exprs
        # or wildcards: initially I though gyp was ok with
        #   "sources" : ".../src/*.cc"
        # in *.gyp files because at first glance this 'compiled', but it
        # turned out gyp just silently compiled zero source files in that case.
        #
        # Alternatively we could expand these wildcards now, drawback of that
        # is that the files in ./library are not really *.gyp files anymore,
        # and should probably be called *.gyp.in or *.gyp-bru or something
        # like that.
        # Apply the same mapping to 'sources' in the 'target' itelf and within
        # its childnodes like 'conditions':
        apply_recursive(target, lambda dic: apply_glob_to_sources(dic, formula))
    # note that library/boost-regex/1.57.0.gyp is being copied to
    # bru_modules/boost-regex/boost-regex.gyp here (with some minor
    # transformations that were applied, e.g. expanding wildcards)
    gyp_target_file = os.path.join('bru_modules', module_name, module_name + ".gyp")
    # We also need a certain set of MSVC options imported into gyp files
    # and don't want to repeat the same boring MSVC settings in every single
    # module's individual gyp file. So add common.gypi include unless
    # the module's gyp file explicitly specifies includes already.
    if not 'includes' in gyp:
        # we want the 'includes' at the begin, to achieve this order see
        # http://stackoverflow.com/questions/16664874/how-can-i-add-the-element-at-the-top-of-ordereddict-in-python
        new_gyp = collections.OrderedDict()
        new_gyp['includes'] = [
            '../../bru_common.gypi',
            '../../bru_overrides.gypi'
        ]
        for key, value in gyp.items():
            new_gyp[key] = value
        gyp = new_gyp
    brulib.jsonc.savefile(gyp_target_file, gyp)
    # this file is only saved for human reader's sake atm:
    brulib.jsonc.savefile(os.path.join('bru_modules', module_name, 'bru-version.json'),
        {'version': resolved_version})
def resolve_conflicts(library, dependencies, root_requestor):
    """ takes a dict of modules and version matchers and recursively finds
    all indirect deps. Then resolves version conflicts by picking the newer
    of competing deps, or by picking the version that was requested by the module
    closest to the root of the dependency tree (unsure still).
    param root_requestor is whatever topmost *.bru listed deps, e.g. 'package.bru'
    Returns a list of (module_name, resolved_version, requestor) tuples, one
    per transitive dependency, in first-requested order.
    """
    todo = [(module, version, root_requestor) for (module, version)
            in dependencies.items()]
    recursive_deps = collections.OrderedDict()
    # NOTE: todo deliberately grows while being iterated; Python's for loop
    # re-checks the list length each step, so this is a worklist traversal
    # over the whole dependency tree.
    for module_name, version_matcher, requestor in todo:
        module_version = version_matcher # todo: allow for npm-style version specs (e.g. '4.*')
        #print('resolving dependency {} version {} requested by {}'
        #    .format(module_name, module_version, requestor))
        if module_name in recursive_deps:
            # module requested before: the first requestor's version wins,
            # later conflicting requests only produce a warning
            resolved = recursive_deps[module_name]
            resolved_version = resolved['version']
            if module_version != resolved_version:
                winning_requestor = resolved['requestor']
                print("WARNING: version conflict for {} requested by first {} and then {}"
                      .format(module_name, winning_requestor, requestor))
                # instead of just letting the 2nd and later requestors loose
                # the competition we could probably do something more sensible.
                # todo?
        else:
            # this is the first time this module was requested, freeze that
            # chosen version:
            formula = library.load_formula(module_name, module_version)
            recursive_deps[module_name] = {
                'version' : module_version,
                'requestor' : requestor
            }
            # then descend deeper into the dependency tree:
            deps = formula['dependencies'] if 'dependencies' in formula else {}
            child_requestor = module_name
            todo += [(child_module, version, child_requestor)
                     for (child_module, version)
                     in deps.items()]
    return [(module, resolved['version'], resolved['requestor'])
            for (module, resolved) in recursive_deps.items()]
def install_from_bru_file(bru_filename, library):
    """ this gets executed when you 'bru install': it looks for a *.bru file
    in cwd and downloads the listed deps.
    Param bru_filename is the path of the *.bru file listing dependencies.
    Param library is of type brulib.library.Library.
    """
    package_jso = brulib.jsonc.loadfile(bru_filename)
    recursive_deps = resolve_conflicts(library, package_jso['dependencies'], bru_filename)
    resolved_dependencies = dict((module, version)
        for (module, version, requestor) in recursive_deps)
    for module_name, module_version, requestor in recursive_deps:
        print('processing dependency {} version {} requested by {}'
              .format(module_name, module_version, requestor))
        formula = library.load_formula(module_name, module_version)
        download_module(library, module_name, module_version)
        copy_gyp(library, formula, resolved_dependencies)
    # copy common.gypi which is referenced by module.gyp files and usually
    # also by the parent *.gyp (e.g. bru-sample:foo.gyp).
    # Should end users be allowed to make changes to bru_common.gypi or
    # should they rather edit their own optional bru_overrides.gypi which
    # shadows bru_common.gypi? Let's do the latter. See comments in
    # create_gyp_file for more details.
    # Anyway: just in case the user did make changes to bru_common.gypi
    # let's only copy it if it's new.
    common_gypi = 'bru_common.gypi'
    overrides_gypi = 'bru_overrides.gypi'
    common_gypi_src = os.path.join(library.get_root_dir(), '..', common_gypi)
    if not os.path.exists(common_gypi):
        print('copying', common_gypi)
        shutil.copyfile(common_gypi_src, common_gypi)
    elif not filecmp.cmp(common_gypi_src, common_gypi):
        # bugfix: the template and its args used to be passed as separate
        # print() arguments, so the '{}' placeholders were printed literally;
        # route them through str.format() instead.
        print('WARNING: {} differs from {}: it is OK but not recommended'
            ' to modify {}, instead rather modify {}'.format(
            common_gypi, common_gypi_src, common_gypi, overrides_gypi))
    # create empty bru_overrides.gypi only if it doesn't exist yet
    # One use case for bru_overrides.gypi is to tweak the Debug build to
    # include clang -fsanitize=address options.
    if not os.path.exists(overrides_gypi):
        print('creating empty {}'.format(overrides_gypi))
        brulib.jsonc.savefile(overrides_gypi, {})
    #for module, version, requestor in recursive_deps:
    #    for ext in ['bru', 'gyp']:
    #        print("git add -f library/{}/{}.{}".format(module, version, ext))
    # todo: clean up unused module dependencies from /bru_modules?
def cmd_install(library, installables):
    """ param installables: e.g. [] or ['googlemock@1.7.0', 'boost-regex']
    This is supposed to mimic 'npm install' syntax, see
    https://docs.npmjs.com/cli/install. Examples:
    a) bru install googlemock@1.7.0
    b) bru install googlemock
    c) bru install
    Variant (a) is self-explanatory, installing the module of the given
    version. Variant (b) installs the latest known version of the module
    as specified by the versions listed in bru/library/googlemock.
    Variant (c) will install all dependencies listed in the local *.bru
    file (similar as how 'npm install' install all deps from ./package.json).
    Unlike for 'npm install' the option --save is implied, means whatever you
    install will end up in the local *.bru file's "dependencies" list, as
    well as in the companion *.gyp file.
    Param library is of type brulib.library.Library
    """
    if len(installables) == 0:
        # 'bru install': install everything the single local *.bru file lists
        bru_filename = get_single_bru_file(os.getcwd())
        if bru_filename == None:
            raise Exception("no file *.bru in cwd")
        print('installing dependencies listed in', bru_filename)
        install_from_bru_file(bru_filename, library)
    else:
        # installables are ['googlemock', 'googlemock@1.7.0']
        # In this case we simply add deps to the *.bru (and *.gyp) file in
        # the cwd and then execute the same as the next 'bru install' would.
        # First resolve each 'module' or 'module@version' against ./library:
        installables = [parse_existing_module_at_version(installable, library)
                        for installable in installables]
        bru_filename = get_or_create_single_bru_file(os.getcwd())
        # companion gyp file shares the *.bru file's basename
        gyp_filename = bru_filename[:-3] + 'gyp'
        if not os.path.exists(gyp_filename):
            create_gyp_file(gyp_filename)
        add_dependencies_to_bru(bru_filename, installables)
        add_dependencies_to_gyp(gyp_filename, installables)
        for installable in installables:
            print("added dependency {}@{} to {} and {}".format(
                installable.module, installable.version,
                bru_filename, gyp_filename))
        # now download the new dependency just like 'bru install' would do
        # after we added the dep to the bru & gyp file:
        install_from_bru_file(bru_filename, library)
Fix warning message: pass the arguments through str.format() so the '{}' placeholders are actually substituted.
""" this here's the code responsible for executing 'bru install' commands, e.g.
'bru install package.json' or 'bru install protobug googlemock@1.7.0'.
"""
import os
import re
import glob
import shutil
import filecmp
import platform
import collections
import brulib.jsonc
import brulib.make
import brulib.module_downloader
class Installable:
    """ A module/version pair as requested on the command line, e.g.
    'googlemock@1.7.0' parses to Installable('googlemock', '1.7.0').
    version may be None when the '@version' suffix was omitted.
    """
    def __init__(self, module, version):
        self.module = module
        self.version = version
    def __eq__(self, other):
        if not isinstance(other, Installable):
            return False
        return self.module == other.module and self.version == other.version
    def __hash__(self):
        # defining __eq__ alone sets __hash__ to None in Python 3, which made
        # instances unusable in sets/dicts; keep hash consistent with __eq__
        return hash((self.module, self.version))
    def __repr__(self):
        return 'Installable({!r}, {!r})'.format(self.module, self.version)
def get_single_bru_file(dir):
    """ Return the path of the one *.bru file in dir, or None if there is
    none; raises if the dir holds more than one *.bru file. """
    candidates = glob.glob(os.path.join(dir, "*.bru"))
    if not candidates:
        return None
    if len(candidates) > 1:
        raise Exception("there are multiple *.bru files in {}: {}".format(
            dir, candidates))
    return candidates[0]
def get_or_create_single_bru_file(dir):
    """ Return the single *.bru file in the given dir, creating an empty
    package.bru (the analog of npm's package.json) when none exists yet.
    So unlike get_single_bru_file() this never returns None.
    """
    bru_file = get_single_bru_file(dir)
    if bru_file is None:
        # no *.bru file yet: start a fresh one with an empty dependency map
        bru_file = os.path.join(dir, 'package.bru')
        brulib.jsonc.savefile(bru_file, {'dependencies': {}})
        print('created ', bru_file)
    assert bru_file is not None
    return bru_file
def parse_module_at_version(installable):
    """ Parse e.g. 'googlemock@1.7.0' into Installable('googlemock','1.7.0');
    input lacking the @version suffix yields Installable(module, None).
    Raises on more than one '@'.
    """
    parts = installable.split('@')
    if len(parts) > 2:
        raise Exception("expected module@version but got {}".format(installable))
    module = parts[0]
    version = parts[1] if len(parts) == 2 else None
    return Installable(module, version)
def parse_existing_module_at_version(installable, library):
    """ Like parse_module_at_version, but falls back to the latest known
    version when the @version suffix was omitted, and verifies that
    module@version actually exists in ./library.
    Param library is of type brulib.library.Library
    """
    parsed = parse_module_at_version(installable)
    module = parsed.module
    version = parsed.version
    if not os.path.exists(library.get_module_dir(module)):
        raise Exception("no module {} in {}, may want to 'git pull'"
                        " if this module was added very recently".format(
                        module, library.get_root_dir()))
    if version is None:
        # no explicit version requested: pick the newest one in the library
        version = library.get_latest_version_of(module)
    if not library.has_formula(module, version):
        raise Exception("no version {} in {}/{}, may want to 'git pull'"
                        " if this version was added very recently".format(
                        version, library.get_root_dir(), module))
    assert version is not None
    return Installable(module, version)
def add_dependencies_to_bru(bru_filename, installables):
    """ Record each installable as a module->version entry in the *.bru
    file's 'dependencies' dict, creating that dict if absent. """
    bru = brulib.jsonc.loadfile(bru_filename)
    deps = bru.setdefault('dependencies', {})
    for installable in installables:
        deps[installable.module] = installable.version
    brulib.jsonc.savefile(bru_filename, bru)  # warning: this nukes comments atm
def add_dependencies_to_gyp(gyp_filename, installables):
    """ Add each installable as a gyp dependency expression
    ('bru_modules/<mod>/<mod>.gyp:*') to the first target of the given gyp
    file, skipping expressions that are already listed.
    """
    gyp = brulib.jsonc.loadfile(gyp_filename)
    # typically a gyp file has multiple targets, e.g. a static_library and
    # one or more test executables. Here we add the new dep to only the first
    # target in the gyp file, which is somewhat arbitrary. TODO: revise.
    # Until then end user can always shuffle around dependencies as needed
    # between targets.
    if not 'targets' in gyp:
        gyp['targets'] = []
    targets = gyp['targets']
    if len(targets) == 0:
        # bugfix: this used to do targets[0] = {}, which raises IndexError
        # on an empty list
        targets.append({})
    first_target = targets[0]
    if not 'dependencies' in first_target:
        first_target['dependencies'] = []
    deps = first_target['dependencies']
    for installable in installables:
        module = installable.module
        dep_gyp_path = "bru_modules/{}/{}.gyp".format(module, module)
        dep_expr = dep_gyp_path + ":*"  # depend on all targets, incl tests
        if not dep_expr in deps:
            deps.append(dep_expr)
    brulib.jsonc.savefile(gyp_filename, gyp)  # warning: this nukes comments atm
def create_gyp_file(gyp_filename):
    """ creates enough of a gyp file so that we can record dependencies.
    Raises if gyp_filename already exists (never clobbers user edits). """
    if os.path.exists(gyp_filename):
        raise Exception('{} already exists'.format(gyp_filename))
    gyp = collections.OrderedDict([
        ("includes", [
            # bru_common.gypi should in general not be edited, but stay a copy
            # of the original. If you want to override settings in this gypi
            # file then you're better off editing bru_overrides.gypi.
            # That way if bru_common.gyp gets improvements in git you don't
            # need to merge these external changes with your local ones.
            "bru_common.gypi",
            # This is the gypi file you're encouraged to edit, bru will always
            # keep this empty and untouched.
            "bru_overrides.gypi"
        ]),
        ("targets", [
            collections.OrderedDict([
                ("target_name", "foo"), # just a guess, user should rename
                ("type", "none"), # more likely 'static_library' or 'executable'
                # these two props are going to have to be filled in by enduser
                ("sources", []),
                # bugfix: gyp's target property is 'include_dirs', the
                # previously emitted 'includes_dirs' was silently ignored
                ("include_dirs", []),
                ("dependencies", [])
            ])])
    ])
    brulib.jsonc.savefile(gyp_filename, gyp)
# from http://stackoverflow.com/questions/431684/how-do-i-cd-in-python
class Chdir:
    """ Context manager that temporarily switches the current working
    directory. Used in conjunction with os.system for executing
    $make_command (typically to run ./configure); the previous cwd is
    restored on exit even if the body raised.
    """
    def __init__(self, newPath):
        self.newPath = newPath
    def __enter__(self):
        self._previous_cwd = os.getcwd()
        os.chdir(self.newPath)
    def __exit__(self, etype, value, traceback):
        os.chdir(self._previous_cwd)
def touch(file_name, times=None):
    """ Create file_name if missing and update its access/modification
    timestamps, like the unix 'touch' command.
    param times: optional (atime, mtime) tuple for os.utime; None means now.
    """
    # http://stackoverflow.com/questions/1158076/implement-touch-using-python
    with open(file_name, 'a'):
        os.utime(file_name, times)
def exec_make_command(formula, bru_modules_root, system):
    """ note that few modules specify a make_command. The few that do usually
    don't execute a full make but only a ./configure.
    This part is kinda ugly atm: consistent use of gyp for building modules
    we depend on would be preferable. TODO: revisit.
    param formula: parsed *.bru formula; reads its optional 'make_command'
        key plus 'module' and 'version'
    param bru_modules_root: directory the modules were unpacked into
    param system should be platform.system()
    Raises Exception when make_command has no entry for this OS, ValueError
    when the spawned command exits nonzero.
    """
    # make_command should only be used if we're too lazy to provide a
    # gyp file for a module.
    # A drawback of using ./configure make is that build are less reproducible
    # across machines, e.g. ./configure may enable some code paths on one
    # machine but not another depending on which libs are installed on both
    # machines.
    if 'make_command' in formula:
        module_dir = os.path.join(bru_modules_root, formula['module'], formula['version'])
        # marker file: lets us skip the (potentially slow) build on reruns
        make_done_file = os.path.join(module_dir, "make_command.done")
        if not os.path.exists(make_done_file):
            # pick a make command depending on host OS
            make_commands = formula['make_command']
            if not system in make_commands:
                raise Exception("no key {} in make_command".format(system))
            make_command = make_commands[system]
            # On Windows msvs toolchain build tools are typically not in your
            # PATH, but are expected to be added to your PATH via
            # %VS110COMNTOOLS%\vsvars32.bat. Let's call this vsvars32.bat
            # script here automatically on Windows so that the command line
            # may call nmake for example without having to hardcode in *.bru
            # which (of often multiple installed) msvs toolchain to use.
            if system == 'Windows':
                # I had trouble with passing cmds like %VS110COMNTOOLS%\\vsvars32.bat
                # thru os.system to powershell or cmd.exe. So instead let's
                # write the make_command into a batch file and execute that:
                make_command_bat = 'make_command.bat'
                make_command_path = os.path.join(module_dir, make_command_bat)
                with open(make_command_path, 'w') as batch_file:
                    msvs_version = brulib.make.get_latest_installed_msvs_version()
                    if msvs_version != None:
                        vsvars = 'call "%VS{}COMNTOOLS%\\vsvars32.bat"'.format(
                            msvs_version)
                        print("prefixing make_command with " + vsvars)
                        batch_file.write(vsvars + '\r\n')
                    # make_command uses ';' as command separator: one batch
                    # line per command
                    for cmd in make_command.split(';'):
                        batch_file.write(cmd + '\r\n')
                make_command = make_command_bat
            # exec make_command with cwd being the module_dir (so the dir the
            # gyp file is in, not that the gyp file is used here, but using the
            # same base dir for the gyp & make_command probably makes sense)
            with Chdir(module_dir):
                print("building via '{}' ...".format(make_command))
                error_code = os.system(make_command)
                if error_code != 0:
                    raise ValueError("build failed with error code {}".format(error_code))
            touch(make_done_file)
def download_module(library, module_name, module_version):
    """ Fetch the given module version into ./bru_modules and then run its
    make_command (if the formula declares one) for the current platform.
    Param library is of type brulib.library.Library.
    """
    modules_root = "./bru_modules"
    formula = library.load_formula(module_name, module_version)
    brulib.module_downloader.get_urls(library, formula, modules_root)
    exec_make_command(formula, modules_root, platform.system())
def verify_resolved_dependencies(formula, target, resolved_dependencies):
    """ Map one gyp target's dependency list onto globally resolved versions.

    param formula: the formula with a bunch of desired(!) dependencies which
        after conflict resolution across the whole set of diverse deps may be
        required to pull a different version for that module (to not violate
        the ODR), which risks not compiling the module (but which hopefully
        will compile & pass tests anyway).
    param target: one entry of the gyp file's 'targets' list.
    param resolved_dependencies: global modulename-to-version map computed
        across the whole bru.json dependency list.
    Returns the target's deps with cross-module refs replaced by the
    resolved_dependencies entries.
    Raises Exception if a cross-module dep is missing from
    resolved_dependencies (i.e. not listed in the *.bru dependencies).
    """
    # this here's the module we want to resolve deps for now:
    module = formula['module']
    version = formula['version']
    target_name = target['target_name']
    # detect the regexes as written by scan_deps.py: references into
    # a sibling module within ./bru_modules.
    bru_regex = r"^../([^/]+)/([^/]+)\.gyp:(.+)"

    def map_dependency(dep):
        """ param dep is a gyp file dependency, so either a local dep to a
        local target like 'zlib' or a cross-module dep like
        '../boost-regex/...'. There should be no other kinds of gyp deps
        in use. """
        match = re.match(bru_regex, dep)
        if match is None:
            return dep  # local target name, pass through unchanged
        upstream_module = match.group(1)
        if upstream_module not in resolved_dependencies:
            raise Exception("module {} listed in {}/{}.gyp's target '{}'"
                            " not found. Add it to {}/{}.bru:dependencies"
                            .format(
                                upstream_module, module, version, target_name,
                                module, version
                            ))
        return resolved_dependencies[upstream_module]

    return list(map(map_dependency, target['dependencies']))
def apply_glob_exprs(formula, sources):
    """ gyp has no support for glob expressions or wildcards in 'sources',
    so expand any glob entries into the explicit list of matching files.
    param sources is target['sources'] or target['sources!']
    """
    # the module's gyp file lives in bru_modules/<module>; all paths in it
    # are interpreted relative to that directory
    gyp_target_dir = os.path.join('bru_modules', formula['module'])
    expanded = []
    for entry in sources:
        if entry.startswith('ant:'):
            raise Exception('Ant-style glob exprs no longer supported: ' + entry)
        if '*' not in entry:
            # plain file name (relative to the gyp file's parent dir)
            expanded.append(entry)
            continue
        matches = []
        for filename in glob.glob(os.path.join(gyp_target_dir, entry)):
            relative = os.path.relpath(filename, start=gyp_target_dir)
            # keep forward slashes, otherwise sources! on windows will not match
            matches.append(relative.replace('\\', '/'))
        assert len(matches) > 0, "no matches for glob " + entry
        expanded += matches
    return list(sorted(expanded))
def apply_recursive(dic, func):
    """ Walk a nested structure of dicts and lists (e.g. a 'target' node or
    a 'conditions' child) and invoke func on every dict encountered.
    param dic: a dict node, or a child dict/list belonging to one
    param func: callable applied to each dict, taking it as the only param
    """
    if isinstance(dic, dict):
        func(dic)
        for child in dic.values():
            if isinstance(child, (dict, list)):
                apply_recursive(child, func)
    elif isinstance(dic, list):
        for item in dic:
            apply_recursive(item, func)
def apply_glob_to_sources(dic, formula):
    """ Expand glob expressions in the 'sources'/'sources!' properties of
    dic, which is a 'target' dictionary or one of the childnodes in a
    'conditions' list. Mutates dic in place.
    """
    for key in ('sources', 'sources!'):
        if key in dic:
            dic[key] = apply_glob_exprs(formula, dic[key])
def copy_gyp(library, formula, resolved_dependencies):
    """ Copy the module's gyp file from ./library to ./bru_modules/<module>,
    expanding source globs and sanity-checking its cross-module deps.

    Param library is of type brulib.library.Library.
    Param formula is the module's parsed *.bru formula.
    Param resolved_dependencies is a superset of the deps in formula
    with recursively resolved module versions (after resolving conflicts).
    """
    # If the module has a gyp file then let's copy it into ./bru_modules/$module,
    # so next to the unpacked tar.gz, which is where the gyp file's relative
    # paths expect include_dirs and source files and such.
    # Not all modules need a gyp file, but a gyp file allows specifying upstream
    # module dependencies, whereas a ./configure; make might have easily overlooked
    # dependencies that result in harder-to-reproduce builds (unless you build
    # on only one single machine in your organization).
    # Actually even for modules build via make_command we need a gyp file to
    # specify include paths and module libs via all|direct_dependent_settings.
    #
    # Note that the gyp file in the ./library does not contain 'dependencies'
    # property yet, we add this property now (to not have the same redundant deps
    # both in *.bru and *.gyp in the ./library dir)
    module_name = formula['module']
    assert module_name in resolved_dependencies
    resolved_version = resolved_dependencies[module_name]
    gyp = library.load_gyp(formula)
    for target in gyp['targets']:
        if 'dependencies' in target:
            # Initially I thought there should be no such prop in the
            # library/.../*.gyp file because these deps will be filled in with
            # resolved deps from the *.bru file. But then I ran into two
            # problems:
            # a) I wanted for example zlib tests to build via gyp also
            #    (espcially since zlib is being built via gyp target alrdy
            #    anyway), so the gyp test target should depend on the lib
            #    target.
            # b) often test targets need to pull in additional module deps
            #    that the module (without its tests) does not depend on, for
            #    example tests often depend on googletest or googlemock,
            #    whereas the main module does not.
            # So now a *.bru file lists the union of dependencies for all
            # targets in a gyp file, while each target depends explicitly
            # lists dependencies as "bru:googletest". Could also support a
            # format like "bru:googletest:1.7.0" but then the *.gyp file
            # and *.bru file dependency lists would be redundant. Todo: move
            # dependency lists from *.bru to *.gyp file altogether? Maybe...
            # NOTE(review): the resolved deps returned by this call are
            # discarded, so it only validates that all cross-module deps
            # are known — confirm whether the result was meant to be stored.
            verify_resolved_dependencies(formula, target, resolved_dependencies)
        # Sanity check: verify the 'sources' prop doesn't contain glob exprs
        # or wildcards: initially I though gyp was ok with
        #   "sources" : ".../src/*.cc"
        # in *.gyp files because at first glance this 'compiled', but it
        # turned out gyp just silently compiled zero source files in that case.
        #
        # Alternatively we could expand these wildcards now, drawback of that
        # is that the files in ./library are not really *.gyp files anymore,
        # and should probably be called *.gyp.in or *.gyp-bru or something
        # like that.
        # Apply the same mapping to 'sources' in the 'target' itelf and within
        # its childnodes like 'conditions':
        apply_recursive(target, lambda dic: apply_glob_to_sources(dic, formula))
    # note that library/boost-regex/1.57.0.gyp is being copied to
    # bru_modules/boost-regex/boost-regex.gyp here (with some minor
    # transformations that were applied, e.g. expanding wildcards)
    gyp_target_file = os.path.join('bru_modules', module_name, module_name + ".gyp")
    # We also need a certain set of MSVC options imported into gyp files
    # and don't want to repeat the same boring MSVC settings in every single
    # module's individual gyp file. So add common.gypi include unless
    # the module's gyp file explicitly specifies includes already.
    if not 'includes' in gyp:
        # we want the 'includes' at the begin, to achieve this order see
        # http://stackoverflow.com/questions/16664874/how-can-i-add-the-element-at-the-top-of-ordereddict-in-python
        new_gyp = collections.OrderedDict()
        new_gyp['includes'] = [
            '../../bru_common.gypi',
            '../../bru_overrides.gypi'
        ]
        for key, value in gyp.items():
            new_gyp[key] = value
        gyp = new_gyp
    brulib.jsonc.savefile(gyp_target_file, gyp)
    # this file is only saved for human reader's sake atm:
    brulib.jsonc.savefile(os.path.join('bru_modules', module_name, 'bru-version.json'),
        {'version': resolved_version})
def resolve_conflicts(library, dependencies, root_requestor):
    """ takes a dict of modules and version matchers and recursively finds
    all indirect deps. Then resolves version conflicts by picking the newer
    of competing deps, or by picking the version that was requested by the module
    closest to the root of the dependency tree (unsure still).
    param root_requestor is whatever topmost *.bru listed deps, e.g. 'package.bru'
    Returns a list of (module_name, resolved_version, requestor) tuples, one
    per transitive dependency, in first-requested order.
    """
    todo = [(module, version, root_requestor) for (module, version)
            in dependencies.items()]
    recursive_deps = collections.OrderedDict()
    # NOTE: todo deliberately grows while being iterated; Python's for loop
    # re-checks the list length each step, so this is a worklist traversal
    # over the whole dependency tree.
    for module_name, version_matcher, requestor in todo:
        module_version = version_matcher # todo: allow for npm-style version specs (e.g. '4.*')
        #print('resolving dependency {} version {} requested by {}'
        #    .format(module_name, module_version, requestor))
        if module_name in recursive_deps:
            # module requested before: the first requestor's version wins,
            # later conflicting requests only produce a warning
            resolved = recursive_deps[module_name]
            resolved_version = resolved['version']
            if module_version != resolved_version:
                winning_requestor = resolved['requestor']
                print("WARNING: version conflict for {} requested by first {} and then {}"
                      .format(module_name, winning_requestor, requestor))
                # instead of just letting the 2nd and later requestors loose
                # the competition we could probably do something more sensible.
                # todo?
        else:
            # this is the first time this module was requested, freeze that
            # chosen version:
            formula = library.load_formula(module_name, module_version)
            recursive_deps[module_name] = {
                'version' : module_version,
                'requestor' : requestor
            }
            # then descend deeper into the dependency tree:
            deps = formula['dependencies'] if 'dependencies' in formula else {}
            child_requestor = module_name
            todo += [(child_module, version, child_requestor)
                     for (child_module, version)
                     in deps.items()]
    return [(module, resolved['version'], resolved['requestor'])
            for (module, resolved) in recursive_deps.items()]
def install_from_bru_file(bru_filename, library):
    """ this gets executed when you 'bru install': it looks for a *.bru file
    in cwd and downloads the listed deps.
    Param bru_filename is the path of the *.bru file listing dependencies.
    Param library is of type brulib.library.Library.
    """
    package_jso = brulib.jsonc.loadfile(bru_filename)
    # resolve the full transitive dependency set first (conflicts settled)
    recursive_deps = resolve_conflicts(library, package_jso['dependencies'], bru_filename)
    resolved_dependencies = dict((module, version)
        for (module, version, requestor) in recursive_deps)
    for module_name, module_version, requestor in recursive_deps:
        print('processing dependency {} version {} requested by {}'
            .format(module_name, module_version, requestor))
        formula = library.load_formula(module_name, module_version)
        download_module(library, module_name, module_version)
        copy_gyp(library, formula, resolved_dependencies)
    # copy common.gypi which is referenced by module.gyp files and usually
    # also by the parent *.gyp (e.g. bru-sample:foo.gyp).
    # Should end users be allowed to make changes to bru_common.gypi or
    # should they rather edit their own optional bru_overrides.gypi which
    # shadows bru_common.gypi? Let's do the latter. See comments in
    # create_gyp_file for more details.
    # Anyway: just in case the user did make changes to bru_common.gypi
    # let's only copy it if it's new.
    common_gypi = 'bru_common.gypi'
    overrides_gypi = 'bru_overrides.gypi'
    common_gypi_src = os.path.join(library.get_root_dir(), '..', common_gypi)
    if not os.path.exists(common_gypi):
        print('copying', common_gypi)
        shutil.copyfile(common_gypi_src, common_gypi)
    elif not filecmp.cmp(common_gypi_src, common_gypi):
        print('WARNING: {} differs from {}: it is OK but not recommended'
            ' to modify {}, instead rather modify {}'.format(
            common_gypi, common_gypi_src, common_gypi, overrides_gypi))
    # create empty bru_overrides.gypi only if it doesn't exist yet
    # One use case for bru_overrides.gypi is to tweak the Debug build to
    # include clang -fsanitize=address options.
    if not os.path.exists(overrides_gypi):
        print('creating empty {}'.format(overrides_gypi))
        brulib.jsonc.savefile(overrides_gypi, {})
    #for module, version, requestor in recursive_deps:
    #    for ext in ['bru', 'gyp']:
    #        print("git add -f library/{}/{}.{}".format(module, version, ext))
    # todo: clean up unused module dependencies from /bru_modules?
def cmd_install(library, installables):
    """Mimic 'npm install' syntax, see https://docs.npmjs.com/cli/install.

    param installables: e.g. [] or ['googlemock@1.7.0', 'boost-regex']
        Examples:
          a) bru install googlemock@1.7.0
          b) bru install googlemock
          c) bru install
        Variant (a) installs the module at the given version. Variant (b)
        installs the latest known version of the module as specified by the
        versions listed in bru/library/googlemock. Variant (c) installs all
        dependencies listed in the local *.bru file (similar to how
        'npm install' installs all deps from ./package.json).
        Unlike for 'npm install' the option --save is implied: whatever you
        install ends up in the local *.bru file's "dependencies" list, as
        well as in the companion *.gyp file.
    param library: of type brulib.library.Library
    """
    if not installables:
        # Variant (c): 'bru install' with no args installs deps from ./*.bru
        bru_filename = get_single_bru_file(os.getcwd())
        if bru_filename is None:  # was '== None'; identity check is the idiom for None
            raise Exception("no file *.bru in cwd")
        print('installing dependencies listed in', bru_filename)
        install_from_bru_file(bru_filename, library)
    else:
        # Variants (a)/(b): add the requested deps to the *.bru (and *.gyp)
        # file in the cwd, then proceed exactly like a plain 'bru install'.
        installables = [parse_existing_module_at_version(installable, library)
                        for installable in installables]
        bru_filename = get_or_create_single_bru_file(os.getcwd())
        # foo.bru -> foo.gyp (strip the trailing 'bru', keep the dot)
        gyp_filename = bru_filename[:-3] + 'gyp'
        if not os.path.exists(gyp_filename):
            create_gyp_file(gyp_filename)
        add_dependencies_to_bru(bru_filename, installables)
        add_dependencies_to_gyp(gyp_filename, installables)
        for installable in installables:
            print("added dependency {}@{} to {} and {}".format(
                  installable.module, installable.version,
                  bru_filename, gyp_filename))
        # now download the new dependency just like 'bru install' would do
        # after we added the dep to the bru & gyp file:
        install_from_bru_file(bru_filename, library)
|
# coding=utf-8
# noinspection PyUnresolvedReferences
from chatcommunicate import add_room, block_room, CmdException, command, get_report_data, is_privileged, message, \
tell_rooms
# noinspection PyUnresolvedReferences
from globalvars import GlobalVars
from findspam import FindSpam
# noinspection PyUnresolvedReferences
from datetime import datetime
from utcdate import UtcDate
from apigetpost import api_get_post, PostData
from datahandling import *
from blacklists import load_blacklists
from metasmoke import Metasmoke
from parsing import *
from spamhandling import handle_spam
from gitmanager import GitManager
from tasks import Tasks
import threading
import random
import requests
import os
import time
from html import unescape
# noinspection PyCompatibility
import regex
from helpers import only_blacklists_changed
from classes import Post
from classes.feedback import *
# TODO: Do we need uid == -2 check? Turn into "is_user_valid" check
#
#
# System command functions below here
# The following two commands are just bypasses for the "unrecognized command" message, so that pingbot
# can respond instead.
@command(aliases=['ping-help'])
def ping_help():
    """Deliberate no-op: swallows the command so pingbot can answer instead of
    SmokeDetector replying with an "unrecognized command" message."""
    return None
@command()
def groups():
    """Deliberate no-op (like ping_help) so pingbot can respond instead.

    Bug fix: the original read `def groups()` with no trailing colon, which is
    a SyntaxError and would prevent the whole module from importing.
    :return: None
    """
    return None
@command(int, whole_msg=True, privileged=True)
def approve(msg, pr_num):
    """
    Forward a PR-approval request for the given PR number to metasmoke.
    :param msg: ChatExchange message
    :param pr_num: pull request number
    :return: A string
    """
    # Guard clause: code privileges are required for PR approval.
    if not is_code_privileged(msg._client.host, msg.owner.id):
        raise CmdException("You don't have permission to do that.")
    resp = requests.post('{}/github/pr_approve/{}'.format(GlobalVars.metasmoke_host, pr_num))
    if resp.status_code == 200:
        return "Posted approval comment. PR will be merged automatically if it's a blacklist PR."
    return "Forwarding request to metasmoke returned HTTP {}. Check status manually.".format(resp.status_code)
# --- Blacklist Functions --- #
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(str, whole_msg=True, privileged=True)
def addblu(msg, user):
    """
    Adds a user to the site blacklist
    :param msg: ChatExchange message
    :param user: profile URL, or "userid sitename"
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    # -2 is the parser's error sentinel; surface its message directly.
    if int(uid) == -2:
        raise CmdException("Error: {}".format(val))
    if int(uid) > -1 and val != "":
        message_url = "https://chat.{}/transcript/{}?m={}".format(msg._client.host, msg.room.id, msg.id)
        add_blacklisted_user((uid, val), message_url, "")
        return "User blacklisted (`{}` on `{}`).".format(uid, val)
    raise CmdException("Invalid format. Valid format: `!!/addblu profileurl` *or* `!!/addblu userid sitename`.")
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(str)
def isblu(user):
    """
    Check if a user is blacklisted
    :param user: profile URL, or "userid sitename"
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    # -2 is the parser's error sentinel; surface its message directly.
    if int(uid) == -2:
        return "Error: {}".format(val)
    if int(uid) > -1 and val != "":
        status = "blacklisted" if is_blacklisted_user((uid, val)) else "not blacklisted"
        return "User is {} (`{}` on `{}`).".format(status, uid, val)
    return "Invalid format. Valid format: `!!/isblu profileurl` *or* `!!/isblu userid sitename`."
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(str, privileged=True)
def rmblu(user):
    """
    Removes user from site blacklist
    :param user: profile URL, or "userid sitename"
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    # -2 is the parser's error sentinel; surface its message directly.
    if int(uid) == -2:
        return "Error: {}".format(val)
    if int(uid) > -1 and val != "":
        if remove_blacklisted_user((uid, val)):
            return "User removed from blacklist (`{}` on `{}`).".format(uid, val)
        return "User is not blacklisted."
    return "Invalid format. Valid format: `!!/rmblu profileurl` *or* `!!/rmblu userid sitename`."
# --- Whitelist functions --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyMissingTypeHints
@command(str, privileged=True)
def addwlu(user):
    """
    Adds a user to site whitelist
    :param user: profile URL, or "userid sitename"
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    # -2 is the parser's error sentinel; surface its message directly.
    if int(uid) == -2:
        return "Error: {}".format(val)
    if int(uid) > -1 and val != "":
        add_whitelisted_user((uid, val))
        return "User whitelisted (`{}` on `{}`).".format(uid, val)
    return "Invalid format. Valid format: `!!/addwlu profileurl` *or* `!!/addwlu userid sitename`."
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyMissingTypeHints
@command(str)
def iswlu(user):
    """
    Checks if a user is whitelisted
    :param user: profile URL, or "userid sitename"
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    # -2 is the parser's error sentinel; surface its message directly.
    if int(uid) == -2:
        return "Error: {}".format(val)
    if int(uid) > -1 and val != "":
        status = "whitelisted" if is_whitelisted_user((uid, val)) else "not whitelisted"
        return "User is {} (`{}` on `{}`).".format(status, uid, val)
    raise CmdException("Invalid format. Valid format: `!!/iswlu profileurl` *or* `!!/iswlu userid sitename`.")
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(str, privileged=True)
def rmwlu(user):
    """
    Removes a user from site whitelist
    :param user: profile URL, or "userid sitename"
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    # Bug fix: this used `int(uid) != -1`, which let the -2 error sentinel fall
    # into the removal branch (calling remove_whitelisted_user with -2) and made
    # the `== -2` error branch unreachable. Use `> -1` like the sibling commands.
    if int(uid) > -1 and val != "":
        if remove_whitelisted_user((uid, val)):
            return "User removed from whitelist (`{}` on `{}`).".format(uid, val)
        else:
            return "User is not whitelisted."
    elif int(uid) == -2:
        return "Error: {}".format(val)
    else:
        return "Invalid format. Valid format: `!!/rmwlu profileurl` *or* `!!/rmwlu userid sitename`."
# noinspection PyIncorrectDocstring
@command(str)
def blacklist(_):
    """
    Returns a string which explains the usage of the new blacklist commands.
    :return: A string (never returned; this command always raises)
    """
    # Deprecated entry point: always raises with usage guidance for the
    # replacement commands.
    raise CmdException("The !!/blacklist command has been deprecated. "
                       "Please use !!/blacklist-website, !!/blacklist-username,"
                       "!!/blacklist-keyword, or perhaps !!/watch-keyword. "
                       "Remember to escape dots in URLs using \\.")
def check_blacklist(string_to_test, is_username, is_watchlist):
    """
    Test a candidate pattern against FindSpam and return the reasons that
    already catch it (used to warn before adding a redundant blacklist entry).
    :param string_to_test: the candidate text
    :param is_username: test the string as a username instead of a post body
    :param is_watchlist: keep "potentially bad keyword" reasons in the result
    :return: list of reason strings
    """
    base = {'title': 'Valid title', 'body': 'Valid body',
            'owner': {'display_name': "Valid username", 'reputation': 1, 'link': ''},
            'site': "", 'score': 0}
    if is_username:
        base['owner'] = {'display_name': string_to_test, 'reputation': 1, 'link': ''}
    else:
        base['body'] = string_to_test
    # Run the same fake post as both a question and an answer, de-duplicating
    # the reasons via a set.
    found = set()
    for as_answer in (False, True):
        post = Post(api_response=dict(base, IsAnswer=as_answer))
        post_reasons, _ = FindSpam.test_post(post)
        found.update(post_reasons)
    reasons = list(found)
    # Watchlist hits are expected when adding to the watchlist itself.
    if not is_watchlist:
        reasons = [reason for reason in reasons if "potentially bad keyword" not in reason]
    return reasons
def format_blacklist_reasons(reasons):
    """
    Join capitalized reasons into an English list ("A", "A and B",
    "A, B, and C").
    :param reasons: iterable of reason strings
    :return: A string
    """
    capitalized = [reason.capitalize() for reason in reasons]
    if len(capitalized) < 3:
        return " and ".join(capitalized)
    # Oxford comma for three or more items.
    return "{}, and {}".format(", ".join(capitalized[:-1]), capitalized[-1])
def do_blacklist(pattern, blacklist_type, msg, force=False):
    """
    Adds a string to the website blacklist and commits/pushes to GitHub
    :param pattern: regex pattern to add
    :param blacklist_type: e.g. "keyword", "website", "username", "watch_keyword"
    :param msg: ChatExchange message of the requester
    :param force: skip the "already caught" check
    :return: A string
    """
    # noinspection PyProtectedMember
    chat_user_profile_link = "http://chat.{host}/users/{id}".format(host=msg._client.host,
                                                                    id=msg.owner.id)
    # Reject syntactically invalid regexes up front.
    try:
        regex.compile(pattern)
    except regex._regex_core.error:
        raise CmdException("An invalid pattern was provided, not blacklisting.")
    if not force:
        # Approximate the pattern as plain text to see whether it is already caught.
        sanitized = pattern.replace("\\W", " ").replace("\\.", ".")
        caught_by = check_blacklist(sanitized,
                                    blacklist_type == "username",
                                    blacklist_type == "watch_keyword")
        if caught_by:
            raise CmdException("That pattern looks like it's already caught by " +
                               format_blacklist_reasons(caught_by) +
                               "; append `-force` if you really want to do that.")
    _, result = GitManager.add_to_blacklist(
        blacklist=blacklist_type,
        item_to_blacklist=pattern,
        username=msg.owner.name,
        chat_profile_link=chat_user_profile_link,
        code_permissions=is_code_privileged(msg._client.host, msg.owner.id)
    )
    return result
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, privileged=True, give_name=True, aliases=["blacklist-keyword",
                                                                        "blacklist-website",
                                                                        "blacklist-username",
                                                                        "blacklist-keyword-force",
                                                                        "blacklist-website-force",
                                                                        "blacklist-username-force"])
def blacklist_keyword(msg, pattern, alias_used="blacklist-keyword"):
    """
    Adds a string to the blacklist and commits/pushes to GitHub
    :param msg:
    :param pattern:
    :return: A string
    """
    # Alias shape is "blacklist-<type>[-force]"; anything after the type
    # means the -force suffix was used.
    _, blacklist_type, *modifiers = alias_used.split("-")
    return do_blacklist(pattern, blacklist_type, msg, force=bool(modifiers))
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, privileged=True, aliases=["watch-keyword"])
def watch(msg, website):
    """
    Adds a string to the watched keywords list and commits/pushes to GitHub
    :param msg:
    :param website: pattern to watch
    :return: A string
    """
    # Thin wrapper over do_blacklist targeting the watch_keyword list; never
    # forced (use !!/watch-force for that).
    return do_blacklist(website, "watch_keyword", msg, force=False)
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, privileged=True, aliases=["watch-force", "watch-keyword-force"])
def watch_force(msg, website):
    """
    Adds a string to the watched keywords list and commits/pushes to GitHub
    :param msg:
    :param website: pattern to watch
    :return: A string
    """
    # Forced variant of !!/watch: skips the "already caught" check.
    return do_blacklist(website, "watch_keyword", msg, force=True)
# noinspection PyIncorrectDocstring
@command(privileged=True)
def gitstatus():
    """
    Returns GitManager's report of the current git status
    :return: A string
    """
    return GitManager.current_git_status()
@command(privileged=True, aliases=["remote-diff", "remote_diff"])
def remotediff():
    """
    Shows the diff between the local checkout and the remote, plus whether a
    pull would require a full restart.
    :return: A string
    """
    # Fetch the diff once; the original called GitManager.get_remote_diff()
    # twice, doing the remote git round-trip a second time for the same data.
    diff = GitManager.get_remote_diff()
    will_require_full_restart = "SmokeDetector will require a full restart to pull changes: " \
                                "{}".format(str(not only_blacklists_changed(diff)))
    return "{}\n\n{}".format(diff, will_require_full_restart)
# --- Joke Commands --- #
@command(whole_msg=True)
def blame(msg):
    """Blame a random user currently in the room (joke command)."""
    victim_id = random.choice(msg.room.get_current_user_ids())
    victim = msg._client.get_user(victim_id)
    return "It's [{}](https://chat.{}/users/{})'s fault.".format(victim.name,
                                                                 msg._client.host,
                                                                 victim.id)
@command(str, whole_msg=True, aliases=["blame\u180E"])
def blame2(msg, x):
    """Blame the user whose id is hidden in zero-width characters (joke command)."""
    # x encodes a user id in base 7, most significant digit first, using
    # invisible characters as the digits.
    digits = {"\u180E": 0, "\u200B": 1, "\u200C": 2, "\u200D": 3, "\u2060": 4, "\u2063": 5, "\uFEFF": 6}
    user = 0
    for char in x:
        user = user * len(digits) + digits[char]
    victim = msg._client.get_user(user)
    return "It's [{}](https://chat.{}/users/{})'s fault.".format(victim.name,
                                                                 msg._client.host,
                                                                 victim.id)
# noinspection PyIncorrectDocstring
@command()
def brownie():
    """
    Returns the string "Brown!" (This is a joke command)
    :return: A string
    """
    return "Brown!"
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, arity=(0, 1))
def coffee(msg, other_user):
    """
    Returns a string stating who the coffee is for (This is a joke command)
    :param msg:
    :param other_user: optional recipient; falls back to the sender
    :return: A string
    """
    # `or` keeps the original truthiness fallback (None or "" -> sender,
    # with spaces stripped so the @-ping works).
    recipient = other_user or msg.owner.name.replace(" ", "")
    return "*brews coffee for @" + recipient + "*"
# noinspection PyIncorrectDocstring
@command()
def lick():
    """
    Returns a fixed string when a user says 'lick' (This is a joke command)
    :return: A string
    """
    return "*licks ice cream cone*"
TEAS = ['earl grey', 'green', 'chamomile', 'lemon', 'darjeeling', 'mint', 'jasmine', 'passionfruit']
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, arity=(0, 1))
def tea(msg, other_user):
    """
    Returns a string stating who the tea is for (This is a joke command)
    :param msg:
    :param other_user: optional recipient; falls back to the sender
    :return: A string
    """
    # Sender's name has spaces stripped so the @-ping works.
    recipient = msg.owner.name.replace(" ", "") if other_user is None else other_user
    return "*brews a cup of {} tea for @{}*".format(random.choice(TEAS), recipient)
# noinspection PyIncorrectDocstring
@command()
def wut():
    """
    Returns a fixed string when a user asks 'wut' (This is a joke command)
    :return: A string
    """
    return "Whaddya mean, 'wut'? Humans..."
@command(aliases=["zomg_hats"])
def hats():
    """
    Returns a Winter Bash countdown string, or "" outside the event window.
    :return: A string
    """
    wb_start = datetime(2017, 12, 13, 0, 0, 0)
    wb_end = datetime(2018, 1, 3, 0, 0, 0)
    now = datetime.utcnow()

    def _countdown(diff):
        # "N day(s), N hour(s), N minute(s), and N second(s)" with correct
        # pluralization. diff.seconds excludes whole days, which are reported
        # separately via diff.days.
        hours, remainder = divmod(diff.seconds, 3600)
        minutes, seconds = divmod(remainder, 60)

        def unit(value, name):
            return "{} {}".format(value, name if value == 1 else name + "s")

        return "{}, {}, {}, and {}".format(unit(diff.days, "day"), unit(hours, "hour"),
                                           unit(minutes, "minute"), unit(seconds, "second"))

    # The two branches previously duplicated the divmod/pluralize/format code;
    # behavior and output strings are unchanged.
    if wb_start > now:
        return "WE LOVE HATS! Winter Bash will begin in {}.".format(_countdown(wb_start - now))
    elif wb_end > now:
        return "Winter Bash won't end for {}. GO EARN SOME HATS!".format(_countdown(wb_end - now))
    return ""
# --- Block application from posting functions --- #
# noinspection PyIncorrectDocstring
@command(int, int, whole_msg=True, privileged=True, arity=(1, 2))
def block(msg, block_time, room_id):
    """
    Blocks posts from application for a period of time
    :param msg:
    :param block_time: seconds to block for; values outside (0, 14400) fall back to 900
    :param room_id: optional room to restrict the block to; None means globally
    :return: A string
    """
    time_to_block = block_time if 0 < block_time < 14400 else 900
    block_room(room_id, msg._client.host, time.time() + time_to_block)
    # Bug fix: room_id is declared as int in the decorator, so the original
    # `"in room " + room_id` raised TypeError; convert explicitly.
    which_room = "globally" if room_id is None else "in room " + str(room_id)
    block_message = "Reports blocked for {} seconds {}.".format(time_to_block, which_room)
    tell_rooms(block_message, ("debug", "metatavern"), ())
    # Bug fix: the original `return report` returned the (later-defined)
    # `report` command *function object*, not the confirmation text.
    return block_message
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(int, int, whole_msg=True, privileged=True, arity=(1, 2))
def unblock(msg, room_id):
    """
    Unblocks posting to a room
    :param msg:
    :param room_id: optional room to unblock; None means globally
    :return: A string
    """
    # NOTE(review): the decorator declares two int converters but the function
    # takes only room_id besides msg — confirm against the `command` decorator.
    block_room(room_id, msg._client.host, -1)
    # Bug fix: room_id is an int per the decorator; `"in room " + room_id`
    # raised TypeError, so convert explicitly.
    which_room = "globally" if room_id is None else "in room " + str(room_id)
    unblock_message = "Reports unblocked {}.".format(which_room)
    tell_rooms(unblock_message, ("debug", "metatavern"), ())
    # Bug fix: the original `return report` returned the (later-defined)
    # `report` command *function object*, not the confirmation text.
    return unblock_message
# --- Administration Commands --- #
# noinspection PyIncorrectDocstring
@command()
def alive():
    """
    Returns a string indicating the process is still active
    :return: A string
    """
    responses = ('Yup', 'You doubt me?', 'Of course',
                 '... did I miss something?', 'plz send teh coffee',
                 'Watching this endless list of new questions *never* gets boring',
                 'Kinda sorta')
    return random.choice(responses)
# noinspection PyIncorrectDocstring
@command(int, privileged=True, arity=(0, 1))
def errorlogs(count):
    """
    Shows the most recent lines in the error logs
    :param count: number of lines to fetch; defaults to 50 when omitted
    :return: A string
    """
    # `count or 50`: the argument arrives as None when omitted (and an explicit
    # 0 also falls back to 50).
    return fetch_lines_from_error_log(count or 50)
# noinspection PyIncorrectDocstring
@command(aliases=["commands", "help"])
def info():
    """
    Returns the help text
    :return: A string
    """
    return ("I'm {} a bot that detects spam and offensive posts on the network and"
            " posts alerts to chat."
            " [A command list is available here](https://charcoal-se.org/smokey/Commands)."
            .format(GlobalVars.chatmessage_prefix))
# noinspection PyIncorrectDocstring
@command()
def location():
    """
    Returns the current location the application is running from
    :return: A string with the current location
    """
    return GlobalVars.location
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(privileged=True)
def master():
    """
    Forces a system exit with exit code = 8
    :return: None
    """
    # NOTE(review): exit code 8 is presumably interpreted by the launch wrapper
    # (the command name suggests "switch to master") — confirm in the wrapper.
    # os._exit skips all cleanup handlers by design.
    os._exit(8)
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(privileged=True)
def pull():
    """
    Pull an update from GitHub
    :return: String on failure, None on success
    """
    # Blacklist-only changes can be hot-reloaded without restarting the process.
    if only_blacklists_changed(GitManager.get_remote_diff()):
        GitManager.pull_remote()
        load_blacklists()
        return "No code modified, only blacklists reloaded."
    else:
        # Code changed: check CI status of the tip of the deploy branch before
        # restarting into it.
        request = requests.get('https://api.github.com/repos/Charcoal-SE/SmokeDetector/git/refs/heads/deploy')
        latest_sha = request.json()["object"]["sha"]
        request = requests.get(
            'https://api.github.com/repos/Charcoal-SE/SmokeDetector/commits/{commit_code}/statuses'.format(
                commit_code=latest_sha))
        states = []
        for ci_status in request.json():
            state = ci_status["state"]
            states.append(state)
        if "success" in states:
            # NOTE(review): exit code 3 is presumably handled by the supervisor
            # as "restart with freshly pulled code" — confirm in the wrapper.
            os._exit(3)
        elif "error" in states or "failure" in states:
            raise CmdException("CI build failed! :( Please check your commit.")
        elif "pending" in states or not states:
            raise CmdException("CI build is still pending, wait until the build has finished and then pull again.")
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(whole_msg=True, privileged=True, aliases=["restart"])
def reboot(msg):
    """
    Forces a system exit with exit code = 5
    :param msg:
    :return: None
    """
    msg.room.send_message("Goodbye, cruel world")
    # NOTE(review): unlike stappit/standby there is no sleep before exiting,
    # so the farewell message may not be delivered — confirm if intentional.
    os._exit(5)
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(whole_msg=True)
def amiprivileged(msg):
    """
    Tells user whether or not they have privileges
    :param msg:
    :return: A string
    """
    # \u2713 = check mark, \u2573 = cross mark.
    if not is_privileged(msg.owner, msg.room):
        return "\u2573 " + GlobalVars.not_privileged_warning
    return "\u2713 You are a privileged user."
# noinspection PyIncorrectDocstring,
@command(whole_msg=True)
def amicodeprivileged(msg):
    """
    Tells user whether or not they have code privileges
    :param msg:
    :return: A string
    """
    # \u2713 = check mark, \u2573 = cross mark.
    if not is_code_privileged(msg._client.host, msg.owner.id):
        return "\u2573 No, you are not a code-privileged user."
    return "\u2713 You are a code-privileged user."
# noinspection PyIncorrectDocstring
@command()
def apiquota():
    """
    Report how many API hits remain for the day
    :return: A string
    """
    return "The current API quota remaining is {}.".format(GlobalVars.apiquota)
# noinspection PyIncorrectDocstring
@command()
def queuestatus():
    """
    Report the body-fetcher's current API queue
    :return: A string
    """
    return GlobalVars.bodyfetcher.print_queue()
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(str, whole_msg=True, privileged=True, arity=(0, 1))
def stappit(msg, location_search):
    """
    Forces a system exit with exit code = 6
    :param msg:
    :param location_search: optional substring filter; None (no argument) matches every instance
    :return: None
    """
    # Exit only when no filter was given or this instance's location matches it.
    if location_search is None or location_search.lower() in GlobalVars.location.lower():
        msg.room.send_message("Goodbye, cruel world")
        time.sleep(1)  # give the farewell message a chance to be delivered
        os._exit(6)
def td_format(td_object):
    """
    Format a timedelta as e.g. "2 days, 3 hours, 1 minute".
    :param td_object: a datetime.timedelta
    :return: A string ("" for deltas under one second)
    """
    # adapted from: http://stackoverflow.com/a/13756038/5244995
    seconds = int(td_object.total_seconds())
    periods = [
        ('year', 60 * 60 * 24 * 365),
        ('month', 60 * 60 * 24 * 30),
        ('day', 60 * 60 * 24),
        ('hour', 60 * 60),
        ('minute', 60),
        ('second', 1)
    ]
    strings = []
    for period_name, period_seconds in periods:
        # Bug fix: this used `>` instead of `>=`, so exact multiples were
        # dropped — exactly 1 second yielded "" and exactly 1 minute yielded
        # "60 seconds".
        if seconds >= period_seconds:
            period_value, seconds = divmod(seconds, period_seconds)
            if period_value == 1:
                strings.append("%s %s" % (period_value, period_name))
            else:
                strings.append("%s %ss" % (period_value, period_name))
    return ", ".join(strings)
# noinspection PyIncorrectDocstring
@command()
def status():
    """
    Returns the amount of time the application has been running
    :return: A string
    """
    # Naive-UTC arithmetic; UtcDate.startup_utc_date is presumably also naive
    # UTC — confirm in utcdate module.
    now = datetime.utcnow()
    diff = now - UtcDate.startup_utc_date
    return 'Running since {time} UTC ({relative})'.format(time=GlobalVars.startup_utc, relative=td_format(diff))
# noinspection PyIncorrectDocstring
@command(privileged=True)
def stopflagging():
    """
    Asks metasmoke to stop autoflagging, asynchronously via the Tasks queue
    :return: A string
    """
    Tasks.do(Metasmoke.stop_autoflagging)
    return "Request sent..."
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(str, whole_msg=True, privileged=True)
def standby(msg, location_search):
    """
    Forces a system exit with exit code = 7
    :param msg:
    :param location_search: substring filter matched against this instance's location
    :return: None
    """
    # Unlike stappit, the filter argument is required here (no None check).
    if location_search.lower() in GlobalVars.location.lower():
        msg.room.send_message("{location} is switching to standby".format(location=GlobalVars.location))
        time.sleep(1)  # let the announcement be delivered before exiting
        os._exit(7)
# noinspection PyIncorrectDocstring
@command(str, aliases=["test-q", "test-a", "test-u", "test-t"], give_name=True)
def test(content, alias_used="test"):
    """
    Test an answer to determine if it'd be automatically reported
    :param content: text to test (body, username, or title depending on alias)
    :return: A string
    """
    default_title = 'Valid title'
    default_body = "Valid question body"
    default_username = "Valid username"
    # Per alias: which field the content fills, and whether the fake post is
    # treated as an answer. The bare "test" alias puts it in every field.
    if alias_used == "test-q":
        kind, title, body, username, is_answer = " question.", default_title, content, default_username, False
    elif alias_used == "test-a":
        kind, title, body, username, is_answer = "n answer.", default_title, content, default_username, True
    elif alias_used == "test-u":
        kind, title, body, username, is_answer = " username.", default_title, default_body, content, False
    elif alias_used == "test-t":
        kind, title, body, username, is_answer = " title.", content, default_body, default_username, False
    else:
        kind, title, body, username, is_answer = " post, title or username.", content, content, content, False
    fakepost = Post(api_response={'title': title, 'body': body,
                                  'owner': {'display_name': username, 'reputation': 1, 'link': ''},
                                  'site': "", 'IsAnswer': is_answer, 'score': 0})
    reasons, why_response = FindSpam.test_post(fakepost)
    if not reasons:
        return "> " + "Would not be caught as a{}".format(kind)
    result = "> " + ", ".join(reasons).capitalize()
    if why_response:
        result += "\n----------\n"
        result += why_response
    return result
# noinspection PyIncorrectDocstring
@command()
def threads():
    """
    Returns a description of current threads, for debugging
    :return: A string
    """
    descriptions = ["{ident}: {name}".format(ident=t.ident, name=t.name)
                    for t in threading.enumerate()]
    return "\n".join(descriptions)
# noinspection PyIncorrectDocstring
@command(aliases=["rev", "ver"])
def version():
    """
    Returns the current version of the application
    :return: A string
    """
    commit_code = GlobalVars.commit['id']
    return '{} [{}]({}/commit/{})'.format(GlobalVars.location,
                                          GlobalVars.commit_with_author,
                                          GlobalVars.bot_repository,
                                          commit_code)
# noinspection PyIncorrectDocstring
@command(whole_msg=True)
def whoami(msg):
    """
    Returns user id of smoke detector
    :param msg:
    :return: A string
    """
    my_id = msg._client._br.user_id
    return "My id for this room is {}, and it's not apnorton's fault.".format(my_id)
# --- Notification functions --- #
# noinspection PyIncorrectDocstring
@command(int, whole_msg=True)
def allnotificationsites(msg, room_id):
    """
    Returns a string stating what sites a user will be notified about
    :param msg:
    :param room_id:
    :return: A string
    """
    sites = get_all_notification_sites(msg.owner.id, msg._client.host, room_id)
    if not sites:
        return "You won't get notified for any sites in that room."
    return "You will get notified for these sites:\r\n" + ", ".join(sites)
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(int, str, whole_msg=True)
def notify(msg, room_id, se_site):
    """
    Subscribe a user to events on a site in a single room
    :param msg:
    :param room_id:
    :param se_site:
    :return: A string
    """
    # TODO: Add check whether smokey reports in that room
    response, full_site = add_to_notification_list(msg.owner.id, msg._client.host, room_id, se_site)
    if response == 0:
        return "You'll now get pings from me if I report a post on `{site}`, in room "\
               "`{room}` on `chat.{domain}`".format(site=se_site, room=room_id, domain=msg._client.host)
    # Non-zero response codes map to specific error messages.
    errors = {-1: "That notification configuration is already registered.",
              -2: "The given SE site does not exist."}
    raise CmdException(errors.get(response, "Unrecognized code returned when adding notification."))
# TODO: !!/unnotify-all
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(int, str, whole_msg=True)
def unnotify(msg, room_id, se_site):
    """
    Unsubscribes a user from specific events
    :param msg:
    :param room_id:
    :param se_site:
    :return: A string
    """
    removed = remove_from_notification_list(msg.owner.id, msg._client.host, room_id, se_site)
    if not removed:
        raise CmdException("That configuration doesn't exist.")
    return "I will no longer ping you if I report a post on `{site}`, in room `{room}` "\
           "on `chat.{domain}`".format(site=se_site, room=room_id, domain=msg._client.host)
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(int, str, whole_msg=True)
def willbenotified(msg, room_id, se_site):
    """
    Returns a string stating whether a user will be notified or not
    :param msg:
    :param room_id:
    :param se_site:
    :return: A string
    """
    notified = will_i_be_notified(msg.owner.id, msg._client.host, room_id, se_site)
    return ("Yes, you will be notified for that site in that room."
            if notified else
            "No, you won't be notified for that site in that room.")
RETURN_NAMES = {"admin": ["admin", "admins"], "code_admin": ["code admin", "code admins"]}
VALID_ROLES = {"admin": "admin",
"code_admin": "code_admin",
"admins": "admin",
"codeadmins": "code_admin"}
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(str, whole_msg=True)
def whois(msg, role):
    """
    Return a list of important users
    :param msg:
    :param role: one of VALID_ROLES' keys
    :return: A string
    """
    if role not in VALID_ROLES:
        raise CmdException("That is not a user level I can check. "
                           "I know about {0}".format(", ".join(set(VALID_ROLES.values()))))
    ms_route = "https://metasmoke.erwaysoftware.com/api/v2.0/users/with_role/{}?key={}&per_page=100&filter=JIHF".format(
        VALID_ROLES[role],
        GlobalVars.metasmoke_key)
    user_response = requests.get(ms_route)
    user_response.encoding = 'utf-8-sig'
    user_response = user_response.json()
    chat_host = msg._client.host
    # Build our list of admin chat ids.
    # NOTE(review): an unknown chat host leaves key == "", and the list
    # comprehension below would raise KeyError('') — confirm all hosts covered.
    key = ""
    if chat_host == "stackexchange.com":
        key = 'stackexchange_chat_id'
    elif chat_host == "meta.stackexchange.com":
        key = 'meta_stackexchange_chat_id'
    elif chat_host == "stackoverflow.com":
        key = 'stackoverflow_chat_id'
    admin_ids = [a[key] for a in user_response['items'] if a[key] and a['id'] != -1]
    all_users_in_room = msg.room.get_current_user_ids()
    admins_in_room = list(set(admin_ids) & set(all_users_in_room))
    admins_not_in_room = list(set(admin_ids) - set(admins_in_room))

    def _user_tuple(admin):
        # One get_user() round-trip per admin; the original made four identical
        # calls per admin and always built all three lists, even unused ones.
        user = msg._client.get_user(admin)
        return admin, user.name, user.last_message, user.last_seen

    return_name = RETURN_NAMES[VALID_ROLES[role]][0 if len(admin_ids) == 1 else 1]
    response = "I am aware of {} {}".format(len(admin_ids), return_name)
    if admins_in_room:
        admins_in_room_list = [_user_tuple(admin) for admin in admins_in_room]
        admins_not_in_room_list = [_user_tuple(admin) for admin in admins_not_in_room]
        admins_in_room_list.sort(key=lambda x: x[2])  # Sort by last message (last seen = x[3])
        response += ". Currently in this room: **"
        for admin in admins_in_room_list:
            response += "{}, ".format(admin[1])
        response = response[:-2] + "**. "
        response += "Not currently in this room: "
        for admin in admins_not_in_room_list:
            response += "{}, ".format(admin[1])
        response = response[:-2] + "."
    else:
        response += ": "
        for admin in admin_ids:
            response += "{}, ".format(_user_tuple(admin)[1])
        response = response[:-2] + ". "
        response += "None of them are currently in this room. Other users in this room might be able to help you."
    return response
@command(int, str, privileged=True, whole_msg=True)
def invite(msg, room_id, roles):
    """
    Registers an additional chat room (on the caller's chat host) to receive
    the given message types; not persisted across restarts.
    :param msg:
    :param room_id: chat room id
    :param roles: comma-separated list of message types
    :return: A string
    """
    add_room((msg._client.host, room_id), roles.split(","))
    return "I'll now send messages with types `{}` to room `{}` on `{}`." \
           " (Note that this will not persist after restarts.)".format(roles, room_id, msg._client.host)
# --- Post Responses --- #
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, privileged=True)
def report(msg, urls):
    """
    Report a post (or posts)
    :param msg:
    :param urls: whitespace-separated post URLs (at most 5 distinct ones)
    :return: A string (or None when every post was reported without remarks)
    """
    # Per-user rate limit: refuse while the cooldown from a previous batch is active.
    crn, wait = can_report_now(msg.owner.id, msg._client.host)
    if not crn:
        raise CmdException("You can execute the !!/report command again in {} seconds. "
                           "To avoid one user sending lots of reports in a few commands and "
                           "slowing SmokeDetector down due to rate-limiting, you have to "
                           "wait 30 seconds after you've reported multiple posts in "
                           "one go.".format(wait))
    output = []
    # De-duplicate the URLs (note: set() makes the processing order arbitrary).
    urls = list(set(urls.split()))
    if len(urls) > 5:
        raise CmdException("To avoid SmokeDetector reporting posts too slowly, you can "
                           "report at most 5 posts at a time. This is to avoid "
                           "SmokeDetector's chat messages getting rate-limited too much, "
                           "which would slow down reports.")
    for index, url in enumerate(urls, start=1):
        post_data = api_get_post(url)
        # api_get_post uses None for "not a post URL" and False for "gone from the API".
        if post_data is None:
            output.append("Post {}: That does not look like a valid post URL.".format(index))
            continue
        if post_data is False:
            output.append("Post {}: Could not find data for this post in the API. "
                          "It may already have been deleted.".format(index))
            continue
        if has_already_been_posted(post_data.site, post_data.post_id, post_data.title) and not is_false_positive(
                (post_data.post_id, post_data.site)):
            # Don't re-report if the post wasn't marked as a false positive. If it was marked as a false positive,
            # this re-report might be attempting to correct that/fix a mistake/etc.
            if GlobalVars.metasmoke_key is not None:
                se_link = to_protocol_relative(post_data.post_url)
                ms_link = "https://m.erwaysoftware.com/posts/by-url?url={}".format(se_link)
                output.append("Post {}: Already recently reported [ [MS]({}) ]".format(index, ms_link))
                continue
            else:
                output.append("Post {}: Already recently reported".format(index))
                continue
        post_data.is_answer = (post_data.post_type == "answer")
        post = Post(api_response=post_data.as_dict)
        user = get_user_from_url(post_data.owner_url)
        if user is not None:
            message_url = "https://chat.{}/transcript/{}?m={}".format(msg._client.host, msg.room.id, msg.id)
            add_blacklisted_user(user, message_url, post_data.post_url)
        why_info = u"Post manually reported by user *{}* in room *{}*.\n".format(msg.owner.name, msg.room.name)
        batch = ""
        if len(urls) > 1:
            batch = " (batch report: post {} out of {})".format(index, len(urls))
        handle_spam(post=post,
                    reasons=["Manually reported " + post_data.post_type + batch],
                    why=why_info)
    # Chained comparison: more than one URL given AND at least one actually
    # reported (every skipped URL adds an entry to `output`), so start the
    # multiple-report cooldown.
    if 1 < len(urls) > len(output):
        add_or_update_multiple_reporter(msg.owner.id, msg._client.host, time.time())
    if len(output) > 0:
        return os.linesep.join(output)
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(str, whole_msg=True, privileged=True, aliases=['reportuser'])
def allspam(msg, url):
    """
    Reports all of a user's posts as spam
    :param msg: the chat message that invoked the command
    :param url: A user profile URL (site profile or network profile)
    :return: None (reports are emitted via handle_spam)
    :raises CmdException: on rate limiting, bad URL, or abnormal accounts
    """
    crn, wait = can_report_now(msg.owner.id, msg._client.host)
    if not crn:
        raise CmdException("You can execute the !!/allspam command again in {} seconds. "
                           "To avoid one user sending lots of reports in a few commands and "
                           "slowing SmokeDetector down due to rate-limiting, you have to "
                           "wait 30 seconds after you've reported multiple posts in "
                           "one go.".format(wait))
    user = get_user_from_url(url)
    if user is None:
        raise CmdException("That doesn't look like a valid user URL.")
    user_sites = []
    user_posts = []
    # Detect whether link is to network profile or site profile
    if user[1] == 'stackexchange.com':
        # Respect backoffs etc
        GlobalVars.api_request_lock.acquire()
        if GlobalVars.api_backoff_time > time.time():
            time.sleep(GlobalVars.api_backoff_time - time.time() + 2)
        # Fetch the accounts associated with the network profile
        api_filter = "!6Pbp)--cWmv(1"
        request_url = "http://api.stackexchange.com/2.2/users/{}/associated?filter={}&key=IAkbitmze4B8KpacUfLqkw((" \
            .format(user[0], api_filter)
        res = requests.get(request_url).json()
        if "backoff" in res:
            if GlobalVars.api_backoff_time < time.time() + res["backoff"]:
                GlobalVars.api_backoff_time = time.time() + res["backoff"]
        GlobalVars.api_request_lock.release()
        if 'items' not in res or len(res['items']) == 0:
            raise CmdException("The specified user does not appear to exist.")
        if res['has_more']:
            raise CmdException("The specified user has an abnormally high number of accounts. Please consider flagging "
                               "for moderator attention, otherwise use !!/report on the user's posts individually.")
        # Add accounts with posts
        for site in res['items']:
            if site['question_count'] > 0 or site['answer_count'] > 0:
                user_sites.append((site['user_id'], get_api_sitename_from_url(site['site_url'])))
    else:
        user_sites.append((user[0], get_api_sitename_from_url(user[1])))
    # Fetch posts
    for u_id, u_site in user_sites:
        # Respect backoffs etc
        GlobalVars.api_request_lock.acquire()
        if GlobalVars.api_backoff_time > time.time():
            time.sleep(GlobalVars.api_backoff_time - time.time() + 2)
        # Fetch posts
        api_filter = "!)Q4RrMH0DC96Y4g9yVzuwUrW"
        request_url = "http://api.stackexchange.com/2.2/users/{}/posts?site={}&filter={}&key=IAkbitmze4B8KpacUfLqkw((" \
            .format(u_id, u_site, api_filter)
        res = requests.get(request_url).json()
        if "backoff" in res:
            if GlobalVars.api_backoff_time < time.time() + res["backoff"]:
                GlobalVars.api_backoff_time = time.time() + res["backoff"]
        GlobalVars.api_request_lock.release()
        if 'items' not in res or len(res['items']) == 0:
            raise CmdException("The specified user has no posts on this site.")
        posts = res['items']
        if posts[0]['owner']['reputation'] > 100:
            raise CmdException("The specified user's reputation is abnormally high. Please consider flagging for "
                               "moderator attention, otherwise use !!/report on the posts individually.")
        # Add blacklisted user - use most downvoted post as post URL
        message_url = "https://chat.{}/transcript/{}?m={}".format(msg._client.host, msg.room.id, msg.id)
        add_blacklisted_user(user, message_url, sorted(posts, key=lambda x: x['score'])[0]['owner']['link'])
        # TODO: Postdata refactor, figure out a better way to use apigetpost
        for post in posts:
            post_data = PostData()
            post_data.post_id = post['post_id']
            post_data.post_url = url_to_shortlink(post['link'])
            *discard, post_data.site, post_data.post_type = fetch_post_id_and_site_from_url(
                url_to_shortlink(post['link']))
            post_data.title = unescape(post['title'])
            post_data.owner_name = unescape(post['owner']['display_name'])
            post_data.owner_url = post['owner']['link']
            post_data.owner_rep = post['owner']['reputation']
            post_data.body = post['body']
            post_data.score = post['score']
            post_data.up_vote_count = post['up_vote_count']
            post_data.down_vote_count = post['down_vote_count']
            if post_data.post_type == "answer":
                # Annoyingly we have to make another request to get the question ID, since it is only returned by the
                # /answers route
                # Respect backoffs etc
                GlobalVars.api_request_lock.acquire()
                if GlobalVars.api_backoff_time > time.time():
                    time.sleep(GlobalVars.api_backoff_time - time.time() + 2)
                # Fetch the answer (renamed from `filter`, which shadowed the builtin)
                answer_filter = "!*Jxb9s5EOrE51WK*"
                req_url = "http://api.stackexchange.com/2.2/answers/{}?site={}&filter={}&key=IAkbitmze4B8KpacUfLqkw((" \
                    .format(post['post_id'], u_site, answer_filter)
                answer_res = requests.get(req_url).json()
                # BUG FIX: honour the backoff of *this* response — the original
                # checked the stale post-list response `res` here, so backoffs
                # returned by the /answers request were silently ignored.
                if "backoff" in answer_res:
                    if GlobalVars.api_backoff_time < time.time() + answer_res["backoff"]:
                        GlobalVars.api_backoff_time = time.time() + answer_res["backoff"]
                GlobalVars.api_request_lock.release()
                # Finally, set the attribute
                post_data.question_id = answer_res['items'][0]['question_id']
                post_data.is_answer = True
            user_posts.append(post_data)
    if len(user_posts) == 0:
        raise CmdException("The specified user hasn't posted anything.")
    if len(user_posts) > 15:
        raise CmdException("The specified user has an abnormally high number of spam posts. Please consider flagging "
                           "for moderator attention, otherwise use !!/report on the posts individually.")
    why_info = u"User manually reported by *{}* in room *{}*.\n".format(msg.owner.name, msg.room.name)
    # Handle all posts
    for index, post in enumerate(user_posts, start=1):
        batch = ""
        if len(user_posts) > 1:
            batch = " (batch report: post {} out of {})".format(index, len(user_posts))
        handle_spam(post=Post(api_response=post.as_dict),
                    reasons=["Manually reported " + post.post_type + batch],
                    why=why_info)
        time.sleep(2)  # Should this be implemented differently?
    if len(user_posts) > 2:
        add_or_update_multiple_reporter(msg.owner.id, msg._client.host, time.time())
#
#
# Subcommands go below here
# noinspection PyIncorrectDocstring,PyBroadException
DELETE_ALIASES = ["delete", "del", "remove", "poof", "gone", "kaboom"]
@command(message, reply=True, privileged=True, aliases=[alias + "-force" for alias in DELETE_ALIASES])
def delete_force(msg):
    """
    Delete a post from the room, ignoring protection for Charcoal HQ
    :param msg:
    :return: None
    """
    # Best-effort delete: failures (no permission, already gone) are ignored.
    # Catch Exception rather than everything so KeyboardInterrupt/SystemExit
    # still propagate (the original used a bare `except:`).
    try:
        msg.delete()
    except Exception:
        pass  # couldn't delete message
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyBroadException
@command(message, reply=True, privileged=True, aliases=DELETE_ALIASES)
def delete(msg):
    """
    Delete a post from a chatroom, with an override for Charcoal HQ.
    :param msg:
    :return: None (or a refusal string for Charcoal HQ reports)
    """
    post_data = get_report_data(msg)
    # Room 11540 is Charcoal HQ; reports there are kept as records.
    if post_data and msg.room.id == 11540:
        return "Reports from SmokeDetector in Charcoal HQ are generally kept "\
               "as records. If you really need to delete a report, please use "\
               "`sd delete-force`. See [this note on message deletion]"\
               "(https://charcoal-se.org/smokey/Commands"\
               "#a-note-on-message-deletion) for more details."
    else:
        try:
            msg.delete()
        except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
            pass  # best-effort delete only
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(message, reply=True, privileged=True)
def postgone(msg):
    """
    Removes link from a marked report message
    :param msg:
    :return: None
    :raises CmdException: if the message is not a report
    """
    new_content = edited_message_after_postgone_command(msg.content)
    if new_content is not None:
        msg.edit(new_content)
        return
    raise CmdException("That's not a report.")
# noinspection PyIncorrectDocstring
@command(message, reply=True, privileged=True, whole_msg=True, give_name=True, aliases=FALSE_FEEDBACKS.keys())
def false(feedback, msg, alias_used="false"):
    """
    Marks a post as a false positive
    :param feedback:
    :param msg:
    :return: String (empty when the feedback type is always silent)
    :raises CmdException: if the message is not a report
    """
    post_data = get_report_data(msg)
    if not post_data:
        raise CmdException("That message is not a report.")
    post_url, owner_url = post_data
    feedback_type = FALSE_FEEDBACKS[alias_used]
    feedback_type.send(post_url, feedback)
    post_id, site, post_type = fetch_post_id_and_site_from_url(post_url)
    add_false_positive((post_id, site))
    user = get_user_from_url(owner_url)
    # Default message; refined below when the poster is known.
    result = "Registered " + post_type + " as false positive."
    if user is not None:
        if feedback_type.blacklist:
            add_whitelisted_user(user)
            result = "Registered " + post_type + " as false positive and whitelisted user."
        elif is_blacklisted_user(user):
            remove_blacklisted_user(user)
            result = "Registered " + post_type + " as false positive and removed user from the blacklist."
    try:
        # Keep the feedback message in Charcoal HQ as a record; delete elsewhere.
        if int(msg.room.id) != int(GlobalVars.charcoal_hq.id):
            msg.delete()
    except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
        pass
    return result if not feedback_type.always_silent else ""
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(message, reply=True, privileged=True, whole_msg=True)
def ignore(feedback, msg):
    """
    Marks a post to be ignored
    :param feedback:
    :param msg:
    :return: String
    :raises CmdException: if the message is not a report
    """
    report = get_report_data(msg)
    if not report:
        raise CmdException("That message is not a report.")
    url = report[0]
    Feedback.send_custom("ignore", url, feedback)
    ignored_id, ignored_site, _ = fetch_post_id_and_site_from_url(url)
    add_ignored_post((ignored_id, ignored_site))
    return "Post ignored; alerts about it will no longer be posted."
# noinspection PyIncorrectDocstring
@command(message, reply=True, privileged=True, whole_msg=True, give_name=True, aliases=NAA_FEEDBACKS.keys())
def naa(feedback, msg, alias_used="naa"):
    """
    Marks a post as NAA (Not An Answer)
    :param feedback:
    :param msg:
    :return: String (empty when the feedback type is always silent)
    :raises CmdException: if the message is not a report, or the post is a question
    """
    post_data = get_report_data(msg)
    if not post_data:
        raise CmdException("That message is not a report.")
    post_url, _ = post_data
    # One parse is enough — the original called fetch_post_id_and_site_from_url
    # twice on the same URL and discarded the first ids.
    post_id, site, post_type = fetch_post_id_and_site_from_url(post_url)
    if post_type != "answer":
        raise CmdException("That report was a question; questions cannot be marked as NAAs.")
    feedback_type = NAA_FEEDBACKS[alias_used]
    feedback_type.send(post_url, feedback)
    add_ignored_post((post_id, site))
    return "Recorded answer as an NAA in metasmoke." if not feedback_type.always_silent else ""
# noinspection PyIncorrectDocstring
@command(message, reply=True, privileged=True, whole_msg=True, give_name=True, aliases=TRUE_FEEDBACKS.keys())
def true(feedback, msg, alias_used="true"):
    """
    Marks a post as a true positive
    :param feedback:
    :param msg:
    :return: string (empty when the feedback type is always silent)
    :raises CmdException: if the message is not a report
    """
    report = get_report_data(msg)
    if not report:
        raise CmdException("That message is not a report.")
    post_url, owner_url = report
    fb = TRUE_FEEDBACKS[alias_used]
    fb.send(post_url, feedback)
    poster = get_user_from_url(owner_url)
    _, _, post_type = fetch_post_id_and_site_from_url(post_url)
    message_url = "https://chat.{}/transcript/{}?m={}".format(msg._client.host, msg.room.id, msg.id)
    if poster is None:
        result = "Registered " + post_type + " as true positive."
    elif fb.blacklist:
        add_blacklisted_user(poster, message_url, post_url)
        result = "Registered " + post_type + " as true positive and blacklisted user."
    else:
        result = "Registered " + post_type + " as true positive. If you want to "\
                 "blacklist the poster, use `trueu` or `tpu`."
    return "" if fb.always_silent else result
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(message, reply=True)
def why(msg):
    """
    Returns reasons a post was reported
    :param msg:
    :return: A string
    :raises CmdException: if the message is not a report or no why data exists
    """
    report = get_report_data(msg)
    if not report:
        raise CmdException("That's not a report.")
    post_id, site, _ = fetch_post_id_and_site_from_url(report[0])
    why_info = get_why(site, post_id)
    if not why_info:
        raise CmdException("There is no `why` data for that user (anymore).")
    return why_info
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(message, reply=True)
def autoflagged(msg):
    """
    Determines whether a post was automatically flagged by Metasmoke
    :param msg:
    :return: A string
    :raises CmdException: if the message is not a report
    """
    report = get_report_data(msg)
    if not report:
        raise CmdException("That's not a report.")
    flagged, names = Metasmoke.determine_if_autoflagged(report[0])
    if flagged:
        return "That post was automatically flagged, using flags from: {}.".format(", ".join(names))
    return "That post was **not** automatically flagged by metasmoke."
# stray non-code line (file-concatenation artifact): "why isn't Python Ruby"
# coding=utf-8
# noinspection PyUnresolvedReferences
from chatcommunicate import add_room, block_room, CmdException, command, get_report_data, is_privileged, message, \
tell_rooms
# noinspection PyUnresolvedReferences
from globalvars import GlobalVars
from findspam import FindSpam
# noinspection PyUnresolvedReferences
from datetime import datetime
from utcdate import UtcDate
from apigetpost import api_get_post, PostData
from datahandling import *
from blacklists import load_blacklists
from metasmoke import Metasmoke
from parsing import *
from spamhandling import handle_spam
from gitmanager import GitManager
from tasks import Tasks
import threading
import random
import requests
import os
import time
from html import unescape
# noinspection PyCompatibility
import regex
from helpers import only_blacklists_changed
from classes import Post
from classes.feedback import *
# TODO: Do we need uid == -2 check? Turn into "is_user_valid" check
#
#
# System command functions below here
# The following two commands are just bypasses for the "unrecognized command" message, so that pingbot
# can respond instead.
@command(aliases=['ping-help'])
def ping_help():
    # Deliberate no-op: swallow the command so pingbot can reply instead of
    # SmokeDetector emitting "unrecognized command".
    return None
@command()
def groups():
    # Deliberate no-op: lets pingbot handle "groups" without an
    # "unrecognized command" reply from SmokeDetector.
    return None
@command(int, whole_msg=True, privileged=True)
def approve(msg, pr_num):
    """
    Approve a metasmoke GitHub pull request by number (code-privileged users only)
    :param msg:
    :param pr_num: the pull request number
    :return: A string describing the outcome
    :raises CmdException: if the user lacks code privileges
    """
    if not is_code_privileged(msg._client.host, msg.owner.id):
        raise CmdException("You don't have permission to do that.")
    resp = requests.post('{}/github/pr_approve/{}'.format(GlobalVars.metasmoke_host, pr_num))
    if resp.status_code == 200:
        return "Posted approval comment. PR will be merged automatically if it's a blacklist PR."
    return "Forwarding request to metasmoke returned HTTP {}. Check status manually.".format(resp.status_code)
# --- Blacklist Functions --- #
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(str, whole_msg=True, privileged=True)
def addblu(msg, user):
    """
    Adds a user to the site blacklist
    :param msg: ChatExchange message
    :param user: profile URL or "userid sitename"
    :return: A string
    :raises CmdException: on lookup error or invalid format
    """
    uid, val = get_user_from_list_command(user)
    if int(uid) == -2:
        # -2 is the error sentinel; val carries the error text.
        raise CmdException("Error: {}".format(val))
    if int(uid) > -1 and val != "":
        message_url = "https://chat.{}/transcript/{}?m={}".format(msg._client.host, msg.room.id, msg.id)
        add_blacklisted_user((uid, val), message_url, "")
        return "User blacklisted (`{}` on `{}`).".format(uid, val)
    raise CmdException("Invalid format. Valid format: `!!/addblu profileurl` *or* `!!/addblu userid sitename`.")
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(str)
def isblu(user):
    """
    Check if a user is blacklisted
    :param user: profile URL or "userid sitename"
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    if int(uid) == -2:
        # -2 is the error sentinel; val carries the error text.
        return "Error: {}".format(val)
    if int(uid) > -1 and val != "":
        status = "blacklisted" if is_blacklisted_user((uid, val)) else "not blacklisted"
        return "User is {} (`{}` on `{}`).".format(status, uid, val)
    return "Invalid format. Valid format: `!!/isblu profileurl` *or* `!!/isblu userid sitename`."
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(str, privileged=True)
def rmblu(user):
    """
    Removes user from site blacklist
    :param user: profile URL or "userid sitename"
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    if int(uid) == -2:
        # -2 is the error sentinel; val carries the error text.
        return "Error: {}".format(val)
    if int(uid) > -1 and val != "":
        if remove_blacklisted_user((uid, val)):
            return "User removed from blacklist (`{}` on `{}`).".format(uid, val)
        return "User is not blacklisted."
    return "Invalid format. Valid format: `!!/rmblu profileurl` *or* `!!/rmblu userid sitename`."
# --- Whitelist functions --- #
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyMissingTypeHints
@command(str, privileged=True)
def addwlu(user):
    """
    Adds a user to site whitelist
    :param user: profile URL or "userid sitename"
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    if int(uid) == -2:
        # -2 is the error sentinel; val carries the error text.
        return "Error: {}".format(val)
    if int(uid) > -1 and val != "":
        add_whitelisted_user((uid, val))
        return "User whitelisted (`{}` on `{}`).".format(uid, val)
    return "Invalid format. Valid format: `!!/addwlu profileurl` *or* `!!/addwlu userid sitename`."
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyMissingTypeHints
@command(str)
def iswlu(user):
    """
    Checks if a user is whitelisted
    :param user: profile URL or "userid sitename"
    :return: A string
    :raises CmdException: on invalid format
    """
    uid, val = get_user_from_list_command(user)
    if int(uid) == -2:
        # -2 is the error sentinel; val carries the error text.
        return "Error: {}".format(val)
    if int(uid) > -1 and val != "":
        status = "whitelisted" if is_whitelisted_user((uid, val)) else "not whitelisted"
        return "User is {} (`{}` on `{}`).".format(status, uid, val)
    raise CmdException("Invalid format. Valid format: `!!/iswlu profileurl` *or* `!!/iswlu userid sitename`.")
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(str, privileged=True)
def rmwlu(user):
    """
    Removes a user from site whitelist
    :param user: profile URL or "userid sitename"
    :return: A string
    """
    uid, val = get_user_from_list_command(user)
    # BUG FIX: use `> -1` like the sibling commands. The original tested
    # `!= -1`, which also matched the error sentinel uid == -2 and made the
    # error branch below unreachable whenever val was non-empty.
    if int(uid) > -1 and val != "":
        if remove_whitelisted_user((uid, val)):
            return "User removed from whitelist (`{}` on `{}`).".format(uid, val)
        else:
            return "User is not whitelisted."
    elif int(uid) == -2:
        return "Error: {}".format(val)
    else:
        return "Invalid format. Valid format: `!!/rmwlu profileurl` *or* `!!/rmwlu userid sitename`."
# noinspection PyIncorrectDocstring
@command(str)
def blacklist(_):
    """
    Returns a string which explains the usage of the new blacklist commands.
    :return: A string
    """
    deprecation_notice = ("The !!/blacklist command has been deprecated. "
                          "Please use !!/blacklist-website, !!/blacklist-username,"
                          "!!/blacklist-keyword, or perhaps !!/watch-keyword. "
                          "Remember to escape dots in URLs using \\.")
    raise CmdException(deprecation_notice)
def check_blacklist(string_to_test, is_username, is_watchlist):
    """
    Test a candidate pattern against the existing spam rules.
    Returns the list of reasons (deduplicated) that already catch it,
    excluding watchlist ("potentially bad keyword") hits unless is_watchlist.
    """
    # Build fake question/answer posts carrying the candidate string either
    # as the username or as the body.
    if is_username:
        display_name = string_to_test
        body = 'Valid body'
    else:
        display_name = "Valid username"
        body = string_to_test
    fake_posts = [Post(api_response={'title': 'Valid title', 'body': body,
                                    'owner': {'display_name': display_name, 'reputation': 1, 'link': ''},
                                    'site': "", 'IsAnswer': is_answer, 'score': 0})
                  for is_answer in (False, True)]
    question_reasons, _ = FindSpam.test_post(fake_posts[0])
    answer_reasons, _ = FindSpam.test_post(fake_posts[1])
    # Filter out duplicates
    reasons = list(set(question_reasons) | set(answer_reasons))
    # Filter out watchlist results
    if not is_watchlist:
        reasons = [reason for reason in reasons if "potentially bad keyword" not in reason]
    return reasons
def format_blacklist_reasons(reasons):
    """
    Capitalize each reason and join them into an English list:
    "" / "A" / "A and B" / "A, B, and C" (Oxford comma for 3+).
    :param reasons: iterable of reason strings
    :return: A string
    """
    # Comprehension instead of map(lambda ...) — same result, clearer.
    capitalized = [reason.capitalize() for reason in reasons]
    if len(capitalized) < 3:
        return " and ".join(capitalized)
    return ", and ".join([", ".join(capitalized[:-1]), capitalized[-1]])
def do_blacklist(pattern, blacklist_type, msg, force=False):
    """
    Adds a string to the website blacklist and commits/pushes to GitHub
    :param pattern: regex pattern to blacklist/watch
    :param blacklist_type: "keyword", "website", "username" or "watch_keyword"
    :param msg: the chat message that triggered the command
    :param force: skip the "already caught" check when True
    :return: A string
    :raises CmdException: on an invalid pattern or an already-caught pattern
    """
    chat_user_profile_link = "http://chat.{host}/users/{id}".format(host=msg._client.host,
                                                                    id=msg.owner.id)
    # Validate the regex before touching the blacklist. Use the public
    # regex.error instead of reaching into the private regex._regex_core
    # module, which is an implementation detail.
    try:
        regex.compile(pattern)
    except regex.error:
        raise CmdException("An invalid pattern was provided, not blacklisting.")
    if not force:
        reasons = check_blacklist(pattern.replace("\\W", " ").replace("\\.", "."),
                                  blacklist_type == "username",
                                  blacklist_type == "watch_keyword")
        if reasons:
            raise CmdException("That pattern looks like it's already caught by " + format_blacklist_reasons(reasons) +
                               "; append `-force` if you really want to do that.")
    _, result = GitManager.add_to_blacklist(
        blacklist=blacklist_type,
        item_to_blacklist=pattern,
        username=msg.owner.name,
        chat_profile_link=chat_user_profile_link,
        code_permissions=is_code_privileged(msg._client.host, msg.owner.id)
    )
    return result
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, privileged=True, give_name=True, aliases=["blacklist-keyword",
                                                                        "blacklist-website",
                                                                        "blacklist-username",
                                                                        "blacklist-keyword-force",
                                                                        "blacklist-website-force",
                                                                        "blacklist-username-force"])
def blacklist_keyword(msg, pattern, alias_used="blacklist-keyword"):
    """
    Adds a string to the blacklist and commits/pushes to GitHub
    :param msg:
    :param pattern:
    :return: A string
    """
    # Alias shape is "blacklist-<type>[-force]"; the middle token selects
    # the list, a trailing "-force" skips the already-caught check.
    blacklist_type = alias_used.split("-")[1]
    forced = alias_used.endswith("-force")
    return do_blacklist(pattern, blacklist_type, msg, force=forced)
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, privileged=True, aliases=["watch-keyword"])
def watch(msg, website):
    """
    Adds a string to the watched keywords list and commits/pushes to GitHub
    :param msg:
    :param website: the pattern to watch
    :return: A string
    """
    # Non-forced variant; see watch_force for the forced counterpart.
    return do_blacklist(website, "watch_keyword", msg, force=False)
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, privileged=True, aliases=["watch-force", "watch-keyword-force"])
def watch_force(msg, website):
    """
    Adds a string to the watched keywords list and commits/pushes to GitHub
    :param msg:
    :param website: the pattern to watch
    :return: A string
    """
    # Forced variant of watch(): skips the "already caught" check.
    return do_blacklist(website, "watch_keyword", msg, force=True)
# noinspection PyIncorrectDocstring
@command(privileged=True)
def gitstatus():
    """
    Returns the current status of the local git repository
    :return: A string
    """
    return GitManager.current_git_status()
@command(privileged=True, aliases=["remote-diff", "remote_diff"])
def remotediff():
    """
    Shows the diff against the remote and whether pulling it needs a full restart
    :return: A string
    """
    # Fetch the diff once — the original called GitManager.get_remote_diff()
    # twice, doing the remote work twice (and risking inconsistent results).
    diff = GitManager.get_remote_diff()
    will_require_full_restart = "SmokeDetector will require a full restart to pull changes: " \
                                "{}".format(str(not only_blacklists_changed(diff)))
    return "{}\n\n{}".format(diff, will_require_full_restart)
# --- Joke Commands --- #
@command(whole_msg=True)
def blame(msg):
    """Pins the blame on a random user currently in the room (joke command)."""
    victim_id = random.choice(msg.room.get_current_user_ids())
    victim = msg._client.get_user(victim_id)
    return "It's [{}](https://chat.{}/users/{})'s fault.".format(victim.name,
                                                                 msg._client.host,
                                                                 victim.id)
@command(str, whole_msg=True, aliases=["blame\u180E"])
def blame2(msg, x):
    """Decodes a user id hidden in zero-width characters and blames that user (joke command)."""
    # Each invisible character is a base-7 digit; decode left-to-right
    # (Horner's method — equivalent to the positional sum over reversed input).
    base = {"\u180E": 0, "\u200B": 1, "\u200C": 2, "\u200D": 3, "\u2060": 4, "\u2063": 5, "\uFEFF": 6}
    decoded = 0
    for char in x:
        decoded = decoded * len(base) + base[char]
    victim = msg._client.get_user(decoded)
    return "It's [{}](https://chat.{}/users/{})'s fault.".format(victim.name,
                                                                 msg._client.host,
                                                                 victim.id)
# noinspection PyIncorrectDocstring
@command()
def brownie():
    """
    Returns a string equal to "Brown!" (This is a joke command)
    :return: A string
    """
    response = "Brown!"
    return response
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, arity=(0, 1))
def coffee(msg, other_user):
    """
    Returns a string stating who the coffee is for (This is a joke command)
    :param msg:
    :param other_user: optional recipient; defaults to the sender
    :return: A string
    """
    recipient = other_user if other_user else msg.owner.name.replace(" ", "")
    return "*brews coffee for @" + recipient + "*"
# noinspection PyIncorrectDocstring
@command()
def lick():
    """
    Returns a string when a user says 'lick' (This is a joke command)
    :return: A string
    """
    response = "*licks ice cream cone*"
    return response
TEAS = ['earl grey', 'green', 'chamomile', 'lemon', 'darjeeling', 'mint', 'jasmine', 'passionfruit']
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, arity=(0, 1))
def tea(msg, other_user):
    """
    Returns a string stating who the tea is for (This is a joke command)
    :param msg:
    :param other_user: optional recipient; defaults to the sender
    :return: A string
    """
    recipient = msg.owner.name.replace(" ", "") if other_user is None else other_user
    return "*brews a cup of {} tea for @{}*".format(random.choice(TEAS), recipient)
# noinspection PyIncorrectDocstring
@command()
def wut():
    """
    Returns a string when a user asks 'wut' (This is a joke command)
    :return: A string
    """
    response = "Whaddya mean, 'wut'? Humans..."
    return response
def _winter_bash_countdown(diff):
    """Format a timedelta as 'D day(s), H hour(s), M minute(s), and S second(s)'."""
    hours, remainder = divmod(diff.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    units = [(diff.days, "day"), (hours, "hour"), (minutes, "minute"), (seconds, "second")]
    words = ["{} {}{}".format(value, name, "" if value == 1 else "s") for value, name in units]
    return "{}, {}, {}, and {}".format(*words)
@command(aliases=["zomg_hats"])
def hats():
    """
    Returns a countdown to the start or end of Winter Bash (joke command)
    :return: A string (empty outside the hard-coded 2017/2018 event window)
    """
    wb_start = datetime(2017, 12, 13, 0, 0, 0)
    wb_end = datetime(2018, 1, 3, 0, 0, 0)
    now = datetime.utcnow()
    # The two branches differed only in the surrounding text; the shared
    # countdown formatting now lives in _winter_bash_countdown.
    if wb_start > now:
        return "WE LOVE HATS! Winter Bash will begin in {}.".format(_winter_bash_countdown(wb_start - now))
    if wb_end > now:
        return "Winter Bash won't end for {}. GO EARN SOME HATS!".format(_winter_bash_countdown(wb_end - now))
    return ""
# --- Block application from posting functions --- #
# noinspection PyIncorrectDocstring
@command(int, int, whole_msg=True, privileged=True, arity=(1, 2))
def block(msg, block_time, room_id):
    """
    Blocks posts from application for a period of time
    :param msg:
    :param block_time: block duration in seconds (falls back to 900 outside 1..14399)
    :param room_id: optional room to block; None blocks globally
    :return: A string
    """
    time_to_block = block_time if 0 < block_time < 14400 else 900
    block_room(room_id, msg._client.host, time.time() + time_to_block)
    # room_id is an int (or None) — format it rather than concatenating,
    # which raised TypeError in the original ("in room " + room_id).
    which_room = "globally" if room_id is None else "in room {}".format(room_id)
    block_message = "Reports blocked for {} seconds {}.".format(time_to_block, which_room)
    tell_rooms(block_message, ("debug", "metatavern"), ())
    # The original returned the undefined name `report` (NameError).
    return block_message
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(int, int, whole_msg=True, privileged=True, arity=(1, 2))
def unblock(msg, room_id):
    """
    Unblocks posting to a room
    :param msg:
    :param room_id: room to unblock; None unblocks globally
    :return: A string
    """
    # NOTE(review): the decorator declares two int arguments with arity (1, 2)
    # but the function only accepts one — presumably copied from block();
    # confirm the intended arity before changing the decorator.
    block_room(room_id, msg._client.host, -1)
    # room_id is an int (or None) — format it rather than concatenating,
    # which raised TypeError in the original ("in room " + room_id).
    which_room = "globally" if room_id is None else "in room {}".format(room_id)
    unblock_message = "Reports unblocked {}.".format(which_room)
    tell_rooms(unblock_message, ("debug", "metatavern"), ())
    # The original returned the undefined name `report` (NameError).
    return unblock_message
# --- Administration Commands --- #
# noinspection PyIncorrectDocstring
@command()
def alive():
    """
    Returns a string indicating the process is still active
    :return: A string
    """
    replies = ['Yup', 'You doubt me?', 'Of course',
               '... did I miss something?', 'plz send teh coffee',
               'Watching this endless list of new questions *never* gets boring',
               'Kinda sorta']
    return random.choice(replies)
# noinspection PyIncorrectDocstring
@command(int, privileged=True, arity=(0, 1))
def errorlogs(count):
    """
    Shows the most recent lines in the error logs
    :param count: number of lines; defaults to 50 when omitted (or 0)
    :return: A string
    """
    line_count = 50 if not count else count
    return fetch_lines_from_error_log(line_count)
# noinspection PyIncorrectDocstring
@command(aliases=["commands", "help"])
def info():
    """
    Returns the help text
    :return: A string
    """
    return ("I'm " + GlobalVars.chatmessage_prefix
            + " a bot that detects spam and offensive posts on the network and"
              " posts alerts to chat."
              " [A command list is available here](https://charcoal-se.org/smokey/Commands).")
# noinspection PyIncorrectDocstring
@command()
def location():
    """
    Returns the current location the application is running from
    :return: A string with current location
    """
    # GlobalVars.location is presumably set once at startup (e.g. a host or
    # deployment label) — confirm where it is assigned.
    return GlobalVars.location
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(privileged=True)
def master():
    """
    Forces a system exit with exit code = 8
    :return: None
    """
    # os._exit terminates immediately, skipping cleanup handlers. Exit code 8
    # is presumably interpreted by the launcher wrapper (e.g. "switch to the
    # master branch and restart") — confirm against the restart script.
    os._exit(8)
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(privileged=True)
def pull():
    """
    Pull an update from GitHub
    :return: String on failure, None on success
    :raises CmdException: when the CI build failed or is still pending
    """
    if only_blacklists_changed(GitManager.get_remote_diff()):
        # Blacklist-only changes can be applied without restarting the process.
        GitManager.pull_remote()
        load_blacklists()
        return "No code modified, only blacklists reloaded."
    else:
        # Code changed: check the CI status of the deploy branch head and,
        # if it passed, exit with code 3 (presumably "pull and restart" for
        # the launcher wrapper — confirm against the restart script).
        request = requests.get('https://api.github.com/repos/Charcoal-SE/SmokeDetector/git/refs/heads/deploy')
        latest_sha = request.json()["object"]["sha"]
        request = requests.get(
            'https://api.github.com/repos/Charcoal-SE/SmokeDetector/commits/{commit_code}/statuses'.format(
                commit_code=latest_sha))
        # Comprehension instead of the manual append loop.
        states = [ci_status["state"] for ci_status in request.json()]
        if "success" in states:
            os._exit(3)
        elif "error" in states or "failure" in states:
            raise CmdException("CI build failed! :( Please check your commit.")
        elif "pending" in states or not states:
            raise CmdException("CI build is still pending, wait until the build has finished and then pull again.")
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(whole_msg=True, privileged=True, aliases=["restart"])
def reboot(msg):
    """
    Forces a system exit with exit code = 5
    :param msg:
    :return: None
    """
    # Announce first, then hard-exit; code 5 is presumably the launcher's
    # "restart" signal — confirm against the wrapper script.
    msg.room.send_message("Goodbye, cruel world")
    os._exit(5)
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(whole_msg=True)
def amiprivileged(msg):
    """
    Tells user whether or not they have privileges
    :param msg:
    :return: A string
    """
    if not is_privileged(msg.owner, msg.room):
        return "\u2573 " + GlobalVars.not_privileged_warning
    return "\u2713 You are a privileged user."
# noinspection PyIncorrectDocstring,
@command(whole_msg=True)
def amicodeprivileged(msg):
    """
    Tells user whether or not they have code privileges
    :param msg:
    :return: A string
    """
    if not is_code_privileged(msg._client.host, msg.owner.id):
        return "\u2573 No, you are not a code-privileged user."
    return "\u2713 You are a code-privileged user."
# noinspection PyIncorrectDocstring
@command()
def apiquota():
    """
    Report how many API hits remain for the day
    :return: A string
    """
    # GlobalVars.apiquota is presumably updated by the API-fetching code from
    # the quota_remaining field of SE API responses — confirm.
    return "The current API quota remaining is {}.".format(GlobalVars.apiquota)
# noinspection PyIncorrectDocstring
@command()
def queuestatus():
    """
    Report current API queue
    :return: A string
    """
    # Delegates entirely to the body fetcher's own queue formatter.
    return GlobalVars.bodyfetcher.print_queue()
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(str, whole_msg=True, privileged=True, arity=(0, 1))
def stappit(msg, location_search):
    """
    Forces a system exit with exit code = 6
    :param msg:
    :param location_search: optional substring; only exit if it matches this instance's location
    :return: None
    """
    # Ignore the command when a location filter is given and doesn't match us.
    if location_search is not None and location_search.lower() not in GlobalVars.location.lower():
        return
    msg.room.send_message("Goodbye, cruel world")
    time.sleep(1)
    os._exit(6)
def td_format(td_object):
    """
    Convert a timedelta to a human-readable string like "2 days, 3 hours".
    Based on http://stackoverflow.com/a/13756038/5244995, with the comparison
    fixed to ">=": the original used ">", so exact multiples were reported in
    the next-smaller unit (exactly one hour came out as "60 minutes", and
    exactly one second as an empty string).
    :param td_object: a datetime.timedelta
    :return: A string (empty for durations under one second)
    """
    seconds = int(td_object.total_seconds())
    periods = [
        ('year', 60 * 60 * 24 * 365),
        ('month', 60 * 60 * 24 * 30),
        ('day', 60 * 60 * 24),
        ('hour', 60 * 60),
        ('minute', 60),
        ('second', 1)
    ]
    strings = []
    for period_name, period_seconds in periods:
        if seconds >= period_seconds:
            period_value, seconds = divmod(seconds, period_seconds)
            plural = "" if period_value == 1 else "s"
            strings.append("%s %s%s" % (period_value, period_name, plural))
    return ", ".join(strings)
# noinspection PyIncorrectDocstring
@command()
def status():
    """
    Returns the amount of time the application has been running
    :return: A string
    """
    uptime = datetime.utcnow() - UtcDate.startup_utc_date
    return 'Running since {time} UTC ({relative})'.format(time=GlobalVars.startup_utc,
                                                          relative=td_format(uptime))
# noinspection PyIncorrectDocstring
@command(privileged=True)
def stopflagging():
    """
    Requests that metasmoke stop autoflagging
    :return: A string
    """
    # Tasks.do presumably schedules the call asynchronously — the reply below
    # does not wait for metasmoke to acknowledge; confirm Tasks semantics.
    Tasks.do(Metasmoke.stop_autoflagging)
    return "Request sent..."
# noinspection PyIncorrectDocstring,PyProtectedMember
@command(str, whole_msg=True, privileged=True)
def standby(msg, location_search):
    """
    Forces a system exit with exit code = 7
    :param msg:
    :param location_search: substring; only exit if it matches this instance's location
    :return: None
    """
    # Only the instance whose location matches the filter goes to standby.
    if location_search.lower() not in GlobalVars.location.lower():
        return
    msg.room.send_message("{location} is switching to standby".format(location=GlobalVars.location))
    time.sleep(1)
    os._exit(7)
def _make_test_post(title, body, username, is_answer):
    # Build a minimal fake API response wrapped in Post for FindSpam.test_post.
    return Post(api_response={'title': title, 'body': body,
                              'owner': {'display_name': username, 'reputation': 1, 'link': ''},
                              'site': "", 'IsAnswer': is_answer, 'score': 0})
# noinspection PyIncorrectDocstring
@command(str, aliases=["test-q", "test-a", "test-u", "test-t"], give_name=True)
def test(content, alias_used="test"):
    """
    Test an answer to determine if it'd be automatically reported
    :param content: the text to test (body, username, or title depending on alias)
    :return: A string
    """
    # The alias picks where `content` goes in the fake post; the original
    # repeated the same five-line Post literal in every branch.
    if alias_used == "test-q":
        kind = " question."
        fakepost = _make_test_post('Valid title', content, "Valid username", False)
    elif alias_used == "test-a":
        kind = "n answer."
        fakepost = _make_test_post('Valid title', content, "Valid username", True)
    elif alias_used == "test-u":
        kind = " username."
        fakepost = _make_test_post('Valid title', "Valid question body", content, False)
    elif alias_used == "test-t":
        kind = " title."
        fakepost = _make_test_post(content, "Valid question body", "Valid username", False)
    else:
        kind = " post, title or username."
        fakepost = _make_test_post(content, content, content, False)
    result = "> "
    reasons, why_response = FindSpam.test_post(fakepost)
    if len(reasons) == 0:
        result += "Would not be caught as a{}".format(kind)
    else:
        result += ", ".join(reasons).capitalize()
        if why_response is not None and len(why_response) > 0:
            result += "\n----------\n"
            result += why_response
    return result
# noinspection PyIncorrectDocstring
@command()
def threads():
    """
    Returns a description of current threads, for debugging
    :return: A string
    """
    descriptions = ["{ident}: {name}".format(ident=t.ident, name=t.name)
                    for t in threading.enumerate()]
    return "\n".join(descriptions)
# noinspection PyIncorrectDocstring
@command(aliases=["rev", "ver"])
def version():
    """
    Returns the current version of the application
    :return: A string
    """
    commit_url = '{repository}/commit/{commit_code}'.format(
        repository=GlobalVars.bot_repository, commit_code=GlobalVars.commit['id'])
    return '{id} [{commit_name}]({url})'.format(
        id=GlobalVars.location, commit_name=GlobalVars.commit_with_author, url=commit_url)
# noinspection PyIncorrectDocstring
@command(whole_msg=True)
def whoami(msg):
    """
    Returns user id of smoke detector
    :param msg:
    :return:
    """
    my_id = msg._client._br.user_id
    return "My id for this room is {}, and it's not apnorton's fault.".format(my_id)
# --- Notification functions --- #
# noinspection PyIncorrectDocstring
@command(int, whole_msg=True)
def allnotificationsites(msg, room_id):
    """
    Returns a string stating what sites a user will be notified about
    :param msg:
    :param room_id:
    :return: A string
    """
    sites = get_all_notification_sites(msg.owner.id, msg._client.host, room_id)
    if not sites:
        return "You won't get notified for any sites in that room."
    return "You will get notified for these sites:\r\n" + ", ".join(sites)
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(int, str, whole_msg=True)
def notify(msg, room_id, se_site):
    """
    Subscribe a user to events on a site in a single room
    :param msg:
    :param room_id:
    :param se_site:
    :return: A string
    """
    # TODO: Add check whether smokey reports in that room
    response, full_site = add_to_notification_list(msg.owner.id, msg._client.host, room_id, se_site)
    # Guard clauses for the error codes; success (0) falls through.
    if response == -1:
        raise CmdException("That notification configuration is already registered.")
    if response == -2:
        raise CmdException("The given SE site does not exist.")
    if response != 0:
        raise CmdException("Unrecognized code returned when adding notification.")
    return "You'll now get pings from me if I report a post on `{site}`, in room "\
           "`{room}` on `chat.{domain}`".format(site=se_site, room=room_id, domain=msg._client.host)
# TODO: !!/unnotify-all
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(int, str, whole_msg=True)
def unnotify(msg, room_id, se_site):
    """
    Unsubscribes a user to specific events
    :param msg:
    :param room_id:
    :param se_site:
    :return: A string
    """
    removed = remove_from_notification_list(msg.owner.id, msg._client.host, room_id, se_site)
    if not removed:
        raise CmdException("That configuration doesn't exist.")
    return "I will no longer ping you if I report a post on `{site}`, in room `{room}` "\
           "on `chat.{domain}`".format(site=se_site, room=room_id, domain=msg._client.host)
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(int, str, whole_msg=True)
def willbenotified(msg, room_id, se_site):
    """
    Returns a string stating whether a user will be notified or not
    :param msg:
    :param room_id:
    :param se_site:
    :return: A string
    """
    notified = will_i_be_notified(msg.owner.id, msg._client.host, room_id, se_site)
    if notified:
        return "Yes, you will be notified for that site in that room."
    return "No, you won't be notified for that site in that room."
# Singular/plural display names for each canonical role.
RETURN_NAMES = {"admin": ["admin", "admins"], "code_admin": ["code admin", "code admins"]}
# Maps user-typed role aliases to the canonical metasmoke role name.
VALID_ROLES = {"admin": "admin",
               "code_admin": "code_admin",
               "admins": "admin",
               "codeadmins": "code_admin"}
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(str, whole_msg=True)
def whois(msg, role):
    """
    Return a list of important users
    :param msg:
    :param role: one of the VALID_ROLES aliases
    :return: A string
    """
    if role not in VALID_ROLES:
        raise CmdException("That is not a user level I can check. "
                           "I know about {0}".format(", ".join(set(VALID_ROLES.values()))))
    # Ask metasmoke for all users holding the canonical role.
    ms_route = "https://metasmoke.erwaysoftware.com/api/v2.0/users/with_role/{}?key={}&per_page=100&filter=JIHF".format(
        VALID_ROLES[role],
        GlobalVars.metasmoke_key)
    user_response = requests.get(ms_route)
    user_response.encoding = 'utf-8-sig'
    user_response = user_response.json()
    chat_host = msg._client.host
    # Build our list of admin chat ids; the metasmoke record carries a
    # separate chat id per chat host.
    key = ""
    if chat_host == "stackexchange.com":
        key = 'stackexchange_chat_id'
    elif chat_host == "meta.stackexchange.com":
        key = 'meta_stackexchange_chat_id'
    elif chat_host == "stackoverflow.com":
        key = 'stackoverflow_chat_id'
    admin_ids = [a[key] for a in user_response['items'] if a[key] and a['id'] != -1]
    all_users_in_room = msg.room.get_current_user_ids()
    admins_in_room = list(set(admin_ids) & set(all_users_in_room))
    admins_not_in_room = list(set(admin_ids) - set(admins_in_room))
    # (id, name, last_message, last_seen) tuples.
    # NOTE(review): get_user(admin) is called three times per admin in each
    # comprehension, and admins_list is only used in the "nobody in room"
    # branch — consider caching the user objects if this becomes slow.
    admins_list = [(admin,
                    msg._client.get_user(admin).name,
                    msg._client.get_user(admin).last_message,
                    msg._client.get_user(admin).last_seen)
                   for admin in admin_ids]
    admins_in_room_list = [(admin,
                            msg._client.get_user(admin).name,
                            msg._client.get_user(admin).last_message,
                            msg._client.get_user(admin).last_seen)
                           for admin in admins_in_room]
    admins_not_in_room_list = [(admin,
                                msg._client.get_user(admin).name,
                                msg._client.get_user(admin).last_message,
                                msg._client.get_user(admin).last_seen)
                               for admin in admins_not_in_room]
    # Pick singular/plural display name depending on the count.
    return_name = RETURN_NAMES[VALID_ROLES[role]][0 if len(admin_ids) == 1 else 1]
    response = "I am aware of {} {}".format(len(admin_ids), return_name)
    if admins_in_room_list:
        admins_in_room_list.sort(key=lambda x: x[2])  # Sort by last message (last seen = x[3])
        response += ". Currently in this room: **"
        for admin in admins_in_room_list:
            response += "{}, ".format(admin[1])
        response = response[:-2] + "**. "
        response += "Not currently in this room: "
        for admin in admins_not_in_room_list:
            response += "{}, ".format(admin[1])
        response = response[:-2] + "."
    else:
        response += ": "
        for admin in admins_list:
            response += "{}, ".format(admin[1])
        response = response[:-2] + ". "
        response += "None of them are currently in this room. Other users in this room might be able to help you."
    return response
@command(int, str, privileged=True, whole_msg=True)
def invite(msg, room_id, roles):
    """
    Registers a room (non-persistently) for the given message types
    :param msg:
    :param room_id:
    :param roles: comma-separated message types
    :return: A string
    """
    room_key = (msg._client.host, room_id)
    add_room(room_key, roles.split(","))
    return "I'll now send messages with types `{}` to room `{}` on `{}`." \
           " (Note that this will not persist after restarts.)".format(roles, room_id, msg._client.host)
# --- Post Responses --- #
# noinspection PyIncorrectDocstring
@command(str, whole_msg=True, privileged=True)
def report(msg, urls):
    """
    Report a post (or posts)
    :param msg: triggering chat message
    :param urls: whitespace-separated post URLs (at most 5 after de-duplication)
    :return: A string (or None)
    """
    # Rate-limit users who recently batch-reported (30s cool-down).
    crn, wait = can_report_now(msg.owner.id, msg._client.host)
    if not crn:
        raise CmdException("You can execute the !!/report command again in {} seconds. "
                           "To avoid one user sending lots of reports in a few commands and "
                           "slowing SmokeDetector down due to rate-limiting, you have to "
                           "wait 30 seconds after you've reported multiple posts in "
                           "one go.".format(wait))
    output = []
    # De-duplicate the requested URLs.
    urls = list(set(urls.split()))
    if len(urls) > 5:
        raise CmdException("To avoid SmokeDetector reporting posts too slowly, you can "
                           "report at most 5 posts at a time. This is to avoid "
                           "SmokeDetector's chat messages getting rate-limited too much, "
                           "which would slow down reports.")
    for index, url in enumerate(urls, start=1):
        # api_get_post: None -> not a post URL; False -> not found in the API.
        post_data = api_get_post(url)
        if post_data is None:
            output.append("Post {}: That does not look like a valid post URL.".format(index))
            continue
        if post_data is False:
            output.append("Post {}: Could not find data for this post in the API. "
                          "It may already have been deleted.".format(index))
            continue
        if has_already_been_posted(post_data.site, post_data.post_id, post_data.title) and not is_false_positive(
                (post_data.post_id, post_data.site)):
            # Don't re-report if the post wasn't marked as a false positive. If it was marked as a false positive,
            # this re-report might be attempting to correct that/fix a mistake/etc.
            if GlobalVars.metasmoke_key is not None:
                se_link = to_protocol_relative(post_data.post_url)
                ms_link = "https://m.erwaysoftware.com/posts/by-url?url={}".format(se_link)
                output.append("Post {}: Already recently reported [ [MS]({}) ]".format(index, ms_link))
                continue
            else:
                output.append("Post {}: Already recently reported".format(index))
                continue
        post_data.is_answer = (post_data.post_type == "answer")
        post = Post(api_response=post_data.as_dict)
        user = get_user_from_url(post_data.owner_url)
        if user is not None:
            # A manual report also blacklists the post's author.
            message_url = "https://chat.{}/transcript/{}?m={}".format(msg._client.host, msg.room.id, msg.id)
            add_blacklisted_user(user, message_url, post_data.post_url)
        why_info = u"Post manually reported by user *{}* in room *{}*.\n".format(msg.owner.name, msg.room.name)
        batch = ""
        if len(urls) > 1:
            batch = " (batch report: post {} out of {})".format(index, len(urls))
        handle_spam(post=post,
                    reasons=["Manually reported " + post_data.post_type + batch],
                    why=why_info)
    # Chained comparison: len(urls) > 1 AND len(urls) > len(output), i.e. a
    # multi-post batch where at least one post was actually reported.
    if 1 < len(urls) > len(output):
        add_or_update_multiple_reporter(msg.owner.id, msg._client.host, time.time())
    if len(output) > 0:
        return os.linesep.join(output)
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(str, whole_msg=True, privileged=True, aliases=['reportuser'])
def allspam(msg, url):
    """
    Reports all of a user's posts as spam
    :param msg:
    :param url: A user profile URL (network or per-site)
    :return:
    """
    # Same 30s cool-down as !!/report.
    crn, wait = can_report_now(msg.owner.id, msg._client.host)
    if not crn:
        raise CmdException("You can execute the !!/allspam command again in {} seconds. "
                           "To avoid one user sending lots of reports in a few commands and "
                           "slowing SmokeDetector down due to rate-limiting, you have to "
                           "wait 30 seconds after you've reported multiple posts in "
                           "one go.".format(wait))
    user = get_user_from_url(url)
    if user is None:
        raise CmdException("That doesn't look like a valid user URL.")
    user_sites = []   # (user_id, api site name) accounts with posts
    user_posts = []   # PostData for every post found
    # Detect whether link is to network profile or site profile
    if user[1] == 'stackexchange.com':
        # Respect backoffs etc
        GlobalVars.api_request_lock.acquire()
        if GlobalVars.api_backoff_time > time.time():
            time.sleep(GlobalVars.api_backoff_time - time.time() + 2)
        # Fetch all associated site accounts for the network profile.
        api_filter = "!6Pbp)--cWmv(1"
        request_url = "http://api.stackexchange.com/2.2/users/{}/associated?filter={}&key=IAkbitmze4B8KpacUfLqkw((" \
            .format(user[0], api_filter)
        res = requests.get(request_url).json()
        if "backoff" in res:
            if GlobalVars.api_backoff_time < time.time() + res["backoff"]:
                GlobalVars.api_backoff_time = time.time() + res["backoff"]
        GlobalVars.api_request_lock.release()
        if 'items' not in res or len(res['items']) == 0:
            raise CmdException("The specified user does not appear to exist.")
        if res['has_more']:
            raise CmdException("The specified user has an abnormally high number of accounts. Please consider flagging "
                               "for moderator attention, otherwise use !!/report on the user's posts individually.")
        # Add accounts with posts
        for site in res['items']:
            if site['question_count'] > 0 or site['answer_count'] > 0:
                user_sites.append((site['user_id'], get_api_sitename_from_url(site['site_url'])))
    else:
        user_sites.append((user[0], get_api_sitename_from_url(user[1])))
    # Fetch posts
    for u_id, u_site in user_sites:
        # Respect backoffs etc
        GlobalVars.api_request_lock.acquire()
        if GlobalVars.api_backoff_time > time.time():
            time.sleep(GlobalVars.api_backoff_time - time.time() + 2)
        # Fetch posts
        api_filter = "!)Q4RrMH0DC96Y4g9yVzuwUrW"
        request_url = "http://api.stackexchange.com/2.2/users/{}/posts?site={}&filter={}&key=IAkbitmze4B8KpacUfLqkw((" \
            .format(u_id, u_site, api_filter)
        res = requests.get(request_url).json()
        if "backoff" in res:
            if GlobalVars.api_backoff_time < time.time() + res["backoff"]:
                GlobalVars.api_backoff_time = time.time() + res["backoff"]
        GlobalVars.api_request_lock.release()
        if 'items' not in res or len(res['items']) == 0:
            raise CmdException("The specified user has no posts on this site.")
        posts = res['items']
        if posts[0]['owner']['reputation'] > 100:
            raise CmdException("The specified user's reputation is abnormally high. Please consider flagging for "
                               "moderator attention, otherwise use !!/report on the posts individually.")
        # Add blacklisted user - use most downvoted post as post URL
        message_url = "https://chat.{}/transcript/{}?m={}".format(msg._client.host, msg.room.id, msg.id)
        add_blacklisted_user(user, message_url, sorted(posts, key=lambda x: x['score'])[0]['owner']['link'])
        # TODO: Postdata refactor, figure out a better way to use apigetpost
        for post in posts:
            post_data = PostData()
            post_data.post_id = post['post_id']
            post_data.post_url = url_to_shortlink(post['link'])
            *discard, post_data.site, post_data.post_type = fetch_post_id_and_site_from_url(
                url_to_shortlink(post['link']))
            post_data.title = unescape(post['title'])
            post_data.owner_name = unescape(post['owner']['display_name'])
            post_data.owner_url = post['owner']['link']
            post_data.owner_rep = post['owner']['reputation']
            post_data.body = post['body']
            post_data.score = post['score']
            post_data.up_vote_count = post['up_vote_count']
            post_data.down_vote_count = post['down_vote_count']
            if post_data.post_type == "answer":
                # Annoyingly we have to make another request to get the question ID, since it is only returned by the
                # /answers route
                # Respect backoffs etc
                GlobalVars.api_request_lock.acquire()
                if GlobalVars.api_backoff_time > time.time():
                    time.sleep(GlobalVars.api_backoff_time - time.time() + 2)
                # Fetch the answer to obtain its question_id.
                filter = "!*Jxb9s5EOrE51WK*"
                req_url = "http://api.stackexchange.com/2.2/answers/{}?site={}&filter={}&key=IAkbitmze4B8KpacUfLqkw((" \
                    .format(post['post_id'], u_site, filter)
                answer_res = requests.get(req_url).json()
                # NOTE(review): this inspects `res` but the request just made
                # is `answer_res` — looks like a copy-paste bug that ignores
                # backoff values returned by the /answers call. Confirm and
                # switch to `answer_res` if so.
                if "backoff" in res:
                    if GlobalVars.api_backoff_time < time.time() + res["backoff"]:
                        GlobalVars.api_backoff_time = time.time() + res["backoff"]
                GlobalVars.api_request_lock.release()
                # Finally, set the attribute
                post_data.question_id = answer_res['items'][0]['question_id']
                post_data.is_answer = True
            user_posts.append(post_data)
    if len(user_posts) == 0:
        raise CmdException("The specified user hasn't posted anything.")
    if len(user_posts) > 15:
        raise CmdException("The specified user has an abnormally high number of spam posts. Please consider flagging "
                           "for moderator attention, otherwise use !!/report on the posts individually.")
    why_info = u"User manually reported by *{}* in room *{}*.\n".format(msg.owner.name, msg.room.name)
    # Handle all posts
    for index, post in enumerate(user_posts, start=1):
        batch = ""
        if len(user_posts) > 1:
            batch = " (batch report: post {} out of {})".format(index, len(user_posts))
        handle_spam(post=Post(api_response=post.as_dict),
                    reasons=["Manually reported " + post.post_type + batch],
                    why=why_info)
        time.sleep(2)  # Should this be implemented differently?
    if len(user_posts) > 2:
        add_or_update_multiple_reporter(msg.owner.id, msg._client.host, time.time())
#
#
# Subcommands go below here
# noinspection PyIncorrectDocstring,PyBroadException
DELETE_ALIASES = ["delete", "del", "remove", "poof", "gone", "kaboom"]
@command(message, reply=True, privileged=True, aliases=[alias + "-force" for alias in DELETE_ALIASES])
def delete_force(msg):
    """
    Delete a post from the room, ignoring protection for Charcoal HQ
    :param msg:
    :return: None
    """
    # noinspection PyBroadException
    try:
        msg.delete()
    except Exception:
        # Narrowed from bare `except:` so SystemExit/KeyboardInterrupt are
        # not swallowed; deletion is best-effort (message may be too old,
        # already gone, or undeletable).
        pass  # couldn't delete message
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyBroadException
@command(message, reply=True, privileged=True, aliases=DELETE_ALIASES)
def delete(msg):
    """
    Delete a post from a chatroom, with an override for Charcoal HQ.
    :param msg:
    :return: None
    """
    post_data = get_report_data(msg)
    # Room 11540 is Charcoal HQ; its reports are kept as records.
    if post_data and msg.room.id == 11540:
        return "Reports from SmokeDetector in Charcoal HQ are generally kept "\
               "as records. If you really need to delete a report, please use "\
               "`sd delete-force`. See [this note on message deletion]"\
               "(https://charcoal-se.org/smokey/Commands"\
               "#a-note-on-message-deletion) for more details."
    else:
        try:
            msg.delete()
        except Exception:
            # Narrowed from bare `except:`; deletion is best-effort.
            pass
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(message, reply=True, privileged=True)
def postgone(msg):
    """
    Removes link from a marked report message
    :param msg:
    :return: None
    """
    new_content = edited_message_after_postgone_command(msg.content)
    if new_content is None:
        raise CmdException("That's not a report.")
    msg.edit(new_content)
# noinspection PyIncorrectDocstring
@command(message, reply=True, privileged=True, whole_msg=True, give_name=True, aliases=FALSE_FEEDBACKS.keys())
def false(feedback, msg, alias_used="false"):
    """
    Marks a post as a false positive
    :param feedback:
    :param msg:
    :return: String
    """
    post_data = get_report_data(msg)
    if not post_data:
        raise CmdException("That message is not a report.")
    post_url, owner_url = post_data
    feedback_type = FALSE_FEEDBACKS[alias_used]
    feedback_type.send(post_url, feedback)
    post_id, site, post_type = fetch_post_id_and_site_from_url(post_url)
    add_false_positive((post_id, site))
    user = get_user_from_url(owner_url)
    if user is not None:
        if feedback_type.blacklist:
            # "fpu"-style feedback additionally whitelists the author.
            add_whitelisted_user(user)
            result = "Registered " + post_type + " as false positive and whitelisted user."
        elif is_blacklisted_user(user):
            remove_blacklisted_user(user)
            result = "Registered " + post_type + " as false positive and removed user from the blacklist."
        else:
            result = "Registered " + post_type + " as false positive."
    else:
        result = "Registered " + post_type + " as false positive."
    try:
        # Tidy up: delete the report message everywhere except Charcoal HQ.
        if int(msg.room.id) != int(GlobalVars.charcoal_hq.id):
            msg.delete()
    except Exception:
        # Narrowed from bare `except:`; deletion is best-effort.
        pass
    return result if not feedback_type.always_silent else ""
# noinspection PyIncorrectDocstring,PyMissingTypeHints
@command(message, reply=True, privileged=True, whole_msg=True)
def ignore(feedback, msg):
    """
    Marks a post to be ignored
    :param feedback:
    :param msg:
    :return: String
    """
    report = get_report_data(msg)
    if not report:
        raise CmdException("That message is not a report.")
    post_url = report[0]
    Feedback.send_custom("ignore", post_url, feedback)
    post_id, site, _ = fetch_post_id_and_site_from_url(post_url)
    add_ignored_post((post_id, site))
    return "Post ignored; alerts about it will no longer be posted."
# noinspection PyIncorrectDocstring
@command(message, reply=True, privileged=True, whole_msg=True, give_name=True, aliases=NAA_FEEDBACKS.keys())
def naa(feedback, msg, alias_used="naa"):
    """
    Marks a post as NAA (not-an-answer)
    :param feedback:
    :param msg:
    :return: String
    """
    post_data = get_report_data(msg)
    if not post_data:
        raise CmdException("That message is not a report.")
    post_url, _ = post_data
    # One parse is enough — the original called
    # fetch_post_id_and_site_from_url twice on the same URL.
    post_id, site, post_type = fetch_post_id_and_site_from_url(post_url)
    if post_type != "answer":
        raise CmdException("That report was a question; questions cannot be marked as NAAs.")
    feedback_type = NAA_FEEDBACKS[alias_used]
    feedback_type.send(post_url, feedback)
    add_ignored_post((post_id, site))
    return "Recorded answer as an NAA in metasmoke." if not feedback_type.always_silent else ""
# noinspection PyIncorrectDocstring
@command(message, reply=True, privileged=True, whole_msg=True, give_name=True, aliases=TRUE_FEEDBACKS.keys())
def true(feedback, msg, alias_used="true"):
    """
    Marks a post as a true positive
    :param feedback:
    :param msg:
    :return: string
    """
    report = get_report_data(msg)
    if not report:
        raise CmdException("That message is not a report.")
    post_url, owner_url = report
    feedback_type = TRUE_FEEDBACKS[alias_used]
    feedback_type.send(post_url, feedback)
    user = get_user_from_url(owner_url)
    _, _, post_type = fetch_post_id_and_site_from_url(post_url)
    message_url = "https://chat.{}/transcript/{}?m={}".format(msg._client.host, msg.room.id, msg.id)
    if user is None:
        result = "Registered " + post_type + " as true positive."
    elif feedback_type.blacklist:
        # "tpu"-style feedback additionally blacklists the author.
        add_blacklisted_user(user, message_url, post_url)
        result = "Registered " + post_type + " as true positive and blacklisted user."
    else:
        result = "Registered " + post_type + " as true positive. If you want to "\
                 "blacklist the poster, use `trueu` or `tpu`."
    return result if not feedback_type.always_silent else ""
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(message, reply=True)
def why(msg):
    """
    Returns reasons a post was reported
    :param msg:
    :return: A string
    """
    report = get_report_data(msg)
    if not report:
        raise CmdException("That's not a report.")
    post_id, site, _ = fetch_post_id_and_site_from_url(report[0])
    why_info = get_why(site, post_id)
    if not why_info:
        raise CmdException("There is no `why` data for that user (anymore).")
    return why_info
# noinspection PyIncorrectDocstring,PyUnusedLocal
@command(message, reply=True)
def autoflagged(msg):
    """
    Determines whether a post was automatically flagged by Metasmoke
    :param msg:
    :return: A string
    """
    report = get_report_data(msg)
    if not report:
        raise CmdException("That's not a report.")
    is_autoflagged, names = Metasmoke.determine_if_autoflagged(report[0])
    if not is_autoflagged:
        return "That post was **not** automatically flagged by metasmoke."
    return "That post was automatically flagged, using flags from: {}.".format(", ".join(names))
|
"""
Sile object for reading/writing FDF files
"""
from __future__ import print_function, division
from os.path import dirname, sep
import numpy as np
# Import sile objects
from .sile import SileSIESTA
from ..sile import *
from sisl.io._help import *
# Import the geometry object
from sisl import Geometry, Atom, SuperCell
from sisl.utils.misc import name_spec
from sisl.units import unit_default, unit_group
from sisl.units.siesta import unit_convert
__all__ = ['FDFSile']
# Tokens SIESTA accepts as fdf logical values (compared lower-cased).
_LOGICAL_TRUE = ['.true.','true','yes','y','t']
_LOGICAL_FALSE = ['.false.','false','no','n','f']
_LOGICAL = _LOGICAL_FALSE + _LOGICAL_TRUE
# Length conversion factor: Bohr -> Angstrom.
Bohr2Ang = unit_convert('Bohr', 'Ang')
class FDFSile(SileSIESTA):
""" FDF file object """
def __init__(self, filename, mode='r', base=None):
""" Initialize an FDF file from the filename
By supplying base you can reference files in other directories.
By default the ``base`` is the directory given in the file name.
"""
super(FDFSile, self).__init__(filename, mode=mode)
if base is None:
# Extract from filename
self._directory = dirname(filename)
else:
self._directory = base
if len(self._directory) == 0:
self._directory = '.'
    def _setup(self):
        """ Setup the `FDFSile` after initialization """
        # fdf files accept several comment markers
        self._comment = ['#', '!', ';']
        # Stack of parent file-handles used while following %include files
        self._parent_fh = []
        # Default include-resolution directory (overwritten in __init__)
        self._directory = '.'
    def readline(self, comment=False):
        """ Reads the next line of the file

        Transparently follows ``%include`` directives: the current handle is
        pushed on ``self._parent_fh`` and reading continues in the included
        file; at EOF of an included file the parent handle is resumed.
        """
        # Call the parent readline function
        l = super(FDFSile, self).readline(comment=comment)
        # In FDF files, %include marks files that progress
        # down in a tree structure
        if '%include' in l:
            # Descend into the included file (resolved against _directory)
            self._parent_fh.append(self.fh)
            self.fh = open(self._directory + sep + l.split()[1], self._mode)
            # Read the following line in the new file
            return self.readline()
        if len(self._parent_fh) > 0 and l == '':
            # l == '' marks the end of the file; pop back to the parent file
            self.fh.close()
            self.fh = self._parent_fh.pop()
            return self.readline()
        return l
def type(self, key):
""" Return the type of the fdf-keyword """
found, fdf = self._read(key)
if not found:
return None
if fdf.startswith('%block'):
return 'B'
# Grab the entire line (beside the key)
fdf = fdf.split()[1:]
if len(fdf) == 1:
fdf = fdf[0].lower()
if fdf in __LOGICAL:
return 'b'
if '.' in fdf:
return 'r'
return 'i'
return 'n'
def key(self, key):
""" Return the key as written in the fdf-file. If not found, returns `None`. """
found, fdf = self._read(key)
if found:
return fdf.split()[0]
else:
return None
    def get(self, key, unit=None, default=None, with_unit=False):
        """ Retrieve fdf-keyword from the file

        :param key: keyword, optionally with a unit specification parsed by
            ``name_spec`` (that unit wins only when ``unit`` is None)
        :param unit: unit to convert a dimensioned value to
        :param default: returned when the keyword (or its block) is not found
        :param with_unit: return the value as a string together with its unit
        :return: block lines, bool, float or the raw remainder string
        """
        # First split into specification and key
        key, tmp_unit = name_spec(key)
        if unit is None:
            unit = tmp_unit
        found, fdf = self._read(key)
        if not found:
            return default
        # The keyword is found...
        if fdf.startswith('%block'):
            found, fdf = self._read_block(key)
            if not found:
                return default
            else:
                # Return the raw list of block lines
                return fdf
        # We need to process the returned value further.
        fdfl = fdf.split()
        if len(fdfl) == 1:
            # A bare keyword (e.g. SCF.Converge.H) is a boolean
            # defaulting to .true.
            return True
        # Check whether the first argument is a logical flag
        if fdfl[1] in _LOGICAL_TRUE:
            return True
        elif fdfl[1] in _LOGICAL_FALSE:
            return False
        # It is something different.
        # Try and figure out what it is
        if len(fdfl) == 3:
            # "<key> <value> <unit>" — convert the value
            if unit is None:
                # Get group of unit
                group = unit_group(fdfl[2])
                # return in default sisl units
                unit = unit_default(group)
            if with_unit and tmp_unit is not None:
                # The user has specifically requested the unit
                return '{0:.4f} {1}'.format(float(fdfl[1]) * unit_convert(fdfl[2], unit), unit)
            elif with_unit:
                return ' '.join(fdfl[1:])
            return float(fdfl[1]) * unit_convert(fdfl[2], unit)
        # Fallback: everything after the keyword, unparsed
        return ' '.join(fdfl[1:])
    def set(self, key, value):
        """ Add the key and value to the FDF file

        Not implemented yet; always raises `NotImplementedError`.
        """
        raise NotImplementedError("Setting a fdf key is not yet implemented")
    @Sile_fh_open
    def _read(self, key):
        """ Returns the arguments following the keyword in the FDF file

        :return: tuple ``(found, line)``; when the value is piped in via the
            ``key < file`` syntax the lookup is delegated to that file.
        """
        found, fdf = self.step_to(key, case=False)
        # Check whether the key is piped
        if found and fdf.find('<') >= 0:
            # Create new fdf-file and delegate the lookup to it
            sub_fdf = FDFSile(fdf.split('<')[1].replace('\n','').strip())
            return sub_fdf._read(key)
        return found, fdf
    @Sile_fh_open
    def _read_block(self, key, force=False):
        """ Returns the arguments following the keyword in the FDF file

        :param key: block label (without the ``%block`` prefix)
        :param force: when True, raise `SileError` if the block is missing
        :return: tuple ``(found, lines)`` with the content lines of the block
        """
        k = key.lower()
        f, fdf = self.step_to(k, case=False)
        if force and not f:
            # The user requests that the block *MUST* be found
            raise SileError(
                'Requested forced block could not be found: ' +
                str(key) +
                '.',
                self)
        if not f:
            return False, []  # not found
        # If the block is piped in from another file...
        if '<' in fdf:
            # Create new fdf-file and read the block from there
            sub_fdf = FDFSile(fdf.split('<')[1].replace('\n','').strip())
            return sub_fdf._read_block(key, force = force)
        li = []
        while True:
            l = self.readline()
            # The block ends at %endblock (or a repeated label)
            if self.line_has_key(l, '%endblock', case=False) or \
                    self.line_has_key(l, k, case=False):
                return True, li
            # Append list
            li.append(l)
        # NOTE(review): this raise is unreachable — the while-loop only exits
        # via return. If EOF is hit before %endblock, readline() keeps
        # returning '' and the loop may spin; confirm EOF behaviour.
        raise SileError(
            'Error on reading block: ' +
            str(key) +
            ' could not find start/end.')
    @Sile_fh_open
    def write_geom(self, geom, fmt='.5f'):
        """ Writes the geometry to the contained file

        :param geom: `Geometry` to write (cell in Ang, coordinates in Ang)
        :param fmt: float format for the coordinates
        """
        # Check that we can write to the file
        sile_raise_write(self)
        # Write out the cell (always in Angstrom)
        self._write('LatticeConstant 1. Ang\n')
        self._write('%block LatticeVectors\n')
        self._write(' {0} {1} {2}\n'.format(*geom.cell[0, :]))
        self._write(' {0} {1} {2}\n'.format(*geom.cell[1, :]))
        self._write(' {0} {1} {2}\n'.format(*geom.cell[2, :]))
        self._write('%endblock LatticeVectors\n\n')
        self._write('NumberOfAtoms {0}\n'.format(geom.na))
        self._write('AtomicCoordinatesFormat Ang\n')
        self._write('%block AtomicCoordinatesAndAtomicSpecies\n')
        # "x y z species-index # atom-index" per line
        fmt_str = ' {{2:{0}}} {{3:{0}}} {{4:{0}}} {{0}} # {{1}}\n'.format(fmt)
        # Count for the species; collected in order of first appearance
        spec = []
        for ia, a, isp in geom.iter_species():
            self._write(fmt_str.format(isp + 1, ia + 1, *geom.xyz[ia, :]))
            if isp >= len(spec):
                spec.append(a)
        self._write('%endblock AtomicCoordinatesAndAtomicSpecies\n\n')
        # Write out the species table matching the indices used above
        self._write('NumberOfSpecies {0}\n'.format(len(spec)))
        self._write('%block ChemicalSpeciesLabel\n')
        for i, a in enumerate(spec):
            self._write(' {0} {1} {2}\n'.format(i + 1, a.Z, a.tag))
        self._write('%endblock ChemicalSpeciesLabel\n')
@Sile_fh_open
def read_sc(self, *args, **kwargs):
""" Returns `SuperCell` object from the FDF file """
f, lc = self._read('LatticeConstant')
s = float(lc.split()[1])
if 'ang' in lc.lower():
pass
elif 'bohr' in lc.lower():
s *= Bohr2Ang
# Read in cell
cell = np.empty([3, 3], np.float64)
f, lc = self._read_block('LatticeVectors')
if f:
for i in range(3):
cell[i, :] = [float(k) for k in lc[i].split()[:3]]
else:
f, lc = self._read_block('LatticeParameters')
tmp = [float(k) for k in lc[0].split()[:6]]
if f:
cell = SuperCell.tocell(*tmp)
if not f:
# the fdf file contains neither the latticevectors or parameters
raise SileError(
'Could not find Vectors or Parameters block in file')
cell *= s
return SuperCell(cell)
    @Sile_fh_open
    def read_geom(self, *args, **kwargs):
        """ Returns Geometry object from the FDF file

        NOTE: Interaction range of the Atoms are currently not read.
        """
        f, lc = self._read('LatticeConstant')
        if not f:
            raise ValueError('Could not find LatticeConstant in fdf file.')
        # Coordinate scale factor (may be overridden by the coordinate format)
        s = float(lc.split()[1])
        if 'ang' in lc.lower():
            pass
        elif 'bohr' in lc.lower():
            s *= Bohr2Ang
        sc = self.read_sc(*args, **kwargs)
        # No fractional coordinates
        is_frac = False
        # Read atom scaling
        f, lc = self._read('AtomicCoordinatesFormat')
        if not f:
            raise ValueError(
                'Could not find AtomicCoordinatesFormat in fdf file.')
        lc = lc.lower()
        if 'ang' in lc or 'notscaledcartesianang' in lc:
            # Coordinates already in Ang; LatticeConstant scaling not applied
            s = 1.
            pass
        elif 'bohr' in lc or 'notscaledcartesianbohr' in lc:
            s = Bohr2Ang
        elif 'scaledcartesian' in lc:
            # the same scaling as the lattice-vectors
            pass
        elif 'fractional' in lc or 'scaledbylatticevectors' in lc:
            # no scaling of coordinates as that is entirely
            # done by the latticevectors
            s = 1.
            is_frac = True
        # If the user requests a shifted geometry
        # we correct for this
        origo = np.zeros([3], np.float64)
        run = 'origin' in kwargs
        if run:
            run = kwargs['origin']
        if run:
            f, lor = self._read_block('AtomicCoordinatesOrigin')
            if f:
                origo = np.fromstring(lor[0], count=3, sep=' ') * s
        # Origo cannot be interpreted with fractional coordinates
        # hence, it is not transformed.
        # Read atom block (mandatory)
        f, atms = self._read_block(
            'AtomicCoordinatesAndAtomicSpecies', force=True)
        if not f:
            raise ValueError(
                'Could not find AtomicCoordinatesAndAtomicSpecies in fdf file.')
        # Read number of atoms and block
        f, l = self._read('NumberOfAtoms')
        if not f:
            # We default to the number of elements in the
            # AtomicCoordinatesAndAtomicSpecies block
            na = len(atms)
        else:
            na = int(l.split()[1])
        # Reduce space if number of atoms specified
        if na != len(atms):
            # align number of atoms and atms array
            atms = atms[:na]
        if na == 0:
            raise ValueError(
                'NumberOfAtoms has been determined to be zero, no atoms.')
        # Create arrays: coordinates and 0-based species index per atom
        xyz = np.empty([na, 3], np.float64)
        species = np.empty([na], np.int32)
        for ia in range(na):
            l = atms[ia].split()
            xyz[ia, :] = [float(k) for k in l[:3]]
            species[ia] = int(l[3]) - 1
        if is_frac:
            # Fractional coordinates: transform via the cell
            xyz = np.dot(xyz, sc.cell)
        xyz *= s
        xyz += origo
        # Now we read in the species
        f, l = self._read('NumberOfSpecies')
        ns = 0
        if f:
            ns = int(l.split()[1])
        # Read the block (not strictly needed, if so we simply set all atoms to
        # H)
        f, spcs = self._read_block('ChemicalSpeciesLabel')
        if f:
            # Initialize number of species to
            # the length of the ChemicalSpeciesLabel block
            if ns == 0:
                ns = len(spcs)
            # Pre-allocate the species array
            sp = [None] * ns
            for spc in spcs:
                # index Z pseudo-tag
                l = spc.split()
                idx = int(l[0]) - 1
                # Insert the atom
                sp[idx] = Atom(Z=int(l[1]), tag=l[2])
            if None in sp:
                idx = sp.index(None) + 1
                raise ValueError(
                    ("Could not populate entire "
                     "species list. "
                     "Please ensure specie with index {} is present".format(idx)))
            # Create atoms array with species
            atom = [None] * na
            for ia in range(na):
                atom[ia] = sp[species[ia]]
            if None in atom:
                idx = atom.index(None) + 1
                raise ValueError(
                    ("Could not populate entire "
                     "atomic list list. "
                     "Please ensure atom with index {} is present".format(idx)))
        else:
            # Default atom (hydrogen)
            atom = Atom(1)
            # Force number of species to 1
            ns = 1
        # Create and return geometry object
        return Geometry(xyz, atom=atom, sc=sc)
    def ArgumentParser(self, parser=None, *args, **kwargs):
        """ Returns the arguments that is available for this Sile

        If the fdf file contains a geometry the returned parser is the
        geometry's parser extended with fdf-specific options; otherwise a
        minimal FDF-only parser is created.

        Returns
        -------
        p : the argparse parser
        namespace : the namespace object holding the parser state
        """
        import argparse as arg
        try:
            geom = self.read_geom()
            p, namespace = geom.ArgumentParser(parser=parser, *args, **kwargs)
        # NOTE(review): bare except silently swallows *all* errors from
        # read_geom (including programming errors) — consider narrowing.
        except:
            # In case the fdf does not hold the geometry, we allow the
            # script to continue with an FDF-only parser.
            # Create the parser and the custom namespace
            if parser is None:
                p = arg.ArgumentParser("Manipulate a FDF file.")
            else:
                p = parser
            class CustomNamespace(object):
                pass
            namespace = CustomNamespace()
        namespace._FDF = self
        namespace._first_fdf = True
        # As the fdf may provide additional stuff, we do not add EVERYTHING from
        # the Geometry class.
        class FDFAdd(arg.Action):
            # Appends a key/value to the fdf file (first use writes a header)
            def __call__(self, parser, ns, values, option_string=None):
                key = values[0]
                val = values[1]
                if ns._first_fdf:
                    # Append to the end of the file
                    with ns._FDF as fd:
                        fd.write('\n\n# SISL added keywords\n')
                    setattr(ns, '_first_fdf', False)
                ns._FDF.set(key, val)
        #p.add_argument('--fdf-add', nargs=2, metavar=('KEY', 'VALUE'),
        #               action=FDFAdd,
        #               help='Add a key to the FDF file. If it already exists it will be overwritten')
        class FDFGet(arg.Action):
            # Prints the value of a key to stdout (blocks print line-wise)
            def __call__(self, parser, ns, value, option_string=None):
                # Retrieve the value in standard units
                # Currently, we write out the unit "as-is"
                val = ns._FDF.get(value[0], with_unit = True)
                if val is None:
                    print('# {} is currently not in the FDF file '.format(value[0]))
                    return
                if isinstance(val, list):
                    # if the value has any new-values
                    has_nl = False
                    for v in val:
                        if '\n' in v:
                            has_nl = True
                            break
                    if not has_nl:
                        print('{} {}'.format(val[0], ' '.join(val[1:])) )
                    else:
                        print('{}\n'.format(val[0]) + '\n'.join(val[1:]) )
                else:
                    print('{}'.format(val))
        p.add_argument('--fdf', nargs=1, metavar='KEY',
                       action=FDFGet,
                       help='Print (to stdout) the value of the key in the FDF file.')
        return p, namespace
# Register this Sile for the .fdf extension (case-insensitive, gzip allowed)
add_sile('fdf', FDFSile, case=False, gzip=True)
enh: fdf files now return quantities together with their units
Signed-off-by: Nick Papior <81a73bca3d342ffee33d7cf845fdbf77b6553831@gmail.com>
"""
Sile object for reading/writing FDF files
"""
from __future__ import print_function, division
from os.path import dirname, sep
import numpy as np
# Import sile objects
from .sile import SileSIESTA
from ..sile import *
from sisl.io._help import *
# Import the geometry object
from sisl import Geometry, Atom, SuperCell
from sisl.utils.misc import name_spec
from sisl.units import unit_default, unit_group
from sisl.units.siesta import unit_convert
__all__ = ['FDFSile']
# Accepted (case-insensitive) spellings of fdf logical values
_LOGICAL_TRUE = ['.true.','true','yes','y','t']
_LOGICAL_FALSE = ['.false.','false','no','n','f']
_LOGICAL = _LOGICAL_FALSE + _LOGICAL_TRUE
# Conversion factor from Bohr to Angstrom
Bohr2Ang = unit_convert('Bohr', 'Ang')
class FDFSile(SileSIESTA):
""" FDF file object """
def __init__(self, filename, mode='r', base=None):
""" Initialize an FDF file from the filename
By supplying base you can reference files in other directories.
By default the ``base`` is the directory given in the file name.
"""
super(FDFSile, self).__init__(filename, mode=mode)
if base is None:
# Extract from filename
self._directory = dirname(filename)
else:
self._directory = base
if len(self._directory) == 0:
self._directory = '.'
    def _setup(self):
        """ Setup the `FDFSile` after initialization """
        # Characters that start a comment line in fdf files
        self._comment = ['#', '!', ';']
        # Stack of parent file-handles used while descending %include files
        self._parent_fh = []
        # Default include directory (overridden by __init__)
        self._directory = '.'
    def readline(self, comment=False):
        """ Reads the next line of the file

        Transparently descends into ``%include``d files (pushing the current
        handle on ``self._parent_fh``) and pops back up when an included file
        is exhausted (``''`` marks EOF).
        """
        # Call the parent readline function
        l = super(FDFSile, self).readline(comment=comment)
        # In FDF files, %include marks files that progress
        # down in a tree structure
        if '%include' in l:
            # Split for reading tree file
            self._parent_fh.append(self.fh)
            self.fh = open(self._directory + sep + l.split()[1], self._mode)
            # Read the following line in the new file
            return self.readline()
        if len(self._parent_fh) > 0 and l == '':
            # l == '' marks the end of the file
            self.fh.close()
            self.fh = self._parent_fh.pop()
            return self.readline()
        return l
def type(self, key):
""" Return the type of the fdf-keyword """
found, fdf = self._read(key)
if not found:
return None
if fdf.startswith('%block'):
return 'B'
# Grab the entire line (beside the key)
fdf = fdf.split()[1:]
if len(fdf) == 1:
fdf = fdf[0].lower()
if fdf in __LOGICAL:
return 'b'
if '.' in fdf:
return 'r'
return 'i'
return 'n'
def key(self, key):
""" Return the key as written in the fdf-file. If not found, returns `None`. """
found, fdf = self._read(key)
if found:
return fdf.split()[0]
else:
return None
    def get(self, key, unit=None, default=None, with_unit=False):
        """ Retrieve fdf-keyword from the file

        Parameters
        ----------
        key : keyword, optionally with a unit specification ``key{unit}``
        unit : unit to convert the value to (overrides the key specification)
        default : returned when the keyword is not found
        with_unit : if True, return value and unit as a formatted string

        Returns the block content (list of lines), a bool for logical flags,
        a float for single values with a unit, or the raw remainder string.
        """
        # First split into specification and key
        key, tmp_unit = name_spec(key)
        if unit is None:
            unit = tmp_unit
        found, fdf = self._read(key)
        if not found:
            return default
        # The keyword is found...
        if fdf.startswith('%block'):
            found, fdf = self._read_block(key)
            if not found:
                return default
            else:
                return fdf
        # We need to process the returned value further.
        fdfl = fdf.split()
        # Check whether this is a logical flag
        if len(fdfl) == 1:
            # This *MUST* be a boolean
            # SCF.Converge.H
            # defaults to .true.
            return True
        elif fdfl[1] in _LOGICAL_TRUE:
            return True
        elif fdfl[1] in _LOGICAL_FALSE:
            return False
        # It is something different.
        # Try and figure out what it is
        if len(fdfl) == 3:
            # We expect it to be a unit
            if unit is None:
                # Get group of unit
                group = unit_group(fdfl[2])
                # return in default sisl units
                unit = unit_default(group)
            if with_unit and tmp_unit is not None:
                # The user has specifically requested the unit:
                # key{unit}
                return '{0:.4f} {1}'.format(float(fdfl[1]) * unit_convert(fdfl[2], unit), unit)
            elif not with_unit:
                return float(fdfl[1]) * unit_convert(fdfl[2], unit)
        return ' '.join(fdfl[1:])
    def set(self, key, value):
        """ Add the key and value to the FDF file

        Not implemented yet; always raises ``NotImplementedError``.
        """
        raise NotImplementedError("Setting a fdf key is not yet implemented")
    @Sile_fh_open
    def _read(self, key):
        """ Returns the arguments following the keyword in the FDF file

        Returns a ``(found, line)`` tuple.  When the value is piped in from
        another file (``key < file``) the lookup recurses into that file.
        """
        found, fdf = self.step_to(key, case=False)
        # Check whether the key is piped
        if found and fdf.find('<') >= 0:
            # Create new fdf-file
            sub_fdf = FDFSile(fdf.split('<')[1].replace('\n','').strip())
            return sub_fdf._read(key)
        return found, fdf
    @Sile_fh_open
    def _read_block(self, key, force=False):
        """ Returns the arguments following the keyword in the FDF file

        Returns ``(found, lines)`` where ``lines`` are the raw block lines.
        With ``force=True`` a missing block raises ``SileError``.
        """
        k = key.lower()
        f, fdf = self.step_to(k, case=False)
        if force and not f:
            # The user requests that the block *MUST* be found
            raise SileError(
                'Requested forced block could not be found: ' +
                str(key) +
                '.',
                self)
        if not f:
            return False, []  # not found
        # If the block is piped in from another file...
        if '<' in fdf:
            # Create new fdf-file
            sub_fdf = FDFSile(fdf.split('<')[1].replace('\n','').strip())
            return sub_fdf._read_block(key, force = force)
        li = []
        while True:
            l = self.readline()
            if self.line_has_key(l, '%endblock', case=False) or \
               self.line_has_key(l, k, case=False):
                return True, li
            # Append list
            li.append(l)
        # NOTE(review): this raise is unreachable -- the while-loop only exits
        # via the return above; if %endblock is missing the loop keeps
        # appending the EOF marker '' instead of raising.  Confirm intended.
        raise SileError(
            'Error on reading block: ' +
            str(key) +
            ' could not find start/end.')
    @Sile_fh_open
    def write_geom(self, geom, fmt='.5f'):
        """ Writes the geometry to the contained file

        Parameters
        ----------
        geom : the Geometry to write (cell and coordinates emitted in Ang)
        fmt : float format used for the coordinates
        """
        # Check that we can write to the file
        sile_raise_write(self)
        # Write out the cell
        self._write('LatticeConstant 1. Ang\n')
        self._write('%block LatticeVectors\n')
        self._write(' {0} {1} {2}\n'.format(*geom.cell[0, :]))
        self._write(' {0} {1} {2}\n'.format(*geom.cell[1, :]))
        self._write(' {0} {1} {2}\n'.format(*geom.cell[2, :]))
        self._write('%endblock LatticeVectors\n\n')
        self._write('NumberOfAtoms {0}\n'.format(geom.na))
        self._write('AtomicCoordinatesFormat Ang\n')
        self._write('%block AtomicCoordinatesAndAtomicSpecies\n')
        fmt_str = ' {{2:{0}}} {{3:{0}}} {{4:{0}}} {{0}} # {{1}}\n'.format(fmt)
        # Count for the species
        spec = []
        for ia, a, isp in geom.iter_species():
            self._write(fmt_str.format(isp + 1, ia + 1, *geom.xyz[ia, :]))
            # iter_species yields species indices in first-seen order
            if isp >= len(spec):
                spec.append(a)
        self._write('%endblock AtomicCoordinatesAndAtomicSpecies\n\n')
        # Write out species
        # First swap key and value
        self._write('NumberOfSpecies {0}\n'.format(len(spec)))
        self._write('%block ChemicalSpeciesLabel\n')
        for i, a in enumerate(spec):
            self._write(' {0} {1} {2}\n'.format(i + 1, a.Z, a.tag))
        self._write('%endblock ChemicalSpeciesLabel\n')
@Sile_fh_open
def read_sc(self, *args, **kwargs):
""" Returns `SuperCell` object from the FDF file """
f, lc = self._read('LatticeConstant')
s = float(lc.split()[1])
if 'ang' in lc.lower():
pass
elif 'bohr' in lc.lower():
s *= Bohr2Ang
# Read in cell
cell = np.empty([3, 3], np.float64)
f, lc = self._read_block('LatticeVectors')
if f:
for i in range(3):
cell[i, :] = [float(k) for k in lc[i].split()[:3]]
else:
f, lc = self._read_block('LatticeParameters')
tmp = [float(k) for k in lc[0].split()[:6]]
if f:
cell = SuperCell.tocell(*tmp)
if not f:
# the fdf file contains neither the latticevectors or parameters
raise SileError(
'Could not find Vectors or Parameters block in file')
cell *= s
return SuperCell(cell)
    @Sile_fh_open
    def read_geom(self, *args, **kwargs):
        """ Returns Geometry object from the FDF file

        NOTE: Interaction range of the Atoms are currently not read.

        Keyword ``origin=True`` shifts the coordinates by the
        ``AtomicCoordinatesOrigin`` block (if present).
        """
        f, lc = self._read('LatticeConstant')
        if not f:
            raise ValueError('Could not find LatticeConstant in fdf file.')
        s = float(lc.split()[1])
        if 'ang' in lc.lower():
            pass
        elif 'bohr' in lc.lower():
            s *= Bohr2Ang
        sc = self.read_sc(*args, **kwargs)
        # No fractional coordinates
        is_frac = False
        # Read atom scaling
        f, lc = self._read('AtomicCoordinatesFormat')
        if not f:
            raise ValueError(
                'Could not find AtomicCoordinatesFormat in fdf file.')
        lc = lc.lower()
        if 'ang' in lc or 'notscaledcartesianang' in lc:
            s = 1.
            pass
        elif 'bohr' in lc or 'notscaledcartesianbohr' in lc:
            s = Bohr2Ang
        elif 'scaledcartesian' in lc:
            # the same scaling as the lattice-vectors
            pass
        elif 'fractional' in lc or 'scaledbylatticevectors' in lc:
            # no scaling of coordinates as that is entirely
            # done by the latticevectors
            s = 1.
            is_frac = True
        # If the user requests a shifted geometry
        # we correct for this
        origo = np.zeros([3], np.float64)
        run = 'origin' in kwargs
        if run:
            run = kwargs['origin']
        if run:
            f, lor = self._read_block('AtomicCoordinatesOrigin')
            if f:
                origo = np.fromstring(lor[0], count=3, sep=' ') * s
        # Origo cannot be interpreted with fractional coordinates
        # hence, it is not transformed.
        # Read atom block
        f, atms = self._read_block(
            'AtomicCoordinatesAndAtomicSpecies', force=True)
        if not f:
            raise ValueError(
                'Could not find AtomicCoordinatesAndAtomicSpecies in fdf file.')
        # Read number of atoms and block
        f, l = self._read('NumberOfAtoms')
        if not f:
            # We default to the number of elements in the
            # AtomicCoordinatesAndAtomicSpecies block
            na = len(atms)
        else:
            na = int(l.split()[1])
        # Reduce space if number of atoms specified
        if na != len(atms):
            # align number of atoms and atms array
            atms = atms[:na]
        if na == 0:
            raise ValueError(
                'NumberOfAtoms has been determined to be zero, no atoms.')
        # Create array
        xyz = np.empty([na, 3], np.float64)
        species = np.empty([na], np.int32)
        for ia in range(na):
            l = atms[ia].split()
            xyz[ia, :] = [float(k) for k in l[:3]]
            # species indices are 1-based in the fdf file
            species[ia] = int(l[3]) - 1
        if is_frac:
            xyz = np.dot(xyz, sc.cell)
        xyz *= s
        xyz += origo
        # Now we read in the species
        f, l = self._read('NumberOfSpecies')
        ns = 0
        if f:
            ns = int(l.split()[1])
        # Read the block (not strictly needed, if so we simply set all atoms to
        # H)
        f, spcs = self._read_block('ChemicalSpeciesLabel')
        if f:
            # Initialize number of species to
            # the length of the ChemicalSpeciesLabel block
            if ns == 0:
                ns = len(spcs)
            # Pre-allocate the species array
            sp = [None] * ns
            for spc in spcs:
                # index Z pseudo-tag
                l = spc.split()
                idx = int(l[0]) - 1
                # Insert the atom
                sp[idx] = Atom(Z=int(l[1]), tag=l[2])
            if None in sp:
                idx = sp.index(None) + 1
                raise ValueError(
                    ("Could not populate entire "
                     "species list. "
                     "Please ensure specie with index {} is present".format(idx)))
            # Create atoms array with species
            atom = [None] * na
            for ia in range(na):
                atom[ia] = sp[species[ia]]
            if None in atom:
                idx = atom.index(None) + 1
                raise ValueError(
                    ("Could not populate entire "
                     "atomic list list. "
                     "Please ensure atom with index {} is present".format(idx)))
        else:
            # Default atom (hydrogen)
            atom = Atom(1)
            # Force number of species to 1
            ns = 1
        # Create and return geometry object
        return Geometry(xyz, atom=atom, sc=sc)
    def ArgumentParser(self, parser=None, *args, **kwargs):
        """ Returns the arguments that is available for this Sile

        If the fdf file contains a geometry the returned parser is the
        geometry's parser extended with fdf-specific options; otherwise a
        minimal FDF-only parser is created.

        Returns
        -------
        p : the argparse parser
        namespace : the namespace object holding the parser state
        """
        import argparse as arg
        try:
            geom = self.read_geom()
            p, namespace = geom.ArgumentParser(parser=parser, *args, **kwargs)
        # NOTE(review): bare except silently swallows *all* errors from
        # read_geom (including programming errors) — consider narrowing.
        except:
            # In case the fdf does not hold the geometry, we allow the
            # script to continue with an FDF-only parser.
            # Create the parser and the custom namespace
            if parser is None:
                p = arg.ArgumentParser("Manipulate a FDF file.")
            else:
                p = parser
            class CustomNamespace(object):
                pass
            namespace = CustomNamespace()
        namespace._FDF = self
        namespace._first_fdf = True
        # As the fdf may provide additional stuff, we do not add EVERYTHING from
        # the Geometry class.
        class FDFAdd(arg.Action):
            # Appends a key/value to the fdf file (first use writes a header)
            def __call__(self, parser, ns, values, option_string=None):
                key = values[0]
                val = values[1]
                if ns._first_fdf:
                    # Append to the end of the file
                    with ns._FDF as fd:
                        fd.write('\n\n# SISL added keywords\n')
                    setattr(ns, '_first_fdf', False)
                ns._FDF.set(key, val)
        #p.add_argument('--fdf-add', nargs=2, metavar=('KEY', 'VALUE'),
        #               action=FDFAdd,
        #               help='Add a key to the FDF file. If it already exists it will be overwritten')
        class FDFGet(arg.Action):
            # Prints the value of a key to stdout (blocks print line-wise)
            def __call__(self, parser, ns, value, option_string=None):
                # Retrieve the value in standard units
                # Currently, we write out the unit "as-is"
                val = ns._FDF.get(value[0], with_unit = True)
                if val is None:
                    print('# {} is currently not in the FDF file '.format(value[0]))
                    return
                if isinstance(val, list):
                    # if the value has any new-values
                    has_nl = False
                    for v in val:
                        if '\n' in v:
                            has_nl = True
                            break
                    if not has_nl:
                        print('{} {}'.format(val[0], ' '.join(val[1:])) )
                    else:
                        print('{}\n'.format(val[0]) + '\n'.join(val[1:]) )
                else:
                    print('{}'.format(val))
        p.add_argument('--fdf', nargs=1, metavar='KEY',
                       action=FDFGet,
                       help='Print (to stdout) the value of the key in the FDF file.')
        return p, namespace
# Register this Sile for the .fdf extension (case-insensitive, gzip allowed)
add_sile('fdf', FDFSile, case=False, gzip=True)
|
#!/usr/bin/env python
class XMLTranslator:
    """Translate a PML process (as an XML tree) into Promela source lines.

    NOTE(review): this module uses the Python 2 ``print`` statement
    (see ``translate_xml``) and is therefore Python 2 only.
    """
    # Get display indentation for a certain depth
    def get_indent(self, depth):
        line = ""
        for i in range(0, depth):
            line += "\t"
        return line
    # Get variable id (name)
    def get_varid(self, node):
        return node[0].get("value")
    # Get variables
    def get_vars(self, var):
        vars = []
        for varid in var:
            vars.append(self.get_varid(varid))
        return vars
    # Get variable list used by an action
    def get_varlist(self, node):
        varlist = []
        for var in node.iter("PrimVar"):
            varlist += self.get_vars(var)
        return varlist
    # PML action
    def handle_action(self, node, depth, processes_sofar, process_current, resources_sofar):
        # Blocks (requires)
        reqlist = []
        for req in node.iter("SpecReqs"):
            # NOTE(review): slice-assignment REPLACES the list each
            # iteration, so only the last SpecReqs element is kept — confirm
            # whether accumulation was intended.
            reqlist[0:] = self.get_varlist(req)
        #print "requires: " + str(reqlist)
        curdepth = depth
        if(len(reqlist) > 0):
            line = self.get_indent(curdepth)
            curdepth += 1
            line += reqlist[0]
            for req in reqlist[1:]:
                line += " && " + req
            line += " ->"
            process_current.append(line)
        # State changes (provides)
        provlist = []
        for prov in node.iter("SpecProv"):
            provlist[0:] = self.get_varlist(prov)
        #print "provides: " + str(provlist)
        if len(provlist) == 1:
            line = self.get_indent(curdepth)
            line += provlist[0] + " = true;"
            process_current.append(line)
        elif len(provlist) > 0:
            # Multiple provides are grouped in a brace block
            process_current.append(self.get_indent(curdepth-1) + "{")
            for prov in provlist:
                line = self.get_indent(curdepth)
                line += prov + " = true;"
                process_current.append(line)
            process_current.append(self.get_indent(curdepth-1) + "}")
        # Every referenced variable becomes a declared resource
        for req in reqlist:
            resources_sofar.add(req)
        for prov in provlist:
            resources_sofar.add(prov)
    # PML iteration
    def handle_iteration(self, node, depth, processes_sofar, process_current, resources_sofar):
        pass
    # PML sequence
    def handle_sequence(self, node, depth, processes_sofar, process_current, resources_sofar):
        self.parse_nodes(node, depth, processes_sofar, process_current, resources_sofar)
    # Dispatch table from PML tag name to handler (class-level: the names
    # above are visible in the class namespace during class-body execution)
    constructs = {
        "PrimAct" : handle_action,
        "PrimIter" : handle_iteration,
        "PrimSeq" : handle_sequence,
        "PrimTask" : handle_sequence
        # More..
    }
    # Parse non-Process node of the XML file
    def parse_nodes(self, node, depth, processes_sofar, process_current, resources_sofar):
        for child in node:
            if child.tag in XMLTranslator.constructs:
                XMLTranslator.constructs[child.tag](self, child, depth+1, processes_sofar, process_current, resources_sofar)
        pass
    # Parse Process, the outermost level of a PML file
    def parse_process(self, root):
        processes = [] # List of Promela proctypes
        resources = set() # Set of resources
        procname = root[0].get("value") # Process name; ID is always the first element in well-formed PML
        process_main = ["active proctype " + procname + "()", "{"]
        processes.append(process_main)
        # Parse inner tree nodes
        self.parse_nodes(root, 0, processes, process_main, resources)
        # Add dummy instruction to cope with empty processes
        if len(process_main) <= 2:
            process_main.append("\tskip;")
        process_main.append("}")
        # Assemble resources and processes into translation
        translation = []
        if len(resources) > 0:
            for resource in resources: # FIXME: not sure this is where resources should be going - scoping?
                translation.append("bool " + resource + ";")
            translation.append("")
        for process in processes:
            for line in process:
                translation.append(line)
        return translation
    def translate_xml(self, xml_string):
        """Parse an XML string and return the Promela translation lines.

        Exits the interpreter on malformed XML.
        """
        import lxml
        import sys
        from lxml import etree
        root = None
        try:
            root = lxml.etree.fromstring(xml_string)
        except lxml.etree.XMLSyntaxError:
            print "Error parsing XML, exiting."
            sys.exit(1)
        translation = self.parse_process(root)
        return translation
Reformat to conform to PEP 8
#!/usr/bin/env python
class XMLTranslator:
    """Translate a PML process (as an XML tree) into Promela source lines.

    PEP 8 reformatted variant; the dispatch table is built per-instance
    from bound methods in ``__init__``.

    NOTE(review): ``translate_xml`` still uses the Python 2 ``print``
    statement, so this module remains Python 2 only.
    """
    def __init__(self):
        # Dispatch table from PML tag name to bound handler method
        self.constructs = {
            "PrimAct": self.handle_action,
            "PrimIter": self.handle_iteration,
            "PrimSeq": self.handle_sequence,
            "PrimTask": self.handle_sequence
            # More..
        }
    # Get display indentation for a certain depth
    @staticmethod
    def get_indent(depth):
        line = ""
        for i in range(0, depth):
            line += "\t"
        return line
    # Get variable id (name)
    @staticmethod
    def get_varid(node):
        return node[0].get("value")
    # Get variables
    def get_vars(self, var):
        vars = []
        for varid in var:
            vars.append(self.get_varid(varid))
        return vars
    # Get variable list used by an action
    def get_varlist(self, node):
        varlist = []
        for var in node.iter("PrimVar"):
            varlist += self.get_vars(var)
        return varlist
    # PML action
    def handle_action(self, node, depth, processes_sofar, process_current, resources_sofar):
        # Blocks (requires)
        reqlist = []
        for req in node.iter("SpecReqs"):
            # NOTE(review): slice-assignment REPLACES the list each
            # iteration, so only the last SpecReqs element is kept —
            # confirm whether accumulation was intended.
            reqlist[0:] = self.get_varlist(req)
        # print "requires: " + str(reqlist)
        curdepth = depth
        if len(reqlist) > 0:
            line = self.get_indent(curdepth)
            curdepth += 1
            line += reqlist[0]
            for req in reqlist[1:]:
                line += " && " + req
            line += " ->"
            process_current.append(line)
        # State changes (provides)
        provlist = []
        for prov in node.iter("SpecProv"):
            provlist[0:] = self.get_varlist(prov)
        # print "provides: " + str(provlist)
        if len(provlist) == 1:
            line = self.get_indent(curdepth)
            line += provlist[0] + " = true;"
            process_current.append(line)
        elif len(provlist) > 0:
            # Multiple provides are grouped in a brace block
            process_current.append(self.get_indent(curdepth - 1) + "{")
            for prov in provlist:
                line = self.get_indent(curdepth)
                line += prov + " = true;"
                process_current.append(line)
            process_current.append(self.get_indent(curdepth - 1) + "}")
        # Every referenced variable becomes a declared resource
        for req in reqlist:
            resources_sofar.add(req)
        for prov in provlist:
            resources_sofar.add(prov)
    # PML iteration
    def handle_iteration(self, node, depth, processes_sofar, process_current, resources_sofar):
        pass
    # PML sequence
    def handle_sequence(self, node, depth, processes_sofar, process_current, resources_sofar):
        self.parse_nodes(node, depth, processes_sofar, process_current, resources_sofar)
    # Parse non-Process node of the XML file
    def parse_nodes(self, node, depth, processes_sofar, process_current, resources_sofar):
        for child in node:
            if child.tag in self.constructs:
                self.constructs[child.tag](child, depth + 1, processes_sofar, process_current,
                                           resources_sofar)
        pass
    # Parse Process, the outermost level of a PML file
    def parse_process(self, root):
        processes = []  # List of Promela proctypes
        resources = set()  # Set of resources
        procname = root[0].get("value")  # Process name; ID is always the first element in well-formed PML
        process_main = ["active proctype " + procname + "()", "{"]
        processes.append(process_main)
        # Parse inner tree nodes
        self.parse_nodes(root, 0, processes, process_main, resources)
        # Add dummy instruction to cope with empty processes
        if len(process_main) <= 2:
            process_main.append("\tskip;")
        process_main.append("}")
        # Assemble resources and processes into translation
        translation = []
        if len(resources) > 0:
            for resource in resources:  # FIXME: not sure this is where resources should be going - scoping?
                translation.append("bool " + resource + ";")
            translation.append("")
        for process in processes:
            for line in process:
                translation.append(line)
        return translation
    # PML selection
    # NOTE(review): not registered in self.constructs ("PrimSelect"?), so it
    # is currently never dispatched — confirm intended.
    def handle_selection(self, node, depth, processes_sofar, process_current, resources_sofar):
        pass
    def translate_xml(self, xml_string):
        """Parse an XML string and return the Promela translation lines.

        Exits the interpreter on malformed XML.
        """
        import lxml
        import sys
        from lxml import etree
        root = None
        try:
            root = lxml.etree.fromstring(xml_string)
        except lxml.etree.XMLSyntaxError:
            print "Error parsing XML, exiting."
            sys.exit(1)
        translation = self.parse_process(root)
        return translation
|
import re
from datetime import datetime
import pytz
from radar.constants import HUMAN_DATE_FORMAT
from radar.safe_strftime import safe_strftime
from radar.utils import is_date, date_to_datetime, datetime_to_date, is_datetime
from radar.validation.core import SkipField, ValidationError, pass_context, pass_call
USERNAME_REGEX = re.compile('^[a-z0-9](?:[a-z0-9]*(?:[\.][a-z0-9]+)?)*$')
USERNAME_MIN_LENGTH = 4
USERNAME_MAX_LENGTH = 32
PASSWORD_MIN_LENGTH = 8
EMAIL_REGEX = re.compile(r'^.+@[^.@][^@]*\.[^.@]+$')
POSTCODE_BFPO_REGEX = re.compile('^BFPO[ ]?\\d{1,4}$')
POSTCODE_REGEX = re.compile('^(GIR[ ]?0AA|((AB|AL|B|BA|BB|BD|BH|BL|BN|BR|BS|BT|BX|CA|CB|CF|CH|CM|CO|CR|CT|CV|CW|DA|DD|DE|DG|DH|DL|DN|DT|DY|E|EC|EH|EN|EX|FK|FY|G|GL|GY|GU|HA|HD|HG|HP|HR|HS|HU|HX|IG|IM|IP|IV|JE|KA|KT|KW|KY|L|LA|LD|LE|LL|LN|LS|LU|M|ME|MK|ML|N|NE|NG|NN|NP|NR|NW|OL|OX|PA|PE|PH|PL|PO|PR|RG|RH|RM|S|SA|SE|SG|SK|SL|SM|SN|SO|SP|SR|SS|ST|SW|SY|TA|TD|TF|TN|TQ|TR|TS|TW|UB|W|WA|WC|WD|WF|WN|WR|WS|WV|YO|ZE)(\\d[\\dA-Z]?[ ]?\\d[ABD-HJLN-UW-Z]{2}))|BFPO[ ]?\\d{1,4})$') # noqa
TRAILING_COMMA_REGEX = re.compile('\s*,$')
TAB_TO_SPACE_REGEX = re.compile('\t')
NORMALISE_WHITESPACE_REGEX = re.compile('\s{2,}')
DAY_ZERO = datetime(1900, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
def required():
    """Build a validator that rejects ``None`` values."""
    def required_f(value):
        # Any non-None value passes through unchanged.
        if value is not None:
            return value
        raise ValidationError('This field is required.')
    return required_f
def optional():
    """Build a validator that stops the validation chain on ``None``."""
    def optional_f(value):
        if value is not None:
            return value
        raise SkipField()
    return optional_f
def after_date_of_birth():
    """Build a validator rejecting values before the patient's date of birth."""
    @pass_context
    def after_date_of_birth_f(ctx, value):
        # Normalise datetimes to plain dates for the comparison.
        value_date = datetime_to_date(value) if is_datetime(value) else value
        dob = ctx['patient'].earliest_date_of_birth
        if dob is not None and value_date < dob:
            raise ValidationError("Value is before the patient's date of birth (%s)." % safe_strftime(dob, HUMAN_DATE_FORMAT))
        return value
    return after_date_of_birth_f
def none_if_blank():
    """Build a validator that maps zero-length values to ``None``."""
    def none_if_blank_f(value):
        if value is not None and len(value) == 0:
            return None
        return value
    return none_if_blank_f
def valid_date_for_patient():
    # Composite validator: value must be after 1900-01-01 (DAY_ZERO), not in
    # the future, and not before the patient's date of birth.
    @pass_call
    def valid_date_for_patient_f(call, value):
        value = call(after_day_zero(), value)
        value = call(not_in_future(), value)
        value = call(after_date_of_birth(), value)
        return value
    return valid_date_for_patient_f
def not_empty():
    """Build a validator rejecting ``None`` and zero-length values."""
    def not_empty_f(value):
        if value is not None and len(value) != 0:
            return value
        raise ValidationError('This field is required.')
    return not_empty_f
def min_(min_value):
    """Build a validator enforcing ``value >= min_value``."""
    def min_f(value):
        # Negated "<" keeps the original comparison semantics exactly.
        if not (value < min_value):
            return value
        raise ValidationError('Must be greater than or equal to %s.' % min_value)
    return min_f
def max_(max_value):
    """Build a validator enforcing ``value <= max_value``."""
    def max_f(value):
        # Negated ">" keeps the original comparison semantics exactly.
        if not (value > max_value):
            return value
        raise ValidationError('Must be less than or equal to %s.' % max_value)
    return max_f
def range_(min_value=None, max_value=None):
    """Build a validator enforcing optional lower/upper bounds."""
    @pass_call
    def range_f(call, value):
        bounds = []
        if min_value is not None:
            bounds.append(min_(min_value))
        if max_value is not None:
            bounds.append(max_(max_value))
        for bound in bounds:
            value = call(bound, value)
        return value
    return range_f
def in_(values):
    """Build a validator accepting only members of ``values``."""
    def in_f(value):
        if value in values:
            return value
        raise ValidationError('Not a valid value.')
    return in_f
def not_in_future():
    """Build a validator rejecting dates/datetimes in the future.

    BUG-FIX: plain dates are now compared against today's *local* date.
    The previous code took ``.date()`` of an aware UTC "now", which
    wrongly rejected valid local dates between midnight and 01:00
    whenever local time is ahead of UTC (e.g. during BST).
    """
    def not_in_future_f(value):
        if is_date(value):
            # Local calendar date (equivalent to date.today()).
            now = datetime.now().date()
        else:
            now = datetime.now(pytz.utc)
        if value > now:
            raise ValidationError("Can't be in the future.")
        return value
    return not_in_future_f
def after(min_dt, dt_format=HUMAN_DATE_FORMAT):
    """Build a validator rejecting values earlier than ``min_dt``."""
    # Normalise the bound to a datetime once, up front.
    if is_date(min_dt):
        min_dt = date_to_datetime(min_dt)
    def after_f(value):
        value_dt = date_to_datetime(value) if is_date(value) else value
        if value_dt < min_dt:
            raise ValidationError('Value is before %s.' % safe_strftime(min_dt, dt_format))
        return value
    return after_f
def before(max_dt, dt_format=HUMAN_DATE_FORMAT):
    """Build a validator rejecting values later than ``max_dt``."""
    # Normalise the bound to a datetime once, up front.
    if is_date(max_dt):
        max_dt = date_to_datetime(max_dt)
    def before_f(value):
        value_dt = date_to_datetime(value) if is_date(value) else value
        if value_dt > max_dt:
            raise ValidationError('Value is after %s.' % safe_strftime(max_dt, dt_format))
        return value
    return before_f
def max_length(max_value):
    """Build a validator rejecting values longer than ``max_value``."""
    def max_length_f(value):
        if len(value) <= max_value:
            return value
        raise ValidationError('Value is too long (max length is %d characters).' % max_value)
    return max_length_f
def min_length(min_value):
    """Build a validator rejecting values shorter than ``min_value``."""
    def min_length_f(value):
        if len(value) >= min_value:
            return value
        raise ValidationError('Value is too short (min length is %d characters).' % min_value)
    return min_length_f
def email_address():
    """Build a validator that lower-cases and checks an email address."""
    def email_address_f(value):
        value = value.lower()
        if EMAIL_REGEX.match(value):
            return value
        raise ValidationError('Not a valid email address.')
    return email_address_f
def username():
    """Build a validator for usernames (with legacy email fallback)."""
    def username_f(value):
        value = value.lower()
        if not USERNAME_REGEX.match(value):
            message = 'Not a valid username.'
        elif len(value) < USERNAME_MIN_LENGTH:
            message = 'Username too short.'
        elif len(value) > USERNAME_MAX_LENGTH:
            message = 'Username too long.'
        else:
            message = None
        # Old usernames are email addresses, which remain acceptable.
        if message is not None and not EMAIL_REGEX.match(value):
            raise ValidationError(message)
        return value
    return username_f
# TODO
def password():
    """Build a validator enforcing the minimum password length."""
    def password_f(value):
        if len(value) >= PASSWORD_MIN_LENGTH:
            return value
        raise ValidationError('Password too short (must be at least %d characters).' % PASSWORD_MIN_LENGTH)
    return password_f
def postcode():
    """Build a validator that normalises and checks a UK postcode."""
    def postcode_f(value):
        # Normalise: uppercase and strip everything but letters/digits.
        value = re.sub('[^A-Z0-9]', '', value.upper())
        if not POSTCODE_REGEX.match(value):
            raise ValidationError('Not a valid postcode.')
        # Re-insert the single space: BFPO numbers keep 4 trailing
        # characters, ordinary postcodes keep the 3-character inward code.
        split = 4 if POSTCODE_BFPO_REGEX.match(value) else 3
        value = value[:-split] + ' ' + value[-split:]
        return value
    return postcode_f
def remove_trailing_comma():
    """Build a validator that strips an optional trailing comma."""
    def remove_trailing_comma_f(value):
        # Drops the comma and any whitespace immediately before it.
        return TRAILING_COMMA_REGEX.sub('', value)
    return remove_trailing_comma_f
def normalise_whitespace():
    """Build a validator that collapses tabs and repeated whitespace."""
    def normalise_whitespace_f(value):
        value = TAB_TO_SPACE_REGEX.sub(' ', value)  # tabs -> single space
        return NORMALISE_WHITESPACE_REGEX.sub(' ', value)  # collapse runs
    return normalise_whitespace_f
def upper():
    """Build a validator that upper-cases its value."""
    def upper_f(value):
        return value.upper()
    return upper_f
def lower():
    """Build a validator that lower-cases its value."""
    def lower_f(value):
        return value.lower()
    return lower_f
def after_day_zero(dt_format=HUMAN_DATE_FORMAT):
    # Validator rejecting values before DAY_ZERO (1900-01-01 UTC).
    after_f = after(min_dt=DAY_ZERO, dt_format=dt_format)
    # Wrapped so the returned function keeps a descriptive name.
    def after_day_zero_f(value):
        return after_f(value)
    return after_day_zero_f
Fix `not_in_future` timezone bug

The check compared local date values against the UTC date, so it only
misfired between midnight and 1 AM local time (at least until the
clocks go back).
import re
from datetime import datetime, date
import pytz
from radar.constants import HUMAN_DATE_FORMAT
from radar.safe_strftime import safe_strftime
from radar.utils import is_date, date_to_datetime, datetime_to_date, is_datetime
from radar.validation.core import SkipField, ValidationError, pass_context, pass_call
USERNAME_REGEX = re.compile('^[a-z0-9](?:[a-z0-9]*(?:[\.][a-z0-9]+)?)*$')
USERNAME_MIN_LENGTH = 4
USERNAME_MAX_LENGTH = 32
PASSWORD_MIN_LENGTH = 8
EMAIL_REGEX = re.compile(r'^.+@[^.@][^@]*\.[^.@]+$')
POSTCODE_BFPO_REGEX = re.compile('^BFPO[ ]?\\d{1,4}$')
POSTCODE_REGEX = re.compile('^(GIR[ ]?0AA|((AB|AL|B|BA|BB|BD|BH|BL|BN|BR|BS|BT|BX|CA|CB|CF|CH|CM|CO|CR|CT|CV|CW|DA|DD|DE|DG|DH|DL|DN|DT|DY|E|EC|EH|EN|EX|FK|FY|G|GL|GY|GU|HA|HD|HG|HP|HR|HS|HU|HX|IG|IM|IP|IV|JE|KA|KT|KW|KY|L|LA|LD|LE|LL|LN|LS|LU|M|ME|MK|ML|N|NE|NG|NN|NP|NR|NW|OL|OX|PA|PE|PH|PL|PO|PR|RG|RH|RM|S|SA|SE|SG|SK|SL|SM|SN|SO|SP|SR|SS|ST|SW|SY|TA|TD|TF|TN|TQ|TR|TS|TW|UB|W|WA|WC|WD|WF|WN|WR|WS|WV|YO|ZE)(\\d[\\dA-Z]?[ ]?\\d[ABD-HJLN-UW-Z]{2}))|BFPO[ ]?\\d{1,4})$') # noqa
TRAILING_COMMA_REGEX = re.compile('\s*,$')
TAB_TO_SPACE_REGEX = re.compile('\t')
NORMALISE_WHITESPACE_REGEX = re.compile('\s{2,}')
DAY_ZERO = datetime(1900, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
def required():
    """Build a validator that rejects ``None`` values."""
    def required_f(value):
        # Any non-None value passes through unchanged.
        if value is not None:
            return value
        raise ValidationError('This field is required.')
    return required_f
def optional():
    """Build a validator that stops the validation chain on ``None``."""
    def optional_f(value):
        if value is not None:
            return value
        raise SkipField()
    return optional_f
def after_date_of_birth():
    """Build a validator rejecting values before the patient's date of birth."""
    @pass_context
    def after_date_of_birth_f(ctx, value):
        # Normalise datetimes to plain dates for the comparison.
        value_date = datetime_to_date(value) if is_datetime(value) else value
        dob = ctx['patient'].earliest_date_of_birth
        if dob is not None and value_date < dob:
            raise ValidationError("Value is before the patient's date of birth (%s)." % safe_strftime(dob, HUMAN_DATE_FORMAT))
        return value
    return after_date_of_birth_f
def none_if_blank():
    """Build a validator that maps zero-length values to ``None``."""
    def none_if_blank_f(value):
        if value is not None and len(value) == 0:
            return None
        return value
    return none_if_blank_f
def valid_date_for_patient():
    # Composite validator: value must be after 1900-01-01 (DAY_ZERO), not in
    # the future, and not before the patient's date of birth.
    @pass_call
    def valid_date_for_patient_f(call, value):
        value = call(after_day_zero(), value)
        value = call(not_in_future(), value)
        value = call(after_date_of_birth(), value)
        return value
    return valid_date_for_patient_f
def not_empty():
    """Build a validator rejecting ``None`` and zero-length values."""
    def not_empty_f(value):
        if value is not None and len(value) != 0:
            return value
        raise ValidationError('This field is required.')
    return not_empty_f
def min_(min_value):
    """Build a validator enforcing ``value >= min_value``."""
    def min_f(value):
        # Negated "<" keeps the original comparison semantics exactly.
        if not (value < min_value):
            return value
        raise ValidationError('Must be greater than or equal to %s.' % min_value)
    return min_f
def max_(max_value):
    """Build a validator enforcing ``value <= max_value``."""
    def max_f(value):
        # Negated ">" keeps the original comparison semantics exactly.
        if not (value > max_value):
            return value
        raise ValidationError('Must be less than or equal to %s.' % max_value)
    return max_f
def range_(min_value=None, max_value=None):
    """Build a validator enforcing optional lower/upper bounds."""
    @pass_call
    def range_f(call, value):
        bounds = []
        if min_value is not None:
            bounds.append(min_(min_value))
        if max_value is not None:
            bounds.append(max_(max_value))
        for bound in bounds:
            value = call(bound, value)
        return value
    return range_f
def in_(values):
    """Build a validator accepting only members of ``values``."""
    def in_f(value):
        if value in values:
            return value
        raise ValidationError('Not a valid value.')
    return in_f
def not_in_future():
    # Validator rejecting dates/datetimes in the future.
    def not_in_future_f(value):
        if is_date(value):
            # Plain dates are compared against today's *local* date; using
            # the date of a UTC "now" would wrongly reject local dates
            # between midnight and 01:00 when local time is ahead of UTC
            # (e.g. during BST).
            now = date.today()
        else:
            now = datetime.now(pytz.utc)
        if value > now:
            raise ValidationError("Can't be in the future.")
        return value
    return not_in_future_f
def after(min_dt, dt_format=HUMAN_DATE_FORMAT):
    """Build a validator rejecting values earlier than ``min_dt``."""
    # Normalise the bound to a datetime once, up front.
    if is_date(min_dt):
        min_dt = date_to_datetime(min_dt)
    def after_f(value):
        value_dt = date_to_datetime(value) if is_date(value) else value
        if value_dt < min_dt:
            raise ValidationError('Value is before %s.' % safe_strftime(min_dt, dt_format))
        return value
    return after_f
def before(max_dt, dt_format=HUMAN_DATE_FORMAT):
    """Build a validator rejecting values later than ``max_dt``."""
    # Normalise the bound to a datetime once, up front.
    if is_date(max_dt):
        max_dt = date_to_datetime(max_dt)
    def before_f(value):
        value_dt = date_to_datetime(value) if is_date(value) else value
        if value_dt > max_dt:
            raise ValidationError('Value is after %s.' % safe_strftime(max_dt, dt_format))
        return value
    return before_f
def max_length(max_value):
    """Build a validator rejecting values longer than ``max_value``."""
    def max_length_f(value):
        if len(value) <= max_value:
            return value
        raise ValidationError('Value is too long (max length is %d characters).' % max_value)
    return max_length_f
def min_length(min_value):
    """Validator factory: the value's length must be at least *min_value*."""
    def min_length_f(value):
        if len(value) >= min_value:
            return value
        raise ValidationError('Value is too short (min length is %d characters).' % min_value)
    return min_length_f
def email_address():
    """Validator factory: lower-case the value and validate it as an email address."""
    def email_address_f(value):
        lowered = value.lower()
        if not EMAIL_REGEX.match(lowered):
            raise ValidationError('Not a valid email address.')
        return lowered
    return email_address_f
def username():
    """Validator factory: lower-case and validate a username.

    Legacy usernames are email addresses, so a value that fails the
    username rules is still accepted when it matches the email pattern.
    """
    def username_f(value):
        value = value.lower()
        if not USERNAME_REGEX.match(value):
            message = 'Not a valid username.'
        elif len(value) < USERNAME_MIN_LENGTH:
            message = 'Username too short.'
        elif len(value) > USERNAME_MAX_LENGTH:
            message = 'Username too long.'
        else:
            message = None
        # Old usernames are email addresses
        if message is not None and not EMAIL_REGEX.match(value):
            raise ValidationError(message)
        return value
    return username_f
# TODO
def password():
    """Validator factory: enforce the minimum password length."""
    def password_f(value):
        if len(value) >= PASSWORD_MIN_LENGTH:
            return value
        raise ValidationError('Password too short (must be at least %d characters).' % PASSWORD_MIN_LENGTH)
    return password_f
def postcode():
    """Validator factory: normalise and validate a postcode.

    The value is upper-cased, stripped of non-alphanumerics, validated,
    then re-spaced before its final suffix (four characters for BFPO
    postcodes, three otherwise).
    """
    def postcode_f(value):
        cleaned = re.sub('[^A-Z0-9]', '', value.upper())
        if not POSTCODE_REGEX.match(cleaned):
            raise ValidationError('Not a valid postcode.')
        suffix = 4 if POSTCODE_BFPO_REGEX.match(cleaned) else 3
        return cleaned[:-suffix] + ' ' + cleaned[-suffix:]
    return postcode_f
def remove_trailing_comma():
    """Validator factory: strip a trailing comma from the value."""
    def remove_trailing_comma_f(value):
        return TRAILING_COMMA_REGEX.sub('', value)
    return remove_trailing_comma_f
def normalise_whitespace():
    """Validator factory: replace tabs with spaces and collapse repeated spaces."""
    def normalise_whitespace_f(value):
        value = TAB_TO_SPACE_REGEX.sub(' ', value)        # tabs -> spaces
        return NORMALISE_WHITESPACE_REGEX.sub(' ', value)  # collapse runs
    return normalise_whitespace_f
def upper():
    """Validator factory: upper-case the value."""
    def upper_f(value):
        return value.upper()
    return upper_f
def lower():
    """Validator factory: lower-case the value."""
    def lower_f(value):
        return value.lower()
    return lower_f
def after_day_zero(dt_format=HUMAN_DATE_FORMAT):
    """Validator factory: the value must not fall before ``DAY_ZERO``."""
    check = after(min_dt=DAY_ZERO, dt_format=dt_format)
    def after_day_zero_f(value):
        return check(value)
    return after_day_zero_f
|
# -*- coding: utf-8 -*-
"""
@file
@brief Default values for the Sphinx configuration.
"""
import sys
import os
import datetime
import re
import warnings
from .style_css_template import style_figure_notebook
from sphinx.builders.html import Stylesheet
if sys.version_info[0] == 2:
from codecs import open
FileNotFoundError = Exception
def set_sphinx_variables(fileconf, module_name, author, year, theme, theme_path, ext_locals,
                         add_extensions=None, bootswatch_theme="spacelab", bootswatch_navbar_links=None,
                         description_latex="", use_mathjax=False, use_lunrsearch=False,
                         enable_disabled_parts="enable_disabled_documented_pieces_of_code",
                         sharepost="facebook-linkedin-twitter-20-body", custom_style=None,
                         extlinks=None, github_user=None, github_repo=None, title=None,
                         book=True, link_resolve=None):
    """
    Define variables for :epkg:`Sphinx`.

    Every local variable defined in this function whose name does not start
    with an underscore is copied into *ext_locals* at the end, so local names
    double as Sphinx configuration values and MUST NOT be renamed.

    @param fileconf location of the configuration file
    @param module_name name of the module
    @param author author
    @param year year
    @param theme theme to use
    @param theme_path themepath
    @param ext_locals context (see `locals <https://docs.python.org/2/library/functions.html#locals>`_)
    @param add_extensions additional extensions
    @param bootswatch_theme for example, ``spacelab``, look at `spacelab <http://bootswatch.com/spacelab/>`_
    @param bootswatch_navbar_links see `sphinx-bootstrap-theme <http://ryan-roemer.github.io/sphinx-bootstrap-theme/README.html>`_
    @param description_latex description latex
    @param use_mathjax set up the documentation to use mathjax, see
        `sphinx.ext.mathjax <http://sphinx-doc.org/ext/math.html?highlight=math#module-sphinx.ext.mathjax>`_
    @param use_lunrsearch suggest autocompletion in sphinx, see
        `sphinxcontrib-lunrsearch <https://github.com/rmcgibbo/sphinxcontrib-lunrsearch>`_
    @param enable_disabled_parts @see fn remove_undesired_part_for_documentation
    @param sharepost add share button to share blog post on usual networks
    @param custom_style custom style sheet
    @param extlinks parameter `extlinks <http://www.sphinx-doc.org/en/stable/ext/extlinks.html#confval-extlinks>`_,
        example: ``{'issue': ('https://github.com/sdpython/pyquickhelper/issues/%s', 'issue {0} on GitHub')}``
    @param github_user git(hub) user
    @param github_repo git(hub) project
    @param title if not None, use *title* instead of *module_name* as a title
    @param book the output is a book
    @param link_resolve url where the documentation is published,
        used for parameter *linkcode_resolve*

    If the parameter *custom_style* is not None, it will call
    ``app.add_stylesheet(custom_style)`` in the setup.

    .. exref::
        :title: Simple configuration file for Sphinx

        The file ``conf.py`` could just contain::

            # -*- coding: utf-8 -*-
            import sys, os, datetime, re
            import solar_theme
            from pyquickhelper.helpgen.default_conf import set_sphinx_variables

            sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0])))
            set_sphinx_variables(__file__, "pyquickhelper", "Xavier Dupré", 2014,
                                 "solar_theme", solar_theme.theme_path, locals())

            # custom settings
            ...

    *setup.py* must contain a string such as ``__version__ = 3.4``.
    Close to the setup, there must be a file ``version.txt``.
    You overwrite a value by giving a variable another value after the
    function is called.

    Some parts of the code can be disabled before generating the
    documentation. Those parts are surrounded by::

        # -- HELP BEGIN EXCLUDE --
        import module
        # -- HELP END EXCLUDE --

    If *enable_disabled_parts* is set to a string, these sections will become::

        # -- HELP BEGIN EXCLUDE --
        if hasattr(sys, <enable_disabled_parts>) and sys.<enable_disabled_parts>:
            import module
        # -- HELP END EXCLUDE --

    .. versionchanged:: 1.4
        Add parameters *extlinks*, *github_user*, *github_repo*, *title*.
        Add extension
        `extlinks <http://www.sphinx-doc.org/en/stable/ext/extlinks.html#module-sphinx.ext.extlinks>`_.
    """
    # version.txt is expected three directories above the configuration file
    dirconf = os.path.abspath(os.path.dirname(fileconf))
    version_file = os.path.join(dirconf, "..", "..", "..", "version.txt")
    if not os.path.exists(version_file):
        warnings.warn(
            "File '{0}' must contain the commit number (or last part of the version).".format(version_file))
        first_line = "0"
    else:
        first_line = get_first_line(version_file)

    # language
    language = "en"

    # main version
    version = extract_version_from_setup(fileconf)

    # settings sphinx
    pygments_style = 'sphinx'

    # personnalization
    project_var_name = module_name
    author = author
    year = str(year)
    modindex_common_prefix = [project_var_name + ".", ]
    project = (project_var_name + ' documentation') if title is None else title
    copyright = str(year) + ", " + author
    release = '%s.%s' % (version, first_line)
    html_title = ("%s %s" % (project_var_name, release)
                  ) if title is None else title
    htmlhelp_basename = '%s_doc' % project_var_name
    enable_disabled_parts = enable_disabled_parts

    # personnalization latex
    _proj = project_var_name.replace("_", "\\_")
    latex_book = book
    latex_use_parts = False
    latex_documents = [('index', '%s_doc.tex' % project_var_name, _proj if title is None else title,
                        author, 'manual', True), ]
    man_pages = [('index', '%s_doc' % project_var_name,
                  ('%s Documentation' % _proj) if title is None else title,
                  [author], 1)]
    texinfo_documents = [('index',
                          ('%s documentation' %
                           _proj) if title is None else title,
                          ('%s' % _proj) if title is None else title,
                          author,
                          ('%s documentation' %
                           _proj) if title is None else title,
                          description_latex,
                          'Miscellaneous'),
                         ]
    latex_show_pagerefs = True
    # LaTeX lines are flush-left so no dedenting post-processing is required
    preamble = '''
\\usepackage{etex}
\\usepackage{fixltx2e} % LaTeX patches, \\textsubscript
\\usepackage{cmap} % fix search and cut-and-paste in Acrobat
\\usepackage[raccourcis]{fast-diagram}
\\usepackage{titlesec}
\\usepackage{amsmath}
\\usepackage{amssymb}
\\usepackage{amsfonts}
\\usepackage{graphics}
\\usepackage{epic}
\\usepackage{eepic}
%\\usepackage{pict2e}
%%% Redefined titleformat
\\setlength{\\parindent}{0cm}
\\setlength{\\parskip}{1ex plus 0.5ex minus 0.2ex}
\\newcommand{\\hsp}{\\hspace{20pt}}
\\newcommand{\\acc}[1]{\\left\\{#1\\right\\}}
\\newcommand{\\cro}[1]{\\left[#1\\right]}
\\newcommand{\\pa}[1]{\\left(#1\\right)}
\\newcommand{\\R}{\\mathbb{R}}
\\newcommand{\\HRule}{\\rule{\\linewidth}{0.5mm}}
%\\titleformat{\\chapter}[hang]{\\Huge\\bfseries\\sffamily}{\\thechapter\\hsp}{0pt}{\\Huge\\bfseries\\sffamily}
'''
    latex_elements = {'papersize': 'a4', 'pointsize': '10pt',
                      'preamble': preamble,
                      }

    # pyquickhelper automation
    auto_rst_generation = True
    # latex_additional_files = ["mfgan-bw.sty", "_static/cover.png"]

    # figure
    numfig = False

    # theme
    html_theme = theme
    # NOTE(review): 'shtml_theme_options' looks like a typo for
    # 'html_theme_options'; kept as-is because renaming it would change the
    # exported configuration -- confirm before fixing.
    shtml_theme_options = {"bodyfont": "Calibri"}
    if theme_path is not None:
        if isinstance(theme_path, list):
            html_theme_path = theme_path
        else:
            html_theme_path = [theme_path]

    # static files
    html_logo = "project_ico.png"
    html_favicon = "project_ico.ico"
    html_static_path = ['phdoc_static']
    templates_path = ['phdoc_templates']

    # extensions, encoding
    source_suffix = '.rst'
    source_encoding = 'utf-8'
    master_doc = 'index'
    html_output_encoding = 'utf-8'

    # blogs (custom parameter)
    blog_background = True
    blog_background_page = False
    sharepost = sharepost

    # settings
    exclude_patterns = ["*.py", "**/*.py"]
    html_show_sphinx = False
    html_show_copyright = False
    __html_last_updated_fmt_dt = datetime.datetime.now()
    html_last_updated_fmt = '%04d-%02d-%02d' % (
        __html_last_updated_fmt_dt.year,
        __html_last_updated_fmt_dt.month,
        __html_last_updated_fmt_dt.day)
    autoclass_content = 'both'
    autosummary_generate = True

    # import helpers to find tools to build the documentation
    from .conf_path_tools import find_latex_path, find_graphviz_dot

    # graphviz
    graphviz_output_format = "svg"
    graphviz_dot = find_graphviz_dot()

    # todo, mathdef, blocref, faqref, exref, nbref
    todo_include_todos = True
    todoext_include_todosext = True
    mathdef_include_mathsext = True
    blocref_include_blocrefs = True
    faqref_include_faqrefs = True
    exref_include_exrefs = True
    nbref_include_nbrefs = True
    mathdef_link_number = "{first_letter}{number}"

    # extensions
    extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.coverage',
                  'sphinx.ext.extlinks', 'sphinx.ext.graphviz', 'sphinx.ext.ifconfig',
                  'sphinx.ext.inheritance_diagram',
                  'sphinx.ext.mathjax' if use_mathjax else 'sphinx.ext.imgmath',
                  'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx.ext.viewcode',
                  'sphinxcontrib.images', 'sphinxcontrib.imagesvg', 'sphinxcontrib.jsdemo',
                  # 'matplotlib.sphinxext.only_directives',
                  # 'matplotlib.sphinxext.mathmpl',
                  # 'matplotlib.sphinxext.only_directives',
                  'matplotlib.sphinxext.plot_directive',
                  # 'matplotlib.sphinxext.ipython_directive',
                  'jupyter_sphinx.embed_widgets',
                  "nbsphinx",
                  'pyquickhelper.sphinxext.sphinx_rst_builder',
                  ]

    if use_lunrsearch:
        extensions.append('sphinxcontrib.lunrsearch')

    if not use_mathjax:
        # extensions.append('matplotlib.sphinxext.mathmpl')
        # this extension disables sphinx.ext.imgmath
        pass

    if not use_mathjax:
        # locate latex and dvipng for sphinx.ext.imgmath
        sep = ";" if sys.platform.startswith("win") else ":"
        imgmath_latex = find_latex_path()
        if sys.platform.startswith("win"):
            imgmath_dvipng = os.path.join(imgmath_latex, "dvipng.exe")
            if not os.path.exists(imgmath_dvipng):
                raise FileNotFoundError(imgmath_dvipng)
        else:
            imgmath_dvipng = os.path.join(imgmath_latex, "dvipng")
        # NOTE(review): env_path is updated but never written back to
        # os.environ, so this has no visible effect -- confirm intent.
        env_path = os.environ.get("PATH", "")
        if imgmath_latex not in env_path:
            if len(env_path) > 0:
                env_path += sep
            env_path += imgmath_latex
        if sys.platform.startswith("win"):
            imgmath_latex = os.path.join(imgmath_latex, "latex.exe")
        else:
            imgmath_latex = os.path.join(imgmath_latex, "latex")
        # verification
        if sys.platform.startswith("win"):
            if not os.path.exists(imgmath_latex):
                raise FileNotFoundError(imgmath_latex)
            if not os.path.exists(imgmath_dvipng):
                raise FileNotFoundError(imgmath_dvipng)
        else:
            # TODO: check on linux
            pass

    # bokeh support is optional
    try:
        import bokeh
        extensions.append('%s.sphinxext.bokeh_plot' % bokeh.__name__)
        # the indirection through __name__ avoids being noticed by
        # flake8 or pycodestyle
    except ImportError:
        # bokeh is not installed
        pass

    if add_extensions is not None:
        extensions.extend(add_extensions)

    # add_function_parentheses = True
    # add_module_names = True
    # show_authors = False
    # html_sidebars = {}
    # html_additional_pages = {}
    # html_domain_indices = True
    # html_use_index = True
    # html_split_index = False
    # html_show_sourcelink = True
    # html_use_opensearch = ''
    # html_file_suffix = None
    # latex_logo = None
    latex_show_urls = 'footnote'
    # latex_appendices = []
    # latex_domain_indices = True
    # texinfo_appendices = []
    # texinfo_domain_indices = True
    # texinfo_show_urls = 'footnote'

    # it modifies the set of things to display inside the sidebar
    # see http://www.sphinx-doc.org/en/stable/config.html#confval-html_sidebars
    html_sidebars = {
        '[!blog]**': ['searchbox.html', 'moduletoc.html', 'relations.html', 'sourcelink.html', ],
        'blog/**': ['searchbox.html', 'blogtoc.html', 'localtoc.html', 'sourcelink.html', ],
    }

    # tpl_role
    from ..sphinxext.documentation_link import python_link_doc
    tpl_template = {'py': python_link_doc}

    # epkg_role
    epkg_dictionary = {
        '7z': "http://www.7-zip.org/",
        'Anaconda': 'http://continuum.io/downloads',
        'appveyor': 'https://www.appveyor.com/',
        'class Sphinx': 'https://github.com/sphinx-doc/sphinx/blob/master/sphinx/application.py#L107',
        'codecov': 'https://codecov.io/',
        'coverage': 'https://pypi.python.org/pypi/coverage',
        'cryptography': 'http://cryptography.readthedocs.org/',
        'docutils': 'http://docutils.sourceforge.net/',
        'GIT': 'http://git-scm.com/',
        'git': 'http://git-scm.com/',
        'Git': 'http://git-scm.com/',
        'GitHub': 'https://github.com/',
        'GraphViz': 'http://www.graphviz.org/',
        'Graphviz': 'http://www.graphviz.org/',
        'Inkscape': 'https://inkscape.org/',
        'InkScape': 'https://inkscape.org/',
        'Java': 'http://www.java.com/fr/download/',
        'Jenkins': 'https://jenkins-ci.org/',
        'jinja2': 'http://jinja.pocoo.org/docs/',
        'Jupyter': 'http://jupyter.org/',
        'jupyter': 'http://jupyter.org/',
        'mako': 'http://www.makotemplates.org/',
        "matplotlib": "https://matplotlib.org/index.html",
        'mistune': 'https://pypi.python.org/pypi/mistune',
        'MiKTeX': 'http://miktex.org/',
        'MinGW': 'http://www.mingw.org/',
        'nbconvert': 'http://nbconvert.readthedocs.io/en/latest/',
        'nbpresent': 'https://github.com/Anaconda-Platform/nbpresent',
        'nose': 'https://pypi.python.org/pypi/nose',
        'numpy': ('http://www.numpy.org/',
                  ('http://docs.scipy.org/doc/numpy/reference/generated/numpy.{0}.html', 1)),
        'pandas': ('http://pandas.pydata.org/pandas-docs/stable/',
                   ('http://pandas.pydata.org/pandas-docs/stable/generated/pandas.{0}.html', 1)),
        'pandoc': 'http://johnmacfarlane.net/pandoc/',
        'PEP8': 'https://www.python.org/dev/peps/pep-0008/',
        'Pillow': 'http://pillow.readthedocs.io/',
        'pycodestyle': 'http://pycodestyle.readthedocs.io/',
        'pycrypto': 'https://pypi.python.org/pypi/pycrypto',
        'pygments': 'http://pygments.org/',
        'pylzma': 'https://pypi.python.org/pypi/pylzma',
        'python': 'http://www.python.org/',
        'Python': 'http://www.python.org/',
        'python-jenkins': 'http://python-jenkins.readthedocs.org/en/latest/',
        'pywin32': 'https://sourceforge.net/projects/pywin32/',
        'reveal.js': 'https://github.com/hakimel/reveal.js/releases',
        'sphinx': 'http://www.sphinx-doc.org/en/stable/',
        'Sphinx': 'http://www.sphinx-doc.org/en/stable/',
        'SVN': 'https://subversion.apache.org/',
        'travis': 'https://travis-ci.org/',
        'Visual Studio Community Edition': 'https://www.visualstudio.com/',
        'Visual Studio Community Edition 2015': 'https://imagine.microsoft.com/en-us/Catalog/Product/101',
        '*py': ('https://docs.python.org/3/',
                ('https://docs.python.org/3/library/{0}.html', 1),
                ('https://docs.python.org/3/library/{0}.html#{0}.{1}', 2)),
        '*pyf': (('https://docs.python.org/3/library/functions.html#{0}', 1),),
        # Custom.
        'jyquickhelper': 'http://www.xavierdupre.fr/app/jyquickhelper/helpsphinx/index.html',
        'pymyinstall': 'http://www.xavierdupre.fr/app/pymyinstall/helpsphinx/index.html',
        'pyquickhelper': 'http://www.xavierdupre.fr/app/pyquickhelper/helpsphinx/index.html',
        'pyrsslocal': 'http://www.xavierdupre.fr/app/pyrsslocal/helpsphinx/index.html',
        # Specific.
        'datetime.datetime.strptime': 'https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior',
    }

    # latex
    math_number_all = False
    imgmath_latex_preamble = """
\\usepackage{epic}
\\newcommand{\\acc}[1]{\\left\\{#1\\right\\}}
\\newcommand{\\cro}[1]{\\left[#1\\right]}
\\newcommand{\\pa}[1]{\\left(#1\\right)}
\\newcommand{\\R}{\\mathbb{R}}
"""

    # post processing of the full latex file
    # it should be a function, None by default
    custom_latex_processing = None

    releases_release_uri = "https://pypi.python.org/pypi/{0}/%s".format(
        module_name)
    releases_document_name = "HISTORY.rst"

    # github or git link
    if github_user:
        releases_issue_uri = "https://github.com/{0}/{1}/issues/%s".format(
            github_user, module_name)
        githublink_options = dict(user=github_user)
        github_anchor = "source on GitHub"
    else:
        githublink_options = None
    if github_repo:
        if githublink_options is None:
            githublink_options = {}
        value = github_repo.strip("/").split("/")[-1]
        if value.endswith(".git"):
            value = value[:-4]
        githublink_options['project'] = value
        if 'anchor' not in githublink_options and "github" in github_repo.lower():
            githublink_options['anchor'] = "source on GitHub"

    if extlinks is None:
        extlinks = dict()
    elif 'issue' in extlinks:
        # derive github options from the issue url, e.g.
        # https://github.com/<user>/<project>/issues/%s
        issue = extlinks['issue'][0].split('/')
        le = len(issue)
        if le > 0:
            user = issue[-4]
            project = issue[-3]
            if githublink_options is None:
                githublink_options = {}
            if 'user' not in githublink_options:
                githublink_options["user"] = user
            if 'project' not in githublink_options:
                githublink_options["project"] = project
            if 'anchor' not in githublink_options and 'github' in extlinks['issue'][0].lower():
                githublink_options["anchor"] = 'source on GitHub'
            if not github_repo and extlinks['issue'][0].startswith("https://github.com"):
                github_repo = "https://github.com/{0}/{1}.git".format(
                    user, project)

    # themes
    if html_theme == "bootstrap":
        if bootswatch_navbar_links is None:
            bootswatch_navbar_links = []
        html_logo = "project_ico_small.png"
        navbar_links = bootswatch_navbar_links
        html_theme_options = {
            'navbar_title': "home",
            'navbar_site_name': "Site",
            'navbar_links': navbar_links,
            'navbar_sidebarrel': True,
            'navbar_pagenav': True,
            'navbar_pagenav_name': "Page",
            'globaltoc_depth': 3,
            'globaltoc_includehidden': "true",
            'navbar_class': "navbar navbar-inverse",
            'navbar_fixed_top': "true",
            'source_link_position': "footer",
            'bootswatch_theme': bootswatch_theme,
            'bootstrap_version': "3",
        }
    elif html_theme == "guzzle_sphinx_theme":
        html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
        if "guzzle_sphinx_theme" not in extensions:
            extensions.append('guzzle_sphinx_theme')
        html_theme_options = {
            "project_nav_name": module_name,
            # specified, then no sitemap will be built.
            # "base_url": ""
            # "homepage": "index",
            # "projectlink": "http://myproject.url",
        }
    elif html_theme == "foundation_sphinx_theme":
        import foundation_sphinx_theme
        html_theme_path = foundation_sphinx_theme.HTML_THEME_PATH
        if "foundation_sphinx_theme" not in extensions:
            extensions.append('foundation_sphinx_theme')
        html_theme_options = {
            'logo_screen': 'project_ico.png',
            'logo_mobile': 'project_ico.ico',
            'favicon': 'project_ico.ico',
            'github_user': github_user,
            'github_repo': github_repo,
        }
        pygments_style = 'monokai'

    # mapping
    intersphinx_mapping = {'python': (
        'https://docs.python.org/{0}.{1}'.format(*(sys.version_info[:2])), None)}
    intersphinx_mapping['matplotlib'] = ('http://matplotlib.org/', None)
    try:
        import numpy
        intersphinx_mapping['numpy'] = (
            'http://www.numpy.org/{0}'.format(numpy.__version__), None)
    except ImportError:
        pass
    try:
        import pandas
        intersphinx_mapping['pandas'] = (
            'http://pandas.pydata.org/pandas-docs/version/{0}'.format(pandas.__version__), None)
    except ImportError:
        pass

    # disable some checkings
    check_ie_layout_html = False

    # information about code
    def linkcode_resolve_function(domain, info):
        # map a documented python module to its published source url
        if link_resolve is None:
            return None
        if domain != 'py':
            return None
        if not info['module']:
            return None
        filename = info['module'].replace('.', '/')
        return "%s/%s.py" % (link_resolve, filename)

    if link_resolve is not None:
        linkcode_resolve = linkcode_resolve_function
        extensions.append("sphinx.ext.linkcode")

    # commit modification
    def modify_commit_function(nbch, date, author, comment):
        # shorten email-style author names to the part before '@'
        if author is not None and "@" in author:
            author = author.split("@")[0]
        return nbch, date, author, comment

    modify_commit = modify_commit_function

    # sphinx gallery
    backreferences_dir = "backreferences_dir"
    dirname = os.path.dirname(fileconf)
    exa = os.path.join(dirname, "..", "..", "..", "_doc", "examples")
    if os.path.exists(exa):
        exa = os.path.normpath(exa)
        import pathlib
        pp = pathlib.Path(exa)
        readmes = pp.glob("**/README.txt")
        examples_dirs = []
        gallery_dirs = []
        for res in readmes:
            # skip temporary build folders
            last = res.parts[-2]
            if last.startswith("temp_"):
                continue
            parts = last.replace("\\", "/").split("/")
            if any(filter(lambda x: x.startswith("temp_"), parts)):
                continue
            nn = res.parent
            examples_dirs.append(str(nn))
            if last in ("notebooks", "examples"):
                last = "gy" + last
            dest = os.path.join(dirname, last)
            if dest in gallery_dirs:
                raise ValueError(
                    "Gallery '{0}' already exists (source='{1}', last={2}).".format(dest, nn, last))
            gallery_dirs.append(dest)
        extensions.append('sphinx_gallery.gen_gallery')
        if len(examples_dirs) == 0:
            # fixed: was '.foramt(exa)' which raised AttributeError instead
            # of the intended ValueError
            raise ValueError(
                "Unable to find any 'README.txt' in '{0}'.".format(exa))
        reference_url = {k: v[0] for k, v in intersphinx_mapping.items()}
        example_dir = os.path.join(dirname, "gallery")
        if not os.path.exists(example_dir):
            os.makedirs(example_dir)
        sphinx_gallery_conf = {
            'doc_module': (module_name),
            'reference_url': {},
            'examples_dirs': examples_dirs,
            'gallery_dirs': gallery_dirs,
            'backreferences_dir': example_dir,
            'expected_failing_examples': [],
        }

    # collect local variables: every non-underscore local becomes a
    # configuration value in the caller's namespace
    loc = locals()
    for k, v in loc.items():
        if not k.startswith("_"):
            ext_locals[k] = v

    if custom_style is not None:
        # the custom style sheet must live in one of the static folders
        ex = False
        for st in html_static_path:
            full = os.path.join(dirconf, st, custom_style)
            if os.path.exists(full):
                ex = True
                break
        if not ex:
            raise FileNotFoundError("unable to find {0} in\n{1}\nand\n{2}".format(
                custom_style, dirconf, "\n".join(html_static_path)))

    def this_setup(app):
        # register the custom style sheet, then run the standard setup
        if custom_style is not None:
            app.add_stylesheet(custom_style)
        return custom_setup(app, author)

    ext_locals["setup"] = this_setup
#################
# custom functions
#################
def extract_version_from_setup(filename):
    """
    Extract the version from ``setup.py``, assumed to be located three
    directories above *filename* and to define it with a line such as
    ``sversion = "1.2"``.

    @param filename file the lookup starts from (typically ``conf.py``)
    @return the version string, e.g. ``"1.2"``
    @raise FileNotFoundError if ``setup.py`` cannot be located
    @raise Exception if no version or more than one version is found
    """
    setup = os.path.abspath(os.path.split(filename)[0])
    setup = os.path.join(setup, "..", "..", "..", "setup.py")
    if not os.path.exists(setup):
        raise FileNotFoundError("unable to find setup.py, tried: " + setup)
    with open(setup, "r") as f:
        content = f.read()
    exp = re.compile("sversion *= *['\\\"]([0-9.]+?)['\\\"]")
    # renamed from 'all', which shadowed the builtin
    matches = exp.findall(content)
    if len(matches) == 0:
        raise Exception("unable to locate the version from setup.py")
    if len(matches) != 1:
        raise Exception("more than one version was found: " + str(matches))
    return matches[0]
def get_first_line(filename):
    """
    Return the first line of a text file, stripped of spaces and newline
    characters.

    Returns ``"xxx"`` when the file is missing or empty (an empty file
    previously raised an uncaught ``IndexError``).
    """
    try:
        with open(filename, "r") as ff:
            lines = ff.readlines()
        first_line = lines[0].strip(" \n\r") if lines else "xxx"
    except FileNotFoundError:
        first_line = "xxx"
    return first_line
#################
# sphinx functions
#################
def skip(app, what, name, obj, skip, options):
    """
    Tell autodoc whether to skip a member, see `Skipping members
    <http://sphinx-doc.org/ext/autodoc.html#event-autodoc-skip-member>`_.

    Underscore-prefixed members are force-documented (``False``) except a
    handful of dunders which keep autodoc's default decision.
    """
    kept_dunders = ("__qualname__",
                    "__module__",
                    "__dict__",
                    "__doc__",
                    "__weakref__")
    if name.startswith("_") and name not in kept_dunders:
        return False
    return skip
def custom_setup(app, author):
    """
    Register all pyquickhelper extensions on *app*, see `Sphinx core events
    <http://sphinx-doc.org/extdev/appapi.html?highlight=setup#sphinx-core-events>`_.
    """
    from ..sphinxext.sphinx_bigger_extension import setup as setup_bigger
    from ..sphinxext.sphinx_githublink_extension import setup as setup_githublink
    from ..sphinxext.sphinx_blog_extension import setup as setup_blogpost
    from ..sphinxext.sphinx_blocref_extension import setup as setup_blocref
    from ..sphinxext.sphinx_exref_extension import setup as setup_exref
    from ..sphinxext.sphinx_faqref_extension import setup as setup_faqref
    from ..sphinxext.sphinx_mathdef_extension import setup as setup_mathdef
    from ..sphinxext.sphinx_nbref_extension import setup as setup_nbref
    from ..sphinxext.sphinx_runpython_extension import setup as setup_runpython
    from ..sphinxext.sphinx_sharenet_extension import setup as setup_sharenet
    from ..sphinxext.sphinx_todoext_extension import setup as setup_todoext
    from ..sphinxext.sphinx_docassert_extension import setup as setup_docassert
    from ..sphinxext.sphinx_autosignature import setup as setup_signature
    from ..sphinxext.sphinx_template_extension import setup as setup_tpl
    from ..sphinxext.sphinx_cmdref_extension import setup as setup_cmdref
    from ..sphinxext.sphinx_postcontents_extension import setup as setup_postcontents
    from ..sphinxext.sphinx_tocdelay_extension import setup as setup_tocdelay
    from ..sphinxext.sphinx_epkg_extension import setup as setup_epkg
    from ..sphinxext.releases import setup as setup_releases
    from ..sphinxext.sphinx_toctree_extension import setup as setup_toctree
    # from ..sphinxext.sphinx_rst_builder import setup as setup_rst

    app.connect("autodoc-skip-member", skip)
    app.add_config_value('author', author, True)
    # register every extension, in the original order
    for setup_ext in (setup_toctree, setup_runpython, setup_bigger,
                      setup_githublink, setup_sharenet, setup_todoext,
                      setup_blogpost, setup_mathdef, setup_blocref,
                      setup_exref, setup_faqref, setup_nbref,
                      setup_cmdref, setup_signature, setup_docassert,
                      setup_postcontents, setup_tocdelay, setup_tpl,
                      setup_epkg, setup_releases):
        setup_ext(app)
    # setup_rst is already part of the added extensions.
    # from sphinx.util.texescape import tex_replacements
    # tex_replacements += [('oe', '\\oe '), ]
    app.add_javascript("require.js")
    # style for notebooks
    app.add_stylesheet(style_figure_notebook[0])
def get_default_stylesheet():
    """
    Returns the additional style sheets used by the documentation.

    @return list of style sheet objects

    .. versionadded:: 1.5
    """
    filename = "_static/" + style_figure_notebook[0]
    # rel2 = "_static/gallery.css" # This should not be needed for sphinx-gallery.
    return [Stylesheet(rel="stylesheet", title="style_figure_notebook",
                       filename=filename)]
    # Stylesheet(rel="stylesheet", title="sphinx_gallery_missing", filename=rel2)
def get_default_javascript():
    """
    Returns the additional javascript files used by the documentation.

    @return list of files

    .. versionadded:: 1.5
    """
    return ["_static/require.js"]
# TODO: add default websites
# -*- coding: utf-8 -*-
"""
@file
@brief Default values for the Sphinx configuration.
"""
import sys
import os
import datetime
import re
import warnings
from .style_css_template import style_figure_notebook
from sphinx.builders.html import Stylesheet
if sys.version_info[0] == 2:
from codecs import open
FileNotFoundError = Exception
def set_sphinx_variables(fileconf, module_name, author, year, theme, theme_path, ext_locals,
add_extensions=None, bootswatch_theme="spacelab", bootswatch_navbar_links=None,
description_latex="", use_mathjax=False, use_lunrsearch=False,
enable_disabled_parts="enable_disabled_documented_pieces_of_code",
sharepost="facebook-linkedin-twitter-20-body", custom_style=None,
extlinks=None, github_user=None, github_repo=None, title=None,
book=True, link_resolve=None):
"""
Define variables for :epkg:`Sphinx`.
@param fileconf location of the configuration file
@param module_name name of the module
@param author author
@param year year
@param theme theme to use
@param theme_path themepath
@param ext_locals context (see `locals <https://docs.python.org/2/library/functions.html#locals>`_)
@param add_extensions additional extensions
@param bootswatch_theme for example, ``spacelab``, look at `spacelab <http://bootswatch.com/spacelab/>`_
@param bootswatch_navbar_links see `sphinx-bootstrap-theme <http://ryan-roemer.github.io/sphinx-bootstrap-theme/README.html>`_
@param description_latex description latex
@param use_mathjax set up the documentation to use mathjax,
see `sphinx.ext.mathjax <http://sphinx-doc.org/ext/math.html?highlight=math#module-sphinx.ext.mathjax>`_,
default option is True
@param use_lunrsearch suggest autocompletion in sphinx,
see `sphinxcontrib-lunrsearch <https://github.com/rmcgibbo/sphinxcontrib-lunrsearch>`_
@param enable_disabled_parts @see fn remove_undesired_part_for_documentation
@param sharepost add share button to share blog post on usual networks
@param custom_style custom style sheet
@param extlinks parameter `extlinks <http://www.sphinx-doc.org/en/stable/ext/extlinks.html#confval-extlinks>`_,
example: ``{'issue': ('https://github.com/sdpython/pyquickhelper/issues/%s', 'issue {0} on GitHub')}``
@param github_user git(hub) user
@param github_repo git(hub) project
@param title if not None, use *title* instead of *module_name* as a title
@param book the output is a book
@param link_resolve url where the documentation is published,
used for parameter *linkcode_resolve*
If the parameter *custom_style* is not None, it will call ``app.add_stylesheet(custom_style)``
in the setup.
.. exref::
:title: Simple configuration file for Sphinx
We assume a module is configurated using the same
structure as `pyquickhelper <https://github.com/sdpython/pyquickhelper/>`_.
The file ``conf.py`` could just contain:
::
# -*- coding: utf-8 -*-
import sys, os, datetime, re
import solar_theme
from pyquickhelper.helpgen.default_conf import set_sphinx_variables
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0])))
set_sphinx_variables( __file__,
"pyquickhelper",
"Xavier Dupré",
2014,
"solar_theme",
solar_theme.theme_path,
locals())
# custom settings
...
*setup.py* must contain a string such as ``__version__ = 3.4``.
Close to the setup, there must be a file ``version.txt``.
You overwrite a value by giving a variable another value after the fucntion is called.
Some parts of the code can be disabled before generating the documentation.
Those parts are surrounded by::
# -- HELP BEGIN EXCLUDE --
import module
# -- HELP END EXCLUDE --
If *enable_disabled_parts* is set to a string, these sections will become::
# -- HELP BEGIN EXCLUDE --
if hasattr(sys, <enable_disabled_parts>) and sys.<enable_disabled_parts>:
import module
# -- HELP END EXCLUDE --
.. versionchanged:: 1.4
Add parameters *extlinks*, *github_user*, *github_repo*,
*title*. Add extension
`extlinks <http://www.sphinx-doc.org/en/stable/ext/extlinks.html#module-sphinx.ext.extlinks>`_.
"""
# version .txt
dirconf = os.path.abspath(os.path.dirname(fileconf))
version_file = os.path.join(dirconf, "..", "..", "..", "version.txt")
if not os.path.exists(version_file):
warnings.warn(
"File '{0}' must contain the commit number (or last part of the version).".format(version_file))
first_line = "0"
else:
first_line = get_first_line(version_file)
# language
language = "en"
# main version
version = extract_version_from_setup(fileconf)
# settings sphinx
pygments_style = 'sphinx'
# personnalization
project_var_name = module_name
author = author
year = str(year)
modindex_common_prefix = [project_var_name + ".", ]
project = (project_var_name + ' documentation') if title is None else title
copyright = str(year) + ", " + author
release = '%s.%s' % (version, first_line)
html_title = ("%s %s" % (project_var_name, release)
) if title is None else title
htmlhelp_basename = '%s_doc' % project_var_name
enable_disabled_parts = enable_disabled_parts
# personnalization latex
_proj = project_var_name.replace("_", "\\_")
latex_book = book
latex_use_parts = False
latex_documents = [('index', '%s_doc.tex' % project_var_name, _proj if title is None else title,
author, 'manual', True), ]
man_pages = [('index', '%s_doc' % project_var_name,
('%s Documentation' % _proj) if title is None else title,
[author], 1)]
texinfo_documents = [('index',
('%s documentation' %
_proj) if title is None else title,
('%s' % _proj) if title is None else title,
author,
('%s documentation' %
_proj) if title is None else title,
description_latex,
'Miscellaneous'),
]
latex_show_pagerefs = True
preamble = '''
\\usepackage{etex}
\\usepackage{fixltx2e} % LaTeX patches, \\textsubscript
\\usepackage{cmap} % fix search and cut-and-paste in Acrobat
\\usepackage[raccourcis]{fast-diagram}
\\usepackage{titlesec}
\\usepackage{amsmath}
\\usepackage{amssymb}
\\usepackage{amsfonts}
\\usepackage{graphics}
\\usepackage{epic}
\\usepackage{eepic}
%\\usepackage{pict2e}
%%% Redefined titleformat
\\setlength{\\parindent}{0cm}
\\setlength{\\parskip}{1ex plus 0.5ex minus 0.2ex}
\\newcommand{\\hsp}{\\hspace{20pt}}
\\newcommand{\\acc}[1]{\\left\\{#1\\right\\}}
\\newcommand{\\cro}[1]{\\left[#1\\right]}
\\newcommand{\\pa}[1]{\\left(#1\\right)}
\\newcommand{\\R}{\\mathbb{R}}
\\newcommand{\\HRule}{\\rule{\\linewidth}{0.5mm}}
%\\titleformat{\\chapter}[hang]{\\Huge\\bfseries\\sffamily}{\\thechapter\\hsp}{0pt}{\\Huge\\bfseries\\sffamily}
'''.replace(" ", "")
latex_elements = {'papersize': 'a4', 'pointsize': '10pt',
'preamble': preamble,
}
# pyquickhelper automation
auto_rst_generation = True
# latex_additional_files = ["mfgan-bw.sty", "_static/cover.png"]
# figure
numfig = False
# theme
html_theme = theme
shtml_theme_options = {"bodyfont": "Calibri"}
if theme_path is not None:
if isinstance(theme_path, list):
html_theme_path = theme_path
else:
html_theme_path = [theme_path]
# static files
html_logo = "project_ico.png"
html_favicon = "project_ico.ico"
html_static_path = ['phdoc_static']
templates_path = ['phdoc_templates']
# extensions, encoding
source_suffix = '.rst'
source_encoding = 'utf-8'
master_doc = 'index'
html_output_encoding = 'utf-8'
# blogs (custom parameter)
blog_background = True
blog_background_page = False
sharepost = sharepost
# settings
exclude_patterns = ["*.py", "**/*.py"]
html_show_sphinx = False
html_show_copyright = False
__html_last_updated_fmt_dt = datetime.datetime.now()
html_last_updated_fmt = '%04d-%02d-%02d' % (
__html_last_updated_fmt_dt.year,
__html_last_updated_fmt_dt.month,
__html_last_updated_fmt_dt.day)
autoclass_content = 'both'
autosummary_generate = True
# import helpers to find tools to build the documentation
from .conf_path_tools import find_latex_path, find_graphviz_dot
# graphviz
graphviz_output_format = "svg"
graphviz_dot = find_graphviz_dot()
# todo, mathdef, blocref, faqref, exref, nbref
todo_include_todos = True
todoext_include_todosext = True
mathdef_include_mathsext = True
blocref_include_blocrefs = True
faqref_include_faqrefs = True
exref_include_exrefs = True
nbref_include_nbrefs = True
mathdef_link_number = "{first_letter}{number}"
# extensions
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.coverage',
'sphinx.ext.extlinks', 'sphinx.ext.graphviz', 'sphinx.ext.ifconfig',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.mathjax' if use_mathjax else 'sphinx.ext.imgmath',
'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx.ext.viewcode',
'sphinxcontrib.images', 'sphinxcontrib.imagesvg', 'sphinxcontrib.jsdemo',
# 'matplotlib.sphinxext.only_directives',
# 'matplotlib.sphinxext.mathmpl',
# 'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
# 'matplotlib.sphinxext.ipython_directive',
'jupyter_sphinx.embed_widgets',
"nbsphinx",
'pyquickhelper.sphinxext.sphinx_rst_builder',
]
if use_lunrsearch:
extensions.append('sphinxcontrib.lunrsearch')
if not use_mathjax:
# extensions.append('matplotlib.sphinxext.mathmpl')
# this extension disables sphinx.ext.imgmath
pass
if not use_mathjax:
sep = ";" if sys.platform.startswith("win") else ":"
imgmath_latex = find_latex_path()
if sys.platform.startswith("win"):
imgmath_dvipng = os.path.join(imgmath_latex, "dvipng.exe")
if not os.path.exists(imgmath_dvipng):
raise FileNotFoundError(imgmath_dvipng)
else:
imgmath_dvipng = os.path.join(imgmath_latex, "dvipng")
env_path = os.environ.get("PATH", "")
if imgmath_latex not in env_path:
if len(env_path) > 0:
env_path += sep
env_path += imgmath_latex
if sys.platform.startswith("win"):
imgmath_latex = os.path.join(imgmath_latex, "latex.exe")
else:
imgmath_latex = os.path.join(imgmath_latex, "latex")
# verification
if sys.platform.startswith("win"):
if not os.path.exists(imgmath_latex):
raise FileNotFoundError(imgmath_latex)
if not os.path.exists(imgmath_dvipng):
raise FileNotFoundError(imgmath_dvipng)
else:
# TODO: check on linux
pass
# bokeh
try:
import bokeh
extensions.append('%s.sphinxext.bokeh_plot' % bokeh.__name__)
# this ticks avoid being noticed by flake8 or pycodestyle
except ImportError as e:
# bokeh is not installed
pass
if add_extensions is not None:
extensions.extend(add_extensions)
# add_function_parentheses = True
# add_module_names = True
# show_authors = False
# html_sidebars = {}
# html_additional_pages = {}
# html_domain_indices = True
# html_use_index = True
# html_split_index = False
# html_show_sourcelink = True
# html_use_opensearch = ''
# html_file_suffix = None
# latex_logo = None
latex_show_urls = 'footnote'
# latex_appendices = []
# latex_domain_indices = True
# texinfo_appendices = []
# texinfo_domain_indices = True
# texinfo_show_urls = 'footnote'
# it modifies the set of things to display inside the sidebar
# see http://www.sphinx-doc.org/en/stable/config.html#confval-html_sidebars
html_sidebars = {
'[!blog]**': ['searchbox.html', 'moduletoc.html', 'relations.html', 'sourcelink.html', ],
'blog/**': ['searchbox.html', 'blogtoc.html', 'localtoc.html', 'sourcelink.html', ],
}
# tpl_role
from ..sphinxext.documentation_link import python_link_doc
tpl_template = {'py': python_link_doc}
# epkg_role
epkg_dictionary = {
'7z': "http://www.7-zip.org/",
'Anaconda': 'http://continuum.io/downloads',
'appveyor': 'https://www.appveyor.com/',
'class Sphinx': 'https://github.com/sphinx-doc/sphinx/blob/master/sphinx/application.py#L107',
'codecov': 'https://codecov.io/',
'coverage': 'https://pypi.python.org/pypi/coverage',
'cryptography': 'http://cryptography.readthedocs.org/',
'docutils': 'http://docutils.sourceforge.net/',
'GIT': 'http://git-scm.com/',
'git': 'http://git-scm.com/',
'Git': 'http://git-scm.com/',
'GitHub': 'https://github.com/',
'GraphViz': 'http://www.graphviz.org/',
'Graphviz': 'http://www.graphviz.org/',
'Inkscape': 'https://inkscape.org/',
'InkScape': 'https://inkscape.org/',
'Java': 'http://www.java.com/fr/download/',
'Jenkins': 'https://jenkins-ci.org/',
'jinja2': 'http://jinja.pocoo.org/docs/',
'Jupyter': 'http://jupyter.org/',
'jupyter': 'http://jupyter.org/',
'mako': 'http://www.makotemplates.org/',
"matplotlib": "https://matplotlib.org/index.html",
'mistune': 'https://pypi.python.org/pypi/mistune',
'MiKTeX': 'http://miktex.org/',
'MinGW': 'http://www.mingw.org/',
'nbconvert': 'http://nbconvert.readthedocs.io/en/latest/',
'nbpresent': 'https://github.com/Anaconda-Platform/nbpresent',
'nose': 'https://pypi.python.org/pypi/nose',
'numpy': ('http://www.numpy.org/',
('http://docs.scipy.org/doc/numpy/reference/generated/numpy.{0}.html', 1)),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/',
('http://pandas.pydata.org/pandas-docs/stable/generated/pandas.{0}.html', 1)),
'pandoc': 'http://johnmacfarlane.net/pandoc/',
'PEP8': 'https://www.python.org/dev/peps/pep-0008/',
'Pillow': 'http://pillow.readthedocs.io/',
'pycodestyle': 'http://pycodestyle.readthedocs.io/',
'pycrypto': 'https://pypi.python.org/pypi/pycrypto',
'pygments': 'http://pygments.org/',
'pylzma': 'https://pypi.python.org/pypi/pylzma',
'python': 'http://www.python.org/',
'Python': 'http://www.python.org/',
'python-jenkins': 'http://python-jenkins.readthedocs.org/en/latest/',
'pywin32': 'https://sourceforge.net/projects/pywin32/',
'reveal.js': 'https://github.com/hakimel/reveal.js/releases',
'scikit-learn': 'http://scikit-learn.org/',
'scipy': 'https://www.scipy.org/',
'sphinx': 'http://www.sphinx-doc.org/en/stable/',
'Sphinx': 'http://www.sphinx-doc.org/en/stable/',
'SVN': 'https://subversion.apache.org/',
'travis': 'https://travis-ci.org/',
'Visual Studio Community Edition': 'https://www.visualstudio.com/',
'Visual Studio Community Edition 2015': 'https://imagine.microsoft.com/en-us/Catalog/Product/101',
'*py': ('https://docs.python.org/3/',
('https://docs.python.org/3/library/{0}.html', 1),
('https://docs.python.org/3/library/{0}.html#{0}.{1}', 2)),
'*pyf': (('https://docs.python.org/3/library/functions.html#{0}', 1),),
# Custom.
'jyquickhelper': 'http://www.xavierdupre.fr/app/jyquickhelper/helpsphinx/index.html',
'pymyinstall': 'http://www.xavierdupre.fr/app/pymyinstall/helpsphinx/index.html',
'pyquickhelper': 'http://www.xavierdupre.fr/app/pyquickhelper/helpsphinx/index.html',
'pyrsslocal': 'http://www.xavierdupre.fr/app/pyrsslocal/helpsphinx/index.html',
# Specific.
'datetime.datetime.strptime': 'https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior',
}
# latex
math_number_all = False
imgmath_latex_preamble = """
\\usepackage{epic}
\\newcommand{\\acc}[1]{\\left\\{#1\\right\\}}
\\newcommand{\\cro}[1]{\\left[#1\\right]}
\\newcommand{\\pa}[1]{\\left(#1\\right)}
\\newcommand{\\R}{\\mathbb{R}}
"""
# post processing of the full latex file
# it should be a function, None by default
custom_latex_processing = None
releases_release_uri = "https://pypi.python.org/pypi/{0}/%s".format(
module_name)
releases_document_name = "HISTORY.rst"
# github or git link
if github_user:
releases_issue_uri = "https://github.com/{0}/{1}/issues/%s".format(
github_user, module_name)
githublink_options = dict(user=github_user)
github_anchor = "source on GitHub"
else:
githublink_options = None
if github_repo:
if githublink_options is None:
githublink_options = {}
value = github_repo.strip("/").split("/")[-1]
if value.endswith(".git"):
value = value[:-4]
githublink_options['project'] = value
if 'anchor' not in githublink_options and "github" in github_repo.lower():
githublink_options['anchor'] = "source on GitHub"
if extlinks is None:
extlinks = dict()
elif 'issue' in extlinks:
issue = extlinks['issue'][0].split('/')
le = len(issue)
if le > 0:
user = issue[-4]
project = issue[-3]
if githublink_options is None:
githublink_options = {}
if 'user' not in githublink_options:
githublink_options["user"] = user
if 'project' not in githublink_options:
githublink_options["project"] = project
if 'anchor' not in githublink_options and 'github' in extlinks['issue'][0].lower():
githublink_options["anchor"] = 'source on GitHub'
if not github_repo and extlinks['issue'][0].startswith("https://github.com"):
github_repo = "https://github.com/{0}/{1}.git".format(
user, project)
# themes
if html_theme == "bootstrap":
if bootswatch_navbar_links is None:
bootswatch_navbar_links = []
html_logo = "project_ico_small.png"
navbar_links = bootswatch_navbar_links
html_theme_options = {
'navbar_title': "home",
'navbar_site_name': "Site",
'navbar_links': navbar_links,
'navbar_sidebarrel': True,
'navbar_pagenav': True,
'navbar_pagenav_name': "Page",
'globaltoc_depth': 3,
'globaltoc_includehidden': "true",
'navbar_class': "navbar navbar-inverse",
'navbar_fixed_top': "true",
'source_link_position': "footer",
'bootswatch_theme': bootswatch_theme,
'bootstrap_version': "3",
}
elif html_theme == "guzzle_sphinx_theme":
html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
if "guzzle_sphinx_theme" not in extensions:
extensions.append('guzzle_sphinx_theme')
html_theme_options = {
"project_nav_name": module_name,
# specified, then no sitemap will be built.
# "base_url": ""
# "homepage": "index",
# "projectlink": "http://myproject.url",
}
elif html_theme == "foundation_sphinx_theme":
import foundation_sphinx_theme
html_theme_path = foundation_sphinx_theme.HTML_THEME_PATH
if "foundation_sphinx_theme" not in extensions:
extensions.append('foundation_sphinx_theme')
html_theme_options = {
'logo_screen': 'project_ico.png',
'logo_mobile': 'project_ico.ico',
'favicon': 'project_ico.ico',
'github_user': github_user,
'github_repo': github_repo,
}
pygments_style = 'monokai'
# mapping
intersphinx_mapping = {'python': (
'https://docs.python.org/{0}.{1}'.format(*(sys.version_info[:2])), None)}
intersphinx_mapping['matplotlib'] = ('http://matplotlib.org/', None)
try:
import numpy
intersphinx_mapping['numpy'] = (
'http://www.numpy.org/{0}'.format(numpy.__version__), None)
except ImportError:
pass
try:
import pandas
intersphinx_mapping['pandas'] = (
'http://pandas.pydata.org/pandas-docs/version/{0}'.format(pandas.__version__), None)
except ImportError:
pass
# disable some checkings
check_ie_layout_html = False
# information about code
def linkcode_resolve_function(domain, info):
if link_resolve is None:
return None
if domain != 'py':
return None
if not info['module']:
return None
filename = info['module'].replace('.', '/')
return "%s/%s.py" % (link_resolve, filename)
if link_resolve is not None:
linkcode_resolve = linkcode_resolve_function
extensions.append("sphinx.ext.linkcode")
# commit modification
def modify_commit_function(nbch, date, author, comment):
if author is not None and "@" in author:
author = author.split("@")[0]
return nbch, date, author, comment
modify_commit = modify_commit_function
# sphinx gallery
backreferences_dir = "backreferences_dir"
dirname = os.path.dirname(fileconf)
exa = os.path.join(dirname, "..", "..", "..", "_doc", "examples")
if os.path.exists(exa):
exa = os.path.normpath(exa)
import pathlib
pp = pathlib.Path(exa)
readmes = pp.glob("**/README.txt")
examples_dirs = []
gallery_dirs = []
for res in readmes:
last = res.parts[-2]
if last.startswith("temp_"):
continue
parts = last.replace("\\", "/").split("/")
if any(filter(lambda x: x.startswith("temp_"), parts)):
continue
nn = res.parent
examples_dirs.append(str(nn))
if last in ("notebooks", "examples"):
last = "gy" + last
dest = os.path.join(dirname, last)
if dest in gallery_dirs:
raise ValueError(
"Gallery '{0}' already exists (source='{1}', last={2}).".format(dest, nn, last))
gallery_dirs.append(dest)
extensions.append('sphinx_gallery.gen_gallery')
if len(examples_dirs) == 0:
raise ValueError(
"Unable to find any 'README.txt' in '{0}'.".foramt(exa))
reference_url = {k: v[0] for k, v in intersphinx_mapping.items()}
example_dir = os.path.join(dirname, "gallery")
if not os.path.exists(example_dir):
os.makedirs(example_dir)
sphinx_gallery_conf = {
'doc_module': (module_name),
'reference_url': {},
'examples_dirs': examples_dirs,
'gallery_dirs': gallery_dirs,
'backreferences_dir': example_dir,
'expected_failing_examples': [],
}
# collect local variables
loc = locals()
for k, v in loc.items():
if not k.startswith("_"):
ext_locals[k] = v
if custom_style is not None:
ex = False
for st in html_static_path:
full = os.path.join(dirconf, st, custom_style)
if os.path.exists(full):
ex = True
break
if not ex:
raise FileNotFoundError("unable to find {0} in\n{1}\nand\n{2}".format(
custom_style, dirconf, "\n".join(html_static_path)))
def this_setup(app):
if custom_style is not None:
app.add_stylesheet(custom_style)
return custom_setup(app, author)
ext_locals["setup"] = this_setup
#################
# custom functions
#################
def extract_version_from_setup(filename):
    """
    Extract the version number from ``setup.py``.

    ``setup.py`` is assumed to be located three folders above *filename*
    (usually the documentation ``conf.py``) and to contain exactly one
    line such as ``sversion = "1.5"``.

    @param      filename    file from which the location of ``setup.py``
                            is deduced
    @return                 version as a string (e.g. ``"1.5"``)

    Raises ``FileNotFoundError`` if ``setup.py`` cannot be found,
    ``Exception`` if zero or more than one version is found.
    """
    setup = os.path.abspath(os.path.split(filename)[0])
    setup = os.path.join(setup, "..", "..", "..", "setup.py")
    if not os.path.exists(setup):
        raise FileNotFoundError("unable to find setup.py, tried: " + setup)
    with open(setup, "r") as f:
        content = f.read()
    exp = re.compile("sversion *= *['\\\"]([0-9.]+?)['\\\"]")
    # 'matches' instead of the original name which shadowed builtin ``all``
    matches = exp.findall(content)
    if len(matches) == 0:
        raise Exception("unable to locate the version from setup.py")
    if len(matches) != 1:
        raise Exception("more than one version was found: " + str(matches))
    return matches[0]
def get_first_line(filename):
    """
    Return the first line of a text file, stripped of spaces and newlines.

    @param      filename    file to read
    @return                 first line, or ``"xxx"`` when the file is
                            missing or empty

    The ``"xxx"`` fallback keeps documentation generation going even when
    ``version.txt`` does not exist; an empty file used to raise an
    uncaught ``IndexError``, it now returns the fallback as well.
    """
    try:
        with open(filename, "r") as ff:
            lines = ff.readlines()
            first_line = lines[0].strip(" \n\r") if lines else "xxx"
    except FileNotFoundError:
        first_line = "xxx"
    return first_line
#################
# sphinx functions
#################
def skip(app, what, name, obj, skip, options):
    """
    Handler for the ``autodoc-skip-member`` event, see
    `Skipping members <http://sphinx-doc.org/ext/autodoc.html#event-autodoc-skip-member>`_.

    Forces the documentation of members whose name starts with an
    underscore, except for a handful of special attributes for which the
    default decision *skip* is kept unchanged.
    """
    special_names = {"__qualname__",
                     "__module__",
                     "__dict__",
                     "__doc__",
                     "__weakref__"}
    if name.startswith("_") and name not in special_names:
        # returning False tells autodoc to keep the member
        return False
    return skip
def custom_setup(app, author):
    """
    Sphinx ``setup`` function: registers this project's custom roles,
    directives and configuration values on the application, see
    `Sphinx core events <http://sphinx-doc.org/extdev/appapi.html?highlight=setup#sphinx-core-events>`_.

    @param      app     Sphinx application
    @param      author  stored as configuration value ``author``
    """
    # Imports are local so that merely importing this module does not pull
    # in every custom extension.
    from ..sphinxext.sphinx_bigger_extension import setup as setup_bigger
    from ..sphinxext.sphinx_githublink_extension import setup as setup_githublink
    from ..sphinxext.sphinx_blog_extension import setup as setup_blogpost
    from ..sphinxext.sphinx_blocref_extension import setup as setup_blocref
    from ..sphinxext.sphinx_exref_extension import setup as setup_exref
    from ..sphinxext.sphinx_faqref_extension import setup as setup_faqref
    from ..sphinxext.sphinx_mathdef_extension import setup as setup_mathdef
    from ..sphinxext.sphinx_nbref_extension import setup as setup_nbref
    from ..sphinxext.sphinx_runpython_extension import setup as setup_runpython
    from ..sphinxext.sphinx_sharenet_extension import setup as setup_sharenet
    from ..sphinxext.sphinx_todoext_extension import setup as setup_todoext
    from ..sphinxext.sphinx_docassert_extension import setup as setup_docassert
    from ..sphinxext.sphinx_autosignature import setup as setup_signature
    from ..sphinxext.sphinx_template_extension import setup as setup_tpl
    from ..sphinxext.sphinx_cmdref_extension import setup as setup_cmdref
    from ..sphinxext.sphinx_postcontents_extension import setup as setup_postcontents
    from ..sphinxext.sphinx_tocdelay_extension import setup as setup_tocdelay
    from ..sphinxext.sphinx_epkg_extension import setup as setup_epkg
    from ..sphinxext.releases import setup as setup_releases
    from ..sphinxext.sphinx_toctree_extension import setup as setup_toctree
    # from ..sphinxext.sphinx_rst_builder import setup as setup_rst
    # decide which members autodoc documents (see function ``skip`` above)
    app.connect("autodoc-skip-member", skip)
    app.add_config_value('author', author, True)
    # register each custom extension; the registration order is kept as-is
    setup_toctree(app)
    setup_runpython(app)
    setup_bigger(app)
    setup_githublink(app)
    setup_sharenet(app)
    setup_todoext(app)
    setup_blogpost(app)
    setup_mathdef(app)
    setup_blocref(app)
    setup_exref(app)
    setup_faqref(app)
    setup_nbref(app)
    setup_cmdref(app)
    setup_signature(app)
    setup_docassert(app)
    setup_postcontents(app)
    setup_tocdelay(app)
    setup_tpl(app)
    setup_epkg(app)
    setup_releases(app)
    # Already part of the added extensions.
    # setup_rst(app)
    # from sphinx.util.texescape import tex_replacements
    # tex_replacements += [('oe', '\\oe '), ]
    app.add_javascript("require.js")
    # style for notebooks
    app.add_stylesheet(style_figure_notebook[0])
def get_default_stylesheet():
    """
    Returns the additional style sheets used by the documentation.

    @return     list of files

    .. versionadded:: 1.5
    """
    filename = "_static/{0}".format(style_figure_notebook[0])
    sheet = Stylesheet(rel="stylesheet", title="style_figure_notebook",
                       filename=filename)
    return [sheet]
def get_default_javascript():
    """
    Returns the additional javascript files used by the documentation.

    @return     list of files

    .. versionadded:: 1.5
    """
    scripts = ["_static/require.js"]
    return scripts
|
from functools import wraps
from app import app
from flask import send_from_directory,session,g,Flask,render_template, redirect, request, url_for, flash
import config
from flask_login import current_user
from models import db, login_data, post_data
@app.route("/")
def home():
    """Landing page: send a logged-in user to his own page, otherwise
    render the base template.

    The original dead assignment ``username = None`` (never read on the
    anonymous path) was removed.
    """
    if 'username' in session:
        return redirect(url_for('user', username=session['username']))
    return render_template("base.html")
def login_required(f):
    """Decorator redirecting anonymous visitors to the login page."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        if 'logged_in' not in session:
            return redirect(url_for('login'))
        return f(*args, **kwargs)
    return wrapper
@app.route("/login", methods=['POST', 'GET'])
def login():
    """Authenticate a user and populate the session.

    On success redirects to the user's page; on failure (or a plain GET)
    silently redirects back home.  The original dead ``else: pass``
    branch was removed.
    """
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        user = login_data.query.filter_by(username=username).first()
        # SECURITY: passwords are stored and compared in plain text --
        # they should be salted hashes (e.g. werkzeug.security
        # generate_password_hash / check_password_hash).
        if user and user.password == password:
            session['logged_in'] = username
            session['user_id'] = user.user_id
            session['username'] = user.username
            return redirect(url_for('user', username=user.username))
    return redirect(url_for('home'))
@app.route("/user/<username>")
@login_required
def user(username):
    """Show the posts of *username*; only the owner may view the page."""
    # guard clause: anybody else is bounced to the 404 page
    if username != session['logged_in']:
        return redirect(url_for('error404'))
    return render_template('user.html', all_posts=show_post(),
                           username=username)
@app.route('/signup', methods=['POST', 'GET'])
def signup():
    """Create a new account unless the username is already taken."""
    error = None
    if request.method == 'POST':
        new_user = login_data(request.form['username'],
                              request.form['password'])
        taken = login_data.query.filter_by(username=new_user.username).first()
        if taken:
            error = "Username Already exists"
        else:
            db.session.add(new_user)
            db.session.commit()
    return render_template("base.html", error=error)
@app.route('/logout')
@login_required
def logout():
    """Clear the session keys set at login and go back home."""
    for key in ('logged_in', 'user_id', 'username'):
        session.pop(key, None)
    return redirect(url_for('home'))
@app.route('/404')
def error404():
    """Render the "page not found" template."""
    return render_template('404.html')
@app.route('/post-new', methods=['POST', 'GET'])
@login_required
def postnew():
    """Store a new blog post for the logged-in user, then go home."""
    if request.method == 'POST':
        form = request.form
        # the model stores date and time as one space-separated string
        timestamp = form['date'] + ' ' + form['time']
        entry = post_data(form['post'], form['title'], form['tags'],
                          timestamp, session['user_id'])
        db.session.add(entry)
        db.session.commit()
    return redirect(url_for('home'))
def show_post():
    # All posts of the current user, newest first.
    # NOTE(review): the string '-date' relies on legacy SQLAlchemy
    # string-based ordering -- confirm it still works with the installed
    # SQLAlchemy version (desc(post_data.date) is the modern spelling).
    all_posts = post_data.query.filter_by(user_id= session['user_id']).order_by('-date')
    return all_posts
@app.route('/search', methods=['POST', 'GET'])
@login_required
def search():
    """Find the current user's posts whose tags match the keyword."""
    results = None
    if request.method == 'POST':
        results = post_data.query.filter_by(tags=request.form['search'],
                                            user_id=session['user_id'])
    return render_template('user.html', all_posts=results,
                           username=session['username'])
Update views.py
from functools import wraps
from app import app
from flask import (send_from_directory,session,g,
Flask,render_template, redirect,
request, url_for, flash)
import config
from flask_login import current_user
from models import db, login_data, post_data
@app.route("/")
def home():
    """Landing page: redirect a logged-in user to his own page,
    otherwise render the base template."""
    username=None
    if 'username' in session:
        username = session['username']
        return redirect(url_for('user',username=username))
    return render_template("base.html")


def login_required(f):
    """Decorator redirecting anonymous visitors to the login page."""
    @wraps(f)
    def temp(*args, **kwargs):
        if 'logged_in' in session:
            return f(*args, **kwargs)
        else:
            return redirect(url_for('login'))
    return temp
@app.route("/login", methods=['POST','GET'])
def login():
    """Authenticate a user and populate the session; on failure (or a
    plain GET) silently redirect back home."""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        user = login_data.query.filter_by(username=username).first()
        # SECURITY: passwords are stored and compared in plain text --
        # they should be salted hashes.
        if user and user.password == password:
            session['logged_in']=username
            session['user_id']=user.user_id
            session['username']=user.username
            return redirect(url_for('user',username=user.username))
        else:
            pass
    return redirect(url_for('home'))


@app.route("/user/<username>")
@login_required
def user(username):
    """Show the posts of *username*; only the owner may view the page."""
    all_posts =None
    if username == session['logged_in']:
        all_posts = show_post()
        return render_template('user.html',all_posts=all_posts, username = username)
    else:
        return redirect(url_for('error404'))
@app.route('/signup', methods=['POST','GET'])
def signup():
    """Create a new account unless the username is already taken."""
    error = None
    if request.method == 'POST':
        user = login_data(request.form['username'], request.form['password'])
        if login_data.query.filter_by(username=user.username).first():
            error="Username Already exists"
        else:
            db.session.add(user)
            db.session.commit()
    return render_template("base.html",error=error)
    # return redirect(url_for('home',error=error))


@app.route('/logout')
@login_required
def logout():
    """Clear the session keys set at login and go back home."""
    session.pop('logged_in', None)
    session.pop('user_id', None)
    session.pop('username', None)
    return redirect(url_for('home'))


@app.route('/404')
def error404():
    """Render the "page not found" template."""
    return render_template('404.html')


@app.route('/post-new', methods = ['POST','GET'])
@login_required
def postnew():
    """Store a new blog post for the logged-in user, then go home."""
    if request.method == 'POST':
        post = request.form['post']
        title = request.form['title']
        tags = request.form['tags']
        date = request.form['date']
        time = request.form['time']
        user_id = session['user_id']
        # the model stores date and time as one space-separated string
        save = post_data(post,title,tags,date+' '+time,user_id)
        db.session.add(save)
        db.session.commit()
    return redirect(url_for('home'))


def show_post():
    # All posts of the current user; '-date' relies on legacy SQLAlchemy
    # string-based ordering -- confirm with the installed version.
    all_posts = post_data.query.filter_by(user_id= session['user_id']).order_by('-date')
    return all_posts


@app.route('/search', methods = ['POST','GET'])
@login_required
def search():
    """Find the current user's posts whose tags match the keyword."""
    results=None
    if request.method == 'POST':
        keyword = request.form['search']
        results = post_data.query.filter_by(tags = keyword, user_id = session['user_id'])
    return render_template('user.html',all_posts=results, username = session['username'])
|
#!/usr/bin/python
# Fixer.io is a free JSON API for current and historical foreign exchange rates.
# It relies on daily feeds published by the European Central Bank.
import requests
from datetime import datetime
BASE_URL = 'http://api.fixer.io/'
# Currencies supported by the fixer.io feed (daily ECB rates).
CURRENCY_CHOICE = ["EUR", "AUD", "BGN", "BRL", "CAD", "CHF", "CNY", "CZK",
                   "DKK", "GBP", "HKD", "HRK", "HUF", "IDR", "ILS",
                   "INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
                   "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB",
                   "TRY", "USD", "ZAR"]


class Fixer(object):
    """
    Client for the fixer.io currency-rate API.

    date:
        Either a date in "yyyy-mm-dd" format (available from 1999),
        either "latest" for the latest date.
        default = "latest"
    base:
        A currency in CURRENCY_CHOICE list.
        Will setup the base currency for conversion.
        default = "EUR"
    symbols:
        Optional list of currencies to restrict the answer to.

    Raises ValueError for an unknown currency or an out-of-range date.
    """

    def __init__(self, date="latest", base="EUR", symbols=None):
        super(Fixer, self).__init__()
        self.symbols_string = ''
        if self.currency_available(base, "Base currency"):
            self.base = base
        if symbols:
            self.symbols = []
            for cur in symbols:
                if self.currency_available(cur, "Symbols currency"):
                    self.symbols.append(cur)
            # BUG FIX: the '%s' placeholder was missing, which made the
            # '%' operator raise "not all arguments converted" at runtime.
            self.symbols_string = 'symbols=%s' % ','.join(self.symbols)
        self.check_date(date)

    def currency_available(self, cur, method=""):
        """Return True when *cur* is supported, raise ValueError otherwise."""
        if cur not in CURRENCY_CHOICE:
            raise ValueError("Currency %s not available through this api" % cur, method)
        return True

    def check_date(self, dt):
        """Validate *dt* and store it in ``self.date``.

        Accepts a ``datetime``, the string ``"latest"`` or a
        ``"yyyy-mm-dd"`` string; raises ValueError for anything else and
        for dates before 1999 or in the future.
        """
        if isinstance(dt, datetime):
            self.date = dt
        elif isinstance(dt, str):
            if dt == "latest":
                self.date = dt
            else:
                # The original "except ValueError, e: raise e" was
                # Python-2-only syntax and a no-op re-raise; strptime's
                # ValueError now simply propagates.
                self.date = datetime.strptime(dt, "%Y-%m-%d")
                if self.date.year < 1999:
                    raise ValueError("Data available from 1999, %s is too old" % self.date.strftime("%Y-%m-%d"))
                if self.date > datetime.now():
                    raise ValueError("%s is in the future, data cannot be found" % self.date.strftime("%Y-%m-%d"))
        else:
            raise ValueError("%s does not match required date format" % dt)

    def convert(self):
        """Query the API and return the decoded JSON payload."""
        url = '%s%s?%s&base=%s' % (BASE_URL, self.date, self.symbols_string, self.base)
        r = requests.get(url).json()
        if 'error' in r:
            raise ReferenceError(r['error'])
        return r
Delete api.py (move content to __init__.py)
|
# Filename: test_math.py
# pylint: disable=locally-disabled,C0111,R0904,C0103
import numpy as np
from numpy.testing import (assert_almost_equal, assert_allclose)
import pytest
from km3pipe.testing import TestCase
from km3pipe.math import (
angle_between, dist, pld3, com, zenith, azimuth, Polygon, IrregularPrism,
rotation_matrix, SparseCone, space_angle, hsin, phi, theta, unit_vector,
innerprod_1d, log_b, qeuler, qrot, qrot_yaw, intersect_3d
)
__author__ = ["Tamas Gal", "Moritz Lotze"]
__copyright__ = "Copyright 2016, KM3Pipe devs and the KM3NeT collaboration."
__credits__ = ["Thomas Heid"]
__license__ = "MIT"
__maintainer__ = ["Tamas Gal", "Moritz Lotze"]
__email__ = "tgal@km3net.de"
__status__ = "Development"
class TestMath(TestCase):
    def setUp(self):
        """Build the fixtures shared by the tests: ``self.v`` is a single
        normalized vector, ``self.vecs`` a (5, 3) array with one vector
        per row.  The values are bit-exact regression inputs."""
        # self.vecs = np.array([[0., 1., 5.],
        #                      [1., 1., 4.],
        #                      [2., 1., 3.],
        #                      [3., 1., 2.],
        #                      [4., 1., 1.]])
        # self.v = (1, 2, 3)
        self.v = np.array([0.26726124, 0.53452248, 0.80178373])
        self.vecs = np.array([[0., 0.19611614, 0.98058068],
                              [0.23570226, 0.23570226, 0.94280904],
                              [0.53452248, 0.26726124, 0.80178373],
                              [0.80178373, 0.26726124, 0.53452248],
                              [0.94280904, 0.23570226, 0.23570226]])
def test_phi(self):
print(phi((1, 0, 0)))
assert_almost_equal(0, phi((1, 0, 0)))
assert_almost_equal(np.pi, phi((-1, 0, 0)))
assert_almost_equal(np.pi / 2, phi((0, 1, 0)))
assert_almost_equal(np.pi / 2 * 3, phi((0, -1, 0)))
assert_almost_equal(np.pi / 2 * 3, phi((0, -1, 0)))
assert_almost_equal(0, phi((0, 0, 0)))
assert_almost_equal(phi(self.v), 1.10714872)
assert_almost_equal(
phi(self.vecs),
np.array([
1.57079633, 0.78539816, 0.46364761, 0.32175055, 0.24497866
])
)
def test_zenith(self):
assert_allclose(np.pi, zenith((0, 0, 1)))
assert_allclose(0, zenith((0, 0, -1)))
assert_allclose(np.pi / 2, zenith((0, 1, 0)))
assert_allclose(np.pi / 2, zenith((0, -1, 0)))
assert_allclose(np.pi / 4 * 3, zenith((0, 1, 1)))
assert_allclose(np.pi / 4 * 3, zenith((0, -1, 1)))
assert_almost_equal(zenith(self.v), 2.5010703409103687)
assert_allclose(
zenith(self.vecs),
np.array([
2.94419709, 2.80175574, 2.50107034, 2.13473897, 1.80873745
])
)
def test_azimuth(self):
self.assertTrue(np.allclose(np.pi, azimuth((1, 0, 0))))
self.assertTrue(np.allclose(0, azimuth((-1, 0, 0))))
print(azimuth((0, 1, 0)))
print(azimuth((0, -1, 0)))
print(azimuth((0, 0, 0)))
print(azimuth(self.v))
print(azimuth(self.vecs))
self.assertTrue(np.allclose(np.pi / 2 * 3, azimuth((0, 1, 0))))
self.assertTrue(np.allclose(np.pi / 2, azimuth((0, -1, 0))))
self.assertTrue(np.allclose(np.pi, azimuth((0, 0, 0))))
self.assertTrue(np.allclose(azimuth(self.v), 4.24874137138))
self.assertTrue(
np.allclose(
azimuth(self.vecs),
np.array([
4.71238898, 3.92699082, 3.60524026, 3.46334321, 3.38657132
])
)
)
def test_theta(self):
print(theta((0, 0, -1)))
print(theta((0, 0, 1)))
print(theta((0, 1, 0)))
print(theta((0, -1, 0)))
print(theta((0, 1, 1)))
print(theta((0, -1, 1)))
print(theta(self.v))
print(theta(self.vecs))
self.assertTrue(np.allclose(0, theta((0, 0, 1))))
self.assertTrue(np.allclose(np.pi, theta((0, 0, -1))))
self.assertTrue(np.allclose(np.pi / 2, theta((0, 1, 0))))
self.assertTrue(np.allclose(np.pi / 2, theta((0, -1, 0))))
self.assertTrue(np.allclose(0, theta((0, 1, 1))))
self.assertTrue(np.allclose(0, theta((0, -1, 1))))
self.assertTrue(np.allclose(theta(self.v), 0.64052231))
self.assertTrue(
np.allclose(
theta(self.vecs),
np.array([
0.19739554, 0.33983691, 0.64052231, 1.00685369, 1.3328552
])
)
)
def test_unit_vector(self):
    """unit_vector scales each input to unit length (norm 1)."""
    cases = [
        ((1, 0, 0), 1.0),
        ((1, 1, 0), np.sqrt(2)),
        ((-1, 2, 0), np.sqrt(5)),
    ]
    for vec, norm in cases:
        assert np.allclose(np.array(vec) / norm, unit_vector(vec))
def test_innerprod_1d(self):
    """Row-wise inner product of two stacks of vectors."""
    lhs = [[1, 2, 3], [4, 5, 6]]
    rhs = [[11, 22, 33], [44, 55, 66]]
    expected = [14, 77]
    assert np.allclose(expected, innerprod_1d(lhs, rhs))
def test_angle_between(self):
    """Angle between two vectors, for single vectors and batches."""
    v1 = (1, 0, 0)
    v2 = (0, 1, 0)
    v3 = (-1, 0, 0)
    self.assertAlmostEqual(0, angle_between(v1, v1))
    self.assertAlmostEqual(np.pi / 2, angle_between(v1, v2))
    self.assertAlmostEqual(np.pi, angle_between(v1, v3))
    self.assertAlmostEqual(angle_between(self.v, v1), 1.3002465638163236)
    self.assertAlmostEqual(angle_between(self.v, v2), 1.0068536854342678)
    self.assertAlmostEqual(angle_between(self.v, v3), 1.8413460897734695)
    # Batched first argument yields one angle per row.
    self.assertTrue(
        np.allclose(
            angle_between(self.vecs, v1),
            np.array([
                1.57079633, 1.3328552, 1.0068537, 0.64052231, 0.33983691
            ])
        )
    )
    self.assertTrue(
        np.allclose(
            angle_between(self.vecs, v2),
            np.array([
                1.37340077, 1.3328552, 1.3002466, 1.30024656, 1.3328552
            ])
        )
    )
    self.assertTrue(
        np.allclose(
            angle_between(self.vecs, v3),
            np.array([
                1.57079633, 1.80873745, 2.13473897, 2.50107034, 2.80175574
            ])
        )
    )
def test_angle_between_returns_nan_for_zero_length_vectors(self):
    """A zero-length vector has no direction: expect NaN and a RuntimeWarning."""
    v1 = (0, 0, 0)
    v2 = (1, 0, 0)
    with pytest.warns(RuntimeWarning):
        self.assertTrue(np.isnan(angle_between(v1, v2)))
def test_space_angle(self):
    """Two (zenith, azimuth) pairs that are a quarter turn apart."""
    right_angle = 1.57079632679489
    pairs = [
        ((np.pi / 2, np.pi), (np.pi, 0)),
        ((0, np.pi), (np.pi / 2, 0)),
    ]
    for (zen_a, azi_a), (zen_b, azi_b) in pairs:
        self.assertAlmostEqual(
            space_angle(zen_a, zen_b, azi_a, azi_b), right_angle
        )
def test_hsin(self):
    """Haversine: hsin(pi) == 1 and hsin(pi/2) == 0.5; accepts array input."""
    assert np.all(hsin((np.pi, 0)) == (1, 0))
    self.assertAlmostEqual(hsin(np.pi / 2), 0.5)
def test_pld3(self):
    """Point-to-line distance in 3D over a range of geometries."""
    # (expected distance, point, point on line, line direction)
    cases = [
        (1, (0, 0, 0), (0, 0, 1), (0, 1, 0)),
        (2, (0, 0, 0), (0, 0, 2), (0, 1, 0)),
        (0, (0, 0, 0), (0, 0, 0), (0, 1, 0)),
        (0.5275893, (1, 2, 3), (4, 5, 6), (7, 8, 9)),
        (1.4142136, (0, 0, 2), (-100, 0, -100), (1, 0, 1)),
        (21.25456308,
         (183., -311., 351.96083871),
         (40.256, -639.888, 921.93),
         (0.185998, 0.476123, -0.859483)),
    ]
    for expected, point, line_point, direction in cases:
        self.assertAlmostEqual(
            expected,
            pld3(np.array(point), np.array(line_point), np.array(direction))
        )
def test_com(self):
    """Centre of mass with uniform and with explicit per-point masses."""
    center_of_mass = com(((1, 2, 3), (4, 5, 6), (7, 8, 9), (10, 11, 12)))
    self.assertEqual((5.5, 6.5, 7.5), tuple(center_of_mass))
    # A zero mass removes the point from the weighted average entirely.
    center_of_mass = com(((1, 2, 3), (4, 5, 6), (7, 8, 9)),
                         masses=(1, 0, 0))
    self.assertEqual((1, 2, 3), tuple(center_of_mass))
    center_of_mass = com(((1, 1, 1), (0, 0, 0)))
    self.assertEqual((0.5, 0.5, 0.5), tuple(center_of_mass))
class TestShapes(TestCase):
    """Point-containment tests for Polygon and IrregularPrism."""

    def setUp(self):
        # Octagonal test polygon shared by every containment test below.
        self.poly = [
            (-60, 120),
            (80, 120),
            (110, 60),
            (110, -30),
            (70, -110),
            (-70, -110),
            (-90, -70),
            (-90, 60),
        ]
    def test_poly_containment(self):
        polygon = Polygon(self.poly)
        point_in = (-40, -40)
        point_out = (-140, -140)
        points = [
            (-40, -40),
            (-140, -140),
            (40, -140),
        ]
        assert np.all(polygon.contains(point_in))
        assert not np.any(polygon.contains(point_out))
        assert np.all(polygon.contains(points) == [True, False, False])
    def test_poly_xy(self):
        polygon = Polygon(self.poly)
        x = (-40, -140, 40)
        y = (-40, -140, -140)
        assert np.all(polygon.contains_xy(x, y) == [True, False, False])
    def test_prism_contained(self):
        z = (-90, 90)
        prism = IrregularPrism(self.poly, z[0], z[1])
        points = [
            (0, 1, 2),
            (-100, 20, 10),
            (10, 90, 10),
        ]
        assert np.all(prism.contains(points) == [True, False, True])
    def test_prism_contained_xyz(self):
        z = (-90, 90)
        prism = IrregularPrism(self.poly, z[0], z[1])
        x = (0, -100, 10)
        y = (1, 20, 90)
        # NOTE: z is rebound here from the extrusion range to test z-coords.
        z = (2, 10, 10)
        assert np.all(prism.contains_xyz(x, y, z) == [True, False, True])
class TestRotation(TestCase):
    """rotation_matrix and SparseCone sampling."""

    def test_rotmat(self):
        v = [3, 5, 0]
        axis = [4, 4, 1]
        theta = 1.2
        newvec = np.dot(rotation_matrix(axis, theta), v)
        self.assertTrue(
            np.allclose(
                newvec, np.array([2.74911638, 4.77180932, 1.91629719])
            )
        )
    def test_cone(self):
        spike = [1, 1, 0]
        bottom = [0, 2, 0]
        angle = np.pi / 4
        n_angles = 20
        cone = SparseCone(spike, bottom, angle)
        circ_samp = cone.sample_circle(n_angles=n_angles)
        axis_samp = cone.sample_axis
        samp = cone.sample(n_angles)
        # sample() = the circle samples plus the two axis points.
        assert len(circ_samp) == n_angles
        assert len(axis_samp) == 2
        assert len(samp) == len(circ_samp) + 2
class TestLog(TestCase):
    """log_b against the numpy reference logarithms."""

    def test_val(self):
        for base, reference in ((2, np.log2), (10, np.log10), (np.e, np.log)):
            assert_allclose(log_b(5, base), reference(5))
class TestQeuler(TestCase):
    """Euler angles (yaw, pitch, roll, in degrees) to quaternion [w, x, y, z]."""

    def test_conversion_of_yaw(self):
        assert np.allclose([1, 0, 0, 0], qeuler(0, 0, 0))
        assert np.allclose([0.7071, 0, 0, 0.7071], qeuler(90, 0, 0))
        assert np.allclose([0, 0, 0, 1], qeuler(180, 0, 0))
        assert np.allclose([-0.7071, 0, 0, 0.7071], qeuler(270, 0, 0))
        # A full turn gives the sign-flipped identity quaternion.
        assert np.allclose([-1, 0, 0, 0], qeuler(360, 0, 0))
    def test_conversion_of_pitch(self):
        assert np.allclose([0.92388, 0, 0.38268, 0], qeuler(0, 45, 0))
        assert np.allclose([0.92388, 0, -0.38268, 0], qeuler(0, -45, 0))
        assert np.allclose([0.7071, 0, 0.7071, 0], qeuler(0, 90, 0))
        assert np.allclose([0.8660254, 0, 0.5, 0], qeuler(0, 60, 0))
        assert np.allclose([-0.96592583, 0, -0.25881905, 0], qeuler(0, 390, 0))
    def test_conversion_of_roll(self):
        assert np.allclose([0.92388, 0.38268, 0, 0], qeuler(0, 0, 45))
        assert np.allclose([0.92388, -0.38268, 0, 0], qeuler(0, 0, -45))
        assert np.allclose([0.70710, 0.70710, 0, 0], qeuler(0, 0, 90))
        assert np.allclose([0.86602, 0.5, 0, 0], qeuler(0, 0, 60))
        assert np.allclose([-0.96592583, -0.25881905, 0, 0], qeuler(0, 0, 390))
    def test_mixed_conversion(self):
        assert np.allclose([0.999471, 0.02601972, 0.01767416, 0.00826538],
                           qeuler(1, 2, 3))
        assert np.allclose([0.94371436, 0.26853582, -0.14487813, 0.12767944],
                           qeuler(10, -20, 30))
        assert np.allclose([-0.16575384, -0.69624819, 0.05479592, -0.69624819],
                           qeuler(-999, 999, -999))
class TestQrot(TestCase):
    """Rotating vectors with quaternions produced by qeuler."""

    def test_rotation_of_x_vector(self):
        assert np.allclose([0, 1, 0], qrot([1, 0, 0], qeuler(90, 0, 0)))
        assert np.allclose([-1, 0, 0], qrot([1, 0, 0], qeuler(180, 0, 0)))
        # Roll about x leaves the x unit vector unchanged.
        assert np.allclose([-1, 0, 0], qrot([1, 0, 0], qeuler(180, 0, -45)))
        assert np.allclose([0, 0, -1], qrot([1, 0, 0], qeuler(180, 90, 45)))
    def test_rotation_of_y_vector(self):
        assert np.allclose([-1, 0, 0], qrot([0, 1, 0], qeuler(90, 0, 0)))
        assert np.allclose([0, -1, 0], qrot([0, 1, 0], qeuler(180, 0, 0)))
        assert np.allclose([0, -0.70710, -0.70710],
                           qrot([0, 1, 0], qeuler(180, 0, -45)))
        assert np.allclose([-0.70710, -0.70710, 0],
                           qrot([0, 1, 0], qeuler(180, 90, 45)))
    def test_rotation_of_z_vector(self):
        assert np.allclose([0, 0, 1], qrot([0, 0, 1], qeuler(90, 0, 0)))
        assert np.allclose([0, 0, 1], qrot([0, 0, 1], qeuler(180, 0, 0)))
        assert np.allclose([0, -0.70710, 0.70710],
                           qrot([0, 0, 1], qeuler(180, 0, -45)))
        assert np.allclose([-0.70710, 0.70710, 0],
                           qrot([0, 0, 1], qeuler(180, 90, 45)))
    def test_mixed_rotation(self):
        assert np.allclose([1, 2, 3], qrot([1, 2, 3], qeuler(0, 0, 0)))
        assert np.allclose([0, -1.414213, 0],
                           qrot([0, 1, -1], qeuler(180, 90, 45)))
        assert np.allclose([-1.41421356, 0, -1],
                           qrot([1, 1, 1], qeuler(180, 90, 45)))
        assert np.allclose([-14.1421356, 0, -10],
                           qrot([10, 10, 10], qeuler(180, 90, 45)))
class TestQrotYaw(TestCase):
    """qrot_yaw: rotation about the z-axis by a yaw angle in degrees."""

    def test_call_with_list(self):
        # Smoke test: a plain list must be accepted, not only arrays.
        qrot_yaw([1, 2, 3], 1)
    def test_no_rotation(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 0)
        assert np.allclose([1, 0, 0], vec_rot)
    def test_a_rotation_of_90(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 90)
        assert np.allclose([0, 1, 0], vec_rot)
    def test_a_rotation_of_180(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 180)
        assert np.allclose([-1, 0, 0], vec_rot)
    def test_a_full_rotation(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 360)
        assert np.allclose([1, 0, 0], vec_rot)
    def test_a_rotation_of_45(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 45)
        assert np.allclose([0.7071, 0.7071, 0], vec_rot)
class TestIntersect3D(TestCase):
    """Best-fit intersection point of 3D lines (intersect_3d).

    Each line is given by paired rows of two point arrays p1 and p2.
    """

    def test_intersection_at_zero(self):
        p1 = np.array([(1, 0, 0), (0, 0, 1)])
        p2 = -p1
        intersection = intersect_3d(p1, p2)
        assert np.allclose([0, 0, 0], intersection)
    def test_intersection_of_multiple_lines_with_same_endpoints(self):
        p1 = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)])
        p2 = np.array([(4, 4, 4), (4, 4, 4), (4, 4, 4)])
        intersection = intersect_3d(p1, p2)
        assert np.allclose([4, 4, 4], intersection)
    def test_intersection_of_multiple_lines_with_target(self):
        p1 = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)])
        target = np.array([23, 5, 42])
        # Mirror p1 through the target so every line passes through it.
        p2 = 2 * target - p1
        intersection = intersect_3d(p1, p2)
        assert np.allclose(target, intersection)
    def test_another_intersection(self):
        p1 = np.array([(1, 10, 0), (0, 10, 1)])
        p2 = np.array([(-1, 10, 0), (0, 10, -1)])
        intersection = intersect_3d(p1, p2)
        assert np.allclose([0, 10, 0], intersection)
class TestDist(TestCase):
    """Euclidean distance helper ``dist``.

    The 2D-point test was added for parity with the other copy of this
    test class in the file, which already covers 2D inputs.
    """

    def test_dist_between_two_2D_points(self):
        self.assertAlmostEqual(1, dist(np.array([0, 0]), np.array([1, 0])))
        self.assertAlmostEqual(
            np.sqrt(2), dist(np.array([0, 1]), np.array([1, 0]))
        )
        self.assertAlmostEqual(
            2 * np.sqrt(2), dist(np.array([1, 2]), np.array([3, 4]))
        )

    def test_dist_between_two_3D_points(self):
        self.assertAlmostEqual(
            1, dist(np.array([0, 0, 0]), np.array([1, 0, 0]))
        )
        self.assertAlmostEqual(
            np.sqrt(2), dist(np.array([0, 1, 0]), np.array([1, 0, 0]))
        )
        self.assertAlmostEqual(
            2, dist(np.array([0, 0, 2]), np.array([0, 0, 0]))
        )
        self.assertAlmostEqual(
            5.1961524, dist(np.array([1, 2, 3]), np.array([4, 5, 6]))
        )

    def test_dist_to_many_points(self):
        # axis=1 yields one distance per row of the second argument.
        assert np.allclose([1, 1, 0, 1.73205081],
                           dist(
                               np.array([0, 0, 0]),
                               np.array([[0, 0, 1], [0, 0, 1], [0, 0, 0],
                                         [1, 1, 1]]),
                               axis=1
                           ))
# More tests: a second copy of test_math.py follows below.
# Filename: test_math.py
# pylint: disable=locally-disabled,C0111,R0904,C0103
import numpy as np
from numpy.testing import (assert_almost_equal, assert_allclose)
import pytest
from km3pipe.testing import TestCase
from km3pipe.math import (
angle_between, dist, pld3, com, zenith, azimuth, Polygon, IrregularPrism,
rotation_matrix, SparseCone, space_angle, hsin, phi, theta, unit_vector,
innerprod_1d, log_b, qeuler, qrot, qrot_yaw, intersect_3d
)
__author__ = ["Tamas Gal", "Moritz Lotze"]
__copyright__ = "Copyright 2016, KM3Pipe devs and the KM3NeT collaboration."
__credits__ = ["Thomas Heid"]
__license__ = "MIT"
__maintainer__ = ["Tamas Gal", "Moritz Lotze"]
__email__ = "tgal@km3net.de"
__status__ = "Development"
class TestMath(TestCase):
    """Spherical-coordinate helpers from km3pipe.math.

    Cleanups versus the original: leftover debug ``print`` calls, a
    duplicated ``phi((0, -1, 0))`` assertion and commented-out dead
    fixture code were removed. All asserted values are unchanged.
    """

    def setUp(self):
        # One unit vector and a batch of five unit vectors shared by the
        # coordinate-conversion tests below.
        self.v = np.array([0.26726124, 0.53452248, 0.80178373])
        self.vecs = np.array([[0., 0.19611614, 0.98058068],
                              [0.23570226, 0.23570226, 0.94280904],
                              [0.53452248, 0.26726124, 0.80178373],
                              [0.80178373, 0.26726124, 0.53452248],
                              [0.94280904, 0.23570226, 0.23570226]])

    def test_phi(self):
        assert_almost_equal(0, phi((1, 0, 0)))
        assert_almost_equal(np.pi, phi((-1, 0, 0)))
        assert_almost_equal(np.pi / 2, phi((0, 1, 0)))
        assert_almost_equal(np.pi / 2 * 3, phi((0, -1, 0)))
        assert_almost_equal(0, phi((0, 0, 0)))
        assert_almost_equal(phi(self.v), 1.10714872)
        assert_almost_equal(
            phi(self.vecs),
            np.array([
                1.57079633, 0.78539816, 0.46364761, 0.32175055, 0.24497866
            ])
        )

    def test_zenith(self):
        assert_allclose(np.pi, zenith((0, 0, 1)))
        assert_allclose(0, zenith((0, 0, -1)))
        assert_allclose(np.pi / 2, zenith((0, 1, 0)))
        assert_allclose(np.pi / 2, zenith((0, -1, 0)))
        assert_allclose(np.pi / 4 * 3, zenith((0, 1, 1)))
        assert_allclose(np.pi / 4 * 3, zenith((0, -1, 1)))
        assert_almost_equal(zenith(self.v), 2.5010703409103687)
        assert_allclose(
            zenith(self.vecs),
            np.array([
                2.94419709, 2.80175574, 2.50107034, 2.13473897, 1.80873745
            ])
        )

    def test_azimuth(self):
        self.assertTrue(np.allclose(np.pi, azimuth((1, 0, 0))))
        self.assertTrue(np.allclose(0, azimuth((-1, 0, 0))))
        self.assertTrue(np.allclose(np.pi / 2 * 3, azimuth((0, 1, 0))))
        self.assertTrue(np.allclose(np.pi / 2, azimuth((0, -1, 0))))
        # The zero vector is mapped to pi by convention.
        self.assertTrue(np.allclose(np.pi, azimuth((0, 0, 0))))
        self.assertTrue(np.allclose(azimuth(self.v), 4.24874137138))
        self.assertTrue(
            np.allclose(
                azimuth(self.vecs),
                np.array([
                    4.71238898, 3.92699082, 3.60524026, 3.46334321, 3.38657132
                ])
            )
        )

    def test_theta(self):
        self.assertTrue(np.allclose(0, theta((0, 0, 1))))
        self.assertTrue(np.allclose(np.pi, theta((0, 0, -1))))
        self.assertTrue(np.allclose(np.pi / 2, theta((0, 1, 0))))
        self.assertTrue(np.allclose(np.pi / 2, theta((0, -1, 0))))
        self.assertTrue(np.allclose(0, theta((0, 1, 1))))
        self.assertTrue(np.allclose(0, theta((0, -1, 1))))
        self.assertTrue(np.allclose(theta(self.v), 0.64052231))
        self.assertTrue(
            np.allclose(
                theta(self.vecs),
                np.array([
                    0.19739554, 0.33983691, 0.64052231, 1.00685369, 1.3328552
                ])
            )
        )

    def test_unit_vector(self):
        v1 = (1, 0, 0)
        v2 = (1, 1, 0)
        v3 = (-1, 2, 0)
        assert np.allclose(v1, unit_vector(v1))
        assert np.allclose(np.array(v2) / np.sqrt(2), unit_vector(v2))
        assert np.allclose(np.array(v3) / np.sqrt(5), unit_vector(v3))

    def test_innerprod_1d(self):
        v1 = [[1, 2, 3], [4, 5, 6]]
        v2 = [[11, 22, 33], [44, 55, 66]]
        assert np.allclose([14, 77], innerprod_1d(v1, v2))

    def test_angle_between(self):
        v1 = (1, 0, 0)
        v2 = (0, 1, 0)
        v3 = (-1, 0, 0)
        self.assertAlmostEqual(0, angle_between(v1, v1))
        self.assertAlmostEqual(np.pi / 2, angle_between(v1, v2))
        self.assertAlmostEqual(np.pi, angle_between(v1, v3))
        self.assertAlmostEqual(angle_between(self.v, v1), 1.3002465638163236)
        self.assertAlmostEqual(angle_between(self.v, v2), 1.0068536854342678)
        self.assertAlmostEqual(angle_between(self.v, v3), 1.8413460897734695)
        self.assertTrue(
            np.allclose(
                angle_between(self.vecs, v1),
                np.array([
                    1.57079633, 1.3328552, 1.0068537, 0.64052231, 0.33983691
                ])
            )
        )
        self.assertTrue(
            np.allclose(
                angle_between(self.vecs, v2),
                np.array([
                    1.37340077, 1.3328552, 1.3002466, 1.30024656, 1.3328552
                ])
            )
        )
        self.assertTrue(
            np.allclose(
                angle_between(self.vecs, v3),
                np.array([
                    1.57079633, 1.80873745, 2.13473897, 2.50107034, 2.80175574
                ])
            )
        )

    def test_angle_between_returns_nan_for_zero_length_vectors(self):
        v1 = (0, 0, 0)
        v2 = (1, 0, 0)
        with pytest.warns(RuntimeWarning):
            self.assertTrue(np.isnan(angle_between(v1, v2)))

    def test_space_angle(self):
        p1 = (np.pi / 2, np.pi)
        p2 = (np.pi, 0)
        self.assertAlmostEqual(
            space_angle(p1[0], p2[0], p1[1], p2[1]), 1.57079632679489
        )
        p3 = (0, np.pi)
        p4 = (np.pi / 2, 0)
        self.assertAlmostEqual(
            space_angle(p3[0], p4[0], p3[1], p4[1]), 1.57079632679489
        )

    def test_hsin(self):
        assert np.all(hsin((np.pi, 0)) == (1, 0))
        self.assertAlmostEqual(hsin(np.pi / 2), 0.5)

    def test_pld3(self):
        p1 = np.array((0, 0, 0))
        p2 = np.array((0, 0, 1))
        d2 = np.array((0, 1, 0))
        self.assertAlmostEqual(1, pld3(p1, p2, d2))
        p1 = np.array((0, 0, 0))
        p2 = np.array((0, 0, 2))
        d2 = np.array((0, 1, 0))
        self.assertAlmostEqual(2, pld3(p1, p2, d2))
        p1 = np.array((0, 0, 0))
        p2 = np.array((0, 0, 0))
        d2 = np.array((0, 1, 0))
        self.assertAlmostEqual(0, pld3(p1, p2, d2))
        p1 = np.array((1, 2, 3))
        p2 = np.array((4, 5, 6))
        d2 = np.array((7, 8, 9))
        self.assertAlmostEqual(0.5275893, pld3(p1, p2, d2))
        p1 = np.array((0, 0, 2))
        p2 = np.array((-100, 0, -100))
        d2 = np.array((1, 0, 1))
        self.assertAlmostEqual(1.4142136, pld3(p1, p2, d2))
        p1 = np.array([183., -311., 351.96083871])
        p2 = np.array([40.256, -639.888, 921.93])
        d2 = np.array([0.185998, 0.476123, -0.859483])
        self.assertAlmostEqual(21.25456308, pld3(p1, p2, d2))

    def test_com(self):
        center_of_mass = com(((1, 2, 3), (4, 5, 6), (7, 8, 9), (10, 11, 12)))
        self.assertEqual((5.5, 6.5, 7.5), tuple(center_of_mass))
        # A zero mass removes the point from the weighted average.
        center_of_mass = com(((1, 2, 3), (4, 5, 6), (7, 8, 9)),
                             masses=(1, 0, 0))
        self.assertEqual((1, 2, 3), tuple(center_of_mass))
        center_of_mass = com(((1, 1, 1), (0, 0, 0)))
        self.assertEqual((0.5, 0.5, 0.5), tuple(center_of_mass))
class TestShapes(TestCase):
    """Point-containment tests for Polygon and IrregularPrism."""

    def setUp(self):
        # Octagonal test polygon shared by every containment test below.
        self.poly = [
            (-60, 120),
            (80, 120),
            (110, 60),
            (110, -30),
            (70, -110),
            (-70, -110),
            (-90, -70),
            (-90, 60),
        ]
    def test_poly_containment(self):
        polygon = Polygon(self.poly)
        point_in = (-40, -40)
        point_out = (-140, -140)
        points = [
            (-40, -40),
            (-140, -140),
            (40, -140),
        ]
        assert np.all(polygon.contains(point_in))
        assert not np.any(polygon.contains(point_out))
        assert np.all(polygon.contains(points) == [True, False, False])
    def test_poly_xy(self):
        polygon = Polygon(self.poly)
        x = (-40, -140, 40)
        y = (-40, -140, -140)
        assert np.all(polygon.contains_xy(x, y) == [True, False, False])
    def test_prism_contained(self):
        z = (-90, 90)
        prism = IrregularPrism(self.poly, z[0], z[1])
        points = [
            (0, 1, 2),
            (-100, 20, 10),
            (10, 90, 10),
        ]
        assert np.all(prism.contains(points) == [True, False, True])
    def test_prism_contained_xyz(self):
        z = (-90, 90)
        prism = IrregularPrism(self.poly, z[0], z[1])
        x = (0, -100, 10)
        y = (1, 20, 90)
        # NOTE: z is rebound here from the extrusion range to test z-coords.
        z = (2, 10, 10)
        assert np.all(prism.contains_xyz(x, y, z) == [True, False, True])
class TestRotation(TestCase):
    """rotation_matrix and SparseCone sampling."""

    def test_rotmat(self):
        v = [3, 5, 0]
        axis = [4, 4, 1]
        theta = 1.2
        newvec = np.dot(rotation_matrix(axis, theta), v)
        self.assertTrue(
            np.allclose(
                newvec, np.array([2.74911638, 4.77180932, 1.91629719])
            )
        )
    def test_cone(self):
        spike = [1, 1, 0]
        bottom = [0, 2, 0]
        angle = np.pi / 4
        n_angles = 20
        cone = SparseCone(spike, bottom, angle)
        circ_samp = cone.sample_circle(n_angles=n_angles)
        axis_samp = cone.sample_axis
        samp = cone.sample(n_angles)
        # sample() = the circle samples plus the two axis points.
        assert len(circ_samp) == n_angles
        assert len(axis_samp) == 2
        assert len(samp) == len(circ_samp) + 2
class TestLog(TestCase):
    """log_b against the numpy reference logarithms."""

    def test_val(self):
        for base, reference in ((2, np.log2), (10, np.log10), (np.e, np.log)):
            assert_allclose(log_b(5, base), reference(5))
class TestQeuler(TestCase):
    """Euler angles (yaw, pitch, roll, in degrees) to quaternion [w, x, y, z]."""

    def test_conversion_of_yaw(self):
        assert np.allclose([1, 0, 0, 0], qeuler(0, 0, 0))
        assert np.allclose([0.7071, 0, 0, 0.7071], qeuler(90, 0, 0))
        assert np.allclose([0, 0, 0, 1], qeuler(180, 0, 0))
        assert np.allclose([-0.7071, 0, 0, 0.7071], qeuler(270, 0, 0))
        # A full turn gives the sign-flipped identity quaternion.
        assert np.allclose([-1, 0, 0, 0], qeuler(360, 0, 0))
    def test_conversion_of_pitch(self):
        assert np.allclose([0.92388, 0, 0.38268, 0], qeuler(0, 45, 0))
        assert np.allclose([0.92388, 0, -0.38268, 0], qeuler(0, -45, 0))
        assert np.allclose([0.7071, 0, 0.7071, 0], qeuler(0, 90, 0))
        assert np.allclose([0.8660254, 0, 0.5, 0], qeuler(0, 60, 0))
        assert np.allclose([-0.96592583, 0, -0.25881905, 0], qeuler(0, 390, 0))
    def test_conversion_of_roll(self):
        assert np.allclose([0.92388, 0.38268, 0, 0], qeuler(0, 0, 45))
        assert np.allclose([0.92388, -0.38268, 0, 0], qeuler(0, 0, -45))
        assert np.allclose([0.70710, 0.70710, 0, 0], qeuler(0, 0, 90))
        assert np.allclose([0.86602, 0.5, 0, 0], qeuler(0, 0, 60))
        assert np.allclose([-0.96592583, -0.25881905, 0, 0], qeuler(0, 0, 390))
    def test_mixed_conversion(self):
        assert np.allclose([0.999471, 0.02601972, 0.01767416, 0.00826538],
                           qeuler(1, 2, 3))
        assert np.allclose([0.94371436, 0.26853582, -0.14487813, 0.12767944],
                           qeuler(10, -20, 30))
        assert np.allclose([-0.16575384, -0.69624819, 0.05479592, -0.69624819],
                           qeuler(-999, 999, -999))
class TestQrot(TestCase):
    """Rotating vectors with quaternions produced by qeuler."""

    def test_rotation_of_x_vector(self):
        assert np.allclose([0, 1, 0], qrot([1, 0, 0], qeuler(90, 0, 0)))
        assert np.allclose([-1, 0, 0], qrot([1, 0, 0], qeuler(180, 0, 0)))
        # Roll about x leaves the x unit vector unchanged.
        assert np.allclose([-1, 0, 0], qrot([1, 0, 0], qeuler(180, 0, -45)))
        assert np.allclose([0, 0, -1], qrot([1, 0, 0], qeuler(180, 90, 45)))
    def test_rotation_of_y_vector(self):
        assert np.allclose([-1, 0, 0], qrot([0, 1, 0], qeuler(90, 0, 0)))
        assert np.allclose([0, -1, 0], qrot([0, 1, 0], qeuler(180, 0, 0)))
        assert np.allclose([0, -0.70710, -0.70710],
                           qrot([0, 1, 0], qeuler(180, 0, -45)))
        assert np.allclose([-0.70710, -0.70710, 0],
                           qrot([0, 1, 0], qeuler(180, 90, 45)))
    def test_rotation_of_z_vector(self):
        assert np.allclose([0, 0, 1], qrot([0, 0, 1], qeuler(90, 0, 0)))
        assert np.allclose([0, 0, 1], qrot([0, 0, 1], qeuler(180, 0, 0)))
        assert np.allclose([0, -0.70710, 0.70710],
                           qrot([0, 0, 1], qeuler(180, 0, -45)))
        assert np.allclose([-0.70710, 0.70710, 0],
                           qrot([0, 0, 1], qeuler(180, 90, 45)))
    def test_mixed_rotation(self):
        assert np.allclose([1, 2, 3], qrot([1, 2, 3], qeuler(0, 0, 0)))
        assert np.allclose([0, -1.414213, 0],
                           qrot([0, 1, -1], qeuler(180, 90, 45)))
        assert np.allclose([-1.41421356, 0, -1],
                           qrot([1, 1, 1], qeuler(180, 90, 45)))
        assert np.allclose([-14.1421356, 0, -10],
                           qrot([10, 10, 10], qeuler(180, 90, 45)))
class TestQrotYaw(TestCase):
    """qrot_yaw: rotation about the z-axis by a yaw angle in degrees."""

    def test_call_with_list(self):
        # Smoke test: a plain list must be accepted, not only arrays.
        qrot_yaw([1, 2, 3], 1)
    def test_no_rotation(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 0)
        assert np.allclose([1, 0, 0], vec_rot)
    def test_a_rotation_of_90(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 90)
        assert np.allclose([0, 1, 0], vec_rot)
    def test_a_rotation_of_180(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 180)
        assert np.allclose([-1, 0, 0], vec_rot)
    def test_a_full_rotation(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 360)
        assert np.allclose([1, 0, 0], vec_rot)
    def test_a_rotation_of_45(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 45)
        assert np.allclose([0.7071, 0.7071, 0], vec_rot)
class TestIntersect3D(TestCase):
    """Best-fit intersection point of 3D lines (intersect_3d).

    Each line is given by paired rows of two point arrays p1 and p2.
    """

    def test_intersection_at_zero(self):
        p1 = np.array([(1, 0, 0), (0, 0, 1)])
        p2 = -p1
        intersection = intersect_3d(p1, p2)
        assert np.allclose([0, 0, 0], intersection)
    def test_intersection_of_multiple_lines_with_same_endpoints(self):
        p1 = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)])
        p2 = np.array([(4, 4, 4), (4, 4, 4), (4, 4, 4)])
        intersection = intersect_3d(p1, p2)
        assert np.allclose([4, 4, 4], intersection)
    def test_intersection_of_multiple_lines_with_target(self):
        p1 = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)])
        target = np.array([23, 5, 42])
        # Mirror p1 through the target so every line passes through it.
        p2 = 2 * target - p1
        intersection = intersect_3d(p1, p2)
        assert np.allclose(target, intersection)
    def test_another_intersection(self):
        p1 = np.array([(1, 10, 0), (0, 10, 1)])
        p2 = np.array([(-1, 10, 0), (0, 10, -1)])
        intersection = intersect_3d(p1, p2)
        assert np.allclose([0, 10, 0], intersection)
class TestDist(TestCase):
    """Euclidean distance helper ``dist``."""

    def test_dist_between_two_2D_points(self):
        self.assertAlmostEqual(1, dist(np.array([0, 0]), np.array([1, 0])))
        self.assertAlmostEqual(
            np.sqrt(2), dist(np.array([0, 1]), np.array([1, 0]))
        )
        self.assertAlmostEqual(
            2 * np.sqrt(2), dist(np.array([1, 2]), np.array([3, 4]))
        )
    def test_dist_between_two_3D_points(self):
        self.assertAlmostEqual(
            1, dist(np.array([0, 0, 0]), np.array([1, 0, 0]))
        )
        self.assertAlmostEqual(
            np.sqrt(2), dist(np.array([0, 1, 0]), np.array([1, 0, 0]))
        )
        self.assertAlmostEqual(
            2, dist(np.array([0, 0, 2]), np.array([0, 0, 0]))
        )
        self.assertAlmostEqual(
            5.1961524, dist(np.array([1, 2, 3]), np.array([4, 5, 6]))
        )
    def test_dist_to_many_points(self):
        # axis=1 yields one distance per row of the second argument.
        assert np.allclose([1, 1, 0, 1.73205081],
                           dist(
                               np.array([0, 0, 0]),
                               np.array([[0, 0, 1], [0, 0, 1], [0, 0, 0],
                                         [1, 1, 1]]),
                               axis=1
                           ))
# --- file boundary: trove cluster service tests follow ---
# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import jsonschema
from mock import MagicMock
from mock import Mock
from mock import patch
from testtools.matchers import Is, Equals
from trove.cluster import models
from trove.cluster.models import Cluster, DBCluster
from trove.cluster.service import ClusterController
from trove.cluster.tasks import ClusterTasks
from trove.cluster import views
import trove.common.cfg as cfg
from trove.common import exception
from trove.common.strategies.cluster import strategy
from trove.common import utils
from trove.datastore import models as datastore_models
from trove.tests.unittests import trove_testtools
class TestClusterController(trove_testtools.TestCase):
    """Schema validation and request dispatch tests for ClusterController.

    Changes versus the original: a vacuous ``self.assertTrue('cluster')``
    (a non-empty string literal, always true) was removed, and a local
    variable that shadowed the ``id`` builtin was renamed.
    """

    def setUp(self):
        super(TestClusterController, self).setUp()
        self.controller = ClusterController()
        self.locality = 'anti-affinity'
        # Five identical instance definitions for a mongodb cluster.
        instances = [
            {
                "flavorRef": "7",
                "volume": {
                    "size": 1
                },
                "availability_zone": "az",
                "nics": [
                    {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"}
                ]
            }
        ] * 5
        self.cluster = {
            "cluster": {
                "name": "products",
                "datastore": {
                    "type": "mongodb",
                    "version": "2.4.10"
                },
                "instances": instances,
                "locality": self.locality,
            }
        }
        self.add_shard = {
            "add_shard": {}
        }
        self.grow = {
            "grow": [
                {"flavorRef": "7"},
            ]
        }
        self.shrink = {
            "shrink": [
                {"id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"},
            ]
        }
        self.upgrade = {
            "upgrade": {
                "datastore_version": "2.4.10"
            }
        }

    def test_get_schema_create(self):
        schema = self.controller.get_schema('create', self.cluster)
        self.assertIsNotNone(schema)
        self.assertIn('cluster', schema['properties'])

    def test_get_schema_action_add_shard(self):
        schema = self.controller.get_schema('action', self.add_shard)
        self.assertIsNotNone(schema)
        self.assertIn('add_shard', schema['properties'])

    def test_get_schema_action_grow(self):
        schema = self.controller.get_schema('action', self.grow)
        self.assertIsNotNone(schema)
        self.assertIn('grow', schema['properties'])

    def test_get_schema_action_shrink(self):
        schema = self.controller.get_schema('action', self.shrink)
        self.assertIsNotNone(schema)
        self.assertIn('shrink', schema['properties'])

    def test_get_schema_action_upgrade(self):
        schema = self.controller.get_schema('action', self.upgrade)
        self.assertIsNotNone(schema)
        self.assertIn('upgrade', schema['properties'])

    def test_get_schema_action_invalid(self):
        # An unknown action yields an empty schema rather than raising.
        schema = self.controller.get_schema('action', {'wow': {}})
        self.assertIsNotNone(schema)
        self.assertThat(len(schema.keys()), Is(0))

    def test_validate_create(self):
        body = self.cluster
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_add_shard(self):
        body = self.add_shard
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_grow(self):
        body = self.grow
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_shrink(self):
        body = self.shrink
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_upgrade(self):
        body = self.upgrade
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_create_blankname(self):
        body = self.cluster
        body['cluster']['name'] = " "
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertThat(len(errors), Is(1))
        self.assertThat(errors[0].message,
                        Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'"))

    def test_validate_create_blank_datastore(self):
        body = self.cluster
        body['cluster']['datastore']['type'] = ""
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        error_messages = [error.message for error in errors]
        error_paths = [error.path.pop() for error in errors]
        self.assertThat(len(errors), Is(2))
        self.assertIn("'' is too short", error_messages)
        self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages)
        self.assertIn("type", error_paths)

    def test_validate_create_bad_locality(self):
        body = self.cluster
        body['cluster']['locality'] = "$%^&"
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        error_messages = [error.message for error in errors]
        error_paths = [error.path.pop() for error in errors]
        self.assertThat(len(errors), Is(1))
        self.assertIn("'$%^&' does not match '^.*[0-9a-zA-Z]+.*$'",
                      error_messages)
        self.assertIn("locality", error_paths)

    @patch.object(datastore_models, 'get_datastore_version')
    def test_create_clusters_disabled(self, mock_get_datastore_version):
        # 'mysql' has no cluster strategy, so create must be rejected.
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'mysql'
        mock_get_datastore_version.return_value = (Mock(), datastore_version)
        self.assertRaises(exception.ClusterDatastoreNotSupported,
                          self.controller.create,
                          req,
                          body,
                          tenant_id)

    @patch.object(Cluster, 'create')
    @patch.object(utils, 'get_id_from_href')
    @patch.object(datastore_models, 'get_datastore_version')
    def test_create_clusters(self,
                             mock_get_datastore_version,
                             mock_id_from_href,
                             mock_cluster_create):
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'mongodb'
        datastore = Mock()
        mock_get_datastore_version.return_value = (datastore,
                                                   datastore_version)
        # The normalized instance definitions expected by Cluster.create.
        instances = [
            {
                'volume_size': 1,
                'volume_type': None,
                'flavor_id': '1234',
                'availability_zone': 'az',
                'modules': None,
                'region_name': None,
                'nics': [
                    {'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'}
                ]
            }
        ] * 5
        mock_id_from_href.return_value = '1234'
        mock_cluster = Mock()
        mock_cluster.instances = []
        mock_cluster.instances_without_server = []
        mock_cluster.datastore_version.manager = 'mongodb'
        mock_cluster_create.return_value = mock_cluster
        self.controller.create(req, body, tenant_id)
        mock_cluster_create.assert_called_with(context, 'products',
                                               datastore, datastore_version,
                                               instances, {},
                                               self.locality, None)

    @patch.object(Cluster, 'load')
    def test_show_cluster(self,
                          mock_cluster_load):
        tenant_id = Mock()
        cluster_id = Mock()  # renamed: previously shadowed the builtin ``id``
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        mock_cluster = Mock()
        mock_cluster.instances = []
        mock_cluster.instances_without_server = []
        mock_cluster.datastore_version.manager = 'mongodb'
        mock_cluster_load.return_value = mock_cluster
        mock_cluster.locality = self.locality
        self.controller.show(req, tenant_id, cluster_id)
        mock_cluster_load.assert_called_with(context, cluster_id)

    @patch.object(Cluster, 'load')
    @patch.object(Cluster, 'load_instance')
    def test_show_cluster_instance(self,
                                   mock_cluster_load_instance,
                                   mock_cluster_load):
        tenant_id = Mock()
        cluster_id = Mock()
        instance_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        cluster = Mock()
        mock_cluster_load.return_value = cluster
        cluster.id = cluster_id
        self.controller.show_instance(req, tenant_id, cluster_id, instance_id)
        mock_cluster_load_instance.assert_called_with(context, cluster.id,
                                                      instance_id)

    @patch.object(Cluster, 'load')
    def test_delete_cluster(self, mock_cluster_load):
        tenant_id = Mock()
        cluster_id = Mock()
        req = MagicMock()
        cluster = Mock()
        trove_testtools.patch_notifier(self)
        mock_cluster_load.return_value = cluster
        self.controller.delete(req, tenant_id, cluster_id)
        cluster.delete.assert_called_with()
class TestClusterControllerWithStrategy(trove_testtools.TestCase):
    """ClusterController tests that depend on per-datastore strategy
    configuration (the mongodb cluster_support / api_strategy options).
    """

    def setUp(self):
        super(TestClusterControllerWithStrategy, self).setUp()
        self.controller = ClusterController()
        # Five identical instance definitions, built with the same
        # list-multiplication pattern TestClusterController.setUp() uses
        # instead of five copy-pasted dict literals.
        self.cluster = {
            "cluster": {
                "name": "products",
                "datastore": {
                    "type": "mongodb",
                    "version": "2.4.10"
                },
                "instances": [
                    {
                        "flavorRef": "7",
                        "volume": {
                            "size": 1
                        },
                    }
                ] * 5
            }
        }

    def tearDown(self):
        super(TestClusterControllerWithStrategy, self).tearDown()
        # Reset config overrides set by individual tests so they cannot
        # leak into other test cases.
        cfg.CONF.clear_override('cluster_support', group='mongodb')
        cfg.CONF.clear_override('api_strategy', group='mongodb')

    @patch.object(datastore_models, 'get_datastore_version')
    @patch.object(models.Cluster, 'create')
    def test_create_clusters_disabled(self,
                                      mock_cluster_create,
                                      mock_get_datastore_version):
        """create() raises TroveError when mongodb cluster support is off."""
        cfg.CONF.set_override('cluster_support', False, group='mongodb')
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'mongodb'
        mock_get_datastore_version.return_value = (Mock(), datastore_version)
        self.assertRaises(exception.TroveError, self.controller.create, req,
                          body, tenant_id)

    @patch.object(views.ClusterView, 'data', return_value={})
    @patch.object(datastore_models, 'get_datastore_version')
    @patch.object(models.Cluster, 'create')
    def test_create_clusters_enabled(self,
                                     mock_cluster_create,
                                     mock_get_datastore_version,
                                     mock_cluster_view_data):
        """create() completes when mongodb cluster support is on."""
        cfg.CONF.set_override('cluster_support', True, group='mongodb')
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'mongodb'
        mock_get_datastore_version.return_value = (Mock(), datastore_version)
        mock_cluster = Mock()
        mock_cluster.datastore_version.manager = 'mongodb'
        mock_cluster_create.return_value = mock_cluster
        self.controller.create(req, body, tenant_id)

    @patch.object(models.Cluster, 'load')
    def test_controller_action_multi_action(self,
                                            mock_cluster_load):
        """action() rejects bodies that contain more than one action."""
        body = {'do_stuff': {}, 'do_stuff2': {}}
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        cluster_id = Mock()
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        cluster = Mock()
        cluster.instances_without_server = [Mock()]
        cluster.datastore_version.manager = 'test_dsv'
        mock_cluster_load.return_value = cluster
        self.assertRaisesRegex(exception.TroveError,
                               'should have exactly one action specified',
                               self.controller.action, req,
                               body, tenant_id, cluster_id)

    @patch.object(models.Cluster, 'load')
    def test_controller_action_no_strategy(self,
                                           mock_cluster_load):
        """action() rejects actions the datastore strategy does not support."""
        body = {'do_stuff2': {}}
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        cluster_id = Mock()
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        db_info = DBCluster(ClusterTasks.NONE, id=cluster_id,
                            tenant_id=tenant_id)
        cluster = Cluster(context, db_info, datastore='test_ds',
                          datastore_version='test_dsv')
        mock_cluster_load.return_value = cluster
        self.assertRaisesRegex(exception.TroveError,
                               'Action do_stuff2 not supported',
                               self.controller.action, req,
                               body, tenant_id, cluster_id)

    @patch.object(strategy, 'load_api_strategy')
    @patch.object(models.Cluster, 'load')
    def test_controller_action_found(self,
                                     mock_cluster_load,
                                     mock_cluster_api_strategy):
        """action() dispatches a supported action to the loaded cluster."""
        body = {'grow': {}}
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        cluster_id = 'test_uuid'
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        cluster = Mock()
        cluster.instances_without_server = [Mock()]
        cluster.datastore_version.manager = 'test_dsv'
        mock_cluster_load.return_value = cluster
        self.controller.action(req, body, tenant_id, cluster_id)
        self.assertEqual(1, cluster.action.call_count)
Remove invalid assertion
Remove the meaningless assertion self.assertTrue('cluster'): a non-empty
string literal is always truthy, so that assert could never fail and
verified nothing.
Change-Id: I84aa5a39c9a9d9c410cc780ddc162768d37cab32
# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import jsonschema
from mock import MagicMock
from mock import Mock
from mock import patch
from testtools.matchers import Is, Equals
from trove.cluster import models
from trove.cluster.models import Cluster, DBCluster
from trove.cluster.service import ClusterController
from trove.cluster.tasks import ClusterTasks
from trove.cluster import views
import trove.common.cfg as cfg
from trove.common import exception
from trove.common.strategies.cluster import strategy
from trove.common import utils
from trove.datastore import models as datastore_models
from trove.tests.unittests import trove_testtools
class TestClusterController(trove_testtools.TestCase):
    """Schema and request-handling tests for ClusterController, exercised
    with mongodb request bodies.
    """

    def setUp(self):
        super(TestClusterController, self).setUp()
        self.controller = ClusterController()
        self.locality = 'anti-affinity'
        # Five identical instance definitions for the create request body.
        instances = [
            {
                "flavorRef": "7",
                "volume": {
                    "size": 1
                },
                "availability_zone": "az",
                "nics": [
                    {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"}
                ]
            }
        ] * 5
        self.cluster = {
            "cluster": {
                "name": "products",
                "datastore": {
                    "type": "mongodb",
                    "version": "2.4.10"
                },
                "instances": instances,
                "locality": self.locality,
            }
        }
        # Representative request bodies for each supported cluster action.
        self.add_shard = {
            "add_shard": {}
        }
        self.grow = {
            "grow": [
                {"flavorRef": "7"},
            ]
        }
        self.shrink = {
            "shrink": [
                {"id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"},
            ]
        }
        self.upgrade = {
            "upgrade": {
                "datastore_version": "2.4.10"
            }
        }

    def test_get_schema_create(self):
        """get_schema('create', ...) exposes a 'cluster' property."""
        schema = self.controller.get_schema('create', self.cluster)
        self.assertIsNotNone(schema)
        self.assertIn('cluster', schema['properties'])

    def test_get_schema_action_add_shard(self):
        schema = self.controller.get_schema('action', self.add_shard)
        self.assertIsNotNone(schema)
        self.assertIn('add_shard', schema['properties'])

    def test_get_schema_action_grow(self):
        schema = self.controller.get_schema('action', self.grow)
        self.assertIsNotNone(schema)
        self.assertIn('grow', schema['properties'])

    def test_get_schema_action_shrink(self):
        schema = self.controller.get_schema('action', self.shrink)
        self.assertIsNotNone(schema)
        self.assertIn('shrink', schema['properties'])

    def test_get_schema_action_upgrade(self):
        schema = self.controller.get_schema('action', self.upgrade)
        self.assertIsNotNone(schema)
        self.assertIn('upgrade', schema['properties'])

    def test_get_schema_action_invalid(self):
        """An unknown action yields an empty schema."""
        schema = self.controller.get_schema('action', {'wow': {}})
        self.assertIsNotNone(schema)
        self.assertThat(len(schema.keys()), Is(0))

    def test_validate_create(self):
        body = self.cluster
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_add_shard(self):
        body = self.add_shard
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_grow(self):
        body = self.grow
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_shrink(self):
        body = self.shrink
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_upgrade(self):
        body = self.upgrade
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_create_blankname(self):
        """A whitespace-only cluster name fails schema validation."""
        body = self.cluster
        body['cluster']['name'] = "      "
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertThat(len(errors), Is(1))
        self.assertThat(errors[0].message,
                        Equals("'      ' does not match '^.*[0-9a-zA-Z]+.*$'"))

    def test_validate_create_blank_datastore(self):
        """An empty datastore type fails both length and pattern checks."""
        body = self.cluster
        body['cluster']['datastore']['type'] = ""
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        error_messages = [error.message for error in errors]
        error_paths = [error.path.pop() for error in errors]
        self.assertThat(len(errors), Is(2))
        self.assertIn("'' is too short", error_messages)
        self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages)
        self.assertIn("type", error_paths)

    def test_validate_create_bad_locality(self):
        """A locality value with no alphanumerics fails validation."""
        body = self.cluster
        body['cluster']['locality'] = "$%^&"
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        error_messages = [error.message for error in errors]
        error_paths = [error.path.pop() for error in errors]
        self.assertThat(len(errors), Is(1))
        self.assertIn("'$%^&' does not match '^.*[0-9a-zA-Z]+.*$'",
                      error_messages)
        self.assertIn("locality", error_paths)

    @patch.object(datastore_models, 'get_datastore_version')
    def test_create_clusters_disabled(self, mock_get_datastore_version):
        """create() rejects datastores without cluster support (mysql)."""
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'mysql'
        mock_get_datastore_version.return_value = (Mock(), datastore_version)
        self.assertRaises(exception.ClusterDatastoreNotSupported,
                          self.controller.create,
                          req,
                          body,
                          tenant_id)

    @patch.object(Cluster, 'create')
    @patch.object(utils, 'get_id_from_href')
    @patch.object(datastore_models, 'get_datastore_version')
    def test_create_clusters(self,
                             mock_get_datastore_version,
                             mock_id_from_href,
                             mock_cluster_create):
        """create() translates the request body into Cluster.create args."""
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'mongodb'
        datastore = Mock()
        mock_get_datastore_version.return_value = (datastore,
                                                   datastore_version)
        # Expected translated form of the five request instances.
        instances = [
            {
                'volume_size': 1,
                'volume_type': None,
                'flavor_id': '1234',
                'availability_zone': 'az',
                'modules': None,
                'region_name': None,
                'nics': [
                    {'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'}
                ]
            }
        ] * 5
        mock_id_from_href.return_value = '1234'
        mock_cluster = Mock()
        mock_cluster.instances = []
        mock_cluster.instances_without_server = []
        mock_cluster.datastore_version.manager = 'mongodb'
        mock_cluster_create.return_value = mock_cluster
        self.controller.create(req, body, tenant_id)
        mock_cluster_create.assert_called_with(context, 'products',
                                               datastore, datastore_version,
                                               instances, {},
                                               self.locality, None)

    @patch.object(Cluster, 'load')
    def test_show_cluster(self,
                          mock_cluster_load):
        """show() loads the cluster with the request context and id."""
        tenant_id = Mock()
        id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        mock_cluster = Mock()
        mock_cluster.instances = []
        mock_cluster.instances_without_server = []
        mock_cluster.datastore_version.manager = 'mongodb'
        mock_cluster_load.return_value = mock_cluster
        mock_cluster.locality = self.locality
        self.controller.show(req, tenant_id, id)
        mock_cluster_load.assert_called_with(context, id)

    @patch.object(Cluster, 'load')
    @patch.object(Cluster, 'load_instance')
    def test_show_cluster_instance(self,
                                   mock_cluster_load_instance,
                                   mock_cluster_load):
        """show_instance() delegates to Cluster.load_instance."""
        tenant_id = Mock()
        cluster_id = Mock()
        instance_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        cluster = Mock()
        mock_cluster_load.return_value = cluster
        cluster.id = cluster_id
        self.controller.show_instance(req, tenant_id, cluster_id, instance_id)
        mock_cluster_load_instance.assert_called_with(context, cluster.id,
                                                      instance_id)

    @patch.object(Cluster, 'load')
    def test_delete_cluster(self, mock_cluster_load):
        """delete() calls delete() on the loaded cluster."""
        tenant_id = Mock()
        cluster_id = Mock()
        req = MagicMock()
        cluster = Mock()
        trove_testtools.patch_notifier(self)
        mock_cluster_load.return_value = cluster
        self.controller.delete(req, tenant_id, cluster_id)
        cluster.delete.assert_called_with()
class TestClusterControllerWithStrategy(trove_testtools.TestCase):
    """ClusterController tests that depend on per-datastore strategy
    configuration (the mongodb cluster_support / api_strategy options).
    """

    def setUp(self):
        super(TestClusterControllerWithStrategy, self).setUp()
        self.controller = ClusterController()
        # Five identical instance definitions, built with the same
        # list-multiplication pattern TestClusterController.setUp() uses
        # instead of five copy-pasted dict literals.
        self.cluster = {
            "cluster": {
                "name": "products",
                "datastore": {
                    "type": "mongodb",
                    "version": "2.4.10"
                },
                "instances": [
                    {
                        "flavorRef": "7",
                        "volume": {
                            "size": 1
                        },
                    }
                ] * 5
            }
        }

    def tearDown(self):
        super(TestClusterControllerWithStrategy, self).tearDown()
        # Reset config overrides set by individual tests so they cannot
        # leak into other test cases.
        cfg.CONF.clear_override('cluster_support', group='mongodb')
        cfg.CONF.clear_override('api_strategy', group='mongodb')

    @patch.object(datastore_models, 'get_datastore_version')
    @patch.object(models.Cluster, 'create')
    def test_create_clusters_disabled(self,
                                      mock_cluster_create,
                                      mock_get_datastore_version):
        """create() raises TroveError when mongodb cluster support is off."""
        cfg.CONF.set_override('cluster_support', False, group='mongodb')
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'mongodb'
        mock_get_datastore_version.return_value = (Mock(), datastore_version)
        self.assertRaises(exception.TroveError, self.controller.create, req,
                          body, tenant_id)

    @patch.object(views.ClusterView, 'data', return_value={})
    @patch.object(datastore_models, 'get_datastore_version')
    @patch.object(models.Cluster, 'create')
    def test_create_clusters_enabled(self,
                                     mock_cluster_create,
                                     mock_get_datastore_version,
                                     mock_cluster_view_data):
        """create() completes when mongodb cluster support is on."""
        cfg.CONF.set_override('cluster_support', True, group='mongodb')
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'mongodb'
        mock_get_datastore_version.return_value = (Mock(), datastore_version)
        mock_cluster = Mock()
        mock_cluster.datastore_version.manager = 'mongodb'
        mock_cluster_create.return_value = mock_cluster
        self.controller.create(req, body, tenant_id)

    @patch.object(models.Cluster, 'load')
    def test_controller_action_multi_action(self,
                                            mock_cluster_load):
        """action() rejects bodies that contain more than one action."""
        body = {'do_stuff': {}, 'do_stuff2': {}}
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        cluster_id = Mock()
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        cluster = Mock()
        cluster.instances_without_server = [Mock()]
        cluster.datastore_version.manager = 'test_dsv'
        mock_cluster_load.return_value = cluster
        self.assertRaisesRegex(exception.TroveError,
                               'should have exactly one action specified',
                               self.controller.action, req,
                               body, tenant_id, cluster_id)

    @patch.object(models.Cluster, 'load')
    def test_controller_action_no_strategy(self,
                                           mock_cluster_load):
        """action() rejects actions the datastore strategy does not support."""
        body = {'do_stuff2': {}}
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        cluster_id = Mock()
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        db_info = DBCluster(ClusterTasks.NONE, id=cluster_id,
                            tenant_id=tenant_id)
        cluster = Cluster(context, db_info, datastore='test_ds',
                          datastore_version='test_dsv')
        mock_cluster_load.return_value = cluster
        self.assertRaisesRegex(exception.TroveError,
                               'Action do_stuff2 not supported',
                               self.controller.action, req,
                               body, tenant_id, cluster_id)

    @patch.object(strategy, 'load_api_strategy')
    @patch.object(models.Cluster, 'load')
    def test_controller_action_found(self,
                                     mock_cluster_load,
                                     mock_cluster_api_strategy):
        """action() dispatches a supported action to the loaded cluster."""
        body = {'grow': {}}
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        cluster_id = 'test_uuid'
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        cluster = Mock()
        cluster.instances_without_server = [Mock()]
        cluster.datastore_version.manager = 'test_dsv'
        mock_cluster_load.return_value = cluster
        self.controller.action(req, body, tenant_id, cluster_id)
        self.assertEqual(1, cluster.action.call_count)
|
import os.path
import logging
import hashlib
import math
import time
import requests
from threading import Thread, Lock
from requests_toolbelt.multipart.encoder import MultipartEncoder, MultipartEncoderMonitor
from . import center, progress, util
class InvalidLoginException(Exception):
    """Raised by NebulaClient._call when a required login attempt fails."""
    pass
class RequestFailedException(Exception):
    """Raised when an API request fails (connection error or non-200 status);
    the single argument is a short reason string.
    """
    pass
class AccessDeniedException(RequestFailedException):
    """Raised when the API reports the caller is 'unauthorized'."""
    pass
class BufferingMultipartEncoder(MultipartEncoder):
    """MultipartEncoder that pulls data into its buffer in small chunks.

    NOTE(review): overrides requests_toolbelt's internal
    ``_calculate_load_amount``/``_buffer`` contract — verify against the
    installed requests_toolbelt version.
    """

    chunk_size = 4 * 1024  # 4 KiB

    def _calculate_load_amount(self, read_size):
        # Bytes still missing from the buffer to satisfy this read.
        needed = read_size - self._buffer.len
        if needed > 0:
            # Load at least one chunk, or exactly what is missing if more.
            return max(self.chunk_size, needed)
        else:
            return 0
class FileWrapper:
    """Read-limited view over an open file handle that hashes what it reads.

    Exposes at most ``size`` bytes starting at the handle's current
    position (clamped to the end of the file) and feeds every returned
    chunk through SHA-256 so the uploaded part can be verified.  ``len``
    tracks the bytes still available (MultipartEncoder uses it to size
    the request body).
    """

    def __init__(self, hdl, size):
        self._hdl = hdl
        self._hasher = hashlib.new('sha256')
        pos = hdl.tell()
        hdl.seek(0, os.SEEK_END)
        # Correct the size if the remaining amount of data is less
        self._end = min(pos + size, hdl.tell())
        self.len = self._end - pos
        hdl.seek(pos)

    def read(self, size=None):
        """Read up to ``size`` bytes (all remaining bytes if None)."""
        if self.len <= 0:
            return b''
        if size is None or size > self.len:
            # Cap read-all / over-long requests at the window boundary.
            # (Testing `size is None` instead of truthiness also makes
            # read(0) correctly return b'' instead of the whole window.)
            size = self.len
        chunk = self._hdl.read(size)
        # Decrement by what was actually returned, not what was requested,
        # so a short read cannot drive `len` negative or skip bytes.
        self.len -= len(chunk)
        self._hasher.update(chunk)
        return chunk

    def get_hash(self):
        """Hex SHA-256 digest of all bytes read so far."""
        return self._hasher.hexdigest()
class UploadWorker(Thread):
    """Daemon thread that uploads parts handed out by a MultipartUploader.

    Starts itself on construction and loops until the manager has no
    parts left, unregistering itself from the manager on exit.
    """

    def __init__(self, manager):
        super(UploadWorker, self).__init__()
        self._manager = manager
        self.daemon = True
        # Start immediately; the manager only keeps a reference.
        self.start()

    def run(self):
        # Each worker gets its own file handle so seeks don't interfere.
        hdl = self._manager.get_hdl()
        neb = self._manager.neb
        uid = self._manager.upload_id
        try:
            while True:
                idx, offset = self._manager.get_part()
                if idx is None:
                    # We're done
                    return
                hdl.seek(offset)
                try:
                    # result = neb._call('multiupload/check', data={
                    #     'id': uid,
                    #     'part': idx
                    # })
                    # data = result.json()
                    # if data.get('result'):
                    #     # Already uploaded
                    #     self._manager.done(idx)
                    #     continue
                    wrapper = FileWrapper(hdl, self._manager.part_size)
                    enc = BufferingMultipartEncoder({
                        'id': uid,
                        'part': str(idx),
                        'file': ('upload', wrapper, 'application/octet-stream')
                    })
                    # enc_len = enc.len
                    # def cb(monitor):
                    #     progress.update(monitor.bytes_read / enc_len, 'Uploading %s...' % name)
                    # monitor = MultipartEncoderMonitor(enc, cb)
                    neb._call('multiupload/part', data=enc, timeout=10 * 60, retry=0, headers={  # timeout = 10 minutes (for ~10 MiB)
                        'Content-Type': enc.content_type
                    })
                    # Ask the server to check the part against the hash of
                    # what was actually sent; only then is the part done.
                    result = neb._call('multiupload/verify_part', data={
                        'id': uid,
                        'part': str(idx),
                        'checksum': wrapper.get_hash()
                    })
                    data = result.json()
                    if data.get('result'):
                        self._manager.done(idx)
                    else:
                        self._manager.failed(idx)
                except Exception:
                    # Report the failure so the part is re-queued.
                    logging.exception('Failed to upload part %d for upload %s' % (idx, self._manager.name))
                    self._manager.failed(idx)
        except Exception:
            logging.exception('Worker exception during multi-upload for %s' % self._manager.name)
        finally:
            hdl.close()
            self._manager._remove_worker(self)
class MultipartUploader:
    """Splits a file into fixed-size parts and uploads them in parallel.

    ``_parts_left`` and ``_parts_done`` are shared with the UploadWorker
    threads, so every access to them happens under ``_parts_lock``
    (previously several accesses were unlocked, which could leave the two
    collections inconsistent).
    """

    upload_id = None
    part_size = 10 * 1024 * 1024  # 10 MiB
    _retries = 0
    _aborted = False

    def __init__(self, nebula, name, path, content_checksum, vp_checksum):
        self.neb = nebula
        self.name = name
        self._path = path
        self._content_checksum = content_checksum
        self._vp_checksum = vp_checksum
        self._workers = []
        self._parts_left = []
        self._parts_done = set()
        self._parts_lock = Lock()
        self._progress = None

    def run(self, worker_count=3):
        """Hash, register, upload and verify the file.

        Returns True on success (including "server already has it"),
        False on failure or abort.
        """
        progress.update(0, 'Hashing...')
        _, checksum = util.gen_hash(self._path)
        size = os.stat(self._path).st_size
        part_count = math.ceil(size / self.part_size)
        with self._parts_lock:
            self._parts_left = list(range(part_count))
        self.upload_id = checksum
        progress.update(0.1, 'Registering...')
        try:
            result = self.neb._call('multiupload/start', data={
                'id': checksum,
                'size': size,
                'parts': part_count
            })
            data = result.json()
            if data.get('done'):
                # The server already has the complete file.
                progress.update(1, 'Already uploaded')
                with self._parts_lock:
                    self._parts_done = set(self._parts_left)
                    self._parts_left = []
                return True
            if not data.get('result'):
                raise Exception('Multiupload failed for unknown reasons')
            if data.get('finished_parts'):
                # Resume: skip parts the server has already verified.
                with self._parts_lock:
                    self._parts_done = set(data['finished_parts'])
                    for p in self._parts_done:
                        self._parts_left.remove(p)
        except Exception:
            logging.exception('Multiupload %s failed to start.' % self.name)
            return False
        progress.update(0.1, 'Starting workers...')
        for i in range(worker_count):
            self._workers.append(UploadWorker(self))
        self._update_status()
        # Wait for them to finish
        while self._workers:
            time.sleep(0.5)
            if self._progress:
                # Forward progress updates from the worker threads
                progress.update(*self._progress)
                self._progress = None
        if self._aborted:
            return False
        progress.update(0.95, 'Verifying...')
        try:
            result = self.neb._call('multiupload/finish', timeout=10 * 60 * 60, data={
                'id': checksum,
                'checksum': checksum,
                'content_checksum': self._content_checksum,
                'vp_checksum': self._vp_checksum
            })
            data = result.json()
            if not data.get('result'):
                raise Exception('Multiupload failed for unknown reasons')
        except Exception:
            logging.exception('Multiupload %s failed to finish.' % self.name)
            return False
        progress.update(1, 'Done')
        return True

    def _remove_worker(self, w):
        # Called by UploadWorker threads as they exit.
        if w in self._workers:
            self._workers.remove(w)

    def get_hdl(self):
        """Open a fresh read handle; each worker needs its own."""
        return open(self._path, 'rb')

    def get_part(self):
        """Hand out the next (part index, byte offset), or (None, None)."""
        with self._parts_lock:
            if self._parts_left and not self._aborted:
                p = self._parts_left.pop(0)
                return (p, self.part_size * p)
            else:
                return (None, None)

    def done(self, idx):
        """Record a verified part (called from worker threads)."""
        logging.debug('%s: Part %d done.' % (self.name, idx))
        with self._parts_lock:
            self._parts_done.add(idx)
        # _update_status() takes the lock itself and Lock is not
        # reentrant, so it must be called after releasing it here.
        self._update_status()

    def failed(self, idx):
        """Re-queue a failed part for another attempt."""
        logging.debug('%s: Part %d failed.' % (self.name, idx))
        self._retries += 1
        with self._parts_lock:
            self._parts_left.append(idx)
        self._update_status()

    def _update_status(self):
        # Snapshot the counters under the lock, then publish a progress
        # tuple for the main thread (run()) to forward.
        with self._parts_lock:
            done = len(self._parts_done)
            left = len(self._parts_left)
        total = float(done + left)
        if not total:
            # Zero-part upload (empty file): avoid dividing by zero.
            total = 1.0
        self._progress = (
            (done / total * 0.85) + 0.1,
            'Uploading... %3d / %3d, %d retried' % (done, total, self._retries)
        )

    def abort(self):
        """Stop the upload: drop pending parts so the workers drain out."""
        self._aborted = True
        with self._parts_lock:
            self._parts_left = []
class NebulaClient(object):
    """Client for the Nebula mod API.

    Wraps a shared requests session; handles login and the auth-token
    header, retries, and single/multipart file uploads.
    """

    # Auth token returned by login(); sent as X-KN-TOKEN on every call.
    _token = None
    # Shared requests session (util.HTTP_SESSION).
    _sess = None

    def __init__(self):
        self._sess = util.HTTP_SESSION
        # In-flight MultipartUploader objects, for abort_uploads().
        self._uploads = []

    def _call(self, path, method='POST', skip_login=False, check_code=True, retry=3, **kwargs):
        """Perform an API request and return the requests response.

        Logs in first if no token is held (unless skip_login), attaches
        the token header, retries up to `retry` extra times with a short
        pause, and raises RequestFailedException on connection errors or
        (if check_code) non-200 responses.
        """
        url = center.API + path
        if not skip_login and not self._token:
            if not self.login():
                raise InvalidLoginException()
        if self._token:
            headers = kwargs.setdefault('headers', {})
            headers['X-KN-TOKEN'] = self._token
        kwargs.setdefault('timeout', 60)
        for i in range(retry + 1):
            try:
                try:
                    result = self._sess.request(method, url, **kwargs)
                except requests.RequestException:
                    logging.exception('Failed to send %s request to %s!' % (method, path))
                    raise RequestFailedException('connection')
                if check_code and result.status_code != 200:
                    reason = 'unknown'
                    if result.status_code == 404:
                        reason = 'not found'
                    raise RequestFailedException(reason)
                break
            except Exception:
                if i == retry:
                    # Out of attempts; propagate the last failure.
                    raise
                else:
                    time.sleep(0.3)
        return result

    def login(self, user=None, password=None):
        """Log in and store the session token; True on success."""
        # TODO: Switch to token-based authentication instead of password-based.
        if not user:
            user = center.settings['neb_user']
            password = center.settings['neb_password']
        try:
            result = self._call('login', skip_login=True, data={
                'user': user,
                'password': password
            })
        except Exception:
            return False
        data = result.json()
        if data['result']:
            self._token = data['token']
            return True
        else:
            return False

    def register(self, user, password, email):
        """Create a new account (no retry; errors propagate)."""
        self._call('register', skip_login=True, retry=0, data={
            'name': user,
            'password': password,
            'email': email
        })
        return True

    def reset_password(self, user):
        """Request a password reset for the given user."""
        self._call('reset_password', skip_login=True, retry=0, data={'user': user})
        return True

    def get_editable_mods(self):
        """Return the list of mods the logged-in user may edit."""
        result = self._call('mod/editable', 'GET')
        return result.json()['mods']

    def is_editable(self, mid):
        result = self._call('mod/is_editable', data={'mid': mid})
        return result.json()

    def _upload_mod_logos(self, mod):
        """Upload the mod's logo and tile images if present.

        Returns [logo_checksum, tile_checksum]; entries stay None when
        the corresponding image is missing.
        """
        chks = [None, None]
        for i, prop in enumerate(('logo', 'tile')):
            im = getattr(mod, prop)
            if im and os.path.isfile(im):
                chks[i] = util.gen_hash(im)[1]
                self.upload_file(prop, im)
        return chks

    def check_mod_id(self, mid, title=None):
        """Ask the server whether a mod id (and title) is acceptable."""
        result = self._call('mod/check_id', data={
            'id': mid,
            'title': title
        })
        return result.json()['result']

    def create_mod(self, mod):
        """Register a new mod record (uploads logos first)."""
        logo_chk, tile_chk = self._upload_mod_logos(mod)
        self._call('mod/create', retry=0, json={
            'id': mod.mid,
            'title': mod.title,
            'type': mod.mtype,
            'parent': mod.parent,
            'logo': logo_chk,
            'tile': tile_chk,
            'first_release': mod.first_release.strftime('%Y-%m-%d') if mod.first_release else None,
            'members': []
        })
        return True

    def update_mod(self, mod):
        """Update an existing mod record (uploads logos first)."""
        # TODO: Check if these actually changed
        logo_chk, tile_chk = self._upload_mod_logos(mod)
        self._call('mod/update', json={
            'id': mod.mid,
            'title': mod.title,
            'logo': logo_chk,
            'tile': tile_chk,
            'first_release': mod.first_release.strftime('%Y-%m-%d') if mod.first_release else None,
            'members': [center.settings['neb_user']]
        })
        return True

    def preflight_release(self, mod, private=False):
        """Check release metadata server-side before uploading anything.

        Images/attachments are blanked out since nothing is uploaded yet.
        """
        meta = mod.get()
        meta['screenshots'] = []
        meta['attachments'] = []
        meta['banner'] = ''
        meta['private'] = private
        result = self._call('mod/release/preflight', json=meta)
        data = result.json()
        if not data:
            raise RequestFailedException()
        if data['result']:
            return True
        if data.get('reason') == 'unauthorized':
            raise AccessDeniedException()
        raise RequestFailedException(data.get('reason'))

    def _prepare_release(self, mod, private):
        """Upload release assets and replace their paths with checksums."""
        meta = mod.get()
        for prop in ('screenshots', 'attachments'):
            sums = []
            for fn in meta[prop]:
                path = os.path.join(mod.folder, fn)
                if os.path.isfile(path):
                    chk = util.gen_hash(path)[1]
                    self.upload_file(fn, path)
                    sums.append(chk)
            meta[prop] = sums
        if meta['banner']:
            image = os.path.join(mod.folder, meta['banner'])
            if os.path.isfile(image):
                meta['banner'] = util.gen_hash(image)[1]
                self.upload_file('banner', image)
        meta['private'] = private
        return meta

    def create_release(self, mod, private=False):
        """Publish a new release; True on success, raises on failure."""
        meta = self._prepare_release(mod, private)
        result = self._call('mod/release', json=meta)
        data = result.json()
        if not data:
            raise RequestFailedException('unknown')
        if data['result']:
            return True
        if data.get('reason') == 'unauthorized':
            raise AccessDeniedException(data['reason'])
        raise RequestFailedException(data.get('reason'))

    def update_release(self, mod, private=False):
        """Update an existing release; True on success, raises on failure."""
        meta = self._prepare_release(mod, private)
        result = self._call('mod/release/update', json=meta)
        data = result.json()
        if not data:
            raise RequestFailedException('unknown')
        if data['result']:
            return True
        if data.get('reason') == 'unauthorized':
            raise AccessDeniedException(data['reason'])
        raise RequestFailedException(data.get('reason'))

    def report_release(self, mod, message):
        """File a report against a specific release version."""
        result = self._call('mod/release/report', data={
            'mid': mod.mid,
            'version': str(mod.version),
            'message': message
        })
        data = result.json()
        if not data or not data.get('result'):
            raise RequestFailedException('unknown')
        return True

    def delete_release(self, mod):
        """Delete a specific release version."""
        result = self._call('mod/release/delete', retry=1, data={
            'mid': mod.mid,
            'version': str(mod.version)
        })
        data = result.json()
        if not data or not data.get('result'):
            if data.get('reason') == 'unauthorized':
                raise AccessDeniedException(data['reason'])
            else:
                raise RequestFailedException('unknown')
        return True

    def upload_file(self, name, path, fn=None, content_checksum=None, vp_checksum=None):
        """Upload a file in a single multipart request.

        Skips the upload if the server already has the checksum.
        """
        _, checksum = util.gen_hash(path)
        result = self._call('upload/check', data={'checksum': checksum})
        data = result.json()
        if data.get('result'):
            # Already uploaded
            return True
        if vp_checksum:
            # vp_checksum arrives as an (algo, digest) pair.
            assert vp_checksum[0] == 'sha256'
            vp_checksum = vp_checksum[1]
        hdl = open(path, 'rb')
        enc = MultipartEncoder({
            'checksum': checksum,
            'content_checksum': content_checksum,
            'vp_checksum': vp_checksum,
            'file': ('upload', hdl, 'application/octet-stream')
        })
        enc_len = enc.len

        def cb(monitor):
            progress.update(monitor.bytes_read / enc_len, 'Uploading %s...' % name)

        monitor = MultipartEncoderMonitor(enc, cb)
        # TODO: Implement incremental uploads to get rid of this insanity
        self._call('upload/file', data=monitor, timeout=3 * 60 * 60, headers={  # timeout = 3 hours
            'Content-Type': monitor.content_type
        })
        return True

    def multiupload_file(self, name, path, fn=None, content_checksum=None, vp_checksum=None):
        """Upload a file in parts via MultipartUploader; returns success."""
        progress.start_task(0, 1, '%s: %%s' % name)
        uploader = None
        try:
            uploader = MultipartUploader(self, name, path, content_checksum, vp_checksum)
            self._uploads.append(uploader)
            result = uploader.run()
        except Exception:
            logging.exception('MultipartUploader bug!')
            result = False
        finally:
            if uploader in self._uploads:
                self._uploads.remove(uploader)
            progress.finish_task()
        return result

    def abort_uploads(self):
        """Signal every in-flight multipart upload to stop."""
        for up in self._uploads:
            up.abort()

    def is_uploaded(self, checksum=None, content_checksum=None):
        """Check if a file is already on the server by either checksum.

        Returns (True, response data) or (False, None).
        """
        assert checksum or content_checksum
        if checksum:
            data = {'checksum': checksum}
        else:
            data = {'content_checksum': content_checksum}
        result = self._call('upload/check', data=data)
        data = result.json()
        if data.get('result'):
            return True, data
        else:
            return False, None

    def upload_log(self, content):
        """Upload a log; returns its public URL or None on failure."""
        result = self._call('log/upload', skip_login=True, data={'log': content})
        data = result.json()
        if data['result']:
            return center.WEB + 'log/' + data['id']
        else:
            return None

    def get_team_members(self, mid):
        result = self._call('mod/team/fetch', data={'mid': mid})
        return result.json()

    def update_team_members(self, mid, members):
        result = self._call('mod/team/update', json={
            'mid': mid,
            'members': members
        })
        return result.json()

    def get_private_mods(self):
        result = self._call('mod/list_private', method='GET', timeout=20)
        return result.json()
Use the parts lock for all accesses to the shared part-tracking state
In MultipartUploader, the _parts_left and _parts_done members are read
and written by both the main thread and the upload worker threads, but
several of those accesses happened without holding _parts_lock. That
could leave the two collections in an inconsistent state and caused
subsequent exceptions. Fix this by taking the lock around every access
to these variables.
import os.path
import logging
import hashlib
import math
import time
import requests
from threading import Thread, Lock
from requests_toolbelt.multipart.encoder import MultipartEncoder, MultipartEncoderMonitor
from . import center, progress, util
class InvalidLoginException(Exception):
    """Raised by NebulaClient._call when a required login attempt fails."""
    pass
class RequestFailedException(Exception):
    """Raised when an API request fails (connection error or non-200 status);
    the single argument is a short reason string.
    """
    pass
class AccessDeniedException(RequestFailedException):
    """Raised when the API reports the caller is 'unauthorized'."""
    pass
class BufferingMultipartEncoder(MultipartEncoder):
    """MultipartEncoder that pulls data into its buffer in small chunks.

    NOTE(review): overrides requests_toolbelt's internal
    ``_calculate_load_amount``/``_buffer`` contract — verify against the
    installed requests_toolbelt version.
    """

    chunk_size = 4 * 1024  # 4 KiB

    def _calculate_load_amount(self, read_size):
        # Bytes still missing from the buffer to satisfy this read.
        needed = read_size - self._buffer.len
        if needed > 0:
            # Load at least one chunk, or exactly what is missing if more.
            return max(self.chunk_size, needed)
        else:
            return 0
class FileWrapper:
    """Read-limited view over an open file handle that hashes what it reads.

    Exposes at most ``size`` bytes starting at the handle's current
    position (clamped to the end of the file) and feeds every returned
    chunk through SHA-256 so the uploaded part can be verified.  ``len``
    tracks the bytes still available (MultipartEncoder uses it to size
    the request body).
    """

    def __init__(self, hdl, size):
        self._hdl = hdl
        self._hasher = hashlib.new('sha256')
        pos = hdl.tell()
        hdl.seek(0, os.SEEK_END)
        # Correct the size if the remaining amount of data is less
        self._end = min(pos + size, hdl.tell())
        self.len = self._end - pos
        hdl.seek(pos)

    def read(self, size=None):
        """Read up to ``size`` bytes (all remaining bytes if None)."""
        if self.len <= 0:
            return b''
        if size is None or size > self.len:
            # Cap read-all / over-long requests at the window boundary.
            # (Testing `size is None` instead of truthiness also makes
            # read(0) correctly return b'' instead of the whole window.)
            size = self.len
        chunk = self._hdl.read(size)
        # Decrement by what was actually returned, not what was requested,
        # so a short read cannot drive `len` negative or skip bytes.
        self.len -= len(chunk)
        self._hasher.update(chunk)
        return chunk

    def get_hash(self):
        """Hex SHA-256 digest of all bytes read so far."""
        return self._hasher.hexdigest()
class UploadWorker(Thread):
    """Daemon thread that uploads parts handed out by a MultipartUploader.

    Starts itself on construction and loops until the manager has no
    parts left, unregistering itself from the manager on exit.
    """

    def __init__(self, manager):
        super(UploadWorker, self).__init__()
        self._manager = manager
        self.daemon = True
        # Start immediately; the manager only keeps a reference.
        self.start()

    def run(self):
        # Each worker gets its own file handle so seeks don't interfere.
        hdl = self._manager.get_hdl()
        neb = self._manager.neb
        uid = self._manager.upload_id
        try:
            while True:
                idx, offset = self._manager.get_part()
                if idx is None:
                    # We're done
                    return
                hdl.seek(offset)
                try:
                    # result = neb._call('multiupload/check', data={
                    #     'id': uid,
                    #     'part': idx
                    # })
                    # data = result.json()
                    # if data.get('result'):
                    #     # Already uploaded
                    #     self._manager.done(idx)
                    #     continue
                    wrapper = FileWrapper(hdl, self._manager.part_size)
                    enc = BufferingMultipartEncoder({
                        'id': uid,
                        'part': str(idx),
                        'file': ('upload', wrapper, 'application/octet-stream')
                    })
                    # enc_len = enc.len
                    # def cb(monitor):
                    #     progress.update(monitor.bytes_read / enc_len, 'Uploading %s...' % name)
                    # monitor = MultipartEncoderMonitor(enc, cb)
                    neb._call('multiupload/part', data=enc, timeout=10 * 60, retry=0, headers={  # timeout = 10 minutes (for ~10 MiB)
                        'Content-Type': enc.content_type
                    })
                    # Ask the server to check the part against the hash of
                    # what was actually sent; only then is the part done.
                    result = neb._call('multiupload/verify_part', data={
                        'id': uid,
                        'part': str(idx),
                        'checksum': wrapper.get_hash()
                    })
                    data = result.json()
                    if data.get('result'):
                        self._manager.done(idx)
                    else:
                        self._manager.failed(idx)
                except Exception:
                    # Report the failure so the part is re-queued.
                    logging.exception('Failed to upload part %d for upload %s' % (idx, self._manager.name))
                    self._manager.failed(idx)
        except Exception:
            logging.exception('Worker exception during multi-upload for %s' % self._manager.name)
        finally:
            hdl.close()
            self._manager._remove_worker(self)
class MultipartUploader:
    """Upload a file in fixed-size parts using several UploadWorker threads.

    All shared part-tracking state (``_parts_left``, ``_parts_done``,
    ``_retries``) is only touched while holding ``_parts_lock``, since the
    worker threads access it concurrently.
    """

    upload_id = None
    part_size = 10 * 1024 * 1024  # 10 MiB
    _retries = 0
    _aborted = False

    def __init__(self, nebula, name, path, content_checksum, vp_checksum):
        self.neb = nebula
        self.name = name
        self._path = path
        self._content_checksum = content_checksum
        self._vp_checksum = vp_checksum
        self._workers = []
        self._parts_left = []
        self._parts_done = set()
        self._parts_lock = Lock()
        # Latest (fraction, message) produced by workers; consumed by run().
        self._progress = None

    def run(self, worker_count=3):
        """Perform the whole upload; returns True on success."""
        progress.update(0, 'Hashing...')
        _, checksum = util.gen_hash(self._path)
        size = os.stat(self._path).st_size
        with self._parts_lock:
            self._parts_left = list(range(math.ceil(size / self.part_size)))
        self.upload_id = checksum
        progress.update(0.1, 'Registering...')
        try:
            # No workers are running yet, so reading _parts_left here is safe.
            result = self.neb._call('multiupload/start', data={
                'id': checksum,
                'size': size,
                'parts': len(self._parts_left)
            })
            data = result.json()
            if data.get('done'):
                progress.update(1, 'Already uploaded')
                with self._parts_lock:
                    self._parts_done = set(self._parts_left)
                    self._parts_left = []
                return True
            if not data.get('result'):
                raise Exception('Multiupload failed for unknown reasons')
            if data.get('finished_parts'):
                with self._parts_lock:
                    self._parts_done = set(data['finished_parts'])
                    # Keep only parts the server does not already have.
                    # Filtering (instead of .remove() in a loop) can't raise
                    # ValueError if the server reports an index we never
                    # scheduled.
                    self._parts_left = [p for p in self._parts_left
                                        if p not in self._parts_done]
        except Exception:
            logging.exception('Multiupload %s failed to start.' % self.name)
            return False
        progress.update(0.1, 'Starting workers...')
        for i in range(worker_count):
            self._workers.append(UploadWorker(self))
        self._update_status()
        # Wait for them to finish
        while self._workers:
            time.sleep(0.5)
            if self._progress:
                # Forward progress updates from the worker threads
                progress.update(*self._progress)
                self._progress = None
        if self._aborted:
            return False
        progress.update(0.95, 'Verifying...')
        try:
            result = self.neb._call('multiupload/finish', timeout=10 * 60 * 60, data={
                'id': checksum,
                'checksum': checksum,
                'content_checksum': self._content_checksum,
                'vp_checksum': self._vp_checksum
            })
            data = result.json()
            if not data.get('result'):
                raise Exception('Multiupload failed for unknown reasons')
        except Exception:
            # This is the finish step; the old message said "failed to
            # start", which was a copy-paste leftover.
            logging.exception('Multiupload %s failed to finish.' % self.name)
            return False
        progress.update(1, 'Done')
        return True

    def _remove_worker(self, w):
        """Deregister a finished worker (called from the worker thread)."""
        if w in self._workers:
            self._workers.remove(w)

    def get_hdl(self):
        """Open a fresh read handle; each worker gets its own for seeking."""
        return open(self._path, 'rb')

    def get_part(self):
        """Hand out the next (part index, byte offset), or (None, None)."""
        with self._parts_lock:
            if self._parts_left and not self._aborted:
                p = self._parts_left.pop(0)
                return (p, self.part_size * p)
            else:
                return (None, None)

    def done(self, idx):
        """Worker callback: record part *idx* as successfully uploaded."""
        logging.debug('%s: Part %d done.' % (self.name, idx))
        with self._parts_lock:
            self._parts_done.add(idx)
        self._update_status()

    def failed(self, idx):
        """Worker callback: re-queue part *idx* and count the retry."""
        logging.debug('%s: Part %d failed.' % (self.name, idx))
        with self._parts_lock:
            # += on the counter is not atomic; mutate it under the lock too.
            self._retries += 1
            self._parts_left.append(idx)
        self._update_status()

    def _update_status(self):
        """Recompute the (fraction, message) tuple shown by run().

        Must NOT be called while holding _parts_lock (it acquires it and
        the lock is not reentrant).
        """
        with self._parts_lock:
            done = len(self._parts_done)
            left = len(self._parts_left)
        if done + left == 0:
            logging.warning('No parts for updating status!')
            return
        total = float(done + left)
        self._progress = (
            (done / total * 0.85) + 0.1,
            'Uploading... %3d / %3d, %d retried' % (done, total, self._retries)
        )

    def abort(self):
        """Stop the upload: drop the queue so workers exit on next get_part()."""
        with self._parts_lock:
            self._aborted = True
            self._parts_left = []
class NebulaClient(object):
    """Client for the Nebula mod API.

    Wraps a shared HTTP session with token login, request retries, and
    helpers for mod/release management and (multipart) file uploads.
    """

    # Auth token obtained by login(); sent as X-KN-TOKEN on every call.
    _token = None
    # Shared requests session (util.HTTP_SESSION).
    _sess = None

    def __init__(self):
        self._sess = util.HTTP_SESSION
        # MultipartUploader instances currently running (see abort_uploads()).
        self._uploads = []

    def _call(self, path, method='POST', skip_login=False, check_code=True, retry=3, **kwargs):
        """Send an API request to ``center.API + path`` and return the response.

        Logs in first unless *skip_login* is set; retries up to *retry*
        additional times on connection errors or (when *check_code*) non-200
        responses, sleeping 0.3s between attempts.
        Raises InvalidLoginException or RequestFailedException.
        """
        url = center.API + path
        if not skip_login and not self._token:
            if not self.login():
                raise InvalidLoginException()
        if self._token:
            headers = kwargs.setdefault('headers', {})
            headers['X-KN-TOKEN'] = self._token
        kwargs.setdefault('timeout', 60)
        for i in range(retry + 1):
            try:
                try:
                    result = self._sess.request(method, url, **kwargs)
                except requests.RequestException:
                    logging.exception('Failed to send %s request to %s!' % (method, path))
                    raise RequestFailedException('connection')
                if check_code and result.status_code != 200:
                    reason = 'unknown'
                    if result.status_code == 404:
                        reason = 'not found'
                    raise RequestFailedException(reason)
                break
            except Exception:
                if i == retry:
                    # Out of attempts; propagate the last failure.
                    raise
                else:
                    time.sleep(0.3)
        return result

    def login(self, user=None, password=None):
        """Log in and store the session token; returns True on success."""
        # TODO: Switch to token-based authentication instead of password-based.
        if not user:
            user = center.settings['neb_user']
            password = center.settings['neb_password']
        try:
            result = self._call('login', skip_login=True, data={
                'user': user,
                'password': password
            })
        except Exception:
            return False
        data = result.json()
        if data['result']:
            self._token = data['token']
            return True
        else:
            return False

    def register(self, user, password, email):
        """Create a new account; returns True (failures raise)."""
        self._call('register', skip_login=True, retry=0, data={
            'name': user,
            'password': password,
            'email': email
        })
        return True

    def reset_password(self, user):
        """Request a password reset for *user*; returns True (failures raise)."""
        self._call('reset_password', skip_login=True, retry=0, data={'user': user})
        return True

    def get_editable_mods(self):
        """Return the list of mods the current user may edit."""
        result = self._call('mod/editable', 'GET')
        return result.json()['mods']

    def is_editable(self, mid):
        """Ask the server whether mod *mid* is editable by this user."""
        result = self._call('mod/is_editable', data={'mid': mid})
        return result.json()

    def _upload_mod_logos(self, mod):
        """Upload the mod's logo and tile images and return their checksums.

        Returns ``[logo_chk, tile_chk]``; an entry stays None when the
        image is unset or missing on disk.
        """
        chks = [None, None]
        for i, prop in enumerate(('logo', 'tile')):
            im = getattr(mod, prop)
            if im and os.path.isfile(im):
                chks[i] = util.gen_hash(im)[1]
                self.upload_file(prop, im)
        return chks

    def check_mod_id(self, mid, title=None):
        """Check whether the mod ID (and optional title) is available."""
        result = self._call('mod/check_id', data={
            'id': mid,
            'title': title
        })
        return result.json()['result']

    def create_mod(self, mod):
        """Register a new mod on Nebula (uploads logo/tile first)."""
        logo_chk, tile_chk = self._upload_mod_logos(mod)
        self._call('mod/create', retry=0, json={
            'id': mod.mid,
            'title': mod.title,
            'type': mod.mtype,
            'parent': mod.parent,
            'logo': logo_chk,
            'tile': tile_chk,
            'first_release': mod.first_release.strftime('%Y-%m-%d') if mod.first_release else None,
            'members': []
        })
        return True

    def update_mod(self, mod):
        """Push updated mod metadata (re-uploads logo/tile)."""
        # TODO: Check if these actually changed
        logo_chk, tile_chk = self._upload_mod_logos(mod)
        self._call('mod/update', json={
            'id': mod.mid,
            'title': mod.title,
            'logo': logo_chk,
            'tile': tile_chk,
            'first_release': mod.first_release.strftime('%Y-%m-%d') if mod.first_release else None,
            'members': [center.settings['neb_user']]
        })
        return True

    def preflight_release(self, mod, private=False):
        """Dry-run a release; True if the server would accept it.

        Raises AccessDeniedException when unauthorized, otherwise
        RequestFailedException on rejection.
        """
        meta = mod.get()
        # Strip fields the preflight check does not need.
        meta['screenshots'] = []
        meta['attachments'] = []
        meta['banner'] = ''
        meta['private'] = private
        result = self._call('mod/release/preflight', json=meta)
        data = result.json()
        if not data:
            raise RequestFailedException()
        if data['result']:
            return True
        if data.get('reason') == 'unauthorized':
            raise AccessDeniedException()
        raise RequestFailedException(data.get('reason'))

    def _prepare_release(self, mod, private):
        """Upload a release's images and replace local paths with checksums.

        Returns the release metadata dict ready for the release endpoints.
        """
        meta = mod.get()
        for prop in ('screenshots', 'attachments'):
            sums = []
            for fn in meta[prop]:
                path = os.path.join(mod.folder, fn)
                if os.path.isfile(path):
                    chk = util.gen_hash(path)[1]
                    self.upload_file(fn, path)
                    sums.append(chk)
            meta[prop] = sums
        if meta['banner']:
            image = os.path.join(mod.folder, meta['banner'])
            if os.path.isfile(image):
                meta['banner'] = util.gen_hash(image)[1]
                self.upload_file('banner', image)
        meta['private'] = private
        return meta

    def create_release(self, mod, private=False):
        """Publish a new release for *mod*; True on success."""
        meta = self._prepare_release(mod, private)
        result = self._call('mod/release', json=meta)
        data = result.json()
        if not data:
            raise RequestFailedException('unknown')
        if data['result']:
            return True
        if data.get('reason') == 'unauthorized':
            raise AccessDeniedException(data['reason'])
        raise RequestFailedException(data.get('reason'))

    def update_release(self, mod, private=False):
        """Update an existing release; True on success."""
        meta = self._prepare_release(mod, private)
        result = self._call('mod/release/update', json=meta)
        data = result.json()
        if not data:
            raise RequestFailedException('unknown')
        if data['result']:
            return True
        if data.get('reason') == 'unauthorized':
            raise AccessDeniedException(data['reason'])
        raise RequestFailedException(data.get('reason'))

    def report_release(self, mod, message):
        """File a report against a release; True on success."""
        result = self._call('mod/release/report', data={
            'mid': mod.mid,
            'version': str(mod.version),
            'message': message
        })
        data = result.json()
        if not data or not data.get('result'):
            raise RequestFailedException('unknown')
        return True

    def delete_release(self, mod):
        """Delete a release; True on success.

        Raises AccessDeniedException when unauthorized.
        """
        result = self._call('mod/release/delete', retry=1, data={
            'mid': mod.mid,
            'version': str(mod.version)
        })
        data = result.json()
        if not data or not data.get('result'):
            # NOTE(review): if data is None here, data.get below raises
            # AttributeError instead of the intended exception — verify.
            if data.get('reason') == 'unauthorized':
                raise AccessDeniedException(data['reason'])
            else:
                raise RequestFailedException('unknown')
        return True

    def upload_file(self, name, path, fn=None, content_checksum=None, vp_checksum=None):
        """Upload *path* in one request unless the server already has it.

        *name* is only used for progress messages; returns True.
        """
        _, checksum = util.gen_hash(path)
        result = self._call('upload/check', data={'checksum': checksum})
        data = result.json()
        if data.get('result'):
            # Already uploaded
            return True
        if vp_checksum:
            assert vp_checksum[0] == 'sha256'
            vp_checksum = vp_checksum[1]
        # NOTE(review): hdl is never closed (success or failure) — the
        # handle leaks until GC; consider a with-block.
        hdl = open(path, 'rb')
        enc = MultipartEncoder({
            'checksum': checksum,
            'content_checksum': content_checksum,
            'vp_checksum': vp_checksum,
            'file': ('upload', hdl, 'application/octet-stream')
        })
        enc_len = enc.len

        def cb(monitor):
            progress.update(monitor.bytes_read / enc_len, 'Uploading %s...' % name)
        monitor = MultipartEncoderMonitor(enc, cb)
        # TODO: Implement incremental uploads to get rid of this insanity
        self._call('upload/file', data=monitor, timeout=3 * 60 * 60, headers={  # timeout = 3 hours
            'Content-Type': monitor.content_type
        })
        return True

    def multiupload_file(self, name, path, fn=None, content_checksum=None, vp_checksum=None):
        """Upload *path* via MultipartUploader; returns its success flag."""
        progress.start_task(0, 1, '%s: %%s' % name)
        uploader = None
        try:
            uploader = MultipartUploader(self, name, path, content_checksum, vp_checksum)
            self._uploads.append(uploader)
            result = uploader.run()
        except Exception:
            logging.exception('MultipartUploader bug!')
            result = False
        finally:
            if uploader in self._uploads:
                self._uploads.remove(uploader)
            progress.finish_task()
        return result

    def abort_uploads(self):
        """Ask every running MultipartUploader to stop."""
        for up in self._uploads:
            up.abort()

    def is_uploaded(self, checksum=None, content_checksum=None):
        """Check server-side presence by file or content checksum.

        Returns ``(True, server_data)`` or ``(False, None)``.
        """
        assert checksum or content_checksum
        if checksum:
            data = {'checksum': checksum}
        else:
            data = {'content_checksum': content_checksum}
        result = self._call('upload/check', data=data)
        data = result.json()
        if data.get('result'):
            return True, data
        else:
            return False, None

    def upload_log(self, content):
        """Upload a debug log; returns its public URL or None on failure."""
        result = self._call('log/upload', skip_login=True, data={'log': content})
        data = result.json()
        if data['result']:
            return center.WEB + 'log/' + data['id']
        else:
            return None

    def get_team_members(self, mid):
        """Fetch the member list for mod *mid*."""
        result = self._call('mod/team/fetch', data={'mid': mid})
        return result.json()

    def update_team_members(self, mid, members):
        """Replace the member list for mod *mid*."""
        result = self._call('mod/team/update', json={
            'mid': mid,
            'members': members
        })
        return result.json()

    def get_private_mods(self):
        """Fetch the current user's private mods."""
        result = self._call('mod/list_private', method='GET', timeout=20)
        return result.json()
|
import numpy as np
from algorithms.svd import SVD
from utils.data_io import get_user_movie_time_rating
import utils.c_interface
import sys
class SVDEuclidean(SVD):
    """SVD recommender trained with a Euclidean (regularized) update rule.

    Feature matrices are float32 numpy arrays; the per-epoch update can run
    either in pure Python or through the C extension (``self.run_c``).
    """

    def initialize_users_and_movies(self):
        """Allocate and randomly initialize the user/movie feature matrices."""
        self.max_user = self.calculate_max_user()
        self.max_movie = self.calculate_max_movie()
        # Re-seed NumPy's global RNG from OS entropy so independently
        # spawned models/processes do not share identical RNG state and
        # produce identical feature matrices.
        np.random.seed()
        self.users = np.array(
            np.random.normal(loc=0.0, scale=self.feature_initial,
                             size=(self.max_user, self.num_features)),
            dtype=np.float32)
        self.movies = np.array(
            np.random.normal(loc=0.0, scale=self.feature_initial,
                             size=(self.max_movie, self.num_features)),
            dtype=np.float32)

    def train(self, train_points, stats, epochs=1):
        """Initialize features and run *epochs* training epochs."""
        self.set_train_points(train_points=train_points)
        self.set_stats(stats=stats)
        self.initialize_users_and_movies()
        for epoch in range(epochs):
            if self.debug:
                print('Epoch {}'.format(epoch+1))
                print('movies: {}'.format(self.movies))
                print('users: {}'.format(self.users))
                # Drop into the debugger if training diverged into NaNs.
                # (Debug-mode only; assumed intentional — TODO confirm.)
                if np.isnan(np.sum(self.movies)) or np.isnan(np.sum(self.users)):
                    print("So, I found a NaN..")
                    import pdb
                    pdb.set_trace()
            if self.run_c:
                self.train_epoch_in_c()
            else:
                self.train_epoch()

    def train_more(self, train_points=None, epochs=1):
        """Continue training without re-initializing the feature matrices."""
        if train_points is not None:
            self.set_train_points(train_points)
        for epoch in range(epochs):
            if self.run_c:
                self.train_epoch_in_c()
            else:
                self.train_epoch()

    def train_epoch(self):
        """One pass over train_points in Python (prints a dot per 100k points)."""
        count = 0
        for train_point in self.train_points:
            count += 1
            if count % 100000 == 0:
                sys.stdout.write('.')
                sys.stdout.flush()
            user, movie, _, rating = get_user_movie_time_rating(train_point)
            self.update_euclidean_all_features(user=user, movie=movie,
                                               rating=rating)

    def train_epoch_in_c(self):
        """One pass over train_points via the C extension."""
        utils.c_interface.c_svd_euclidean_train_epoch(
            train_points=self.train_points,
            users=self.users,
            user_offsets=self.stats.user_offsets,
            movies=self.movies,
            movie_averages=self.stats.movie_averages,
            num_features=self.num_features,
            learn_rate=self.learn_rate,
            k_factor=self.k_factor
        )

    def update_euclidean_all_features(self, user, movie, rating):
        """Apply one rating's gradient step to every feature."""
        prediction_error = self.calculate_prediction_error(user=user, movie=movie,
                                                           rating=rating)
        for feature in range(self.num_features):
            self.update_user_and_movie(user=user, movie=movie, feature=feature,
                                       error=prediction_error)
Add seed to random num generator in SVDEuclidean
Numpy documentation
claims that this is called upon initialization of
RandomState, which I can only assume is initialized
with import numpy. But I would MUCH rather be safe
and include it again than be sorry and have drastically
awful blending results in a few hours.
import numpy as np
from algorithms.svd import SVD
from utils.data_io import get_user_movie_time_rating
import utils.c_interface
import sys
class SVDEuclidean(SVD):
    """SVD recommender trained with a Euclidean (regularized) update rule.

    Feature matrices are float32 numpy arrays; the per-epoch update can run
    either in pure Python or through the C extension (``self.run_c``).
    """

    def initialize_users_and_movies(self):
        """Allocate and randomly initialize the user/movie feature matrices."""
        self.max_user = self.calculate_max_user()
        self.max_movie = self.calculate_max_movie()
        # Re-seed NumPy's global RNG from OS entropy so separate runs or
        # processes don't share identical RNG state.
        np.random.seed()
        self.users = np.array(
            np.random.normal(loc=0.0, scale=self.feature_initial,
                             size=(self.max_user, self.num_features)),
            dtype=np.float32)
        self.movies = np.array(
            np.random.normal(loc=0.0, scale=self.feature_initial,
                             size=(self.max_movie, self.num_features)),
            dtype=np.float32)

    def train(self, train_points, stats, epochs=1):
        """Initialize features and run *epochs* training epochs."""
        self.set_train_points(train_points=train_points)
        self.set_stats(stats=stats)
        self.initialize_users_and_movies()
        for epoch in range(epochs):
            if self.debug:
                print('Epoch {}'.format(epoch+1))
                print('movies: {}'.format(self.movies))
                print('users: {}'.format(self.users))
                # Drop into the debugger if training diverged into NaNs.
                # (Debug-mode only; assumed intentional — TODO confirm.)
                if np.isnan(np.sum(self.movies)) or np.isnan(np.sum(self.users)):
                    print("So, I found a NaN..")
                    import pdb
                    pdb.set_trace()
            if self.run_c:
                self.train_epoch_in_c()
            else:
                self.train_epoch()

    def train_more(self, train_points=None, epochs=1):
        """Continue training without re-initializing the feature matrices."""
        if train_points is not None:
            self.set_train_points(train_points)
        for epoch in range(epochs):
            if self.run_c:
                self.train_epoch_in_c()
            else:
                self.train_epoch()

    def train_epoch(self):
        """One pass over train_points in Python (prints a dot per 100k points)."""
        count = 0
        for train_point in self.train_points:
            count += 1
            if count % 100000 == 0:
                sys.stdout.write('.')
                sys.stdout.flush()
            user, movie, _, rating = get_user_movie_time_rating(train_point)
            self.update_euclidean_all_features(user=user, movie=movie,
                                               rating=rating)

    def train_epoch_in_c(self):
        """One pass over train_points via the C extension."""
        utils.c_interface.c_svd_euclidean_train_epoch(
            train_points=self.train_points,
            users=self.users,
            user_offsets=self.stats.user_offsets,
            movies=self.movies,
            movie_averages=self.stats.movie_averages,
            num_features=self.num_features,
            learn_rate=self.learn_rate,
            k_factor=self.k_factor
        )

    def update_euclidean_all_features(self, user, movie, rating):
        """Apply one rating's gradient step to every feature."""
        prediction_error = self.calculate_prediction_error(user=user, movie=movie,
                                                           rating=rating)
        for feature in range(self.num_features):
            self.update_user_and_movie(user=user, movie=movie, feature=feature,
                                       error=prediction_error)
|
import bloop.util
import enum
# Tri-state result of comparing a current value against its last-loaded value.
_DIFF = enum.Enum('DIFF', ['SET', 'DEL', 'NOOP'])
# Sentinel marking "attribute was not present/loaded" (distinct from None).
_MISSING = bloop.util.Sentinel('MISSING')
# Name of the per-object dict that stores last-loaded DynamoDB values.
_TRACKING_ATTR_NAME = '__tracking__'
def _tracking_dict(obj):
    """Return obj's tracking dict, creating and attaching one if absent."""
    existing = getattr(obj, _TRACKING_ATTR_NAME, None)
    if existing is not None:
        return existing
    fresh = {}
    setattr(obj, _TRACKING_ATTR_NAME, fresh)
    return fresh
def _set_value(obj, name, value):
    """Record *value* as the last-loaded DynamoDB value for attr *name*.

    Overwrites any previous entry; marks the attr as loaded from DynamoDB.
    TODO: Should this use copy.deepcopy()? Why would someone mutate the
    value before it is passed to the column's typedef for loading?
    """
    _tracking_dict(obj)[name] = value
def _del_value(obj, name):
    """Forget the tracked value for attr *name* (treat it as not loaded).

    Only for attrs that were EXPECTED in a load but came back empty; NOT
    for attrs that simply weren't loaded (e.g. a sparse index projection).
    """
    tracking = _tracking_dict(obj)
    tracking.pop(name, None)
def _get_value(obj, name):
    """Return the tracked value for attr *name*, or _MISSING if untracked."""
    tracking = _tracking_dict(obj)
    return tracking.get(name, _MISSING)
def _get_tracking(obj):
    """Map each column's dynamo_name to its tracked value (or _MISSING)."""
    return {column.dynamo_name: _get_value(obj, column.dynamo_name)
            for column in obj.Meta.columns}
def _get_current(obj, engine):
    """Return {dynamo_name: current dumped value} for *obj*.

    Attributes not set on the object are replaced with _MISSING.
    """
    # BUG FIX: the class of an object is obj.__class__; obj.__cls__ does
    # not exist and raised AttributeError on every call.
    attrs = engine.__dump__(obj.__class__, obj)
    for column in obj.Meta.columns:
        if column.dynamo_name not in attrs:
            attrs[column.dynamo_name] = _MISSING
    return attrs
def _diff_value(current, loaded):
    """Classify the change between a current and a last-loaded value.

    current | loaded  | result
    --------|---------|-----------
    foo     | foo     | _DIFF.NOOP
    MISSING | MISSING | _DIFF.NOOP
    MISSING | bar     | _DIFF.DEL
    foo     | bar     | _DIFF.SET
    foo     | MISSING | _DIFF.SET
    """
    unchanged = bloop.util.ordered(current) == bloop.util.ordered(loaded)
    if unchanged:
        return _DIFF.NOOP
    if current is _MISSING:
        return _DIFF.DEL
    return _DIFF.SET
def diff_obj(obj, engine):
    """Compute which columns to SET and which to DEL when saving *obj*.

    Compares the object's current values against its last-loaded
    (tracking) values and returns:
        {
            "set": [Column<Foo>, Column<Bar>, ...],
            "del": [Column<Baz>, ...]
        }
    """
    current = _get_current(obj, engine)
    tracking = _get_tracking(obj)
    changes = {"set": [], "del": []}
    for column in obj.Meta.columns:
        name = column.dynamo_name
        change = _diff_value(current[name], tracking[name])
        if change is _DIFF.SET:
            changes["set"].append(column)
        elif change is _DIFF.DEL:
            changes["del"].append(column)
        # _DIFF.NOOP: leave the column alone
    return changes
def update(obj, attrs, expected):
    """Sync the obj's tracking dict from a load result.

    Loading an object by table should expect all columns; loading by index
    should expect all projected columns (except when using an LSI and
    selecting more than the projection, in which case all should be
    expected since LSIs fetch from the table).

    *attrs* is a dict {dynamo_name: dumped value}; *expected* is a list of
    column objects.

    expected | present | change
    ---------|---------|-------
    True     | True    | SET
    True     | False   | DEL
    False    | either  | NOOP
    """
    for column in expected:
        name = column.dynamo_name
        # attrs is a dumped DynamoDB item, so the sentinel itself can never
        # appear as a value; a plain membership test is equivalent.
        if name in attrs:
            _set_value(obj, name, attrs[name])
        else:
            _del_value(obj, name)
Typo in tracking. #4
import bloop.util
import enum
# Tri-state result of comparing a current value against its last-loaded value.
_DIFF = enum.Enum('DIFF', ['SET', 'DEL', 'NOOP'])
# Sentinel marking "attribute was not present/loaded" (distinct from None).
_MISSING = bloop.util.Sentinel('MISSING')
# Name of the per-object dict that stores last-loaded DynamoDB values.
_TRACKING_ATTR_NAME = '__tracking__'
def _tracking_dict(obj):
    """Return obj's tracking dict, creating and attaching one if absent."""
    existing = getattr(obj, _TRACKING_ATTR_NAME, None)
    if existing is not None:
        return existing
    fresh = {}
    setattr(obj, _TRACKING_ATTR_NAME, fresh)
    return fresh
def _set_value(obj, name, value):
    """Record *value* as the last-loaded DynamoDB value for attr *name*.

    Overwrites any previous entry; marks the attr as loaded from DynamoDB.
    TODO: Should this use copy.deepcopy()? Why would someone mutate the
    value before it is passed to the column's typedef for loading?
    """
    _tracking_dict(obj)[name] = value
def _del_value(obj, name):
    """Forget the tracked value for attr *name* (treat it as not loaded).

    Only for attrs that were EXPECTED in a load but came back empty; NOT
    for attrs that simply weren't loaded (e.g. a sparse index projection).
    """
    tracking = _tracking_dict(obj)
    tracking.pop(name, None)
def _get_value(obj, name):
    """Return the tracked value for attr *name*, or _MISSING if untracked."""
    tracking = _tracking_dict(obj)
    return tracking.get(name, _MISSING)
def _get_tracking(obj):
    """Map each column's dynamo_name to its tracked value (or _MISSING)."""
    return {column.dynamo_name: _get_value(obj, column.dynamo_name)
            for column in obj.Meta.columns}
def _get_current(obj, engine):
    """Return {dynamo_name: current dumped value} for *obj*.

    Attributes not set on the object are replaced with _MISSING.
    """
    attrs = engine.__dump__(obj.__class__, obj)
    absent = [column.dynamo_name for column in obj.Meta.columns
              if column.dynamo_name not in attrs]
    for name in absent:
        attrs[name] = _MISSING
    return attrs
def _diff_value(current, loaded):
    """Classify the change between a current and a last-loaded value.

    current | loaded  | result
    --------|---------|-----------
    foo     | foo     | _DIFF.NOOP
    MISSING | MISSING | _DIFF.NOOP
    MISSING | bar     | _DIFF.DEL
    foo     | bar     | _DIFF.SET
    foo     | MISSING | _DIFF.SET
    """
    unchanged = bloop.util.ordered(current) == bloop.util.ordered(loaded)
    if unchanged:
        return _DIFF.NOOP
    if current is _MISSING:
        return _DIFF.DEL
    return _DIFF.SET
def diff_obj(obj, engine):
    """Compute which columns to SET and which to DEL when saving *obj*.

    Compares the object's current values against its last-loaded
    (tracking) values and returns:
        {
            "set": [Column<Foo>, Column<Bar>, ...],
            "del": [Column<Baz>, ...]
        }
    """
    current = _get_current(obj, engine)
    tracking = _get_tracking(obj)
    changes = {"set": [], "del": []}
    for column in obj.Meta.columns:
        name = column.dynamo_name
        change = _diff_value(current[name], tracking[name])
        if change is _DIFF.SET:
            changes["set"].append(column)
        elif change is _DIFF.DEL:
            changes["del"].append(column)
        # _DIFF.NOOP: leave the column alone
    return changes
def update(obj, attrs, expected):
    """Sync the obj's tracking dict from a load result.

    Loading an object by table should expect all columns; loading by index
    should expect all projected columns (except when using an LSI and
    selecting more than the projection, in which case all should be
    expected since LSIs fetch from the table).

    *attrs* is a dict {dynamo_name: dumped value}; *expected* is a list of
    column objects.

    expected | present | change
    ---------|---------|-------
    True     | True    | SET
    True     | False   | DEL
    False    | either  | NOOP
    """
    for column in expected:
        name = column.dynamo_name
        # attrs is a dumped DynamoDB item, so the sentinel itself can never
        # appear as a value; a plain membership test is equivalent.
        if name in attrs:
            _set_value(obj, name, attrs[name])
        else:
            _del_value(obj, name)
|
"""A plugin to react to new pokes from Facebook.
It hijacks an authentication-cookie which the user has to enter manually.
Nomnomnom, Cookies!
"""
###############################################################################
#
# TODO: [ ]
#
###############################################################################
# standard library imports
import re
from HTMLParser import HTMLParser
import logging
# related third party imports
import requests
# application specific imports
from samantha.core import subscribe_to
from samantha.plugins.plugin import Plugin
from samantha.tools.eventbuilder import eEvent
try:
import samantha.variables_private as variables_private
CURL = variables_private.fb_curl
except (ImportError, AttributeError):
variables_private = None
CURL = None
__version__ = "1.0.2"
# Initialize the logger
LOGGER = logging.getLogger(__name__)
PLUGIN = Plugin("Facebook", CURL is not None, LOGGER, __file__)
def _parse_curl(curl):
"""Parse a cURL command meant to be used in bash into URL and headers.
This plugin requires cookie-jacking to access and parse an authenticated
version of Facebook's "poke" page. Chrome allows to copy internal URL call
the browser makes including necessary cookies as bash commands. This
function allows the user to enter this command and have it parsed.
"""
if curl is None:
return None, None
curl = curl.replace("curl ", "").replace(" --compressed", "")
divider = curl[0]
# This will be the type of quote around the items in the cURL command.
# Should always be ', but better safe than sound, right?
curl = curl.replace(divider, "") # remove all the quotes
# The command is in the format "URL -H header1 -H header2 ..."
# Everything before the first appearance of -H is the URL, after each
# appearance of -H follows a header.
headers = curl.split(" -H ")
url = headers.pop(0)
header_dict = {}
for h in headers:
name, val = h.split(": ")
header_dict[name] = val
return url, header_dict
# Parse a command formatted for bash's cURL into URL and a dict of headers.
URL, HEADER_DICT = _parse_curl(CURL)
# Initialize the HTMLParser only once to be used later.
UNESCAPE = HTMLParser().unescape
CACHE = []
@subscribe_to("time.schedule.min")
def check_pokes(key, data):
    """Parse the website https://m.facebook.com/pokes/ to access new pokes.

    The result is compared to an existing cache of pokes to notify the
    user only about new ones. Returns a human-readable summary string.
    """
    global CACHE
    cache = []
    new_count = 0
    req = requests.get(url=URL, headers=HEADER_DICT)
    text = req.text
    matches = re.findall(
        r'<article class="_55wr" id="poke_live_item_[\s\S]*?</article>',
        text)
    if matches:
        # pokes were found on the parsed webpage.
        for match in matches:
            poke = {}
            m = re.search((r'<a href="/[\s\S]*?">'
                           r'(?P<name>[\s\S]*?)</a>'
                           r'(?P<text>[\s\S]*?)</div>'),
                          match)
            poke["text"] = m.group("name") + m.group("text")
            poke["name"] = m.group("name")
            m = re.search((r'<i class="img profpic"[\s\S]*?url\("'
                           r'(?P<imgurl>[\s\S]*?)"\)'),
                          match)
            poke["imgurl"] = UNESCAPE(m.group("imgurl"))
            m = re.search((r'<a class="_56bz _54k8 _56bs _56bu" href="'
                           r'(?P<pokeurl>[\s\S]*?)"'),
                          match)
            poke["pokeurl"] = "https://m.facebook.com" + UNESCAPE(
                m.group("pokeurl"))
            if poke["name"] not in CACHE:
                LOGGER.debug(poke["text"])
                eEvent(sender_id=PLUGIN.name,
                       keyword="facebook.poked",
                       data=poke).trigger()
                new_count += 1
            else:
                # .warning() instead of the deprecated .warn() alias.
                LOGGER.warning("This poke by %s is an old one.", poke["name"])
            cache.append(poke["name"])
    else:
        LOGGER.warning("No new pokes!")
    CACHE = cache
    # "is not 1" compared identity rather than value and only worked via
    # CPython's small-int cache; use a real value comparison.
    return "Found {} poke{}, {} of them new. (Cache: {})".format(
        len(CACHE),
        "s" if len(CACHE) != 1 else "",
        new_count,
        CACHE)
@subscribe_to("facebook.poke")
def poke(key, data):
    """Poke a person via a URL including Facebook's authentication cookie."""
    required = (
        ("pokeurl", "Error: The URL is missing from the data."),
        ("headers", "Error: The headers are missing from the data."),
        ("name", "Error: The poked person's name is missing from the data."),
    )
    for field, error in required:
        if field not in data:
            return error
    req = requests.get(url=data["pokeurl"], headers=data["headers"])
    if req.status_code == 200:
        return "{} poked successfully".format(data["name"])
    return "Error: the Poke returned Code {}".format(req.status_code)
:new: retry after a failed connection
"""A plugin to react to new pokes from Facebook.
It hijacks an authentication-cookie which the user has to enter manually.
Nomnomnom, Cookies!
"""
###############################################################################
#
# TODO: [ ]
#
###############################################################################
# standard library imports
import re
from HTMLParser import HTMLParser
import logging
from threading import Event as tEvent
import time
# related third party imports
import requests
# application specific imports
from samantha.core import subscribe_to
from samantha.plugins.plugin import Plugin
from samantha.tools.eventbuilder import eEvent
try:
import samantha.variables_private as variables_private
CURL = variables_private.fb_curl
except (ImportError, AttributeError):
variables_private = None
CURL = None
__version__ = "1.0.3"
# Initialize the logger
LOGGER = logging.getLogger(__name__)
PLUGIN = Plugin("Facebook", CURL is not None, LOGGER, __file__)
def _parse_curl(curl):
"""Parse a cURL command meant to be used in bash into URL and headers.
This plugin requires cookie-jacking to access and parse an authenticated
version of Facebook's "poke" page. Chrome allows to copy internal URL call
the browser makes including necessary cookies as bash commands. This
function allows the user to enter this command and have it parsed.
"""
if curl is None:
return None, None
curl = curl.replace("curl ", "").replace(" --compressed", "")
divider = curl[0]
# This will be the type of quote around the items in the cURL command.
# Should always be ', but better safe than sound, right?
curl = curl.replace(divider, "") # remove all the quotes
# The command is in the format "URL -H header1 -H header2 ..."
# Everything before the first appearance of -H is the URL, after each
# appearance of -H follows a header.
headers = curl.split(" -H ")
url = headers.pop(0)
header_dict = {}
for h in headers:
name, val = h.split(": ")
header_dict[name] = val
return url, header_dict
# Set as soon as a request to facebook is successful. Cleared if the requests
# fail 3x in a row. While PLUGIN_IS_ONLINE isn't set the plugin will not retry
# failed requests. While it is set, the plugin will retry up to 3x to
# reestablish a connection.
PLUGIN_IS_ONLINE = tEvent()
# Parse a command formatted for bash's cURL into URL and a dict of headers.
URL, HEADER_DICT = _parse_curl(CURL)
# Initialize the HTMLParser only once to be used later.
UNESCAPE = HTMLParser().unescape
CACHE = []
@subscribe_to("time.schedule.min")
def check_pokes(key, data):
    """Parse the website https://m.facebook.com/pokes/ to access new pokes.

    The result is compared to an existing cache of pokes to notify the user
    only about new ones.

    Fixes applied:
      * error messages wrongly referred to "Twitch" in this Facebook plugin;
      * ``len(CACHE) is not 1`` identity check replaced with ``!= 1``;
      * Py2-only ``except X, e`` replaced with ``except X as e`` (2.6+/3.x);
      * deprecated ``LOGGER.warn`` alias replaced with ``LOGGER.warning``.
    """
    global CACHE
    cache = []
    new_count = 0
    req = None
    tries = 0
    # Give up after one failed attempt if the plugin wasn't able to establish
    # a connection before. Otherwise try up to 3x.
    retries = 3 if PLUGIN_IS_ONLINE.is_set() else 1
    while tries < retries and req is None:
        try:
            tries += 1
            req = requests.get(url=URL, headers=HEADER_DICT, timeout=15)
            if req.status_code == 200:
                # Update the flag after a successful connection
                PLUGIN_IS_ONLINE.set()
            else:
                if tries == retries > 1:
                    m = "Reached the max. amount of retries."
                elif not PLUGIN_IS_ONLINE.is_set():
                    m = "Not retrying because the plugin is offline."
                else:
                    m = "Retrying in two seconds."
                LOGGER.warning("The request returned the wrong status code "
                               "(%s) on attempt %d. %s",
                               req.status_code, tries, m)
                req = None
                time.sleep(2)
        except (requests.exceptions.ConnectionError,
                requests.exceptions.SSLError,
                requests.exceptions.Timeout) as e:
            if tries == retries > 1:
                m = "Reached the max. amount of retries."
            elif not PLUGIN_IS_ONLINE.is_set():
                m = "Not retrying because the plugin is offline."
            else:
                m = "Retrying in two seconds."
            LOGGER.warning("Connecting to Facebook failed on attempt %d. "
                           "%s Error: %s", tries, m, e)
            req = None
            time.sleep(2)
    if req is None:
        # BUGFIX: these two messages previously said "Twitch".
        LOGGER.error("Connecting to Facebook failed.")
        PLUGIN_IS_ONLINE.clear()
        return "Error: Connecting to Facebook failed."
    text = req.text
    matches = re.findall(
        r'<article class="_55wr" id="poke_live_item_[\s\S]*?</article>',
        text)
    if matches:
        # pokes were found on the parsed webpage.
        for match in matches:
            poke = {}
            # NOTE(review): each re.search below assumes Facebook's markup is
            # unchanged; on a layout change ``m`` is None and ``m.group``
            # raises AttributeError.
            m = re.search((r'<a href="/[\s\S]*?">'
                           r'(?P<name>[\s\S]*?)</a>'
                           r'(?P<text>[\s\S]*?)</div>'),
                          match)
            poke["text"] = m.group("name") + m.group("text")
            poke["name"] = m.group("name")
            m = re.search((r'<i class="img profpic"[\s\S]*?url\("'
                           r'(?P<imgurl>[\s\S]*?)"\)'),
                          match)
            poke["imgurl"] = UNESCAPE(m.group("imgurl"))
            m = re.search((r'<a class="_56bz _54k8 _56bs _56bu" href="'
                           r'(?P<pokeurl>[\s\S]*?)"'),
                          match)
            poke["pokeurl"] = "https://m.facebook.com" + UNESCAPE(
                m.group("pokeurl"))
            if poke["name"] not in CACHE:
                LOGGER.debug(poke["text"])
                eEvent(sender_id=PLUGIN.name,
                       keyword="facebook.poked",
                       data=poke).trigger()
                new_count += 1
            else:
                LOGGER.warning("This poke by %s is an old one.", poke["name"])
            cache.append(poke["name"])
    else:
        LOGGER.warning("No new pokes!")
    CACHE = cache
    return "Found {} poke{}, {} of them new. (Cache: {})".format(
        len(CACHE),
        "s" if len(CACHE) != 1 else "",
        new_count,
        CACHE)
@subscribe_to("facebook.poke")
def poke(key, data):
    """Poke a person via a URL including Facebook's authentication cookie."""
    # Validate the payload; bail out early with the matching error message.
    required = (
        ("pokeurl", "Error: The URL is missing from the data."),
        ("headers", "Error: The headers are missing from the data."),
        ("name", "Error: The poked person's name is missing from the data."),
    )
    for field, message in required:
        if field not in data:
            return message
    response = requests.get(url=data["pokeurl"], headers=data["headers"])
    if response.status_code == 200:
        return "{} poked successfully".format(data["name"])
    return "Error: the Poke returned Code {}".format(response.status_code)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from userprofiles.forms import RegistrationForm
from cosinnus.forms.profile import UserProfileForm
from cosinnus.forms.widgets import DateL10nPicker
from cosinnus.models import (CosinnusGroup, CosinnusGroupMembership,
MEMBERSHIP_ADMIN)
from .models import (EcobasaUserProfile, EcobasaCommunityProfile,
EcobasaCommunityProfileSeed)
class RegistrationMemberForm(RegistrationForm):
    """Registration form that also collects EcobasaUserProfile fields."""

    def __init__(self, *args, **kwargs):
        super(RegistrationMemberForm, self).__init__(*args, **kwargs)
        # Merge the profile model's form fields into the registration form.
        self.fields.update(forms.fields_for_model(EcobasaUserProfile))
        self.fields['birth_date'].widget = DateL10nPicker()
        # has_bus is a boolean field, but is represented as a button in the
        # form. Form validation has to be told explicitly that this field is
        # not required, so swap it for a hidden, optional text field.
        self.fields['has_bus'] = forms.CharField(
            widget=forms.HiddenInput(),
            label=self.fields['has_bus'].label,
            required=False)

    def save_profile(self, new_user, *args, **kwargs):
        """Copy the cleaned form data onto the new user's profile."""
        # do not catch DoesNotExist: there must be something else wrong
        profile = EcobasaUserProfile.objects.get(user=new_user)
        data = self.cleaned_data
        for name in ('avatar', 'gender', 'birth_date', 'country', 'city',
                     'zipcode', 'ecobasa_member', 'tour_why', 'tour_how',
                     'has_bus', 'bus_consumption', 'bus_has_driving_license',
                     'bus_image', 'bus_num_passengers',
                     'bus_others_can_drive'):
            setattr(profile, name, data[name])
        # Taggable many-to-many style fields are added one tag at a time.
        for name in ('interests', 'skills', 'products'):
            for tag in data[name]:
                getattr(profile, name).add(tag)
        profile.save()
class RegistrationCommunityForm(RegistrationForm):
    """Registration form that creates a community group plus its profile."""

    SeedInlineFormSet = forms.models.inlineformset_factory(
        EcobasaCommunityProfile, EcobasaCommunityProfileSeed, extra=1)

    def __init__(self, *args, **kwargs):
        super(RegistrationCommunityForm, self).__init__(*args, **kwargs)
        # Expose both the user profile and the community profile fields.
        self.fields.update(forms.fields_for_model(EcobasaUserProfile))
        self.fields['birth_date'].widget = DateL10nPicker()
        self.fields.update(forms.fields_for_model(EcobasaCommunityProfile))

    def save_profile(self, new_user, *args, **kwargs):
        """Create the cosinnus group, its profile, and any seed entries."""
        data = self.cleaned_data
        name = data['name']
        # The community is a non-public cosinnus group; the registering
        # user becomes its first admin.
        community = CosinnusGroup.objects.create(name=name, public=False)
        CosinnusGroupMembership.objects.create(
            user=new_user, group=community, status=MEMBERSHIP_ADMIN)
        profile = EcobasaCommunityProfile.objects.create(group=community)
        profile.name = name
        for field in ('contact_telephone', 'contact_street', 'contact_city',
                      'contact_zipcode', 'contact_country', 'contact_show',
                      'visitors_num', 'visitors_accommodation',
                      'wishlist_materials', 'wishlist_tools',
                      'wishlist_special_needs'):
            setattr(profile, field, data[field])
        # Tag-style fields are populated one tag at a time.
        for field in ('offers_services', 'offers_skills', 'offers_creations'):
            for tag in data[field]:
                getattr(profile, field).add(tag)
        for field in ('offers_workshop_spaces', 'offers_learning_seminars',
                      'basic_inhabitants', 'basic_inhabitants_underage',
                      'basic_brings_together', 'basic_membership_status'):
            setattr(profile, field, data[field])
        profile.save()
        # Persist every valid seed form entered during registration.
        for form in self.SeedInlineFormSet(self.data, instance=profile):
            if form.is_valid():
                form.save()
class CommunityProfileForm(forms.ModelForm):
    """Edit form for an existing community profile and its seed entries."""

    SeedInlineFormSet = forms.models.inlineformset_factory(
        EcobasaCommunityProfile, EcobasaCommunityProfileSeed, extra=1)

    class Meta:
        # NOTE(review): no ``fields``/``exclude`` declared, so every model
        # field is editable; newer Django versions reject this -- confirm.
        model = EcobasaCommunityProfile

    def save(self, commit=True):
        """Save the profile together with its valid seed inline forms.

        Seed rows are only acted upon when both 'kind' and 'num' were
        filled in; rows flagged DELETE are removed, the rest saved.
        """
        formset = self.SeedInlineFormSet(self.data, instance=self.instance)
        for form in formset:
            if form.is_valid():
                # is_valid populates cleaned_data
                data = form.cleaned_data
                if data and data['kind'] and data['num']:
                    if data['DELETE']:
                        # Existing row marked for deletion in the formset.
                        data['id'].delete()
                    else:
                        form.save(commit)
        return super(CommunityProfileForm, self).save(commit)
class PioneerProfileForm(UserProfileForm):
    """Profile form for pioneers that can also change email and password.

    All password fields are optional: leaving them empty keeps the current
    password. When a new password is entered, both entries must match.
    """
    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
        'password_incorrect': _('Your old password was entered incorrectly. '
                                'Please enter it again.'),
    }
    old_password = forms.CharField(label=_('Old password'),
                                   required=False,
                                   widget=forms.PasswordInput)
    new_password1 = forms.CharField(label=_('New password'),
                                    required=False,
                                    widget=forms.PasswordInput)
    new_password2 = forms.CharField(label=_('New password confirmation'),
                                    required=False,
                                    widget=forms.PasswordInput)
    email = forms.EmailField(label=_("Email"), max_length=254)

    def __init__(self, *args, **kwargs):
        super(PioneerProfileForm, self).__init__(*args, **kwargs)
        self.fields['birth_date'].widget = DateL10nPicker()
        self.fields['email'].initial = self.instance.user.email

    def clean_old_password(self):
        """
        Validates that the old_password field is correct (when provided).
        """
        old_password = self.cleaned_data['old_password']
        # NOTE(review): old_password is optional and is not enforced even
        # when a new password is being set -- confirm this is intended.
        if old_password and not self.instance.user.check_password(old_password):
            raise forms.ValidationError(
                self.error_messages['password_incorrect'])
        return old_password

    def clean_new_password2(self):
        """
        Validates that the new password matches its confirmation.

        BUGFIX: the check previously only ran when *both* fields were
        filled in, so entering only new_password1 changed the password
        without any confirmation. A mismatch (including one empty field)
        now raises whenever either field is filled.
        """
        password1 = self.cleaned_data.get('new_password1')
        password2 = self.cleaned_data.get('new_password2')
        if password1 or password2:
            if password1 != password2:
                raise forms.ValidationError(
                    self.error_messages['password_mismatch'])
        return password2

    def save(self, commit=True):
        """Persist the email and, when given, the new password."""
        user = self.instance.user
        user.email = self.cleaned_data['email']
        new_password = self.cleaned_data['new_password1']
        if new_password:
            user.set_password(new_password)
        if commit:
            user.save()
        return super(PioneerProfileForm, self).save(commit)
Refers #54 -- Made avatar required when registering a pioneer
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from userprofiles.forms import RegistrationForm
from cosinnus.forms.profile import UserProfileForm
from cosinnus.forms.widgets import DateL10nPicker
from cosinnus.models import (CosinnusGroup, CosinnusGroupMembership,
MEMBERSHIP_ADMIN)
from .models import (EcobasaUserProfile, EcobasaCommunityProfile,
EcobasaCommunityProfileSeed)
class RegistrationMemberForm(RegistrationForm):
    """Registration form that also collects EcobasaUserProfile fields."""

    def __init__(self, *args, **kwargs):
        super(RegistrationMemberForm, self).__init__(*args, **kwargs)
        # Merge the profile model's form fields into the registration form.
        self.fields.update(forms.fields_for_model(EcobasaUserProfile))
        self.fields['birth_date'].widget = DateL10nPicker()
        self.fields['avatar'].required = True
        # has_bus is a boolean field, but is represented as a button in the
        # form. Form validation has to be told explicitly that this field is
        # not required, so swap it for a hidden, optional text field.
        self.fields['has_bus'] = forms.CharField(
            widget=forms.HiddenInput(),
            label=self.fields['has_bus'].label,
            required=False)

    def save_profile(self, new_user, *args, **kwargs):
        """Copy the cleaned form data onto the new user's profile."""
        # do not catch DoesNotExist: there must be something else wrong
        profile = EcobasaUserProfile.objects.get(user=new_user)
        data = self.cleaned_data
        for name in ('avatar', 'gender', 'birth_date', 'country', 'city',
                     'zipcode', 'ecobasa_member', 'tour_why', 'tour_how',
                     'has_bus', 'bus_consumption', 'bus_has_driving_license',
                     'bus_image', 'bus_num_passengers',
                     'bus_others_can_drive'):
            setattr(profile, name, data[name])
        # Taggable many-to-many style fields are added one tag at a time.
        for name in ('interests', 'skills', 'products'):
            for tag in data[name]:
                getattr(profile, name).add(tag)
        profile.save()
class RegistrationCommunityForm(RegistrationForm):
    """Registration form that creates a community group plus its profile."""

    SeedInlineFormSet = forms.models.inlineformset_factory(
        EcobasaCommunityProfile, EcobasaCommunityProfileSeed, extra=1)

    def __init__(self, *args, **kwargs):
        super(RegistrationCommunityForm, self).__init__(*args, **kwargs)
        # Expose both the user profile and the community profile fields.
        self.fields.update(forms.fields_for_model(EcobasaUserProfile))
        self.fields['birth_date'].widget = DateL10nPicker()
        self.fields.update(forms.fields_for_model(EcobasaCommunityProfile))

    def save_profile(self, new_user, *args, **kwargs):
        """Create the cosinnus group, its profile, and any seed entries."""
        data = self.cleaned_data
        name = data['name']
        # The community is a non-public cosinnus group; the registering
        # user becomes its first admin.
        community = CosinnusGroup.objects.create(name=name, public=False)
        CosinnusGroupMembership.objects.create(
            user=new_user, group=community, status=MEMBERSHIP_ADMIN)
        profile = EcobasaCommunityProfile.objects.create(group=community)
        profile.name = name
        for field in ('contact_telephone', 'contact_street', 'contact_city',
                      'contact_zipcode', 'contact_country', 'contact_show',
                      'visitors_num', 'visitors_accommodation',
                      'wishlist_materials', 'wishlist_tools',
                      'wishlist_special_needs'):
            setattr(profile, field, data[field])
        # Tag-style fields are populated one tag at a time.
        for field in ('offers_services', 'offers_skills', 'offers_creations'):
            for tag in data[field]:
                getattr(profile, field).add(tag)
        for field in ('offers_workshop_spaces', 'offers_learning_seminars',
                      'basic_inhabitants', 'basic_inhabitants_underage',
                      'basic_brings_together', 'basic_membership_status'):
            setattr(profile, field, data[field])
        profile.save()
        # Persist every valid seed form entered during registration.
        for form in self.SeedInlineFormSet(self.data, instance=profile):
            if form.is_valid():
                form.save()
class CommunityProfileForm(forms.ModelForm):
    """Edit form for an existing community profile and its seed entries."""

    SeedInlineFormSet = forms.models.inlineformset_factory(
        EcobasaCommunityProfile, EcobasaCommunityProfileSeed, extra=1)

    class Meta:
        # NOTE(review): no ``fields``/``exclude`` declared, so every model
        # field is editable; newer Django versions reject this -- confirm.
        model = EcobasaCommunityProfile

    def save(self, commit=True):
        """Save the profile together with its valid seed inline forms.

        Seed rows are only acted upon when both 'kind' and 'num' were
        filled in; rows flagged DELETE are removed, the rest saved.
        """
        formset = self.SeedInlineFormSet(self.data, instance=self.instance)
        for form in formset:
            if form.is_valid():
                # is_valid populates cleaned_data
                data = form.cleaned_data
                if data and data['kind'] and data['num']:
                    if data['DELETE']:
                        # Existing row marked for deletion in the formset.
                        data['id'].delete()
                    else:
                        form.save(commit)
        return super(CommunityProfileForm, self).save(commit)
class PioneerProfileForm(UserProfileForm):
    """Profile form for pioneers that can also change email and password.

    All password fields are optional: leaving them empty keeps the current
    password. When a new password is entered, both entries must match.
    """
    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
        'password_incorrect': _('Your old password was entered incorrectly. '
                                'Please enter it again.'),
    }
    old_password = forms.CharField(label=_('Old password'),
                                   required=False,
                                   widget=forms.PasswordInput)
    new_password1 = forms.CharField(label=_('New password'),
                                    required=False,
                                    widget=forms.PasswordInput)
    new_password2 = forms.CharField(label=_('New password confirmation'),
                                    required=False,
                                    widget=forms.PasswordInput)
    email = forms.EmailField(label=_("Email"), max_length=254)

    def __init__(self, *args, **kwargs):
        super(PioneerProfileForm, self).__init__(*args, **kwargs)
        self.fields['birth_date'].widget = DateL10nPicker()
        self.fields['email'].initial = self.instance.user.email

    def clean_old_password(self):
        """
        Validates that the old_password field is correct (when provided).
        """
        old_password = self.cleaned_data['old_password']
        # NOTE(review): old_password is optional and is not enforced even
        # when a new password is being set -- confirm this is intended.
        if old_password and not self.instance.user.check_password(old_password):
            raise forms.ValidationError(
                self.error_messages['password_incorrect'])
        return old_password

    def clean_new_password2(self):
        """
        Validates that the new password matches its confirmation.

        BUGFIX: the check previously only ran when *both* fields were
        filled in, so entering only new_password1 changed the password
        without any confirmation. A mismatch (including one empty field)
        now raises whenever either field is filled.
        """
        password1 = self.cleaned_data.get('new_password1')
        password2 = self.cleaned_data.get('new_password2')
        if password1 or password2:
            if password1 != password2:
                raise forms.ValidationError(
                    self.error_messages['password_mismatch'])
        return password2

    def save(self, commit=True):
        """Persist the email and, when given, the new password."""
        user = self.instance.user
        user.email = self.cleaned_data['email']
        new_password = self.cleaned_data['new_password1']
        if new_password:
            user.set_password(new_password)
        if commit:
            user.save()
        return super(PioneerProfileForm, self).save(commit)
|
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import _, api, fields, models
from odoo.exceptions import UserError
class StockInvoiceOnshipping(models.TransientModel):
    """Invoice-on-shipping wizard extended with Brazilian fiscal data.

    Optionally takes the invoice journal from the picking's fiscal
    operation and copies fiscal values onto the generated invoice and
    its lines.
    """
    _inherit = 'stock.invoice.onshipping'

    # When enabled, _get_journal() uses the fiscal operation's journal
    # instead of the parent's selection logic.
    fiscal_operation_journal = fields.Boolean(
        # Typo fixed in the user-visible label: "Jornal" -> "Journal".
        string='Account Journal from Fiscal Operation',
        default=True,
    )
    group = fields.Selection(
        selection_add=[
            ('fiscal_operation', 'Fiscal Operation')],
    )

    @api.multi
    def _get_journal(self):
        """
        Get the journal depending on the journal_type
        :return: account.journal recordset
        :raises UserError: when the fiscal operation has no journal set.
        """
        self.ensure_one()
        journal = self.env['account.journal']
        if self.fiscal_operation_journal:
            pickings = self._load_pickings()
            picking = fields.first(pickings)
            journal = picking.fiscal_operation_id.journal_id
            if not journal:
                # Grammar fixed: "There is not journal" -> "There is no journal".
                raise UserError(
                    _('Invalid Journal! There is no journal defined'
                      ' for this company: %s in fiscal operation: %s !') %
                    (picking.company_id.name,
                     picking.fiscal_operation_id.name))
        else:
            journal = super()._get_journal()
        return journal

    @api.multi
    def _build_invoice_values_from_pickings(self, pickings):
        """Merge Brazilian fiscal values into the invoice values.

        :param pickings: stock.picking recordset
        :return: (invoice, values) as produced by the parent, with fiscal
            document type, serie and journal merged into ``values``.
        """
        invoice, values = super()._build_invoice_values_from_pickings(pickings)
        pick = fields.first(pickings)
        fiscal_vals = pick._prepare_br_fiscal_dict()
        # The caller may force a document type through the context;
        # otherwise fall back to the company's default one.
        document_type_id = self._context.get('document_type_id')
        if document_type_id:
            document_type = self.env['l10n_br_fiscal.document.type'].browse(
                document_type_id)
        else:
            document_type = pick.company_id.document_type_id
            document_type_id = pick.company_id.document_type_id.id
        fiscal_vals['document_type_id'] = document_type_id
        document_serie = document_type.get_document_serie(
            pick.company_id, pick.fiscal_operation_id)
        if document_serie:
            fiscal_vals['document_serie_id'] = document_serie.id
        if pick.fiscal_operation_id and pick.fiscal_operation_id.journal_id:
            fiscal_vals['journal_id'] = pick.fiscal_operation_id.journal_id.id
        values.update(fiscal_vals)
        return invoice, values

    @api.multi
    def _get_invoice_line_values(self, moves, invoice_values, invoice):
        """
        Create invoice line values from given moves
        :param moves: stock.move
        :param invoice: account.invoice
        :return: dict
        """
        move = fields.first(moves)
        values = move._prepare_br_fiscal_dict()
        values.update(super()._get_invoice_line_values(
            moves, invoice_values, invoice))
        # Map fiscal taxes onto account taxes for the invoice line.
        # NOTE(review): assumes fiscal_tax_ids is a [(6, 0, ids)] command --
        # confirm _prepare_br_fiscal_dict always produces that shape.
        values['invoice_line_tax_ids'] = [
            (6, 0, self.env['l10n_br_fiscal.tax'].browse(
                values['fiscal_tax_ids'][0][2]
            ).account_taxes().ids)
        ]
        return values
[FIX] Flake8
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import _, api, fields, models
from odoo.exceptions import UserError
class StockInvoiceOnshipping(models.TransientModel):
    """Invoice-on-shipping wizard extended with Brazilian fiscal data.

    Optionally takes the invoice journal from the picking's fiscal
    operation and copies fiscal values onto the generated invoice and
    its lines.
    """
    _inherit = 'stock.invoice.onshipping'

    # When enabled, _get_journal() uses the fiscal operation's journal
    # instead of the parent's selection logic.
    fiscal_operation_journal = fields.Boolean(
        # Typo fixed in the user-visible label: "Jornal" -> "Journal".
        string='Account Journal from Fiscal Operation',
        default=True,
    )
    group = fields.Selection(
        selection_add=[
            ('fiscal_operation', 'Fiscal Operation')],
    )

    @api.multi
    def _get_journal(self):
        """
        Get the journal depending on the journal_type
        :return: account.journal recordset
        :raises UserError: when the fiscal operation has no journal set.
        """
        self.ensure_one()
        journal = self.env['account.journal']
        if self.fiscal_operation_journal:
            pickings = self._load_pickings()
            picking = fields.first(pickings)
            journal = picking.fiscal_operation_id.journal_id
            if not journal:
                # Grammar fixed: "There is not journal" -> "There is no journal".
                raise UserError(
                    _('Invalid Journal! There is no journal defined'
                      ' for this company: %s in fiscal operation: %s !') %
                    (picking.company_id.name,
                     picking.fiscal_operation_id.name))
        else:
            journal = super()._get_journal()
        return journal

    @api.multi
    def _build_invoice_values_from_pickings(self, pickings):
        """Merge Brazilian fiscal values into the invoice values.

        :param pickings: stock.picking recordset
        :return: (invoice, values) as produced by the parent, with fiscal
            document type, serie and journal merged into ``values``.
        """
        invoice, values = super()._build_invoice_values_from_pickings(pickings)
        pick = fields.first(pickings)
        fiscal_vals = pick._prepare_br_fiscal_dict()
        # The caller may force a document type through the context;
        # otherwise fall back to the company's default one.
        document_type_id = self._context.get('document_type_id')
        if document_type_id:
            document_type = self.env['l10n_br_fiscal.document.type'].browse(
                document_type_id)
        else:
            document_type = pick.company_id.document_type_id
            document_type_id = pick.company_id.document_type_id.id
        fiscal_vals['document_type_id'] = document_type_id
        document_serie = document_type.get_document_serie(
            pick.company_id, pick.fiscal_operation_id)
        if document_serie:
            fiscal_vals['document_serie_id'] = document_serie.id
        if pick.fiscal_operation_id and pick.fiscal_operation_id.journal_id:
            fiscal_vals['journal_id'] = pick.fiscal_operation_id.journal_id.id
        values.update(fiscal_vals)
        return invoice, values

    @api.multi
    def _get_invoice_line_values(self, moves, invoice_values, invoice):
        """
        Create invoice line values from given moves
        :param moves: stock.move
        :param invoice: account.invoice
        :return: dict
        """
        move = fields.first(moves)
        values = move._prepare_br_fiscal_dict()
        values.update(super()._get_invoice_line_values(
            moves, invoice_values, invoice))
        # Map fiscal taxes onto account taxes for the invoice line.
        # NOTE(review): assumes fiscal_tax_ids is a [(6, 0, ids)] command --
        # confirm _prepare_br_fiscal_dict always produces that shape.
        values['invoice_line_tax_ids'] = [
            (6, 0, self.env['l10n_br_fiscal.tax'].browse(
                values['fiscal_tax_ids'][0][2]
            ).account_taxes().ids)
        ]
        return values
|
#!/usr/bin/python
# -*- coding: utf-8 -*-#
# Copyright (c) 2008-2016, Xavier Basty
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GrapeFruit - Color manipulation in Python"""
from __future__ import division
import sys
# The default white reference, use 2° Standard Observer, D65 (daylight)
_DEFAULT_WREF = (0.95043, 1.00000, 1.08890)
# Frequently used fractions, precomputed once at import time.
_oneThird = 1.0 / 3
_srgbGammaCorrInv = 0.03928 / 12.92
_sixteenHundredsixteenth = 16.0 / 116
# 25 hue sampling points (degrees) on the RYB wheel.
# NOTE(review): presumably consumed by RYB<->RGB hue mapping helpers defined
# later in this module -- confirm against the rest of the file.
_RybWheel = (
    0,  26,  52,
   83, 120, 130,
  141, 151, 162,
  177, 190, 204,
  218, 232, 246,
  261, 275, 288,
  303, 317, 330,
  338, 345, 352,
  360)
# Matching 25 hue sampling points (degrees) on the RGB wheel.
_RgbWheel = (
    0,   8,  17,
   26,  34,  41,
   48,  54,  60,
   81, 103, 123,
  138, 155, 171,
  187, 204, 219,
  234, 251, 267,
  282, 298, 329,
  360)
WHITE_REFERENCE = {
'std_A' : (1.09847, 1.00000, 0.35582),
'std_B' : (0.99093, 1.00000, 0.85313),
'std_C' : (0.98071, 1.00000, 1.18225),
'std_D50' : (0.96421, 1.00000, 0.82519),
'std_D55' : (0.95680, 1.00000, 0.92148),
'std_D65' : (0.95043, 1.00000, 1.08890),
'std_D75' : (0.94972, 1.00000, 1.22639),
'std_E' : (1.00000, 1.00000, 1.00000),
'std_F1' : (0.92834, 1.00000, 1.03665),
'std_F2' : (0.99145, 1.00000, 0.67316),
'std_F3' : (1.03753, 1.00000, 0.49861),
'std_F4' : (1.09147, 1.00000, 0.38813),
'std_F5' : (0.90872, 1.00000, 0.98723),
'std_F6' : (0.97309, 1.00000, 0.60191),
'std_F7' : (0.95017, 1.00000, 1.08630),
'std_F8' : (0.96413, 1.00000, 0.82333),
'std_F9' : (1.00365, 1.00000, 0.67868),
'std_F10' : (0.96174, 1.00000, 0.81712),
'std_F11' : (1.00899, 1.00000, 0.64262),
'std_F12' : (1.08046, 1.00000, 0.39228),
'sup_A' : (1.11142, 1.00000, 0.35200),
'sup_B' : (0.99178, 1.00000, 0.84349),
'sup_C' : (0.97286, 1.00000, 1.16145),
'sup_D50' : (0.96721, 1.00000, 0.81428),
'sup_D55' : (0.95797, 1.00000, 0.90925),
'sup_D65' : (0.94810, 1.00000, 1.07305),
'sup_D75' : (0.94417, 1.00000, 1.20643),
'sup_E' : (1.00000, 1.00000, 1.00000),
'sup_F1' : (0.94791, 1.00000, 1.03191),
'sup_F2' : (1.03245, 1.00000, 0.68990),
'sup_F3' : (1.08968, 1.00000, 0.51965),
'sup_F4' : (1.14961, 1.00000, 0.40963),
'sup_F5' : (0.93369, 1.00000, 0.98636),
'sup_F6' : (1.02148, 1.00000, 0.62074),
'sup_F7' : (0.95780, 1.00000, 1.07618),
'sup_F8' : (0.97115, 1.00000, 0.81135),
'sup_F9' : (1.02116, 1.00000, 0.67826),
'sup_F10' : (0.99001, 1.00000, 0.83134),
'sup_F11' : (1.03820, 1.00000, 0.65555),
'sup_F12' : (1.11428, 1.00000, 0.40353)}
NAMED_COLOR = {
'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgrey': '#a9a9a9',
'darkgreen': '#006400',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'grey': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
'indianred': '#cd5c5c',
'indigo': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgreen': '#90ee90',
'lightgray': '#d3d3d3',
'lightgrey': '#d3d3d3',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumpurple': '#9370db',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
'palevioletred': '#db7093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32'}
def rgb_to_hsl(r, g, b):
    """Convert the color from RGB coordinates to HSL.

    Parameters:
      :r: The Red component value [0...1]
      :g: The Green component value [0...1]
      :b: The Blue component value [0...1]

    Returns:
      The color as an (h, s, l) tuple with h in [0...360] and s, l
      in [0...1].

    >>> rgb_to_hsl(1, 0.5, 0)
    (30.0, 1.0, 0.5)
    """
    lo = min(r, g, b)
    hi = max(r, g, b)
    lightness = (hi + lo) / 2.0
    # Equal channels mean an achromatic gray: hue and saturation are zero.
    if lo == hi:
        return (0.0, 0.0, lightness)
    delta = hi - lo
    sat = delta / (hi + lo) if lightness < 0.5 else delta / (2.0 - hi - lo)
    dr = (hi - r) / delta
    dg = (hi - g) / delta
    db = (hi - b) / delta
    # The dominant channel decides which sector of the wheel we are in.
    if hi == r:
        hue = db - dg
    elif hi == g:
        hue = 2.0 + dr - db
    else:
        hue = 4.0 + dg - dr
    return ((hue * 60.0) % 360.0, sat, lightness)
def _hue_to_rgb(n1, n2, h):
    """Map a hue position (in sixths of the wheel) to a single channel."""
    h %= 6.0
    if h < 1.0:
        return n1 + ((n2 - n1) * h)
    if h < 3.0:
        return n2
    if h < 4.0:
        return n1 + ((n2 - n1) * (4.0 - h))
    return n1


def hsl_to_rgb(h, s, l):
    """Convert the color from HSL coordinates to RGB.

    Parameters:
      :h: The Hue component value [0...360]
      :s: The Saturation component value [0...1]
      :l: The Lightness component value [0...1]

    Returns:
      The color as an (r, g, b) tuple, each component in [0...1].

    >>> hsl_to_rgb(30.0, 1.0, 0.5)
    (1.0, 0.5, 0.0)
    """
    # Zero saturation is an achromatic gray.
    if s == 0:
        return (l, l, l)
    n2 = l * (1.0 + s) if l < 0.5 else l + s - (l * s)
    n1 = (2.0 * l) - n2
    h /= 60.0
    return (_hue_to_rgb(n1, n2, h + 2),
            _hue_to_rgb(n1, n2, h),
            _hue_to_rgb(n1, n2, h - 2))
def rgb_to_hsv(r, g, b):
    """Convert the color from RGB coordinates to HSV.

    Parameters:
      :r: The Red component value [0...1]
      :g: The Green component value [0...1]
      :b: The Blue component value [0...1]

    Returns:
      The color as an (h, s, v) tuple with h in [0...360] and s, v
      in [0...1].

    >>> rgb_to_hsv(1, 0.5, 0)
    (30.0, 1.0, 1.0)
    """
    v = float(max(r, g, b))
    d = v - min(r, g, b)
    # No spread between channels: achromatic gray, hue/saturation are zero.
    if d == 0:
        return (0.0, 0.0, v)
    s = d / v
    dr = (v - r) / d
    dg = (v - g) / d
    db = (v - b) / d
    if v == r:
        h = db - dg          # between yellow & magenta
    elif v == g:
        h = 2.0 + dr - db    # between cyan & yellow
    else:
        h = 4.0 + dg - dr    # between magenta & cyan
    return ((h * 60.0) % 360.0, s, v)
def hsv_to_rgb(h, s, v):
    """Convert the color from HSV coordinates to RGB.

    Parameters:
      :h: The Hue component value [0...360]
      :s: The Saturation component value [0...1]
      :v: The Value component [0...1]

    Returns:
      The color as an (r, g, b) tuple, each component in [0...1].

    >>> hsv_to_rgb(30.0, 1.0, 0.5)
    (0.5, 0.25, 0.0)
    """
    # Zero saturation is an achromatic gray.
    if s == 0:
        return (v, v, v)
    h /= 60.0
    h = h % 6.0
    sextant = int(h)
    frac = h - sextant
    if not (sextant & 1):
        frac = 1 - frac  # invert the fraction in even sextants
    low = v * (1.0 - s)
    mid = v * (1.0 - (s * frac))
    # One channel ordering per sextant of the hue wheel.
    return ((v, mid, low),
            (mid, v, low),
            (low, v, mid),
            (low, mid, v),
            (mid, low, v),
            (v, low, mid))[sextant]
def rgb_to_yiq(r, g, b):
    """Convert the color from RGB to YIQ.

    Parameters:
      :r: The Red component value [0...1]
      :g: The Green component value [0...1]
      :b: The Blue component value [0...1]

    Returns:
      The color as a (y, i, q) tuple.

    >>> '(%g, %g, %g)' % rgb_to_yiq(1, 0.5, 0)
    '(0.592263, 0.458874, -0.0499818)'
    """
    # Fixed per-channel weighting coefficients for each output component.
    return ((r * 0.29895808) + (g * 0.58660979) + (b * 0.11443213),
            (r * 0.59590296) - (g * 0.27405705) - (b * 0.32184591),
            (r * 0.21133576) - (g * 0.52263517) + (b * 0.31129940))
def yiq_to_rgb(y, i, q):
  """Convert the color from YIQ coordinates to RGB.

  Parameters:
    :y:
      The Y component value [0...1]
    :i:
      The I component value [0...1]
    :q:
      The Q component value [0...1]

  Returns:
    The color as an (r, g, b) tuple in the range:
    r[0...1],
    g[0...1],
    b[0...1]

  >>> '(%g, %g, %g)' % yiq_to_rgb(0.592263, 0.458874, -0.0499818)
  '(1, 0.5, 5.442e-07)'

  """
  # Inverse of the rgb_to_yiq transform.
  r = y + (i * 0.9562) + (q * 0.6210)
  g = y - (i * 0.2717) - (q * 0.6485)
  b = y - (i * 1.1053) + (q * 1.7020)
  return (r, g, b)
def rgb_to_yuv(r, g, b):
  """Convert the color from RGB coordinates to YUV.

  Parameters:
    :r:
      The Red component value [0...1]
    :g:
      The Green component value [0...1]
    :b:
      The Blue component value [0...1]

  Returns:
    The color as an (y, u, v) tuple in the range:
    y[0...1],
    u[-0.436...0.436],
    v[-0.615...0.615]

  >>> '(%g, %g, %g)' % rgb_to_yuv(1, 0.5, 0)
  '(0.5925, -0.29156, 0.357505)'

  """
  # BT.601 luma plus the two chroma difference channels.
  y = (0.29900 * r) + (0.58700 * g) + (0.11400 * b)
  u = -(0.14713 * r) - (0.28886 * g) + (0.43600 * b)
  v = (0.61500 * r) - (0.51499 * g) - (0.10001 * b)
  return (y, u, v)
def yuv_to_rgb(y, u, v):
  """Convert the color from YUV coordinates to RGB.

  Parameters:
    :y:
      The Y component value [0...1]
    :u:
      The U component value [-0.436...0.436]
    :v:
      The V component value [-0.615...0.615]

  Returns:
    The color as an (r, g, b) tuple in the range:
    r[0...1],
    g[0...1],
    b[0...1]

  >>> '(%g, %g, %g)' % yuv_to_rgb(0.5925, -0.2916, 0.3575)
  '(0.999989, 0.500015, -6.3276e-05)'

  """
  # Inverse of the rgb_to_yuv transform.
  r = y + (v * 1.13983)
  g = y - (u * 0.39465) - (v * 0.58060)
  b = y + (u * 2.03211)
  return (r, g, b)
def rgb_to_xyz(r, g, b):
  """Convert the color from sRGB to CIE XYZ.

  The methods assumes that the RGB coordinates are given in the sRGB
  colorspace (D65).

  .. note::

    Compensation for the sRGB gamma correction is applied before converting.

  Parameters:
    :r:
      The Red component value [0...1]
    :g:
      The Green component value [0...1]
    :b:
      The Blue component value [0...1]

  Returns:
    The color as an (x, y, z) tuple in the range:
    x[0...1],
    y[0...1],
    z[0...1]

  >>> '(%g, %g, %g)' % rgb_to_xyz(1, 0.5, 0)
  '(0.488941, 0.365682, 0.0448137)'

  """
  linear = []
  for v in (r, g, b):
    # Undo sRGB companding: linear segment near black, power curve above.
    if v <= 0.03928:
      linear.append(v / 12.92)
    else:
      linear.append(((v + 0.055) / 1.055) ** 2.4)
  lr, lg, lb = linear
  x = (lr * 0.4124) + (lg * 0.3576) + (lb * 0.1805)
  y = (lr * 0.2126) + (lg * 0.7152) + (lb * 0.0722)
  z = (lr * 0.0193) + (lg * 0.1192) + (lb * 0.9505)
  return (x, y, z)
def xyz_to_rgb(x, y, z):
  """Convert the color from CIE XYZ coordinates to sRGB.

  .. note::

    Compensation for sRGB gamma correction is applied before converting.

  Parameters:
    :x:
      The X component value [0...1]
    :y:
      The Y component value [0...1]
    :z:
      The Z component value [0...1]

  Returns:
    The color as an (r, g, b) tuple in the range:
    r[0...1],
    g[0...1],
    b[0...1]

  >>> '(%g, %g, %g)' % xyz_to_rgb(0.488941, 0.365682, 0.0448137)
  '(1, 0.5, 6.81883e-08)'

  """
  r = (x * 3.2406255) - (y * 1.5372080) - (z * 0.4986286)
  g = -(x * 0.9689307) + (y * 1.8757561) + (z * 0.0415175)
  b = (x * 0.0557101) - (y * 0.2040211) + (z * 1.0569959)
  def _compand(v):
    # Apply sRGB gamma companding to one linear channel.
    if v <= _srgbGammaCorrInv:
      return v * 12.92
    return (1.055 * (v ** (1 / 2.4))) - 0.055
  return (_compand(r), _compand(g), _compand(b))
def xyz_to_lab(x, y, z, wref=_DEFAULT_WREF):
  """Convert the color from CIE XYZ to CIE L*a*b*.

  Parameters:
    :x:
      The X component value [0...1]
    :y:
      The Y component value [0...1]
    :z:
      The Z component value [0...1]
    :wref:
      The whitepoint reference, default is 2° D65.

  Returns:
    The color as an (L, a, b) tuple in the range:
    L[0...100],
    a[-1...1],
    b[-1...1]

  >>> '(%g, %g, %g)' % xyz_to_lab(0.488941, 0.365682, 0.0448137)
  '(66.9518, 0.43084, 0.739692)'

  >>> '(%g, %g, %g)' % xyz_to_lab(0.488941, 0.365682, 0.0448137, WHITE_REFERENCE['std_D50'])
  '(66.9518, 0.411663, 0.67282)'

  """
  # White point correction.
  x /= wref[0]
  y /= wref[1]
  z /= wref[2]
  def _f(t):
    # CIE nonlinear distortion: cube root above the threshold,
    # linear approximation below it.
    if t > 0.008856:
      return t ** _oneThird
    return (7.787 * t) + _sixteenHundredsixteenth
  x, y, z = _f(x), _f(y), _f(z)
  # Vector scaling.
  return ((116 * y) - 16, 5.0 * (x - y), 2.0 * (y - z))
def lab_to_xyz(l, a, b, wref=_DEFAULT_WREF):
  """Convert the color from CIE L*a*b* to CIE 1931 XYZ.

  Parameters:
    :l:
      The L component [0...100]
    :a:
      The a component [-1...1]
    :b:
      The b component [-1...1]
    :wref:
      The whitepoint reference, default is 2° D65.

  Returns:
    The color as an (x, y, z) tuple in the range:
    x[0...1],
    y[0...1],
    z[0...1]

  >>> '(%g, %g, %g)' % lab_to_xyz(66.9518, 0.43084, 0.739692)
  '(0.488941, 0.365682, 0.0448137)'

  >>> '(%g, %g, %g)' % lab_to_xyz(66.9518, 0.411663, 0.67282, WHITE_REFERENCE['std_D50'])
  '(0.488941, 0.365682, 0.0448138)'

  """
  # Use a float divisor so the math stays correct under Python 2 integer
  # division as well (this module still supports Python 2; a plain
  # `/ 116` would truncate to 0 for integer `l`).
  y = (l + 16) / 116.0
  x = (a / 5.0) + y
  z = y - (b / 2.0)
  def _finv(t):
    # Inverse of the CIE nonlinearity used in xyz_to_lab.
    if t > 0.206893:
      return t ** 3
    return (t - _sixteenHundredsixteenth) / 7.787
  return tuple(_finv(t) * w for t, w in zip((x, y, z), wref))
def cmyk_to_cmy(c, m, y, k):
  """Convert the color from CMYK coordinates to CMY.

  Parameters:
    :c:
      The Cyan component value [0...1]
    :m:
      The Magenta component value [0...1]
    :y:
      The Yellow component value [0...1]
    :k:
      The Black component value [0...1]

  Returns:
    The color as an (c, m, y) tuple in the range:
    c[0...1],
    m[0...1],
    y[0...1]

  >>> '(%g, %g, %g)' % cmyk_to_cmy(1, 0.32, 0, 0.5)
  '(1, 0.66, 0.5)'

  """
  # Fold the black component back into each channel.
  inv_k = 1 - k
  return tuple(channel * inv_k + k for channel in (c, m, y))
def cmy_to_cmyk(c, m, y):
  """Convert the color from CMY coordinates to CMYK.

  Parameters:
    :c:
      The Cyan component value [0...1]
    :m:
      The Magenta component value [0...1]
    :y:
      The Yellow component value [0...1]

  Returns:
    The color as an (c, m, y, k) tuple in the range:
    c[0...1],
    m[0...1],
    y[0...1],
    k[0...1]

  >>> '(%g, %g, %g, %g)' % cmy_to_cmyk(1, 0.66, 0.5)
  '(1, 0.32, 0, 0.5)'

  """
  k = min(c, m, y)
  if k == 1.0:
    # Pure black: avoid dividing by zero below.
    return (0.0, 0.0, 0.0, 1.0)
  denom = 1 - k
  return ((c - k) / denom, (m - k) / denom, (y - k) / denom, k)
def rgb_to_cmy(r, g, b):
  """Convert the color from RGB coordinates to CMY.

  Parameters:
    :r:
      The Red component value [0...1]
    :g:
      The Green component value [0...1]
    :b:
      The Blue component value [0...1]

  Returns:
    The color as an (c, m, y) tuple in the range:
    c[0...1],
    m[0...1],
    y[0...1]

  >>> rgb_to_cmy(1, 0.5, 0)
  (0, 0.5, 1)

  """
  # CMY is simply the complement of RGB.
  return tuple(1 - channel for channel in (r, g, b))
def cmy_to_rgb(c, m, y):
  """Convert the color from CMY coordinates to RGB.

  Parameters:
    :c:
      The Cyan component value [0...1]
    :m:
      The Magenta component value [0...1]
    :y:
      The Yellow component value [0...1]

  Returns:
    The color as an (r, g, b) tuple in the range:
    r[0...1],
    g[0...1],
    b[0...1]

  >>> cmy_to_rgb(0, 0.5, 1)
  (1, 0.5, 0)

  """
  # RGB is simply the complement of CMY.
  return tuple(1 - channel for channel in (c, m, y))
def rgb_to_int_tuple(r, g, b):
  """Convert the color from (r, g, b) to an int tuple.

  Parameters:
    :r:
      The Red component value [0...1]
    :g:
      The Green component value [0...1]
    :b:
      The Blue component value [0...1]

  Returns:
    The color as an (r, g, b) tuple in the range:
    r[0...255],
    g[0...255],
    b[0...255]

  >>> rgb_to_int_tuple(1, 0.5, 0)
  (255, 128, 0)

  """
  channels = [int(round(v * 255)) for v in (r, g, b)]
  return tuple(channels)
def int_tuple_to_rgb(int_tuple):
  """Convert a tuple of ints to (r, g, b).

  Parameters:
    The color as an (r, g, b) integer tuple in the range:
    r[0...255],
    g[0...255],
    b[0...255]

  Returns:
    The color as an (r, g, b) tuple in the range:
    r[0...1],
    g[0...1],
    b[0...1]

  >>> '(%g, %g, %g)' % int_tuple_to_rgb((255, 128, 0))
  '(1, 0.501961, 0)'

  """
  # Divide by the float literal 255.0 so true division is used on Python 2
  # as well (a bare `v / 255` truncates every channel to 0 there);
  # this matches pil_to_rgb and html_to_rgb.
  return tuple(v / 255.0 for v in int_tuple)
def rgb_to_html(r, g, b):
  """Convert the color from (r, g, b) to #RRGGBB.

  Parameters:
    :r:
      The Red component value [0...1]
    :g:
      The Green component value [0...1]
    :b:
      The Blue component value [0...1]

  Returns:
    A CSS string representation of this color (#RRGGBB).

  >>> rgb_to_html(1, 0.5, 0)
  '#ff8000'

  """
  # Clamp into [0, 255] on both ends: the original only capped the top,
  # so out-of-gamut negative channels produced malformed strings such as
  # '#ff-1a80'. int() also guarantees %x gets an integer on every Python
  # version. Behavior for in-gamut input is unchanged.
  channels = tuple(min(max(int(round(v * 255)), 0), 255) for v in (r, g, b))
  return '#%02x%02x%02x' % channels
def html_to_rgb(html):
  """Convert the HTML color to (r, g, b).

  Parameters:
    :html:
      the HTML definition of the color (#RRGGBB or #RGB or a color name).

  Returns:
    The color as an (r, g, b) tuple in the range:
    r[0...1],
    g[0...1],
    b[0...1]

  Throws:
    :ValueError:
      If html is neither a known color name or a hexadecimal RGB
      representation.

  >>> '(%g, %g, %g)' % html_to_rgb('#ff8000')
  '(1, 0.501961, 0)'
  >>> '(%g, %g, %g)' % html_to_rgb('ff8000')
  '(1, 0.501961, 0)'
  >>> '(%g, %g, %g)' % html_to_rgb('#f60')
  '(1, 0.4, 0)'
  >>> '(%g, %g, %g)' % html_to_rgb('f60')
  '(1, 0.4, 0)'
  >>> '(%g, %g, %g)' % html_to_rgb('lemonchiffon')
  '(1, 0.980392, 0.803922)'

  """
  html = html.strip().lower()
  if html[0] == '#':
    html = html[1:]
  elif html in NAMED_COLOR:
    # Resolve a color name to its '#RRGGBB' value, then drop the '#'.
    html = NAMED_COLOR[html][1:]

  size = len(html)
  if size == 6:
    parts = (html[0:2], html[2:4], html[4:6])
  elif size == 3:
    # Shorthand '#RGB': each digit is doubled.
    parts = tuple(2 * digit for digit in html)
  else:
    raise ValueError("input #%s is not in #RRGGBB format" % html)

  return tuple(int(part, 16) / 255.0 for part in parts)
def rgb_to_pil(r, g, b):
  """Convert the color from RGB to a PIL-compatible integer.

  Parameters:
    :r:
      The Red component value [0...1]
    :g:
      The Green component value [0...1]
    :b:
      The Blue component value [0...1]

  Returns:
    A PIL compatible integer (0xBBGGRR).

  >>> '0x%06x' % rgb_to_pil(1, 0.5, 0)
  '0x0080ff'

  """
  ints = [min(int(round(v * 255)), 255) for v in (r, g, b)]
  # PIL packs the channels little-endian: blue in the high byte.
  return (ints[2] << 16) + (ints[1] << 8) + ints[0]
def pil_to_rgb(pil):
  """Convert the color from a PIL-compatible integer to RGB.

  Parameters:
    pil: a PIL compatible color representation (0xBBGGRR)

  Returns:
    The color as an (r, g, b) tuple in the range:
    r: [0...1]
    g: [0...1]
    b: [0...1]

  >>> '(%g, %g, %g)' % pil_to_rgb(0x0080ff)
  '(1, 0.501961, 0)'

  """
  # Red lives in the low byte, green and blue above it.
  channels = ((pil >> shift) & 0xff for shift in (0, 8, 16))
  return tuple(c / 255.0 for c in channels)
def _web_safe_component(c, alt=False):
"""Convert a color component to its web safe equivalent.
Parameters:
:c:
The component value [0...1]
:alt:
If True, return the alternative value instead of the nearest one.
Returns:
The web safe equivalent of the component value.
"""
# This sucks, but floating point between 0 and 1 is quite fuzzy...
# So we just change the scale a while to make the equality tests
# work, otherwise it gets wrong at some decimal far to the right.
sc = c * 100.0
# If the color is already safe, return it straight away
d = sc % 20
if d==0: return c
# Get the lower and upper safe values
l = sc - d
u = l + 20
# Return the 'closest' value according to the alt flag
if alt:
if (sc-l) >= (u-sc): return l/100.0
else: return u/100.0
else:
if (sc-l) >= (u-sc): return u/100.0
else: return l/100.0
def rgb_to_web_safe(r, g, b, alt=False):
  """Convert the color from RGB to 'web safe' RGB

  Parameters:
    :r:
      The Red component value [0...1]
    :g:
      The Green component value [0...1]
    :b:
      The Blue component value [0...1]
    :alt:
      If True, use the alternative color instead of the nearest one.
      Can be used for dithering.

  Returns:
    The color as an (r, g, b) tuple in the range:
    r[0...1],
    g[0...1],
    b[0...1]

  >>> '(%g, %g, %g)' % rgb_to_web_safe(1, 0.55, 0.0)
  '(1, 0.6, 0)'

  """
  # Snap each channel independently to the web safe palette.
  return tuple(_web_safe_component(channel, alt) for channel in (r, g, b))
def rgb_to_greyscale(r, g, b):
  """Convert the color from RGB to its greyscale equivalent

  Parameters:
    :r:
      The Red component value [0...1]
    :g:
      The Green component value [0...1]
    :b:
      The Blue component value [0...1]

  Returns:
    The color as an (r, g, b) tuple in the range:
    r[0...1],
    g[0...1],
    b[0...1]

  >>> '(%g, %g, %g)' % rgb_to_greyscale(1, 0.8, 0)
  '(0.6, 0.6, 0.6)'

  """
  # Plain channel average (not a luma-weighted conversion).
  average = (r + g + b) / 3.0
  return (average, average, average)
def rgb_to_ryb(hue):
  """Maps a hue on the RGB color wheel to Itten's RYB wheel.

  Parameters:
    :hue:
      The hue on the RGB color wheel [0...360]

  Returns:
    An approximation of the corresponding hue on Itten's RYB wheel.

  >>> rgb_to_ryb(15)
  26.0

  """
  # Piecewise-linear interpolation between 15-degree anchor points.
  offset = hue % 15
  segment = int(hue / 15)
  start = _RybWheel[segment]
  end = _RybWheel[segment + 1]
  return start + (end - start) * offset / 15
def ryb_to_rgb(hue):
  """Maps a hue on Itten's RYB color wheel to the standard RGB wheel.

  Parameters:
    :hue:
      The hue on Itten's RYB color wheel [0...360]

  Returns:
    An approximation of the corresponding hue on the standard RGB wheel.

  >>> ryb_to_rgb(15)
  8.0

  """
  # Piecewise-linear interpolation between 15-degree anchor points.
  offset = hue % 15
  segment = int(hue / 15)
  start = _RgbWheel[segment]
  end = _RgbWheel[segment + 1]
  return start + (end - start) * offset / 15
class Color:
"""Hold a color value.
Example usage:
To create an instance of the grapefruit.Color from RGB values:
>>> import grapefruit
>>> r, g, b = 1, 0.5, 0
>>> col = grapefruit.Color.FromRgb(r, g, b)
To get the values of the color in another colorspace:
>>> h, s, v = col.hsv
>>> l, a, b = col.lab
To get the complementary of a color:
>>> compl = col.complementary_color(mode='rgb')
>>> print(compl.hsl)
(210.0, 1.0, 0.5)
To directly convert RGB values to their HSL equivalent:
>>> h, s, l = rgb_to_hsl(r, g, b)
"""
  @staticmethod
  def FromRgb(r, g, b, alpha=1.0, wref=_DEFAULT_WREF):
    """Create a new instance based on the specified RGB values.

    Parameters:
      :r:
        The Red component value [0...1]
      :g:
        The Green component value [0...1]
      :b:
        The Blue component value [0...1]
      :alpha:
        The color transparency [0...1], default is opaque
      :wref:
        The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> Color.FromRgb(1.0, 0.5, 0.0)
    Color(1.0, 0.5, 0.0, 1.0)
    >>> Color.FromRgb(1.0, 0.5, 0.0, 0.5)
    Color(1.0, 0.5, 0.0, 0.5)

    """
    # Thin wrapper over the constructor with mode='rgb'.
    return Color((r, g, b), 'rgb', alpha, wref)
  @staticmethod
  def FromHsl(h, s, l, alpha=1.0, wref=_DEFAULT_WREF):
    """Create a new instance based on the specified HSL values.

    Parameters:
      :h:
        The Hue component value [0...1]
      :s:
        The Saturation component value [0...1]
      :l:
        The Lightness component value [0...1]
      :alpha:
        The color transparency [0...1], default is opaque
      :wref:
        The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> Color.FromHsl(30, 1, 0.5)
    Color(1.0, 0.5, 0.0, 1.0)
    >>> Color.FromHsl(30, 1, 0.5, 0.5)
    Color(1.0, 0.5, 0.0, 0.5)

    """
    # Thin wrapper over the constructor with mode='hsl'.
    return Color((h, s, l), 'hsl', alpha, wref)
  @staticmethod
  def FromHsv(h, s, v, alpha=1.0, wref=_DEFAULT_WREF):
    """Create a new instance based on the specified HSV values.

    Parameters:
      :h:
        The Hue component value [0...1]
      :s:
        The Saturation component value [0...1]
      :v:
        The Value component [0...1]
      :alpha:
        The color transparency [0...1], default is opaque
      :wref:
        The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> Color.FromHsv(30, 1, 1)
    Color(1.0, 0.5, 0.0, 1.0)
    >>> Color.FromHsv(30, 1, 1, 0.5)
    Color(1.0, 0.5, 0.0, 0.5)

    """
    # The recomputed hue h2 is discarded in favor of the caller's h —
    # presumably so the requested hue survives the round-trip for
    # achromatic colors, where it would otherwise collapse to 0.
    # TODO(review): confirm this is intentional.
    h2, s, l = rgb_to_hsl(*hsv_to_rgb(h, s, v))
    return Color((h, s, l), 'hsl', alpha, wref)
  @staticmethod
  def FromYiq(y, i, q, alpha=1.0, wref=_DEFAULT_WREF):
    """Create a new instance based on the specified YIQ values.

    Parameters:
      :y:
        The Y component value [0...1]
      :i:
        The I component value [0...1]
      :q:
        The Q component value [0...1]
      :alpha:
        The color transparency [0...1], default is opaque
      :wref:
        The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> str(Color.FromYiq(0.5922, 0.45885,-0.05))
    '(0.999902, 0.499955, -6.6905e-05, 1)'
    >>> str(Color.FromYiq(0.5922, 0.45885,-0.05, 0.5))
    '(0.999902, 0.499955, -6.6905e-05, 0.5)'

    """
    # Convert to RGB first, then delegate to the constructor.
    return Color(yiq_to_rgb(y, i, q), 'rgb', alpha, wref)
  @staticmethod
  def FromYuv(y, u, v, alpha=1.0, wref=_DEFAULT_WREF):
    """Create a new instance based on the specified YUV values.

    Parameters:
      :y:
        The Y component value [0...1]
      :u:
        The U component value [-0.436...0.436]
      :v:
        The V component value [-0.615...0.615]
      :alpha:
        The color transparency [0...1], default is opaque
      :wref:
        The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> str(Color.FromYuv(0.5925, -0.2916, 0.3575))
    '(0.999989, 0.500015, -6.3276e-05, 1)'
    >>> str(Color.FromYuv(0.5925, -0.2916, 0.3575, 0.5))
    '(0.999989, 0.500015, -6.3276e-05, 0.5)'

    """
    # Convert to RGB first, then delegate to the constructor.
    return Color(yuv_to_rgb(y, u, v), 'rgb', alpha, wref)
  @staticmethod
  def FromXyz(x, y, z, alpha=1.0, wref=_DEFAULT_WREF):
    """Create a new instance based on the specified CIE-XYZ values.

    Parameters:
      :x:
        The X component value [0...1]
      :y:
        The Y component value [0...1]
      :z:
        The Z component value [0...1]
      :alpha:
        The color transparency [0...1], default is opaque
      :wref:
        The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> str(Color.FromXyz(0.488941, 0.365682, 0.0448137))
    '(1, 0.5, 6.81883e-08, 1)'
    >>> str(Color.FromXyz(0.488941, 0.365682, 0.0448137, 0.5))
    '(1, 0.5, 6.81883e-08, 0.5)'

    """
    # Convert to RGB first, then delegate to the constructor.
    return Color(xyz_to_rgb(x, y, z), 'rgb', alpha, wref)
  @staticmethod
  def FromLab(l, a, b, alpha=1.0, wref=_DEFAULT_WREF):
    """Create a new instance based on the specified CIE-LAB values.

    Parameters:
      :l:
        The L component [0...100]
      :a:
        The a component [-1...1]
      :b:
        The b component [-1...1]
      :alpha:
        The color transparency [0...1], default is opaque
      :wref:
        The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> str(Color.FromLab(66.9518, 0.43084, 0.739692))
    '(1, 0.5, 1.09491e-08, 1)'
    >>> str(Color.FromLab(66.9518, 0.43084, 0.739692, wref=WHITE_REFERENCE['std_D50']))
    '(1.01238, 0.492011, -0.14311, 1)'
    >>> str(Color.FromLab(66.9518, 0.43084, 0.739692, 0.5))
    '(1, 0.5, 1.09491e-08, 0.5)'
    >>> str(Color.FromLab(66.9518, 0.43084, 0.739692, 0.5, WHITE_REFERENCE['std_D50']))
    '(1.01238, 0.492011, -0.14311, 0.5)'

    """
    # L*a*b* -> XYZ -> RGB, both steps using the same white reference.
    return Color(xyz_to_rgb(*lab_to_xyz(l, a, b, wref)), 'rgb', alpha, wref)
  @staticmethod
  def FromCmy(c, m, y, alpha=1.0, wref=_DEFAULT_WREF):
    """Create a new instance based on the specified CMY values.

    Parameters:
      :c:
        The Cyan component value [0...1]
      :m:
        The Magenta component value [0...1]
      :y:
        The Yellow component value [0...1]
      :alpha:
        The color transparency [0...1], default is opaque
      :wref:
        The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> Color.FromCmy(0, 0.5, 1)
    Color(1, 0.5, 0, 1.0)
    >>> Color.FromCmy(0, 0.5, 1, 0.5)
    Color(1, 0.5, 0, 0.5)

    """
    # Convert to RGB first, then delegate to the constructor.
    return Color(cmy_to_rgb(c, m, y), 'rgb', alpha, wref)
  @staticmethod
  def FromCmyk(c, m, y, k, alpha=1.0, wref=_DEFAULT_WREF):
    """Create a new instance based on the specified CMYK values.

    Parameters:
      :c:
        The Cyan component value [0...1]
      :m:
        The Magenta component value [0...1]
      :y:
        The Yellow component value [0...1]
      :k:
        The Black component value [0...1]
      :alpha:
        The color transparency [0...1], default is opaque
      :wref:
        The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> str(Color.FromCmyk(1, 0.32, 0, 0.5))
    '(0, 0.34, 0.5, 1)'
    >>> str(Color.FromCmyk(1, 0.32, 0, 0.5, 0.5))
    '(0, 0.34, 0.5, 0.5)'

    """
    # CMYK -> CMY -> RGB, then delegate to the constructor.
    return Color(cmy_to_rgb(*cmyk_to_cmy(c, m, y, k)), 'rgb', alpha, wref)
  @staticmethod
  def FromHtml(html, alpha=1.0, wref=_DEFAULT_WREF):
    """Create a new instance based on the specified HTML color definition.

    Parameters:
      :html:
        The HTML definition of the color (#RRGGBB or #RGB or a color name).
      :alpha:
        The color transparency [0...1], default is opaque.
      :wref:
        The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    Raises:
      :ValueError: if html is neither a known color name nor a valid
      hexadecimal representation (propagated from html_to_rgb).

    >>> str(Color.FromHtml('#ff8000'))
    '(1, 0.501961, 0, 1)'
    >>> str(Color.FromHtml('ff8000'))
    '(1, 0.501961, 0, 1)'
    >>> str(Color.FromHtml('#f60'))
    '(1, 0.4, 0, 1)'
    >>> str(Color.FromHtml('f60'))
    '(1, 0.4, 0, 1)'
    >>> str(Color.FromHtml('lemonchiffon'))
    '(1, 0.980392, 0.803922, 1)'
    >>> str(Color.FromHtml('#ff8000', 0.5))
    '(1, 0.501961, 0, 0.5)'

    """
    # Parsing (including named colors) is handled by html_to_rgb.
    return Color(html_to_rgb(html), 'rgb', alpha, wref)
  @staticmethod
  def FromPil(pil, alpha=1.0, wref=_DEFAULT_WREF):
    """Create a new instance based on the specified PIL color.

    Parameters:
      :pil:
        A PIL compatible color representation (0xBBGGRR)
      :alpha:
        The color transparency [0...1], default is opaque
      :wref:
        The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> str(Color.FromPil(0x0080ff))
    '(1, 0.501961, 0, 1)'
    >>> str(Color.FromPil(0x0080ff, 0.5))
    '(1, 0.501961, 0, 0.5)'

    """
    # Unpack the 0xBBGGRR integer into RGB floats, then delegate.
    return Color(pil_to_rgb(pil), 'rgb', alpha, wref)
  def __init__(self, values, mode='rgb', alpha=1.0, wref=_DEFAULT_WREF):
    """Instantiate a new grapefruit.Color object.

    Parameters:
      :values:
        The values of this color, in the specified representation.
      :mode:
        The representation mode used for values ('rgb' or 'hsl').
      :alpha:
        the alpha value (transparency) of this color.
      :wref:
        The whitepoint reference, default is 2° D65.

    Raises:
      :TypeError: if values is not a tuple.
      :ValueError: if mode is neither 'rgb' nor 'hsl'.
    """
    if not(isinstance(values, tuple)):
      raise TypeError("values must be a tuple")
    # Both the RGB and the HSL representations are kept; whichever one was
    # not supplied is derived once here.
    if mode=='rgb':
      self.__rgb = values
      self.__hsl = rgb_to_hsl(*values)
    elif mode=='hsl':
      self.__hsl = values
      self.__rgb = hsl_to_rgb(*values)
    else:
      raise ValueError("Invalid color mode: " + mode)
    self.__a = alpha    # transparency [0...1]
    self.__wref = wref  # whitepoint reference used by the L*a*b* conversions
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
if isinstance(other, Color):
return (self.__rgb==other.__rgb) and (self.__a==other.__a)
if len(other) != 4:
return False
return list(self.__rgb + (self.__a,)) == list(other)
except TypeError:
return False
except AttributeError:
return False
def __repr__(self):
return "Color{}".format(self.__rgb + (self.__a,))
def __str__(self):
"""A string representation of this grapefruit.Color instance.
Returns:
The RGBA representation of this grapefruit.Color instance.
"""
return '(%g, %g, %g, %g)' % (self.__rgb + (self.__a,))
if sys.version_info[0] < 3:
def __unicode__(self):
"""A unicode string representation of this grapefruit.Color instance.
Returns:
The RGBA representation of this grapefruit.Color instance.
"""
return unicode('%g, %g, %g, %g)') % (self.__rgb + (self.__a,))
  def __iter__(self):
    # Iterate the four channels as (r, g, b, a).
    return iter(self.__rgb + (self.__a,))

  def __len__(self):
    # A Color always behaves as a length-4 (r, g, b, a) sequence.
    return 4
def __get_is_legal(self):
return all(0.0 <= v <= 1.0 for v in self)
is_legal = property(fget=__get_is_legal, doc="Boolean indicating whether the color is within the legal gamut.")
def __get_nearest_legal(self):
def clamp(x, lo, hi):
if x < lo:
return lo
elif x > hi:
return hi
else:
return x
return Color.FromRgb(*[clamp(v, 0.0, 1.0) for v in self])
nearest_legal = property(fget=__get_nearest_legal, doc="The nearest legal color.")
  # --- Simple read-only accessors over the stored representations. ---
  def __get_alpha(self):
    return self.__a
  alpha = property(fget=__get_alpha, doc='The transparency of this color. 0.0 is transparent and 1.0 is fully opaque.')

  def __get_white_ref(self):
    return self.__wref
  white_ref = property(fget=__get_white_ref, doc='the white reference point of this color.')

  def __get_rgb(self):
    return self.__rgb
  rgb = property(fget=__get_rgb, doc='The RGB values of this Color.')

  def __get_hue(self):
    return self.__hsl[0]
  hue = property(fget=__get_hue, doc='The hue of this color.')

  def __get_hsl(self):
    return self.__hsl
  hsl = property(fget=__get_hsl, doc='The HSL values of this Color.')

  # --- Derived representations, computed on demand from the RGB values. ---
  def __get_hsv(self):
    # The stored HSL hue is reused so the hue survives for achromatic
    # colors, where rgb_to_hsv would report 0.
    h, s, v = rgb_to_hsv(*self.__rgb)
    return (self.__hsl[0], s, v)
  hsv = property(fget=__get_hsv, doc='The HSV values of this Color.')

  def __get_yiq(self):
    return rgb_to_yiq(*self.__rgb)
  yiq = property(fget=__get_yiq, doc='The YIQ values of this Color.')

  def __get_yuv(self):
    return rgb_to_yuv(*self.__rgb)
  yuv = property(fget=__get_yuv, doc='The YUV values of this Color.')

  def __get_xyz(self):
    return rgb_to_xyz(*self.__rgb)
  xyz = property(fget=__get_xyz, doc='The CIE-XYZ values of this Color.')

  def __get_lab(self):
    # Uses this color's own white reference for the XYZ -> L*a*b* step.
    return xyz_to_lab(wref=self.__wref, *rgb_to_xyz(*self.__rgb))
  lab = property(fget=__get_lab, doc='The CIE-LAB values of this Color.')

  def __get_cmy(self):
    return rgb_to_cmy(*self.__rgb)
  cmy = property(fget=__get_cmy, doc='The CMY values of this Color.')

  def __get_cmyk(self):
    return cmy_to_cmyk(*rgb_to_cmy(*self.__rgb))
  cmyk = property(fget=__get_cmyk, doc='The CMYK values of this Color.')

  def __get_int_tuple(self):
    return rgb_to_int_tuple(*self.__rgb)
  int_tuple = property(fget=__get_int_tuple, doc='This Color as a tuple of integers in the range [0...255]')

  def __get_html(self):
    return rgb_to_html(*self.__rgb)
  html = property(fget=__get_html, doc='This Color as an HTML color definition.')

  def __get_pil(self):
    return rgb_to_pil(*self.__rgb)
  pil = property(fget=__get_pil, doc='This Color as a PIL compatible value.')

  def __get_web_safe(self):
    return rgb_to_web_safe(*self.__rgb)
  web_safe = property(fget=__get_web_safe, doc='The web safe color nearest to this one (RGB).')

  def __get_greyscale(self):
    # NOTE(review): goes through the public `rgb` property rather than
    # self.__rgb like its siblings — equivalent, but inconsistent.
    return rgb_to_greyscale(*self.rgb)
  greyscale = property(fget=__get_greyscale, doc='The greyscale equivalent to this color (RGB).')
  def with_alpha(self, alpha):
    """Create a new instance based on this one with a new alpha value.

    Parameters:
      :alpha:
        The transparency of the new color [0...1].

    Returns:
      A grapefruit.Color instance.

    >>> Color.FromRgb(1.0, 0.5, 0.0, 1.0).with_alpha(0.5)
    Color(1.0, 0.5, 0.0, 0.5)

    """
    # Colors are immutable: every with_* method returns a new instance.
    return Color(self.__rgb, 'rgb', alpha, self.__wref)
  def with_white_ref(self, wref, labAsRef=False):
    """Create a new instance based on this one with a new white reference.

    Parameters:
      :wref:
        The whitepoint reference.
      :labAsRef:
        If True, the L*a*b* values of the current instance are used as reference
        for the new color; otherwise, the RGB values are used as reference.

    Returns:
      A grapefruit.Color instance.

    >>> c = Color.FromRgb(1.0, 0.5, 0.0, 1.0, WHITE_REFERENCE['std_D65'])
    >>> c2 = c.with_white_ref(WHITE_REFERENCE['sup_D50'])
    >>> c2.rgb
    (1.0, 0.5, 0.0)
    >>> '(%g, %g, %g)' % c2.white_ref
    '(0.96721, 1, 0.81428)'
    >>> c2 = c.with_white_ref(WHITE_REFERENCE['sup_D50'], labAsRef=True)
    >>> '(%g, %g, %g)' % c2.rgb
    '(1.01463, 0.490339, -0.148131)'
    >>> '(%g, %g, %g)' % c2.white_ref
    '(0.96721, 1, 0.81428)'
    >>> '(%g, %g, %g)' % c.lab
    '(66.9518, 0.43084, 0.739692)'
    >>> '(%g, %g, %g)' % c2.lab
    '(66.9518, 0.43084, 0.739693)'

    """
    if labAsRef:
      # Keep the L*a*b* coordinates fixed: the RGB values are re-derived
      # under the new white point (chromatic adaptation of the RGB side).
      l, a, b = self.__get_lab()
      return Color.FromLab(l, a, b, self.__a, wref)
    else:
      # Keep the RGB values fixed and simply attach the new white point.
      return Color(self.__rgb, 'rgb', self.__a, wref)
def with_hue(self, hue):
"""Create a new instance based on this one with a new hue.
Parameters:
:hue:
The hue of the new color [0...360].
Returns:
A grapefruit.Color instance.
>>> Color.FromHsl(30, 1, 0.5).with_hue(60)
Color(1.0, 1.0, 0.0, 1.0)
>>> Color.FromHsl(30, 1, 0.5).with_hue(60).hsl
(60, 1, 0.5)
"""
h, s, l = self.__hsl
return Color((hue, s, l), 'hsl', self.__a, self.__wref)
def with_saturation(self, saturation):
"""Create a new instance based on this one with a new saturation value.
.. note::
The saturation is defined for the HSL mode.
Parameters:
:saturation:
The saturation of the new color [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.FromHsl(30, 1, 0.5).with_saturation(0.5)
Color(0.75, 0.5, 0.25, 1.0)
>>> Color.FromHsl(30, 1, 0.5).with_saturation(0.5).hsl
(30, 0.5, 0.5)
"""
h, s, l = self.__hsl
return Color((h, saturation, l), 'hsl', self.__a, self.__wref)
def with_lightness(self, lightness):
"""Create a new instance based on this one with a new lightness value.
Parameters:
:lightness:
The lightness of the new color [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.FromHsl(30, 1, 0.5).with_lightness(0.25)
Color(0.5, 0.25, 0.0, 1.0)
>>> Color.FromHsl(30, 1, 0.5).with_lightness(0.25).hsl
(30, 1, 0.25)
"""
h, s, l = self.__hsl
return Color((h, s, lightness), 'hsl', self.__a, self.__wref)
def darker(self, level):
"""Create a new instance based on this one but darker.
Parameters:
:level:
The amount by which the color should be darkened to produce
the new one [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.FromHsl(30, 1, 0.5).darker(0.25)
Color(0.5, 0.25, 0.0, 1.0)
>>> Color.FromHsl(30, 1, 0.5).darker(0.25).hsl
(30, 1, 0.25)
"""
h, s, l = self.__hsl
return Color((h, s, max(l - level, 0)), 'hsl', self.__a, self.__wref)
def lighter(self, level):
"""Create a new instance based on this one but lighter.
Parameters:
:level:
The amount by which the color should be lightened to produce
the new one [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.FromHsl(30, 1, 0.5).lighter(0.25)
Color(1.0, 0.75, 0.5, 1.0)
>>> Color.FromHsl(30, 1, 0.5).lighter(0.25).hsl
(30, 1, 0.75)
"""
h, s, l = self.__hsl
return Color((h, s, min(l + level, 1)), 'hsl', self.__a, self.__wref)
def saturate(self, level):
"""Create a new instance based on this one but more saturated.
Parameters:
:level:
The amount by which the color should be saturated to produce
the new one [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.FromHsl(30, 0.5, 0.5).saturate(0.25)
Color(0.875, 0.5, 0.125, 1.0)
>>> Color.FromHsl(30, 0.5, 0.5).saturate(0.25).hsl
(30, 0.75, 0.5)
"""
h, s, l = self.__hsl
return Color((h, min(s + level, 1), l), 'hsl', self.__a, self.__wref)
def desaturate(self, level):
"""Create a new instance based on this one but less saturated.
Parameters:
:level:
The amount by which the color should be desaturated to produce
the new one [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.FromHsl(30, 0.5, 0.5).desaturate(0.25)
Color(0.625, 0.5, 0.375, 1.0)
>>> Color.FromHsl(30, 0.5, 0.5).desaturate(0.25).hsl
(30, 0.25, 0.5)
"""
h, s, l = self.__hsl
return Color((h, max(s - level, 0), l), 'hsl', self.__a, self.__wref)
  def web_safe_dither(self):
    """Return the two websafe colors nearest to this one.

    Returns:
      A tuple of two grapefruit.Color instances which are the two
      web safe colors closest this one.

    >>> c = Color.FromRgb(1.0, 0.45, 0.0)
    >>> c1, c2 = c.web_safe_dither()
    >>> str(c1)
    '(1, 0.4, 0, 1)'
    >>> str(c2)
    '(1, 0.6, 0, 1)'

    """
    # First color snaps to the nearest web safe value, second to the
    # alternative one (alt=True), suitable for dithering between the two.
    return (
      Color(rgb_to_web_safe(*self.__rgb), 'rgb', self.__a, self.__wref),
      Color(rgb_to_web_safe(alt=True, *self.__rgb), 'rgb', self.__a, self.__wref))
  def complementary_color(self, mode='ryb'):
    """Create a new instance which is the complementary color of this one.

    Parameters:
      :mode:
        Select which color wheel to use for the generation (ryb/rgb).

    Returns:
      A grapefruit.Color instance.

    >>> Color.FromHsl(30, 1, 0.5).complementary_color(mode='rgb')
    Color(0.0, 0.5, 1.0, 1.0)
    >>> Color.FromHsl(30, 1, 0.5).complementary_color(mode='rgb').hsl
    (210, 1, 0.5)

    """
    h, s, l = self.__hsl
    # In 'ryb' mode, hop onto Itten's RYB wheel, rotate 180 degrees there,
    # then map the result back to the RGB wheel.
    if mode == 'ryb': h = rgb_to_ryb(h)
    h = (h+180)%360
    if mode == 'ryb': h = ryb_to_rgb(h)
    return Color((h, s, l), 'hsl', self.__a, self.__wref)
  def make_gradient(self, target, steps=100):
    """Create a list with the gradient colors between this and the other color.

    Parameters:
      :target:
        The grapefruit.Color at the other end of the gradient.
      :steps:
        The number of gradients steps to create.

    Returns:
      A list of grapefruit.Color instances.

    >>> c1 = Color.FromRgb(1.0, 0.0, 0.0, alpha=1)
    >>> c2 = Color.FromRgb(0.0, 1.0, 0.0, alpha=0)
    >>> c1.make_gradient(c2, 3)
    [Color(0.75, 0.25, 0.0, 0.75), Color(0.5, 0.5, 0.0, 0.5), Color(0.25, 0.75, 0.0, 0.25)]

    """
    gradient = []
    rgba1 = self.__rgb + (self.__a,)
    rgba2 = target.__rgb + (target.__a,)
    # steps intermediate colors are produced, excluding both endpoints:
    # the loop runs n = 1 .. steps with blend factor d = n/(steps+1).
    steps += 1
    for n in range(1, steps):
      d = 1.0*n/steps
      # Linear interpolation of each RGBA channel; the new colors keep
      # this color's white reference.
      r = (rgba1[0]*(1-d)) + (rgba2[0]*d)
      g = (rgba1[1]*(1-d)) + (rgba2[1]*d)
      b = (rgba1[2]*(1-d)) + (rgba2[2]*d)
      a = (rgba1[3]*(1-d)) + (rgba2[3]*d)
      gradient.append(Color((r, g, b), 'rgb', a, self.__wref))
    return gradient
  def make_monochrome_scheme(self):
    """Return 4 colors in the same hue with varying saturation/lightness.

    Returns:
      A tuple of 4 grapefruit.Color in the same hue as this one,
      with varying saturation/lightness.

    >>> c = Color.FromHsl(30, 0.5, 0.5)
    >>> ['(%g, %g, %g)' % clr.hsl for clr in c.make_monochrome_scheme()]
    ['(30, 0.2, 0.8)', '(30, 0.5, 0.3)', '(30, 0.2, 0.6)', '(30, 0.5, 0.8)']

    """
    def _wrap(x, min, thres, plus):
      # Offset x away from its current value: shift it up by `plus` when it
      # sits within `thres` of `min`, otherwise pull it down by `min`.
      # (Heuristic constants below appear hand-tuned — no derivation given.)
      if (x-min) < thres: return x + plus
      else: return x-min

    h, s, l = self.__hsl
    s1 = _wrap(s, 0.3, 0.1, 0.3)
    l1 = _wrap(l, 0.5, 0.2, 0.3)
    s2 = s
    l2 = _wrap(l, 0.2, 0.2, 0.6)
    s3 = s1
    l3 = max(0.2, l + (1-l)*0.2)
    s4 = s
    l4 = _wrap(l, 0.5, 0.2, 0.3)

    return (
      Color((h, s1, l1), 'hsl', self.__a, self.__wref),
      Color((h, s2, l2), 'hsl', self.__a, self.__wref),
      Color((h, s3, l3), 'hsl', self.__a, self.__wref),
      Color((h, s4, l4), 'hsl', self.__a, self.__wref))
  def make_triadic_scheme(self, angle=120, mode='ryb'):
    """Return two colors forming a triad or a split complementary with this one.

    Parameters:
      :angle:
        The angle between the hues of the created colors.
        The default value makes a triad.
      :mode:
        Select which color wheel to use for the generation (ryb/rgb).

    Returns:
      A tuple of two grapefruit.Color forming a color triad with
      this one or a split complementary.

    >>> c1 = Color.FromHsl(30, 1, 0.5)

    >>> c2, c3 = c1.make_triadic_scheme(mode='rgb')
    >>> c2.hsl
    (150.0, 1, 0.5)
    >>> c3.hsl
    (270.0, 1, 0.5)

    >>> c2, c3 = c1.make_triadic_scheme(angle=40, mode='rgb')
    >>> c2.hsl
    (190.0, 1, 0.5)
    >>> c3.hsl
    (230.0, 1, 0.5)

    """
    h, s, l = self.__hsl
    # The two new hues straddle the complement, angle degrees apart
    # (capped at 120, i.e. a full triad).
    angle = min(angle, 120) / 2.0
    if mode == 'ryb': h = rgb_to_ryb(h)
    h += 180
    h1 = (h - angle) % 360
    h2 = (h + angle) % 360
    if mode == 'ryb':
      # Map the generated hues back from Itten's RYB wheel to RGB.
      h1 = ryb_to_rgb(h1)
      h2 = ryb_to_rgb(h2)
    return (
      Color((h1, s, l), 'hsl', self.__a, self.__wref),
      Color((h2, s, l), 'hsl', self.__a, self.__wref))
def make_tetradic_scheme(self, angle=30, mode='ryb'):
    """Build three colors forming a tetrad with this one.

    Parameters:
      :angle: offset subtracted from the adjacent colors' hues
        [-90...90]; zero generates a square tetrad.
      :mode: which color wheel to use ('ryb' or 'rgb').

    Returns:
      A tuple of three grapefruit.Color completing the tetrad.

    >>> col = Color.FromHsl(30, 1, 0.5)
    >>> [c.hsl for c in col.make_tetradic_scheme(mode='rgb', angle=30)]
    [(90, 1, 0.5), (210, 1, 0.5), (270, 1, 0.5)]
    """
    hue, sat, lum = self.__hsl
    if mode == 'ryb':
        hue = rgb_to_ryb(hue)
    # Adjacent hues are shifted by -angle; the complement is not.
    hues = [(hue + off) % 360 for off in (90 - angle, 180, 270 - angle)]
    if mode == 'ryb':
        hues = [ryb_to_rgb(h_i) for h_i in hues]
    return tuple(Color((h_i, sat, lum), 'hsl', self.__a, self.__wref)
                 for h_i in hues)
def make_analogous_scheme(self, angle=30, mode='ryb'):
    """Build two colors analogous to this one.

    Parameters:
      :angle: hue distance between this color and each generated one.
      :mode: which color wheel to use ('ryb' or 'rgb').

    Returns:
      A tuple of two grapefruit.Color adjacent on the wheel.

    >>> c1 = Color.FromHsl(30, 1, 0.5)
    >>> c2, c3 = c1.make_analogous_scheme(angle=60, mode='rgb')
    >>> c2.hsl
    (330, 1, 0.5)
    >>> c3.hsl
    (90, 1, 0.5)
    >>> c2, c3 = c1.make_analogous_scheme(angle=10, mode='rgb')
    >>> c2.hsl
    (20, 1, 0.5)
    >>> c3.hsl
    (40, 1, 0.5)
    """
    hue, sat, lum = self.__hsl
    if mode == 'ryb':
        hue = rgb_to_ryb(hue)
    hue += 360  # keep (hue - angle) non-negative before the modulo
    lo = (hue - angle) % 360
    hi = (hue + angle) % 360
    if mode == 'ryb':
        lo, hi = ryb_to_rgb(lo), ryb_to_rgb(hi)
    return (Color((lo, sat, lum), 'hsl', self.__a, self.__wref),
            Color((hi, sat, lum), 'hsl', self.__a, self.__wref))
def alpha_blend(self, other):
    """Alpha-blend this color on top of *other*.

    Parameters:
      :other: the grapefruit.Color serving as the backdrop.

    Returns:
      A new grapefruit.Color with the combined alpha.

    >>> c1 = Color.FromRgb(1, 0.5, 0, 0.2)
    >>> c2 = Color.FromRgb(1, 1, 1, 0.8)
    >>> c3 = c1.alpha_blend(c2)
    >>> str(c3)
    '(1, 0.875, 0.75, 0.84)'
    """
    # Resulting alpha of stacking the two layers.
    fa = self.__a + other.__a - (self.__a * other.__a)
    # Weight of this color relative to the backdrop.
    # NOTE(review): this divides by the *other* color's alpha, so an
    # opaque source over a fully transparent backdrop raises
    # ZeroDivisionError — confirm whether that input is expected.
    sa = 0 if fa == 0 else min(1.0, self.__a / other.__a)
    da = 1.0 - sa
    blended = tuple(s * sa + d * da
                    for s, d in zip(self.__rgb, other.__rgb))
    return Color(blended, 'rgb', fa, self.__wref)
def blend(self, other, percent=0.5):
    """Mix this color with *other*.

    Parameters:
      :other: the grapefruit.Color to mix in.
      :percent: this color's weight in the mix [0...1], default 0.5.

    Returns:
      The blended grapefruit.Color.

    >>> c1 = Color.FromRgb(1, 0.5, 0, 0.2)
    >>> c2 = Color.FromRgb(1, 1, 1, 0.6)
    >>> c3 = c1.blend(c2)
    >>> str(c3)
    '(1, 0.75, 0.5, 0.4)'
    """
    other_weight = 1.0 - percent
    mixed = tuple((mine * percent) + (theirs * other_weight)
                  for mine, theirs in zip(self.__rgb, other.__rgb))
    alpha = (self.__a * percent) + (other.__a * other_weight)
    return Color(mixed, 'rgb', alpha, self.__wref)
def _test():
import doctest
reload(doctest)
doctest.testmod()
# Running the module directly executes its doctest suite.
if __name__ == '__main__':
    _test()
# vim: ts=2 sts=2 sw=2 et
# (stray VCS commit-message line, kept as a comment): "convert properties and add setters" — marks the boundary between the older module copy above and the updated copy below.
#!/usr/bin/python
# -*- coding: utf-8 -*-#
# Copyright (c) 2008-2016, Xavier Basty
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GrapeFruit - Color manipulation in Python"""
from __future__ import division
import sys
# The default white reference, use 2° Standard Observer, D65 (daylight)
_DEFAULT_WREF = (0.95043, 1.00000, 1.08890)
# Frequently used fractions, precomputed once at import time.
_oneThird = 1.0 / 3
# Threshold below which the sRGB companding curve is linear (see xyz_to_rgb).
_srgbGammaCorrInv = 0.03928 / 12.92
_sixteenHundredsixteenth = 16.0 / 116
# Anchor hues for converting between the RGB wheel and Itten's RYB wheel:
# 25 values sampled every 15° of the source wheel; rgb_to_ryb/ryb_to_rgb
# interpolate linearly between adjacent anchors.
_RybWheel = (
0, 26, 52,
83, 120, 130,
141, 151, 162,
177, 190, 204,
218, 232, 246,
261, 275, 288,
303, 317, 330,
338, 345, 352,
360)
_RgbWheel = (
0, 8, 17,
26, 34, 41,
48, 54, 60,
81, 103, 123,
138, 155, 171,
187, 204, 219,
234, 251, 267,
282, 298, 329,
360)
# Standard illuminant whitepoints as (X, Y, Z) tuples with Y normalized to
# 1.0, usable as the `wref` argument of the XYZ/Lab conversions.
# 'std_*' keys appear to be the 2° standard observer and 'sup_*' the 10°
# supplementary observer — TODO confirm against the CIE tables.
WHITE_REFERENCE = {
'std_A' : (1.09847, 1.00000, 0.35582),
'std_B' : (0.99093, 1.00000, 0.85313),
'std_C' : (0.98071, 1.00000, 1.18225),
'std_D50' : (0.96421, 1.00000, 0.82519),
'std_D55' : (0.95680, 1.00000, 0.92148),
'std_D65' : (0.95043, 1.00000, 1.08890),
'std_D75' : (0.94972, 1.00000, 1.22639),
'std_E' : (1.00000, 1.00000, 1.00000),
'std_F1' : (0.92834, 1.00000, 1.03665),
'std_F2' : (0.99145, 1.00000, 0.67316),
'std_F3' : (1.03753, 1.00000, 0.49861),
'std_F4' : (1.09147, 1.00000, 0.38813),
'std_F5' : (0.90872, 1.00000, 0.98723),
'std_F6' : (0.97309, 1.00000, 0.60191),
'std_F7' : (0.95017, 1.00000, 1.08630),
'std_F8' : (0.96413, 1.00000, 0.82333),
'std_F9' : (1.00365, 1.00000, 0.67868),
'std_F10' : (0.96174, 1.00000, 0.81712),
'std_F11' : (1.00899, 1.00000, 0.64262),
'std_F12' : (1.08046, 1.00000, 0.39228),
'sup_A' : (1.11142, 1.00000, 0.35200),
'sup_B' : (0.99178, 1.00000, 0.84349),
'sup_C' : (0.97286, 1.00000, 1.16145),
'sup_D50' : (0.96721, 1.00000, 0.81428),
'sup_D55' : (0.95797, 1.00000, 0.90925),
'sup_D65' : (0.94810, 1.00000, 1.07305),
'sup_D75' : (0.94417, 1.00000, 1.20643),
'sup_E' : (1.00000, 1.00000, 1.00000),
'sup_F1' : (0.94791, 1.00000, 1.03191),
'sup_F2' : (1.03245, 1.00000, 0.68990),
'sup_F3' : (1.08968, 1.00000, 0.51965),
'sup_F4' : (1.14961, 1.00000, 0.40963),
'sup_F5' : (0.93369, 1.00000, 0.98636),
'sup_F6' : (1.02148, 1.00000, 0.62074),
'sup_F7' : (0.95780, 1.00000, 1.07618),
'sup_F8' : (0.97115, 1.00000, 0.81135),
'sup_F9' : (1.02116, 1.00000, 0.67826),
'sup_F10' : (0.99001, 1.00000, 0.83134),
'sup_F11' : (1.03820, 1.00000, 0.65555),
'sup_F12' : (1.11428, 1.00000, 0.40353)}
# CSS/SVG color keywords mapped to their '#rrggbb' hex strings; consumed by
# html_to_rgb() when the input is a color name rather than a hex triplet.
NAMED_COLOR = {
'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgrey': '#a9a9a9',
'darkgreen': '#006400',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'grey': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
'indianred': '#cd5c5c',
'indigo': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgreen': '#90ee90',
'lightgray': '#d3d3d3',
'lightgrey': '#d3d3d3',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumpurple': '#9370db',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
'palevioletred': '#db7093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32'}
def rgb_to_hsl(r, g, b):
    """Convert RGB components (each in [0...1]) to HSL.

    Returns:
      An (h, s, l) tuple with h in [0...360] and s, l in [0...1].

    >>> rgb_to_hsl(1, 0.5, 0)
    (30.0, 1.0, 0.5)
    """
    lo = min(r, g, b)
    hi = max(r, g, b)
    lightness = (hi + lo) / 2.0
    if lo == hi:
        # All channels equal: grey has no hue or saturation.
        return (0.0, 0.0, lightness)
    span = hi - lo
    if lightness < 0.5:
        saturation = span / (hi + lo)
    else:
        saturation = span / (2.0 - hi - lo)
    dr, dg, db = ((hi - v) / span for v in (r, g, b))
    if r == hi:
        hue = db - dg
    elif g == hi:
        hue = 2.0 + dr - db
    else:
        hue = 4.0 + dg - dr
    return ((hue * 60.0) % 360.0, saturation, lightness)
def _hue_to_rgb(n1, n2, h):
h %= 6.0
if h < 1.0: return n1 + ((n2-n1) * h)
if h < 3.0: return n2
if h < 4.0: return n1 + ((n2-n1) * (4.0 - h))
return n1
def hsl_to_rgb(h, s, l):
    """Convert HSL (h in [0...360], s and l in [0...1]) to RGB.

    Returns:
      An (r, g, b) tuple, each component in [0...1].

    >>> hsl_to_rgb(30.0, 1.0, 0.5)
    (1.0, 0.5, 0.0)
    """
    if s == 0:
        return (l, l, l)  # no saturation: grey
    if l < 0.5:
        n2 = l * (1.0 + s)
    else:
        n2 = l + s - (l * s)
    n1 = (2.0 * l) - n2
    h /= 60.0

    def ramp(offset):
        # _hue_to_rgb inlined: piecewise-linear channel response for
        # the hue position h + offset (in units of 60°).
        x = (h + offset) % 6.0
        if x < 1.0:
            return n1 + (n2 - n1) * x
        if x < 3.0:
            return n2
        if x < 4.0:
            return n1 + (n2 - n1) * (4.0 - x)
        return n1

    # Red leads the hue by two sectors, blue trails by two.
    return (ramp(2), ramp(0), ramp(-2))
def rgb_to_hsv(r, g, b):
    """Convert RGB components (each in [0...1]) to HSV.

    Returns:
      An (h, s, v) tuple with h in [0...360] and s, v in [0...1].

    >>> rgb_to_hsv(1, 0.5, 0)
    (30.0, 1.0, 1.0)
    """
    value = float(max(r, g, b))
    spread = value - min(r, g, b)
    if spread == 0:
        return (0.0, 0.0, value)  # achromatic
    dr, dg, db = ((value - c) / spread for c in (r, g, b))
    if value == r:
        hue = db - dg        # between yellow & magenta
    elif value == g:
        hue = 2.0 + dr - db  # between cyan & yellow
    else:
        hue = 4.0 + dg - dr  # between magenta & cyan
    return ((hue * 60.0) % 360.0, spread / value, value)
def hsv_to_rgb(h, s, v):
    """Convert HSV (h in [0...360], s and v in [0...1]) to RGB.

    Returns:
      An (r, g, b) tuple, each component in [0...1].

    >>> hsv_to_rgb(30.0, 1.0, 0.5)
    (0.5, 0.25, 0.0)
    """
    if s == 0:
        return (v, v, v)  # achromatic
    sector = (h / 60.0) % 6.0
    i = int(sector)
    f = sector - i
    if i % 2 == 0:
        f = 1 - f  # mirror the ramp in even sectors
    m = v * (1.0 - s)
    n = v * (1.0 - (s * f))
    # One arrangement of (v, n, m) per 60° sector.
    return ((v, n, m), (n, v, m), (m, v, n),
            (m, n, v), (n, m, v), (v, m, n))[i]
def rgb_to_yiq(r, g, b):
    """Convert RGB components (each in [0...1]) to YIQ.

    Returns:
      A (y, i, q) tuple.

    >>> '(%g, %g, %g)' % rgb_to_yiq(1, 0.5, 0)
    '(0.592263, 0.458874, -0.0499818)'
    """
    # Luma/chroma transform matrix, applied row by row.
    return (
        (r * 0.29895808) + (g * 0.58660979) + (b * 0.11443213),
        (r * 0.59590296) - (g * 0.27405705) - (b * 0.32184591),
        (r * 0.21133576) - (g * 0.52263517) + (b * 0.31129940),
    )
def yiq_to_rgb(y, i, q):
    """Convert YIQ coordinates to RGB.

    Returns:
      An (r, g, b) tuple, each component nominally in [0...1].

    >>> '(%g, %g, %g)' % yiq_to_rgb(0.592263, 0.458874, -0.0499818)
    '(1, 0.5, 5.442e-07)'
    """
    # Inverse of the rgb_to_yiq matrix.
    return (
        y + (i * 0.9562) + (q * 0.6210),
        y - (i * 0.2717) - (q * 0.6485),
        y - (i * 1.1053) + (q * 1.7020),
    )
def rgb_to_yuv(r, g, b):
    """Convert RGB components (each in [0...1]) to YUV.

    Returns:
      A (y, u, v) tuple with y in [0...1], u in [-0.436...0.436] and
      v in [-0.615...0.615].

    >>> '(%g, %g, %g)' % rgb_to_yuv(1, 0.5, 0)
    '(0.5925, -0.29156, 0.357505)'
    """
    return (
        (r * 0.29900) + (g * 0.58700) + (b * 0.11400),
        -(r * 0.14713) - (g * 0.28886) + (b * 0.43600),
        (r * 0.61500) - (g * 0.51499) - (b * 0.10001),
    )
def yuv_to_rgb(y, u, v):
    """Convert YUV coordinates to RGB.

    Returns:
      An (r, g, b) tuple, each component nominally in [0...1].

    >>> '(%g, %g, %g)' % yuv_to_rgb(0.5925, -0.2916, 0.3575)
    '(0.999989, 0.500015, -6.3276e-05)'
    """
    # Inverse of the rgb_to_yuv matrix.
    return (
        y + (v * 1.13983),
        y - (u * 0.39465) - (v * 0.58060),
        y + (u * 2.03211),
    )
def rgb_to_xyz(r, g, b):
    """Convert sRGB (D65) components, each in [0...1], to CIE XYZ.

    The sRGB gamma correction is undone before the linear transform.

    Returns:
      An (x, y, z) tuple, each component nominally in [0...1].

    >>> '(%g, %g, %g)' % rgb_to_xyz(1, 0.5, 0)
    '(0.488941, 0.365682, 0.0448137)'
    """
    def linearize(v):
        # Inverse sRGB companding: linear segment near black, power
        # curve elsewhere.
        return v / 12.92 if v <= 0.03928 else ((v + 0.055) / 1.055) ** 2.4

    r, g, b = (linearize(v) for v in (r, g, b))
    return (
        (r * 0.4124) + (g * 0.3576) + (b * 0.1805),
        (r * 0.2126) + (g * 0.7152) + (b * 0.0722),
        (r * 0.0193) + (g * 0.1192) + (b * 0.9505),
    )
def xyz_to_rgb(x, y, z):
    """Convert CIE XYZ coordinates, each in [0...1], to sRGB (D65).

    sRGB gamma correction is applied after the linear transform.

    Returns:
      An (r, g, b) tuple, each component nominally in [0...1].

    >>> '(%g, %g, %g)' % xyz_to_rgb(0.488941, 0.365682, 0.0448137)
    '(1, 0.5, 6.81883e-08)'
    """
    r = (x * 3.2406255) - (y * 1.5372080) - (z * 0.4986286)
    g = -(x * 0.9689307) + (y * 1.8757561) + (z * 0.0415175)
    b = (x * 0.0557101) - (y * 0.2040211) + (z * 1.0569959)

    def compand(v):
        # Forward sRGB companding; 0.03928/12.92 is the linear-segment
        # threshold (module constant _srgbGammaCorrInv, inlined here).
        if v <= 0.03928 / 12.92:
            return v * 12.92
        return (1.055 * (v ** (1 / 2.4))) - 0.055

    return tuple(compand(v) for v in (r, g, b))
def xyz_to_lab(x, y, z, wref=_DEFAULT_WREF):
    """Convert CIE XYZ coordinates to CIE L*a*b*.

    Parameters:
      :wref: whitepoint reference as an (X, Y, Z) tuple, default 2° D65.

    Returns:
      An (L, a, b) tuple with L in [0...100] and a, b in [-1...1].

    >>> '(%g, %g, %g)' % xyz_to_lab(0.488941, 0.365682, 0.0448137)
    '(66.9518, 0.43084, 0.739692)'
    >>> '(%g, %g, %g)' % xyz_to_lab(0.488941, 0.365682, 0.0448137, WHITE_REFERENCE['std_D50'])
    '(66.9518, 0.411663, 0.67282)'
    """
    # Normalize against the whitepoint.
    x, y, z = (v / w for v, w in zip((x, y, z), wref))

    def compress(v):
        # Nonlinear distortion: cube root above the threshold, linear
        # approximation below it.
        if v > 0.008856:
            return v ** _oneThird
        return (7.787 * v) + _sixteenHundredsixteenth

    x, y, z = (compress(v) for v in (x, y, z))
    # Scale into the L*a*b* axes.
    return ((116 * y) - 16, 5.0 * (x - y), 2.0 * (y - z))
def lab_to_xyz(l, a, b, wref=_DEFAULT_WREF):
    """Convert CIE L*a*b* to CIE 1931 XYZ.

    Parameters:
      :l: the L component [0...100].
      :a:, :b:: the a and b components [-1...1].
      :wref: whitepoint reference as an (X, Y, Z) tuple, default 2° D65.

    Returns:
      An (x, y, z) tuple.

    >>> '(%g, %g, %g)' % lab_to_xyz(66.9518, 0.43084, 0.739692)
    '(0.488941, 0.365682, 0.0448137)'
    >>> '(%g, %g, %g)' % lab_to_xyz(66.9518, 0.411663, 0.67282, WHITE_REFERENCE['std_D50'])
    '(0.488941, 0.365682, 0.0448138)'
    """
    y = (l + 16) / 116
    x = (a / 5.0) + y
    z = y - (b / 2.0)

    def expand(v):
        # Invert the L*a*b* nonlinearity (cube above threshold,
        # linear approximation below).
        if v > 0.206893:
            return v ** 3
        return (v - _sixteenHundredsixteenth) / 7.787

    return tuple(expand(v) * w for v, w in zip((x, y, z), wref))
def cmyk_to_cmy(c, m, y, k):
    """Convert CMYK components (each in [0...1]) to CMY.

    Returns:
      A (c, m, y) tuple.

    >>> '(%g, %g, %g)' % cmyk_to_cmy(1, 0.32, 0, 0.5)
    '(1, 0.66, 0.5)'
    """
    ink = 1 - k  # fraction left once the black component is folded in
    return tuple(v * ink + k for v in (c, m, y))
def cmy_to_cmyk(c, m, y):
    """Convert CMY components (each in [0...1]) to CMYK.

    Returns:
      A (c, m, y, k) tuple.

    >>> '(%g, %g, %g, %g)' % cmy_to_cmyk(1, 0.66, 0.5)
    '(1, 0.32, 0, 0.5)'
    """
    k = min(c, m, y)
    if k == 1.0:
        return (0.0, 0.0, 0.0, 1.0)  # pure black
    mk = 1 - k
    return ((c - k) / mk, (m - k) / mk, (y - k) / mk, k)
def rgb_to_cmy(r, g, b):
    """Convert RGB components (each in [0...1]) to their CMY complement.

    >>> rgb_to_cmy(1, 0.5, 0)
    (0, 0.5, 1)
    """
    return tuple(1 - v for v in (r, g, b))
def cmy_to_rgb(c, m, y):
    """Convert CMY components (each in [0...1]) to their RGB complement.

    >>> cmy_to_rgb(0, 0.5, 1)
    (1, 0.5, 0)
    """
    return tuple(1 - v for v in (c, m, y))
def rgb_to_ints(r, g, b):
    """Scale RGB floats [0...1] to a tuple of 8-bit ints [0...255].

    >>> rgb_to_ints(1, 0.5, 0)
    (255, 128, 0)
    """
    return tuple(int(round(channel * 255)) for channel in (r, g, b))
def ints_to_rgb(ints):
    """Scale an (r, g, b) tuple of 8-bit ints [0...255] down to floats.

    >>> '(%g, %g, %g)' % ints_to_rgb((255, 128, 0))
    '(1, 0.501961, 0)'
    """
    # True division (from __future__ import division at module top).
    return tuple(channel / 255 for channel in ints)
def rgb_to_html(r, g, b):
    """Format RGB floats [0...1] as a CSS '#RRGGBB' string.

    >>> rgb_to_html(1, 0.5, 0)
    '#ff8000'
    """
    # Clamp the scaled values at 255 before hex-formatting.
    clamped = tuple(min(round(v * 255), 255) for v in (r, g, b))
    return '#%02x%02x%02x' % clamped
def html_to_rgb(html):
    """Parse an HTML color ('#RRGGBB', '#RGB', or a color name) to RGB.

    Returns:
      An (r, g, b) tuple, each component in [0...1].

    Throws:
      :ValueError: when the input is neither a known color name nor a
        valid hexadecimal triplet.

    >>> '(%g, %g, %g)' % html_to_rgb('#ff8000')
    '(1, 0.501961, 0)'
    >>> '(%g, %g, %g)' % html_to_rgb('#f60')
    '(1, 0.4, 0)'
    >>> '(%g, %g, %g)' % html_to_rgb('lemonchiffon')
    '(1, 0.980392, 0.803922)'
    """
    html = html.strip().lower()
    if html[0] == '#':
        html = html[1:]
    elif html in NAMED_COLOR:
        # Resolve the keyword, then drop its leading '#'.
        html = NAMED_COLOR[html][1:]
    if len(html) == 6:
        parts = (html[0:2], html[2:4], html[4:6])
    elif len(html) == 3:
        # Short form: duplicate each digit ('f60' -> 'ff', '66', '00').
        parts = tuple(2 * ch for ch in html)
    else:
        raise ValueError("input #%s is not in #RRGGBB format" % html)
    return tuple(int(p, 16) / 255.0 for p in parts)
def rgb_to_pil(r, g, b):
    """Pack RGB floats [0...1] into a PIL-style 0xBBGGRR integer.

    >>> '0x%06x' % rgb_to_pil(1, 0.5, 0)
    '0x0080ff'
    """
    ri, gi, bi = (min(int(round(v * 255)), 255) for v in (r, g, b))
    # Red occupies the low byte, blue the high byte.
    return (bi << 16) + (gi << 8) + ri
def pil_to_rgb(pil):
    """Unpack a PIL-style 0xBBGGRR integer into RGB floats [0...1].

    >>> '(%g, %g, %g)' % pil_to_rgb(0x0080ff)
    '(1, 0.501961, 0)'
    """
    # Red sits in the low byte, blue in the high byte.
    channels = (pil & 0xff, (pil >> 8) & 0xff, (pil >> 16) & 0xff)
    return tuple(c / 255.0 for c in channels)
def _web_safe_component(c, alt=False):
"""Convert a color component to its web safe equivalent.
Parameters:
:c:
The component value [0...1]
:alt:
If True, return the alternative value instead of the nearest one.
Returns:
The web safe equivalent of the component value.
"""
# This sucks, but floating point between 0 and 1 is quite fuzzy...
# So we just change the scale a while to make the equality tests
# work, otherwise it gets wrong at some decimal far to the right.
sc = c * 100.0
# If the color is already safe, return it straight away
d = sc % 20
if d==0: return c
# Get the lower and upper safe values
l = sc - d
u = l + 20
# Return the 'closest' value according to the alt flag
if alt:
if (sc-l) >= (u-sc): return l/100.0
else: return u/100.0
else:
if (sc-l) >= (u-sc): return u/100.0
else: return l/100.0
def rgb_to_web_safe(r, g, b, alt=False):
    """Snap each RGB component (each in [0...1]) to the web-safe palette.

    Parameters:
      :alt: when True, pick the alternative neighbor per component
        instead of the nearest one; can be used for dithering.

    Returns:
      An (r, g, b) tuple of web-safe components in [0...1].

    >>> '(%g, %g, %g)' % rgb_to_web_safe(1, 0.55, 0.0)
    '(1, 0.6, 0)'
    """
    return tuple(_web_safe_component(v, alt) for v in (r, g, b))
def rgb_to_greyscale(r, g, b):
    """Collapse RGB (each in [0...1]) to greyscale via the channel average.

    >>> '(%g, %g, %g)' % rgb_to_greyscale(1, 0.8, 0)
    '(0.6, 0.6, 0.6)'
    """
    mean = (r + g + b) / 3.0
    return (mean, mean, mean)
def rgb_to_ryb(hue):
    """Map a hue on the RGB color wheel [0...360] to Itten's RYB wheel.

    >>> rgb_to_ryb(15)
    26.0
    """
    # Interpolate linearly between the two anchors bracketing the hue
    # (the wheel tables hold one anchor every 15°).
    frac = hue % 15
    seg = int(hue / 15)
    lo, hi = _RybWheel[seg], _RybWheel[seg + 1]
    return lo + (hi - lo) * frac / 15
def ryb_to_rgb(hue):
    """Map a hue on Itten's RYB color wheel [0...360] to the RGB wheel.

    >>> ryb_to_rgb(15)
    8.0
    """
    # Mirror of rgb_to_ryb, using the RGB-wheel anchor table.
    frac = hue % 15
    seg = int(hue / 15)
    lo, hi = _RgbWheel[seg], _RgbWheel[seg + 1]
    return lo + (hi - lo) * frac / 15
class Color(object):
"""Hold a color value.
Example usage:
To create an instance of the grapefruit.Color from RGB values:
>>> import grapefruit
>>> r, g, b = 1, 0.5, 0
>>> col = grapefruit.Color.FromRgb(r, g, b)
To get the values of the color in another colorspace:
>>> h, s, v = col.hsv
>>> l, a, b = col.lab
To get the complementary of a color:
>>> compl = col.complementary_color(mode='rgb')
>>> print(compl.hsl)
(210.0, 1.0, 0.5)
To directly convert RGB values to their HSL equivalent:
>>> h, s, l = rgb_to_hsl(r, g, b)
"""
# --==================--------------------------------------------------------
# -- Creation methods --
# --==================--
@staticmethod
def FromRgb(r, g, b, alpha=1.0, wref=_DEFAULT_WREF):
    """Build a Color from RGB components.

    Parameters:
      :r:, :g:, :b:: channel values [0...1].
      :alpha: transparency [0...1], opaque by default.
      :wref: whitepoint reference, default 2° D65.

    >>> Color.FromRgb(1.0, 0.5, 0.0)
    Color(1.0, 0.5, 0.0, 1.0)
    >>> Color.FromRgb(1.0, 0.5, 0.0, 0.5)
    Color(1.0, 0.5, 0.0, 0.5)
    """
    return Color((r, g, b), mode='rgb', alpha=alpha, wref=wref)
@staticmethod
def FromHsl(h, s, l, alpha=1.0, wref=_DEFAULT_WREF):
    """Build a Color from HSL components.

    Parameters:
      :h: hue [0...360]; :s:, :l:: saturation/lightness [0...1].
      :alpha: transparency [0...1], opaque by default.
      :wref: whitepoint reference, default 2° D65.

    >>> Color.FromHsl(30, 1, 0.5)
    Color(1.0, 0.5, 0.0, 1.0)
    >>> Color.FromHsl(30, 1, 0.5, 0.5)
    Color(1.0, 0.5, 0.0, 0.5)
    """
    return Color((h, s, l), mode='hsl', alpha=alpha, wref=wref)
@staticmethod
def FromHsv(h, s, v, alpha=1.0, wref=_DEFAULT_WREF):
    """Build a Color from HSV components.

    Parameters:
      :h: hue [0...360]; :s:, :v:: saturation/value [0...1].
      :alpha: transparency [0...1], opaque by default.
      :wref: whitepoint reference, default 2° D65.

    >>> Color.FromHsv(30, 1, 1)
    Color(1.0, 0.5, 0.0, 1.0)
    >>> Color.FromHsv(30, 1, 1, 0.5)
    Color(1.0, 0.5, 0.0, 0.5)
    """
    # Convert through RGB to obtain s/l, but keep the caller's hue:
    # the round-trip recomputes a hue that is discarded here.
    _, s, l = rgb_to_hsl(*hsv_to_rgb(h, s, v))
    return Color((h, s, l), mode='hsl', alpha=alpha, wref=wref)
@staticmethod
def FromYiq(y, i, q, alpha=1.0, wref=_DEFAULT_WREF):
    """Build a Color from YIQ components.

    Parameters:
      :y:, :i:, :q:: the YIQ components [0...1].
      :alpha: transparency [0...1], opaque by default.
      :wref: whitepoint reference, default 2° D65.

    >>> str(Color.FromYiq(0.5922, 0.45885,-0.05))
    '(0.999902, 0.499955, -6.6905e-05, 1)'
    >>> str(Color.FromYiq(0.5922, 0.45885,-0.05, 0.5))
    '(0.999902, 0.499955, -6.6905e-05, 0.5)'
    """
    return Color(yiq_to_rgb(y, i, q), mode='rgb', alpha=alpha, wref=wref)
@staticmethod
def FromYuv(y, u, v, alpha=1.0, wref=_DEFAULT_WREF):
    """Build a Color from YUV components.

    Parameters:
      :y: [0...1]; :u: [-0.436...0.436]; :v: [-0.615...0.615].
      :alpha: transparency [0...1], opaque by default.
      :wref: whitepoint reference, default 2° D65.

    >>> str(Color.FromYuv(0.5925, -0.2916, 0.3575))
    '(0.999989, 0.500015, -6.3276e-05, 1)'
    >>> str(Color.FromYuv(0.5925, -0.2916, 0.3575, 0.5))
    '(0.999989, 0.500015, -6.3276e-05, 0.5)'
    """
    return Color(yuv_to_rgb(y, u, v), mode='rgb', alpha=alpha, wref=wref)
@staticmethod
def FromXyz(x, y, z, alpha=1.0, wref=_DEFAULT_WREF):
    """Build a Color from CIE XYZ coordinates.

    Parameters:
      :x:, :y:, :z:: the XYZ components [0...1].
      :alpha: transparency [0...1], opaque by default.
      :wref: whitepoint reference, default 2° D65.

    >>> str(Color.FromXyz(0.488941, 0.365682, 0.0448137))
    '(1, 0.5, 6.81883e-08, 1)'
    >>> str(Color.FromXyz(0.488941, 0.365682, 0.0448137, 0.5))
    '(1, 0.5, 6.81883e-08, 0.5)'
    """
    return Color(xyz_to_rgb(x, y, z), mode='rgb', alpha=alpha, wref=wref)
@staticmethod
def FromLab(l, a, b, alpha=1.0, wref=_DEFAULT_WREF):
    """Build a Color from CIE L*a*b* components.

    Parameters:
      :l: [0...100]; :a:, :b:: [-1...1].
      :alpha: transparency [0...1], opaque by default.
      :wref: whitepoint reference, default 2° D65.

    >>> str(Color.FromLab(66.9518, 0.43084, 0.739692))
    '(1, 0.5, 1.09491e-08, 1)'
    >>> str(Color.FromLab(66.9518, 0.43084, 0.739692, wref=WHITE_REFERENCE['std_D50']))
    '(1.01238, 0.492011, -0.14311, 1)'
    >>> str(Color.FromLab(66.9518, 0.43084, 0.739692, 0.5))
    '(1, 0.5, 1.09491e-08, 0.5)'
    >>> str(Color.FromLab(66.9518, 0.43084, 0.739692, 0.5, WHITE_REFERENCE['std_D50']))
    '(1.01238, 0.492011, -0.14311, 0.5)'
    """
    # Lab -> XYZ -> sRGB, keeping the same whitepoint throughout.
    xyz = lab_to_xyz(l, a, b, wref)
    return Color(xyz_to_rgb(*xyz), mode='rgb', alpha=alpha, wref=wref)
@staticmethod
def FromCmy(c, m, y, alpha=1.0, wref=_DEFAULT_WREF):
    """Build a Color from CMY components.

    Parameters:
      :c:, :m:, :y:: the CMY components [0...1].
      :alpha: transparency [0...1], opaque by default.
      :wref: whitepoint reference, default 2° D65.

    >>> Color.FromCmy(0, 0.5, 1)
    Color(1.0, 0.5, 0.0, 1.0)
    >>> Color.FromCmy(0, 0.5, 1, 0.5)
    Color(1.0, 0.5, 0.0, 0.5)
    """
    return Color(cmy_to_rgb(c, m, y), mode='rgb', alpha=alpha, wref=wref)
@staticmethod
def FromCmyk(c, m, y, k, alpha=1.0, wref=_DEFAULT_WREF):
    """Build a Color from CMYK components.

    Parameters:
      :c:, :m:, :y:, :k:: the CMYK components [0...1].
      :alpha: transparency [0...1], opaque by default.
      :wref: whitepoint reference, default 2° D65.

    >>> str(Color.FromCmyk(1, 0.32, 0, 0.5))
    '(0, 0.34, 0.5, 1)'
    >>> str(Color.FromCmyk(1, 0.32, 0, 0.5, 0.5))
    '(0, 0.34, 0.5, 0.5)'
    """
    # CMYK -> CMY -> RGB.
    cmy = cmyk_to_cmy(c, m, y, k)
    return Color(cmy_to_rgb(*cmy), mode='rgb', alpha=alpha, wref=wref)
@staticmethod
def FromHtml(html, alpha=1.0, wref=_DEFAULT_WREF):
    """Build a Color from an HTML color definition.

    Parameters:
      :html: '#RRGGBB', '#RGB', or a color keyword.
      :alpha: transparency [0...1], opaque by default.
      :wref: whitepoint reference, default 2° D65.

    >>> str(Color.FromHtml('#ff8000'))
    '(1, 0.501961, 0, 1)'
    >>> str(Color.FromHtml('#f60'))
    '(1, 0.4, 0, 1)'
    >>> str(Color.FromHtml('lemonchiffon'))
    '(1, 0.980392, 0.803922, 1)'
    >>> str(Color.FromHtml('#ff8000', 0.5))
    '(1, 0.501961, 0, 0.5)'
    """
    return Color(html_to_rgb(html), mode='rgb', alpha=alpha, wref=wref)
@staticmethod
def FromPil(pil, alpha=1.0, wref=_DEFAULT_WREF):
    """Build a Color from a PIL-compatible integer.

    Parameters:
      :pil: a PIL color representation (0xBBGGRR).
      :alpha: transparency [0...1], opaque by default.
      :wref: whitepoint reference, default 2° D65.

    >>> str(Color.FromPil(0x0080ff))
    '(1, 0.501961, 0, 1)'
    >>> str(Color.FromPil(0x0080ff, 0.5))
    '(1, 0.501961, 0, 0.5)'
    """
    return Color(pil_to_rgb(pil), mode='rgb', alpha=alpha, wref=wref)
def __init__(self, values, mode='rgb', alpha=1.0, wref=_DEFAULT_WREF):
"""Instantiate a new grapefruit.Color object.
Parameters:
:values:
The values of this color, in the specified representation.
:mode:
The representation mode used for values.
:alpha:
the alpha value (transparency) of this color.
:wref:
The whitepoint reference, default is 2° D65.
Raises:
TypeError if values is not a tuple; ValueError for an unknown mode.
"""
if not(isinstance(values, tuple)):
raise TypeError("values must be a tuple")
# Both RGB and HSL are always kept in sync; the other representation is
# derived immediately from the one supplied.
if mode=='rgb':
self.__rgb = tuple([float(v) for v in values])
self.__hsl = rgb_to_hsl(*self.__rgb)
elif mode=='hsl':
self.__hsl = tuple([float(v) for v in values])
self.__rgb = hsl_to_rgb(*self.__hsl)
else:
raise ValueError("Invalid color mode: " + mode)
self.__a = alpha
self.__wref = wref
# --=====================-----------------------------------------------------
# -- Convenience methods --
# --=====================--
def __ne__(self, other):
# Defined in terms of __eq__ so the two can never disagree.
return not self.__eq__(other)
def __eq__(self, other):
"""Equality: another Color with identical rgb/alpha/white reference,
or any length-4 sequence matching (r, g, b, a)."""
try:
if isinstance(other, Color):
return (self.__rgb==other.__rgb) and (self.__a==other.__a) and (self.__wref==other.__wref)
if len(other) != 4:
return False
return list(self.__rgb + (self.__a,)) == list(other)
# Non-sequence / attribute-less operands compare unequal rather than raising.
except TypeError:
return False
except AttributeError:
return False
def __repr__(self):
# e.g. Color(1.0, 0.5, 0.0, 1.0)
return "Color{}".format(self.__rgb + (self.__a,))
def __str__(self):
"""A string representation of this grapefruit.Color instance.
Returns:
The RGBA representation of this grapefruit.Color instance.
"""
return '(%g, %g, %g, %g)' % (self.__rgb + (self.__a,))
if sys.version_info[0] < 3:
    def __unicode__(self):
        """A unicode string representation of this grapefruit.Color instance.
        Returns:
        The RGBA representation of this grapefruit.Color instance.
        """
        # Bug fix: the format string was missing its opening parenthesis, so
        # unicode(c) produced '1, 0.5, 0, 1)' while str(c) produced
        # '(1, 0.5, 0, 1)'. Keep both representations identical.
        return unicode('(%g, %g, %g, %g)') % (self.__rgb + (self.__a,))
def __iter__(self):
# Iterates the four channels in (r, g, b, a) order.
return iter(self.__rgb + (self.__a,))
def __len__(self):
# A Color always exposes exactly four components: r, g, b, a.
return 4
# --============--------------------------------------------------------------
# -- Properties --
# --============--
@property
def is_legal(self):
"""Boolean indicating whether the color is within the legal gamut."""
# Checks r, g, b and alpha (via __iter__) against the [0, 1] range.
return all(0.0 <= v <= 1.0 for v in self)
@property
def alpha(self):
"""The transparency of this color. 0.0 is transparent and 1.0 is fully opaque."""
return self.__a
@alpha.setter
def alpha(self, value):
# No clamping/validation is performed here; see is_legal.
self.__a = value
@property
def white_ref(self):
"""the white reference point of this color."""
return self.__wref
@white_ref.setter
def white_ref(self, value):
self.__wref = value
@property
def rgb(self):
    """The RGB values of this Color, as a (r, g, b) tuple of floats."""
    return self.__rgb

@rgb.setter
def rgb(self, value):
    """Replace the RGB values and re-derive the cached HSL representation."""
    # Bug fix: the setter referenced the undefined name `values` instead of
    # the `value` parameter, so any assignment to .rgb raised NameError.
    self.__rgb = tuple(float(v) for v in value)
    self.__hsl = rgb_to_hsl(*self.__rgb)
@property
def red(self):
    """The red component of this Color [0...1]."""
    return self.__rgb[0]

@red.setter
def red(self, value):
    """Set the red component and re-derive the cached HSL representation."""
    # Bug fix: self.__rgb is a tuple, so `self.__rgb[0] = ...` raised
    # TypeError. Rebuild the tuple instead.
    self.__rgb = (float(value), self.__rgb[1], self.__rgb[2])
    self.__hsl = rgb_to_hsl(*self.__rgb)
@property
def green(self):
    """The green component of this Color [0...1]."""
    return self.__rgb[1]

@green.setter
def green(self, value):
    """Set the green component and re-derive the cached HSL representation."""
    # Bug fix: self.__rgb is a tuple, so item assignment raised TypeError.
    self.__rgb = (self.__rgb[0], float(value), self.__rgb[2])
    self.__hsl = rgb_to_hsl(*self.__rgb)
@property
def blue(self):
    """The blue component of this Color [0...1]."""
    return self.__rgb[2]

@blue.setter
def blue(self, value):
    """Set the blue component and re-derive the cached HSL representation."""
    # Bug fix: self.__rgb is a tuple, so item assignment raised TypeError.
    self.__rgb = (self.__rgb[0], self.__rgb[1], float(value))
    self.__hsl = rgb_to_hsl(*self.__rgb)
@property
def hsl(self):
    """The HSL values of this Color, as a (h, s, l) tuple of floats."""
    return self.__hsl

@hsl.setter
def hsl(self, value):
    """Replace the HSL values and re-derive the cached RGB representation."""
    # Bug fix: the setter referenced the undefined name `values` instead of
    # the `value` parameter, so any assignment to .hsl raised NameError.
    self.__hsl = tuple(float(v) for v in value)
    self.__rgb = hsl_to_rgb(*self.__hsl)
@property
def hsl_hue(self):
    """The hue of this color [0...360]."""
    return self.__hsl[0]

@hsl_hue.setter
def hsl_hue(self, value):
    """Set the hue and re-derive the cached RGB representation."""
    # Bug fix: self.__hsl is a tuple, so `self.__hsl[0] = ...` raised
    # TypeError. Rebuild the tuple instead.
    self.__hsl = (float(value), self.__hsl[1], self.__hsl[2])
    self.__rgb = hsl_to_rgb(*self.__hsl)
@property
def hsv(self):
"""The HSV values of this Color."""
# HSV is not cached; it is derived from RGB on every access.
return rgb_to_hsv(*self.__rgb)
@hsv.setter
def hsv(self, value):
self.__rgb = hsv_to_rgb(*value)
self.__hsl = rgb_to_hsl(*self.__rgb)
@property
def yiq(self):
"""The YIQ values of this Color."""
return rgb_to_yiq(*self.__rgb)
@yiq.setter
def yiq(self, value):
self.__rgb = yiq_to_rgb(*value)
self.__hsl = rgb_to_hsl(*self.__rgb)
@property
def yuv(self):
"""The YUV values of this Color."""
return rgb_to_yuv(*self.__rgb)
@yuv.setter
def yuv(self, value):
self.__rgb = yuv_to_rgb(*value)
self.__hsl = rgb_to_hsl(*self.__rgb)
@property
def xyz(self):
"""The CIE-XYZ values of this Color."""
return rgb_to_xyz(*self.__rgb)
@xyz.setter
def xyz(self, value):
# Each setter stores the new RGB and refreshes the cached HSL twin.
self.__rgb = xyz_to_rgb(*value)
self.__hsl = rgb_to_hsl(*self.__rgb)
@property
def lab(self):
    """The CIE-LAB values of this Color (uses this color's white reference)."""
    return xyz_to_lab(wref=self.__wref, *rgb_to_xyz(*self.__rgb))

@lab.setter
def lab(self, value):
    """Set the color from (L, a, b) and re-derive the cached RGB/HSL."""
    # Bug fix: lab_to_xyz returns an (x, y, z) tuple which must be unpacked
    # into xyz_to_rgb (compare the getter and FromCmyk), and the conversion
    # must honour this color's white reference like the getter does.
    self.__rgb = xyz_to_rgb(*lab_to_xyz(*value, wref=self.__wref))
    self.__hsl = rgb_to_hsl(*self.__rgb)
@property
def cmy(self):
"""The CMY values of this Color."""
# Derived from RGB on each access; not cached.
return rgb_to_cmy(*self.__rgb)
@cmy.setter
def cmy(self, value):
self.__rgb = cmy_to_rgb(*value)
self.__hsl = rgb_to_hsl(*self.__rgb)
@property
def cmyk(self):
    """The CMYK values of this Color."""
    return cmy_to_cmyk(*rgb_to_cmy(*self.__rgb))

@cmyk.setter
def cmyk(self, value):
    """Set the color from (c, m, y, k) and re-derive the cached RGB/HSL."""
    # Bug fix: cmyk_to_cmy returns a (c, m, y) tuple which must be unpacked
    # into cmy_to_rgb — the same two-step call FromCmyk uses.
    self.__rgb = cmy_to_rgb(*cmyk_to_cmy(*value))
    self.__hsl = rgb_to_hsl(*self.__rgb)
@property
def ints(self):
"""This Color as a tuple of integers in the range [0...255]"""
return rgb_to_ints(*self.__rgb)
@ints.setter
def ints(self, value):
# Accepts a tuple of 0..255 integers; stored back as float RGB.
self.__rgb = ints_to_rgb(*value)
self.__hsl = rgb_to_hsl(*self.__rgb)
@property
def html(self):
    """This Color as an HTML color definition (e.g. '#ff8000')."""
    return rgb_to_html(*self.__rgb)

@html.setter
def html(self, value):
    """Set the color from an HTML definition ('#RRGGBB', '#RGB' or a name)."""
    # Bug fix: `value` is a single string and must be passed whole; the
    # original `html_to_rgb(*value)` unpacked it into individual characters
    # (compare FromHtml, which calls html_to_rgb(html)).
    self.__rgb = html_to_rgb(value)
    self.__hsl = rgb_to_hsl(*self.__rgb)
@property
def pil(self):
    """This Color as a PIL compatible value (0xBBGGRR)."""
    return rgb_to_pil(*self.__rgb)

@pil.setter
def pil(self, value):
    """Set the color from a PIL compatible value (0xBBGGRR)."""
    # Bug fix: this was declared as a second @property (shadowing the getter)
    # and had no `value` parameter, so reading .pil hit a NameError and
    # assigning to it was impossible. Also, `value` is one packed integer and
    # must be passed whole (compare FromPil), not star-unpacked.
    self.__rgb = pil_to_rgb(value)
    self.__hsl = rgb_to_hsl(*self.__rgb)
@property
def web_safe(self):
"""The web safe color nearest to this one (RGB)."""
return rgb_to_web_safe(*self.__rgb)
@property
def greyscale(self):
"""The greyscale equivalent to this color (RGB)."""
# Read-only: returns a plain RGB tuple, not a Color instance.
return rgb_to_greyscale(*self.rgb)
def with_alpha(self, alpha):
"""Create a new instance based on this one with a new alpha value.
Parameters:
:alpha:
The transparency of the new color [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.FromRgb(1.0, 0.5, 0.0, 1.0).with_alpha(0.5)
Color(1.0, 0.5, 0.0, 0.5)
"""
# Colors are immutable by convention; derive a copy instead of mutating.
return Color(self.__rgb, 'rgb', alpha, self.__wref)
def with_white_ref(self, wref, labAsRef=False):
"""Create a new instance based on this one with a new white reference.
Parameters:
:wref:
The whitepoint reference.
:labAsRef:
If True, the L*a*b* values of the current instance are used as reference
for the new color; otherwise, the RGB values are used as reference.
Returns:
A grapefruit.Color instance.
>>> c = Color.FromRgb(1.0, 0.5, 0.0, 1.0, WHITE_REFERENCE['std_D65'])
>>> c2 = c.with_white_ref(WHITE_REFERENCE['sup_D50'])
>>> c2.rgb
(1.0, 0.5, 0.0)
>>> '(%g, %g, %g)' % c2.white_ref
'(0.96721, 1, 0.81428)'
>>> c2 = c.with_white_ref(WHITE_REFERENCE['sup_D50'], labAsRef=True)
>>> '(%g, %g, %g)' % c2.rgb
'(1.01463, 0.490339, -0.148131)'
>>> '(%g, %g, %g)' % c2.white_ref
'(0.96721, 1, 0.81428)'
>>> '(%g, %g, %g)' % c.lab
'(66.9518, 0.43084, 0.739692)'
>>> '(%g, %g, %g)' % c2.lab
'(66.9518, 0.43084, 0.739693)'
"""
if labAsRef:
# Keep L*a*b* constant under the new whitepoint (RGB will shift).
l, a, b = self.lab
return Color.FromLab(l, a, b, self.__a, wref)
else:
# Keep RGB constant; only the stored reference changes.
return Color(self.__rgb, 'rgb', self.__a, wref)
def with_hue(self, hue):
"""Create a new instance based on this one with a new hue.
Parameters:
:hue:
The hue of the new color [0...360].
Returns:
A grapefruit.Color instance.
>>> Color.FromHsl(30, 1, 0.5).with_hue(60)
Color(1.0, 1.0, 0.0, 1.0)
>>> Color.FromHsl(30, 1, 0.5).with_hue(60).hsl
(60.0, 1.0, 0.5)
"""
h, s, l = self.__hsl
return Color((hue, s, l), 'hsl', self.__a, self.__wref)
def with_saturation(self, saturation):
"""Create a new instance based on this one with a new saturation value.
.. note::
The saturation is defined for the HSL mode.
Parameters:
:saturation:
The saturation of the new color [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.FromHsl(30, 1, 0.5).with_saturation(0.5)
Color(0.75, 0.5, 0.25, 1.0)
>>> Color.FromHsl(30, 1, 0.5).with_saturation(0.5).hsl
(30.0, 0.5, 0.5)
"""
h, s, l = self.__hsl
return Color((h, saturation, l), 'hsl', self.__a, self.__wref)
def with_lightness(self, lightness):
"""Create a new instance based on this one with a new lightness value.
Parameters:
:lightness:
The lightness of the new color [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.FromHsl(30, 1, 0.5).with_lightness(0.25)
Color(0.5, 0.25, 0.0, 1.0)
>>> Color.FromHsl(30, 1, 0.5).with_lightness(0.25).hsl
(30.0, 1.0, 0.25)
"""
h, s, l = self.__hsl
return Color((h, s, lightness), 'hsl', self.__a, self.__wref)
def darker(self, level):
"""Create a new instance based on this one but darker.
Parameters:
:level:
The amount by which the color should be darkened to produce
the new one [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.FromHsl(30, 1, 0.5).darker(0.25)
Color(0.5, 0.25, 0.0, 1.0)
>>> Color.FromHsl(30, 1, 0.5).darker(0.25).hsl
(30.0, 1.0, 0.25)
"""
# Lightness is clamped at 0 so repeated darkening cannot go negative.
h, s, l = self.__hsl
return Color((h, s, max(l - level, 0)), 'hsl', self.__a, self.__wref)
def lighter(self, level):
"""Create a new instance based on this one but lighter.
Parameters:
:level:
The amount by which the color should be lightened to produce
the new one [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.FromHsl(30, 1, 0.5).lighter(0.25)
Color(1.0, 0.75, 0.5, 1.0)
>>> Color.FromHsl(30, 1, 0.5).lighter(0.25).hsl
(30.0, 1.0, 0.75)
"""
# Lightness is clamped at 1.
h, s, l = self.__hsl
return Color((h, s, min(l + level, 1)), 'hsl', self.__a, self.__wref)
def saturate(self, level):
"""Create a new instance based on this one but more saturated.
Parameters:
:level:
The amount by which the color should be saturated to produce
the new one [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.FromHsl(30, 0.5, 0.5).saturate(0.25)
Color(0.875, 0.5, 0.125, 1.0)
>>> Color.FromHsl(30, 0.5, 0.5).saturate(0.25).hsl
(30.0, 0.75, 0.5)
"""
h, s, l = self.__hsl
return Color((h, min(s + level, 1), l), 'hsl', self.__a, self.__wref)
def desaturate(self, level):
"""Create a new instance based on this one but less saturated.
Parameters:
:level:
The amount by which the color should be desaturated to produce
the new one [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.FromHsl(30, 0.5, 0.5).desaturate(0.25)
Color(0.625, 0.5, 0.375, 1.0)
>>> Color.FromHsl(30, 0.5, 0.5).desaturate(0.25).hsl
(30.0, 0.25, 0.5)
"""
h, s, l = self.__hsl
return Color((h, max(s - level, 0), l), 'hsl', self.__a, self.__wref)
def nearest_legal(self):
"""Create a new instance that is the nearest legal color to this one.
>>> Color.FromRgb(2.0, 0.0, 3.0).nearest_legal()
Color(1.0, 0.0, 1.0, 1.0)
"""
# Clamps every channel (r, g, b and alpha via __iter__) into [0, 1].
return Color.FromRgb(*[max(min(v, 1.0), 0.0) for v in self])
def web_safe_dither(self):
"""Return the two websafe colors nearest to this one.
Returns:
A tuple of two grapefruit.Color instances which are the two
web safe colors closest this one.
>>> c = Color.FromRgb(1.0, 0.45, 0.0)
>>> c1, c2 = c.web_safe_dither()
>>> c1
Color(1.0, 0.4, 0.0, 1.0)
>>> c2
Color(1.0, 0.6, 0.0, 1.0)
"""
# alt=True asks the converter for the web-safe color on the other side.
return (
Color(rgb_to_web_safe(*self.__rgb), 'rgb', self.__a, self.__wref),
Color(rgb_to_web_safe(alt=True, *self.__rgb), 'rgb', self.__a, self.__wref))
def complementary_color(self, mode='ryb'):
"""Create a new instance which is the complementary color of this one.
Parameters:
:mode:
Select which color wheel to use for the generation (ryb/rgb).
Returns:
A grapefruit.Color instance.
>>> Color.FromHsl(30, 1, 0.5).complementary_color(mode='rgb')
Color(0.0, 0.5, 1.0, 1.0)
>>> Color.FromHsl(30, 1, 0.5).complementary_color(mode='rgb').hsl
(210.0, 1.0, 0.5)
"""
h, s, l = self.__hsl
# In 'ryb' mode the hue is rotated on the artistic RYB wheel: map the
# hue there, take the 180° opposite, then map back to the RGB wheel.
if mode == 'ryb': h = rgb_to_ryb(h)
h = (h+180)%360
if mode == 'ryb': h = ryb_to_rgb(h)
return Color((h, s, l), 'hsl', self.__a, self.__wref)
def make_gradient(self, target, steps=100):
"""Create a list with the gradient colors between this and the other color.
Parameters:
:target:
The grapefruit.Color at the other end of the gradient.
:steps:
The number of gradients steps to create.
Returns:
A list of grapefruit.Color instances.
>>> c1 = Color.FromRgb(1.0, 0.0, 0.0, alpha=1)
>>> c2 = Color.FromRgb(0.0, 1.0, 0.0, alpha=0)
>>> c1.make_gradient(c2, 3)
[Color(0.75, 0.25, 0.0, 0.75), Color(0.5, 0.5, 0.0, 0.5), Color(0.25, 0.75, 0.0, 0.25)]
"""
gradient = []
rgba1 = self.__rgb + (self.__a,)
rgba2 = target.__rgb + (target.__a,)
# The endpoints themselves are excluded: n runs 1..steps, so exactly
# `steps` intermediate colors are produced.
steps += 1
for n in range(1, steps):
d = 1.0*n/steps
# Linear interpolation per channel, including alpha.
r = (rgba1[0]*(1-d)) + (rgba2[0]*d)
g = (rgba1[1]*(1-d)) + (rgba2[1]*d)
b = (rgba1[2]*(1-d)) + (rgba2[2]*d)
a = (rgba1[3]*(1-d)) + (rgba2[3]*d)
# The new colors keep this color's white reference (not the target's).
gradient.append(Color((r, g, b), 'rgb', a, self.__wref))
return gradient
def make_monochrome_scheme(self):
"""Return 4 colors in the same hue with varying saturation/lightness.
Returns:
A tuple of 4 grapefruit.Color in the same hue as this one,
with varying saturation/lightness.
>>> c = Color.FromHsl(30, 0.5, 0.5)
>>> ['(%g, %g, %g)' % clr.hsl for clr in c.make_monochrome_scheme()]
['(30, 0.2, 0.8)', '(30, 0.5, 0.3)', '(30, 0.2, 0.6)', '(30, 0.5, 0.8)']
"""
# Helper: push x away from the current value — either add `plus` (if x is
# within `thres` of `min`) or subtract `min` — so variants stay distinct.
def _wrap(x, min, thres, plus):
if (x-min) < thres: return x + plus
else: return x-min
h, s, l = self.__hsl
s1 = _wrap(s, 0.3, 0.1, 0.3)
l1 = _wrap(l, 0.5, 0.2, 0.3)
s2 = s
l2 = _wrap(l, 0.2, 0.2, 0.6)
s3 = s1
l3 = max(0.2, l + (1-l)*0.2)
s4 = s
l4 = _wrap(l, 0.5, 0.2, 0.3)
return (
Color((h, s1, l1), 'hsl', self.__a, self.__wref),
Color((h, s2, l2), 'hsl', self.__a, self.__wref),
Color((h, s3, l3), 'hsl', self.__a, self.__wref),
Color((h, s4, l4), 'hsl', self.__a, self.__wref))
def make_triadic_scheme(self, angle=120, mode='ryb'):
"""Return two colors forming a triad or a split complementary with this one.
Parameters:
:angle:
The angle between the hues of the created colors.
The default value makes a triad.
:mode:
Select which color wheel to use for the generation (ryb/rgb).
Returns:
A tuple of two grapefruit.Color forming a color triad with
this one or a split complementary.
>>> c1 = Color.FromHsl(30, 1, 0.5)
>>> c2, c3 = c1.make_triadic_scheme(mode='rgb')
>>> c2.hsl
(150.0, 1.0, 0.5)
>>> c3.hsl
(270.0, 1.0, 0.5)
>>> c2, c3 = c1.make_triadic_scheme(angle=40, mode='rgb')
>>> c2.hsl
(190.0, 1.0, 0.5)
>>> c3.hsl
(230.0, 1.0, 0.5)
"""
h, s, l = self.__hsl
# Half the (capped) angle is spread on each side of the complement.
angle = min(angle, 120) / 2.0
if mode == 'ryb': h = rgb_to_ryb(h)
h += 180
h1 = (h - angle) % 360
h2 = (h + angle) % 360
if mode == 'ryb':
h1 = ryb_to_rgb(h1)
h2 = ryb_to_rgb(h2)
return (
Color((h1, s, l), 'hsl', self.__a, self.__wref),
Color((h2, s, l), 'hsl', self.__a, self.__wref))
def make_tetradic_scheme(self, angle=30, mode='ryb'):
"""Return three colors froming a tetrad with this one.
Parameters:
:angle:
The angle to substract from the adjacent colors hues [-90...90].
You can use an angle of zero to generate a square tetrad.
:mode:
Select which color wheel to use for the generation (ryb/rgb).
Returns:
A tuple of three grapefruit.Color forming a color tetrad with
this one.
>>> col = Color.FromHsl(30, 1, 0.5)
>>> [c.hsl for c in col.make_tetradic_scheme(mode='rgb', angle=30)]
[(90.0, 1.0, 0.5), (210.0, 1.0, 0.5), (270.0, 1.0, 0.5)]
"""
h, s, l = self.__hsl
if mode == 'ryb': h = rgb_to_ryb(h)
# Tetrad members sit at +90-angle, +180 and +270-angle from this hue.
h1 = (h + 90 - angle) % 360
h2 = (h + 180) % 360
h3 = (h + 270 - angle) % 360
if mode == 'ryb':
h1 = ryb_to_rgb(h1)
h2 = ryb_to_rgb(h2)
h3 = ryb_to_rgb(h3)
return (
Color((h1, s, l), 'hsl', self.__a, self.__wref),
Color((h2, s, l), 'hsl', self.__a, self.__wref),
Color((h3, s, l), 'hsl', self.__a, self.__wref))
def make_analogous_scheme(self, angle=30, mode='ryb'):
"""Return two colors analogous to this one.
Args:
:angle:
The angle between the hues of the created colors and this one.
:mode:
Select which color wheel to use for the generation (ryb/rgb).
Returns:
A tuple of grapefruit.Colors analogous to this one.
>>> c1 = Color.FromHsl(30, 1, 0.5)
>>> c2, c3 = c1.make_analogous_scheme(angle=60, mode='rgb')
>>> c2.hsl
(330.0, 1.0, 0.5)
>>> c3.hsl
(90.0, 1.0, 0.5)
>>> c2, c3 = c1.make_analogous_scheme(angle=10, mode='rgb')
>>> c2.hsl
(20.0, 1.0, 0.5)
>>> c3.hsl
(40.0, 1.0, 0.5)
"""
h, s, l = self.__hsl
if mode == 'ryb': h = rgb_to_ryb(h)
# +360 keeps (h - angle) non-negative before the modulo.
h += 360
h1 = (h - angle) % 360
h2 = (h + angle) % 360
if mode == 'ryb':
h1 = ryb_to_rgb(h1)
h2 = ryb_to_rgb(h2)
return (Color((h1, s, l), 'hsl', self.__a, self.__wref),
Color((h2, s, l), 'hsl', self.__a, self.__wref))
def alpha_blend(self, other):
"""Alpha-blend this color on the other one.
Args:
:other:
The grapefruit.Color to alpha-blend with this one.
Returns:
A grapefruit.Color instance which is the result of alpha-blending
this color on the other one.
>>> c1 = Color.FromRgb(1, 0.5, 0, 0.2)
>>> c2 = Color.FromRgb(1, 1, 1, 0.8)
>>> c3 = c1.alpha_blend(c2)
>>> str(c3)
'(1, 0.875, 0.75, 0.84)'
"""
# get final alpha channel
fa = self.__a + other.__a - (self.__a * other.__a)
# get percentage of source alpha compared to final alpha
if fa==0: sa = 0
# NOTE(review): if other.__a == 0 while self.__a > 0, fa != 0 and this
# division raises ZeroDivisionError — verify intended behavior.
else: sa = min(1.0, self.__a/other.__a)
# destination percentage is just the additive inverse
da = 1.0 - sa
sr, sg, sb = [v * sa for v in self.__rgb]
dr, dg, db = [v * da for v in other.__rgb]
return Color((sr+dr, sg+dg, sb+db), 'rgb', fa, self.__wref)
def blend(self, other, percent=0.5):
"""blend this color with the other one.
Args:
:other:
the grapefruit.Color to blend with this one.
:percent:
The weight given to this color [0...1]; the other color
gets the complement (1 - percent).
Returns:
A grapefruit.Color instance which is the result of blending
this color on the other one.
>>> c1 = Color.FromRgb(1, 0.5, 0, 0.2)
>>> c2 = Color.FromRgb(1, 1, 1, 0.6)
>>> c3 = c1.blend(c2)
>>> str(c3)
'(1, 0.75, 0.5, 0.4)'
"""
dest = 1.0 - percent
# Simple weighted average per channel, alpha included.
rgb = tuple(((u * percent) + (v * dest) for u, v in zip(self.__rgb, other.__rgb)))
a = (self.__a * percent) + (other.__a * dest)
return Color(rgb, 'rgb', a, self.__wref)
def _test():
    """Reload the doctest module and run this module's doctests."""
    import doctest
    # Bug fix: `reload` is a builtin only on Python 2; on Python 3 it lives
    # in importlib (3.4+), so the bare call raised NameError there.
    try:
        from importlib import reload
    except ImportError:
        pass  # Python 2: builtin reload is already in scope
    reload(doctest)
    doctest.testmod()

if __name__=='__main__':
    _test()
# vim: ts=2 sts=2 sw=2 et
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, ESS LLP and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
import json
from frappe.utils import getdate, add_days
from frappe import _
import datetime
from frappe.core.doctype.sms_settings.sms_settings import send_sms
from erpnext.hr.doctype.employee.employee import is_holiday
from erpnext.healthcare.doctype.healthcare_settings.healthcare_settings import get_receivable_account,get_income_account
from erpnext.healthcare.utils import validity_exists, service_item_and_practitioner_charge
class PatientAppointment(Document):
# Frappe controller for the "Patient Appointment" doctype.
def on_update(self):
# Appointments created for the current day are immediately opened.
today = datetime.date.today()
appointment_date = getdate(self.appointment_date)
# If appointment created for today set as open
if today == appointment_date:
frappe.db.set_value("Patient Appointment", self.name, "status", "Open")
# Reload so the in-memory doc reflects the direct db.set_value write.
self.reload()
def after_insert(self):
# Mark the originating procedure prescription as booked, consume a visit
# from any active Fee Validity, send a confirmation SMS and optionally
# auto-invoice the appointment.
if self.procedure_prescription:
frappe.db.set_value("Procedure Prescription", self.procedure_prescription, "appointment_booked", True)
# Check fee validity exists
appointment = self
validity_exist = validity_exists(appointment.practitioner, appointment.patient)
if validity_exist:
fee_validity = frappe.get_doc("Fee Validity", validity_exist[0][0])
# Check if the validity is valid
appointment_date = getdate(appointment.appointment_date)
if (fee_validity.valid_till >= appointment_date) and (fee_validity.visited < fee_validity.max_visit):
visited = fee_validity.visited + 1
frappe.db.set_value("Fee Validity", fee_validity.name, "visited", visited)
if fee_validity.ref_invoice:
# The visit is already covered by the validity's invoice.
frappe.db.set_value("Patient Appointment", appointment.name, "invoiced", True)
frappe.msgprint(_("{0} has fee validity till {1}").format(appointment.patient, fee_validity.valid_till))
confirm_sms(self)
# Auto-invoice only when enabled in settings and not already invoiced
# (e.g. via a fee validity above).
if frappe.db.get_value("Healthcare Settings", None, "manage_appointment_invoice_automatically") == '1' and \
frappe.db.get_value("Patient Appointment", self.name, "invoiced") != 1:
invoice_appointment(self)
@frappe.whitelist()
def invoice_appointment(appointment_doc):
"""Create and submit a paid POS Sales Invoice for the given appointment.

The invoice carries one item (the practitioner's consulting charge) and
one payment row for the appointment's mode of payment / paid amount.
"""
if not appointment_doc.name:
return False
sales_invoice = frappe.new_doc("Sales Invoice")
sales_invoice.customer = frappe.get_value("Patient", appointment_doc.patient, "customer")
sales_invoice.appointment = appointment_doc.name
sales_invoice.due_date = getdate()
sales_invoice.is_pos = True
sales_invoice.company = appointment_doc.company
sales_invoice.debit_to = get_receivable_account(appointment_doc.company)
item_line = sales_invoice.append("items")
service_item, practitioner_charge = service_item_and_practitioner_charge(appointment_doc)
item_line.item_code = service_item
item_line.description = "Consulting Charges: " + appointment_doc.practitioner
item_line.income_account = get_income_account(appointment_doc.practitioner, appointment_doc.company)
item_line.rate = practitioner_charge
item_line.amount = practitioner_charge
item_line.qty = 1
# Back-reference so the invoice item can be traced to this appointment.
item_line.reference_dt = "Patient Appointment"
item_line.reference_dn = appointment_doc.name
payments_line = sales_invoice.append("payments")
payments_line.mode_of_payment = appointment_doc.mode_of_payment
payments_line.amount = appointment_doc.paid_amount
sales_invoice.set_missing_values(for_validate = True)
# ignore_permissions: may be triggered from after_insert on behalf of users
# without Sales Invoice write rights.
sales_invoice.save(ignore_permissions=True)
sales_invoice.submit()
frappe.msgprint(_("Sales Invoice {0} created as paid".format(sales_invoice.name)), alert=True)
def appointment_cancel(appointment_id):
"""Cancel an appointment and unwind its billing side effects.

For invoiced appointments: cancel the linked Sales Invoice when possible,
otherwise decrement the consumed visit on the matching Fee Validity and ask
the user to review the referenced invoice manually.
"""
appointment = frappe.get_doc("Patient Appointment", appointment_id)
# If invoiced --> fee_validity update with -1 visit
if appointment.invoiced:
sales_invoice = exists_sales_invoice(appointment)
if sales_invoice and cancel_sales_invoice(sales_invoice):
frappe.msgprint(
_("Appointment {0} and Sales Invoice {1} cancelled".format(appointment.name, sales_invoice.name))
)
else:
# No (cancellable) invoice of its own: the appointment may have been
# covered by a Fee Validity — give the visit back.
validity = validity_exists(appointment.practitioner, appointment.patient)
if validity:
fee_validity = frappe.get_doc("Fee Validity", validity[0][0])
if appointment_valid_in_fee_validity(appointment, fee_validity.valid_till, True, fee_validity.ref_invoice):
visited = fee_validity.visited - 1
frappe.db.set_value("Fee Validity", fee_validity.name, "visited", visited)
frappe.msgprint(
_("Appointment cancelled, Please review and cancel the invoice {0}".format(fee_validity.ref_invoice))
)
else:
frappe.msgprint(_("Appointment cancelled"))
else:
frappe.msgprint(_("Appointment cancelled"))
else:
frappe.msgprint(_("Appointment cancelled"))
def appointment_valid_in_fee_validity(appointment, valid_end_date, invoiced, ref_invoice):
	"""Return True if `appointment` falls within the fee-validity window ending
	on `valid_end_date` and its oldest in-window sibling is billed on
	`ref_invoice`; False otherwise.
	"""
	valid_days = frappe.db.get_value("Healthcare Settings", None, "valid_days")
	max_visit = frappe.db.get_value("Healthcare Settings", None, "max_visit")
	valid_start_date = add_days(getdate(valid_end_date), -int(valid_days))
	# Appointments which share this fee validity range with the appointment.
	# Bug fix: the original filters dict repeated the 'appointment_date' key,
	# so the "<=" upper bound was silently discarded by the dict literal.
	# List-style filters allow both bounds on the same field.
	appointments = frappe.get_list(
		"Patient Appointment",
		filters=[
			["Patient Appointment", "patient", "=", appointment.patient],
			["Patient Appointment", "invoiced", "=", invoiced],
			["Patient Appointment", "appointment_date", "<=", getdate(valid_end_date)],
			["Patient Appointment", "appointment_date", ">=", getdate(valid_start_date)],
			["Patient Appointment", "practitioner", "=", appointment.practitioner],
		],
		order_by="appointment_date desc",
		limit=int(max_visit),
	)
	if appointments:
		# Oldest appointment in the window (list is sorted newest-first).
		appointment_obj = appointments[-1]
		sales_invoice = exists_sales_invoice(appointment_obj)
		# Bug fix: exists_sales_invoice returns False when there is no linked
		# invoice; guard before dereferencing .name to avoid AttributeError.
		if sales_invoice and sales_invoice.name == ref_invoice:
			return True
	return False
def cancel_sales_invoice(sales_invoice):
"""Cancel the invoice if auto-invoicing is on and it is safe to do so.

Returns True when the invoice was cancelled, False otherwise. The invoice
is only cancelled when it contains a single item, i.e. it exists solely for
the appointment being cancelled.
"""
if frappe.db.get_value("Healthcare Settings", None, "manage_appointment_invoice_automatically") == '1':
if len(sales_invoice.items) == 1:
sales_invoice.cancel()
return True
return False
def exists_sales_invoice_item(appointment):
# Returns the name of the Sales Invoice Item referencing this appointment,
# or None if no such row exists.
return frappe.db.exists(
"Sales Invoice Item",
{
"reference_dt": "Patient Appointment",
"reference_dn": appointment.name
}
)
def exists_sales_invoice(appointment):
"""Return the Sales Invoice linked to `appointment`, or False if none."""
sales_item_exist = exists_sales_invoice_item(appointment)
if sales_item_exist:
# Load the parent invoice of the matching invoice item.
sales_invoice = frappe.get_doc("Sales Invoice", frappe.db.get_value("Sales Invoice Item", sales_item_exist, "parent"))
return sales_invoice
return False
@frappe.whitelist()
def get_availability_data(date, practitioner):
"""
Get availability data of 'practitioner' on 'date'
:param date: Date to check in schedule
:param practitioner: Name of the practitioner
:return: dict containing a list of available slots, list of appointments and time of appointments
:raises frappe.ValidationError: on company holiday, practitioner leave,
missing schedule, or no slots for the weekday.
"""
date = getdate(date)
weekday = date.strftime("%A")
available_slots = []
slot_details = []
practitioner_schedule = None
employee = None
practitioner_obj = frappe.get_doc("Healthcare Practitioner", practitioner)
# Get practitioner employee relation
if practitioner_obj.employee:
employee = practitioner_obj.employee
elif practitioner_obj.user_id:
# Fall back to resolving the employee through the linked user account.
if frappe.db.exists({
"doctype": "Employee",
"user_id": practitioner_obj.user_id
}):
employee = frappe.get_doc("Employee", {"user_id": practitioner_obj.user_id}).name
if employee:
# Check if it is Holiday
if is_holiday(employee, date):
frappe.throw(_("{0} is a company holiday".format(date)))
# Check if He/She on Leave
leave_record = frappe.db.sql("""select half_day from `tabLeave Application`
where employee = %s and %s between from_date and to_date
and docstatus = 1""", (employee, date), as_dict=True)
if leave_record:
if leave_record[0].half_day:
frappe.throw(_("{0} on Half day Leave on {1}").format(practitioner, date))
else:
frappe.throw(_("{0} on Leave on {1}").format(practitioner, date))
# get practitioners schedule
if practitioner_obj.practitioner_schedules:
for schedule in practitioner_obj.practitioner_schedules:
if schedule.schedule:
practitioner_schedule = frappe.get_doc("Practitioner Schedule", schedule.schedule)
else:
frappe.throw(_("{0} does not have a Healthcare Practitioner Schedule. Add it in Healthcare Practitioner master".format(practitioner)))
if practitioner_schedule:
# Keep only the time slots defined for this weekday.
available_slots = []
for t in practitioner_schedule.time_slots:
if weekday == t.day:
available_slots.append(t)
if available_slots:
appointments = []
if schedule.service_unit:
slot_name = schedule.schedule+" - "+schedule.service_unit
allow_overlap = frappe.get_value('Healthcare Service Unit', schedule.service_unit, 'overlap_appointments')
if allow_overlap:
# fetch all appointments to practitioner by service unit
appointments = frappe.get_all(
"Patient Appointment",
filters={"practitioner": practitioner, "service_unit": schedule.service_unit, "appointment_date": date, "status": ["not in",["Cancelled"]]},
fields=["name", "appointment_time", "duration", "status"])
else:
# fetch all appointments to service unit
appointments = frappe.get_all(
"Patient Appointment",
filters={"service_unit": schedule.service_unit, "appointment_date": date, "status": ["not in",["Cancelled"]]},
fields=["name", "appointment_time", "duration", "status"])
else:
slot_name = schedule.schedule
# fetch all appointments to practitioner without service unit
appointments = frappe.get_all(
"Patient Appointment",
filters={"practitioner": practitioner, "service_unit": '', "appointment_date": date, "status": ["not in",["Cancelled"]]},
fields=["name", "appointment_time", "duration", "status"])
# NOTE(review): `schedule` and `practitioner_schedule` are read here after
# the schedules loop; with the flattened indentation it is unclear whether
# this block is inside the loop (per-schedule) or only sees the last
# schedule — verify against the upstream source.
slot_details.append({"slot_name":slot_name, "service_unit":schedule.service_unit,
"avail_slot":available_slots, 'appointments': appointments})
else:
frappe.throw(_("{0} does not have a Healthcare Practitioner Schedule. Add it in Healthcare Practitioner master".format(practitioner)))
if not available_slots and not slot_details:
# TODO: return available slots in nearby dates
frappe.throw(_("Healthcare Practitioner not available on {0}").format(weekday))
return {
"slot_details": slot_details
}
@frappe.whitelist()
def update_status(appointment_id, status):
"""Set the appointment status; on cancellation also unwind billing and
release the linked procedure prescription booking."""
frappe.db.set_value("Patient Appointment", appointment_id, "status", status)
appointment_booked = True
if status == "Cancelled":
appointment_booked = False
appointment_cancel(appointment_id)
procedure_prescription = frappe.db.get_value("Patient Appointment", appointment_id, "procedure_prescription")
if procedure_prescription:
# Keep the prescription's booked flag in sync with this appointment.
frappe.db.set_value("Procedure Prescription", procedure_prescription, "appointment_booked", appointment_booked)
@frappe.whitelist()
def set_open_appointments():
# Scheduler helper: today's scheduled appointments become "Open".
today = getdate()
frappe.db.sql(
"update `tabPatient Appointment` set status='Open' where status = 'Scheduled'"
" and appointment_date = %s", today)
@frappe.whitelist()
def set_pending_appointments():
# Scheduler helper: past appointments never opened/closed become "Pending".
today = getdate()
frappe.db.sql(
"update `tabPatient Appointment` set status='Pending' where status in "
"('Scheduled','Open') and appointment_date < %s", today)
def confirm_sms(doc):
# Send the configured confirmation SMS if enabled in Healthcare Settings.
if frappe.db.get_value("Healthcare Settings", None, "app_con") == '1':
message = frappe.db.get_value("Healthcare Settings", None, "app_con_msg")
send_message(doc, message)
@frappe.whitelist()
def create_encounter(appointment):
"""Build (but do not save) a Patient Encounter pre-filled from the
appointment and return it as a dict for the client to open."""
appointment = frappe.get_doc("Patient Appointment", appointment)
encounter = frappe.new_doc("Patient Encounter")
encounter.appointment = appointment.name
encounter.patient = appointment.patient
encounter.practitioner = appointment.practitioner
encounter.visit_department = appointment.department
encounter.patient_sex = appointment.patient_sex
encounter.encounter_date = appointment.appointment_date
if appointment.invoiced:
# Carry the billing state over so the encounter is not invoiced twice.
encounter.invoiced = True
return encounter.as_dict()
def remind_appointment():
	"""Scheduler job: SMS a reminder for appointments starting within the
	configured lead time ("rem_before") that have not been reminded yet.
	"""
	if frappe.db.get_value("Healthcare Settings", None, "app_rem") == '1':
		rem_before = datetime.datetime.strptime(frappe.get_value("Healthcare Settings", None, "rem_before"), "%H:%M:%S")
		rem_dt = datetime.datetime.now() + datetime.timedelta(
			hours=rem_before.hour, minutes=rem_before.minute, seconds=rem_before.second)
		appointment_list = frappe.db.sql(
			"select name from `tabPatient Appointment` where start_dt between %s and %s and reminded = 0 ",
			(datetime.datetime.now(), rem_dt)
		)
		# The reminder template does not change per appointment — fetch it once
		# instead of once per row, and iterate rows directly instead of
		# indexing with range(len(...)).
		message = frappe.db.get_value("Healthcare Settings", None, "app_rem_msg")
		for (appointment_name,) in appointment_list:
			doc = frappe.get_doc("Patient Appointment", appointment_name)
			send_message(doc, message)
			frappe.db.set_value("Patient Appointment", doc.name, "reminded", 1)
def send_message(doc, message):
"""Render the jinja `message` template against `doc` and SMS it to the
patient's mobile number (no-op when the patient has no mobile)."""
patient = frappe.get_doc("Patient", doc.patient)
if patient.mobile:
context = {"doc": doc, "alert": doc, "comments": None}
if doc.get("_comments"):
context["comments"] = json.loads(doc.get("_comments"))
# jinja to string convertion happens here
message = frappe.render_template(message, context)
number = [patient.mobile]
send_sms(number, message)
@frappe.whitelist()
def get_events(start, end, filters=None):
"""Returns events for Gantt / Calendar view rendering.
:param start: Start date-time.
:param end: End date-time.
:param filters: Filters (JSON).
"""
from frappe.desk.calendar import get_event_conditions
# `conditions` is SQL produced by frappe's own calendar helper (framework-
# sanitized); user date bounds go through query parameters below.
conditions = get_event_conditions("Patient Appointment", filters)
data = frappe.db.sql("""select name, patient, practitioner, status,
duration, timestamp(appointment_date, appointment_time) as
'start' from `tabPatient Appointment` where
(appointment_date between %(start)s and %(end)s)
and docstatus < 2 {conditions}""".format(conditions=conditions),
{"start": start, "end": end}, as_dict=True, update={"allDay": 0})
for item in data:
# Derive the event end from the appointment duration (minutes).
item.end = item.start + datetime.timedelta(minutes = item.duration)
return data
@frappe.whitelist()
def get_procedure_prescribed(patient):
	"""Return procedures prescribed to `patient` that are not yet booked,
	newest encounter first.
	"""
	# Security fix: `patient` was interpolated into the SQL with str.format,
	# allowing SQL injection through this whitelisted endpoint. Pass it as a
	# bound query parameter instead.
	return frappe.db.sql("""select pp.name, pp.procedure, pp.parent, ct.practitioner,
		ct.encounter_date, pp.practitioner, pp.date, pp.department
		from `tabPatient Encounter` ct, `tabProcedure Prescription` pp
		where ct.patient=%(patient)s and pp.parent=ct.name and pp.appointment_booked=0
		order by ct.creation desc""", {"patient": patient})
# fix: Patient Appointment - Calendar - set color from appointment type
# -*- coding: utf-8 -*-
# Copyright (c) 2015, ESS LLP and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
import json
from frappe.utils import getdate, add_days
from frappe import _
import datetime
from frappe.core.doctype.sms_settings.sms_settings import send_sms
from erpnext.hr.doctype.employee.employee import is_holiday
from erpnext.healthcare.doctype.healthcare_settings.healthcare_settings import get_receivable_account,get_income_account
from erpnext.healthcare.utils import validity_exists, service_item_and_practitioner_charge
class PatientAppointment(Document):
	def on_update(self):
		"""If the appointment is scheduled for today, mark it 'Open' immediately."""
		today = datetime.date.today()
		appointment_date = getdate(self.appointment_date)
		# If appointment created for today set as open
		if today == appointment_date:
			frappe.db.set_value("Patient Appointment", self.name, "status", "Open")
			# Reload so the in-memory doc reflects the direct DB write above.
			self.reload()

	def after_insert(self):
		"""Post-creation bookkeeping: mark the source prescription as booked,
		consume a visit from an existing Fee Validity (flagging the appointment
		as invoiced when the validity is tied to an invoice), send the
		confirmation SMS and, when configured, auto-create the invoice."""
		if self.procedure_prescription:
			frappe.db.set_value("Procedure Prescription", self.procedure_prescription, "appointment_booked", True)
		# Check fee validity exists
		appointment = self
		validity_exist = validity_exists(appointment.practitioner, appointment.patient)
		if validity_exist:
			fee_validity = frappe.get_doc("Fee Validity", validity_exist[0][0])
			# Check if the validity is valid
			appointment_date = getdate(appointment.appointment_date)
			if (fee_validity.valid_till >= appointment_date) and (fee_validity.visited < fee_validity.max_visit):
				visited = fee_validity.visited + 1
				frappe.db.set_value("Fee Validity", fee_validity.name, "visited", visited)
				if fee_validity.ref_invoice:
					frappe.db.set_value("Patient Appointment", appointment.name, "invoiced", True)
				frappe.msgprint(_("{0} has fee validity till {1}").format(appointment.patient, fee_validity.valid_till))
		confirm_sms(self)
		# NOTE(review): the settings value is compared as the string '1' while
		# 'invoiced' is compared as the int 1 — confirm both match the stored types.
		if frappe.db.get_value("Healthcare Settings", None, "manage_appointment_invoice_automatically") == '1' and \
			frappe.db.get_value("Patient Appointment", self.name, "invoiced") != 1:
			invoice_appointment(self)
@frappe.whitelist()
def invoice_appointment(appointment_doc):
	"""Create and submit a paid (POS) Sales Invoice for *appointment_doc*.

	The invoice carries a single item line (the practitioner's consulting
	charge) and a single payment line for the appointment's paid amount.

	:param appointment_doc: Patient Appointment document to invoice.
	:return: False when the appointment has no name (unsaved), otherwise None.
	"""
	if not appointment_doc.name:
		return False
	sales_invoice = frappe.new_doc("Sales Invoice")
	sales_invoice.customer = frappe.get_value("Patient", appointment_doc.patient, "customer")
	sales_invoice.appointment = appointment_doc.name
	sales_invoice.due_date = getdate()
	sales_invoice.is_pos = True
	sales_invoice.company = appointment_doc.company
	sales_invoice.debit_to = get_receivable_account(appointment_doc.company)
	item_line = sales_invoice.append("items")
	service_item, practitioner_charge = service_item_and_practitioner_charge(appointment_doc)
	item_line.item_code = service_item
	item_line.description = "Consulting Charges: " + appointment_doc.practitioner
	item_line.income_account = get_income_account(appointment_doc.practitioner, appointment_doc.company)
	item_line.rate = practitioner_charge
	item_line.amount = practitioner_charge
	item_line.qty = 1
	# Back-reference so the invoice item can be traced to this appointment
	# (used by exists_sales_invoice_item / appointment_cancel).
	item_line.reference_dt = "Patient Appointment"
	item_line.reference_dn = appointment_doc.name
	payments_line = sales_invoice.append("payments")
	payments_line.mode_of_payment = appointment_doc.mode_of_payment
	payments_line.amount = appointment_doc.paid_amount
	sales_invoice.set_missing_values(for_validate = True)
	sales_invoice.save(ignore_permissions=True)
	sales_invoice.submit()
	# Fix: translate first, then substitute. The old code called .format()
	# inside _(), so the formatted string never matched the translation catalog.
	frappe.msgprint(_("Sales Invoice {0} created as paid").format(sales_invoice.name), alert=True)
def appointment_cancel(appointment_id):
	"""Cancel an appointment and roll back its billing side effects.

	Invoiced appointments get their Sales Invoice cancelled when possible;
	otherwise the associated Fee Validity visit count is decremented and the
	user is asked to review the invoice manually.

	:param appointment_id: Name of the Patient Appointment to cancel.
	"""
	appointment = frappe.get_doc("Patient Appointment", appointment_id)
	# If invoiced --> fee_validity update with -1 visit
	if appointment.invoiced:
		sales_invoice = exists_sales_invoice(appointment)
		if sales_invoice and cancel_sales_invoice(sales_invoice):
			# Fix: translate the template first, then .format() — formatting
			# inside _() breaks the translation catalog lookup.
			frappe.msgprint(
				_("Appointment {0} and Sales Invoice {1} cancelled").format(appointment.name, sales_invoice.name)
			)
		else:
			validity = validity_exists(appointment.practitioner, appointment.patient)
			if validity:
				fee_validity = frappe.get_doc("Fee Validity", validity[0][0])
				if appointment_valid_in_fee_validity(appointment, fee_validity.valid_till, True, fee_validity.ref_invoice):
					visited = fee_validity.visited - 1
					frappe.db.set_value("Fee Validity", fee_validity.name, "visited", visited)
					frappe.msgprint(
						_("Appointment cancelled, Please review and cancel the invoice {0}").format(fee_validity.ref_invoice)
					)
				else:
					frappe.msgprint(_("Appointment cancelled"))
			else:
				frappe.msgprint(_("Appointment cancelled"))
	else:
		frappe.msgprint(_("Appointment cancelled"))
def appointment_valid_in_fee_validity(appointment, valid_end_date, invoiced, ref_invoice):
	"""Check whether *appointment* falls inside the fee-validity window whose
	earliest appointment was billed on *ref_invoice*.

	:param appointment: Patient Appointment document being checked.
	:param valid_end_date: End date of the fee-validity window.
	:param invoiced: Invoiced flag to match appointments against.
	:param ref_invoice: Sales Invoice name the validity was created from.
	:return: True when the oldest in-window appointment's invoice is *ref_invoice*.
	"""
	valid_days = frappe.db.get_value("Healthcare Settings", None, "valid_days")
	max_visit = frappe.db.get_value("Healthcare Settings", None, "max_visit")
	valid_start_date = add_days(getdate(valid_end_date), -int(valid_days))
	# Appointments which share this appointment's fee-validity date range.
	# Fix: the old filters dict used the key 'appointment_date' twice, so the
	# "<=" upper bound was silently discarded by the dict literal; a single
	# 'between' filter applies both bounds.
	appointments = frappe.get_list("Patient Appointment", {
		'patient': appointment.patient,
		'invoiced': invoiced,
		'appointment_date': ('between', [getdate(valid_start_date), getdate(valid_end_date)]),
		'practitioner': appointment.practitioner
	}, order_by="appointment_date desc", limit=int(max_visit))
	if appointments and len(appointments) > 0:
		# Oldest appointment in the window (list is sorted descending).
		appointment_obj = appointments[len(appointments) - 1]
		sales_invoice = exists_sales_invoice(appointment_obj)
		# Fix: exists_sales_invoice returns False when no invoice item exists;
		# guard before dereferencing .name to avoid an AttributeError.
		if sales_invoice and sales_invoice.name == ref_invoice:
			return True
	return False
def cancel_sales_invoice(sales_invoice):
	"""Cancel *sales_invoice* when automatic appointment invoicing is enabled
	and the invoice holds exactly one item (i.e. only the appointment charge).

	:return: True when the invoice was cancelled, False otherwise.
	"""
	auto_invoicing = frappe.db.get_value("Healthcare Settings", None, "manage_appointment_invoice_automatically")
	if auto_invoicing != '1':
		return False
	if len(sales_invoice.items) != 1:
		return False
	sales_invoice.cancel()
	return True
def exists_sales_invoice_item(appointment):
	"""Return the name of the Sales Invoice Item that references
	*appointment*, or a falsy value when no such item exists."""
	invoice_item_filters = {
		"reference_dt": "Patient Appointment",
		"reference_dn": appointment.name,
	}
	return frappe.db.exists("Sales Invoice Item", invoice_item_filters)
def exists_sales_invoice(appointment):
	"""Return the Sales Invoice document whose item references *appointment*,
	or False when the appointment was never invoiced."""
	invoice_item = exists_sales_invoice_item(appointment)
	if not invoice_item:
		return False
	parent_invoice = frappe.db.get_value("Sales Invoice Item", invoice_item, "parent")
	return frappe.get_doc("Sales Invoice", parent_invoice)
@frappe.whitelist()
def get_availability_data(date, practitioner):
	"""
	Get availability data of 'practitioner' on 'date'

	:param date: Date to check in schedule
	:param practitioner: Name of the practitioner
	:return: dict containing a list of available slots, list of appointments and time of appointments
	:raises frappe.ValidationError: on holidays, leave days, missing schedules
		or when no slot matches the weekday.

	Only change from the previous version: every frappe.throw() now translates
	the template first and formats afterwards (`_("...").format(x)`), so the
	messages can actually be found in the translation catalog.
	"""
	date = getdate(date)
	weekday = date.strftime("%A")
	available_slots = []
	slot_details = []
	practitioner_schedule = None
	employee = None
	practitioner_obj = frappe.get_doc("Healthcare Practitioner", practitioner)
	# Get practitioner employee relation
	if practitioner_obj.employee:
		employee = practitioner_obj.employee
	elif practitioner_obj.user_id:
		if frappe.db.exists({
			"doctype": "Employee",
			"user_id": practitioner_obj.user_id
		}):
			employee = frappe.get_doc("Employee", {"user_id": practitioner_obj.user_id}).name
	if employee:
		# Check if it is Holiday
		if is_holiday(employee, date):
			frappe.throw(_("{0} is a company holiday").format(date))
		# Check if He/She on Leave
		leave_record = frappe.db.sql("""select half_day from `tabLeave Application`
			where employee = %s and %s between from_date and to_date
			and docstatus = 1""", (employee, date), as_dict=True)
		if leave_record:
			if leave_record[0].half_day:
				frappe.throw(_("{0} on Half day Leave on {1}").format(practitioner, date))
			else:
				frappe.throw(_("{0} on Leave on {1}").format(practitioner, date))
	# get practitioners schedule
	if practitioner_obj.practitioner_schedules:
		for schedule in practitioner_obj.practitioner_schedules:
			if schedule.schedule:
				practitioner_schedule = frappe.get_doc("Practitioner Schedule", schedule.schedule)
			else:
				frappe.throw(_("{0} does not have a Healthcare Practitioner Schedule. Add it in Healthcare Practitioner master").format(practitioner))
			if practitioner_schedule:
				# Slots of this schedule that fall on the requested weekday.
				available_slots = []
				for t in practitioner_schedule.time_slots:
					if weekday == t.day:
						available_slots.append(t)
				if available_slots:
					appointments = []
					if schedule.service_unit:
						slot_name = schedule.schedule+" - "+schedule.service_unit
						allow_overlap = frappe.get_value('Healthcare Service Unit', schedule.service_unit, 'overlap_appointments')
						if allow_overlap:
							# fetch all appointments to practitioner by service unit
							appointments = frappe.get_all(
								"Patient Appointment",
								filters={"practitioner": practitioner, "service_unit": schedule.service_unit, "appointment_date": date, "status": ["not in",["Cancelled"]]},
								fields=["name", "appointment_time", "duration", "status"])
						else:
							# fetch all appointments to service unit
							appointments = frappe.get_all(
								"Patient Appointment",
								filters={"service_unit": schedule.service_unit, "appointment_date": date, "status": ["not in",["Cancelled"]]},
								fields=["name", "appointment_time", "duration", "status"])
					else:
						slot_name = schedule.schedule
						# fetch all appointments to practitioner without service unit
						appointments = frappe.get_all(
							"Patient Appointment",
							filters={"practitioner": practitioner, "service_unit": '', "appointment_date": date, "status": ["not in",["Cancelled"]]},
							fields=["name", "appointment_time", "duration", "status"])
					slot_details.append({"slot_name":slot_name, "service_unit":schedule.service_unit,
						"avail_slot":available_slots, 'appointments': appointments})
	else:
		frappe.throw(_("{0} does not have a Healthcare Practitioner Schedule. Add it in Healthcare Practitioner master").format(practitioner))
	if not available_slots and not slot_details:
		# TODO: return available slots in nearby dates
		frappe.throw(_("Healthcare Practitioner not available on {0}").format(weekday))
	return {
		"slot_details": slot_details
	}
@frappe.whitelist()
def update_status(appointment_id, status):
	"""Set the appointment's status and keep the linked Procedure
	Prescription's booked flag in sync. Cancelling also triggers the
	invoice / fee-validity rollback via appointment_cancel()."""
	frappe.db.set_value("Patient Appointment", appointment_id, "status", status)
	cancelled = status == "Cancelled"
	if cancelled:
		appointment_cancel(appointment_id)
	prescription = frappe.db.get_value("Patient Appointment", appointment_id, "procedure_prescription")
	if prescription:
		frappe.db.set_value("Procedure Prescription", prescription, "appointment_booked", not cancelled)
@frappe.whitelist()
def set_open_appointments():
	"""Flip today's 'Scheduled' appointments to 'Open'. Intended to be run
	by a daily scheduler job."""
	frappe.db.sql(
		"update `tabPatient Appointment` set status='Open' where status = 'Scheduled'"
		" and appointment_date = %s", getdate())
@frappe.whitelist()
def set_pending_appointments():
	"""Mark past-dated 'Scheduled'/'Open' appointments as 'Pending'.
	Intended to be run by a daily scheduler job."""
	frappe.db.sql(
		"update `tabPatient Appointment` set status='Pending' where status in "
		"('Scheduled','Open') and appointment_date < %s", getdate())
def confirm_sms(doc):
	"""Send the configured confirmation SMS for the appointment *doc*,
	but only when confirmations are enabled in Healthcare Settings."""
	if frappe.db.get_value("Healthcare Settings", None, "app_con") != '1':
		return
	template = frappe.db.get_value("Healthcare Settings", None, "app_con_msg")
	send_message(doc, template)
@frappe.whitelist()
def create_encounter(appointment):
	"""Build (without saving) a Patient Encounter prefilled from the given
	appointment and return it as a dict for the client-side form."""
	source = frappe.get_doc("Patient Appointment", appointment)
	encounter = frappe.new_doc("Patient Encounter")
	encounter.appointment = source.name
	encounter.patient = source.patient
	encounter.practitioner = source.practitioner
	encounter.visit_department = source.department
	encounter.patient_sex = source.patient_sex
	encounter.encounter_date = source.appointment_date
	# Carry the billing state over so the encounter is not invoiced twice.
	if source.invoiced:
		encounter.invoiced = True
	return encounter.as_dict()
def remind_appointment():
	"""Send reminder SMS for un-reminded appointments starting within the
	configured lead time ('rem_before' in Healthcare Settings), then mark
	them as reminded. Intended to be run by a scheduler job."""
	if frappe.db.get_value("Healthcare Settings", None, "app_rem") != '1':
		return
	rem_before = datetime.datetime.strptime(frappe.get_value("Healthcare Settings", None, "rem_before"), "%H:%M:%S")
	# Sample the clock once so the [now, now + lead-time] window is consistent
	# (the old code called now() twice, widening the window slightly).
	now = datetime.datetime.now()
	rem_dt = now + datetime.timedelta(
		hours=rem_before.hour, minutes=rem_before.minute, seconds=rem_before.second)
	appointment_list = frappe.db.sql(
		"select name from `tabPatient Appointment` where start_dt between %s and %s and reminded = 0 ",
		(now, rem_dt)
	)
	# Look the message template up once instead of once per appointment.
	message = frappe.db.get_value("Healthcare Settings", None, "app_rem_msg")
	# Iterate the result rows directly rather than via range(len(...)).
	for (appointment_name,) in appointment_list:
		doc = frappe.get_doc("Patient Appointment", appointment_name)
		send_message(doc, message)
		frappe.db.set_value("Patient Appointment", doc.name, "reminded", 1)
def send_message(doc, message):
	"""Render *message* as a Jinja template in the context of *doc* and send
	the rendered text by SMS to the patient's mobile. A patient without a
	mobile number is skipped silently."""
	patient = frappe.get_doc("Patient", doc.patient)
	if patient.mobile:
		render_context = {"doc": doc, "alert": doc, "comments": None}
		comments_json = doc.get("_comments")
		if comments_json:
			render_context["comments"] = json.loads(comments_json)
		# jinja to string convertion happens here
		body = frappe.render_template(message, render_context)
		send_sms([patient.mobile], body)
@frappe.whitelist()
def get_events(start, end, filters=None):
	"""Returns events for Gantt / Calendar view rendering.

	:param start: Start date-time.
	:param end: End date-time.
	:param filters: Filters (JSON).
	"""
	from frappe.desk.calendar import get_event_conditions
	# Extra WHERE clauses derived from the client-supplied filters.
	# NOTE(review): `conditions` is interpolated into the SQL string — assumed
	# SQL-safe because it is produced by frappe's own calendar helper; confirm.
	conditions = get_event_conditions("Patient Appointment", filters)
	# Left join pulls the Appointment Type's color so the calendar can tint
	# each event; start/end are bound as parameters and every row is stamped
	# with allDay=0 via the `update` argument.
	data = frappe.db.sql("""
		select
		`tabPatient Appointment`.name, `tabPatient Appointment`.patient,
		`tabPatient Appointment`.practitioner, `tabPatient Appointment`.status,
		`tabPatient Appointment`.duration,
		timestamp(`tabPatient Appointment`.appointment_date, `tabPatient Appointment`.appointment_time) as 'start',
		`tabAppointment Type`.color
		from
		`tabPatient Appointment`
		left join `tabAppointment Type` on `tabPatient Appointment`.appointment_type=`tabAppointment Type`.name
		where
		(`tabPatient Appointment`.appointment_date between %(start)s and %(end)s)
		and `tabPatient Appointment`.docstatus < 2 {conditions}""".format(conditions=conditions),
		{"start": start, "end": end}, as_dict=True, update={"allDay": 0})
	# Derive each event's end time from its duration (minutes).
	for item in data:
		item.end = item.start + datetime.timedelta(minutes = item.duration)
	return data
@frappe.whitelist()
def get_procedure_prescribed(patient):
	"""Return prescribed procedures for *patient* that are not yet booked.

	:param patient: Name (docname) of the Patient.
	:return: Rows of (prescription name, procedure, encounter, practitioner,
		encounter date, prescribing practitioner, date, department), most
		recent encounter first.
	"""
	# Bind the patient value as a query parameter instead of interpolating it
	# with str.format — this is a whitelisted endpoint, so the old version was
	# open to SQL injection through the `patient` argument.
	return frappe.db.sql("""select pp.name, pp.procedure, pp.parent, ct.practitioner,
		ct.encounter_date, pp.practitioner, pp.date, pp.department
		from `tabPatient Encounter` ct, `tabProcedure Prescription` pp
		where ct.patient=%(patient)s and pp.parent=ct.name and pp.appointment_booked=0
		order by ct.creation desc""", {"patient": patient})
|
# -*- coding: utf-8 -*-
"""
Tests for the Openstack Cloud Provider
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import shutil
from time import sleep
from salt.config import cloud_config, cloud_providers_config
from salt.ext.six.moves import range
from salt.utils.yaml import safe_load
from tests.support.case import ShellCase
from tests.support.helpers import expensiveTest, random_string
from tests.support.paths import FILES
from tests.support.runtests import RUNTIME_VARS
# Default timeout passed to run_cloud() for instance deletion — destroying
# cloud instances can be slow. Presumably seconds (ShellCase convention);
# TODO confirm against tests.support.case.ShellCase.
TIMEOUT = 500

log = logging.getLogger(__name__)
@expensiveTest
class CloudTest(ShellCase):
    # Name of the salt-cloud provider under test; subclasses MUST override.
    PROVIDER = ""
    # Provider-config keys that must be present (and truthy) or setUp skips.
    REQUIRED_PROVIDER_CONFIG_ITEMS = tuple()
    # Seconds to sleep between polls while waiting for an instance to appear.
    __RE_RUN_DELAY = 30
    # Maximum number of polls before declaring instance creation failed.
    __RE_TRIES = 12

    @staticmethod
    def clean_cloud_dir(tmp_dir):
        """
        Clean the cloud.providers.d tmp directory
        """
        # make sure old provider configs are deleted
        if not os.path.isdir(tmp_dir):
            return
        for fname in os.listdir(tmp_dir):
            os.remove(os.path.join(tmp_dir, fname))

    def query_instances(self):
        """
        Standardize the data returned from a salt-cloud --query
        """
        # Keep only lines naming instances created by this suite
        # ("cloud-test-" prefix), stripped of surrounding ": " noise.
        return set(
            x.strip(": ")
            for x in self.run_cloud("--query")
            if x.lstrip().lower().startswith("cloud-test-")
        )

    def _instance_exists(self, instance_name=None, query=None):
        """
        :param instance_name: The name of the instance to check for in salt-cloud.
        For example this is may used when a test temporarily renames an instance
        :param query: The result of a salt-cloud --query run outside of this function
        """
        if not instance_name:
            instance_name = self.instance_name
        if not query:
            query = self.query_instances()
        log.debug('Checking for "{}" in {}'.format(instance_name, query))
        if isinstance(query, set):
            return instance_name in query
        # A non-set query is raw output lines; normalize each before comparing.
        return any(instance_name == q.strip(": ") for q in query)

    def assertInstanceExists(self, creation_ret=None, instance_name=None):
        """
        :param instance_name: Override the checked instance name, otherwise the class default will be used.
        :param creation_ret: The return value from the run_cloud() function that created the instance
        """
        if not instance_name:
            instance_name = self.instance_name
        # If it exists but doesn't show up in the creation_ret, there was probably an error during creation
        if creation_ret:
            self.assertIn(
                instance_name,
                [i.strip(": ") for i in creation_ret],
                "An error occured during instance creation:  |\n\t{}\n\t|".format(
                    "\n\t".join(creation_ret)
                ),
            )
        else:
            # Verify that the instance exists via query
            query = self.query_instances()
            # Poll up to __RE_TRIES times, sleeping __RE_RUN_DELAY between polls.
            for tries in range(self.__RE_TRIES):
                if self._instance_exists(instance_name, query):
                    log.debug(
                        'Instance "{}" reported after {} seconds'.format(
                            instance_name, tries * self.__RE_RUN_DELAY
                        )
                    )
                    break
                else:
                    sleep(self.__RE_RUN_DELAY)
                    query = self.query_instances()
            # Assert that the last query was successful
            self.assertTrue(
                self._instance_exists(instance_name, query),
                'Instance "{}" was not created successfully: {}'.format(
                    self.instance_name, ", ".join(query)
                ),
            )
        log.debug('Instance exists and was created: "{}"'.format(instance_name))

    def assertDestroyInstance(self, instance_name=None, timeout=None):
        """Destroy the instance via salt-cloud and assert the deletion
        actually happened (first from the delete output, then by re-query)."""
        if timeout is None:
            timeout = TIMEOUT
        if not instance_name:
            instance_name = self.instance_name
        log.debug('Deleting instance "{}"'.format(instance_name))
        delete_str = self.run_cloud(
            "-d {0} --assume-yes --out=yaml".format(instance_name), timeout=timeout
        )
        if delete_str:
            delete = safe_load("\n".join(delete_str))
            self.assertIn(self.profile_str, delete)
            self.assertIn(self.PROVIDER, delete[self.profile_str])
            self.assertIn(instance_name, delete[self.profile_str][self.PROVIDER])
            delete_status = delete[self.profile_str][self.PROVIDER][instance_name]
            # Providers report deletion in different shapes: a bare "True"
            # string, or a dict with a currentState entry.
            if isinstance(delete_status, str):
                self.assertEqual(delete_status, "True")
                return
            elif isinstance(delete_status, dict):
                current_state = delete_status.get("currentState")
                if current_state:
                    if current_state.get("ACTION"):
                        self.assertIn(".delete", current_state.get("ACTION"))
                        return
                    else:
                        self.assertEqual(current_state.get("name"), "shutting-down")
                        return
        # It's not clear from the delete string that deletion was successful, ask salt-cloud after a delay
        query = self.query_instances()
        # some instances take a while to report their destruction
        for tries in range(6):
            if self._instance_exists(query=query):
                sleep(30)
                log.debug(
                    'Instance "{}" still found in query after {} tries: {}'.format(
                        instance_name, tries, query
                    )
                )
                query = self.query_instances()
        # The last query should have been successful
        self.assertNotIn(instance_name, self.query_instances())

    @property
    def instance_name(self):
        # Lazily build a unique instance name once and cache it on the instance.
        if not hasattr(self, "_instance_name"):
            # Create the cloud instance name to be used throughout the tests
            subclass = self.__class__.__name__.strip("Test")
            # Use the first three letters of the subclass, fill with '-' if too short
            self._instance_name = random_string(
                "cloud-test-{:-<3}-".format(subclass[:3]), uppercase=False
            ).lower()
        return self._instance_name

    @property
    def providers(self):
        # Cached output of `salt-cloud --list-providers`.
        if not hasattr(self, "_providers"):
            self._providers = self.run_cloud("--list-providers")
        return self._providers

    @property
    def provider_config(self):
        # Provider section parsed from the temporary cloud.providers.d config.
        if not hasattr(self, "_provider_config"):
            self._provider_config = cloud_providers_config(
                os.path.join(
                    RUNTIME_VARS.TMP_CONF_DIR,
                    "cloud.providers.d",
                    self.PROVIDER + ".conf",
                )
            )
        return self._provider_config[self.profile_str][self.PROVIDER]

    @property
    def config(self):
        # Cached cloud profile configuration for this provider.
        if not hasattr(self, "_config"):
            self._config = cloud_config(
                os.path.join(
                    RUNTIME_VARS.TMP_CONF_DIR,
                    "cloud.profiles.d",
                    self.PROVIDER + ".conf",
                )
            )
        return self._config

    @property
    def profile_str(self):
        # Config-file key for this provider, e.g. "ec2-config".
        return self.PROVIDER + "-config"

    def setUp(self):
        """
        Sets up the test requirements. In child classes, define PROVIDER and REQUIRED_PROVIDER_CONFIG_ITEMS or this will fail
        """
        super(CloudTest, self).setUp()
        if not self.PROVIDER:
            self.fail("A PROVIDER must be defined for this test")
        # check if appropriate cloud provider and profile files are present
        if self.profile_str + ":" not in self.providers:
            self.skipTest(
                "Configuration file for {0} was not found. Check {0}.conf files "
                "in tests/integration/files/conf/cloud.*.d/ to run these tests.".format(
                    self.PROVIDER
                )
            )
        # Skip (rather than fail) when credentials/config items are missing.
        missing_conf_item = []
        for att in self.REQUIRED_PROVIDER_CONFIG_ITEMS:
            if not self.provider_config.get(att):
                missing_conf_item.append(att)
        if missing_conf_item:
            self.skipTest(
                "Conf items are missing that must be provided to run these tests: {}".format(
                    ", ".join(missing_conf_item)
                )
                + "\nCheck tests/integration/files/conf/cloud.providers.d/{0}.conf".format(
                    self.PROVIDER
                )
            )

    def _alt_names(self):
        """
        Check for an instances created alongside this test's instance that weren't cleaned up
        """
        query = self.query_instances()
        instances = set()
        for q in query:
            # Verify but this is a new name and not a shutting down ec2 instance
            if q.startswith(self.instance_name) and not q.split("-")[-1].startswith(
                "DEL"
            ):
                instances.add(q)
                log.debug(
                    'Adding "{}" to the set of instances that needs to be deleted'.format(
                        q
                    )
                )
        return instances

    def _ensure_deletion(self, instance_name=None):
        """
        Make sure that the instance absolutely gets deleted, but fail the test if it happens in the tearDown
        :return True if an instance was deleted, False if no instance was deleted; and a message
        """
        destroyed = False
        if not instance_name:
            instance_name = self.instance_name
        if self._instance_exists(instance_name):
            # Retry destruction up to three times, re-checking between tries.
            for tries in range(3):
                try:
                    self.assertDestroyInstance(instance_name)
                    return (
                        False,
                        'The instance "{}" was deleted during the tearDown, not the test.'.format(
                            instance_name
                        ),
                    )
                except AssertionError as e:
                    log.error(
                        'Failed to delete instance "{}". Tries: {}\n{}'.format(
                            instance_name, tries, str(e)
                        )
                    )
                if not self._instance_exists():
                    destroyed = True
                    break
                else:
                    sleep(30)
            if not destroyed:
                # Destroying instances in the tearDown is a contingency, not the way things should work by default.
                return (
                    False,
                    'The Instance "{}" was not deleted after multiple attempts'.format(
                        instance_name
                    ),
                )
        return (
            True,
            'The instance "{}" cleaned up properly after the test'.format(
                instance_name
            ),
        )

    def tearDown(self):
        """
        Clean up after tests, If the instance still exists for any reason, delete it.
        Instances should be destroyed before the tearDown, assertDestroyInstance() should be called exactly
        one time in a test for each instance created. This is a failSafe and something went wrong
        if the tearDown is where an instance is destroyed.
        """
        success = True
        fail_messages = []
        alt_names = self._alt_names()
        for instance in alt_names:
            alt_destroyed, alt_destroy_message = self._ensure_deletion(instance)
            if not alt_destroyed:
                success = False
                fail_messages.append(alt_destroy_message)
                log.error(
                    'Failed to destroy instance "{}": {}'.format(
                        instance, alt_destroy_message
                    )
                )
        self.assertTrue(success, "\n".join(fail_messages))
        # Any leftover instance means the test itself failed to clean up.
        self.assertFalse(
            alt_names, "Cleanup should happen in the test, not the TearDown"
        )

    @classmethod
    def tearDownClass(cls):
        # Remove the provider config written by setUpClass.
        cls.clean_cloud_dir(cls.tmp_provider_dir)

    @classmethod
    def setUpClass(cls):
        # clean up before setup
        cls.tmp_provider_dir = os.path.join(
            RUNTIME_VARS.TMP_CONF_DIR, "cloud.providers.d"
        )
        cls.clean_cloud_dir(cls.tmp_provider_dir)
        # add the provider config for only the cloud we are testing
        provider_file = cls.PROVIDER + ".conf"
        shutil.copyfile(
            os.path.join(
                os.path.join(FILES, "conf", "cloud.providers.d"), provider_file
            ),
            os.path.join(os.path.join(cls.tmp_provider_dir, provider_file)),
        )
fix pre-commit failures on #57996
"""
Tests for the Openstack Cloud Provider
"""
import logging
import os
import shutil
from time import sleep
from salt.config import cloud_config, cloud_providers_config
from salt.ext.six.moves import range
from salt.utils.yaml import safe_load
from tests.support.case import ShellCase
from tests.support.helpers import expensiveTest, random_string
from tests.support.paths import FILES
from tests.support.runtests import RUNTIME_VARS
# Default timeout passed to run_cloud() for instance deletion — destroying
# cloud instances can be slow. Presumably seconds (ShellCase convention);
# TODO confirm against tests.support.case.ShellCase.
TIMEOUT = 500

log = logging.getLogger(__name__)
@expensiveTest
class CloudTest(ShellCase):
    # Name of the salt-cloud provider under test; subclasses MUST override.
    PROVIDER = ""
    # Provider-config keys that must be present (and truthy) or setUp skips.
    REQUIRED_PROVIDER_CONFIG_ITEMS = tuple()
    # Seconds to sleep between polls while waiting for an instance to appear.
    __RE_RUN_DELAY = 30
    # Maximum number of polls before declaring instance creation failed.
    __RE_TRIES = 12

    @staticmethod
    def clean_cloud_dir(tmp_dir):
        """
        Clean the cloud.providers.d tmp directory
        """
        # make sure old provider configs are deleted
        if not os.path.isdir(tmp_dir):
            return
        for fname in os.listdir(tmp_dir):
            os.remove(os.path.join(tmp_dir, fname))

    def query_instances(self):
        """
        Standardize the data returned from a salt-cloud --query
        """
        # Keep only lines naming instances created by this suite
        # ("cloud-test-" prefix), stripped of surrounding ": " noise.
        return {
            x.strip(": ")
            for x in self.run_cloud("--query")
            if x.lstrip().lower().startswith("cloud-test-")
        }

    def _instance_exists(self, instance_name=None, query=None):
        """
        :param instance_name: The name of the instance to check for in salt-cloud.
        For example this is may used when a test temporarily renames an instance
        :param query: The result of a salt-cloud --query run outside of this function
        """
        if not instance_name:
            instance_name = self.instance_name
        if not query:
            query = self.query_instances()
        log.debug('Checking for "{}" in {}'.format(instance_name, query))
        if isinstance(query, set):
            return instance_name in query
        # A non-set query is raw output lines; normalize each before comparing.
        return any(instance_name == q.strip(": ") for q in query)

    def assertInstanceExists(self, creation_ret=None, instance_name=None):
        """
        :param instance_name: Override the checked instance name, otherwise the class default will be used.
        :param creation_ret: The return value from the run_cloud() function that created the instance
        """
        if not instance_name:
            instance_name = self.instance_name
        # If it exists but doesn't show up in the creation_ret, there was probably an error during creation
        if creation_ret:
            self.assertIn(
                instance_name,
                [i.strip(": ") for i in creation_ret],
                "An error occured during instance creation:  |\n\t{}\n\t|".format(
                    "\n\t".join(creation_ret)
                ),
            )
        else:
            # Verify that the instance exists via query
            query = self.query_instances()
            # Poll up to __RE_TRIES times, sleeping __RE_RUN_DELAY between polls.
            for tries in range(self.__RE_TRIES):
                if self._instance_exists(instance_name, query):
                    log.debug(
                        'Instance "{}" reported after {} seconds'.format(
                            instance_name, tries * self.__RE_RUN_DELAY
                        )
                    )
                    break
                else:
                    sleep(self.__RE_RUN_DELAY)
                    query = self.query_instances()
            # Assert that the last query was successful
            self.assertTrue(
                self._instance_exists(instance_name, query),
                'Instance "{}" was not created successfully: {}'.format(
                    self.instance_name, ", ".join(query)
                ),
            )
        log.debug('Instance exists and was created: "{}"'.format(instance_name))

    def assertDestroyInstance(self, instance_name=None, timeout=None):
        """Destroy the instance via salt-cloud and assert the deletion
        actually happened (first from the delete output, then by re-query)."""
        if timeout is None:
            timeout = TIMEOUT
        if not instance_name:
            instance_name = self.instance_name
        log.debug('Deleting instance "{}"'.format(instance_name))
        delete_str = self.run_cloud(
            "-d {} --assume-yes --out=yaml".format(instance_name), timeout=timeout
        )
        if delete_str:
            delete = safe_load("\n".join(delete_str))
            self.assertIn(self.profile_str, delete)
            self.assertIn(self.PROVIDER, delete[self.profile_str])
            self.assertIn(instance_name, delete[self.profile_str][self.PROVIDER])
            delete_status = delete[self.profile_str][self.PROVIDER][instance_name]
            # Providers report deletion in different shapes: a bare "True"
            # string, or a dict with a currentState entry.
            if isinstance(delete_status, str):
                self.assertEqual(delete_status, "True")
                return
            elif isinstance(delete_status, dict):
                current_state = delete_status.get("currentState")
                if current_state:
                    if current_state.get("ACTION"):
                        self.assertIn(".delete", current_state.get("ACTION"))
                        return
                    else:
                        self.assertEqual(current_state.get("name"), "shutting-down")
                        return
        # It's not clear from the delete string that deletion was successful, ask salt-cloud after a delay
        query = self.query_instances()
        # some instances take a while to report their destruction
        for tries in range(6):
            if self._instance_exists(query=query):
                sleep(30)
                log.debug(
                    'Instance "{}" still found in query after {} tries: {}'.format(
                        instance_name, tries, query
                    )
                )
                query = self.query_instances()
        # The last query should have been successful
        self.assertNotIn(instance_name, self.query_instances())

    @property
    def instance_name(self):
        # Lazily build a unique instance name once and cache it on the instance.
        if not hasattr(self, "_instance_name"):
            # Create the cloud instance name to be used throughout the tests
            subclass = self.__class__.__name__.strip("Test")
            # Use the first three letters of the subclass, fill with '-' if too short
            self._instance_name = random_string(
                "cloud-test-{:-<3}-".format(subclass[:3]), uppercase=False
            ).lower()
        return self._instance_name

    @property
    def providers(self):
        # Cached output of `salt-cloud --list-providers`.
        if not hasattr(self, "_providers"):
            self._providers = self.run_cloud("--list-providers")
        return self._providers

    @property
    def provider_config(self):
        # Provider section parsed from the temporary cloud.providers.d config.
        if not hasattr(self, "_provider_config"):
            self._provider_config = cloud_providers_config(
                os.path.join(
                    RUNTIME_VARS.TMP_CONF_DIR,
                    "cloud.providers.d",
                    self.PROVIDER + ".conf",
                )
            )
        return self._provider_config[self.profile_str][self.PROVIDER]

    @property
    def config(self):
        # Cached cloud profile configuration for this provider.
        if not hasattr(self, "_config"):
            self._config = cloud_config(
                os.path.join(
                    RUNTIME_VARS.TMP_CONF_DIR,
                    "cloud.profiles.d",
                    self.PROVIDER + ".conf",
                )
            )
        return self._config

    @property
    def profile_str(self):
        # Config-file key for this provider, e.g. "ec2-config".
        return self.PROVIDER + "-config"

    def setUp(self):
        """
        Sets up the test requirements. In child classes, define PROVIDER and REQUIRED_PROVIDER_CONFIG_ITEMS or this will fail
        """
        super().setUp()
        if not self.PROVIDER:
            self.fail("A PROVIDER must be defined for this test")
        # check if appropriate cloud provider and profile files are present
        if self.profile_str + ":" not in self.providers:
            self.skipTest(
                "Configuration file for {0} was not found. Check {0}.conf files "
                "in tests/integration/files/conf/cloud.*.d/ to run these tests.".format(
                    self.PROVIDER
                )
            )
        # Skip (rather than fail) when credentials/config items are missing.
        missing_conf_item = []
        for att in self.REQUIRED_PROVIDER_CONFIG_ITEMS:
            if not self.provider_config.get(att):
                missing_conf_item.append(att)
        if missing_conf_item:
            self.skipTest(
                "Conf items are missing that must be provided to run these tests: {}".format(
                    ", ".join(missing_conf_item)
                )
                + "\nCheck tests/integration/files/conf/cloud.providers.d/{}.conf".format(
                    self.PROVIDER
                )
            )

    def _alt_names(self):
        """
        Check for an instances created alongside this test's instance that weren't cleaned up
        """
        query = self.query_instances()
        instances = set()
        for q in query:
            # Verify but this is a new name and not a shutting down ec2 instance
            if q.startswith(self.instance_name) and not q.split("-")[-1].startswith(
                "DEL"
            ):
                instances.add(q)
                log.debug(
                    'Adding "{}" to the set of instances that needs to be deleted'.format(
                        q
                    )
                )
        return instances

    def _ensure_deletion(self, instance_name=None):
        """
        Make sure that the instance absolutely gets deleted, but fail the test if it happens in the tearDown
        :return True if an instance was deleted, False if no instance was deleted; and a message
        """
        destroyed = False
        if not instance_name:
            instance_name = self.instance_name
        if self._instance_exists(instance_name):
            # Retry destruction up to three times, re-checking between tries.
            for tries in range(3):
                try:
                    self.assertDestroyInstance(instance_name)
                    return (
                        False,
                        'The instance "{}" was deleted during the tearDown, not the test.'.format(
                            instance_name
                        ),
                    )
                except AssertionError as e:
                    log.error(
                        'Failed to delete instance "{}". Tries: {}\n{}'.format(
                            instance_name, tries, str(e)
                        )
                    )
                if not self._instance_exists():
                    destroyed = True
                    break
                else:
                    sleep(30)
            if not destroyed:
                # Destroying instances in the tearDown is a contingency, not the way things should work by default.
                return (
                    False,
                    'The Instance "{}" was not deleted after multiple attempts'.format(
                        instance_name
                    ),
                )
        return (
            True,
            'The instance "{}" cleaned up properly after the test'.format(
                instance_name
            ),
        )

    def tearDown(self):
        """
        Clean up after tests, If the instance still exists for any reason, delete it.
        Instances should be destroyed before the tearDown, assertDestroyInstance() should be called exactly
        one time in a test for each instance created. This is a failSafe and something went wrong
        if the tearDown is where an instance is destroyed.
        """
        success = True
        fail_messages = []
        alt_names = self._alt_names()
        for instance in alt_names:
            alt_destroyed, alt_destroy_message = self._ensure_deletion(instance)
            if not alt_destroyed:
                success = False
                fail_messages.append(alt_destroy_message)
                log.error(
                    'Failed to destroy instance "{}": {}'.format(
                        instance, alt_destroy_message
                    )
                )
        self.assertTrue(success, "\n".join(fail_messages))
        # Any leftover instance means the test itself failed to clean up.
        self.assertFalse(
            alt_names, "Cleanup should happen in the test, not the TearDown"
        )

    @classmethod
    def tearDownClass(cls):
        # Remove the provider config written by setUpClass.
        cls.clean_cloud_dir(cls.tmp_provider_dir)

    @classmethod
    def setUpClass(cls):
        # clean up before setup
        cls.tmp_provider_dir = os.path.join(
            RUNTIME_VARS.TMP_CONF_DIR, "cloud.providers.d"
        )
        cls.clean_cloud_dir(cls.tmp_provider_dir)
        # add the provider config for only the cloud we are testing
        provider_file = cls.PROVIDER + ".conf"
        shutil.copyfile(
            os.path.join(
                os.path.join(FILES, "conf", "cloud.providers.d"), provider_file
            ),
            os.path.join(os.path.join(cls.tmp_provider_dir, provider_file)),
        )
|
"""
Copyright [2009-2015] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.core.management.base import BaseCommand
from portal.models import Database, Rna, Xref, DatabaseStats
from portal.views import _get_json_lineage_tree
from django.db.models import Min, Max, Count, Avg
import json
####################
# Export functions #
####################
def compute_database_stats():
    """
    Precompute database statistics for display on Expert Database landing pages.
    avg_length
    min_length
    max_length
    num_sequences
    num_organisms
    length_counts
    taxonomic_lineage
    """
    for expert_db in Database.objects.order_by('-id').all():
        # Python 2 print statement: progress indicator, one line per database.
        print expert_db.descr
        context = dict()
        # Active (non-deleted) sequences cross-referenced to this database.
        rnas = Rna.objects.filter(xrefs__deleted='N',
                                  xrefs__db__descr=expert_db.descr)
        # avg_length, min_length, max_length, len_counts
        context.update(rnas.aggregate(min_length=Min('length'),
                                      max_length=Max('length'),
                                      avg_length=Avg('length')))
        # Histogram of sequence lengths, ordered for plotting.
        context['len_counts'] = list(rnas.values('length').\
                                     annotate(counts=Count('length')).\
                                     order_by('length'))
        # taxonomic_lineage
        xrefs = Xref.objects.select_related('accession').\
                filter(db__descr=expert_db.descr).iterator()
        lineages = _get_json_lineage_tree(xrefs)
        # update expert_db object
        expert_db.avg_length = context['avg_length']
        expert_db.min_length = context['min_length']
        expert_db.max_length = context['max_length']
        expert_db.num_sequences = expert_db.count_sequences()
        expert_db.num_organisms = expert_db.count_organisms()
        expert_db.save()
        # Stats rows are keyed by the database description; created empty on
        # first run, then overwritten below.
        expert_db_stats, created = DatabaseStats.objects.get_or_create(
            database=expert_db.descr,
            defaults={
                'length_counts': '',
                'taxonomic_lineage': ''
            })
        # django produces 'counts' keys, but d3 expects 'count' keys
        # NOTE(review): this string replace rewrites every occurrence of
        # 'counts' anywhere in the JSON payload, not just the key — safe only
        # while the payload contains nothing but 'length'/'counts' pairs.
        expert_db_stats.length_counts = json.dumps(context['len_counts']).\
                                        replace('counts', 'count')
        expert_db_stats.taxonomic_lineage = lineages
        expert_db_stats.save()
class Command(BaseCommand):
    """
    Usage:
    python manage.py database_stats
    """
    ########################
    # Command line options #
    ########################

    # shown with -h, --help
    help = ('Calculate per-database statistics used for Expert Database '
            'landing pages')

    def handle(self, *args, **options):
        """
        Django entry point
        """
        # Recompute statistics for every expert database.
        compute_database_stats()
Add an option to compute database_stats for a single database
"""
Copyright [2009-2015] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.core.management.base import BaseCommand
from optparse import make_option
from portal.models import Database, Rna, Xref, DatabaseStats
from portal.views import _get_json_lineage_tree
from django.db.models import Min, Max, Count, Avg
import json
####################
# Export functions #
####################
def compute_database_stats(database):
    """
    Precompute database statistics for display on Expert Database landing pages.

    For each expert database (or only the one named by *database*), update:
    avg_length
    min_length
    max_length
    num_sequences
    num_organisms
    length_counts
    taxonomic_lineage

    :param database: optional expert database name; when non-empty, only the
        matching database (compared case-insensitively) is recomputed.
    """
    for expert_db in Database.objects.order_by('-id').all():
        # Fix: compare case-insensitively on both sides instead of assuming
        # that descr is always stored in upper case.
        if database and expert_db.descr.upper() != database.upper():
            continue
        # Progress indicator (parenthesized form works on Python 2 and 3).
        print(expert_db.descr)
        context = dict()
        # Active (non-deleted) sequences cross-referenced to this database.
        rnas = Rna.objects.filter(xrefs__deleted='N',
                                  xrefs__db__descr=expert_db.descr)
        # avg_length, min_length, max_length, len_counts
        context.update(rnas.aggregate(min_length=Min('length'),
                                      max_length=Max('length'),
                                      avg_length=Avg('length')))
        # Histogram of sequence lengths, ordered for plotting.
        context['len_counts'] = list(rnas.values('length').
                                     annotate(counts=Count('length')).
                                     order_by('length'))
        # taxonomic_lineage
        xrefs = Xref.objects.select_related('accession').\
                filter(db__descr=expert_db.descr).iterator()
        lineages = _get_json_lineage_tree(xrefs)
        # update expert_db object
        expert_db.avg_length = context['avg_length']
        expert_db.min_length = context['min_length']
        expert_db.max_length = context['max_length']
        expert_db.num_sequences = expert_db.count_sequences()
        expert_db.num_organisms = expert_db.count_organisms()
        expert_db.save()
        # Stats rows are keyed by the database description; created empty on
        # first run, then overwritten below.
        expert_db_stats, created = DatabaseStats.objects.get_or_create(
            database=expert_db.descr,
            defaults={
                'length_counts': '',
                'taxonomic_lineage': ''
            })
        # django produces 'counts' keys, but d3 expects 'count' keys
        # NOTE(review): this string replace rewrites every occurrence of
        # 'counts' anywhere in the JSON payload, not just the key — safe only
        # while the payload contains nothing but 'length'/'counts' pairs.
        expert_db_stats.length_counts = json.dumps(context['len_counts']).\
                                        replace('counts', 'count')
        expert_db_stats.taxonomic_lineage = lineages
        expert_db_stats.save()
class Command(BaseCommand):
    """
    Usage:
    python manage.py database_stats
    """
    ########################
    # Command line options #
    ########################

    # Legacy optparse-style options (the pre-Django-1.8 'option_list'
    # mechanism, using optparse.make_option).
    option_list = BaseCommand.option_list + (
        make_option('-d',
                    default='',
                    dest='database',
                    help='[Optional] Expert Database that needs to be recomputed'),
    )

    # shown with -h, --help
    help = ('Calculate per-database statistics used for Expert Database '
            'landing pages')

    def handle(self, *args, **options):
        """
        Django entry point
        """
        # options['database'] defaults to '' (falsy), in which case all
        # databases are recomputed.
        compute_database_stats(options['database'])
|
# Copyright (C) 2015 Midokura SARL.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
import testtools
from oslo_utils import uuidutils
import neutron.common.rpc as n_rpc
import neutron.common.utils as n_utils
from neutron import context
from neutron.tests.unit import testlib_api
import neutron_taas.db.taas_db # noqa
import neutron_taas.extensions.taas as taas_ext
from neutron_taas.services.taas.service_drivers import taas_agent_api
from neutron_taas.services.taas import taas_plugin
class DummyError(Exception):
    """Stand-in exception raised by the mocked service driver in tests."""
class TestTaasPlugin(testlib_api.SqlTestCase):
    """Unit tests for the TaaS plugin's tap-service / tap-flow CRUD paths.

    RPC connections, agent callbacks and the service driver are mocked;
    each test asserts that the plugin invokes the driver's precommit and
    postcommit hooks with the expected plugin context and payload.
    """

    def setUp(self):
        super(TestTaasPlugin, self).setUp()
        # Neutralize RPC / agent-API side effects for the whole test.
        # NOTE(review): 'auto_spec' is not a kwarg mock recognizes (the real
        # one is 'autospec'), so no spec is actually enforced here — confirm.
        mock.patch.object(n_rpc, 'create_connection', auto_spec=True).start()
        mock.patch.object(taas_agent_api,
                          'TaasCallbacks', auto_spec=True).start()
        mock.patch.object(taas_agent_api,
                          'TaasAgentApi', auto_spec=True).start()
        self.driver = mock.MagicMock()
        # Make the plugin load this MagicMock as its only service driver.
        mock.patch('neutron.services.service_base.load_drivers',
                   return_value=({'dummy_provider': self.driver},
                                 'dummy_provider')).start()
        mock.patch('neutron.db.servicetype_db.ServiceTypeManager.get_instance',
                   return_value=mock.MagicMock()).start()
        self._plugin = taas_plugin.TaasPlugin()
        self._context = context.get_admin_context()
        # Canonical fixture data shared by the helpers below.
        self._tenant_id = 'tenant-X'
        self._network_id = uuidutils.generate_uuid()
        self._host_id = 'host-A'
        self._port_id = uuidutils.generate_uuid()
        self._port_details = {
            'tenant_id': self._tenant_id,
            'binding:host_id': self._host_id,
            'mac_address': n_utils.get_random_mac(
                'fa:16:3e:00:00:00'.split(':')),
        }
        self._tap_service = {
            'tenant_id': self._tenant_id,
            'name': 'MyTap',
            'description': 'This is my tap service',
            'port_id': self._port_id,
        }
        self._tap_flow = {
            'description': 'This is my tap flow',
            'direction': 'BOTH',
            'name': 'MyTapFlow',
            'source_port': self._port_id,
            'tenant_id': self._tenant_id,
        }

    @contextlib.contextmanager
    def tap_service(self):
        """Create a tap service, yield it, then verify driver hooks on exit."""
        req = {
            'tap_service': self._tap_service,
        }
        with mock.patch.object(self._plugin, '_get_port_details',
                               return_value=self._port_details):
            yield self._plugin.create_tap_service(self._context, req)
        # The plugin assigns the id; match it loosely in the comparisons.
        self._tap_service['id'] = mock.ANY
        self.driver.assert_has_calls([
            mock.call.create_tap_service_precommit(mock.ANY),
            mock.call.create_tap_service_postcommit(mock.ANY),
        ])
        pre_args = self.driver.create_tap_service_precommit.call_args[0][0]
        self.assertEqual(self._context, pre_args._plugin_context)
        self.assertEqual(self._tap_service, pre_args.tap_service)
        post_args = self.driver.create_tap_service_postcommit.call_args[0][0]
        self.assertEqual(self._context, post_args._plugin_context)
        self.assertEqual(self._tap_service, post_args.tap_service)

    @contextlib.contextmanager
    def tap_flow(self, tap_service, tenant_id=None):
        """Create a tap flow on *tap_service*, yield it, verify hooks on exit."""
        self._tap_flow['tap_service_id'] = tap_service
        if tenant_id is not None:
            self._tap_flow['tenant_id'] = tenant_id
        req = {
            'tap_flow': self._tap_flow,
        }
        with mock.patch.object(self._plugin, '_get_port_details',
                               return_value=self._port_details):
            yield self._plugin.create_tap_flow(self._context, req)
        # Ids are plugin-assigned; match loosely in the comparisons.
        self._tap_flow['id'] = mock.ANY
        self._tap_service['id'] = mock.ANY
        self.driver.assert_has_calls([
            mock.call.create_tap_flow_precommit(mock.ANY),
            mock.call.create_tap_flow_postcommit(mock.ANY),
        ])
        pre_args = self.driver.create_tap_flow_precommit.call_args[0][0]
        self.assertEqual(self._context, pre_args._plugin_context)
        self.assertEqual(self._tap_flow, pre_args.tap_flow)
        post_args = self.driver.create_tap_flow_postcommit.call_args[0][0]
        self.assertEqual(self._context, post_args._plugin_context)
        self.assertEqual(self._tap_flow, post_args.tap_flow)

    def test_create_tap_service(self):
        with self.tap_service():
            pass

    def test_create_tap_service_wrong_tenant_id(self):
        # A port owned by a different tenant must be rejected before any
        # driver hook fires.
        self._port_details['tenant_id'] = 'other-tenant'
        with testtools.ExpectedException(taas_ext.PortDoesNotBelongToTenant), \
                self.tap_service():
            pass
        self.assertEqual([], self.driver.mock_calls)

    def test_create_tap_service_reach_limit(self):
        # TODO(Yoichiro):Need to move this test to taas_rpc test
        pass

    def test_create_tap_service_failed_on_service_driver(self):
        # A postcommit failure in the driver must propagate to the caller.
        attr = {'create_tap_service_postcommit.side_effect': DummyError}
        self.driver.configure_mock(**attr)
        with testtools.ExpectedException(DummyError):
            req = {
                'tap_service': self._tap_service,
            }
            with mock.patch.object(self._plugin, '_get_port_details',
                                   return_value=self._port_details):
                self._plugin.create_tap_service(self._context, req)

    def test_delete_tap_service(self):
        with self.tap_service() as ts:
            self._plugin.delete_tap_service(self._context, ts['id'])
        self.driver.assert_has_calls([
            mock.call.delete_tap_service_precommit(mock.ANY),
            mock.call.delete_tap_service_postcommit(mock.ANY),
        ])
        pre_args = self.driver.delete_tap_service_precommit.call_args[0][0]
        self.assertEqual(self._context, pre_args._plugin_context)
        self.assertEqual(self._tap_service, pre_args.tap_service)
        post_args = self.driver.delete_tap_service_postcommit.call_args[0][0]
        self.assertEqual(self._context, post_args._plugin_context)
        self.assertEqual(self._tap_service, post_args.tap_service)

    def test_delete_tap_service_with_flow(self):
        # Deleting a service must cascade to its flows: flow hooks first,
        # then service hooks.
        with self.tap_service() as ts, \
                self.tap_flow(tap_service=ts['id']):
            self._plugin.delete_tap_service(self._context, ts['id'])
        self.driver.assert_has_calls([
            mock.call.delete_tap_flow_precommit(mock.ANY),
            mock.call.delete_tap_flow_postcommit(mock.ANY),
            mock.call.delete_tap_service_precommit(mock.ANY),
            mock.call.delete_tap_service_postcommit(mock.ANY),
        ])
        pre_args = self.driver.delete_tap_flow_precommit.call_args[0][0]
        self.assertEqual(self._context, pre_args._plugin_context)
        self.assertEqual(self._tap_flow, pre_args.tap_flow)
        post_args = self.driver.delete_tap_flow_postcommit.call_args[0][0]
        self.assertEqual(self._context, post_args._plugin_context)
        self.assertEqual(self._tap_flow, post_args.tap_flow)
        pre_args = self.driver.delete_tap_service_precommit.call_args[0][0]
        self.assertEqual(self._context, pre_args._plugin_context)
        self.assertEqual(self._tap_service, pre_args.tap_service)
        post_args = self.driver.delete_tap_service_postcommit.call_args[0][0]
        self.assertEqual(self._context, post_args._plugin_context)
        self.assertEqual(self._tap_service, post_args.tap_service)

    def test_delete_tap_service_non_existent(self):
        with testtools.ExpectedException(taas_ext.TapServiceNotFound):
            self._plugin.delete_tap_service(self._context, 'non-existent')

    def test_delete_tap_service_failed_on_service_driver(self):
        attr = {'delete_tap_service_postcommit.side_effect': DummyError}
        self.driver.configure_mock(**attr)
        with self.tap_service() as ts:
            with testtools.ExpectedException(DummyError):
                self._plugin.delete_tap_service(self._context, ts['id'])

    def test_create_tap_flow(self):
        with self.tap_service() as ts, self.tap_flow(tap_service=ts['id']):
            pass

    def test_create_tap_flow_wrong_tenant_id(self):
        with self.tap_service() as ts, \
                testtools.ExpectedException(taas_ext.TapServiceNotBelongToTenant), \
                self.tap_flow(tap_service=ts['id'], tenant_id='other-tenant'):
            pass

    def test_create_tap_flow_failed_on_service_driver(self):
        with self.tap_service() as ts:
            attr = {'create_tap_flow_postcommit.side_effect': DummyError}
            self.driver.configure_mock(**attr)
            with testtools.ExpectedException(DummyError):
                self._tap_flow['tap_service_id'] = ts['id']
                req = {
                    'tap_flow': self._tap_flow,
                }
                with mock.patch.object(self._plugin, '_get_port_details',
                                       return_value=self._port_details):
                    self._plugin.create_tap_flow(self._context, req)

    def test_delete_tap_flow(self):
        with self.tap_service() as ts, \
                self.tap_flow(tap_service=ts['id']) as tf:
            self._plugin.delete_tap_flow(self._context, tf['id'])
        # Pin the real id (the context-manager exit reset it to mock.ANY) so
        # the comparisons below are exact.
        self._tap_flow['id'] = tf['id']
        self.driver.assert_has_calls([
            mock.call.delete_tap_flow_precommit(mock.ANY),
            mock.call.delete_tap_flow_postcommit(mock.ANY),
        ])
        pre_args = self.driver.delete_tap_flow_precommit.call_args[0][0]
        self.assertEqual(self._context, pre_args._plugin_context)
        self.assertEqual(self._tap_flow, pre_args.tap_flow)
        post_args = self.driver.delete_tap_flow_postcommit.call_args[0][0]
        self.assertEqual(self._context, post_args._plugin_context)
        self.assertEqual(self._tap_flow, post_args.tap_flow)

    def test_delete_tap_flow_failed_on_service_driver(self):
        with self.tap_service() as ts, \
                self.tap_flow(tap_service=ts['id']) as tf:
            attr = {'delete_tap_flow_postcommit.side_effect': DummyError}
            self.driver.configure_mock(**attr)
            with testtools.ExpectedException(DummyError):
                self._plugin.delete_tap_flow(self._context, tf['id'])
Fix Bug 1623457
Gate jobs for TaaS were failing ever since Neutron started
returning both Project and Tenant IDs as a part of migration
to Keystone Auth3.
This patch closes this issue.
Change-Id: Ie6b3811e41a94721679c9178cdd5119bdad8208d
Closes-Bug: #1623457
# Copyright (C) 2015 Midokura SARL.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
import testtools
from oslo_utils import uuidutils
import neutron.common.rpc as n_rpc
import neutron.common.utils as n_utils
from neutron import context
from neutron.tests.unit import testlib_api
import neutron_taas.db.taas_db # noqa
import neutron_taas.extensions.taas as taas_ext
from neutron_taas.services.taas.service_drivers import taas_agent_api
from neutron_taas.services.taas import taas_plugin
class DummyError(Exception):
    """Stand-in exception raised by the mocked service driver in tests."""
class TestTaasPlugin(testlib_api.SqlTestCase):
    """Unit tests for the TaaS plugin's tap-service / tap-flow CRUD paths.

    RPC connections, agent callbacks and the service driver are mocked;
    each test asserts that the plugin invokes the driver's precommit and
    postcommit hooks with the expected plugin context and payload.  The
    fixtures carry both 'tenant_id' and 'project_id' because Neutron
    returns both during the Keystone v3 migration.
    """

    def setUp(self):
        super(TestTaasPlugin, self).setUp()
        # Neutralize RPC / agent-API side effects for the whole test.
        # NOTE(review): 'auto_spec' is not a kwarg mock recognizes (the real
        # one is 'autospec'), so no spec is actually enforced here — confirm.
        mock.patch.object(n_rpc, 'create_connection', auto_spec=True).start()
        mock.patch.object(taas_agent_api,
                          'TaasCallbacks', auto_spec=True).start()
        mock.patch.object(taas_agent_api,
                          'TaasAgentApi', auto_spec=True).start()
        self.driver = mock.MagicMock()
        # Make the plugin load this MagicMock as its only service driver.
        mock.patch('neutron.services.service_base.load_drivers',
                   return_value=({'dummy_provider': self.driver},
                                 'dummy_provider')).start()
        mock.patch('neutron.db.servicetype_db.ServiceTypeManager.get_instance',
                   return_value=mock.MagicMock()).start()
        self._plugin = taas_plugin.TaasPlugin()
        self._context = context.get_admin_context()
        # project_id and tenant_id are deliberately the same value: Neutron
        # reports both during the Keystone v3 migration.
        self._project_id = self._tenant_id = 'tenant-X'
        self._network_id = uuidutils.generate_uuid()
        self._host_id = 'host-A'
        self._port_id = uuidutils.generate_uuid()
        self._port_details = {
            'tenant_id': self._tenant_id,
            'binding:host_id': self._host_id,
            'mac_address': n_utils.get_random_mac(
                'fa:16:3e:00:00:00'.split(':')),
        }
        self._tap_service = {
            'tenant_id': self._tenant_id,
            'name': 'MyTap',
            'description': 'This is my tap service',
            'port_id': self._port_id,
            'project_id': self._project_id,
        }
        self._tap_flow = {
            'description': 'This is my tap flow',
            'direction': 'BOTH',
            'name': 'MyTapFlow',
            'source_port': self._port_id,
            'tenant_id': self._tenant_id,
            'project_id': self._project_id,
        }

    @contextlib.contextmanager
    def tap_service(self):
        """Create a tap service, yield it, then verify driver hooks on exit."""
        req = {
            'tap_service': self._tap_service,
        }
        with mock.patch.object(self._plugin, '_get_port_details',
                               return_value=self._port_details):
            yield self._plugin.create_tap_service(self._context, req)
        # The plugin assigns the id; match it loosely in the comparisons.
        self._tap_service['id'] = mock.ANY
        self.driver.assert_has_calls([
            mock.call.create_tap_service_precommit(mock.ANY),
            mock.call.create_tap_service_postcommit(mock.ANY),
        ])
        pre_args = self.driver.create_tap_service_precommit.call_args[0][0]
        self.assertEqual(self._context, pre_args._plugin_context)
        self.assertEqual(self._tap_service, pre_args.tap_service)
        post_args = self.driver.create_tap_service_postcommit.call_args[0][0]
        self.assertEqual(self._context, post_args._plugin_context)
        self.assertEqual(self._tap_service, post_args.tap_service)

    @contextlib.contextmanager
    def tap_flow(self, tap_service, tenant_id=None):
        """Create a tap flow on *tap_service*, yield it, verify hooks on exit."""
        self._tap_flow['tap_service_id'] = tap_service
        if tenant_id is not None:
            self._tap_flow['tenant_id'] = tenant_id
        req = {
            'tap_flow': self._tap_flow,
        }
        with mock.patch.object(self._plugin, '_get_port_details',
                               return_value=self._port_details):
            yield self._plugin.create_tap_flow(self._context, req)
        # Ids are plugin-assigned; match loosely in the comparisons.
        self._tap_flow['id'] = mock.ANY
        self._tap_service['id'] = mock.ANY
        self.driver.assert_has_calls([
            mock.call.create_tap_flow_precommit(mock.ANY),
            mock.call.create_tap_flow_postcommit(mock.ANY),
        ])
        pre_args = self.driver.create_tap_flow_precommit.call_args[0][0]
        self.assertEqual(self._context, pre_args._plugin_context)
        self.assertEqual(self._tap_flow, pre_args.tap_flow)
        post_args = self.driver.create_tap_flow_postcommit.call_args[0][0]
        self.assertEqual(self._context, post_args._plugin_context)
        self.assertEqual(self._tap_flow, post_args.tap_flow)

    def test_create_tap_service(self):
        with self.tap_service():
            pass

    def test_create_tap_service_wrong_tenant_id(self):
        # A port owned by a different tenant must be rejected before any
        # driver hook fires.
        self._port_details['tenant_id'] = 'other-tenant'
        with testtools.ExpectedException(taas_ext.PortDoesNotBelongToTenant), \
                self.tap_service():
            pass
        self.assertEqual([], self.driver.mock_calls)

    def test_create_tap_service_reach_limit(self):
        # TODO(Yoichiro):Need to move this test to taas_rpc test
        pass

    def test_create_tap_service_failed_on_service_driver(self):
        # A postcommit failure in the driver must propagate to the caller.
        attr = {'create_tap_service_postcommit.side_effect': DummyError}
        self.driver.configure_mock(**attr)
        with testtools.ExpectedException(DummyError):
            req = {
                'tap_service': self._tap_service,
            }
            with mock.patch.object(self._plugin, '_get_port_details',
                                   return_value=self._port_details):
                self._plugin.create_tap_service(self._context, req)

    def test_delete_tap_service(self):
        with self.tap_service() as ts:
            self._plugin.delete_tap_service(self._context, ts['id'])
        self.driver.assert_has_calls([
            mock.call.delete_tap_service_precommit(mock.ANY),
            mock.call.delete_tap_service_postcommit(mock.ANY),
        ])
        pre_args = self.driver.delete_tap_service_precommit.call_args[0][0]
        self.assertEqual(self._context, pre_args._plugin_context)
        self.assertEqual(self._tap_service, pre_args.tap_service)
        post_args = self.driver.delete_tap_service_postcommit.call_args[0][0]
        self.assertEqual(self._context, post_args._plugin_context)
        self.assertEqual(self._tap_service, post_args.tap_service)

    def test_delete_tap_service_with_flow(self):
        # Deleting a service must cascade to its flows: flow hooks first,
        # then service hooks.
        with self.tap_service() as ts, \
                self.tap_flow(tap_service=ts['id']):
            self._plugin.delete_tap_service(self._context, ts['id'])
        self.driver.assert_has_calls([
            mock.call.delete_tap_flow_precommit(mock.ANY),
            mock.call.delete_tap_flow_postcommit(mock.ANY),
            mock.call.delete_tap_service_precommit(mock.ANY),
            mock.call.delete_tap_service_postcommit(mock.ANY),
        ])
        pre_args = self.driver.delete_tap_flow_precommit.call_args[0][0]
        self.assertEqual(self._context, pre_args._plugin_context)
        self.assertEqual(self._tap_flow, pre_args.tap_flow)
        post_args = self.driver.delete_tap_flow_postcommit.call_args[0][0]
        self.assertEqual(self._context, post_args._plugin_context)
        self.assertEqual(self._tap_flow, post_args.tap_flow)
        pre_args = self.driver.delete_tap_service_precommit.call_args[0][0]
        self.assertEqual(self._context, pre_args._plugin_context)
        self.assertEqual(self._tap_service, pre_args.tap_service)
        post_args = self.driver.delete_tap_service_postcommit.call_args[0][0]
        self.assertEqual(self._context, post_args._plugin_context)
        self.assertEqual(self._tap_service, post_args.tap_service)

    def test_delete_tap_service_non_existent(self):
        with testtools.ExpectedException(taas_ext.TapServiceNotFound):
            self._plugin.delete_tap_service(self._context, 'non-existent')

    def test_delete_tap_service_failed_on_service_driver(self):
        attr = {'delete_tap_service_postcommit.side_effect': DummyError}
        self.driver.configure_mock(**attr)
        with self.tap_service() as ts:
            with testtools.ExpectedException(DummyError):
                self._plugin.delete_tap_service(self._context, ts['id'])

    def test_create_tap_flow(self):
        with self.tap_service() as ts, self.tap_flow(tap_service=ts['id']):
            pass

    def test_create_tap_flow_wrong_tenant_id(self):
        with self.tap_service() as ts, \
                testtools.ExpectedException(taas_ext.TapServiceNotBelongToTenant), \
                self.tap_flow(tap_service=ts['id'], tenant_id='other-tenant'):
            pass

    def test_create_tap_flow_failed_on_service_driver(self):
        with self.tap_service() as ts:
            attr = {'create_tap_flow_postcommit.side_effect': DummyError}
            self.driver.configure_mock(**attr)
            with testtools.ExpectedException(DummyError):
                self._tap_flow['tap_service_id'] = ts['id']
                req = {
                    'tap_flow': self._tap_flow,
                }
                with mock.patch.object(self._plugin, '_get_port_details',
                                       return_value=self._port_details):
                    self._plugin.create_tap_flow(self._context, req)

    def test_delete_tap_flow(self):
        with self.tap_service() as ts, \
                self.tap_flow(tap_service=ts['id']) as tf:
            self._plugin.delete_tap_flow(self._context, tf['id'])
        # Pin the real id (the context-manager exit reset it to mock.ANY) so
        # the comparisons below are exact.
        self._tap_flow['id'] = tf['id']
        self.driver.assert_has_calls([
            mock.call.delete_tap_flow_precommit(mock.ANY),
            mock.call.delete_tap_flow_postcommit(mock.ANY),
        ])
        pre_args = self.driver.delete_tap_flow_precommit.call_args[0][0]
        self.assertEqual(self._context, pre_args._plugin_context)
        self.assertEqual(self._tap_flow, pre_args.tap_flow)
        post_args = self.driver.delete_tap_flow_postcommit.call_args[0][0]
        self.assertEqual(self._context, post_args._plugin_context)
        self.assertEqual(self._tap_flow, post_args.tap_flow)

    def test_delete_tap_flow_failed_on_service_driver(self):
        with self.tap_service() as ts, \
                self.tap_flow(tap_service=ts['id']) as tf:
            attr = {'delete_tap_flow_postcommit.side_effect': DummyError}
            self.driver.configure_mock(**attr)
            with testtools.ExpectedException(DummyError):
                self._plugin.delete_tap_flow(self._context, tf['id'])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.