CombinedText stringlengths 4 3.42M |
|---|
b340aef8-2eae-11e5-95e7-7831c1d44c14
b347c09e-2eae-11e5-8419-7831c1d44c14
b347c09e-2eae-11e5-8419-7831c1d44c14 |
6d51e842-2eae-11e5-8aeb-7831c1d44c14
6d57e207-2eae-11e5-b009-7831c1d44c14
6d57e207-2eae-11e5-b009-7831c1d44c14 |
65431861-2eae-11e5-9e71-7831c1d44c14
6548af70-2eae-11e5-9489-7831c1d44c14
6548af70-2eae-11e5-9489-7831c1d44c14 |
add example code
|
8bc7363a-2eae-11e5-99a9-7831c1d44c14
8bd24980-2eae-11e5-a58e-7831c1d44c14
8bd24980-2eae-11e5-a58e-7831c1d44c14 |
from __future__ import with_statement
import os
import sys
import yaml
import time
import gzip
import curses
import os.path
import logging
import optparse
import threading
import subprocess
from random import random
from marietje import Marietje, MarietjeException
from cStringIO import StringIO
VERSION = 7
# getch() timeout in milliseconds: poll fast while fetches are pending,
# slower during normal operation.
INITIAL_TIMEOUT = 100
DEFAULT_TIMEOUT = 1000
# Color-pair slots; the CP_C* variants are the inverted "cursor line" colors.
(CP_WHITE, CP_BLUE, CP_GREEN, CP_RED,
        CP_CWHITE, CP_CBLUE, CP_CGREEN, CP_CRED) = range(8)
# Cleared by curses_use_default_colors() when the terminal (or the curses
# build) has no color support; the curses_* wrappers below check it.
GOT_COLORS = True
def curses_use_default_colors(*args, **kwargs):
    """ Wrapper around curses.use_default_colors that degrades gracefully:
        when the curses build or the terminal lacks color support, flip
        the module-wide GOT_COLORS flag instead of crashing. """
    global GOT_COLORS
    has_colors = getattr(curses, 'has_colors', None)
    if has_colors is None or not has_colors():
        GOT_COLORS = False
        return
    curses.use_default_colors(*args, **kwargs)
def curses_color_pair(*args, **kwargs):
    """ Like curses.color_pair, but falls back to A_BOLD when the
        terminal has no color support. """
    if GOT_COLORS:
        return curses.color_pair(*args, **kwargs)
    return curses.A_BOLD
def curses_init_pair(*args, **kwargs):
    """ Like curses.init_pair, but a no-op when the terminal has no
        color support. """
    if GOT_COLORS:
        return curses.init_pair(*args, **kwargs)
def format_list(l):
    """ Formats a list <l> neatly: "a", "a and b", "a, b and c".

    Returns '' for an empty list.  (The old code returned None, which
    rendered as the literal string "None" when interpolated into the
    "Fetching %s" status lines.) """
    if not l:
        return ''
    if len(l) == 1:
        return str(l[0])
    return ', '.join(str(x) for x in l[:-1]) + ' and ' + str(l[-1])
def format_time(s):
    """ Formats <s> seconds into [-][<hours>:]<minutes>:<seconds>,
        nicely.

    <s> may be negative (a '-' is prepended) and is truncated to an
    int first, so float input no longer produces '1.0:3.0'-style
    garbage. """
    s = int(s)
    if s < 0:
        sign = '-'
        s = -s
    else:
        sign = ''
    # divmod keeps the arithmetic integral in both Python 2 and 3.
    minutes, seconds = divmod(s, 60)
    hours, minutes = divmod(minutes, 60)
    if hours != 0:
        return sign + str(hours) + ':' + str(minutes).zfill(2) + \
                ':' + str(seconds).zfill(2)
    return sign + str(minutes) + ':' + str(seconds).zfill(2)
class ScrollingColsWindow:
    """ Base of both the queue and the search result view. Shows a dataset
        in a table with an optional cursor.

        Subclasses provide the data through get_data_info() and
        get_cells(); this class handles column layout, scrolling and
        drawing on the curses window <w>. """
    def __init__(self, w, use_cursor=False):
        self.w = w                      # curses (sub)window we draw on
        self.needRedraw = True          # full redraw requested
        self.needLayout = False         # column widths must be recomputed
        self.x_offset = 0               # horizontal scroll offset
        self.y_offset = 0               # index of the first visible row
        # The old_* fields mirror the values used for the previous draw,
        # so update() can detect changes without explicit invalidation.
        self.old_x_offset = 0
        self.old_y_offset = 0
        self.old_w = 0
        self.old_h = 0
        self.y_max = 0                  # total number of data rows
        self.col_ws = None              # column widths computed by layout()
        self.use_cursor = use_cursor
        self.c_offset = 0               # cursor row, relative to the view
        self.c_middle = 0               # middle line of the view
        self.old_c_offset = 0
    def scroll_page_up(self):
        self.y_offset -= self.old_h
    def scroll_page_down(self):
        self.y_offset += self.old_h
    def scroll_up(self):
        # With a cursor: move the cursor itself until it is back at the
        # middle of the view (or everything fits on one page); only then
        # scroll the view.
        if self.use_cursor and (self.c_middle < self.c_offset or \
                self.y_max < self.old_h):
            self.c_offset -= 1
        else:
            self.y_offset -= 1
    def scroll_down(self):
        if self.use_cursor and (self.c_middle > self.c_offset or \
                self.y_max < self.old_h):
            self.c_offset += 1
        else:
            self.y_offset += 1
    def scroll_home(self):
        self.y_offset = 0
    def scroll_end(self):
        # update() clamps this back into range on the next draw.
        self.y_offset = self.y_max
    def scroll_right(self):
        # An offset of 1 would only hide the '>' marker itself: skip it.
        self.x_offset += 1
        if self.x_offset == 1:
            self.x_offset = 2
    def scroll_left(self):
        self.x_offset -= 1
        if self.x_offset <= 1:
            self.x_offset = 0
    def get_data_info(self):
        """ Should return the number of rows, the average sizes of the
            columns and the maximum sizes of the columns """
        raise NotImplementedError
    def _layout(self, vals, w, maxs):
        """ Helper for layout: finds the best way to fit columns of
            preferred widths of <vals> to a total width of <w> with
            maximum values <maxs> """
        if sum(vals) == 0:
            return [0] * (len(vals) - 1) + [w]
        # Scale the preferred widths to the available width ...
        r = float(w) / float(sum(vals))
        ret = map(lambda x: int(x*r), vals)
        # ... and spread the rounding error over the first columns.
        e = w - sum(ret)
        for i in xrange(abs(e)):
            ret[i] += (1 if e > 0 else -1)
        # If the assigned row width is less than its maximum, we'll
        # try to give at least a width of three for the '>' and '$'.
        cur = 0
        for i in xrange(0, len(ret)):
            last = ret[i]
            while ret[i] < maxs[i] and ret[i] < 3:
                # Steal one cell from the next column that can spare it.
                for j in xrange(len(ret)):
                    if ret[(j + cur) % len(ret)] > 3:
                        ret[(j + cur) % len(ret)] -= 1
                        ret[i] += 1
                        break
                cur = (cur + j + 1) % len(ret)
                if ret[i] == last:
                    # We couldn't find any more room
                    return ret
                last = ret[i]
        return ret
    def layout(self):
        """ Calculate the widths of the separate columns """
        self.needLayout = False
        h, w = self.w.getmaxyx()
        di = self.get_data_info()
        if di is None:
            # No data (yet): nothing to lay out.
            self.y_max = 0
            return
        N, avgs, maxs = di
        if sum(maxs) <= w:
            # Everything fits: give every column its maximum width.
            self.col_ws = self._layout(maxs, w, maxs)
        else:
            # Divide the width proportionally to the average widths.
            self.col_ws = self._layout(avgs, w, maxs)
        self.y_max = N
    def draw_cell_text(self, val, start, end, colors):
        # Hook for subclasses (e.g. search-match highlighting).
        self.w.addstr(val[start:end])
    def draw_cell(self, y, cx, cw, val, colors):
        """ Draws the text <val> in the cell at (<y>, <cx>) of width
            <cw>, honouring the horizontal scroll offset.  Truncation on
            the right is marked with '$'; text scrolled off on the left
            with '>'. """
        self.w.move(y, cx)
        if len(val) > cw:
            if self.x_offset == 0:
                self.draw_cell_text(val, 0, cw-1, colors)
                self.w.addch('$', colors[1])
            else:
                self.w.addch('>', colors[2])
                off = self.x_offset
                if off + cw - 2 > len(val):
                    # Don't scroll past the end of the text.
                    off = len(val) - cw + 2
                self.draw_cell_text(val, off, off+cw-2,
                        colors)
                self.w.addch('$', colors[1])
        else:
            self.draw_cell_text(val, 0, len(val), colors)
    def draw_cols_line(self, y, cells, is_cursor):
        """ Draws a line with columns """
        self.w.move(y, 0)
        self.w.clrtoeol()
        if is_cursor:
            # Cursor line: inverted colors over the whole width.
            self.w.attron(curses_color_pair(CP_CWHITE))
            self.w.hline(' ', self.w.getmaxyx()[1])
            colors = map(curses_color_pair,
                    [CP_CWHITE, CP_CBLUE, CP_CGREEN, CP_CRED])
        else:
            colors = map(curses_color_pair,
                    [CP_WHITE, CP_BLUE, CP_GREEN, CP_RED])
        self.w.move(y, 0)
        cx = 0
        for j in xrange(len(self.col_ws)):
            if self.col_ws[j] == 0:
                assert cells[j] == ''
                continue
            cw = self.col_ws[j]
            try:
                self.draw_cell(y, cx, cw, cells[j], colors)
            except curses.error:
                if y == self.w.getmaxyx()[0] - 1 and \
                        cx + cw == sum(self.col_ws):
                    # curses doesn't like us
                    # writing to the top right:
                    # ignore.
                    pass
                else:
                    # Mark the cell as broken instead of crashing.
                    try:
                        self.w.move(y, cx)
                        self.w.addch('!', colors[3])
                    except curses.error:
                        # This shouldn't happen!
                        raise Exception, (y, cx)
            cx += cw
        if is_cursor:
            self.w.attroff(curses_color_pair(
                    CP_CWHITE))
    def draw_line(self, y, is_cursor):
        """ Draws a line """
        self.w.move(y, 0)
        if y + self.y_offset >= self.y_max:
            # Past the end of the data: vi-style '~' filler.
            self.w.addstr('~', curses_color_pair(
                    CP_BLUE))
            self.w.clrtoeol()
        else:
            cells = self.get_cells(y + self.y_offset)
            self.draw_cols_line(y, cells, is_cursor)
    def update(self, forceRedraw=False):
        """ Update the view """
        h, w = self.w.getmaxyx()
        if not forceRedraw and not self.needRedraw:
            # Not touched: only redraw when scrolling, resizing or the
            # cursor changed something since the previous draw.
            if self.old_x_offset != self.x_offset or \
                    self.old_y_offset != self.y_offset or \
                    self.old_w != w or self.old_h != h or \
                    (self.use_cursor and \
                    self.old_c_offset != self.c_offset):
                pass
            else:
                return
        self.needRedraw = False
        if self.old_w != w or self.old_h != h:
            self.old_h = h
            self.old_w = w
            self.needLayout = True
            if self.use_cursor:
                self.c_middle = h/2
        if self.needLayout:
            self.layout()
        if self.col_ws is None:
            self.y_max = 0
        # Clamp the scroll offset into range; nudge the cursor a step in
        # the opposite direction to compensate.
        if self.y_offset + h > self.y_max:
            self.y_offset = self.y_max - h
            if self.use_cursor:
                self.c_offset += 1
        if self.y_offset < 0:
            self.y_offset = 0
            if self.use_cursor:
                self.c_offset -= 1
        if self.use_cursor:
            # Keep the cursor inside the view and on an existing row.
            if self.c_offset < 0:
                self.c_offset = 0
            elif self.c_offset + self.y_offset >= self.y_max:
                self.c_offset = min(h - 1,
                        self.y_max - self.y_offset - 1)
        start = time.time()  # NOTE(review): unused; leftover timing code?
        for y in xrange(h):
            self.draw_line(y, self.use_cursor and y==self.c_offset)
        # We update old_ here for they might've been updated
        # by self.draw_line or above
        self.old_y_offset = self.y_offset
        self.old_x_offset = self.x_offset
        if self.use_cursor:
            self.old_c_offset = self.c_offset
        self.w.noutrefresh()
    def touch(self, layout=False):
        """ Touches the window to redraw. If <layout>, also recompute
            the column layout """
        self.needRedraw = True
        if layout: self.needLayout = True
    def get_view_region(self):
        """ Return view region information: start and end of the region
            currently viewed in the dataset and the total amount of
            lines """
        return (self.y_offset, self.y_offset + self.old_h, self.y_max)
class SearchWindow(ScrollingColsWindow):
    """ Shows the tracks matching the current query, with a cursor to
        pick a track to request and optional highlighting of the
        matches. """
    def __init__(self, w, m, highlight=True):
        ScrollingColsWindow.__init__(self, w, use_cursor=True)
        self.needDataInfoRecreate = False
        self.data_info = None   # cached (N, avgs, maxs) for the layout
        self.data = None        # track ids matching the current query
        self.m = m              # the Marietje instance
        self.query = None
        self.highlight = highlight
    def draw_cell_text(self, val, start, end, colors):
        """ As ScrollingColsWindow.draw_cell_text, but highlights the
            occurrences of the query inside the visible part of
            <val>. """
        if not self.highlight:
            return ScrollingColsWindow.draw_cell_text(self,
                    val, start, end, colors)
        # Collect in <idxs> the boundaries of the alternating
        # non-matching/matching runs: 0, (start, end)*, len(val).
        ridx = -1
        idxs = [0]
        val_lower = val.lower()
        while True:
            ridx = val_lower.find(self.query, ridx+1)
            if ridx == -1:
                break
            if ridx < idxs[-1]:
                # Overlapping match: extend the previous run.
                idxs[-1] = ridx + len(self.query)
            else:
                idxs.append(ridx)
                idxs.append(ridx + len(self.query))
        idxs.append(len(val))
        v = list(idxs)
        v.sort()
        assert v == idxs
        # Draw the runs, alternating normal/highlighted, clipped to the
        # visible range [start, end).
        m = True
        for i in xrange(0, len(idxs)-1):
            m = not m
            istart, iend = idxs[i:i+2]
            if end <= istart: continue
            if iend <= start: continue
            istart = max(istart, start)
            iend = min(iend, end)
            if istart == iend: continue
            self.w.attron(colors[3 if m else 0])
            self.w.addstr(val[istart:iend])
            self.w.attroff(colors[3 if m else 0])
    def set_query(self, q):
        """ Sets a new query and invalidates the cached results. """
        if self.query == q:
            return
        self.query = q
        self.data = None
        self.needDataInfoRecreate = True
    def touch(self, layout=False, data=False):
        """ As ScrollingColsWindow.touch; with <data> the cached data
            info is recreated too. """
        if data:
            self.needDataInfoRecreate = True
        ScrollingColsWindow.touch(self, layout=layout)
    def fetch_data(self):
        """ Runs the current query against the song database. """
        return self.m.query(self.query)
    def get_data_info(self):
        if self.data is None:
            if not self.m.songs_fetched:
                # Song database not in yet: nothing to show.
                return None
            self.data = self.fetch_data()
        if len(self.data) == 0:
            return None
        if self.data_info is None or \
                self.needDataInfoRecreate:
            self.needDataInfoRecreate = False
            self.data_info = None
        if self.data_info is None:
            self.data_info = self.create_data_info()
        return self.data_info
    def create_data_info(self):
        """ Computes (N, average column widths, maximum column widths)
            over all result rows, for the layout code. """
        N = len(self.data)
        l = len(self.get_cells(0))
        sums = [0]*l
        maxs = [0]*l
        for i in xrange(N):
            cells = self.get_cells(i)
            for j in xrange(l):
                sums[j] += len(cells[j])
                maxs[j] = max(len(cells[j]), maxs[j])
        return (N, map(lambda x: x/N, sums), maxs)
    def get_cells(self, j):
        # (artist, title) of the j-th result.
        return (self.m.songs[self.data[j]][0],
                self.m.songs[self.data[j]][1])
    def request_track(self):
        """ Requests the track under the cursor.  A no-op when there are
            no results: enter on an empty (or not yet fetched) result
            set used to raise an IndexError here. """
        if not self.data:
            return
        cpos = self.c_offset + self.y_offset
        track_id = self.data[cpos]
        self.m.request_track(track_id)
class QueueWindow(ScrollingColsWindow):
    """ Shows the currently playing song (first row) followed by the
        request queue, with live countdowns when start times are
        known. """
    def __init__(self, w, m):
        ScrollingColsWindow.__init__(self, w)
        self.m = m                        # the Marietje instance
        self.data_info = None             # cached (N, avgs, maxs)
        self.needDataInfoRecreate = False
        self.time_lut = None              # per-row absolute start times
        self.last_redraw = 0
    def create_data_info(self):
        """ Computes (N, average column widths, maximum column widths)
            over all rows, for the layout code. """
        N = len(self.m.queue) + 1
        l = len(self.get_cells(0))
        sums = [0]*l
        maxs = [0]*l
        for i in xrange(N):
            cells = self.get_cells(i)
            for j in xrange(l):
                sums[j] += len(cells[j])
                maxs[j] = max(len(cells[j]), maxs[j])
        return (N, map(lambda x: x/N, sums), maxs)
    def _nowPlaying_line(self):
        """ Returns the line containing the currently playing song """
        if len(self.m.queue) == 0:
            timeLeft = format_time(int(self.m.queueOffsetTime - \
                    time.time()))
        else:
            # The countdown on the first queued song would equal
            # <timeleft>.
            timeLeft = ''
            self.m.queueOffsetTime  # NOTE(review): no-op statement; leftover?
        if not self.m.nowPlaying[0] in self.m.songs:
            return ('', 'unknown track', '#%s' % \
                    self.m.nowPlaying[0], timeLeft)
        artist, title = self.m.songs[self.m.nowPlaying[0]]
        return ('', artist, title, timeLeft)
    def get_cells(self, l):
        """ Returns the cells of row <l>: row 0 is the now-playing
            line, the rest come from the queue. """
        if l == 0:
            if not self.m.playing_fetched or \
                    not self.m.songs_fetched:
                return ('','','','')
            return self._nowPlaying_line()
        l -= 1
        if self.time_lut is None:
            # No start times known yet: show the song length instead
            # of a countdown.
            t = format_time(self.m.queue[l][2])
        else:
            t = format_time(int(self.time_lut[l] - time.time()))
        return (self.m.queue[l][3],
                self.m.queue[l][0],
                self.m.queue[l][1],
                t)
    def get_data_info(self):
        if not self.m.queue_fetched or len(self.m.queue) == 0:
            if not self.m.playing_fetched or \
                    not self.m.songs_fetched:
                return None
            N = 1
        else:
            N = 1 + len(self.m.queue)
        if self.data_info is None or \
                self.data_info[0] != N or \
                self.needDataInfoRecreate:
            self.needDataInfoRecreate = False
            self.time_lut = None
            # We first need to create self.time_lut, for
            # create_data_info depends on it
            self.data_info = None
        # Compute the timestamps of initiation of each of
        # the queued tracks
        if self.time_lut is None:
            if self.m.playing_fetched:
                id, songStarted, songLength, \
                        serverTime = self.m.nowPlaying
                offset = self.m.queueOffsetTime
                self.time_lut = list()
                for i in xrange(len(self.m.queue)):
                    self.time_lut.append(offset)
                    offset += self.m.queue[i][2]
        if self.data_info is None:
            self.data_info = self.create_data_info()
        return self.data_info
    def reset(self):
        """ Resets cached information about the currently playing
            track and the queue """
        self.time_lut = None
        self.data_info = None
    def update(self, forceRedraw=False):
        """ Overload update to keep track of the updates to allow
            per-second reloads if we've got countdowns to update """
        if not forceRedraw and not self.time_lut is None and \
                (time.time() - self.last_redraw) > 0.7:
            self.needRedraw = True
            self.last_redraw = time.time()
        ScrollingColsWindow.update(self, forceRedraw)
    def touch(self, layout=False, data=False):
        """ As ScrollingColsWindow.touch; with <data> the cached data
            info (and the time lut) is recreated too. """
        if data:
            self.needDataInfoRecreate = True
        ScrollingColsWindow.touch(self, layout=layout)
class CursesMarietje:
def __init__(self, host, port, userdir):
self.running = False
self.refresh_status = True
self.statusline = ''
self.status_shown_once = False
self.old_query = ''
self.query = ''
self.userdir = os.path.expanduser(
os.path.join('~', userdir))
self.options = {}
if not os.path.exists(self.userdir):
try:
os.mkdir(self.userdir)
except Exception, e:
self.userdir = None
else:
fp = os.path.join(self.userdir, 'config')
if os.path.exists(fp):
with open(fp) as f:
self.options = yaml.load(f)
if not 'marietje' in self.options:
self.options['marietje'] = dict()
if not 'username' in self.options['marietje']:
self.options['marietje']['username'] = os.getlogin()
self.m = Marietje(self.options['marietje']['username'],
queueCb=self.on_queue_fetched,
songCb=self.on_songs_fetched,
playingCb=self.on_playing_fetched,
host=host,
port=port)
self.l = logging.getLogger('CursesMarietje')
if not self.userdir is None:
fp = os.path.join(self.userdir, 'songs-cache')
if os.path.exists(fp):
try:
with open(fp) as f:
self.m.songs_from_cache(f)
except Exception, e:
self.l.exception("Exception while "+
"reading cache")
# We silently assume self.m is in a
# consistent state in exception.
def refetch(self, fetchSongs=True, fetchQueue=True,
fetchPlaying=True, force=False):
""" If all requested are fetched, refetch them. If some of
them are not fetched, fetch only those. """
if force:
pass
elif (not fetchPlaying or self.m.playing_fetched or \
self.m.playing_fetching) and \
(not fetchSongs or self.m.songs_fetched or \
self.m.songs_fetching) and \
(not fetchQueue or self.m.queue_fetched or \
self.m.queue_fetching):
fetchPlaying = fetchPlaying and \
not self.m.playing_fetching
fetchSongs = fetchSongs and \
not self.m.songs_fetching
fetchQueue = fetchQueue and \
not self.m.queue_fetching
self.set_status("Refetching %s" % format_list(
(('playing',) if fetchPlaying else ()) +
(('songs',) if fetchSongs else ()) +
(('queue',) if fetchQueue else ())))
else:
fetchPlaying = not self.m.playing_fetched and \
not self.m.playing_fetching
fetchSongs = not self.m.songs_fetched and \
not self.m.songs_fetching
fetchQueue = not self.m.queue_fetched and \
not self.m.queue_fetching
self.set_status("Fetching %s" % format_list(
(('playing',) if fetchPlaying else ()) +
(('songs',) if fetchSongs else ()) +
(('queue',) if fetchQueue else ())))
self.update_timeout = True
self.timeout = INITIAL_TIMEOUT
self.m.start_fetch(fetchSongs=fetchSongs,
fetchPlaying=fetchPlaying,
fetchQueue=fetchQueue)
self.queue_main.reset()
def set_status(self, value):
self.l.info(value)
self.statusline = value
self.refresh_status = True
self.status_shown_once = False
def run(self):
self.log = StringIO()
logging.basicConfig(stream=self.log,
level=logging.DEBUG,
format="%(asctime)s:%(levelname)s:"+
"%(name)s:%(levelname)s:%(message)s")
self._been_setup = False
self.running = True
while self.running:
curses.wrapper(self._inside_curses)
if not self.userdir is None:
with open(os.path.join(self.userdir,
'config'), 'w') as f:
self.options = yaml.dump(self.options, f)
with open(os.path.join(self.userdir,
'songs-cache'), 'w') as f:
self.m.cache_songs_to(f)
def _inside_curses(self, window):
if not self._been_setup:
self._setup(window)
self._been_setup = True
self._main_loop()
def _setup(self, window):
curses_use_default_colors()
curses_init_pair(CP_BLUE, curses.COLOR_BLUE, -1)
curses_init_pair(CP_GREEN, curses.COLOR_GREEN, -1)
curses_init_pair(CP_RED, curses.COLOR_RED, -1)
curses_init_pair(CP_CWHITE, curses.COLOR_BLACK,
curses.COLOR_WHITE)
curses_init_pair(CP_CBLUE, curses.COLOR_BLUE,
curses.COLOR_WHITE)
curses_init_pair(CP_CGREEN, curses.COLOR_GREEN,
curses.COLOR_WHITE)
curses_init_pair(CP_CRED, curses.COLOR_RED,
curses.COLOR_WHITE)
self.window = window
h,w = self.window.getmaxyx()
self.queue_main = QueueWindow(self.window.derwin(h-1,w,0,0),
self.m)
if not 'search-window' in self.options:
self.options['search-window'] = dict()
if not 'highlight' in self.options['search-window']:
self.options['search-window']['highlight'] = True
self.search_main = SearchWindow(self.window.derwin(h-1,w,0,0),
self.m, highlight=self.options[
'search-window']['highlight'])
self.status_w = self.window.derwin(1, w, h-1, 0)
self.main = self.queue_main
self.refetch(force=True)
def _main_loop(self):
window = self.window
h,w = self.window.getmaxyx()
while True:
if self.update_timeout:
self.update_timeout = False
window.timeout(self.timeout)
try:
k = window.getch()
except KeyboardInterrupt:
self.running = False
break
forceRedraw = False
if k == -1:
pass
elif k == 27:
window.timeout(0)
try:
try:
k = window.getch()
except KeyboardInterrupt:
self.running = False
break
if k == -1:
pass
elif k == ord('x'):
self.running = False
break
elif k == ord('r'):
forceRedraw = True
elif k == ord('R'):
self.window.redrawwin()
forceRedraw = True
elif k == ord('f'):
self.refetch(fetchSongs=False)
elif k == ord('F'):
self.refetch()
elif k == ord('?'):
self.show_help()
# We break the main loop, which
# is then reentered via
# curses.wrapper
break
elif k == ord('a'):
self.query = '*'
finally:
window.timeout(self.timeout)
elif k == 410: # redraw
h, w = self.window.getmaxyx()
self.queue_main.w.resize(h-1,w)
self.queue_main.touch()
self.status_w.resize(1,w)
self.status_w.mvwin(h-1, 0)
self.refresh_status = True
elif k == 262: # home
self.main.scroll_home()
self.refresh_status = True
elif k == 360: # end
self.main.scroll_end()
self.refresh_status = True
elif k == 339: # page up
self.main.scroll_page_up()
self.refresh_status = True
elif k == 338: # page down
self.main.scroll_page_down()
self.refresh_status = True
elif k == 261: # right
self.main.scroll_right()
self.refresh_status = True
elif k == 260: # left
self.main.scroll_left()
self.refresh_status = True
elif k == 259: # up
self.main.scroll_up()
self.refresh_status = True
elif k == 258: # down
self.main.scroll_down()
self.refresh_status = True
elif k == ord('?'):
self.show_help()
# We break the main loop, which
# is then reentered via
# curses.wrapper
break
elif k == 263 or k == 127: # backspace
if len(self.query) != 0:
self.query = self.query[:-1]
elif k == 23: # C-w
if len(self.query) != 0:
idx = self.query.rfind(' ', 0,
len(self.query)-1)
if idx == -1:
self.query = ''
else:
self.query = self.query[:idx+1]
elif k == 21: # C-u
if len(self.query) != 0:
self.query = ''
elif k == 20: # C-t
if len(self.query) >= 2:
self.query = (self.query[:-2] +
self.query[-1] +
self.query[-2])
elif self.main is self.search_main and k == 10: # RET
try:
self.search_main.request_track()
except MarietjeException, e:
self.l.exception("Exception while "+
"requesting track")
self.set_status(str(e))
self.refetch(fetchSongs=False)
self.query = ''
elif 0 < k and k < 128 and \
chr(k).lower() in self.m.cs_lut:
self.query += chr(k).lower()
else:
self.set_status((
'Unknown key (%s). Press '+
'Alt+x to quit, Alt+? for help') % k)
if self.main is self.queue_main \
and len(self.query) != 0:
self.main = self.search_main
self.main.touch()
elif self.main is self.search_main \
and len(self.query) == 0:
self.main = self.queue_main
self.main.touch()
if self.main is self.queue_main:
if self.m.playing_fetched and \
time.time() > self.m.queueOffsetTime:
self.refetch(fetchSongs=False)
if self.query != self.old_query:
if len(self.query) > 1 and self.query[0] == '*':
self.query = self.query[1:]
self.old_query = self.query
self.refresh_status = True
if self.main is self.search_main:
self.search_main.set_query(self.query)
self.search_main.touch(layout=True)
self.main.update(forceRedraw=forceRedraw)
self.update_status(forceRedraw=forceRedraw)
curses.doupdate()
def _status_attr(self, fetching, fetched):
ret = 0
if fetched:
ret |= curses.A_BOLD
if fetching:
ret |= curses_color_pair(CP_GREEN)
else:
if fetching:
ret |= curses_color_pair(CP_GREEN)
else:
ret |= curses_color_pair(CP_RED)
return ret
def update_status(self, forceRedraw):
if not self.refresh_status and not forceRedraw:
return
h, w = self.status_w.getmaxyx()
self.refresh_status = False
self.status_w.clear()
pos = '%s-%s|%s' % self.main.get_view_region()
if len(pos) < w:
self.status_w.addstr(0, w-len(pos)-1, pos)
if self.query == '' or not self.status_shown_once:
self.status_shown_once = True
if self.query != '': self.refresh_status = True
self.status_w.addch(0, 0, 'Q', self._status_attr(
self.m.queue_fetching, self.m.queue_fetched))
self.status_w.addch(0, 1, 'P', self._status_attr(
self.m.playing_fetching, self.m.playing_fetched))
self.status_w.addch(0, 2, 'S', self._status_attr(
self.m.songs_fetching, self.m.songs_fetched))
self.status_w.addstr(0, 4, self.statusline[:w-5])
else:
self.status_w.addstr(0, 0, self.query[:w-1], curses.A_BOLD)
self.status_w.noutrefresh()
def on_queue_fetched(self):
if not self.m.queue_fetched:
self.set_status("Queue fetch failed: %s" % \
str(self.m.qException))
return
if self.m.songs_fetched and self.m.playing_fetched:
self.timeout = DEFAULT_TIMEOUT
self.update_timeout = True
self.queue_main.touch(layout=True, data=True)
self.set_status("Queue in %s" % self.m.qLoadTime)
def on_songs_fetched(self, from_cache=False):
if not self.running:
# That cache is DAMNED quick
assert from_cache
return
if not self.m.songs_fetched:
self.set_status("Songs fetch failed: %s" % \
str(self.m.sException))
return
if self.m.queue_fetched and self.m.playing_fetched:
self.timeout = DEFAULT_TIMEOUT
self.update_timeout = True
self.queue_main.touch(layout=True, data=True)
if from_cache:
self.set_status("Songs (cache) in %s" % self.m.sCacheLoadTime)
else:
self.set_status("Songs in %s" % self.m.sLoadTime)
def on_playing_fetched(self):
if not self.m.playing_fetched:
self.set_status("Playing fetch failed: %s" % \
str(self.m.pException))
return
if self.m.songs_fetched and self.m.queue_fetched:
self.timeout = DEFAULT_TIMEOUT
self.update_timeout = True
self.queue_main.touch(layout=True, data=True)
self.set_status("Playing in %s" % self.m.pLoadTime)
def show_help(self):
less = subprocess.Popen(['less', '-c'], stdin=subprocess.PIPE)
less.stdin.write((" Curses based Python Marietje client %(version)s\n"+
" (c) 2008 - Bas Westerbaan, 99BA289B\n"+
"\n"+
" Alt+f refetch some Alt+F refetch all\n"+
" Alt+r refresh screen Alt+R refresh screen harder\n"+
" Alt+x quit Alt+? guess!\n"+
" Ctrl+u clear query Ctrl+w only the last word\n"+
" Alt+a list all songs Ctrl+t transpose last two chars\n"+
"\n"+
"RUNTIME\n"+
" Load times\n"+
" songs %(slt)s\n"+
" songs cache %(clt)s\n"+
" songs lut %(llt)s\n"+
" queue %(qlt)s\n"+
" now playing %(plt)s\n"+
"\n"+
"LOG\n"+
"%(log)s") % {
'version': VERSION,
'qlt': self.m.qLoadTime if hasattr(self.m, 'qLoadTime') else 'n/a',
'slt': self.m.sLoadTime if hasattr(self.m, 'sLoadTime') else 'n/a',
'llt': self.m.sLutGenTime if hasattr(self.m, 'sLutGenTime') else 'n/a',
'plt': self.m.pLoadTime if hasattr(self.m, 'pLoadTime') else 'n/a',
'clt': self.m.sCacheLoadTime if hasattr(self.m, 'sCacheLoadTime') else 'n/a',
'log': self.log.getvalue()
})
less.stdin.close()
less.wait()
if __name__ == '__main__':
    # Command line front-end: parse the options, run the curses client
    # and dump the in-memory log on an uncaught exception.
    parser = optparse.OptionParser()
    parser.add_option('-H', '--host', dest='host',
            default='zuidslet.science.ru.nl',
            help="Connect to HOST", metavar='HOST')
    parser.add_option('-p', '--port', dest='port',
            default='1337', type='int',
            help="Connect on PORT", metavar='PORT')
    parser.add_option('-u', '--userdir', dest='userdir',
            default='.pymarietje',
            help="Use PATH as userdir", metavar='PATH')
    (options, args) = parser.parse_args()
    m = CursesMarietje(host=options.host,
            port=options.port,
            userdir=options.userdir)
    try:
        m.run()
    except Exception, e:
        # The curses UI owns the terminal while running; the in-memory
        # log buffer (if run() got far enough to create it) holds the
        # details, so print it for post-mortem debugging.
        logging.exception('Uncatched exception')
        if hasattr(m, 'log'):
            print m.log.getvalue()
        else:
            print e
    sys.exit(0)
Don't crash when enter is pressed on an empty search-result window.
Signed-off-by: Bas Westerbaan <1d31d94f30d40df7951505d1034e1e923d02ec49@fsfe.org>
from __future__ import with_statement
import os
import sys
import yaml
import time
import gzip
import curses
import os.path
import logging
import optparse
import threading
import subprocess
from random import random
from marietje import Marietje, MarietjeException
from cStringIO import StringIO
VERSION = 7
# getch() timeout in milliseconds: poll fast while fetches are pending,
# slower during normal operation.
INITIAL_TIMEOUT = 100
DEFAULT_TIMEOUT = 1000
# Color-pair slots; the CP_C* variants are the inverted "cursor line" colors.
(CP_WHITE, CP_BLUE, CP_GREEN, CP_RED,
        CP_CWHITE, CP_CBLUE, CP_CGREEN, CP_CRED) = range(8)
# Cleared by curses_use_default_colors() when the terminal (or the curses
# build) has no color support; the curses_* wrappers below check it.
GOT_COLORS = True
def curses_use_default_colors(*args, **kwargs):
    """ Wrapper around curses.use_default_colors that degrades gracefully:
        when the curses build or the terminal lacks color support, flip
        the module-wide GOT_COLORS flag instead of crashing. """
    global GOT_COLORS
    has_colors = getattr(curses, 'has_colors', None)
    if has_colors is None or not has_colors():
        GOT_COLORS = False
        return
    curses.use_default_colors(*args, **kwargs)
def curses_color_pair(*args, **kwargs):
    """ Like curses.color_pair, but falls back to A_BOLD when the
        terminal has no color support. """
    if GOT_COLORS:
        return curses.color_pair(*args, **kwargs)
    return curses.A_BOLD
def curses_init_pair(*args, **kwargs):
    """ Like curses.init_pair, but a no-op when the terminal has no
        color support. """
    if GOT_COLORS:
        return curses.init_pair(*args, **kwargs)
def format_list(l):
    """ Formats a list <l> neatly: "a", "a and b", "a, b and c".

    Returns '' for an empty list.  (The old code returned None, which
    rendered as the literal string "None" when interpolated into the
    "Fetching %s" status lines.) """
    if not l:
        return ''
    if len(l) == 1:
        return str(l[0])
    return ', '.join(str(x) for x in l[:-1]) + ' and ' + str(l[-1])
def format_time(s):
    """ Formats <s> seconds into [-][<hours>:]<minutes>:<seconds>,
        nicely.

    <s> may be negative (a '-' is prepended) and is truncated to an
    int first, so float input no longer produces '1.0:3.0'-style
    garbage. """
    s = int(s)
    if s < 0:
        sign = '-'
        s = -s
    else:
        sign = ''
    # divmod keeps the arithmetic integral in both Python 2 and 3.
    minutes, seconds = divmod(s, 60)
    hours, minutes = divmod(minutes, 60)
    if hours != 0:
        return sign + str(hours) + ':' + str(minutes).zfill(2) + \
                ':' + str(seconds).zfill(2)
    return sign + str(minutes) + ':' + str(seconds).zfill(2)
class ScrollingColsWindow:
    """ Base of both the queue and the search result view. Shows a dataset
        in a table with an optional cursor.

        Subclasses provide the data through get_data_info() and
        get_cells(); this class handles column layout, scrolling and
        drawing on the curses window <w>. """
    def __init__(self, w, use_cursor=False):
        self.w = w                      # curses (sub)window we draw on
        self.needRedraw = True          # full redraw requested
        self.needLayout = False         # column widths must be recomputed
        self.x_offset = 0               # horizontal scroll offset
        self.y_offset = 0               # index of the first visible row
        # The old_* fields mirror the values used for the previous draw,
        # so update() can detect changes without explicit invalidation.
        self.old_x_offset = 0
        self.old_y_offset = 0
        self.old_w = 0
        self.old_h = 0
        self.y_max = 0                  # total number of data rows
        self.col_ws = None              # column widths computed by layout()
        self.use_cursor = use_cursor
        self.c_offset = 0               # cursor row, relative to the view
        self.c_middle = 0               # middle line of the view
        self.old_c_offset = 0
    def scroll_page_up(self):
        self.y_offset -= self.old_h
    def scroll_page_down(self):
        self.y_offset += self.old_h
    def scroll_up(self):
        # With a cursor: move the cursor itself until it is back at the
        # middle of the view (or everything fits on one page); only then
        # scroll the view.
        if self.use_cursor and (self.c_middle < self.c_offset or \
                self.y_max < self.old_h):
            self.c_offset -= 1
        else:
            self.y_offset -= 1
    def scroll_down(self):
        if self.use_cursor and (self.c_middle > self.c_offset or \
                self.y_max < self.old_h):
            self.c_offset += 1
        else:
            self.y_offset += 1
    def scroll_home(self):
        self.y_offset = 0
    def scroll_end(self):
        # update() clamps this back into range on the next draw.
        self.y_offset = self.y_max
    def scroll_right(self):
        # An offset of 1 would only hide the '>' marker itself: skip it.
        self.x_offset += 1
        if self.x_offset == 1:
            self.x_offset = 2
    def scroll_left(self):
        self.x_offset -= 1
        if self.x_offset <= 1:
            self.x_offset = 0
    def get_data_info(self):
        """ Should return the number of rows, the average sizes of the
            columns and the maximum sizes of the columns """
        raise NotImplementedError
    def _layout(self, vals, w, maxs):
        """ Helper for layout: finds the best way to fit columns of
            preferred widths of <vals> to a total width of <w> with
            maximum values <maxs> """
        if sum(vals) == 0:
            return [0] * (len(vals) - 1) + [w]
        # Scale the preferred widths to the available width ...
        r = float(w) / float(sum(vals))
        ret = map(lambda x: int(x*r), vals)
        # ... and spread the rounding error over the first columns.
        e = w - sum(ret)
        for i in xrange(abs(e)):
            ret[i] += (1 if e > 0 else -1)
        # If the assigned row width is less than its maximum, we'll
        # try to give at least a width of three for the '>' and '$'.
        cur = 0
        for i in xrange(0, len(ret)):
            last = ret[i]
            while ret[i] < maxs[i] and ret[i] < 3:
                # Steal one cell from the next column that can spare it.
                for j in xrange(len(ret)):
                    if ret[(j + cur) % len(ret)] > 3:
                        ret[(j + cur) % len(ret)] -= 1
                        ret[i] += 1
                        break
                cur = (cur + j + 1) % len(ret)
                if ret[i] == last:
                    # We couldn't find any more room
                    return ret
                last = ret[i]
        return ret
    def layout(self):
        """ Calculate the widths of the separate columns """
        self.needLayout = False
        h, w = self.w.getmaxyx()
        di = self.get_data_info()
        if di is None:
            # No data (yet): nothing to lay out.
            self.y_max = 0
            return
        N, avgs, maxs = di
        if sum(maxs) <= w:
            # Everything fits: give every column its maximum width.
            self.col_ws = self._layout(maxs, w, maxs)
        else:
            # Divide the width proportionally to the average widths.
            self.col_ws = self._layout(avgs, w, maxs)
        self.y_max = N
    def draw_cell_text(self, val, start, end, colors):
        # Hook for subclasses (e.g. search-match highlighting).
        self.w.addstr(val[start:end])
    def draw_cell(self, y, cx, cw, val, colors):
        """ Draws the text <val> in the cell at (<y>, <cx>) of width
            <cw>, honouring the horizontal scroll offset.  Truncation on
            the right is marked with '$'; text scrolled off on the left
            with '>'. """
        self.w.move(y, cx)
        if len(val) > cw:
            if self.x_offset == 0:
                self.draw_cell_text(val, 0, cw-1, colors)
                self.w.addch('$', colors[1])
            else:
                self.w.addch('>', colors[2])
                off = self.x_offset
                if off + cw - 2 > len(val):
                    # Don't scroll past the end of the text.
                    off = len(val) - cw + 2
                self.draw_cell_text(val, off, off+cw-2,
                        colors)
                self.w.addch('$', colors[1])
        else:
            self.draw_cell_text(val, 0, len(val), colors)
    def draw_cols_line(self, y, cells, is_cursor):
        """ Draws a line with columns """
        self.w.move(y, 0)
        self.w.clrtoeol()
        if is_cursor:
            # Cursor line: inverted colors over the whole width.
            self.w.attron(curses_color_pair(CP_CWHITE))
            self.w.hline(' ', self.w.getmaxyx()[1])
            colors = map(curses_color_pair,
                    [CP_CWHITE, CP_CBLUE, CP_CGREEN, CP_CRED])
        else:
            colors = map(curses_color_pair,
                    [CP_WHITE, CP_BLUE, CP_GREEN, CP_RED])
        self.w.move(y, 0)
        cx = 0
        for j in xrange(len(self.col_ws)):
            if self.col_ws[j] == 0:
                assert cells[j] == ''
                continue
            cw = self.col_ws[j]
            try:
                self.draw_cell(y, cx, cw, cells[j], colors)
            except curses.error:
                if y == self.w.getmaxyx()[0] - 1 and \
                        cx + cw == sum(self.col_ws):
                    # curses doesn't like us
                    # writing to the top right:
                    # ignore.
                    pass
                else:
                    # Mark the cell as broken instead of crashing.
                    try:
                        self.w.move(y, cx)
                        self.w.addch('!', colors[3])
                    except curses.error:
                        # This shouldn't happen!
                        raise Exception, (y, cx)
            cx += cw
        if is_cursor:
            self.w.attroff(curses_color_pair(
                    CP_CWHITE))
    def draw_line(self, y, is_cursor):
        """ Draws a line """
        self.w.move(y, 0)
        if y + self.y_offset >= self.y_max:
            # Past the end of the data: vi-style '~' filler.
            self.w.addstr('~', curses_color_pair(
                    CP_BLUE))
            self.w.clrtoeol()
        else:
            cells = self.get_cells(y + self.y_offset)
            self.draw_cols_line(y, cells, is_cursor)
    def update(self, forceRedraw=False):
        """ Update the view """
        h, w = self.w.getmaxyx()
        if not forceRedraw and not self.needRedraw:
            # Not touched: only redraw when scrolling, resizing or the
            # cursor changed something since the previous draw.
            if self.old_x_offset != self.x_offset or \
                    self.old_y_offset != self.y_offset or \
                    self.old_w != w or self.old_h != h or \
                    (self.use_cursor and \
                    self.old_c_offset != self.c_offset):
                pass
            else:
                return
        self.needRedraw = False
        if self.old_w != w or self.old_h != h:
            self.old_h = h
            self.old_w = w
            self.needLayout = True
            if self.use_cursor:
                self.c_middle = h/2
        if self.needLayout:
            self.layout()
        if self.col_ws is None:
            self.y_max = 0
        # Clamp the scroll offset into range; nudge the cursor a step in
        # the opposite direction to compensate.
        if self.y_offset + h > self.y_max:
            self.y_offset = self.y_max - h
            if self.use_cursor:
                self.c_offset += 1
        if self.y_offset < 0:
            self.y_offset = 0
            if self.use_cursor:
                self.c_offset -= 1
        if self.use_cursor:
            # Keep the cursor inside the view and on an existing row.
            if self.c_offset < 0:
                self.c_offset = 0
            elif self.c_offset + self.y_offset >= self.y_max:
                self.c_offset = min(h - 1,
                        self.y_max - self.y_offset - 1)
        start = time.time()  # NOTE(review): unused; leftover timing code?
        for y in xrange(h):
            self.draw_line(y, self.use_cursor and y==self.c_offset)
        # We update old_ here for they might've been updated
        # by self.draw_line or above
        self.old_y_offset = self.y_offset
        self.old_x_offset = self.x_offset
        if self.use_cursor:
            self.old_c_offset = self.c_offset
        self.w.noutrefresh()
    def touch(self, layout=False):
        """ Touches the window to redraw. If <layout>, also recompute
            the column layout """
        self.needRedraw = True
        if layout: self.needLayout = True
    def get_view_region(self):
        """ Return view region information: start and end of the region
            currently viewed in the dataset and the total amount of
            lines """
        return (self.y_offset, self.y_offset + self.old_h, self.y_max)
class SearchWindow(ScrollingColsWindow):
def __init__(self, w, m, highlight=True):
ScrollingColsWindow.__init__(self, w, use_cursor=True)
self.needDataInfoRecreate = False
self.data_info = None
self.data = None
self.m = m
self.query = None
self.highlight = highlight
 def draw_cell_text(self, val, start, end, colors):
  """ Draw the visible slice [start, end) of cell text <val>,
      highlighting every occurrence of the current query. """
  if not self.highlight:
   return ScrollingColsWindow.draw_cell_text(self,
     val, start, end, colors)
  # Build idxs, the sorted boundaries of alternating segments:
  # idxs[0]..idxs[1] is non-matching, idxs[1]..idxs[2] matches,
  # and so on (case-insensitive search).
  ridx = -1
  idxs = [0]
  val_lower = val.lower()
  while True:
   ridx = val_lower.find(self.query, ridx+1)
   if ridx == -1:
    break
   if ridx < idxs[-1]:
    # Overlapping match: extend the previous match segment.
    idxs[-1] = ridx + len(self.query)
   else:
    idxs.append(ridx)
    idxs.append(ridx + len(self.query))
  idxs.append(len(val))
  # Sanity check: boundaries must already be sorted.
  v = list(idxs)
  v.sort()
  assert v == idxs
  m = True
  for i in xrange(0, len(idxs)-1):
   # m flips each segment; True means "matching" (highlight).
   m = not m
   istart, iend = idxs[i:i+2]
   # Clip the segment against the visible [start, end) range.
   if end <= istart: continue
   if iend <= start: continue
   istart = max(istart, start)
   iend = min(iend, end)
   if istart == iend: continue
   self.w.attron(colors[3 if m else 0])
   self.w.addstr(val[istart:iend])
   self.w.attroff(colors[3 if m else 0])
def set_query(self, q):
if self.query == q:
return
self.query = q
self.data = None
self.needDataInfoRecreate = True
def touch(self, layout=False, data=False):
if data:
self.needDataInfoRecreate = True
ScrollingColsWindow.touch(self, layout=layout)
def fetch_data(self):
return self.m.query(self.query)
 def get_data_info(self):
  """ Return the cached (N, avg-widths, max-widths) tuple for
      the current results, (re)running the query lazily. """
  if self.data is None:
   # No song list yet: nothing to search.
   if not self.m.songs_fetched:
    return None
   self.data = self.fetch_data()
   if len(self.data) == 0:
    return None
  if self.data_info is None or \
    self.needDataInfoRecreate:
   self.needDataInfoRecreate = False
   self.data_info = None
  if self.data_info is None:
   self.data_info = self.create_data_info()
  return self.data_info
 def create_data_info(self):
  """ Scan all result rows once, computing the per-column
      average and maximum cell widths for the layout. """
  N = len(self.data)
  l = len(self.get_cells(0))
  sums = [0]*l
  maxs = [0]*l
  for i in xrange(N):
   cells = self.get_cells(i)
   for j in xrange(l):
    sums[j] += len(cells[j])
    maxs[j] = max(len(cells[j]), maxs[j])
  # NOTE: integer division -- the averages are truncated.
  return (N, map(lambda x: x/N, sums), maxs)
def get_cells(self, j):
return (self.m.songs[self.data[j]][0],
self.m.songs[self.data[j]][1])
def request_track(self):
""" Requests the track under the cursor """
cpos = self.c_offset + self.y_offset
if len(self.data) == 0: return
track_id = self.data[cpos]
self.m.request_track(track_id)
class QueueWindow(ScrollingColsWindow):
def __init__(self, w, m):
ScrollingColsWindow.__init__(self, w)
self.m = m
self.data_info = None
self.needDataInfoRecreate = False
self.time_lut = None
self.last_redraw = 0
 def create_data_info(self):
  """ Compute per-column average and maximum widths over the
      now-playing line plus all queued tracks (the +1). """
  N = len(self.m.queue) + 1
  l = len(self.get_cells(0))
  sums = [0]*l
  maxs = [0]*l
  for i in xrange(N):
   cells = self.get_cells(i)
   for j in xrange(l):
    sums[j] += len(cells[j])
    maxs[j] = max(len(cells[j]), maxs[j])
  # NOTE: integer division -- the averages are truncated.
  return (N, map(lambda x: x/N, sums), maxs)
def _nowPlaying_line(self):
""" Returns the line containing the currently playing song """
if len(self.m.queue) == 0:
timeLeft = format_time(int(self.m.queueOffsetTime - \
time.time()))
else:
# The countdown on the first queued song would equal
# <timeleft>.
timeLeft = ''
self.m.queueOffsetTime
if not self.m.nowPlaying[0] in self.m.songs:
return ('', 'unknown track', '#%s' % \
self.m.nowPlaying[0], timeLeft)
artist, title = self.m.songs[self.m.nowPlaying[0]]
return ('', artist, title, timeLeft)
 def get_cells(self, l):
  """ Return the cells of row <l>: row 0 is the now-playing
      line, the rest map to queue entries shifted by one. """
  if l == 0:
   if not self.m.playing_fetched or \
     not self.m.songs_fetched:
    return ('','','','')
   return self._nowPlaying_line()
  l -= 1
  if self.time_lut is None:
   # No start-time table yet: show the track length instead
   # of a live countdown.
   t = format_time(self.m.queue[l][2])
  else:
   t = format_time(int(self.time_lut[l] - time.time()))
  # Queue entry layout (from SOURCE usage): [0] artist,
  # [1] title, [2] length, [3] requester.
  return (self.m.queue[l][3],
    self.m.queue[l][0],
    self.m.queue[l][1],
    t)
 def get_data_info(self):
  """ Return the cached (N, avg-widths, max-widths) tuple,
      rebuilding it (and the start-time table) when needed. """
  if not self.m.queue_fetched or len(self.m.queue) == 0:
   if not self.m.playing_fetched or \
     not self.m.songs_fetched:
    return None
   # Only the now-playing line.
   N = 1
  else:
   N = 1 + len(self.m.queue)
  if self.data_info is None or \
    self.data_info[0] != N or \
    self.needDataInfoRecreate:
   self.needDataInfoRecreate = False
   self.time_lut = None
   # We first need to create self.time_lut, for
   # create_data_info depends on it
   self.data_info = None
  # Compute the timestamps of initiation of each of
  # the queued tracks
  if self.time_lut is None:
   if self.m.playing_fetched:
    id, songStarted, songLength, \
      serverTime = self.m.nowPlaying
    offset = self.m.queueOffsetTime
    self.time_lut = list()
    for i in xrange(len(self.m.queue)):
     self.time_lut.append(offset)
     offset += self.m.queue[i][2]
  if self.data_info is None:
   self.data_info = self.create_data_info()
  return self.data_info
def reset(self):
""" Resets cached information about the currently playing
track and the queue """
self.time_lut = None
self.data_info = None
def update(self, forceRedraw=False):
""" Overload update to keep track of the updates to allow
per-second reloads if we've got countdowns to update """
if not forceRedraw and not self.time_lut is None and \
(time.time() - self.last_redraw) > 0.7:
self.needRedraw = True
self.last_redraw = time.time()
ScrollingColsWindow.update(self, forceRedraw)
def touch(self, layout=False, data=False):
if data:
self.needDataInfoRecreate = True
ScrollingColsWindow.touch(self, layout=layout)
class CursesMarietje:
 def __init__(self, host, port, userdir):
  """ Set up state, read the configuration and the songs cache
      from ~/<userdir> and create the Marietje protocol object
      for <host>:<port>. """
  self.running = False
  self.refresh_status = True
  self.statusline = ''
  self.status_shown_once = False
  self.old_query = ''
  self.query = ''
  self.userdir = os.path.expanduser(
    os.path.join('~', userdir))
  self.options = {}
  if not os.path.exists(self.userdir):
   try:
    os.mkdir(self.userdir)
   except Exception, e:
    # Can't create the user dir: run without persistence.
    self.userdir = None
  else:
   fp = os.path.join(self.userdir, 'config')
   if os.path.exists(fp):
    with open(fp) as f:
     # NOTE(review): yaml.load without a Loader can run
     # arbitrary constructors; yaml.safe_load would be
     # safer even for a local config file.
     self.options = yaml.load(f)
  if not 'marietje' in self.options:
   self.options['marietje'] = dict()
  if not 'username' in self.options['marietje']:
   self.options['marietje']['username'] = os.getlogin()
  self.m = Marietje(self.options['marietje']['username'],
    queueCb=self.on_queue_fetched,
    songCb=self.on_songs_fetched,
    playingCb=self.on_playing_fetched,
    host=host,
    port=port)
  self.l = logging.getLogger('CursesMarietje')
  if not self.userdir is None:
   fp = os.path.join(self.userdir, 'songs-cache')
   if os.path.exists(fp):
    try:
     with open(fp) as f:
      self.m.songs_from_cache(f)
    except Exception, e:
     self.l.exception("Exception while "+
       "reading cache")
     # We silently assume self.m is in a
     # consistent state in exception.
 def refetch(self, fetchSongs=True, fetchQueue=True,
   fetchPlaying=True, force=False):
  """ If all requested are fetched, refetch them. If some of
      them are not fetched, fetch only those. """
  if force:
   pass
  elif (not fetchPlaying or self.m.playing_fetched or \
    self.m.playing_fetching) and \
    (not fetchSongs or self.m.songs_fetched or \
    self.m.songs_fetching) and \
    (not fetchQueue or self.m.queue_fetched or \
    self.m.queue_fetching):
   # Everything requested is fetched (or being fetched):
   # refetch all of them, skipping fetches in progress.
   fetchPlaying = fetchPlaying and \
     not self.m.playing_fetching
   fetchSongs = fetchSongs and \
     not self.m.songs_fetching
   fetchQueue = fetchQueue and \
     not self.m.queue_fetching
   self.set_status("Refetching %s" % format_list(
    (('playing',) if fetchPlaying else ()) +
    (('songs',) if fetchSongs else ()) +
    (('queue',) if fetchQueue else ())))
  else:
   # Something is missing: fetch only what is neither
   # fetched nor currently being fetched.
   fetchPlaying = not self.m.playing_fetched and \
     not self.m.playing_fetching
   fetchSongs = not self.m.songs_fetched and \
     not self.m.songs_fetching
   fetchQueue = not self.m.queue_fetched and \
     not self.m.queue_fetching
   self.set_status("Fetching %s" % format_list(
    (('playing',) if fetchPlaying else ()) +
    (('songs',) if fetchSongs else ()) +
    (('queue',) if fetchQueue else ())))
  # Poll quickly until the fetches come in; the fetched
  # callbacks restore the default timeout.
  self.update_timeout = True
  self.timeout = INITIAL_TIMEOUT
  self.m.start_fetch(fetchSongs=fetchSongs,
    fetchPlaying=fetchPlaying,
    fetchQueue=fetchQueue)
  self.queue_main.reset()
def set_status(self, value):
self.l.info(value)
self.statusline = value
self.refresh_status = True
self.status_shown_once = False
def run(self):
self.log = StringIO()
logging.basicConfig(stream=self.log,
level=logging.DEBUG,
format="%(asctime)s:%(levelname)s:"+
"%(name)s:%(levelname)s:%(message)s")
self._been_setup = False
self.running = True
while self.running:
curses.wrapper(self._inside_curses)
if not self.userdir is None:
with open(os.path.join(self.userdir,
'config'), 'w') as f:
self.options = yaml.dump(self.options, f)
with open(os.path.join(self.userdir,
'songs-cache'), 'w') as f:
self.m.cache_songs_to(f)
def _inside_curses(self, window):
if not self._been_setup:
self._setup(window)
self._been_setup = True
self._main_loop()
 def _setup(self, window):
  """ One-time curses setup: colour pairs, the two main
      sub-windows, the status line and the initial fetch. """
  curses_use_default_colors()
  curses_init_pair(CP_BLUE, curses.COLOR_BLUE, -1)
  curses_init_pair(CP_GREEN, curses.COLOR_GREEN, -1)
  curses_init_pair(CP_RED, curses.COLOR_RED, -1)
  curses_init_pair(CP_CWHITE, curses.COLOR_BLACK,
    curses.COLOR_WHITE)
  curses_init_pair(CP_CBLUE, curses.COLOR_BLUE,
    curses.COLOR_WHITE)
  curses_init_pair(CP_CGREEN, curses.COLOR_GREEN,
    curses.COLOR_WHITE)
  curses_init_pair(CP_CRED, curses.COLOR_RED,
    curses.COLOR_WHITE)
  self.window = window
  h,w = self.window.getmaxyx()
  # The main area takes all but the bottom status line.
  self.queue_main = QueueWindow(self.window.derwin(h-1,w,0,0),
    self.m)
  if not 'search-window' in self.options:
   self.options['search-window'] = dict()
  if not 'highlight' in self.options['search-window']:
   self.options['search-window']['highlight'] = True
  self.search_main = SearchWindow(self.window.derwin(h-1,w,0,0),
    self.m, highlight=self.options[
    'search-window']['highlight'])
  self.status_w = self.window.derwin(1, w, h-1, 0)
  self.main = self.queue_main
  self.refetch(force=True)
 def _main_loop(self):
  """ The curses main loop: poll for a key, interpret it
      (Alt commands, scrolling, query editing), keep
      self.main in sync with the query and redraw. """
  window = self.window
  h,w = self.window.getmaxyx()
  while True:
   # Apply a pending change of the getch() timeout.
   if self.update_timeout:
    self.update_timeout = False
    window.timeout(self.timeout)
   try:
    k = window.getch()
   except KeyboardInterrupt:
    self.running = False
    break
   forceRedraw = False
   if k == -1:
    pass
   elif k == 27:
    # Escape: read the next key without blocking to
    # recognize Alt+<key> combinations.
    window.timeout(0)
    try:
     try:
      k = window.getch()
     except KeyboardInterrupt:
      self.running = False
      break
     if k == -1:
      pass
     elif k == ord('x'):
      self.running = False
      break
     elif k == ord('r'):
      forceRedraw = True
     elif k == ord('R'):
      self.window.redrawwin()
      forceRedraw = True
     elif k == ord('f'):
      self.refetch(fetchSongs=False)
     elif k == ord('F'):
      self.refetch()
     elif k == ord('?'):
      self.show_help()
      # We break the main loop, which
      # is then reentered via
      # curses.wrapper
      break
     elif k == ord('a'):
      self.query = '*'
    finally:
     window.timeout(self.timeout)
   elif k == 410: # redraw
    h, w = self.window.getmaxyx()
    self.queue_main.w.resize(h-1,w)
    self.queue_main.touch()
    self.status_w.resize(1,w)
    self.status_w.mvwin(h-1, 0)
    self.refresh_status = True
   elif k == 262: # home
    self.main.scroll_home()
    self.refresh_status = True
   elif k == 360: # end
    self.main.scroll_end()
    self.refresh_status = True
   elif k == 339: # page up
    self.main.scroll_page_up()
    self.refresh_status = True
   elif k == 338: # page down
    self.main.scroll_page_down()
    self.refresh_status = True
   elif k == 261: # right
    self.main.scroll_right()
    self.refresh_status = True
   elif k == 260: # left
    self.main.scroll_left()
    self.refresh_status = True
   elif k == 259: # up
    self.main.scroll_up()
    self.refresh_status = True
   elif k == 258: # down
    self.main.scroll_down()
    self.refresh_status = True
   elif k == ord('?'):
    self.show_help()
    # We break the main loop, which
    # is then reentered via
    # curses.wrapper
    break
   elif k == 263 or k == 127: # backspace
    if len(self.query) != 0:
     self.query = self.query[:-1]
   elif k == 23: # C-w
    # Delete the last word of the query.
    if len(self.query) != 0:
     idx = self.query.rfind(' ', 0,
       len(self.query)-1)
     if idx == -1:
      self.query = ''
     else:
      self.query = self.query[:idx+1]
   elif k == 21: # C-u
    if len(self.query) != 0:
     self.query = ''
   elif k == 20: # C-t
    # Transpose the last two characters.
    if len(self.query) >= 2:
     self.query = (self.query[:-2] +
       self.query[-1] +
       self.query[-2])
   elif self.main is self.search_main and k == 10: # RET
    try:
     self.search_main.request_track()
    except MarietjeException, e:
     self.l.exception("Exception while "+
       "requesting track")
     self.set_status(str(e))
    self.refetch(fetchSongs=False)
    self.query = ''
   elif 0 < k and k < 128 and \
     chr(k).lower() in self.m.cs_lut:
    # Printable character known to the song lookup table:
    # append it to the query.
    self.query += chr(k).lower()
   else:
    self.set_status((
      'Unknown key (%s). Press '+
      'Alt+x to quit, Alt+? for help') % k)
   # Switch between queue and search view depending on
   # whether a query is currently entered.
   if self.main is self.queue_main \
     and len(self.query) != 0:
    self.main = self.search_main
    self.main.touch()
   elif self.main is self.search_main \
     and len(self.query) == 0:
    self.main = self.queue_main
    self.main.touch()
   if self.main is self.queue_main:
    # The current track should have ended: refetch.
    if self.m.playing_fetched and \
      time.time() > self.m.queueOffsetTime:
     self.refetch(fetchSongs=False)
   if self.query != self.old_query:
    # A leading '*' ("list all") is dropped as soon as
    # more text is typed after it.
    if len(self.query) > 1 and self.query[0] == '*':
     self.query = self.query[1:]
    self.old_query = self.query
    self.refresh_status = True
    if self.main is self.search_main:
     self.search_main.set_query(self.query)
     self.search_main.touch(layout=True)
   self.main.update(forceRedraw=forceRedraw)
   self.update_status(forceRedraw=forceRedraw)
   curses.doupdate()
def _status_attr(self, fetching, fetched):
ret = 0
if fetched:
ret |= curses.A_BOLD
if fetching:
ret |= curses_color_pair(CP_GREEN)
else:
if fetching:
ret |= curses_color_pair(CP_GREEN)
else:
ret |= curses_color_pair(CP_RED)
return ret
 def update_status(self, forceRedraw):
  """ Redraw the bottom status line: view region on the right,
      fetch-state letters plus message -- or the query being
      typed -- on the left. """
  if not self.refresh_status and not forceRedraw:
   return
  h, w = self.status_w.getmaxyx()
  self.refresh_status = False
  self.status_w.clear()
  pos = '%s-%s|%s' % self.main.get_view_region()
  if len(pos) < w:
   self.status_w.addstr(0, w-len(pos)-1, pos)
  # Show a fresh statusline at least once, even while a query
  # is being typed.
  if self.query == '' or not self.status_shown_once:
   self.status_shown_once = True
   if self.query != '': self.refresh_status = True
   self.status_w.addch(0, 0, 'Q', self._status_attr(
    self.m.queue_fetching, self.m.queue_fetched))
   self.status_w.addch(0, 1, 'P', self._status_attr(
    self.m.playing_fetching, self.m.playing_fetched))
   self.status_w.addch(0, 2, 'S', self._status_attr(
    self.m.songs_fetching, self.m.songs_fetched))
   self.status_w.addstr(0, 4, self.statusline[:w-5])
  else:
   self.status_w.addstr(0, 0, self.query[:w-1], curses.A_BOLD)
  self.status_w.noutrefresh()
 def on_queue_fetched(self):
  """ Callback: the queue fetch finished (or failed). """
  if not self.m.queue_fetched:
   self.set_status("Queue fetch failed: %s" % \
     str(self.m.qException))
   return
  # Once everything is in, slow the polling down again.
  if self.m.songs_fetched and self.m.playing_fetched:
   self.timeout = DEFAULT_TIMEOUT
   self.update_timeout = True
  self.queue_main.touch(layout=True, data=True)
  self.set_status("Queue in %s" % self.m.qLoadTime)
 def on_songs_fetched(self, from_cache=False):
  """ Callback: the song list was loaded, from the server or
      from the on-disk cache. """
  if not self.running:
   # That cache is DAMNED quick
   assert from_cache
   return
  if not self.m.songs_fetched:
   self.set_status("Songs fetch failed: %s" % \
     str(self.m.sException))
   return
  # Once everything is in, slow the polling down again.
  if self.m.queue_fetched and self.m.playing_fetched:
   self.timeout = DEFAULT_TIMEOUT
   self.update_timeout = True
  self.queue_main.touch(layout=True, data=True)
  if from_cache:
   self.set_status("Songs (cache) in %s" % self.m.sCacheLoadTime)
  else:
   self.set_status("Songs in %s" % self.m.sLoadTime)
 def on_playing_fetched(self):
  """ Callback: the now-playing fetch finished (or failed). """
  if not self.m.playing_fetched:
   self.set_status("Playing fetch failed: %s" % \
     str(self.m.pException))
   return
  # Once everything is in, slow the polling down again.
  if self.m.songs_fetched and self.m.queue_fetched:
   self.timeout = DEFAULT_TIMEOUT
   self.update_timeout = True
  self.queue_main.touch(layout=True, data=True)
  self.set_status("Playing in %s" % self.m.pLoadTime)
 def show_help(self):
  """ Pipe the help text, runtime statistics and the captured
      in-memory log through `less' (the caller leaves curses
      first by breaking out of the main loop). """
  less = subprocess.Popen(['less', '-c'], stdin=subprocess.PIPE)
  less.stdin.write((" Curses based Python Marietje client %(version)s\n"+
    " (c) 2008 - Bas Westerbaan, 99BA289B\n"+
    "\n"+
    " Alt+f refetch some            Alt+F refetch all\n"+
    " Alt+r refresh screen          Alt+R refresh screen harder\n"+
    " Alt+x quit                    Alt+? guess!\n"+
    " Ctrl+u clear query            Ctrl+w only the last word\n"+
    " Alt+a list all songs          Ctrl+t transpose last two chars\n"+
    "\n"+
    "RUNTIME\n"+
    " Load times\n"+
    "   songs        %(slt)s\n"+
    "   songs cache  %(clt)s\n"+
    "   songs lut    %(llt)s\n"+
    "   queue        %(qlt)s\n"+
    "   now playing  %(plt)s\n"+
    "\n"+
    "LOG\n"+
    "%(log)s") % {
     'version': VERSION,
     'qlt': self.m.qLoadTime if hasattr(self.m, 'qLoadTime') else 'n/a',
     'slt': self.m.sLoadTime if hasattr(self.m, 'sLoadTime') else 'n/a',
     'llt': self.m.sLutGenTime if hasattr(self.m, 'sLutGenTime') else 'n/a',
     'plt': self.m.pLoadTime if hasattr(self.m, 'pLoadTime') else 'n/a',
     'clt': self.m.sCacheLoadTime if hasattr(self.m, 'sCacheLoadTime') else 'n/a',
     'log': self.log.getvalue()
    })
  less.stdin.close()
  less.wait()
if __name__ == '__main__':
 # Command line interface: host, port and user directory.
 parser = optparse.OptionParser()
 parser.add_option('-H', '--host', dest='host',
   default='zuidslet.science.ru.nl',
   help="Connect to HOST", metavar='HOST')
 parser.add_option('-p', '--port', dest='port',
   default='1337', type='int',
   help="Connect on PORT", metavar='PORT')
 parser.add_option('-u', '--userdir', dest='userdir',
   default='.pymarietje',
   help="Use PATH as userdir", metavar='PATH')
 (options, args) = parser.parse_args()
 m = CursesMarietje(host=options.host,
   port=options.port,
   userdir=options.userdir)
 try:
  m.run()
 except Exception, e:
  # Curses is torn down by now: dump the in-memory log so the
  # traceback is not lost.
  logging.exception('Uncatched exception')
  if hasattr(m, 'log'):
   print m.log.getvalue()
  else:
   print e
 sys.exit(0)
|
aebc72de-2eae-11e5-99b0-7831c1d44c14
aec2d497-2eae-11e5-9e1a-7831c1d44c14
aec2d497-2eae-11e5-9e1a-7831c1d44c14 |
69b540b5-2eae-11e5-b67e-7831c1d44c14
69bb92a1-2eae-11e5-9e5f-7831c1d44c14
69bb92a1-2eae-11e5-9e5f-7831c1d44c14 |
ad38fb78-2eae-11e5-b6bb-7831c1d44c14
ad3fdca3-2eae-11e5-b292-7831c1d44c14
ad3fdca3-2eae-11e5-b292-7831c1d44c14 |
6d2bfa26-2eae-11e5-9a12-7831c1d44c14
6d31bfcc-2eae-11e5-ba2b-7831c1d44c14
6d31bfcc-2eae-11e5-ba2b-7831c1d44c14 |
c225a0d4-2eae-11e5-817e-7831c1d44c14
c22c22bd-2eae-11e5-8261-7831c1d44c14
c22c22bd-2eae-11e5-8261-7831c1d44c14 |
"""Users model file, contains all data storage logic"""
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from songs.models import Song
class Member(AbstractUser):
    """Member(user) model. Abstracted from Django user accounts"""
    # Public display name, distinct from the inherited username.
    nick_name = models.CharField(max_length=60, unique=True)
    # Tracks this member marked as loved.
    loved_tracks = models.ManyToManyField(Song, blank=True)
    # Listening history.
    listened_to = models.ManyToManyField(
        Song, blank=True, related_name='listened_to')
    profile_picture = models.ImageField(
        upload_to='cdn/images/avatars/', null=True, blank=True)
    # Profile banner; defaults to a site-hosted image.
    banner_picture = models.ImageField(
        upload_to='cdn/images/banners/', null=True, blank=True,
        default='https://modal.moe/cdn/images/avatars/swongbad.gif')
    def __str__(self):
        return self.nick_name
class Shout(models.Model):
    """A short public message ("shout") posted by a member."""
    # FIX: the foreign key targeted AbstractUser, which is an
    # abstract model and cannot be the target of a ForeignKey;
    # point it at the concrete Member model instead.
    created_by = models.ForeignKey(Member)
    body = models.CharField(max_length=200)
    def __str__(self):
        return self.body
class MemberInfo(models.Model):
    """Holds member social links, bio, etc"""
    bio = models.CharField(
        max_length=200, default='I haven\'t wrote a bio!', null=True)
    # Social handles -- all optional.
    github = models.CharField(max_length=100, null=True, blank=True)
    twitter = models.CharField(max_length=100, null=True, blank=True)
    reddit = models.CharField(max_length=100, null=True, blank=True)
    youtube = models.CharField(max_length=140, null=True, blank=True)
    # Exactly one info record per member.
    belongs_to = models.OneToOneField(Member)
    def __str__(self):
        return self.belongs_to.nick_name
class Following(models.Model):
    """The set of members that <belongs_to> follows."""
    # NOTE(review): related_name='sender' reads oddly for a
    # "following" relation -- confirm against consumers first.
    following = models.ManyToManyField(Member, related_name='sender')
    belongs_to = models.ForeignKey(Member)
    def __str__(self):
        return 'People {} follows'.format(self.belongs_to.nick_name)
# DRF auth token receiver: hand every freshly created user an
# API token automatically.
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    if not created:
        return
    Token.objects.create(user=instance)
Fix shout model
"""Users model file, contains all data storage logic"""
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from songs.models import Song
class Member(AbstractUser):
    """Member(user) model. Abstracted from Django user accounts"""
    # Public display name, distinct from the inherited username.
    nick_name = models.CharField(max_length=60, unique=True)
    # Tracks this member marked as loved.
    loved_tracks = models.ManyToManyField(Song, blank=True)
    # Listening history.
    listened_to = models.ManyToManyField(
        Song, blank=True, related_name='listened_to')
    profile_picture = models.ImageField(
        upload_to='cdn/images/avatars/', null=True, blank=True)
    # Profile banner; defaults to a site-hosted image.
    banner_picture = models.ImageField(
        upload_to='cdn/images/banners/', null=True, blank=True,
        default='https://modal.moe/cdn/images/avatars/swongbad.gif')
    def __str__(self):
        return self.nick_name
class Shout(models.Model):
    """A shout: a short message one member posts to another."""
    created_at = models.DateTimeField(auto_now_add=True)
    created_by = models.ForeignKey(Member)
    shouted_to = models.ForeignKey(Member, related_name='shouted_to')
    text = models.CharField(max_length=200)
    def __str__(self):
        # FIX: the fields were referenced without ``self.``, which
        # raised NameError whenever the model was stringified.
        return '{} -> {}'.format(self.created_by, self.shouted_to)
class MemberInfo(models.Model):
    """Holds member social links, bio, etc"""
    bio = models.CharField(
        max_length=200, default='I haven\'t wrote a bio!', null=True)
    # Social handles -- all optional.
    github = models.CharField(max_length=100, null=True, blank=True)
    twitter = models.CharField(max_length=100, null=True, blank=True)
    reddit = models.CharField(max_length=100, null=True, blank=True)
    youtube = models.CharField(max_length=140, null=True, blank=True)
    # Exactly one info record per member.
    belongs_to = models.OneToOneField(Member)
    def __str__(self):
        return self.belongs_to.nick_name
class Following(models.Model):
    """The set of members that <belongs_to> follows."""
    # NOTE(review): related_name='sender' reads oddly for a
    # "following" relation -- confirm against consumers first.
    following = models.ManyToManyField(Member, related_name='sender')
    belongs_to = models.ForeignKey(Member)
    def __str__(self):
        return 'People {} follows'.format(self.belongs_to.nick_name)
# DRF auth token receiver: hand every freshly created user an
# API token automatically.
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    if created:
        Token.objects.create(user=instance)
|
8256c173-2eae-11e5-8620-7831c1d44c14
825d2c11-2eae-11e5-b99a-7831c1d44c14
825d2c11-2eae-11e5-b99a-7831c1d44c14 |
from django.db import models
from django.contrib.auth.models import User
from messaging.models import MessageThread, Message
# extension to django's User class which has authentication details
# as well as some basic info such as name
class Member(models.Model):
    """Profile data attached one-to-one to Django's User."""
    # PROTECT: a user cannot be deleted while a Member points at it.
    equiv_user = models.OneToOneField(User, on_delete=models.PROTECT)
    def __str__(self):
        return self.equiv_user.username
    # NOTE(review): fields declared after a method still work in
    # Django, but the unconventional ordering is easy to misread.
    bio = models.CharField(max_length=4096, blank=True)
    signature = models.CharField(max_length = 1024, blank=True)
    # Sends message to a user
    # Utility method
    def direct_message(self, other):
        # Not implemented yet.
        pass
fix circular import from users.models
from django.db import models
from django.contrib.auth.models import User
# extension to django's User class which has authentication details
# as well as some basic info such as name
class Member(models.Model):
    """Profile data attached one-to-one to Django's User."""
    # PROTECT: a user cannot be deleted while a Member points at it.
    equiv_user = models.OneToOneField(User, on_delete=models.PROTECT)
    def __str__(self):
        return self.equiv_user.username
    # NOTE(review): fields declared after a method still work in
    # Django, but the unconventional ordering is easy to misread.
    bio = models.CharField(max_length=4096, blank=True)
    signature = models.CharField(max_length = 1024, blank=True)
    # Sends message to a user
    # Utility method
    def direct_message(self, other):
        # Not implemented yet.
        pass
|
89932ec0-2eae-11e5-a82b-7831c1d44c14
899db9a3-2eae-11e5-9ea1-7831c1d44c14
899db9a3-2eae-11e5-9ea1-7831c1d44c14 |
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import AbstractBaseUser, UserManager
from incubator.models import ASBLYear
# Create your models here.
# User :
# Des users ont des skills/badges
# Les badges ne peuvent être donnés que par quelqu'un qui l'a déjà (genre des teachers)
# un badge pourrait être "utilisateur de la reprap" et "certigfierait" que le user sait utiliser la machine
# Des users appartiennent à un groupe (anon, registered, membres cotisants, "bureau")
# Système d'emprunt (optionnel)
class CustomUserManager(UserManager):
    """Manager creating accounts for the custom User model."""
    def _create_user(self, username, email, password, **extra_fields):
        """
        Creates and saves a User with the given username, email and password.
        """
        now = timezone.now()
        email = self.normalize_email(email)
        user = self.model(username=username, email=email, last_login=now, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_user(self, username, email=None, password=None, **extra_fields):
        return self._create_user(username, email, password, **extra_fields)
    def create_superuser(self, username, email, password, **extra_fields):
        # "Superuser" only means is_staff here (see User.is_superuser).
        return self._create_user(username, email, password, is_staff=True, **extra_fields)
class User(AbstractBaseUser):
    """Hackerspace member account: balance, key possession, staff flag."""
    USERNAME_FIELD = "username"
    REQUIRED_FIELDS = ['email']
    objects = CustomUserManager()
    username = models.CharField(max_length=30, unique=True, verbose_name="nom d'utilisateur")
    email = models.CharField(max_length=255, unique=True)
    created = models.DateTimeField(auto_now_add=True)
    edited = models.DateTimeField(auto_now=True)
    first_name = models.CharField(max_length=127)
    last_name = models.CharField(max_length=127)
    is_staff = models.BooleanField(default=False, verbose_name="est administrateur")
    # "ardoise": how much the member owes (or is owed).
    balance = models.DecimalField(max_digits=6, decimal_places=2, default=0, verbose_name="ardoise")
    has_key = models.BooleanField(default=False, verbose_name="possède une clé")
    def has_module_perms(self, *args, **kwargs):
        return True # TODO : is this a good idea ?
    def has_perm(self, perm_list, obj=None):
        # Staff members can do everything.
        return self.is_staff
    def write_perm(self, obj):
        if self.is_staff:
            return True
        if obj is None:
            return False
        # NOTE(review): falls through returning None (falsy) when obj
        # is not None and the user is not staff -- confirm intended.
    def get_short_name(self):
        return self.username
    def get_full_name(self):
        return self.username
    @property
    def is_member(self):
        # NOTE(review): start__gte=now AND stop__lt=now matches years
        # that have not started yet but already stopped; the current
        # year is presumably start__lte=now, stop__gte=now.  Also
        # `asbl_year=year` passes a queryset; `asbl_year__in=year`
        # looks intended.  Verify before changing.
        year = ASBLYear.objects.filter(start__gte=timezone.now(), stop__lt=timezone.now())
        return self.membership_set.filter(asbl_year=year).count() > 0
    @property
    def absolute_balance(self):
        return abs(self.balance)
    @property
    def is_superuser(self):
        # No separate superuser notion: staff == superuser.
        return self.is_staff
class Membership(models.Model):
    """Links a user to an ASBL year he paid membership for."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    asbl_year = models.ForeignKey('incubator.ASBLYear')
Full name should not be required
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import AbstractBaseUser, UserManager
from incubator.models import ASBLYear
# Create your models here.
# User :
# Des users ont des skills/badges
# Les badges ne peuvent être donnés que par quelqu'un qui l'a déjà (genre des teachers)
# un badge pourrait être "utilisateur de la reprap" et "certigfierait" que le user sait utiliser la machine
# Des users appartiennent à un groupe (anon, registered, membres cotisants, "bureau")
# Système d'emprunt (optionnel)
class CustomUserManager(UserManager):
    """Manager creating accounts for the custom User model."""
    def _create_user(self, username, email, password, **extra_fields):
        """
        Creates and saves a User with the given username, email and password.
        """
        now = timezone.now()
        email = self.normalize_email(email)
        user = self.model(username=username, email=email, last_login=now, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_user(self, username, email=None, password=None, **extra_fields):
        return self._create_user(username, email, password, **extra_fields)
    def create_superuser(self, username, email, password, **extra_fields):
        # "Superuser" only means is_staff here (see User.is_superuser).
        return self._create_user(username, email, password, is_staff=True, **extra_fields)
class User(AbstractBaseUser):
    """Hackerspace member account: balance, key possession, staff flag."""
    USERNAME_FIELD = "username"
    REQUIRED_FIELDS = ['email']
    objects = CustomUserManager()
    username = models.CharField(max_length=30, unique=True, verbose_name="nom d'utilisateur")
    email = models.CharField(max_length=255, unique=True)
    created = models.DateTimeField(auto_now_add=True)
    edited = models.DateTimeField(auto_now=True)
    # Name is optional (blank=True): not everybody gives one.
    first_name = models.CharField(max_length=127, blank=True)
    last_name = models.CharField(max_length=127, blank=True)
    is_staff = models.BooleanField(default=False, verbose_name="est administrateur")
    # "ardoise": how much the member owes (or is owed).
    balance = models.DecimalField(max_digits=6, decimal_places=2, default=0, verbose_name="ardoise")
    has_key = models.BooleanField(default=False, verbose_name="possède une clé")
    def has_module_perms(self, *args, **kwargs):
        return True # TODO : is this a good idea ?
    def has_perm(self, perm_list, obj=None):
        # Staff members can do everything.
        return self.is_staff
    def write_perm(self, obj):
        if self.is_staff:
            return True
        if obj is None:
            return False
        # NOTE(review): falls through returning None (falsy) when obj
        # is not None and the user is not staff -- confirm intended.
    def get_short_name(self):
        return self.username
    def get_full_name(self):
        return self.username
    @property
    def is_member(self):
        # NOTE(review): start__gte=now AND stop__lt=now matches years
        # that have not started yet but already stopped; the current
        # year is presumably start__lte=now, stop__gte=now.  Also
        # `asbl_year=year` passes a queryset; `asbl_year__in=year`
        # looks intended.  Verify before changing.
        year = ASBLYear.objects.filter(start__gte=timezone.now(), stop__lt=timezone.now())
        return self.membership_set.filter(asbl_year=year).count() > 0
    @property
    def absolute_balance(self):
        return abs(self.balance)
    @property
    def is_superuser(self):
        # No separate superuser notion: staff == superuser.
        return self.is_staff
class Membership(models.Model):
    """Links a user to an ASBL year he paid membership for."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    asbl_year = models.ForeignKey('incubator.ASBLYear')
|
68f5c323-2eae-11e5-868c-7831c1d44c14
68fd831e-2eae-11e5-8767-7831c1d44c14
68fd831e-2eae-11e5-8767-7831c1d44c14 |
from django.utils.translation import ugettext as _
from django.utils.decorators import method_decorator
from django.utils.html import format_html
from django.contrib.auth.decorators import login_required
from django.views.generic import CreateView, UpdateView, ListView, DeleteView, DetailView
from django.core.urlresolvers import reverse_lazy, reverse
from django.contrib import messages
from django.http import Http404, HttpResponseRedirect, HttpResponseBadRequest
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.conf import settings
from django.utils import timezone
from django.core.exceptions import PermissionDenied, ValidationError
from events.models import Event, RSVP, Calendar
from events.tasks import send_cancellation_notification, send_rsvp_notification
from groups.models import SupportGroup
from ..forms import (
EventForm, AddOrganizerForm, EventGeocodingForm, EventReportForm, UploadEventImageForm, AuthorForm, SearchEventForm
)
from ..view_mixins import (
HardLoginRequiredMixin, SoftLoginRequiredMixin, PermissionsRequiredMixin, ObjectOpengraphMixin,
ChangeLocationBaseView, SearchByZipcodeBaseView
)
__all__ = [
'CreateEventView', 'ManageEventView', 'ModifyEventView', 'QuitEventView', 'CancelEventView',
'EventDetailView', 'CalendarView', 'ChangeEventLocationView', 'EditEventReportView', 'UploadEventImageView',
'EventListView',
]
class EventListView(SearchByZipcodeBaseView):
    """List of events, filter by zipcode
    """
    min_items = 20
    template_name = 'front/events/event_list.html'
    context_object_name = 'events'
    form_class = SearchEventForm
    def get_base_queryset(self):
        # Only upcoming events, soonest first.
        return Event.objects.upcoming().order_by('start_time')
class EventDetailView(ObjectOpengraphMixin, DetailView):
    """Public detail page of a published event; POST handles the
    RSVP action for logged-in users."""
    template_name = "front/events/detail.html"
    queryset = Event.objects.filter(published=True)
    title_prefix = _("Evénement local")
    meta_description = _("Participez aux événements organisés par les membres de la France insoumise.")
    def get_context_data(self, **kwargs):
        # Template flags: whether the visitor already RSVPed and
        # whether he organizes the event.
        return super().get_context_data(
            has_rsvp=self.request.user.is_authenticated and self.object.rsvps.filter(
                person=self.request.user.person).exists(),
            is_organizer=self.request.user.is_authenticated and self.object.organizers.filter(
                pk=self.request.user.person.id).exists(),
            organizers_groups=self.object.organizers_groups.distinct(),
            event_images=self.object.images.all(),
        )
    @method_decorator(login_required(login_url=reverse_lazy('oauth_redirect_view')), )
    def post(self, request, *args, **kwargs):
        """Handle the RSVP form; redirect back to the event page."""
        self.object = self.get_object()
        # NOTE(review): request.POST['action'] raises KeyError (a 500)
        # when 'action' is absent -- consider request.POST.get('action').
        if request.POST['action'] == 'rsvp':
            # Idempotent: only create the RSVP (and notify) once.
            if not self.object.rsvps.filter(person=request.user.person).exists():
                rsvp = RSVP.objects.create(event=self.object, person=request.user.person)
                send_rsvp_notification.delay(rsvp.pk)
            return HttpResponseRedirect(reverse('view_event', kwargs={'pk': self.object.pk}))
        return HttpResponseBadRequest()
class ManageEventView(HardLoginRequiredMixin, PermissionsRequiredMixin, DetailView):
    """Organizer dashboard: list attendees and add co-organizers."""
    template_name = "front/events/manage.html"
    permissions_required = ('events.change_event',)
    queryset = Event.objects.filter(published=True)
    error_messages = {
        'denied': _("Vous ne pouvez pas accéder à cette page sans être organisateur de l'événement.")
    }
    def get_success_url(self):
        return reverse('manage_event', kwargs={'pk': self.object.pk})
    def get_form(self):
        """Build the add-organizer form, bound to POST data on POST/PUT."""
        kwargs = {}
        if self.request.method in ('POST', 'PUT'):
            kwargs.update({
                'data': self.request.POST,
            })
        return AddOrganizerForm(self.object, **kwargs)
    def get_context_data(self, **kwargs):
        return super().get_context_data(
            add_organizer_form=self.get_form(),
            organizers=self.object.organizers.all(),
            rsvps=self.object.rsvps.all(),
            **kwargs
        )
    def post(self, request, *args, **kwargs):
        """Add an organizer; refused once the event is over."""
        self.object = self.get_object()
        if self.object.is_past():
            raise PermissionDenied(_('Vous ne pouvez pas ajouter d\'organisateur à un événement terminé.'))
        form = self.get_form()
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(self.get_success_url())
        return self.render_to_response(self.get_context_data(add_organizer_form=form))
class CreateEventView(HardLoginRequiredMixin, CreateView):
    """Event creation form; redirects to the management page on success."""
    template_name = "front/events/create.html"
    model = Event
    form_class = EventForm

    def get_success_url(self):
        return reverse('manage_event', kwargs={'pk': self.object.pk})

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['title'] = _('Publiez votre événement')
        return context

    def get_form_kwargs(self):
        """Prefill contact fields from the logged-in person; honour ?as_group=."""
        kwargs = super().get_form_kwargs()

        person = self.request.user.person

        kwargs['initial'] = {
            'contact_name': person.get_full_name(),
            'contact_email': person.email,
            'contact_phone': person.contact_phone,
        }

        if self.request.method == 'GET' and self.request.GET.get('as_group'):
            try:
                kwargs['initial']['as_group'] = SupportGroup.objects.get(pk=self.request.GET.get('as_group'))
            except SupportGroup.DoesNotExist:
                pass
            # ValidationError covers a malformed pk value in the query string.
            except ValidationError:
                pass

        kwargs['person'] = person

        return kwargs

    def form_valid(self, form):
        # first get response to make sure there's no error when saving the model before adding message
        res = super().form_valid(form)

        # show message
        messages.add_message(
            request=self.request,
            level=messages.SUCCESS,
            message="Votre événement a été correctement créé.",
        )

        return res
class ModifyEventView(HardLoginRequiredMixin, PermissionsRequiredMixin, UpdateView):
    """Edit form for an upcoming event (past events are excluded)."""
    permissions_required = ('events.change_event',)
    template_name = "front/events/modify.html"
    form_class = EventForm

    def get_success_url(self):
        return reverse('manage_event', kwargs={'pk': self.object.pk})

    def get_queryset(self):
        # Only upcoming events may be edited.
        return Event.objects.upcoming(as_of=timezone.now())

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['person'] = self.request.user.person
        return kwargs

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['title'] = _('Modifiez votre événement')
        return context

    def form_valid(self, form):
        # first get response to make sure there's no error when saving the model before adding message
        res = super().form_valid(form)
        messages.add_message(
            request=self.request,
            level=messages.SUCCESS,
            message=format_html(_("Les modifications de l'événement <em>{}</em> ont été enregistrées."),
                                self.object.name)
        )
        return res
class CancelEventView(HardLoginRequiredMixin, PermissionsRequiredMixin, DetailView):
    """Cancellation page for an upcoming event.

    POST unpublishes the event (the row is kept) and notifies attendees.
    """
    permissions_required = ('events.change_event',)
    template_name = 'front/events/cancel.html'
    success_url = reverse_lazy('list_events')

    def get_queryset(self):
        return Event.objects.upcoming(as_of=timezone.now())

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()

        # "Cancelling" = unpublishing; the event record itself is preserved.
        self.object.published = False
        self.object.save()

        send_cancellation_notification.delay(self.object.pk)

        messages.add_message(
            request,
            messages.WARNING,
            _("L'événement « {} » a bien été annulé.").format(self.object.name)
        )

        return HttpResponseRedirect(self.success_url)
class QuitEventView(SoftLoginRequiredMixin, DeleteView):
    """Confirmation page deleting the current person's RSVP for an event."""
    template_name = "front/events/quit.html"
    success_url = reverse_lazy("list_events")
    context_object_name = 'rsvp'

    def get_queryset(self):
        return RSVP.objects.upcoming(as_of=timezone.now())

    def get_object(self, queryset=None):
        # The URL pk identifies the *event*; the object deleted is the RSVP
        # linking that event to the logged-in person.
        try:
            return self.get_queryset().select_related('event').get(
                event__pk=self.kwargs['pk'],
                person=self.request.user.person
            )
        except RSVP.DoesNotExist:
            # TODO show specific 404 page maybe?
            raise Http404()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['event'] = self.object.event
        context['success_url'] = self.get_success_url()
        return context

    def delete(self, request, *args, **kwargs):
        # first get response to make sure there's no error before adding message
        res = super().delete(request, *args, **kwargs)
        messages.add_message(
            request,
            messages.SUCCESS,
            format_html(_("Vous ne participez plus à l'événement <em>{}</em>"), self.object.event.name)
        )
        return res
class CalendarView(ObjectOpengraphMixin, DetailView):
    """Calendar page listing the calendar's upcoming events, paginated."""
    template_name = "front/events/calendar.html"
    model = Calendar
    paginator_class = Paginator
    per_page = 10

    def get_context_data(self, **kwargs):
        all_events = self.object.events.upcoming(as_of=timezone.now()).order_by('start_time')
        paginator = self.paginator_class(all_events, self.per_page)

        page = self.request.GET.get('page')
        try:
            events = paginator.page(page)
        except PageNotAnInteger:
            # Non-numeric (or absent) ?page= falls back to the first page.
            events = paginator.page(1)
        except EmptyPage:
            # Out-of-range ?page= falls back to the last page.
            events = paginator.page(paginator.num_pages)

        return super().get_context_data(
            events=events,
            default_event_image=settings.DEFAULT_EVENT_IMAGE,
        )
class ChangeEventLocationView(ChangeLocationBaseView):
    """Manual geocoding page for an upcoming event's location."""
    template_name = 'front/events/change_location.html'
    form_class = EventGeocodingForm
    success_view_name = 'manage_event'

    def get_queryset(self):
        return Event.objects.upcoming(as_of=timezone.now())
class EditEventReportView(PermissionsRequiredMixin, UpdateView):
    """Edit the report of a *past* event (upcoming events are excluded)."""
    template_name = 'front/events/edit_event_report.html'
    permissions_required = ('events.change_event',)
    form_class = EventReportForm

    def get_success_url(self):
        return reverse('manage_event', args=(self.object.pk,))

    def get_queryset(self):
        return Event.objects.past(as_of=timezone.now())
class UploadEventImageView(CreateView):
    """Let a participant of a past event upload a picture of it.

    Two forms are processed together: the image form proper and an
    AuthorForm updating the uploader's profile information.

    NOTE(review): no login mixin here although ``request.user.person`` is
    used — presumably authentication is guaranteed upstream; confirm.
    """
    template_name = 'front/events/upload_event_image.html'
    form_class = UploadEventImageForm

    def get_queryset(self):
        # Images can only be attached to events that already happened.
        return Event.objects.past(as_of=timezone.now())

    def get_success_url(self):
        return reverse('view_event', args=(self.event.pk,))

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs.update({
            'author': self.request.user.person,
            'event': self.event
        })
        return kwargs

    def get_author_form(self):
        """Secondary form editing the uploader's author information."""
        author_form_kwargs = {
            'instance': self.request.user.person,
        }

        if self.request.method in ['POST', 'PUT']:
            author_form_kwargs['data'] = self.request.POST

        return AuthorForm(**author_form_kwargs)

    def get_context_data(self, **kwargs):
        # Keep a bound author form (with its errors) when re-rendering.
        if 'author_form' not in kwargs:
            kwargs['author_form'] = self.get_author_form()

        return super().get_context_data(
            event=self.event,
            **kwargs
        )

    def get(self, request, *args, **kwargs):
        self.object = None
        self.event = self.get_object()

        if not self.event.rsvps.filter(person=request.user.person).exists():
            raise PermissionDenied(_("Seuls les participants à l'événement peuvent poster des images"))

        return super().get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.object = None
        self.event = self.get_object()

        if not self.event.rsvps.filter(person=request.user.person).exists():
            raise PermissionDenied(_("Seuls les participants à l'événement peuvent poster des images"))

        form = self.get_form()
        author_form = self.get_author_form()

        # Both forms must validate before anything is saved.
        if form.is_valid() and author_form.is_valid():
            return self.form_valid(form, author_form)
        else:
            return self.form_invalid(form, author_form)

    def form_invalid(self, form, author_form):
        return self.render_to_response(self.get_context_data(form=form, author_form=author_form))

    def form_valid(self, form, author_form):
        author_form.save()
        form.save()

        messages.add_message(
            self.request,
            messages.SUCCESS,
            _("Votre photo a correctement été importée, merci de l'avoir partagée !")
        )

        return HttpResponseRedirect(self.get_success_url())
Fix bug when adding organizer to event
from django.utils.translation import ugettext as _
from django.utils.decorators import method_decorator
from django.utils.html import format_html
from django.contrib.auth.decorators import login_required
from django.views.generic import CreateView, UpdateView, ListView, DeleteView, DetailView
from django.core.urlresolvers import reverse_lazy, reverse
from django.contrib import messages
from django.http import Http404, HttpResponseRedirect, HttpResponseBadRequest
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.conf import settings
from django.utils import timezone
from django.core.exceptions import PermissionDenied, ValidationError
from events.models import Event, RSVP, Calendar
from events.tasks import send_cancellation_notification, send_rsvp_notification
from groups.models import SupportGroup
from ..forms import (
EventForm, AddOrganizerForm, EventGeocodingForm, EventReportForm, UploadEventImageForm, AuthorForm, SearchEventForm
)
from ..view_mixins import (
HardLoginRequiredMixin, SoftLoginRequiredMixin, PermissionsRequiredMixin, ObjectOpengraphMixin,
ChangeLocationBaseView, SearchByZipcodeBaseView
)
__all__ = [
'CreateEventView', 'ManageEventView', 'ModifyEventView', 'QuitEventView', 'CancelEventView',
'EventDetailView', 'CalendarView', 'ChangeEventLocationView', 'EditEventReportView', 'UploadEventImageView',
'EventListView',
]
class EventListView(SearchByZipcodeBaseView):
    """List of events, filter by zipcode
    """
    # NOTE(review): presumably the minimum number of results before the
    # search radius is widened — confirm against SearchByZipcodeBaseView.
    min_items = 20
    template_name = 'front/events/event_list.html'
    context_object_name = 'events'
    form_class = SearchEventForm

    def get_base_queryset(self):
        return Event.objects.upcoming().order_by('start_time')
class EventDetailView(ObjectOpengraphMixin, DetailView):
    """Public detail page for a published event; POST registers an RSVP."""
    template_name = "front/events/detail.html"
    queryset = Event.objects.filter(published=True)

    title_prefix = _("Evénement local")
    meta_description = _("Participez aux événements organisés par les membres de la France insoumise.")

    def get_context_data(self, **kwargs):
        # Both flags short-circuit to False for anonymous users, before
        # request.user.person (which only exists for logged-in users) is read.
        return super().get_context_data(
            has_rsvp=self.request.user.is_authenticated and self.object.rsvps.filter(
                person=self.request.user.person).exists(),
            is_organizer=self.request.user.is_authenticated and self.object.organizers.filter(
                pk=self.request.user.person.id).exists(),
            organizers_groups=self.object.organizers_groups.distinct(),
            event_images=self.object.images.all(),
        )

    @method_decorator(login_required(login_url=reverse_lazy('oauth_redirect_view')), )
    def post(self, request, *args, **kwargs):
        """Handle the 'rsvp' action; any other (or missing) action is a 400."""
        self.object = self.get_object()

        # Fix: use .get() instead of ['action'] so a missing key yields the
        # intended 400 response instead of an uncaught MultiValueDictKeyError
        # (KeyError subclass), which would surface as a 500.
        if request.POST.get('action') == 'rsvp':
            # Idempotent: only create the RSVP (and notify) once per person.
            if not self.object.rsvps.filter(person=request.user.person).exists():
                rsvp = RSVP.objects.create(event=self.object, person=request.user.person)
                send_rsvp_notification.delay(rsvp.pk)
            return HttpResponseRedirect(reverse('view_event', kwargs={'pk': self.object.pk}))

        return HttpResponseBadRequest()
class ManageEventView(HardLoginRequiredMixin, PermissionsRequiredMixin, DetailView):
    """Management page for an event's organizers.

    GET shows the organizer/RSVP lists plus a form to add an organizer;
    POST processes that form (only for events that are not over yet).
    """
    template_name = "front/events/manage.html"
    permissions_required = ('events.change_event',)
    queryset = Event.objects.filter(published=True)

    error_messages = {
        'denied': _("Vous ne pouvez pas accéder à cette page sans être organisateur de l'événement.")
    }

    def get_success_url(self):
        return reverse('manage_event', kwargs={'pk': self.object.pk})

    def get_form(self):
        """Build the AddOrganizerForm, bound to POST data when relevant."""
        kwargs = {}

        if self.request.method in ('POST', 'PUT'):
            kwargs.update({
                'data': self.request.POST,
            })

        return AddOrganizerForm(self.object, **kwargs)

    def get_context_data(self, **kwargs):
        # post() passes the bound form back through kwargs when validation
        # fails; only build a fresh form when none was provided (avoids a
        # duplicate keyword argument).
        if 'add_organizer_form' not in kwargs:
            kwargs['add_organizer_form'] = self.get_form()
        return super().get_context_data(
            organizers=self.object.organizers.all(),
            rsvps=self.object.rsvps.all(),
            **kwargs
        )

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()

        if self.object.is_past():
            raise PermissionDenied(_('Vous ne pouvez pas ajouter d\'organisateur à un événement terminé.'))

        form = self.get_form()

        if form.is_valid():
            form.save()
            return HttpResponseRedirect(self.get_success_url())

        return self.render_to_response(self.get_context_data(add_organizer_form=form))
class CreateEventView(HardLoginRequiredMixin, CreateView):
    """Event creation form; redirects to the management page on success."""
    template_name = "front/events/create.html"
    model = Event
    form_class = EventForm

    def get_success_url(self):
        return reverse('manage_event', kwargs={'pk': self.object.pk})

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['title'] = _('Publiez votre événement')
        return context

    def get_form_kwargs(self):
        """Prefill contact fields from the logged-in person; honour ?as_group=."""
        kwargs = super().get_form_kwargs()

        person = self.request.user.person

        kwargs['initial'] = {
            'contact_name': person.get_full_name(),
            'contact_email': person.email,
            'contact_phone': person.contact_phone,
        }

        if self.request.method == 'GET' and self.request.GET.get('as_group'):
            try:
                kwargs['initial']['as_group'] = SupportGroup.objects.get(pk=self.request.GET.get('as_group'))
            except SupportGroup.DoesNotExist:
                pass
            # ValidationError covers a malformed pk value in the query string.
            except ValidationError:
                pass

        kwargs['person'] = person

        return kwargs

    def form_valid(self, form):
        # first get response to make sure there's no error when saving the model before adding message
        res = super().form_valid(form)

        # show message
        messages.add_message(
            request=self.request,
            level=messages.SUCCESS,
            message="Votre événement a été correctement créé.",
        )

        return res
class ModifyEventView(HardLoginRequiredMixin, PermissionsRequiredMixin, UpdateView):
    """Edit form for an upcoming event (past events are excluded)."""
    permissions_required = ('events.change_event',)
    template_name = "front/events/modify.html"
    form_class = EventForm

    def get_success_url(self):
        return reverse('manage_event', kwargs={'pk': self.object.pk})

    def get_queryset(self):
        # Only upcoming events may be edited.
        return Event.objects.upcoming(as_of=timezone.now())

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['person'] = self.request.user.person
        return kwargs

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['title'] = _('Modifiez votre événement')
        return context

    def form_valid(self, form):
        # first get response to make sure there's no error when saving the model before adding message
        res = super().form_valid(form)
        messages.add_message(
            request=self.request,
            level=messages.SUCCESS,
            message=format_html(_("Les modifications de l'événement <em>{}</em> ont été enregistrées."),
                                self.object.name)
        )
        return res
class CancelEventView(HardLoginRequiredMixin, PermissionsRequiredMixin, DetailView):
    """Cancellation page for an upcoming event.

    POST unpublishes the event (the row is kept) and notifies attendees.
    """
    permissions_required = ('events.change_event',)
    template_name = 'front/events/cancel.html'
    success_url = reverse_lazy('list_events')

    def get_queryset(self):
        return Event.objects.upcoming(as_of=timezone.now())

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()

        # "Cancelling" = unpublishing; the event record itself is preserved.
        self.object.published = False
        self.object.save()

        send_cancellation_notification.delay(self.object.pk)

        messages.add_message(
            request,
            messages.WARNING,
            _("L'événement « {} » a bien été annulé.").format(self.object.name)
        )

        return HttpResponseRedirect(self.success_url)
class QuitEventView(SoftLoginRequiredMixin, DeleteView):
    """Confirmation page deleting the current person's RSVP for an event."""
    template_name = "front/events/quit.html"
    success_url = reverse_lazy("list_events")
    context_object_name = 'rsvp'

    def get_queryset(self):
        return RSVP.objects.upcoming(as_of=timezone.now())

    def get_object(self, queryset=None):
        # The URL pk identifies the *event*; the object deleted is the RSVP
        # linking that event to the logged-in person.
        try:
            return self.get_queryset().select_related('event').get(
                event__pk=self.kwargs['pk'],
                person=self.request.user.person
            )
        except RSVP.DoesNotExist:
            # TODO show specific 404 page maybe?
            raise Http404()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['event'] = self.object.event
        context['success_url'] = self.get_success_url()
        return context

    def delete(self, request, *args, **kwargs):
        # first get response to make sure there's no error before adding message
        res = super().delete(request, *args, **kwargs)
        messages.add_message(
            request,
            messages.SUCCESS,
            format_html(_("Vous ne participez plus à l'événement <em>{}</em>"), self.object.event.name)
        )
        return res
class CalendarView(ObjectOpengraphMixin, DetailView):
    """Calendar page listing the calendar's upcoming events, paginated."""
    template_name = "front/events/calendar.html"
    model = Calendar
    paginator_class = Paginator
    per_page = 10

    def get_context_data(self, **kwargs):
        all_events = self.object.events.upcoming(as_of=timezone.now()).order_by('start_time')
        paginator = self.paginator_class(all_events, self.per_page)

        page = self.request.GET.get('page')
        try:
            events = paginator.page(page)
        except PageNotAnInteger:
            # Non-numeric (or absent) ?page= falls back to the first page.
            events = paginator.page(1)
        except EmptyPage:
            # Out-of-range ?page= falls back to the last page.
            events = paginator.page(paginator.num_pages)

        return super().get_context_data(
            events=events,
            default_event_image=settings.DEFAULT_EVENT_IMAGE,
        )
class ChangeEventLocationView(ChangeLocationBaseView):
    """Manual geocoding page for an upcoming event's location."""
    template_name = 'front/events/change_location.html'
    form_class = EventGeocodingForm
    success_view_name = 'manage_event'

    def get_queryset(self):
        return Event.objects.upcoming(as_of=timezone.now())
class EditEventReportView(PermissionsRequiredMixin, UpdateView):
    """Edit the report of a *past* event (upcoming events are excluded)."""
    template_name = 'front/events/edit_event_report.html'
    permissions_required = ('events.change_event',)
    form_class = EventReportForm

    def get_success_url(self):
        return reverse('manage_event', args=(self.object.pk,))

    def get_queryset(self):
        return Event.objects.past(as_of=timezone.now())
class UploadEventImageView(CreateView):
    """Let a participant of a past event upload a picture of it.

    Two forms are processed together: the image form proper and an
    AuthorForm updating the uploader's profile information.

    NOTE(review): no login mixin here although ``request.user.person`` is
    used — presumably authentication is guaranteed upstream; confirm.
    """
    template_name = 'front/events/upload_event_image.html'
    form_class = UploadEventImageForm

    def get_queryset(self):
        # Images can only be attached to events that already happened.
        return Event.objects.past(as_of=timezone.now())

    def get_success_url(self):
        return reverse('view_event', args=(self.event.pk,))

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs.update({
            'author': self.request.user.person,
            'event': self.event
        })
        return kwargs

    def get_author_form(self):
        """Secondary form editing the uploader's author information."""
        author_form_kwargs = {
            'instance': self.request.user.person,
        }

        if self.request.method in ['POST', 'PUT']:
            author_form_kwargs['data'] = self.request.POST

        return AuthorForm(**author_form_kwargs)

    def get_context_data(self, **kwargs):
        # Keep a bound author form (with its errors) when re-rendering.
        if 'author_form' not in kwargs:
            kwargs['author_form'] = self.get_author_form()

        return super().get_context_data(
            event=self.event,
            **kwargs
        )

    def get(self, request, *args, **kwargs):
        self.object = None
        self.event = self.get_object()

        if not self.event.rsvps.filter(person=request.user.person).exists():
            raise PermissionDenied(_("Seuls les participants à l'événement peuvent poster des images"))

        return super().get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.object = None
        self.event = self.get_object()

        if not self.event.rsvps.filter(person=request.user.person).exists():
            raise PermissionDenied(_("Seuls les participants à l'événement peuvent poster des images"))

        form = self.get_form()
        author_form = self.get_author_form()

        # Both forms must validate before anything is saved.
        if form.is_valid() and author_form.is_valid():
            return self.form_valid(form, author_form)
        else:
            return self.form_invalid(form, author_form)

    def form_invalid(self, form, author_form):
        return self.render_to_response(self.get_context_data(form=form, author_form=author_form))

    def form_valid(self, form, author_form):
        author_form.save()
        form.save()

        messages.add_message(
            self.request,
            messages.SUCCESS,
            _("Votre photo a correctement été importée, merci de l'avoir partagée !")
        )

        return HttpResponseRedirect(self.get_success_url())
|
import webapp2
class MainPage(webapp2.RequestHandler):
    """Debug handler: echoes the request path and query parameters as text."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write(self.request.path)
        self.response.write('\n')
        self.response.write(str(self.request.GET))
        self.response.write('\n')
# Catch-all route: every path is handled by MainPage.
application = webapp2.WSGIApplication([
    webapp2.Route(r'/.*', handler=MainPage, name='main'),
], debug=True)
Restrict the catch-all route (r'/.*') to the site root (r'/')
import webapp2
class MainPage(webapp2.RequestHandler):
    """Debug handler: echoes the request path and query parameters as text."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write(self.request.path)
        self.response.write('\n')
        self.response.write(str(self.request.GET))
        self.response.write('\n')
# Route matches only the site root; any other path will 404.
application = webapp2.WSGIApplication([
    webapp2.Route(r'/', handler=MainPage, name='main'),
], debug=True)
|
"""
helloworld.py
Author: <your name here>
Credit: <list sources used, if any>
Assignment:
Write and submit a Python program that prints the following:
Hello, world!
"""
print("Hello, world!")
Updated from Brython Server: 1/21/2016 2:56:44 PM
"""
helloworld.py
Author: <David Wilson>
Credit: <Will Campbell>
Assignment:
Write and submit a Python program that prints the following:
Hello, world!
"""
print("Hello, world!") |
import math
from datetime import datetime, timedelta
from flask import jsonify
from sqlalchemy import and_, not_
from server import app, sqldb
from server.auth import internal_auth
from server.notifications import Notification, NotificationToken, send_push_notification_batch
from server.studyspaces.availability import get_room_name
from server.studyspaces.models import GSRRoomName, StudySpacesBooking
@app.route('/studyspaces/reminders/send', methods=['POST'])
@internal_auth
def request_send_reminders():
    """Internal endpoint triggering GSR reminder push notifications."""
    send_reminders()
    return jsonify({'result': 'success'})
def send_reminders():
    """Send a push notification to users whose GSR booking starts soon.

    Query logic — bookings that meet ALL of:
      1) start within the next 10 minutes
      2) were booked more than 30 minutes before their start time
      3) have not been cancelled
      4) have not been sent a reminder yet
      5) have an associated account with an iOS push notification token
    """
    now = datetime.now()
    check_start_date = now + timedelta(minutes=10)

    # Bug fix: NULL checks on columns must use ``isnot(None)``. The previous
    # ``Column is not None`` was a Python identity test evaluated once at
    # import time (always True), so the account/token filters were silently
    # no-ops.
    get_gsr = StudySpacesBooking.query \
        .filter(StudySpacesBooking.start <= check_start_date) \
        .filter(StudySpacesBooking.start > now) \
        .filter(StudySpacesBooking.date < StudySpacesBooking.start - timedelta(minutes=30)) \
        .filter(not_(StudySpacesBooking.is_cancelled)) \
        .filter(not_(StudySpacesBooking.reminder_sent)) \
        .filter(StudySpacesBooking.account.isnot(None)) \
        .subquery()
    get_tokens = NotificationToken.query.filter(NotificationToken.ios_token.isnot(None)).subquery()

    join_qry = sqldb.session.query(get_gsr.c.id, get_gsr.c.lid, get_gsr.c.rid, GSRRoomName.name,
                                   get_gsr.c.start, get_tokens.c.ios_token) \
        .select_from(get_gsr) \
        .join(get_tokens, get_gsr.c.account == get_tokens.c.account) \
        .join(GSRRoomName, and_(get_gsr.c.lid == GSRRoomName.lid,
                                get_gsr.c.rid == GSRRoomName.rid), isouter=True) \
        .all()

    booking_ids = []
    notifications = []
    for bid, lid, rid, name, start, token in join_qry:
        # start > now was enforced above, so the delta is positive and well
        # under one day — ``.seconds`` is safe here.
        minutes_to_start = int(math.ceil((start - now).seconds / 60))
        title = 'Upcoming reservation'
        if not name:
            # Fetch name from API if it does not already exist in the DB
            name = get_room_name(lid, rid)
        if name:
            body = 'You have reserved {} starting in {} minutes'.format(name, minutes_to_start)
        else:
            body = 'You have a reservation starting in {} minutes'.format(minutes_to_start)
        alert = {'title': title, 'body': body}
        notification = Notification(token=token, alert=alert)
        notifications.append(notification)
        booking_ids.append(bid)

    if notifications:
        send_push_notification_batch(notifications)

        # Flag each booking as SENT so that a duplicate notification is not accidentally sent
        bookings = StudySpacesBooking.query.filter(StudySpacesBooking.id.in_(tuple(booking_ids))).all()
        for booking in bookings:
            booking.reminder_sent = True
        sqldb.session.commit()
add dev support
import math
from datetime import datetime, timedelta
from flask import jsonify, request
from sqlalchemy import and_, not_
from server import app, sqldb
from server.auth import internal_auth
from server.notifications import Notification, NotificationToken, send_push_notification_batch
from server.studyspaces.availability import get_room_name
from server.studyspaces.models import GSRRoomName, StudySpacesBooking
@app.route('/studyspaces/reminders/send', methods=['POST'])
@internal_auth
def request_send_reminders():
    """Internal endpoint triggering GSR reminder push notifications.

    POSTing a truthy ``dev`` form field routes pushes through the
    development APNs environment.
    """
    # bool() replaces the redundant ``True if x else False`` ternary.
    isDev = bool(request.form.get("dev"))
    send_reminders(isDev)
    return jsonify({'result': 'success'})
def send_reminders(isDev=False):
    """Send a push notification to users whose GSR booking starts soon.

    Query logic — bookings that meet ALL of:
      1) start within the next 30 minutes
      2) were booked more than 30 minutes before their start time
      3) have not been cancelled
      4) have not been sent a reminder yet
      5) have an associated account with an iOS push notification token

    ``isDev`` routes the pushes through the development APNs environment.
    """
    now = datetime.now()
    check_start_date = now + timedelta(minutes=30)

    # Bug fix: NULL checks on columns must use ``isnot(None)``. The previous
    # ``Column is not None`` was a Python identity test evaluated once at
    # import time (always True), so the account/token filters were silently
    # no-ops.
    get_gsr = StudySpacesBooking.query \
        .filter(StudySpacesBooking.start <= check_start_date) \
        .filter(StudySpacesBooking.start > now) \
        .filter(StudySpacesBooking.date < StudySpacesBooking.start - timedelta(minutes=30)) \
        .filter(not_(StudySpacesBooking.is_cancelled)) \
        .filter(not_(StudySpacesBooking.reminder_sent)) \
        .filter(StudySpacesBooking.account.isnot(None)) \
        .subquery()
    get_tokens = NotificationToken.query.filter(NotificationToken.ios_token.isnot(None)).subquery()

    join_qry = sqldb.session.query(get_gsr.c.id, get_gsr.c.lid, get_gsr.c.rid, GSRRoomName.name,
                                   get_gsr.c.start, get_tokens.c.ios_token) \
        .select_from(get_gsr) \
        .join(get_tokens, get_gsr.c.account == get_tokens.c.account) \
        .join(GSRRoomName, and_(get_gsr.c.lid == GSRRoomName.lid,
                                get_gsr.c.rid == GSRRoomName.rid), isouter=True) \
        .all()

    booking_ids = []
    notifications = []
    for bid, lid, rid, name, start, token in join_qry:
        # start > now was enforced above, so the delta is positive and well
        # under one day — ``.seconds`` is safe here.
        minutes_to_start = int(math.ceil((start - now).seconds / 60))
        title = 'Upcoming reservation'
        if not name:
            # Fetch name from API if it does not already exist in the DB
            name = get_room_name(lid, rid)
        if name:
            body = 'You have reserved {} starting in {} minutes'.format(name, minutes_to_start)
        else:
            body = 'You have a reservation starting in {} minutes'.format(minutes_to_start)
        alert = {'title': title, 'body': body}
        notification = Notification(token=token, alert=alert)
        notifications.append(notification)
        booking_ids.append(bid)

    if notifications:
        send_push_notification_batch(notifications, isDev)

        # Flag each booking as SENT so that a duplicate notification is not accidentally sent
        bookings = StudySpacesBooking.query.filter(StudySpacesBooking.id.in_(tuple(booking_ids))).all()
        for booking in bookings:
            booking.reminder_sent = True
        sqldb.session.commit()
|
#!/usr/bin/env python
# Based on previous work by
# Charles Menguy (see: http://stackoverflow.com/questions/10217067/implementing-a-full-python-unix-style-daemon-process)
# and Sander Marechal (see: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/)
# Adapted by M.Hendrix [2015]
# daemon12.py measures the CPU load.
import os, sys, time, math, commands, syslog
from libdaemon import Daemon
DEBUG = False
IS_SYSTEMD = os.path.isfile('/bin/journalctl')
class MyDaemon(Daemon):
    # Daemon main loop (Python 2): take one CPU-load sample every
    # `sampleTime` seconds and report the average of every `samples`
    # consecutive samples to the CSV log.
    def run(self):
        sampleptr = 0
        samples = 5
        datapoints = 11
        data = [[None]*datapoints for _ in range(samples)]
        sampleTime = 12
        cycleTime = samples * sampleTime
        # sync to whole minute
        waitTime = (cycleTime + sampleTime) - (time.time() % cycleTime)
        if DEBUG:
            print "NOT waiting {0} s.".format(waitTime)
        else:
            time.sleep(waitTime)
        while True:
            startTime = time.time()
            result = do_work().split(',')
            if DEBUG:print result
            data[sampleptr] = map(float, result)
            # report sample average
            sampleptr = sampleptr + 1
            if (sampleptr == samples):
                if DEBUG:print data
                # column-wise sums over the collected samples
                somma = map(sum,zip(*data))
                # not all entries should be float
                # 0.37, 0.18, 0.17, 4, 143, 32147, 3, 4, 93, 0, 0
                averages = [format(s / samples, '.3f') for s in somma]
                # loadavg fields 3..5 (running procs, total procs, last pid)
                # keep the most recent raw value instead of being averaged
                averages[3]=int(data[sampleptr-1][3])
                averages[4]=int(data[sampleptr-1][4])
                averages[5]=int(data[sampleptr-1][5])
                if DEBUG:print averages
                do_report(averages)
                sampleptr = 0
            # sleep until the next sampleTime boundary
            waitTime = sampleTime - (time.time() - startTime) - (startTime%sampleTime)
            if (waitTime > 0):
                if DEBUG:print "Waiting {0} s".format(waitTime)
                time.sleep(waitTime)
def do_work():
    # Collect one sample of load/CPU data as a comma-separated string.
    # 6 datapoints gathered here
    outHistLoad = commands.getoutput("cat /proc/loadavg").replace(" ",", ").replace("/",", ")
    # 5 datapoints gathered here
    outCpu = commands.getoutput("vmstat 1 2").splitlines()[3].split()
    # vmstat CPU columns: user, system, idle, wait; steal is hardcoded to 0
    outCpuUS = outCpu[12]
    outCpuSY = outCpu[13]
    outCpuID = outCpu[14]
    outCpuWA = outCpu[15]
    outCpuST = 0
    return '{0}, {1}, {2}, {3}, {4}, {5}'.format(outHistLoad, outCpuUS, outCpuSY, outCpuID, outCpuWA, outCpuST)
def do_report(result):
    # Append the averaged sample, timestamped, to the CSV file while the
    # advisory lock file exists (lock() only touches a file; it does not
    # provide mutual exclusion by itself).
    # Get the time and date in human-readable form and UN*X-epoch...
    outDate = commands.getoutput("date '+%F %H:%M:%S, %s'")
    result = ', '.join(map(str, result))
    flock = '/tmp/raspdiagd/12.lock'
    lock(flock)
    f = file('/tmp/raspdiagd/12-load-cpu.csv', 'a')
    f.write('{0}, {1}\n'.format(outDate, result) )
    f.close()
    unlock(flock)
    return
def lock(fname):
    """Signal a critical section by touching the given lock file."""
    with open(fname, 'a'):
        pass
def unlock(fname):
    """Delete the lock file created by lock(); no-op when absent."""
    if not os.path.isfile(fname):
        return
    os.remove(fname)
if __name__ == "__main__":
daemon = MyDaemon('/tmp/raspdiagd/12.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'foreground' == sys.argv[1]:
# assist with debugging.
print "Debug-mode started. Use <Ctrl>+C to stop."
DEBUG = True
if DEBUG:
logtext = "Daemon logging is ON"
syslog.syslog(syslog.LOG_DEBUG, logtext)
daemon.run()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart|foreground" % sys.argv[0]
sys.exit(2)
indentation
#!/usr/bin/env python
# Based on previous work by
# Charles Menguy (see: http://stackoverflow.com/questions/10217067/implementing-a-full-python-unix-style-daemon-process)
# and Sander Marechal (see: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/)
# Adapted by M.Hendrix [2015]
# daemon12.py measures the CPU load.
import os, sys, time, math, commands, syslog
from libdaemon import Daemon
DEBUG = False
IS_SYSTEMD = os.path.isfile('/bin/journalctl')
class MyDaemon(Daemon):
    # Daemon main loop (Python 2): take one CPU-load sample every
    # `sampleTime` seconds and report the average of every `samples`
    # consecutive samples to the CSV log.
    def run(self):
        sampleptr = 0
        samples = 5
        datapoints = 11
        data = [[None]*datapoints for _ in range(samples)]
        sampleTime = 12
        cycleTime = samples * sampleTime
        # sync to whole minute
        waitTime = (cycleTime + sampleTime) - (time.time() % cycleTime)
        if DEBUG:
            print "NOT waiting {0} s.".format(waitTime)
        else:
            time.sleep(waitTime)
        while True:
            startTime = time.time()
            result = do_work().split(',')
            if DEBUG:print result
            data[sampleptr] = map(float, result)
            # report sample average
            sampleptr = sampleptr + 1
            if (sampleptr == samples):
                if DEBUG:print data
                # column-wise sums over the collected samples
                somma = map(sum,zip(*data))
                # not all entries should be float
                # 0.37, 0.18, 0.17, 4, 143, 32147, 3, 4, 93, 0, 0
                averages = [format(s / samples, '.3f') for s in somma]
                # loadavg fields 3..5 (running procs, total procs, last pid)
                # keep the most recent raw value instead of being averaged
                averages[3]=int(data[sampleptr-1][3])
                averages[4]=int(data[sampleptr-1][4])
                averages[5]=int(data[sampleptr-1][5])
                if DEBUG:print averages
                do_report(averages)
                sampleptr = 0
            # sleep until the next sampleTime boundary
            waitTime = sampleTime - (time.time() - startTime) - (startTime%sampleTime)
            if (waitTime > 0):
                if DEBUG:print "Waiting {0} s".format(waitTime)
                time.sleep(waitTime)
def do_work():
    # Collect one sample of load/CPU data as a comma-separated string.
    # 6 datapoints gathered here
    outHistLoad = commands.getoutput("cat /proc/loadavg").replace(" ",", ").replace("/",", ")
    # 5 datapoints gathered here
    outCpu = commands.getoutput("vmstat 1 2").splitlines()[3].split()
    # vmstat CPU columns: user, system, idle, wait; steal is hardcoded to 0
    outCpuUS = outCpu[12]
    outCpuSY = outCpu[13]
    outCpuID = outCpu[14]
    outCpuWA = outCpu[15]
    outCpuST = 0
    return '{0}, {1}, {2}, {3}, {4}, {5}'.format(outHistLoad, outCpuUS, outCpuSY, outCpuID, outCpuWA, outCpuST)
def do_report(result):
    # Append the averaged sample, timestamped, to the CSV file while the
    # advisory lock file exists (lock() only touches a file; it does not
    # provide mutual exclusion by itself).
    # Get the time and date in human-readable form and UN*X-epoch...
    outDate = commands.getoutput("date '+%F %H:%M:%S, %s'")
    result = ', '.join(map(str, result))
    flock = '/tmp/raspdiagd/12.lock'
    lock(flock)
    f = file('/tmp/raspdiagd/12-load-cpu.csv', 'a')
    f.write('{0}, {1}\n'.format(outDate, result) )
    f.close()
    unlock(flock)
    return
def lock(fname):
    """Signal a critical section by touching the given lock file."""
    with open(fname, 'a'):
        pass
def unlock(fname):
    """Delete the lock file created by lock(); no-op when absent."""
    if not os.path.isfile(fname):
        return
    os.remove(fname)
if __name__ == "__main__":
daemon = MyDaemon('/tmp/raspdiagd/12.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'foreground' == sys.argv[1]:
# assist with debugging.
print "Debug-mode started. Use <Ctrl>+C to stop."
DEBUG = True
if DEBUG:
logtext = "Daemon logging is ON"
syslog.syslog(syslog.LOG_DEBUG, logtext)
daemon.run()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart|foreground" % sys.argv[0]
sys.exit(2)
|
import os.path
import shutil
from datetime import datetime
from unittest import TestCase, SkipTest
from whylog.config import SettingsFactorySelector
from whylog.config.investigation_plan import Clue, LineSource
from whylog.config.parsers import RegexParser
from whylog.config.rule import Rule
from whylog.tests.consts import TestPaths
class TestBasic(TestCase):
    @classmethod
    def setUpClass(cls):
        """Build a shared config plus cause/effect parsers used by every test."""
        SettingsFactorySelector.WHYLOG_DIR = TestPaths.WHYLOG_DIR
        cls.config = SettingsFactorySelector.get_settings()['config']
        cls.whylog_dir = SettingsFactorySelector._attach_whylog_dir(os.getcwd())

        # NOTE(review): the trailing ``(\w)`` groups match a single word
        # character only, although the sample lines carry multi-character
        # host names — ``(\w+)`` was probably intended; confirm.
        cause1_regex = '^(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d) cause1 transaction number: (\d+) Host: (\w)$'
        cause1_line = '2016-04-12 23:39:43 cause1 transaction number: 10101 Host: db_host'
        # group 1 (the timestamp) is converted to a 'date' value
        convertions = {1: 'date'}
        cls.cause1 = RegexParser("cause1", cause1_line, cause1_regex, [1], 'database', convertions)

        cause2_regex = '^(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d) cause2 moved resource id: (\d+) Host: (\w)$'
        cause2_line = '2016-04-12 23:40:43 cause2 moved resource id: 1234 Host: apache_host'
        convertions = {1: 'date'}
        cls.cause2 = RegexParser("cause2", cause2_line, cause2_regex, [1], 'apache', convertions)

        effect_regex = '^(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d) effect internal server error Host: (\w)$'
        effect_line = '2016-04-12 23:54:43 effect internal server error Host: apache_host'
        convertions = {1: 'date'}
        cls.effect = RegexParser("effect", effect_line, effect_regex, [1], 'apache', convertions)

        line_source = LineSource('localhost', 'node_1.log')
        effect_time = datetime(2016, 4, 12, 23, 54, 43)
        effect_line = '2016-04-12 23:54:43 effect internal server error Host: apache_host'
        cls.effect_clues = {'effect': Clue((effect_time,), effect_line, 40, line_source)}
def test_search_range_no_constraints_on_primary_values(self):
rule = Rule([self.cause1, self.cause2], self.effect, [], Rule.LINKAGE_AND)
self.config._rules['apache'].append(rule)
calculated_ranges = self.config._get_search_ranges([rule], self.effect_clues)
self.config._rules['apache'].pop()
raise SkipTest('Not implemented yet')
assert calculated_ranges == {}
def test_search_range_single_log_types(self):
constraints = [{'clues_groups': [[1, 1], [0, 1]],
'name': 'time',
'params': {'max_delta': 10}}]
rule = Rule([self.cause2], self.effect, constraints, Rule.LINKAGE_AND)
self.config._rules['apache'].append(rule)
calculated_ranges = self.config._get_search_ranges([rule], self.effect_clues)
self.config._rules['apache'].pop()
expected_ranges = {
'apache': {
'date': {
'left_bound': datetime(2016, 4, 12, 23, 54, 33),
'right_bound': datetime(2016, 4, 12, 23, 54, 43)
}
}
}
raise SkipTest('Not implemented yet')
assert calculated_ranges == expected_ranges
def test_search_range_two_log_types(self):
constraints1 = [
{'clues_groups': [[1, 1], [0, 1]],
'name': 'time',
'params': {'max_delta': 100,
'min_delta': 10}}, {'clues_groups': [[2, 1], [0, 1]],
'name': 'time',
'params': {'max_delta': 10}}
]
rule = Rule([self.cause1, self.cause2], self.effect, constraints1, Rule.LINKAGE_AND)
self.config._rules['apache'].append(rule)
calculated_ranges = self.config._get_search_ranges([rule], self.effect_clues)
self.config._rules['apache'].pop()
expected_ranges = {
'database': {
'date': {
'left_bound': datetime(2016, 4, 12, 23, 53, 3),
'right_bound': datetime(2016, 4, 12, 23, 54, 33)
}
},
'apache': {
'date': {
'left_bound': datetime(2016, 4, 12, 23, 54, 33),
'right_bound': datetime(2016, 4, 12, 23, 54, 43)
}
}
}
assert calculated_ranges == expected_ranges
# def test_search_range_lack_of_left_bound(self):
# constraints1 = [
# {'clues_groups': [[1, 1], [0, 1]],
# 'name': 'time',
# 'params': {'min_delta': 10}},
# ]
# rule = Rule([self.cause1, self.cause2], self.effect, constraints1)
#
# self.config._rules['apache'].append(rule)
# calculated_ranges = self.config._get_search_ranges([rule], self.effect_clues)
# self.config._rules['apache'].pop()
#
# expected_ranges = {
# 'database': {
# 'date': {
# 'left_bound': datetime(2016, 4, 12, 23, 53, 3),
# 'right_bound': datetime(2016, 4, 12, 23, 54, 33)
# }
# },
# 'apache': {
# 'date': {
# 'left_bound': datetime(2016, 4, 12, 23, 54, 33),
# 'right_bound': datetime(2016, 4, 12, 23, 54, 43)
# }
# }
# }
# assert calculated_ranges == expected_ranges
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.whylog_dir)
More tests for finding search range
import os.path
import shutil
from datetime import datetime
from unittest import TestCase, SkipTest
from whylog.config import SettingsFactorySelector
from whylog.config.investigation_plan import Clue, LineSource
from whylog.config.parsers import RegexParser
from whylog.config.rule import Rule
from whylog.tests.consts import TestPaths
class TestBasic(TestCase):
    """Extended tests for whylog's search-range calculation.

    Refactored version of the earlier suite: shared datetimes are class
    attributes and the append-rule/compute/pop-rule dance lives in the
    calculate_range() helper. Tests that raise SkipTest cover behaviour not
    implemented yet; their asserts are intentionally unreachable until then.
    """
    @classmethod
    def setUpClass(cls):
        # Redirect whylog's settings directory to the test fixture location.
        SettingsFactorySelector.WHYLOG_DIR = TestPaths.WHYLOG_DIR
        cls.config = SettingsFactorySelector.get_settings()['config']
        cls.whylog_dir = SettingsFactorySelector._attach_whylog_dir(os.getcwd())
        # Group 1 of every parser is the timestamp (converter 'date').
        cause1_regex = '^(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d) cause1 transaction number: (\d+) Host: (\w)$'
        cause1_line = '2016-04-12 23:39:43 cause1 transaction number: 10101 Host: db_host'
        convertions = {1: 'date'}
        cls.cause1 = RegexParser("cause1", cause1_line, cause1_regex, [1], 'database', convertions)
        cause2_regex = '^(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d) cause2 moved resource id: (\d+) Host: (\w)$'
        cause2_line = '2016-04-12 23:40:43 cause2 moved resource id: 1234 Host: apache_host'
        convertions = {1: 'date'}
        cls.cause2 = RegexParser("cause2", cause2_line, cause2_regex, [1], 'apache', convertions)
        effect_regex = '^(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d) effect internal server error Host: (\w)$'
        effect_line = '2016-04-12 23:54:43 effect internal server error Host: apache_host'
        convertions = {1: 'date'}
        cls.effect = RegexParser("effect", effect_line, effect_regex, [1], 'apache', convertions)
        line_source = LineSource('localhost', 'node_1.log')
        cls.effect_time = datetime(2016, 4, 12, 23, 54, 43)
        effect_line = '2016-04-12 23:54:43 effect internal server error Host: apache_host'
        cls.effect_clues = {'effect': Clue((cls.effect_time,), effect_line, 40, line_source)}
        # Shared expected bounds, all relative to cls.effect_time.
        cls.earliest_date = datetime(1, 1, 1, 1, 1, 1)
        cls.ten_second_earlier = datetime(2016, 4, 12, 23, 54, 33)
        cls.one_hundred_second_earlier = datetime(2016, 4, 12, 23, 53, 3)
        cls.ten_second_later = datetime(2016, 4, 12, 23, 54, 53)
    @classmethod
    def calculate_range(cls, rule):
        """Compute search ranges for <rule>, temporarily registered for 'apache'."""
        cls.config._rules['apache'].append(rule)
        calculated_ranges = cls.config._get_search_ranges([rule], cls.effect_clues)
        cls.config._rules['apache'].pop()
        return calculated_ranges
    def test_search_range_no_constraints_on_primary_values(self):
        """A rule without constraints should yield no search ranges at all."""
        rule = Rule([self.cause1, self.cause2], self.effect, [], Rule.LINKAGE_AND)
        calculated_ranges = self.calculate_range(rule)
        raise SkipTest('Not implemented yet')
        assert calculated_ranges == {}
    def test_search_range_single_log_types(self):
        """A single max_delta=10 time constraint bounds apache to a 10s window."""
        constraints = [{'clues_groups': [[1, 1], [0, 1]],
                        'name': 'time',
                        'params': {'max_delta': 10}}]
        rule = Rule([self.cause2], self.effect, constraints, Rule.LINKAGE_AND)
        calculated_ranges = self.calculate_range(rule)
        expected_ranges = {
            'apache': {
                'date': {
                    'left_bound': self.ten_second_earlier,
                    'right_bound': self.effect_time
                }
            }
        }
        raise SkipTest('Not implemented yet')
        assert calculated_ranges == expected_ranges
    def test_search_range_two_log_types(self):
        """Constraints on two log types yield separate per-log-type windows."""
        constraints1 = [
            {'clues_groups': [[1, 1], [0, 1]],
             'name': 'time',
             'params': {'max_delta': 100,
                        'min_delta': 10}},
            {'clues_groups': [[2, 1], [0, 1]],
             'name': 'time',
             'params': {'max_delta': 10}}
        ]
        rule = Rule([self.cause1, self.cause2], self.effect, constraints1, Rule.LINKAGE_AND)
        calculated_ranges = self.calculate_range(rule)
        expected_ranges = {
            'database': {
                'date': {
                    'left_bound': self.one_hundred_second_earlier,
                    'right_bound': self.ten_second_earlier
                }
            },
            'apache': {
                'date': {
                    'left_bound': self.ten_second_earlier,
                    'right_bound': self.effect_time
                }
            }
        }
        assert calculated_ranges == expected_ranges
    def test_search_range_lack_of_left_bound(self):
        """min_delta only: left bound falls back to the earliest representable date."""
        constraints1 = [
            {'clues_groups': [[1, 1], [0, 1]],
             'name': 'time',
             'params': {'min_delta': 10}},
        ]
        rule = Rule([self.cause1, self.cause2], self.effect, constraints1, Rule.LINKAGE_AND)
        calculated_ranges = self.calculate_range(rule)
        expected_ranges = {
            'database': {
                'date': {
                    'left_bound': self.earliest_date,
                    'right_bound': self.ten_second_earlier
                }
            },
        }
        raise SkipTest('Not implemented yet')
        assert calculated_ranges == expected_ranges
    def test_search_range_lack_of_right_bound(self):
        """max_delta only: right bound defaults to the effect time itself."""
        constraints1 = [
            {'clues_groups': [[1, 1], [0, 1]],
             'name': 'time',
             'params': {'max_delta': 10}},
        ]
        rule = Rule([self.cause1, self.cause2], self.effect, constraints1, Rule.LINKAGE_AND)
        calculated_ranges = self.calculate_range(rule)
        expected_ranges = {
            'database': {
                'date': {
                    'left_bound': self.ten_second_earlier,
                    'right_bound': self.effect_time
                }
            },
        }
        raise SkipTest('Not implemented yet')
        assert calculated_ranges == expected_ranges
    def test_search_range_delayed_logs(self):
        """A negative min_delta lets the cause window extend past the effect."""
        constraints1 = [
            {'clues_groups': [[1, 1], [0, 1]],
             'name': 'time',
             'params': {'min_delta': -10, 'max_delta': 100}},
        ]
        rule = Rule([self.cause1, self.cause2], self.effect, constraints1, Rule.LINKAGE_AND)
        calculated_ranges = self.calculate_range(rule)
        expected_ranges = {
            'database': {
                'date': {
                    'left_bound': self.one_hundred_second_earlier,
                    'right_bound': self.ten_second_later
                }
            },
        }
        raise SkipTest('Not implemented yet')
        assert calculated_ranges == expected_ranges
    def test_search_range_on_identical_constraint(self):
        """'identical' constraint collapses the window to the effect time."""
        constraints1 = [
            {'clues_groups': [[1, 1], [0, 1]],
             'name': 'identical',
             'params': {}},
        ]
        rule = Rule([self.cause1, self.cause2], self.effect, constraints1, Rule.LINKAGE_AND)
        calculated_ranges = self.calculate_range(rule)
        expected_ranges = {
            'database': {
                'date': {
                    'left_bound': self.effect_time,
                    'right_bound': self.effect_time
                }
            },
        }
        raise SkipTest('Not implemented yet')
        assert calculated_ranges == expected_ranges
    def test_search_range_merge_range(self):
        """Multiple constraints on the same log type merge into one window."""
        constraints1 = [
            {'clues_groups': [[1, 1], [0, 1]],
             'name': 'time',
             'params': {'max_delta': 75,
                        'min_delta': 10}},
            {'clues_groups': [[2, 1], [0, 1]],
             'name': 'time',
             'params': {'max_delta': 10}},
            {'clues_groups': [[3, 1], [0, 1]],
             'name': 'time',
             'params': {'max_delta': 100, 'min_delta': 20}},
            {'clues_groups': [[3, 1], [0, 1]],
             'name': 'time',
             'params': {'max_delta': 100, 'min_delta': 20}}
        ]
        rule = Rule([self.cause1, self.cause2, self.cause1, self.cause2], self.effect, constraints1, Rule.LINKAGE_AND)
        calculated_ranges = self.calculate_range(rule)
        expected_ranges = {
            'database': {
                'date': {
                    'left_bound': self.one_hundred_second_earlier,
                    'right_bound': self.ten_second_earlier
                }
            },
            'apache': {
                'date': {
                    'left_bound': self.one_hundred_second_earlier,
                    'right_bound': self.effect_time
                }
            }
        }
        raise SkipTest('Not implemented yet')
        assert calculated_ranges == expected_ranges
    def test_search_range_covering(self):
        """A wider window on the same log type covers the narrower one."""
        constraints1 = [
            {'clues_groups': [[1, 1], [0, 1]],
             'name': 'time',
             'params': {'max_delta': 75,
                        'min_delta': 10}},
            {'clues_groups': [[2, 1], [0, 1]],
             'name': 'time',
             'params': {'max_delta': 100}},
        ]
        rule = Rule([self.cause2, self.cause2], self.effect, constraints1, Rule.LINKAGE_AND)
        calculated_ranges = self.calculate_range(rule)
        expected_ranges = {
            'apache': {
                'date': {
                    'left_bound': self.one_hundred_second_earlier,
                    'right_bound': self.effect_time
                }
            }
        }
        raise SkipTest('Not implemented yet')
        assert calculated_ranges == expected_ranges
    def test_search_range_reasoning_on_not_only_effect(self):
        """Constraints chained through a cause (not the effect) still bound the window."""
        constraints1 = [
            {'clues_groups': [[1, 1], [0, 1]],
             'name': 'time',
             'params': {'max_delta': 75,
                        'min_delta': 10}},
            {'clues_groups': [[2, 1], [1, 1]],
             'name': 'time',
             'params': {'max_delta': 25}},
        ]
        rule = Rule([self.cause2, self.cause2], self.effect, constraints1, Rule.LINKAGE_AND)
        calculated_ranges = self.calculate_range(rule)
        expected_ranges = {
            'apache': {
                'date': {
                    'left_bound': self.one_hundred_second_earlier,
                    'right_bound': self.effect_time
                }
            }
        }
        raise SkipTest('Not implemented yet')
        assert calculated_ranges == expected_ranges
    def test_search_range_mixed_constraint_type(self):
        """'time' and 'identical' constraints on one log type combine correctly."""
        constraints1 = [
            {'clues_groups': [[1, 1], [0, 1]],
             'name': 'time',
             'params': {'max_delta': 100,
                        'min_delta': 10}},
            {'clues_groups': [[2, 1], [0, 1]],
             'name': 'identical',
             'params': {}},
        ]
        rule = Rule([self.cause2, self.cause2], self.effect, constraints1, Rule.LINKAGE_AND)
        calculated_ranges = self.calculate_range(rule)
        expected_ranges = {
            'apache': {
                'date': {
                    'left_bound': self.one_hundred_second_earlier,
                    'right_bound': self.effect_time
                }
            }
        }
        raise SkipTest('Not implemented yet')
        assert calculated_ranges == expected_ranges
    @classmethod
    def tearDownClass(cls):
        # Remove the temporary whylog settings directory created in setUpClass.
        shutil.rmtree(cls.whylog_dir)
|
from __future__ import print_function, division, absolute_import
import random
import numpy as np
import copy
import numbers
import cv2
import math
import imageio
import multiprocessing
import threading
import traceback
import sys
import six
import six.moves as sm
import os
import skimage.draw
import skimage.measure
import collections
import time
import json
import matplotlib.pyplot as plt
if sys.version_info[0] == 2:
import cPickle as pickle
from Queue import Empty as QueueEmpty, Full as QueueFull
elif sys.version_info[0] == 3:
import pickle
from queue import Empty as QueueEmpty, Full as QueueFull
xrange = range
# Sentinel constant -- presumably means "apply to all"; its consumers are
# outside this chunk.
ALL = "ALL"
# Directory containing this module; bundled data files live next to it.
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
# filepath to the quokka image, its annotations and depth map
QUOKKA_FP = os.path.join(FILE_DIR, "quokka.jpg")
QUOKKA_ANNOTATIONS_FP = os.path.join(FILE_DIR, "quokka_annotations.json")
QUOKKA_DEPTH_MAP_HALFRES_FP = os.path.join(FILE_DIR, "quokka_depth_map_halfres.png")
# Font bundled with the library; used by draw_text().
DEFAULT_FONT_FP = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    "DejaVuSans.ttf"
)
# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)
def is_np_array(val):
    """
    Checks whether a variable is a numpy array.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a numpy array. Otherwise False.
    """
    # Deliberately not np.generic: that would also match numpy scalar values,
    # which are not arrays.
    return isinstance(val, np.ndarray)
def is_single_integer(val):
    """
    Checks whether a variable is a single integer.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is an integer. Otherwise False.
    """
    # bool is a subclass of int, but True/False should not count as integers.
    if isinstance(val, bool):
        return False
    return isinstance(val, numbers.Integral)
def is_single_float(val):
    """
    Checks whether a variable is a single float.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a float. Otherwise False.
    """
    # Real-but-not-Integral and not bool, i.e. float-like values only.
    if isinstance(val, (bool, numbers.Integral)):
        return False
    return isinstance(val, numbers.Real)
def is_single_number(val):
    """
    Checks whether a variable is a single number, i.e. an integer or float.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a number. Otherwise False.
    """
    # Equivalent to is_single_integer(val) or is_single_float(val):
    # any real number except booleans.
    return isinstance(val, numbers.Real) and not isinstance(val, bool)
def is_iterable(val):
    """
    Checks whether a variable is iterable.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is an iterable. Otherwise False.
    """
    # collections.Iterable is deprecated since Python 3.3 and was removed in
    # Python 3.10; collections.abc.Iterable is the correct home. The getattr
    # fallback keeps this working on interpreters without collections.abc.
    abc_module = getattr(collections, "abc", collections)
    return isinstance(val, abc_module.Iterable)
# TODO convert to is_single_string() or rename is_single_integer/float/number()
def is_string(val):
    """
    Checks whether a variable is a string.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a string. Otherwise False.
    """
    # six.string_types covers str/unicode on Py2 and plain str on Py3.
    return isinstance(val, six.string_types)
def is_integer_array(val):
    """
    Checks whether a variable is a numpy integer array.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a numpy integer array. Otherwise False.
    """
    if not isinstance(val, np.ndarray):
        return False
    return issubclass(val.dtype.type, np.integer)
def is_float_array(val):
    """
    Checks whether a variable is a numpy float array.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a numpy float array. Otherwise False.
    """
    if not isinstance(val, np.ndarray):
        return False
    return issubclass(val.dtype.type, np.floating)
def is_callable(val):
    """
    Checks whether a variable is a callable, e.g. a function.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a callable. Otherwise False.
    """
    # The builtin callable() is unavailable on Python 3.0-3.2 per the
    # original check; fall back to looking for __call__ there.
    major, minor = sys.version_info[0], sys.version_info[1]
    if major == 3 and minor <= 2:
        return hasattr(val, '__call__')
    return callable(val)
def caller_name():
    """
    Returns the name of the caller, e.g. a function.

    Returns
    -------
    name : str
        The name of the caller as a string
    """
    # Frame 1 is the direct caller of this function.
    caller_frame = sys._getframe(1)
    return caller_frame.f_code.co_name
def seed(seedval):
    """
    Set the seed of the library's global random state.

    All default randomness in the library flows from this state; reseeding it
    makes subsequent augmentations reproducible. Augmenters switched to
    deterministic mode replace it with local states derived from it.

    Parameters
    ----------
    seedval : int
        The seed to use.
    """
    CURRENT_RANDOM_STATE.seed(seedval)
def current_random_state():
    """
    Returns the current/global random state of the library.

    Returns
    -------
    out : np.random.RandomState
        The current/global random state.
    """
    return CURRENT_RANDOM_STATE
def new_random_state(seed=None, fully_random=False):
    """
    Returns a new random state.

    Parameters
    ----------
    seed : None or int, optional(default=None)
        Optional seed value to use.
        The same datatypes are allowed as for np.random.RandomState(seed).

    fully_random : bool, optional(default=False)
        Whether to use numpy's random initialization for the
        RandomState (used if set to True). If False, a seed is sampled from
        the global random state, which is a bit faster and hence the default.

    Returns
    -------
    out : np.random.RandomState
        The new random state.
    """
    if seed is None and not fully_random:
        # Sampling a seed manually is noticeably faster than letting
        # RandomState() initialize itself.
        seed = CURRENT_RANDOM_STATE.randint(0, 10**6, 1)[0]
    return np.random.RandomState(seed)
def dummy_random_state():
    """
    Returns a throwaway random state that is always seeded with 1.

    Returns
    -------
    out : np.random.RandomState
        The new random state.
    """
    return np.random.RandomState(1)
def copy_random_state(random_state, force_copy=False):
    """
    Creates a copy of a random state.

    Parameters
    ----------
    random_state : np.random.RandomState
        The random state to copy.

    force_copy : bool, optional(default=False)
        If True, this function will always create a copy of every random
        state. If False, it will not copy numpy's default random state,
        but all other random states.

    Returns
    -------
    rs_copy : np.random.RandomState
        The copied random state.
    """
    if random_state == np.random and not force_copy:
        return random_state
    # The seed here is irrelevant: the full state is overwritten right after.
    rs_copy = np.random.RandomState(1)
    rs_copy.set_state(random_state.get_state())
    return rs_copy
def derive_random_state(random_state):
    """
    Create a single new random state based on an existing random state or seed.

    Parameters
    ----------
    random_state : np.random.RandomState
        Random state or seed from which to derive the new random state.

    Returns
    -------
    result : np.random.RandomState
        Derived random state.
    """
    derived = derive_random_states(random_state, n=1)
    return derived[0]
# TODO use this everywhere instead of manual seed + create
def derive_random_states(random_state, n=1):
    """
    Create N new random states based on an existing random state or seed.

    Parameters
    ----------
    random_state : np.random.RandomState
        Random state or seed from which to derive new random states.

    n : int, optional(default=1)
        Number of random states to derive.

    Returns
    -------
    result : list of np.random.RandomState
        Derived random states.
    """
    # One base seed is drawn from the source state; the i-th derived state is
    # seeded with base_seed + i.
    base_seed = random_state.randint(0, 10**6, 1)[0]
    return [np.random.RandomState(base_seed + offset) for offset in range(n)]
def forward_random_state(random_state):
    """
    Advance the internal state of a random state.

    Drawing (and discarding) one sample guarantees that future calls to the
    random_state produce new random values.

    Parameters
    ----------
    random_state : np.random.RandomState
        Random state to forward.
    """
    random_state.uniform()
# TODO
# def from_json(json_str):
# pass
def _quokka_normalize_extract(extract):
    """
    Generate a normalized rectangle to be extracted from the standard quokka image.

    Parameters
    ----------
    extract : "square" or tuple of number or BoundingBox or BoundingBoxesOnImage
        Unnormalized representation of the image subarea to be extracted.
            * If string "square", then a squared area (x: 0-643, y: 0-643) will be extracted from
              the image.
            * If a tuple, then expected to contain four numbers denoting x1, y1, x2 and y2.
            * If a BoundingBox, then that bounding box's area will be extracted from the image.
            * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
              and a shape matching the full image dimensions (i.e. (643, 960, *)). Then the
              one bounding box will be used similar to BoundingBox.

    Returns
    -------
    bb : BoundingBox
        Normalized representation of the area to extract from the standard quokka image.

    Raises
    ------
    Exception
        If `extract` is of none of the supported types.
    """
    if extract == "square":
        bb = BoundingBox(x1=0, y1=0, x2=643, y2=643)
    elif isinstance(extract, tuple) and len(extract) == 4:
        bb = BoundingBox(x1=extract[0], y1=extract[1], x2=extract[2], y2=extract[3])
    elif isinstance(extract, BoundingBox):
        bb = extract
    elif isinstance(extract, BoundingBoxesOnImage):
        # BUGFIX: previously this took len() of the *class* attribute
        # BoundingBoxesOnImage.bounding_boxes instead of the instance's list,
        # so the one-bounding-box requirement was never actually validated.
        do_assert(len(extract.bounding_boxes) == 1)
        do_assert(extract.shape[0:2] == (643, 960))
        bb = extract.bounding_boxes[0]
    else:
        raise Exception(
            "Expected None or tuple of four entries or BoundingBox or BoundingBoxesOnImage "
            "for parameter 'extract', got %s." % (type(extract),)
        )
    return bb
def _compute_resized_shape(from_shape, to_shape):
"""
Computes the intended new shape of an image-like array after resizing.
Parameters
----------
from_shape : tuple or ndarray
Old shape of the array. Usually expected to be a tuple of form (H, W) or (H, W, C) or
alternatively an array with two or three dimensions.
to_shape : None or tuple of ints or tuple of floats or int or float
New shape of the array.
* If None, then `from_shape` will be used as the new shape.
* If an int V, then the new shape will be (V, V, [C]), where C will be added if it
is part of from_shape.
* If a float V, then the new shape will be (H*V, W*V, [C]), where H and W are the old
height/width.
* If a tuple (H', W', [C']) of ints, then H' and W' will be used as the new height
and width.
* If a tuple (H', W', [C']) of floats (except C), then H' and W' will be used as the new height
and width.
Returns
-------
to_shape_computed : tuple of int
New shape.
"""
if is_np_array(from_shape):
from_shape = from_shape.shape
if is_np_array(to_shape):
to_shape = to_shape.shape
to_shape_computed = list(from_shape)
if to_shape is None:
pass
elif isinstance(to_shape, tuple):
if len(from_shape) == 3 and len(to_shape) == 3:
do_assert(from_shape[2] == to_shape[2])
elif len(to_shape) == 3:
to_shape_computed.append(to_shape[2])
if all([is_single_integer(v) for v in to_shape[0:2]]):
to_shape_computed[0] = to_shape[0]
to_shape_computed[1] = to_shape[1]
elif all([is_single_float(v) for v in to_shape[0:2]]):
to_shape_computed[0] = int(round(from_shape[0] * to_shape[0])) if to_shape[0] is not None else from_shape[0]
to_shape_computed[1] = int(round(from_shape[1] * to_shape[1])) if to_shape[1] is not None else from_shape[1]
elif is_single_integer(to_shape) or is_single_float(to_shape):
to_shape_computed = _compute_resized_shape(from_shape, (to_shape, to_shape))
else:
raise Exception("Expected to_shape to be None or ndarray or tuple of floats or tuple of ints or single int or single float, got %s." % (type(to_shape),))
return to_shape_computed
def quokka(size=None, extract=None):
    """
    Returns an image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        Size of the output image. Input into `imgaug.imresize_single_image()`.
        Usually expected to be a tuple (H, W), where H is the desired height
        and W is the width. If None, then the image will not be resized.

    extract : None or "square" or tuple of four numbers or BoundingBox or BoundingBoxesOnImage
        Subarea of the quokka image to extract; see `_quokka_normalize_extract()`.
        If None, the whole image is used.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    image = imageio.imread(QUOKKA_FP, pilmode="RGB")
    if extract is not None:
        crop_box = _quokka_normalize_extract(extract)
        image = crop_box.extract_from_image(image)
    if size is not None:
        target_shape = _compute_resized_shape(image.shape, size)
        image = imresize_single_image(image, target_shape[0:2])
    return image
def quokka_square(size=None):
    """
    Returns the squared (643x643) variant of the quokka example image.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        Size of the output image. Input into `imgaug.imresize_single_image()`.
        Usually expected to be a tuple (H, W), where H is the desired height
        and W is the width. If None, then the image will not be resized.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    # Simply delegate to quokka() with the predefined square crop.
    return quokka(size=size, extract="square")
def quokka_heatmap(size=None, extract=None):
    """
    Returns a heatmap (here: depth map) for the standard example quokka image.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        See `quokka()`.

    extract : None or "square" or tuple of four numbers or BoundingBox or BoundingBoxesOnImage
        See `quokka()`.

    Returns
    -------
    result : HeatmapsOnImage
        Depth map as an heatmap object. Values close to 0.0 denote objects that are close to
        the camera. Values close to 1.0 denote objects that are furthest away (among all shown
        objects).
    """
    img = imageio.imread(QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode="RGB")
    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img = bb.extract_from_image(img)
    if size is None:
        # Depth map is stored at half resolution; default to upscaling it to
        # the full 643x960 quokka image size.
        size = (643, 960)
    shape_resized = _compute_resized_shape(img.shape, size)
    img = imresize_single_image(img, shape_resized[0:2])
    img_0to1 = img.astype(np.float32) / 255.0
    img_0to1 = 1 - img_0to1 # depth map was saved as 0 being furthest away
    # NOTE(review): `shape` is hard-coded to the full image size even when
    # `extract`/`size` produced different dimensions -- confirm intended.
    return HeatmapsOnImage(img_0to1, shape=(643, 960, 3))
def quokka_segmentation_map(size=None, extract=None):
    """
    Returns a segmentation map for the standard example quokka image.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        See `quokka()`.

    extract : None or "square" or tuple of four numbers or BoundingBox or BoundingBoxesOnImage
        See `quokka()`.

    Returns
    -------
    result : SegmentationMapOnImage
        Segmentation map object.
    """
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    # Collect the polygon outline of the quokka from the annotation file.
    xx = []
    yy = []
    for kp_dict in json_dict["polygons"][0]["keypoints"]:
        x = kp_dict["x"]
        y = kp_dict["y"]
        xx.append(x)
        yy.append(y)
    # Rasterize the polygon into a binary (float 0.0/1.0) single-channel mask.
    img_seg = np.zeros((643, 960, 1), dtype=np.float32)
    rr, cc = skimage.draw.polygon(np.array(yy), np.array(xx), shape=img_seg.shape)
    img_seg[rr, cc] = 1.0
    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img_seg = bb.extract_from_image(img_seg)
    # NOTE(review): `shape` is hard-coded to the full image size even when
    # `extract` produced different dimensions -- confirm intended.
    segmap = SegmentationMapOnImage(img_seg, shape=(643, 960, 3))
    if size is not None:
        shape_resized = _compute_resized_shape(img_seg.shape, size)
        segmap = segmap.scale(shape_resized[0:2])
    return segmap
def quokka_keypoints(size=None, extract=None):
    """
    Returns example keypoints on the standard example quokka image.

    The keypoints cover the eyes, ears, nose and paws.

    Parameters
    ----------
    size : None or float or tuple of two ints or tuple of two floats, optional(default=None)
        Size of the output image on which the keypoints are placed. If None, then the keypoints
        are not projected to any new size (positions on the original image are used).
        Floats lead to relative size changes, ints to absolute sizes in pixels.

    extract : None or "square" or tuple of number or BoundingBox or BoundingBoxesOnImage
        Subarea to extract from the image. See `quokka()`.

    Returns
    -------
    kpsoi : KeypointsOnImage
        Example keypoints on the quokka image.
    """
    offset_x, offset_y = 0, 0
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        annotations = json.load(f)
    # Shift annotated coordinates into the (possibly cropped) coordinate frame.
    points = [
        Keypoint(x=kp_dict["x"] - offset_x, y=kp_dict["y"] - offset_y)
        for kp_dict in annotations["keypoints"]
    ]
    if extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)
    kpsoi = KeypointsOnImage(points, shape=shape)
    if size is not None:
        kpsoi = kpsoi.on(_compute_resized_shape(shape, size))
    return kpsoi
def quokka_bounding_boxes(size=None, extract=None):
    """
    Returns example bounding boxes on the standard example quokka image.

    Currently only a single bounding box is returned that covers the quokka.

    Parameters
    ----------
    size : None or float or tuple of two ints or tuple of two floats, optional(default=None)
        Size of the output image on which the BBs are placed. If None, then the BBs
        are not projected to any new size (positions on the original image are used).
        Floats lead to relative size changes, ints to absolute sizes in pixels.

    extract : None or "square" or tuple of number or BoundingBox or BoundingBoxesOnImage
        Subarea to extract from the image. See `quokka()`.

    Returns
    -------
    bbsoi : BoundingBoxesOnImage
        Example BBs on the quokka image.
    """
    offset_x, offset_y = 0, 0
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        annotations = json.load(f)
    # Shift annotated corners into the (possibly cropped) coordinate frame.
    bbs = [
        BoundingBox(
            x1=bb_dict["x1"] - offset_x,
            y1=bb_dict["y1"] - offset_y,
            x2=bb_dict["x2"] - offset_x,
            y2=bb_dict["y2"] - offset_y
        )
        for bb_dict in annotations["bounding_boxes"]
    ]
    if extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)
    bbsoi = BoundingBoxesOnImage(bbs, shape=shape)
    if size is not None:
        bbsoi = bbsoi.on(_compute_resized_shape(shape, size))
    return bbsoi
def angle_between_vectors(v1, v2):
    """
    Returns the angle in radians between vectors 'v1' and 'v2'.

    From http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python

    Parameters
    ----------
    {v1, v2} : (N,) ndarray
        Input vectors.

    Returns
    -------
    out : float
        Angle in radians.

    Examples
    --------
    >>> angle_between((1, 0, 0), (0, 1, 0))
    1.5707963267948966

    >>> angle_between((1, 0, 0), (1, 0, 0))
    0.0

    >>> angle_between((1, 0, 0), (-1, 0, 0))
    3.141592653589793
    """
    unit1 = v1 / np.linalg.norm(v1)
    unit2 = v2 / np.linalg.norm(v2)
    # Clip the dot product to guard against rounding errors beyond [-1, 1].
    cos_angle = np.clip(np.dot(unit1, unit2), -1.0, 1.0)
    return np.arccos(cos_angle)
def draw_text(img, y, x, text, color=[0, 255, 0], size=25): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
    """
    Draw text on an image.

    This uses by default DejaVuSans as its font, which is included in the
    library.

    Parameters
    ----------
    img : (H,W,3) ndarray
        The image array to draw text on.
        Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).

    {y, x} : int
        x- and y- coordinate of the top left corner of the text.

    color : iterable of 3 ints, optional(default=[0, 255, 0])
        Color of the text to draw. For RGB-images this is expected to be
        an RGB color. Float components are interpreted as 0.0-1.0 and
        scaled to 0-255; all components are clipped to [0, 255].

    size : int, optional(default=25)
        Font size of the text to draw.

    Returns
    -------
    img_np : (H,W,3) ndarray
        Input image with text drawn on it.
    """
    # keeping PIL here so that it is not a dependency of the library right now
    from PIL import Image, ImageDraw, ImageFont
    do_assert(img.dtype in [np.uint8, np.float32])
    input_dtype = img.dtype
    if img.dtype == np.float32:
        img = img.astype(np.uint8)
    # BUGFIX: previously the normalized values were written back into the
    # caller's `color` list -- and thereby also into the shared mutable
    # default argument [0, 255, 0]. Normalize into a fresh list instead.
    color_norm = []
    for val in color:
        if isinstance(val, float):
            val = int(val * 255)
        color_norm.append(int(np.clip(val, 0, 255)))
    img = Image.fromarray(img)
    font = ImageFont.truetype(DEFAULT_FONT_FP, size)
    context = ImageDraw.Draw(img)
    context.text((x, y), text, fill=tuple(color_norm), font=font)
    img_np = np.asarray(img)
    img_np.setflags(write=True)  # PIL/asarray returns read only array
    if img_np.dtype != input_dtype:
        img_np = img_np.astype(input_dtype)
    return img_np
# TODO rename sizes to size?
def imresize_many_images(images, sizes=None, interpolation=None):
    """
    Resize many images to a specified size.
    Parameters
    ----------
    images : (N,H,W,C) ndarray
        Array of the images to resize.
        Expected to usually be of dtype uint8.
    sizes : float or iterable of two ints or iterable of two floats
        The new size of the images, given either as a fraction (a single float) or as
        a (height, width) tuple of two integers or as a (height fraction, width fraction)
        tuple of two floats.
    interpolation : None or string or int, optional(default=None)
        The interpolation to use during resize.
        If int, then expected to be one of:
            * cv2.INTER_NEAREST (nearest neighbour interpolation)
            * cv2.INTER_LINEAR (linear interpolation)
            * cv2.INTER_AREA (area interpolation)
            * cv2.INTER_CUBIC (cubic interpolation)
        If string, then expected to be one of:
            * "nearest" (identical to cv2.INTER_NEAREST)
            * "linear" (identical to cv2.INTER_LINEAR)
            * "area" (identical to cv2.INTER_AREA)
            * "cubic" (identical to cv2.INTER_CUBIC)
        If None, the interpolation will be chosen automatically. For size
        increases, area interpolation will be picked and for size decreases,
        linear interpolation will be picked.
    Returns
    -------
    result : (N,H',W',C) ndarray
        Array of the resized images.
    Examples
    --------
    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), 2.0)
    Converts 2 RGB images of height and width 16 to images of height and width 16*2 = 32.
    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (16, 32))
    Converts 2 RGB images of height and width 16 to images of height 16 and width 32.
    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (2.0, 4.0))
    Converts 2 RGB images of height and width 16 to images of height 32 and width 64.
    """
    shape = images.shape
    do_assert(images.ndim == 4, "Expected array of shape (N, H, W, C), got shape %s" % (str(shape),))
    nb_images = shape[0]
    im_height, im_width = shape[1], shape[2]
    nb_channels = shape[3]
    # A single float is interpreted as the same scale factor for both axes.
    if is_single_float(sizes):
        do_assert(sizes > 0.0)
        height = int(round(im_height * sizes))
        width = int(round(im_width * sizes))
    else:
        do_assert(len(sizes) == 2)
        all_int = all([is_single_integer(size) for size in sizes])
        all_float = all([is_single_float(size) for size in sizes])
        do_assert(all_int or all_float)  # mixing int and float entries is rejected
        if all_int:
            height, width = sizes[0], sizes[1]
        else:
            # Floats are per-axis fractions of the current size.
            height = int(round(im_height * sizes[0]))
            width = int(round(im_width * sizes[1]))
    # Shortcut: target size equals the current size, return an untouched copy.
    if height == im_height and width == im_width:
        return np.copy(images)
    ip = interpolation
    do_assert(ip is None or ip in ["nearest", "linear", "area", "cubic", cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC])
    if ip is None:
        # NOTE(review): this picks INTER_AREA when upscaling and INTER_LINEAR
        # otherwise, matching the docstring above -- but OpenCV's documentation
        # recommends INTER_AREA for shrinking and INTER_LINEAR for enlarging.
        # Confirm whether this inversion is intentional before changing it.
        if height > im_height or width > im_width:
            ip = cv2.INTER_AREA
        else:
            ip = cv2.INTER_LINEAR
    elif ip in ["nearest", cv2.INTER_NEAREST]:
        ip = cv2.INTER_NEAREST
    elif ip in ["linear", cv2.INTER_LINEAR]:
        ip = cv2.INTER_LINEAR
    elif ip in ["area", cv2.INTER_AREA]:
        ip = cv2.INTER_AREA
    else: # if ip in ["cubic", cv2.INTER_CUBIC]:
        ip = cv2.INTER_CUBIC
    result = np.zeros((nb_images, height, width, nb_channels), dtype=images.dtype)
    for img_idx in sm.xrange(nb_images):
        # TODO fallback to scipy here if image isn't uint8
        # cv2.resize takes (width, height), i.e. the reverse of numpy order.
        result_img = cv2.resize(images[img_idx], (width, height), interpolation=ip)
        # cv2.resize drops the channel axis for single-channel inputs; restore it.
        if len(result_img.shape) == 2:
            result_img = result_img[:, :, np.newaxis]
        result[img_idx] = result_img.astype(images.dtype)
    return result
def imresize_single_image(image, sizes, interpolation=None):
    """
    Resize a single image.
    Parameters
    ----------
    image : (H,W,C) ndarray or (H,W) ndarray
        Array of the image to resize.
        Expected to usually be of dtype uint8.
    sizes : float or iterable of two ints or iterable of two floats
        See `imresize_many_images()`.
    interpolation : None or string or int, optional(default=None)
        See `imresize_many_images()`.
    Returns
    -------
    out : (H',W',C) ndarray or (H',W') ndarray
        The resized image.
    """
    # Grayscale inputs get a temporary channel axis so that the batched
    # resize function can handle them; it is removed again before returning.
    had_no_channels = (image.ndim == 2)
    if had_no_channels:
        image = image[:, :, np.newaxis]
    do_assert(len(image.shape) == 3, image.shape)
    batch = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
    if had_no_channels:
        return np.squeeze(batch[0, :, :, 0])
    return batch[0, ...]
def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
    """
    Pad an image-like array on its top/right/bottom/left side.
    This function is a wrapper around `numpy.pad()`.
    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pad.
    top : int, optional(default=0)
        Amount of pixels to add at the top side of the image. Must be 0 or greater.
    right : int, optional(default=0)
        Amount of pixels to add at the right side of the image. Must be 0 or greater.
    bottom : int, optional(default=0)
        Amount of pixels to add at the bottom side of the image. Must be 0 or greater.
    left : int, optional(default=0)
        Amount of pixels to add at the left side of the image. Must be 0 or greater.
    mode : string, optional(default="constant")
        Padding mode to use. See `numpy.pad()` for details.
    cval : number, optional(default=0)
        Value to use for padding if mode="constant". See `numpy.pad()` for details.
    Returns
    -------
    arr_pad : (H',W') or (H',W',C) ndarray
        Padded array with height H'=H+top+bottom and width W'=W+left+right.
    """
    do_assert(arr.ndim in [2, 3])
    do_assert(top >= 0)
    do_assert(right >= 0)
    do_assert(bottom >= 0)
    do_assert(left >= 0)
    # Nothing to pad -- still return a copy so callers always own the result.
    if top == 0 and right == 0 and bottom == 0 and left == 0:
        return np.copy(arr)
    # numpy.pad expects one (before, after) pair per axis.
    pad_widths = [(top, bottom), (left, right)]
    if arr.ndim == 3:
        pad_widths.append((0, 0))  # never pad the channel axis
    # `constant_values` is only a valid keyword for mode="constant".
    np_kwargs = {"mode": mode}
    if mode == "constant":
        np_kwargs["constant_values"] = cval
    return np.pad(arr, pad_widths, **np_kwargs)
def compute_paddings_for_aspect_ratio(arr, aspect_ratio):
    """
    Compute the amount of pixels by which an array has to be padded to fulfill an aspect ratio.
    The aspect ratio is given as width/height.
    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.
    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array for which to compute pad amounts.
    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.
    Returns
    -------
    result : tuple of ints
        Required padding amounts to reach the target aspect ratio, given as a tuple
        of the form (top, right, bottom, left).
    """
    do_assert(arr.ndim in [2, 3])
    do_assert(aspect_ratio > 0)
    height, width = arr.shape[0:2]
    do_assert(height > 0)
    aspect_ratio_current = width / height
    pad_top = pad_right = pad_bottom = pad_left = 0
    if aspect_ratio_current < aspect_ratio:
        # Image is too narrow -- pad left/right; odd remainders go to the right.
        missing_width = (aspect_ratio * height) - width
        pad_right = int(np.ceil(missing_width / 2))
        pad_left = int(np.floor(missing_width / 2))
    elif aspect_ratio_current > aspect_ratio:
        # Image is too wide -- pad top/bottom; odd remainders go to the top.
        missing_height = ((1/aspect_ratio) * width) - height
        pad_top = int(np.ceil(missing_height / 2))
        pad_bottom = int(np.floor(missing_height / 2))
    return (pad_top, pad_right, pad_bottom, pad_left)
def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0, return_pad_amounts=False):
    """
    Pad an image-like array on its sides so that it matches a target aspect ratio.
    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.
    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pad.
    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.
    mode : string, optional(default="constant")
        Padding mode to use. See `numpy.pad()` for details.
    cval : number, optional(default=0)
        Value to use for padding if mode="constant". See `numpy.pad()` for details.
    return_pad_amounts : bool, optional(default=False)
        If False, then only the padded image will be returned. If True, a tuple with two
        entries will be returned, where the first entry is the padded image and the second
        entry are the amounts by which each image side was padded. These amounts are again a
        tuple of the form (top, right, bottom, left), with each value being an integer.
    Returns
    -------
    result : tuple
        First tuple entry: Padded image as (H',W') or (H',W',C) ndarray, fulfilling the given
        aspect_ratio.
        Second tuple entry: Amounts by which the image was padded on each side, given
        as a tuple (top, right, bottom, left).
        If return_pad_amounts is False, then only the image is returned.
    """
    pad_amounts = compute_paddings_for_aspect_ratio(arr, aspect_ratio)
    top, right, bottom, left = pad_amounts
    arr_padded = pad(arr, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
    if return_pad_amounts:
        return arr_padded, pad_amounts
    return arr_padded
def pool(arr, block_size, func, cval=0, preserve_dtype=True):
    """
    Rescale an array by pooling values within blocks.
    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pool. Ideally of datatype np.float64.
    block_size : int or tuple of two ints or tuple of three ints
        Spatial size of each group of each values to pool, aka kernel size.
        If a single integer, then a symmetric block of that size along height and width will
        be used.
        If a tuple of two values, it is assumed to be the block size along height and width
        of the image-like, with pooling happening per channel.
        If a tuple of three values, it is assumed to be the block size along height, width and
        channels.
    func : callable
        Function to apply to a given block in order to convert it to a single number,
        e.g. np.average, np.min, np.max.
    cval : number, optional(default=0)
        Value to use in order to pad the array along its border if the array cannot be divided
        by block_size without remainder.
    preserve_dtype : bool, optional(default=True)
        Whether to convert the array back to the input datatype if it is changed away from
        that in the pooling process.
    Returns
    -------
    arr_reduced : (H',W') or (H',W',C') ndarray
        Array after pooling.
    """
    do_assert(arr.ndim in [2, 3])
    is_valid_int = is_single_integer(block_size) and block_size >= 1
    # BUGFIX: the per-element checks were previously collected into a bare list
    # comprehension; a non-empty list is always truthy, so invalid entries
    # (e.g. zero, negative or non-integer block sizes) were never rejected.
    # Wrapping the checks in all() makes the validation actually effective.
    is_valid_tuple = is_iterable(block_size) and len(block_size) in [2, 3] \
        and all([is_single_integer(val) and val >= 1 for val in block_size])
    do_assert(is_valid_int or is_valid_tuple)
    if is_single_integer(block_size):
        block_size = [block_size, block_size]
    if len(block_size) < arr.ndim:
        # No channel block size given -- pool each channel independently.
        block_size = list(block_size) + [1]
    input_dtype = arr.dtype
    arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func, cval=cval)
    if preserve_dtype and arr_reduced.dtype.type != input_dtype:
        arr_reduced = arr_reduced.astype(input_dtype)
    return arr_reduced
def avg_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Rescale an array using average pooling.
    This is a thin convenience wrapper around `pool()` with np.average as the
    reduction function.
    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pool. See `pool()` for details.
    block_size : int or tuple of two ints or tuple of three ints
        Size of each block of values to pool. See `pool()` for details.
    cval : number, optional(default=0)
        Padding value. See `pool()` for details.
    preserve_dtype : bool, optional(default=True)
        Whether to preserve the input array dtype. See `pool()` for details.
    Returns
    -------
    arr_reduced : (H',W') or (H',W',C') ndarray
        Array after average pooling.
    """
    return pool(arr, block_size, func=np.average, cval=cval, preserve_dtype=preserve_dtype)
def max_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Rescale an array using max-pooling.
    This is a thin convenience wrapper around `pool()` with np.max as the
    reduction function.
    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pool. See `pool()` for details.
    block_size : int or tuple of two ints or tuple of three ints
        Size of each block of values to pool. See `pool()` for details.
    cval : number, optional(default=0)
        Padding value. See `pool()` for details.
    preserve_dtype : bool, optional(default=True)
        Whether to preserve the input array dtype. See `pool()` for details.
    Returns
    -------
    arr_reduced : (H',W') or (H',W',C') ndarray
        Array after max-pooling.
    """
    return pool(arr, block_size, func=np.max, cval=cval, preserve_dtype=preserve_dtype)
def draw_grid(images, rows=None, cols=None):
    """
    Converts multiple input images into a single image showing them in a grid.
    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        The input images to convert to a grid.
        Expected to be RGB and have dtype uint8.
    rows : None or int, optional(default=None)
        The number of rows to show in the grid.
        If None, it will be automatically derived.
    cols : None or int, optional(default=None)
        The number of cols to show in the grid.
        If None, it will be automatically derived.
    Returns
    -------
    grid : (H',W',3) ndarray
        Image of the generated grid.
    """
    if is_np_array(images):
        do_assert(images.ndim == 4)
    else:
        do_assert(is_iterable(images) and is_np_array(images[0]) and images[0].ndim == 3)
    nb_images = len(images)
    do_assert(nb_images > 0)
    # Each grid cell is sized to fit the largest image; all images must agree
    # on their channel count.
    cell_height = max([image.shape[0] for image in images])
    cell_width = max([image.shape[1] for image in images])
    channels = set([image.shape[2] for image in images])
    do_assert(len(channels) == 1, "All images are expected to have the same number of channels, but got channel set %s with length %d instead." % (str(channels), len(channels)))
    nb_channels = list(channels)[0]
    # Derive missing grid dimensions.
    # NOTE(review): the ceil calls assume true (float) division, i.e. Python 3
    # or `from __future__ import division` at the top of this file (not visible
    # in this chunk); under Python 2 integer division they would silently
    # floor -- confirm.
    if rows is None and cols is None:
        rows = cols = int(math.ceil(math.sqrt(nb_images)))
    elif rows is not None:
        cols = int(math.ceil(nb_images / rows))
    elif cols is not None:
        rows = int(math.ceil(nb_images / cols))
    do_assert(rows * cols >= nb_images)
    width = cell_width * cols
    height = cell_height * rows
    grid = np.zeros((height, width, nb_channels), dtype=np.uint8)
    cell_idx = 0
    # Fill cells row by row. Images smaller than their cell are placed at the
    # cell's top left corner; the remainder of the cell stays black.
    for row_idx in sm.xrange(rows):
        for col_idx in sm.xrange(cols):
            if cell_idx < nb_images:
                image = images[cell_idx]
                cell_y1 = cell_height * row_idx
                cell_y2 = cell_y1 + image.shape[0]
                cell_x1 = cell_width * col_idx
                cell_x2 = cell_x1 + image.shape[1]
                grid[cell_y1:cell_y2, cell_x1:cell_x2, :] = image
            cell_idx += 1
    return grid
def show_grid(images, rows=None, cols=None):
    """
    Render the given images as a single grid image and display it in a window.
    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        See `draw_grid()`.
    rows : None or int, optional(default=None)
        See `draw_grid()`.
    cols : None or int, optional(default=None)
        See `draw_grid()`.
    """
    imshow(draw_grid(images, rows=rows, cols=cols))
def imshow(image):
    """
    Shows an image in a window.
    Parameters
    ----------
    image : (H,W,3) ndarray
        Image to show.
    """
    # NOTE(review): `plt` is assumed to be matplotlib.pyplot, imported at
    # module level (the import is not visible in this chunk) -- confirm.
    plt.imshow(image)
    plt.show()
def do_assert(condition, message="Assertion failed."):
    """
    Function that behaves equally to an `assert` statement, but raises an
    Exception.
    This is added because `assert` statements are removed in optimized code.
    It replaces `assert` statements throughout the library that should be
    kept even in optimized code.
    Parameters
    ----------
    condition : bool
        If False, an exception is raised.
    message : string, optional(default="Assertion failed.")
        Error message.
    """
    # Guard-style: truthy conditions pass straight through.
    if condition:
        return
    raise AssertionError(str(message))
class HooksImages(object):
    """
    Class to intervene with image augmentation runs.
    This is e.g. useful to dynamically deactivate some augmenters.
    Every hook is an optional callable; when a hook is not set (None), a
    sensible default behavior is used instead (see the individual methods).
    Parameters
    ----------
    activator : None or callable, optional(default=None)
        A function that gives permission to execute an augmenter.
        The expected interface is `f(images, augmenter, parents, default)`,
        where `images` are the input images to augment, `augmenter` is the
        instance of the augmenter to execute, `parents` are previously
        executed augmenters and `default` is an expected default value to be
        returned if the activator function does not plan to make a decision
        for the given inputs.
    propagator : None or callable, optional(default=None)
        A function that gives permission to propagate the augmentation further
        to the children of an augmenter. This happens after the activator.
        In theory, an augmenter may augment images itself (if allowed by the
        activator) and then execute child augmenters afterwards (if allowed by
        the propagator). If the activator returned False, the propagation step
        will never be executed.
        The expected interface is `f(images, augmenter, parents, default)`,
        with all arguments having identical meaning to the activator.
    preprocessor : None or callable, optional(default=None)
        A function to call before an augmenter performed any augmentations.
        The interface is `f(images, augmenter, parents)`,
        with all arguments having identical meaning to the activator.
        It is expected to return the input images, optionally modified.
    postprocessor : None or callable, optional(default=None)
        A function to call after an augmenter performed augmentations.
        The interface is the same as for the preprocessor.
    Examples
    --------
    >>> seq = iaa.Sequential([
    >>>     iaa.GaussianBlur(3.0, name="blur"),
    >>>     iaa.Dropout(0.05, name="dropout"),
    >>>     iaa.Affine(translate_px=-5, name="affine")
    >>> ])
    >>>
    >>> def activator(images, augmenter, parents, default):
    >>>     return False if augmenter.name in ["blur", "dropout"] else default
    >>>
    >>> seq_det = seq.to_deterministic()
    >>> images_aug = seq_det.augment_images(images)
    >>> heatmaps_aug = seq_det.augment_images(
    >>>     heatmaps,
    >>>     hooks=ia.HooksImages(activator=activator)
    >>> )
    This augments images and their respective heatmaps in the same way.
    The heatmaps however are only modified by Affine, not by GaussianBlur or
    Dropout.
    """
    def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None):
        self.activator = activator
        self.propagator = propagator
        self.preprocessor = preprocessor
        self.postprocessor = postprocessor

    def is_activated(self, images, augmenter, parents, default):
        """
        Return whether an augmenter may be executed.
        Delegates to the activator hook if one was set; otherwise the given
        default decision is returned unchanged.
        Returns
        -------
        out : bool
            If True, the augmenter may be executed. If False, it may
            not be executed.
        """
        if self.activator is None:
            return default
        return self.activator(images, augmenter, parents, default)

    def is_propagating(self, images, augmenter, parents, default):
        """
        Return whether an augmenter may call its children to augment an
        image. This is independent of the augmenter itself possibly changing
        the image without calling its children.
        Delegates to the propagator hook if one was set; otherwise the given
        default decision is returned unchanged.
        Returns
        -------
        out : bool
            If True, the augmenter may propagate to its children.
            If False, it may not.
        """
        if self.propagator is None:
            return default
        return self.propagator(images, augmenter, parents, default)

    def preprocess(self, images, augmenter, parents):
        """
        Called before the augmentation of images starts (per augmenter).
        Delegates to the preprocessor hook if one was set; otherwise the
        images are returned unchanged.
        Returns
        -------
        out : (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.
        """
        if self.preprocessor is None:
            return images
        return self.preprocessor(images, augmenter, parents)

    def postprocess(self, images, augmenter, parents):
        """
        Called after the augmentation of images was performed.
        Delegates to the postprocessor hook if one was set; otherwise the
        images are returned unchanged.
        Returns
        -------
        out : (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.
        """
        if self.postprocessor is None:
            return images
        return self.postprocessor(images, augmenter, parents)
class HooksHeatmaps(HooksImages):
    """
    Class to intervene with heatmap augmentation runs.
    This is e.g. useful to dynamically deactivate some augmenters.
    This class is currently the same as the one for images. This may or may
    not change in the future.
    """
    # Intentionally empty: inherits all behavior from HooksImages.
    pass
class HooksKeypoints(HooksImages):
    """
    Class to intervene with keypoint augmentation runs.
    This is e.g. useful to dynamically deactivate some augmenters.
    This class is currently the same as the one for images. This may or may
    not change in the future.
    """
    # Intentionally empty: inherits all behavior from HooksImages.
    pass
class Keypoint(object):
    """
    A single keypoint (aka landmark) on an image.
    Coordinates are stored as given (usually floats); use `x_int`/`y_int`
    for the nearest-pixel position.
    Parameters
    ----------
    x : number
        Coordinate of the keypoint on the x axis.
    y : number
        Coordinate of the keypoint on the y axis.
    """
    def __init__(self, x, y):
        self.x = x
        self.y = y

    @property
    def x_int(self):
        """
        The keypoint's x-coordinate, rounded to the closest integer.
        Returns
        -------
        result : int
            Keypoint's x-coordinate, rounded to the closest integer.
        """
        rounded = round(self.x)
        return int(rounded)

    @property
    def y_int(self):
        """
        The keypoint's y-coordinate, rounded to the closest integer.
        Returns
        -------
        result : int
            Keypoint's y-coordinate, rounded to the closest integer.
        """
        rounded = round(self.y)
        return int(rounded)

    def project(self, from_shape, to_shape):
        """
        Project the keypoint onto a new position on a new image.
        E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
        and y=(20 of 100 pixels) and is projected onto a new image with
        size (width=200, height=200), its new position will be (20, 40).
        This is intended for cases where the original image is resized.
        It cannot be used for more complex changes (e.g. padding, cropping).
        Parameters
        ----------
        from_shape : tuple
            Shape of the original image. (Before resize.)
        to_shape : tuple
            Shape of the new image. (After resize.)
        Returns
        -------
        out : Keypoint
            Keypoint object with new coordinates.
        """
        if from_shape[0:2] == to_shape[0:2]:
            # Identical spatial size -- nothing to rescale.
            return Keypoint(x=self.x, y=self.y)
        from_height, from_width = from_shape[0:2]
        to_height, to_width = to_shape[0:2]
        x_new = (self.x / from_width) * to_width
        y_new = (self.y / from_height) * to_height
        return Keypoint(x=x_new, y=y_new)

    def shift(self, x=0, y=0):
        """
        Move the keypoint around on an image.
        Parameters
        ----------
        x : number, optional(default=0)
            Move by this value on the x axis.
        y : number, optional(default=0)
            Move by this value on the y axis.
        Returns
        -------
        out : Keypoint
            Keypoint object with new coordinates.
        """
        return Keypoint(self.x + x, self.y + y)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "Keypoint(x=%.8f, y=%.8f)" % (self.x, self.y)
class KeypointsOnImage(object):
    """
    Object that represents all keypoints on a single image.
    Parameters
    ----------
    keypoints : list of Keypoint
        List of keypoints on the image.
    shape : tuple of int
        The shape of the image on which the keypoints are placed.
    Examples
    --------
    >>> kps = [Keypoint(x=10, y=20), Keypoint(x=34, y=60)]
    >>> kps_oi = KeypointsOnImage(kps, shape=image.shape)
    """
    def __init__(self, keypoints, shape):
        #assert len(shape) == 3, "KeypointsOnImage requires shape tuples of form (H, W, C) but got %s. Use C=1 for 2-dimensional images." % (str(shape),)
        self.keypoints = keypoints
        # `shape` may also be passed as the image array itself, in which case
        # its .shape attribute is used.
        if is_np_array(shape):
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)
    @property
    def height(self):
        # Height of the underlying image (shape[0]).
        return self.shape[0]
    @property
    def width(self):
        # Width of the underlying image (shape[1]).
        return self.shape[1]
    @property
    def empty(self):
        """
        Returns whether this object contains zero keypoints.
        Returns
        -------
        result : bool
            True if this object contains zero keypoints.
        """
        return len(self.keypoints) == 0
    def on(self, image):
        """
        Project keypoints from one image to a new one.
        Parameters
        ----------
        image : ndarray or tuple
            New image onto which the keypoints are to be projected.
            May also simply be that new image's shape tuple.
        Returns
        -------
        keypoints : KeypointsOnImage
            Object containing all projected keypoints.
        """
        if is_np_array(image):
            shape = image.shape
        else:
            shape = image
        # Same spatial size: no projection needed, just copy.
        if shape[0:2] == self.shape[0:2]:
            return self.deepcopy()
        else:
            keypoints = [kp.project(self.shape, shape) for kp in self.keypoints]
            return KeypointsOnImage(keypoints, shape)
    def draw_on_image(self, image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Draw all keypoints onto a given image. Each keypoint is marked by a
        square of a chosen color and size.
        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the keypoints.
            This image should usually have the same shape as
            set in KeypointsOnImage.shape.
        color : int or list of ints or tuple of ints or (3,) ndarray, optional(default=[0, 255, 0])
            The RGB color of all keypoints. If a single int `C`, then that is
            equivalent to (C,C,C).
        size : int, optional(default=3)
            The size of each point. If set to C, each square will have
            size CxC.
        copy : bool, optional(default=True)
            Whether to copy the image before drawing the points.
        raise_if_out_of_image : bool, optional(default=False)
            Whether to raise an exception if any keypoint is outside of the
            image.
        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn keypoints.
        """
        if copy:
            image = np.copy(image)
        height, width = image.shape[0:2]
        for keypoint in self.keypoints:
            y, x = keypoint.y_int, keypoint.x_int
            if 0 <= y < height and 0 <= x < width:
                # Clip the drawn square to the image borders.
                x1 = max(x - size//2, 0)
                x2 = min(x + 1 + size//2, width)
                y1 = max(y - size//2, 0)
                y2 = min(y + 1 + size//2, height)
                image[y1:y2, x1:x2] = color
            else:
                if raise_if_out_of_image:
                    # NOTE(review): the message interpolates (y, x) into the
                    # "x=..., y=..." slots, i.e. the values are swapped in the
                    # printed text -- flagged, not changed here.
                    raise Exception("Cannot draw keypoint x=%.8f, y=%.8f on image with shape %s." % (y, x, image.shape))
        return image
    def shift(self, x=0, y=0):
        """
        Move the keypoints around on an image.
        Parameters
        ----------
        x : number, optional(default=0)
            Move each keypoint by this value on the x axis.
        y : number, optional(default=0)
            Move each keypoint by this value on the y axis.
        Returns
        -------
        out : KeypointsOnImage
            Keypoints after moving them.
        """
        keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]
        return KeypointsOnImage(keypoints, self.shape)
    def get_coords_array(self):
        """
        Convert the coordinates of all keypoints in this object to
        an array of shape (N,2).
        Returns
        -------
        result : (N, 2) ndarray
            Where N is the number of keypoints. Each first value is the
            x coordinate, each second value is the y coordinate.
        """
        result = np.zeros((len(self.keypoints), 2), np.float32)
        for i, keypoint in enumerate(self.keypoints):
            result[i, 0] = keypoint.x
            result[i, 1] = keypoint.y
        return result
    @staticmethod
    def from_coords_array(coords, shape):
        """
        Convert an array (N,2) with a given image shape to a KeypointsOnImage
        object.
        Parameters
        ----------
        coords : (N, 2) ndarray
            Coordinates of N keypoints on the original image.
            Each first entry (i, 0) is expected to be the x coordinate.
            Each second entry (i, 1) is expected to be the y coordinate.
        shape : tuple
            Shape tuple of the image on which the keypoints are placed.
        Returns
        -------
        out : KeypointsOnImage
            KeypointsOnImage object that contains all keypoints from the array.
        """
        keypoints = [Keypoint(x=coords[i, 0], y=coords[i, 1]) for i in sm.xrange(coords.shape[0])]
        return KeypointsOnImage(keypoints, shape)
    def to_keypoint_image(self, size=1):
        """
        Draws a new black image of shape (H,W,N) in which all keypoint coordinates
        are set to 255.
        (H=shape height, W=shape width, N=number of keypoints)
        This function can be used as a helper when augmenting keypoints with
        a method that only supports the augmentation of images.
        Parameters
        ----------
        size : int
            Size of each (squared) point. Must be odd.
        Returns
        -------
        image : (H,W,N) ndarray
            Image in which the keypoints are marked. H is the height,
            defined in KeypointsOnImage.shape[0] (analogous W). N is the
            number of keypoints.
        """
        do_assert(len(self.keypoints) > 0)
        height, width = self.shape[0:2]
        image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
        do_assert(size % 2 != 0)  # only odd sizes are supported (symmetric square)
        sizeh = max(0, (size-1)//2)  # half-size of the marker square
        for i, keypoint in enumerate(self.keypoints):
            # TODO for float values spread activation over several cells
            # here and do voting at the end
            y = keypoint.y_int
            x = keypoint.x_int
            # Clip the marker square to the image borders.
            x1 = np.clip(x - sizeh, 0, width-1)
            x2 = np.clip(x + sizeh + 1, 0, width)
            y1 = np.clip(y - sizeh, 0, height-1)
            y2 = np.clip(y + sizeh + 1, 0, height)
            #if 0 <= y < height and 0 <= x < width:
            #    image[y, x, i] = 255
            # Surrounding square is marked with 128, the exact keypoint pixel
            # with 255, so the argmax in from_keypoint_image() finds the center.
            if x1 < x2 and y1 < y2:
                image[y1:y2, x1:x2, i] = 128
            if 0 <= y < height and 0 <= x < width:
                image[y, x, i] = 255
        return image
    @staticmethod
    def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1, nb_channels=None): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Converts an image generated by `to_keypoint_image()` back to
        an KeypointsOnImage object.
        Parameters
        ----------
        image : (H,W,N) ndarray
            The keypoints image. N is the number of
            keypoints.
        if_not_found_coords : tuple or list or dict or None
            Coordinates to use for keypoints that cannot be found in `image`.
            If this is a list/tuple, it must have two integer values. If it
            is a dictionary, it must have the keys "x" and "y". If this
            is None, then the keypoint will not be added to the final
            KeypointsOnImage object.
        threshold : int
            The search for keypoints works by searching for the argmax in
            each channel. This parameters contains the minimum value that
            the max must have in order to be viewed as a keypoint.
        nb_channels : None or int
            Number of channels of the image on which the keypoints are placed.
            Some keypoint augmenters require that information.
            If set to None, the keypoint's shape will be set
            to `(height, width)`, otherwise `(height, width, nb_channels)`.
        Returns
        -------
        out : KeypointsOnImage
            The extracted keypoints.
        """
        do_assert(len(image.shape) == 3)
        height, width, nb_keypoints = image.shape
        # Normalize if_not_found_coords into (if_not_found_x, if_not_found_y)
        # plus a flag for dropping unfound keypoints entirely.
        drop_if_not_found = False
        if if_not_found_coords is None:
            drop_if_not_found = True
            if_not_found_x = -1
            if_not_found_y = -1
        elif isinstance(if_not_found_coords, (tuple, list)):
            do_assert(len(if_not_found_coords) == 2)
            if_not_found_x = if_not_found_coords[0]
            if_not_found_y = if_not_found_coords[1]
        elif isinstance(if_not_found_coords, dict):
            if_not_found_x = if_not_found_coords["x"]
            if_not_found_y = if_not_found_coords["y"]
        else:
            raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (type(if_not_found_coords),))
        keypoints = []
        for i in sm.xrange(nb_keypoints):
            # The brightest pixel per channel is taken as the keypoint position.
            maxidx_flat = np.argmax(image[..., i])
            maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
            found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
            if found:
                keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
            else:
                if drop_if_not_found:
                    pass # dont add the keypoint to the result list, i.e. drop it
                else:
                    keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))
        out_shape = (height, width)
        if nb_channels is not None:
            out_shape += (nb_channels,)
        return KeypointsOnImage(keypoints, shape=out_shape)
    def copy(self):
        """
        Create a shallow copy of the KeypointsOnImage object.
        Returns
        -------
        out : KeypointsOnImage
            Shallow copy.
        """
        return copy.copy(self)
    def deepcopy(self):
        """
        Create a deep copy of the KeypointsOnImage object.
        Returns
        -------
        out : KeypointsOnImage
            Deep copy.
        """
        # for some reason deepcopy is way slower here than manual copy
        #return copy.deepcopy(self)
        kps = [Keypoint(x=kp.x, y=kp.y) for kp in self.keypoints]
        return KeypointsOnImage(kps, tuple(self.shape))
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return "KeypointsOnImage(%s, shape=%s)" % (str(self.keypoints), self.shape)
# TODO functions: square(), to_aspect_ratio(), extend()/add_border(), contains_point()
class BoundingBox(object):
    """
    Class representing bounding boxes.

    Each bounding box is parameterized by its top left and bottom right corners. Both are given
    as x and y-coordinates. The corners are intended to lie inside the bounding box area.
    As a result, a bounding box that lies completely inside the image but has maximum extensions
    would have coordinates `(0.0, 0.0)` and `(W - epsilon, H - epsilon)`. Note that coordinates
    are saved internally as floats.

    Parameters
    ----------
    x1 : number
        X-coordinate of the top left of the bounding box.

    y1 : number
        Y-coordinate of the top left of the bounding box.

    x2 : number
        X-coordinate of the bottom right of the bounding box.

    y2 : number
        Y-coordinate of the bottom right of the bounding box.

    label : None or string, optional(default=None)
        Label of the bounding box, e.g. a string representing the class.
    """

    def __init__(self, x1, y1, x2, y2, label=None):
        """Create a new BoundingBox instance."""
        # Silently repair flipped coordinates, so that (x1, y1) always denotes
        # the top left and (x2, y2) the bottom right corner.
        if x1 > x2:
            x2, x1 = x1, x2
        do_assert(x2 > x1)
        if y1 > y2:
            y2, y1 = y1, y2
        do_assert(y2 > y1)

        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
        self.label = label

    @property
    def x1_int(self):
        """
        Return the x-coordinate of the top left corner as an integer.

        Returns
        -------
        result : int
            X-coordinate of the top left corner, rounded to the closest integer.
        """
        return int(round(self.x1))

    @property
    def y1_int(self):
        """
        Return the y-coordinate of the top left corner as an integer.

        Returns
        -------
        result : int
            Y-coordinate of the top left corner, rounded to the closest integer.
        """
        return int(round(self.y1))

    @property
    def x2_int(self):
        """
        Return the x-coordinate of the bottom right corner as an integer.

        Returns
        -------
        result : int
            X-coordinate of the bottom right corner, rounded to the closest integer.
        """
        return int(round(self.x2))

    @property
    def y2_int(self):
        """
        Return the y-coordinate of the bottom right corner as an integer.

        Returns
        -------
        result : int
            Y-coordinate of the bottom right corner, rounded to the closest integer.
        """
        return int(round(self.y2))

    @property
    def height(self):
        """
        Estimate the height of the bounding box.

        Returns
        -------
        result : number
            Height of the bounding box.
        """
        return self.y2 - self.y1

    @property
    def width(self):
        """
        Estimate the width of the bounding box.

        Returns
        -------
        result : number
            Width of the bounding box.
        """
        return self.x2 - self.x1

    @property
    def center_x(self):
        """
        Estimate the x-coordinate of the center point of the bounding box.

        Returns
        -------
        result : number
            X-coordinate of the center point of the bounding box.
        """
        return self.x1 + self.width/2

    @property
    def center_y(self):
        """
        Estimate the y-coordinate of the center point of the bounding box.

        Returns
        -------
        result : number
            Y-coordinate of the center point of the bounding box.
        """
        return self.y1 + self.height/2

    @property
    def area(self):
        """
        Estimate the area of the bounding box.

        Returns
        -------
        result : number
            Area of the bounding box, i.e. `height * width`.
        """
        return self.height * self.width

    def project(self, from_shape, to_shape):
        """
        Project the bounding box onto a new position on a new image.

        E.g. if the bounding box is on its original image at
        x1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto
        a new image with size (width=200, height=200), its new position will
        be (x1=20, y1=40). (Analogous for x2/y2.)

        This is intended for cases where the original image is resized.
        It cannot be used for more complex changes (e.g. padding, cropping).

        Parameters
        ----------
        from_shape : tuple
            Shape of the original image. (Before resize.)

        to_shape : tuple
            Shape of the new image. (After resize.)

        Returns
        -------
        out : BoundingBox
            BoundingBox object with new coordinates.
        """
        if from_shape[0:2] == to_shape[0:2]:
            return self.copy()
        else:
            from_height, from_width = from_shape[0:2]
            to_height, to_width = to_shape[0:2]
            do_assert(from_height > 0)
            do_assert(from_width > 0)
            do_assert(to_height > 0)
            do_assert(to_width > 0)
            # Coordinates are scaled relative to their fraction of the old size.
            x1 = (self.x1 / from_width) * to_width
            y1 = (self.y1 / from_height) * to_height
            x2 = (self.x2 / from_width) * to_width
            y2 = (self.y2 / from_height) * to_height
            return self.copy(
                x1=x1,
                y1=y1,
                x2=x2,
                y2=y2,
                label=self.label
            )

    def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
        """
        Extend the size of the bounding box along its sides.

        Parameters
        ----------
        all_sides : number, optional(default=0)
            Value by which to extend the bounding box size along all sides.

        top : number, optional(default=0)
            Value by which to extend the bounding box size along its top side.

        right : number, optional(default=0)
            Value by which to extend the bounding box size along its right side.

        bottom : number, optional(default=0)
            Value by which to extend the bounding box size along its bottom side.

        left : number, optional(default=0)
            Value by which to extend the bounding box size along its left side.

        Returns
        -------
        result : BoundingBox
            Extended bounding box. Keeps the label of this bounding box.
        """
        return BoundingBox(
            x1=self.x1 - all_sides - left,
            x2=self.x2 + all_sides + right,
            y1=self.y1 - all_sides - top,
            y2=self.y2 + all_sides + bottom,
            label=self.label  # BUGFIX: label was previously dropped by extend()
        )

    def intersection(self, other, default=None):
        """
        Compute the intersection bounding box of this bounding box and another one.

        Parameters
        ----------
        other : BoundingBox
            Other bounding box with which to generate the intersection.

        default : any, optional(default=None)
            Value to return if the two bounding boxes do not overlap.

        Returns
        -------
        result : BoundingBox or any
            Intersection bounding box of the two bounding boxes. If the bounding
            boxes do not overlap, `default` is returned instead.
        """
        x1_i = max(self.x1, other.x1)
        y1_i = max(self.y1, other.y1)
        x2_i = min(self.x2, other.x2)
        y2_i = min(self.y2, other.y2)
        # Degenerate (empty) intersections cannot be represented as a BoundingBox.
        if x1_i >= x2_i or y1_i >= y2_i:
            return default
        else:
            return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)

    def union(self, other):
        """
        Compute the union bounding box of this bounding box and another one.

        This is equivalent to drawing a bounding box around all corners points of both
        bounding boxes.

        Parameters
        ----------
        other : BoundingBox
            Other bounding box with which to generate the union.

        Returns
        -------
        result : BoundingBox
            Union bounding box of the two bounding boxes.
        """
        return BoundingBox(
            x1=min(self.x1, other.x1),
            y1=min(self.y1, other.y1),
            x2=max(self.x2, other.x2),
            y2=max(self.y2, other.y2),
        )

    def iou(self, other):
        """
        Compute the IoU of this bounding box with another one.

        IoU is the intersection over union, defined as:
            area(intersection(A, B)) / area(union(A, B))
            = area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))

        Parameters
        ----------
        other : BoundingBox
            Other bounding box with which to compare.

        Returns
        -------
        result : float
            IoU between the two bounding boxes.
        """
        inters = self.intersection(other)
        if inters is None:
            return 0
        else:
            # float() guards against integer division under python2 when the
            # bounding boxes were created from integer coordinates.
            return inters.area / float(self.area + other.area - inters.area)

    def is_fully_within_image(self, image):
        """
        Estimate whether the bounding box is fully inside the image area.

        Parameters
        ----------
        image : (H,W,...) ndarray or tuple of at least two ints
            Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
            assumed to represent the image shape.

        Returns
        -------
        result : bool
            True if the bounding box is fully inside the image area.
            False otherwise.
        """
        if isinstance(image, tuple):
            shape = image
        else:
            shape = image.shape
        height, width = shape[0:2]
        return self.x1 >= 0 and self.x2 < width and self.y1 >= 0 and self.y2 < height

    def is_partly_within_image(self, image):
        """
        Estimate whether the bounding box is at least partially inside the image area.

        Parameters
        ----------
        image : (H,W,...) ndarray or tuple of at least two ints
            Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
            assumed to represent the image shape.

        Returns
        -------
        result : bool
            True if the bounding box is at least partially inside the image area.
            False otherwise.
        """
        if isinstance(image, tuple):
            shape = image
        else:
            shape = image.shape
        height, width = shape[0:2]
        # Use an epsilon-shrunken image bounding box, because the corner
        # coordinates are intended to lie *inside* the image area.
        eps = np.finfo(np.float32).eps
        img_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps)
        return self.intersection(img_bb) is not None

    def is_out_of_image(self, image, fully=True, partly=False):
        """
        Estimate whether the bounding box is partially or fully outside of the image area.

        Parameters
        ----------
        image : (H,W,...) ndarray or tuple of ints
            Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
            assumed to represent the image shape and must contain at least two integers.

        fully : bool, optional(default=True)
            Whether to return True if the bounding box is fully outside of the image area.

        partly : bool, optional(default=False)
            Whether to return True if the bounding box is at least partially outside of the
            image area.

        Returns
        -------
        result : bool
            True if the bounding box is partially/fully outside of the image area, depending
            on defined parameters. False otherwise.
        """
        if self.is_fully_within_image(image):
            return False
        elif self.is_partly_within_image(image):
            return partly
        else:
            return fully

    def cut_out_of_image(self, image):
        """
        Cut off all parts of the bounding box that are outside of the image.

        Parameters
        ----------
        image : (H,W,...) ndarray or tuple of at least two ints
            Image dimensions to use for the clipping of the bounding box. If an ndarray, its
            shape will be used. If a tuple, it is assumed to represent the image shape.

        Returns
        -------
        result : BoundingBox
            Bounding box, clipped to fall within the image dimensions.
        """
        if isinstance(image, tuple):
            shape = image
        else:
            shape = image.shape

        height, width = shape[0:2]
        do_assert(height > 0)
        do_assert(width > 0)

        # Clip to [0, W-eps] / [0, H-eps], as the corners are intended to lie
        # inside the image area.
        eps = np.finfo(np.float32).eps
        x1 = np.clip(self.x1, 0, width - eps)
        x2 = np.clip(self.x2, 0, width - eps)
        y1 = np.clip(self.y1, 0, height - eps)
        y2 = np.clip(self.y2, 0, height - eps)

        return self.copy(
            x1=x1,
            y1=y1,
            x2=x2,
            y2=y2,
            label=self.label
        )

    def shift(self, top=None, right=None, bottom=None, left=None):
        """
        Shift the bounding box from one or more image sides, i.e. move it on the x/y-axis.

        Parameters
        ----------
        top : None or int, optional(default=None)
            Amount of pixels by which to shift the bounding box from the top.

        right : None or int, optional(default=None)
            Amount of pixels by which to shift the bounding box from the right.

        bottom : None or int, optional(default=None)
            Amount of pixels by which to shift the bounding box from the bottom.

        left : None or int, optional(default=None)
            Amount of pixels by which to shift the bounding box from the left.

        Returns
        -------
        result : BoundingBox
            Shifted bounding box.
        """
        top = top if top is not None else 0
        right = right if right is not None else 0
        bottom = bottom if bottom is not None else 0
        left = left if left is not None else 0
        # Opposite sides cancel each other out, e.g. top=2, bottom=2 is a no-op.
        return self.copy(
            x1=self.x1+left-right,
            x2=self.x2+left-right,
            y1=self.y1+top-bottom,
            y2=self.y2+top-bottom
        )

    def draw_on_image(self, image, color=[0, 255, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):  # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Draw the bounding box on an image.

        Parameters
        ----------
        image : (H,W,C) ndarray(uint8)
            The image onto which to draw the bounding box.

        color : iterable of int, optional(default=[0,255,0])
            The color to use, corresponding to the channel layout of the image. Usually RGB.

        alpha : float, optional(default=1.0)
            The transparency of the drawn bounding box, where 1.0 denotes no transparency and
            0.0 is invisible.

        thickness : int, optional(default=1)
            The thickness of the bounding box in pixels. If the value is larger than 1, then
            additional pixels will be added around the bounding box (i.e. extension towards the
            outside).

        copy : bool, optional(default=True)
            Whether to copy the input image or change it in-place.

        raise_if_out_of_image : bool, optional(default=False)
            Whether to raise an error if the bounding box is partially/fully outside of the
            image. If set to False, no error will be raised and only the parts inside the image
            will be drawn.

        Returns
        -------
        result : (H,W,C) ndarray(uint8)
            Image with bounding box drawn on it.
        """
        if raise_if_out_of_image and self.is_out_of_image(image):
            raise Exception("Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s." % (self.x1, self.y1, self.x2, self.y2, image.shape))

        result = np.copy(image) if copy else image
        if isinstance(color, (tuple, list)):
            color = np.uint8(color)

        for i in range(thickness):
            y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int

            # When y values get into the range (H-0.5, H), the *_int functions round them to H.
            # That is technically sensible, but in the case of drawing means that the border lies
            # just barely outside of the image, making the border disappear, even though the BB
            # is fully inside the image. Here we correct for that because of beauty reasons.
            # Same is the case for x coordinates.
            if self.is_fully_within_image(image):
                y1 = np.clip(y1, 0, image.shape[0]-1)
                y2 = np.clip(y2, 0, image.shape[0]-1)
                x1 = np.clip(x1, 0, image.shape[1]-1)
                x2 = np.clip(x2, 0, image.shape[1]-1)

            # Corner points of the rectangle ring at distance i from the box.
            y = [y1-i, y1-i, y2+i, y2+i]
            x = [x1-i, x2+i, x2+i, x1-i]
            rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
            if alpha >= 0.99:
                result[rr, cc, :] = color
            else:
                if is_float_array(result):
                    result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                    result = np.clip(result, 0, 255)
                else:
                    input_dtype = result.dtype
                    result = result.astype(np.float32)
                    result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                    result = np.clip(result, 0, 255).astype(input_dtype)

        return result

    def extract_from_image(self, image):
        """
        Extract the image pixels within the bounding box.

        This function will zero-pad the image if the bounding box is partially/fully outside of
        the image.

        Parameters
        ----------
        image : (H,W) or (H,W,C) ndarray
            The image from which to extract the pixels within the bounding box.

        Returns
        -------
        result : (H',W') or (H',W',C) ndarray
            Pixels within the bounding box. Zero-padded if the bounding box is partially/fully
            outside of the image.
        """
        pad_top = 0
        pad_right = 0
        pad_bottom = 0
        pad_left = 0

        height, width = image.shape[0], image.shape[1]
        x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int

        # When y values get into the range (H-0.5, H), the *_int functions round them to H.
        # That is technically sensible, but in the case of extraction leads to a black border,
        # which is both ugly and unexpected after calling cut_out_of_image(). Here we correct for
        # that because of beauty reasons.
        # Same is the case for x coordinates.
        if self.is_fully_within_image(image):
            y1 = np.clip(y1, 0, image.shape[0]-1)
            y2 = np.clip(y2, 0, image.shape[0]-1)
            x1 = np.clip(x1, 0, image.shape[1]-1)
            x2 = np.clip(x2, 0, image.shape[1]-1)

        # if the bb is outside of the image area, the following pads the image
        # first with black pixels until the bb is inside the image
        # and only then extracts the image area
        # TODO probably more efficient to initialize an array of zeros
        # and copy only the portions of the bb into that array that are
        # natively inside the image area
        if x1 < 0:
            pad_left = abs(x1)
            x2 = x2 + abs(x1)
            x1 = 0
        if y1 < 0:
            pad_top = abs(y1)
            y2 = y2 + abs(y1)
            y1 = 0
        if x2 >= width:
            pad_right = x2 - (width - 1)
        if y2 >= height:
            pad_bottom = y2 - (height - 1)

        if any([val > 0 for val in [pad_top, pad_right, pad_bottom, pad_left]]):
            if len(image.shape) == 2:
                image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right)), mode="constant")
            else:
                image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), mode="constant")

        return image[y1:y2, x1:x2]

    # TODO also add to_heatmap
    # TODO add this to BoundingBoxesOnImage
    def to_keypoints(self):
        """
        Convert the corners of the bounding box to keypoints (clockwise, starting at top left).

        Returns
        -------
        result : list of Keypoint
            Corners of the bounding box as keypoints.
        """
        return [
            Keypoint(x=self.x1, y=self.y1),
            Keypoint(x=self.x2, y=self.y1),
            Keypoint(x=self.x2, y=self.y2),
            Keypoint(x=self.x1, y=self.y2)
        ]

    def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
        """
        Create a shallow copy of the BoundingBox object.

        Parameters
        ----------
        x1 : None or number
            If not None, then the x1 coordinate of the copied object will be set to this value.

        y1 : None or number
            If not None, then the y1 coordinate of the copied object will be set to this value.

        x2 : None or number
            If not None, then the x2 coordinate of the copied object will be set to this value.

        y2 : None or number
            If not None, then the y2 coordinate of the copied object will be set to this value.

        label : None or string
            If not None, then the label of the copied object will be set to this value.

        Returns
        -------
        result : BoundingBox
            Shallow copy.
        """
        return BoundingBox(
            x1=self.x1 if x1 is None else x1,
            x2=self.x2 if x2 is None else x2,
            y1=self.y1 if y1 is None else y1,
            y2=self.y2 if y2 is None else y2,
            label=self.label if label is None else label
        )

    def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
        """
        Create a deep copy of the BoundingBox object.

        Parameters
        ----------
        x1 : None or number
            If not None, then the x1 coordinate of the copied object will be set to this value.

        y1 : None or number
            If not None, then the y1 coordinate of the copied object will be set to this value.

        x2 : None or number
            If not None, then the x2 coordinate of the copied object will be set to this value.

        y2 : None or number
            If not None, then the y2 coordinate of the copied object will be set to this value.

        label : None or string
            If not None, then the label of the copied object will be set to this value.

        Returns
        -------
        out : BoundingBox
            Deep copy.
        """
        # All coordinate attributes are immutable numbers, so a shallow copy
        # is equivalent to a deep copy here.
        return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "BoundingBox(x1=%.4f, y1=%.4f, x2=%.4f, y2=%.4f, label=%s)" % (self.x1, self.y1, self.x2, self.y2, self.label)
class BoundingBoxesOnImage(object):
    """
    Object that represents all bounding boxes on a single image.

    Parameters
    ----------
    bounding_boxes : list of BoundingBox
        List of bounding boxes on the image.

    shape : tuple of int
        The shape of the image on which the bounding boxes are placed.

    Examples
    --------
    >>> bbs = [
    >>>     BoundingBox(x1=10, y1=20, x2=20, y2=30),
    >>>     BoundingBox(x1=25, y1=50, x2=30, y2=70)
    >>> ]
    >>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
    """

    def __init__(self, bounding_boxes, shape):
        self.bounding_boxes = bounding_boxes
        # `shape` may be given as an image instead of a shape tuple; derive
        # the tuple in that case.
        if is_np_array(shape):
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)

    @property
    def height(self):
        """
        Get the height of the image on which the bounding boxes fall.

        Returns
        -------
        result : int
            Image height.
        """
        return self.shape[0]

    @property
    def width(self):
        """
        Get the width of the image on which the bounding boxes fall.

        Returns
        -------
        result : int
            Image width.
        """
        return self.shape[1]

    @property
    def empty(self):
        """
        Returns whether this object contains zero bounding boxes.

        Returns
        -------
        result : bool
            True if this object contains zero bounding boxes.
        """
        return len(self.bounding_boxes) == 0

    def on(self, image):
        """
        Project bounding boxes from one image to a new one.

        Parameters
        ----------
        image : ndarray or tuple
            New image onto which the bounding boxes are to be projected.
            May also simply be that new image's shape tuple.

        Returns
        -------
        bounding_boxes : BoundingBoxesOnImage
            Object containing all projected bounding boxes.
        """
        if is_np_array(image):
            shape = image.shape
        else:
            shape = image

        # If the target size is identical, no reprojection is necessary.
        if shape[0:2] == self.shape[0:2]:
            return self.deepcopy()
        else:
            bounding_boxes = [bb.project(self.shape, shape) for bb in self.bounding_boxes]
            return BoundingBoxesOnImage(bounding_boxes, shape)

    def draw_on_image(self, image, color=[0, 255, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):  # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Draw all bounding boxes onto a given image.

        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the bounding boxes.
            This image should usually have the same shape as
            set in BoundingBoxesOnImage.shape.

        color : int or list of ints or tuple of ints or (3,) ndarray, optional(default=[0, 255, 0])
            The RGB color of all bounding boxes. If a single int `C`, then that is
            equivalent to (C,C,C).

        alpha : float, optional(default=1.0)
            Alpha/transparency of the bounding boxes, where 1.0 denotes no
            transparency and 0.0 is invisible.

        thickness : int, optional(default=1)
            Thickness in pixels.

        copy : bool, optional(default=True)
            Whether to copy the image before drawing the bounding boxes.

        raise_if_out_of_image : bool, optional(default=False)
            Whether to raise an exception if any bounding box is outside of the
            image.

        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn bounding boxes.
        """
        for bb in self.bounding_boxes:
            image = bb.draw_on_image(
                image,
                color=color,
                alpha=alpha,
                thickness=thickness,
                copy=copy,
                raise_if_out_of_image=raise_if_out_of_image
            )

        return image

    def remove_out_of_image(self, fully=True, partly=False):
        """
        Remove all bounding boxes that are fully or partially outside of the image.

        Parameters
        ----------
        fully : bool, optional(default=True)
            Whether to remove bounding boxes that are fully outside of the image.

        partly : bool, optional(default=False)
            Whether to remove bounding boxes that are partially outside of the image.

        Returns
        -------
        result : BoundingBoxesOnImage
            Reduced set of bounding boxes, with those that were fully/partially outside of
            the image removed.
        """
        bbs_clean = [bb for bb in self.bounding_boxes if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
        return BoundingBoxesOnImage(bbs_clean, shape=self.shape)

    def cut_out_of_image(self):
        """
        Cut off all parts from all bounding boxes that are outside of the image.

        Bounding boxes that are fully outside of the image are dropped entirely,
        as they cannot be clipped to a non-empty box.

        Returns
        -------
        result : BoundingBoxesOnImage
            Bounding boxes, clipped to fall within the image dimensions.
        """
        bbs_cut = [bb.cut_out_of_image(self.shape) for bb in self.bounding_boxes if bb.is_partly_within_image(self.shape)]
        return BoundingBoxesOnImage(bbs_cut, shape=self.shape)

    def shift(self, top=None, right=None, bottom=None, left=None):
        """
        Shift all bounding boxes from one or more image sides, i.e. move them on the x/y-axis.

        Parameters
        ----------
        top : None or int, optional(default=None)
            Amount of pixels by which to shift all bounding boxes from the top.

        right : None or int, optional(default=None)
            Amount of pixels by which to shift all bounding boxes from the right.

        bottom : None or int, optional(default=None)
            Amount of pixels by which to shift all bounding boxes from the bottom.

        left : None or int, optional(default=None)
            Amount of pixels by which to shift all bounding boxes from the left.

        Returns
        -------
        result : BoundingBoxesOnImage
            Shifted bounding boxes.
        """
        bbs_new = [bb.shift(top=top, right=right, bottom=bottom, left=left) for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs_new, shape=self.shape)

    def copy(self):
        """
        Create a shallow copy of the BoundingBoxesOnImage object.

        Returns
        -------
        out : BoundingBoxesOnImage
            Shallow copy.
        """
        return copy.copy(self)

    def deepcopy(self):
        """
        Create a deep copy of the BoundingBoxesOnImage object.

        Returns
        -------
        out : BoundingBoxesOnImage
            Deep copy.
        """
        # Manual copy is far faster than deepcopy for KeypointsOnImage,
        # so use manual copy here too
        bbs = [bb.deepcopy() for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs, tuple(self.shape))

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "BoundingBoxesOnImage(%s, shape=%s)" % (str(self.bounding_boxes), self.shape)
class HeatmapsOnImage(object):
"""
Object representing heatmaps on images.
Parameters
----------
arr : (H,W) or (H,W,C) ndarray(float32)
Array representing the heatmap(s). If multiple heatmaps, then C is expected to denote
their number.
shape : tuple of ints
Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
heatmap(s) array, unless it is identical to the image shape (note the likely
difference between the arrays in the number of channels).
If there is not a corresponding image, use the shape of the heatmaps array.
min_value : float, optional(default=0.0)
Minimum value for the heatmaps that `arr` represents. This will usually
be 0.0.
max_value : float, optional(default=1.0)
Maximum value for the heatmaps that `arr` represents. This will usually
be 1.0.
"""
    def __init__(self, arr, shape, min_value=0.0, max_value=1.0):
        """Construct a new HeatmapsOnImage object.

        Parameters
        ----------
        arr : (H,W) or (H,W,C) ndarray(float32)
            Heatmap array; values are expected to lie in [min_value, max_value].

        shape : tuple of ints
            Shape of the corresponding *image* (not of the heatmap array).

        min_value : float, optional(default=0.0)
            Minimum possible value of `arr`.

        max_value : float, optional(default=1.0)
            Maximum possible value of `arr`.
        """
        do_assert(arr.dtype.type in [np.float32])
        do_assert(arr.ndim in [2, 3])
        do_assert(len(shape) in [2, 3])
        do_assert(min_value < max_value)
        # NOTE(review): only the first 50 values are spot-checked against the
        # declared value range, presumably to keep validation cheap for large
        # arrays — out-of-range values beyond index 50 are not detected.
        do_assert(np.min(arr.flat[0:50]) >= min_value - np.finfo(arr.dtype).eps)
        do_assert(np.max(arr.flat[0:50]) <= max_value + np.finfo(arr.dtype).eps)
        # Normalize to (H,W,C); remember the original dimensionality so that
        # get_arr() can return a 2D array again later.
        if arr.ndim == 2:
            arr = arr[..., np.newaxis]
            self.arr_was_2d = True
        else:
            self.arr_was_2d = False
        # Internally the heatmap is always stored in the range [0.0, 1.0];
        # skip the rescaling when (min_value, max_value) is already (0, 1).
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < min_value < 0.0 + eps
        max_is_one = 1.0 - eps < max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            self.arr_0to1 = arr
        else:
            self.arr_0to1 = (arr - min_value) / (max_value - min_value)
        self.shape = shape
        self.min_value = min_value
        self.max_value = max_value
def get_arr(self):
"""
Get the heatmap array in the desired value range.
The HeatmapsOnImage object saves heatmaps internally in the value range (min=0.0, max=1.0).
This function converts the internal representation to (min=min_value, max=max_value),
where min_value and max_value are provided upon instantiation of the object.
Returns
-------
result : (H,W) or (H,W,C) ndarray(float32)
Heatmap array.
"""
if self.arr_was_2d and self.arr_0to1.shape[2] == 1:
arr = self.arr_0to1[:, :, 0]
else:
arr = self.arr_0to1
eps = np.finfo(np.float32).eps
min_is_zero = 0.0 - eps < self.min_value < 0.0 + eps
max_is_one = 1.0 - eps < self.max_value < 1.0 + eps
if min_is_zero and max_is_one:
return np.copy(arr)
else:
diff = self.max_value - self.min_value
return self.min_value + diff * arr
# TODO
#def find_global_maxima(self):
# raise NotImplementedError()
    def draw(self, size=None, cmap="jet"):
        """
        Render the heatmaps as RGB images.

        Parameters
        ----------
        size : None or float or iterable of two ints or iterable of two floats, optional(default=None)
            Size of the rendered RGB image as (height, width).
            See `imresize_single_image()` for details.
            If set to None, no resizing is performed and the size of the heatmaps array is used.

        cmap : string or None, optional(default="jet")
            Color map of matplotlib to use in order to convert the heatmaps into RGB images.
            If set to None, no color map will be used and the heatmaps will be converted
            as simple intensity maps.

        Returns
        -------
        heatmaps_drawn : list of (H,W,3) ndarray(uint8)
            Rendered heatmaps, one per heatmap array channel.
        """
        heatmaps_uint8 = self.to_uint8()
        heatmaps_drawn = []
        # Render each channel of the heatmap array as its own RGB image.
        for c in sm.xrange(heatmaps_uint8.shape[2]):
            # c:c+1 here, because the additional axis is needed by imresize_single_image
            heatmap_c = heatmaps_uint8[..., c:c+1]
            if size is not None:
                heatmap_c_rs = imresize_single_image(heatmap_c, size,
                    interpolation="nearest")
            else:
                heatmap_c_rs = heatmap_c
            # Back to 2D and to [0.0, 1.0], as expected by the colormap function.
            heatmap_c_rs = np.squeeze(heatmap_c_rs).astype(np.float32) / 255.0
            if cmap is not None:
                cmap_func = plt.get_cmap(cmap)
                heatmap_cmapped = cmap_func(heatmap_c_rs)
                # Drop the alpha channel produced by the matplotlib colormap (RGBA -> RGB).
                heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
            else:
                # No colormap: replicate the intensities over three channels.
                heatmap_cmapped = np.tile(heatmap_c_rs[..., np.newaxis], (1, 1, 3))
            heatmap_cmapped = np.clip(heatmap_cmapped * 255, 0, 255).astype(np.uint8)
            heatmaps_drawn.append(heatmap_cmapped)
        return heatmaps_drawn
    def draw_on_image(self, image, alpha=0.75, cmap="jet", resize="heatmaps"):
        """
        Draw the heatmaps as overlays over an image.

        Parameters
        ----------
        image : (H,W,3) ndarray(uint8)
            Image onto which to draw the heatmaps.

        alpha : float, optional(default=0.75)
            Alpha/opacity value to use for the mixing of image and heatmaps.
            Higher values mean that the heatmaps will be more visible and the image less visible.

        cmap : string or None, optional(default="jet")
            Color map to use. See `HeatmapsOnImage.draw()` for details.

        resize : "heatmaps" or "image", optional(default="heatmaps")
            In case of size differences between the image and heatmaps, either the image or
            the heatmaps can be resized. This parameter controls which of the two will be resized
            to the other's size.

        Returns
        -------
        mix : list of (H,W,3) ndarray(uint8)
            Rendered overlays, one per heatmap array channel.
        """
        # assert RGB image
        do_assert(image.ndim == 3)
        do_assert(image.shape[2] == 3)
        do_assert(image.dtype.type == np.uint8)

        do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
        do_assert(resize in ["heatmaps", "image"])

        if resize == "image":
            image = imresize_single_image(image, self.arr_0to1.shape[0:2], interpolation="cubic")

        # When resize="heatmaps", the rendering itself resizes the heatmaps
        # to the image size; otherwise both already have the same size here.
        heatmaps_drawn = self.draw(
            size=image.shape[0:2] if resize == "heatmaps" else None,
            cmap=cmap
        )

        # Alpha-blend each rendered heatmap channel with the image.
        mix = [
            np.clip((1-alpha) * image + alpha * heatmap_i, 0, 255).astype(np.uint8)
            for heatmap_i
            in heatmaps_drawn
        ]

        return mix
def invert(self):
"""
Inverts each value in the heatmap, shifting low towards high values and vice versa.
This changes each value to::
v' = max - (v - min)
where `v` is the value at some spatial location, `min` is the minimum value in the heatmap
and `max` is the maximum value.
As the heatmap uses internally a 0.0 to 1.0 representation, this simply
becomes `v' = 1.0 - v`.
Note that the attributes `min_value` and `max_value` are not switched. They both keep their
values.
This function can be useful e.g. when working with depth maps, where algorithms might have
an easier time representing the furthest away points with zeros, requiring an inverted
depth map.
Returns
-------
result : HeatmapsOnImage
Inverted heatmap.
"""
arr_inv = HeatmapsOnImage.from_0to1(1 - self.arr_0to1, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
arr_inv.arr_was_2d = self.arr_was_2d
return arr_inv
    def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
        """
        Pad the heatmaps on their top/right/bottom/left side.

        Parameters
        ----------
        top : int, optional(default=0)
            Amount of pixels to add at the top side of the heatmaps. Must be 0 or greater.

        right : int, optional(default=0)
            Amount of pixels to add at the right side of the heatmaps. Must be 0 or greater.

        bottom : int, optional(default=0)
            Amount of pixels to add at the bottom side of the heatmaps. Must be 0 or greater.

        left : int, optional(default=0)
            Amount of pixels to add at the left side of the heatmaps. Must be 0 or greater.

        mode : string, optional(default="constant")
            Padding mode to use. See `numpy.pad()` for details.

        cval : number, optional(default=0.0)
            Value to use for padding if mode="constant". See `numpy.pad()` for details.

        Returns
        -------
        result : HeatmapsOnImage
            Padded heatmaps of height H'=H+top+bottom and width W'=W+left+right.
        """
        # NOTE: pad(...) here is the module-level helper function, not this
        # method — inside the method body the global name is resolved.
        arr_0to1_padded = pad(self.arr_0to1, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
        return HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
"""
Pad the heatmaps on their sides so that they match a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : string, optional(default="constant")
Padding mode to use. See `numpy.pad()` for details.
cval : number, optional(default=0.0)
Value to use for padding if mode="constant". See `numpy.pad()` for details.
return_pad_amounts : bool, optional(default=False)
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
result : tuple
First tuple entry: Padded heatmaps as HeatmapsOnImage object.
Second tuple entry: Amounts by which the heatmaps were padded on each side, given
as a tuple (top, right, bottom, left).
If return_pad_amounts is False, then only the heatmaps object is returned.
"""
arr_0to1_padded, pad_amounts = pad_to_aspect_ratio(self.arr_0to1, aspect_ratio=aspect_ratio, mode=mode, cval=cval, return_pad_amounts=True)
heatmaps = HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
if return_pad_amounts:
return heatmaps, pad_amounts
else:
return heatmaps
def avg_pool(self, block_size):
"""
Rescale the heatmap(s) array using average pooling of a given block/kernel size.
Parameters
----------
block_size : int or tuple of two ints or tuple of three ints
Size of each block of values to pool, aka kernel size. See `imgaug.pool()` for details.
Returns
-------
result : HeatmapsOnImage
Heatmaps after average pooling.
"""
arr_0to1_reduced = avg_pool(self.arr_0to1, block_size, cval=0.0)
return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
def max_pool(self, block_size):
    """
    Downscale the heatmap(s) array via max-pooling with a given block/kernel size.

    Parameters
    ----------
    block_size : int or tuple of two ints or tuple of three ints
        Size of each block of values to pool, aka kernel size.
        See `imgaug.pool()` for details.

    Returns
    -------
    result : HeatmapsOnImage
        Heatmaps after max-pooling.
    """
    pooled = max_pool(self.arr_0to1, block_size)
    return HeatmapsOnImage.from_0to1(
        pooled, shape=self.shape,
        min_value=self.min_value, max_value=self.max_value)
def scale(self, sizes, interpolation="cubic"):
    """
    Rescale the heatmap(s) array to the provided size using the provided interpolation.

    Parameters
    ----------
    sizes : float or iterable of two ints or iterable of two floats
        New size of the array as (height, width).
        See `imresize_single_image()` for details.
    interpolation : None or string or int, optional(default="cubic")
        Interpolation to use during the resize.
        See `imresize_single_image()` for details.

    Returns
    -------
    result : HeatmapsOnImage
        Rescaled heatmaps object.
    """
    resized = imresize_single_image(self.arr_0to1, sizes, interpolation=interpolation)
    # Cubic interpolation can overshoot the [0.0, 1.0] value range,
    # see https://github.com/opencv/opencv/issues/7195
    # TODO area interpolation too?
    resized = np.clip(resized, 0.0, 1.0)
    return HeatmapsOnImage.from_0to1(
        resized, shape=self.shape,
        min_value=self.min_value, max_value=self.max_value)
def to_uint8(self):
"""
Convert this heatmaps object to a 0-to-255 array.
Returns
-------
arr_uint8 : (H,W,C) ndarray(uint8)
Heatmap as a 0-to-255 array.
"""
# TODO this always returns (H,W,C), even if input ndarray was originall (H,W)
# does it make sense here to also return (H,W) if self.arr_was_2d?
arr_0to255 = np.clip(np.round(self.arr_0to1 * 255), 0, 255)
arr_uint8 = arr_0to255.astype(np.uint8)
return arr_uint8
@staticmethod
def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
    """
    Create a heatmaps object from a heatmap array with values ranging from 0 to 255.

    Parameters
    ----------
    arr_uint8 : (H,W) or (H,W,C) ndarray(uint8)
        Heatmap(s) array, where H=height, W=width, C=heatmap channels.
    shape : tuple of ints
        Shape of the image on which the heatmap(s) is/are placed -- NOT the shape
        of the heatmap array (the channel counts usually differ). If there is no
        corresponding image, use the shape of the heatmaps array instead.
    min_value : float, optional(default=0.0)
        Minimum value that the 0-to-255 array represents. Used by
        `HeatmapsOnImage.get_arr()` when converting back to the
        (min_value, max_value) range.
    max_value : float, optional(default=1.0)
        Maximum value that the 0-to-255 array represents.
        See `min_value` for details.

    Returns
    -------
    heatmaps : HeatmapsOnImage
        Heatmaps object.
    """
    return HeatmapsOnImage.from_0to1(
        arr_uint8.astype(np.float32) / 255.0, shape,
        min_value=min_value, max_value=max_value)
@staticmethod
def from_0to1(arr_0to1, shape, min_value=0.0, max_value=1.0):
    """
    Create a heatmaps object from a heatmap array containing values ranging from 0.0 to 1.0.

    Parameters
    ----------
    arr_0to1 : (H,W) or (H,W,C) ndarray(float32)
        Heatmap(s) array, where H=height, W=width, C=heatmap channels.
    shape : tuple of ints
        Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
        heatmap(s) array, unless it is identical to the image shape (note the likely
        difference between the arrays in the number of channels).
        If there is not a corresponding image, use the shape of the heatmaps array.
    min_value : float, optional(default=0.0)
        Minimum value for the heatmaps that the 0-to-1 array represents. This will usually
        be 0.0. It is used when calling `HeatmapsOnImage.get_arr()`, which converts the
        underlying (0.0, 1.0) array to value range (min_value, max_value).
        E.g. if you started with heatmaps in the range (-1.0, 1.0) and projected these
        to (0.0, 1.0), you should call this function with min_value=-1.0, max_value=1.0
        so that `get_arr()` returns heatmap arrays having value range (-1.0, 1.0).
    max_value : float, optional(default=1.0)
        Maximum value for the heatmaps that the 0-to-1 array represents.
        See parameter min_value for details.

    Returns
    -------
    heatmaps : HeatmapsOnImage
        Heatmaps object.
    """
    # Construct with the neutral range (0.0, 1.0) and overwrite min/max afterwards --
    # presumably this prevents __init__ from re-projecting the already-normalized
    # values; confirm against HeatmapsOnImage.__init__.
    heatmaps = HeatmapsOnImage(arr_0to1, shape, min_value=0.0, max_value=1.0)
    heatmaps.min_value = min_value
    heatmaps.max_value = max_value
    return heatmaps
@staticmethod
def change_normalization(arr, source, target):
    """
    Project a heatmap array from one (min, max) value range to another.

    E.g. the value range may be changed from min=0.0, max=1.0 to min=-1.0, max=1.0.

    Parameters
    ----------
    arr : ndarray
        Heatmap array to modify.
    source : tuple of two floats or HeatmapsOnImage
        Current value range of the input array, given as (min, max) floats, or a
        HeatmapsOnImage whose (min_value, max_value) will be used.
    target : tuple of two floats or HeatmapsOnImage
        Desired output value range of the array, in the same formats as `source`.

    Returns
    -------
    arr_target : ndarray
        Input array, with value range projected to the desired target value range.
    """
    do_assert(is_np_array(arr))

    def _to_min_max(value_range):
        # HeatmapsOnImage instances carry their own range; tuples are validated.
        if isinstance(value_range, HeatmapsOnImage):
            return (value_range.min_value, value_range.max_value)
        do_assert(isinstance(value_range, tuple))
        do_assert(len(value_range) == 2)
        do_assert(value_range[0] < value_range[1])
        return value_range

    source = _to_min_max(source)
    target = _to_min_max(target)

    # If source and target ranges match (within a tiny tolerance), skip the
    # computation and return a copy instead. This is reasonable, as source and
    # target will often both be (0.0, 1.0).
    eps = np.finfo(arr.dtype).eps
    mins_same = source[0] - 10*eps < target[0] < source[0] + 10*eps
    maxs_same = source[1] - 10*eps < target[1] < source[1] + 10*eps
    if mins_same and maxs_same:
        return np.copy(arr)

    min_source, max_source = source
    min_target, max_target = target
    # Normalize to [0, 1] relative to the source range, then stretch/shift to target.
    arr_0to1 = (arr - min_source) / (max_source - min_source)
    return min_target + arr_0to1 * (max_target - min_target)
def copy(self):
    """
    Create a shallow copy of the Heatmaps object.

    Returns
    -------
    out : HeatmapsOnImage
        Shallow copy.
    """
    # Shallow copying simply delegates to deepcopy() for this class.
    return self.deepcopy()
def deepcopy(self):
    """
    Create a deep copy of the Heatmaps object.

    Returns
    -------
    out : HeatmapsOnImage
        Deep copy.
    """
    # get_arr() converts the internal (0.0, 1.0) array back to the
    # (min_value, max_value) range, so re-constructing from it with the same
    # min/max yields an independent, equivalent object.
    return HeatmapsOnImage(
        self.get_arr(), shape=self.shape,
        min_value=self.min_value, max_value=self.max_value)
class SegmentationMapOnImage(object):
    """
    Object representing a segmentation map associated with an image.

    Attributes
    ----------
    DEFAULT_SEGMENT_COLORS : list of tuple of int
        Standard RGB colors to use during drawing, ordered by class index.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,1) ndarray or (H,W,C) ndarray
        Array representing the segmentation map. May have datatypes bool, integer or float.
            * If bool: Assumed to be of shape (H,W), (H,W,1) or (H,W,C). If (H,W) or (H,W,1) it
              is assumed to be for the case of having a single class (where any False denotes
              background). Otherwise there are assumed to be C channels, one for each class,
              with each of them containing a mask for that class. The masks may overlap.
            * If integer: Assumed to be of shape (H,W) or (H,W,1). Each pixel is assumed to
              contain an integer denoting the class index. Classes are assumed to be
              non-overlapping. The number of classes cannot be guessed from this input, hence
              nb_classes must be set.
            * If float: Assumed to be of shape (H,W), (H,W,1) or (H,W,C) with meanings being
              similar to the case of `bool`. Values are expected to fall always in the range
              0.0 to 1.0 and are usually expected to be either 0.0 or 1.0 upon instantiation
              of a new segmentation map. Classes may overlap.
    shape : iterable of int
        Shape of the corresponding image (NOT the segmentation map array). This is expected
        to be (H, W) or (H, W, C) with C usually being 3. If there is no corresponding image,
        then use the segmentation map's shape instead.
    nb_classes : int or None
        Total number of unique classes that may appear in an segmentation map, i.e. the max
        class index. This may be None if the input array is of type bool or float. The number
        of classes however must be provided if the input array is of type int, as then the
        number of classes cannot be guessed.
    """

    DEFAULT_SEGMENT_COLORS = [
        (0, 0, 0),  # black
        (230, 25, 75),  # red
        (60, 180, 75),  # green
        (255, 225, 25),  # yellow
        (0, 130, 200),  # blue
        (245, 130, 48),  # orange
        (145, 30, 180),  # purple
        (70, 240, 240),  # cyan
        (240, 50, 230),  # magenta
        (210, 245, 60),  # lime
        (250, 190, 190),  # pink
        (0, 128, 128),  # teal
        (230, 190, 255),  # lavender
        (170, 110, 40),  # brown
        (255, 250, 200),  # beige
        (128, 0, 0),  # maroon
        (170, 255, 195),  # mint
        (128, 128, 0),  # olive
        (255, 215, 180),  # coral
        (0, 0, 128),  # navy
        (128, 128, 128),  # grey
        (255, 255, 255),  # white
        # --
        (115, 12, 37),  # dark red
        (30, 90, 37),  # dark green
        (127, 112, 12),  # dark yellow
        (0, 65, 100),  # dark blue
        (122, 65, 24),  # dark orange
        (72, 15, 90),  # dark purple
        (35, 120, 120),  # dark cyan
        (120, 25, 115),  # dark magenta
        (105, 122, 30),  # dark lime
        (125, 95, 95),  # dark pink
        (0, 64, 64),  # dark teal
        (115, 95, 127),  # dark lavender
        (85, 55, 20),  # dark brown
        (127, 125, 100),  # dark beige
        (64, 0, 0),  # dark maroon
        (85, 127, 97),  # dark mint
        (64, 64, 0),  # dark olive
        (127, 107, 90),  # dark coral
        (0, 0, 64),  # dark navy
        (64, 64, 64),  # dark grey
    ]

    def __init__(self, arr, shape, nb_classes=None):
        # The map is always stored internally as a float32 (H,W,C) array with one
        # channel per class. input_was records the original dtype family and ndim
        # so later conversions can restore the caller's input format.
        if arr.dtype.type == np.bool:
            # NOTE(review): np.bool is a deprecated alias of builtin bool in newer
            # numpy versions -- confirm the targeted numpy still provides it.
            do_assert(arr.ndim in [2, 3])
            self.input_was = ("bool", arr.ndim)
            if arr.ndim == 2:
                arr = arr[..., np.newaxis]
            arr = arr.astype(np.float32)
        elif arr.dtype.type in [np.uint8, np.uint32, np.int8, np.int16, np.int32]:
            do_assert(arr.ndim == 2 or (arr.ndim == 3 and arr.shape[2] == 1))
            do_assert(nb_classes is not None)
            do_assert(nb_classes > 0)
            # Only the first 100 values are inspected -- a spot check, not a full
            # validation of the whole array.
            do_assert(np.min(arr.flat[0:100]) >= 0)
            # NOTE(review): `<= nb_classes` permits the value nb_classes itself,
            # which would make the np.eye indexing below raise IndexError;
            # likely this was meant to be `< nb_classes` -- confirm.
            do_assert(np.max(arr.flat[0:100]) <= nb_classes)
            self.input_was = ("int", arr.dtype.type, arr.ndim)
            if arr.ndim == 3:
                arr = arr[..., 0]
            arr = np.eye(nb_classes)[arr]  # from class indices to one hot
            arr = arr.astype(np.float32)
        elif arr.dtype.type in [np.float16, np.float32]:
            do_assert(arr.ndim == 3)
            self.input_was = ("float", arr.dtype.type, arr.ndim)
            arr = arr.astype(np.float32)
        else:
            dt = str(arr.dtype) if is_np_array(arr) else "<no ndarray>"
            raise Exception("Input was expected to be an ndarray of dtype bool, uint8, uint32 "
                            "int8, int16, int32 or float32. Got type %s with dtype %s." % (type(arr), dt))
        do_assert(arr.ndim == 3)
        do_assert(arr.dtype.type == np.float32)
        self.arr = arr
        self.shape = shape
        self.nb_classes = nb_classes if nb_classes is not None else arr.shape[2]

    #@property
    #def nb_classes(self):
    #    return self.arr.shape[2]

    def get_arr_int(self, background_threshold=0.01, background_class_id=None):
        """
        Get the segmentation map array as an integer array of shape (H, W).

        Each pixel in that array contains an integer value representing the pixel's class.
        If multiple classes overlap, the one with the highest local float value is picked.
        If that highest local value is below `background_threshold`, the method instead uses
        the background class id as the pixel's class value.
        By default, class id 0 is the background class. This may only be changed if the original
        input to the segmentation map object was an integer map.

        Parameters
        ----------
        background_threshold : float, optional(default=0.01)
            At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
            class-heatmaps has a value above this threshold, the method uses the background class
            id instead.
        background_class_id : None or int, optional(default=None)
            Class id to fall back to if no class-heatmap passes the threshold at a spatial
            location. May only be provided if the original input was an integer mask and in these
            cases defaults to 0. If the input were float or boolean masks, the background class id
            may not be set as it is assumed that the background is implicitly defined
            as 'any spatial location that has zero-like values in all masks'.

        Returns
        -------
        result : (H,W) ndarray(int)
            Segmentation map array.
            If the original input consisted of boolean or float masks, then the highest possible
            class id is `1+C`, where `C` is the number of provided float/boolean masks. The value
            `0` in the integer mask then denotes the background class.
        """
        if self.input_was[0] in ["bool", "float"]:
            do_assert(background_class_id is None, "The background class id may only be changed if the original input to SegmentationMapOnImage was an *integer* based segmentation map.")
        if background_class_id is None:
            background_class_id = 0
        # Per-pixel winner among the class channels.
        channelwise_max_idx = np.argmax(self.arr, axis=2)
        # for bool and float input masks, we assume that the background is implicitly given,
        # i.e. anything where all masks/channels have zero-like values
        # for int, we assume that the background class is explicitly given and has the index 0
        if self.input_was[0] in ["bool", "float"]:
            result = 1 + channelwise_max_idx
        else:  # integer mask was provided
            result = channelwise_max_idx
        if background_threshold is not None and background_threshold > 0:
            # Pixels where even the strongest class is below the threshold fall
            # back to the background class.
            probs = np.amax(self.arr, axis=2)
            result[probs < background_threshold] = background_class_id
        return result.astype(np.int32)

    #def get_arr_bool(self, allow_overlapping=False, threshold=0.5, background_threshold=0.01, background_class_id=0):
    #    # TODO
    #    raise NotImplementedError()

    def draw(self, size=None, background_threshold=0.01, background_class_id=None, colors=None, return_foreground_mask=False):
        """
        Render the segmentation map as an RGB image.

        Parameters
        ----------
        size : None or float or iterable of two ints or iterable of two floats, optional(default=None)
            Size of the rendered RGB image as (height, width).
            See `imresize_single_image()` for details.
            If set to None, no resizing is performed and the size of the segmentation map array is
            used.
        background_threshold : float, optional(default=0.01)
            See `SegmentationMapOnImage.get_arr_int()`.
        background_class_id : None or int, optional(default=None)
            See `SegmentationMapOnImage.get_arr_int()`.
        colors : None or list of tuple of int, optional(default=None)
            Colors to use. One for each class to draw. If None, then default colors will be used.
        return_foreground_mask : bool, optional(default=False)
            Whether to return a mask of the same size as the drawn segmentation map, containing
            True at any spatial location that is not the background class and False everywhere
            else.

        Returns
        -------
        segmap_drawn : (H,W,3) ndarray(uint8)
            Rendered segmentation map.
        foreground_mask : (H,W) ndarray(bool)
            Mask indicating the locations of foreground classes. Only returned if
            return_foreground_mask is True.
        """
        arr = self.get_arr_int(background_threshold=background_threshold, background_class_id=background_class_id)
        nb_classes = 1 + np.max(arr)
        segmap_drawn = np.zeros((arr.shape[0], arr.shape[1], 3), dtype=np.uint8)
        if colors is None:
            colors = SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS
        do_assert(nb_classes <= len(colors), "Can't draw all %d classes as it would exceed the maximum number of %d available colors." % (nb_classes, len(colors),))
        # Only classes that actually appear in the map are painted.
        ids_in_map = np.unique(arr)
        for c, color in zip(sm.xrange(nb_classes), colors):
            if c in ids_in_map:
                class_mask = (arr == c)
                segmap_drawn[class_mask] = color
        if return_foreground_mask:
            background_class_id = 0 if background_class_id is None else background_class_id
            foreground_mask = (arr != background_class_id)
        else:
            foreground_mask = None
        if size is not None:
            # Nearest-neighbour keeps the per-pixel class colors crisp during resize.
            segmap_drawn = imresize_single_image(segmap_drawn, size, interpolation="nearest")
            if foreground_mask is not None:
                foreground_mask = imresize_single_image(foreground_mask.astype(np.uint8), size, interpolation="nearest") > 0
        if foreground_mask is not None:
            return segmap_drawn, foreground_mask
        return segmap_drawn

    def draw_on_image(self, image, alpha=0.5, resize="segmentation_map", background_threshold=0.01, background_class_id=None, colors=None, draw_background=False):
        """
        Draw the segmentation map as an overlay over an image.

        Parameters
        ----------
        image : (H,W,3) ndarray(uint8)
            Image onto which to draw the segmentation map.
        alpha : float, optional(default=0.5)
            Alpha/opacity value to use for the mixing of image and segmentation map.
            Higher values mean that the segmentation map will be more visible and the image less
            visible.
        resize : "segmentation_map" or "image", optional(default="segmentation_map")
            In case of size differences between the image and segmentation map, either the image or
            the segmentation map can be resized. This parameter controls which of the two will be
            resized to the other's size.
        background_threshold : float, optional(default=0.01)
            See `SegmentationMapOnImage.get_arr_int()`.
        background_class_id : None or int, optional(default=None)
            See `SegmentationMapOnImage.get_arr_int()`.
        colors : None or list of tuple of int, optional(default=None)
            Colors to use. One for each class to draw. If None, then default colors will be used.
        draw_background : bool, optional(default=False)
            If True, the background will be drawn like any other class.
            If False, the background will not be drawn, i.e. the respective background pixels
            will be identical with the image's RGB color at the corresponding spatial location
            and no color overlay will be applied.

        Returns
        -------
        mix : (H,W,3) ndarray(uint8)
            Rendered overlays.
        """
        # assert RGB image
        do_assert(image.ndim == 3)
        do_assert(image.shape[2] == 3)
        do_assert(image.dtype.type == np.uint8)
        do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
        do_assert(resize in ["segmentation_map", "image"])
        if resize == "image":
            image = imresize_single_image(image, self.arr.shape[0:2], interpolation="cubic")
        segmap_drawn, foreground_mask = self.draw(
            background_threshold=background_threshold,
            background_class_id=background_class_id,
            size=image.shape[0:2] if resize == "segmentation_map" else None,
            colors=colors,
            return_foreground_mask=True
        )
        if draw_background:
            # Blend over the whole image, including background pixels.
            mix = np.clip(
                (1-alpha) * image + alpha * segmap_drawn,
                0,
                255
            ).astype(np.uint8)
        else:
            # Blend only foreground pixels; background keeps the original image colors.
            foreground_mask = foreground_mask[..., np.newaxis]
            mix = np.zeros_like(image)
            mix += (~foreground_mask).astype(np.uint8) * image
            mix += foreground_mask.astype(np.uint8) * np.clip(
                (1-alpha) * image + alpha * segmap_drawn,
                0,
                255
            ).astype(np.uint8)
        return mix

    def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
        """
        Pad the segmentation map on its top/right/bottom/left side.

        Parameters
        ----------
        top : int, optional(default=0)
            Amount of pixels to add at the top side of the segmentation map. Must be 0 or
            greater.
        right : int, optional(default=0)
            Amount of pixels to add at the right side of the segmentation map. Must be 0 or
            greater.
        bottom : int, optional(default=0)
            Amount of pixels to add at the bottom side of the segmentation map. Must be 0 or
            greater.
        left : int, optional(default=0)
            Amount of pixels to add at the left side of the segmentation map. Must be 0 or
            greater.
        mode : string, optional(default="constant")
            Padding mode to use. See `numpy.pad()` for details.
        cval : number, optional(default=0.0)
            Value to use for padding if mode="constant". See `numpy.pad()` for details.

        Returns
        -------
        segmap : SegmentationMapOnImage
            Padded segmentation map of height H'=H+top+bottom and width W'=W+left+right.
        """
        arr_padded = pad(self.arr, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
        segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)
        # Preserve the record of the original input format on the new object.
        segmap.input_was = self.input_was
        return segmap

    def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
        """
        Pad the segmentation map on its sides so that it matches a target aspect ratio.

        Depending on which dimension is smaller (height or width), only the corresponding
        sides (left/right or top/bottom) will be padded. In each case, both of the sides will
        be padded equally.

        Parameters
        ----------
        aspect_ratio : float
            Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
            as much width as height.
        mode : string, optional(default="constant")
            Padding mode to use. See `numpy.pad()` for details.
        cval : number, optional(default=0.0)
            Value to use for padding if mode="constant". See `numpy.pad()` for details.
        return_pad_amounts : bool, optional(default=False)
            If False, then only the padded image will be returned. If True, a tuple with two
            entries will be returned, where the first entry is the padded image and the second
            entry are the amounts by which each image side was padded. These amounts are again a
            tuple of the form (top, right, bottom, left), with each value being an integer.

        Returns
        -------
        segmap : tuple
            First tuple entry: Padded segmentation map as SegmentationMapOnImage object.
            Second tuple entry: Amounts by which the segmentation map was padded on each side,
            given as a tuple (top, right, bottom, left).
            If return_pad_amounts is False, then only the segmentation map object is returned.
        """
        arr_padded, pad_amounts = pad_to_aspect_ratio(self.arr, aspect_ratio=aspect_ratio, mode=mode, cval=cval, return_pad_amounts=True)
        segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)
        # Preserve the record of the original input format on the new object.
        segmap.input_was = self.input_was
        if return_pad_amounts:
            return segmap, pad_amounts
        else:
            return segmap

    def scale(self, sizes, interpolation="cubic"):
        """
        Rescale the segmentation map array to the provided size given the provided interpolation.

        Parameters
        ----------
        sizes : float or iterable of two ints or iterable of two floats
            New size of the array in (height, width). See `imresize_single_image()` for details.
        interpolation : None or string or int, optional(default="cubic")
            The interpolation to use during resize. See `imresize_single_image()` for details.
            Note: The segmentation map is internally stored as multiple float-based heatmaps,
            making smooth interpolations potentially more reasonable than nearest neighbour
            interpolation.

        Returns
        -------
        segmap : SegmentationMapOnImage
            Rescaled segmentation map object.
        """
        arr_rescaled = imresize_single_image(self.arr, sizes, interpolation=interpolation)
        # cubic interpolation can lead to values outside of [0.0, 1.0],
        # see https://github.com/opencv/opencv/issues/7195
        # TODO area interpolation too?
        arr_rescaled = np.clip(arr_rescaled, 0.0, 1.0)
        segmap = SegmentationMapOnImage(arr_rescaled, shape=self.shape)
        # Preserve the record of the original input format on the new object.
        segmap.input_was = self.input_was
        return segmap

    def to_heatmaps(self, only_nonempty=False, not_none_if_no_nonempty=False):
        """
        Convert segmentation map to heatmaps object.

        Each segmentation map class will be represented as a single heatmap channel.

        Parameters
        ----------
        only_nonempty : bool, optional(default=False)
            If True, then only heatmaps for classes that appear in the segmentation map will be
            generated. Additionally, a list of these class ids will be returned.
        not_none_if_no_nonempty : bool, optional(default=False)
            If `only_nonempty` is True and for a segmentation map no channel was non-empty,
            this function usually returns None as the heatmaps object. If however this parameter
            is set to True, a heatmaps object with one channel (representing class 0)
            will be returned as a fallback in these cases.

        Returns
        -------
        result : HeatmapsOnImage or None
            Segmentation map as heatmaps.
            If `only_nonempty` was set to True and no class appeared in the segmentation map,
            then this is None.
        class_indices : list of int
            Class ids (0 to C-1) of the classes that were actually added to the heatmaps.
            Only returned if `only_nonempty` was set to True.
        """
        if not only_nonempty:
            return HeatmapsOnImage.from_0to1(self.arr, self.shape, min_value=0.0, max_value=1.0)
        else:
            # A channel counts as non-empty if its summed activation exceeds a
            # small tolerance (guards against float noise).
            nonempty_mask = np.sum(self.arr, axis=(0, 1)) > 0 + 1e-4
            if np.sum(nonempty_mask) == 0:
                if not_none_if_no_nonempty:
                    nonempty_mask[0] = True
                else:
                    return None, []
            class_indices = np.arange(self.arr.shape[2])[nonempty_mask]
            channels = self.arr[..., class_indices]
            return HeatmapsOnImage(channels, self.shape, min_value=0.0, max_value=1.0), class_indices

    @staticmethod
    def from_heatmaps(heatmaps, class_indices=None, nb_classes=None):
        """
        Convert heatmaps to segmentation map.

        Assumes that each class is represented as a single heatmap channel.

        Parameters
        ----------
        heatmaps : HeatmapsOnImage
            Heatmaps to convert.
        class_indices : None or list of int, optional(default=None)
            List of class indices represented by each heatmap channel. See also the
            secondary output of `to_heatmaps()`. If this is provided, it must have the same
            length as the number of heatmap channels.
        nb_classes : None or int, optional(default=None)
            Number of classes. Must be provided if class_indices is set.

        Returns
        -------
        result : SegmentationMapOnImage
            Segmentation map derived from heatmaps.
        """
        if class_indices is None:
            return SegmentationMapOnImage(heatmaps.arr_0to1, shape=heatmaps.shape)
        else:
            do_assert(nb_classes is not None)
            do_assert(min(class_indices) >= 0)
            do_assert(max(class_indices) < nb_classes)
            do_assert(len(class_indices) == heatmaps.arr_0to1.shape[2])
            arr_0to1 = heatmaps.arr_0to1
            arr_0to1_full = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1], nb_classes), dtype=np.float32)
            #empty_channel = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1]), dtype=np.float32)
            class_indices_set = set(class_indices)
            # Scatter the provided heatmap channels into their class positions;
            # classes without a channel keep all-zero activations.
            heatmap_channel = 0
            for c in sm.xrange(nb_classes):
                if c in class_indices_set:
                    arr_0to1_full[:, :, c] = arr_0to1[:, :, heatmap_channel]
                    heatmap_channel += 1
            return SegmentationMapOnImage(arr_0to1_full, shape=heatmaps.shape)

    def copy(self):
        """
        Create a shallow copy of the segmentation map object.

        Returns
        -------
        out : SegmentationMapOnImage
            Shallow copy.
        """
        return self.deepcopy()

    def deepcopy(self):
        """
        Create a deep copy of the segmentation map object.

        Returns
        -------
        out : SegmentationMapOnImage
            Deep copy.
        """
        # The constructor's astype(np.float32) call copies the float32 array,
        # so the new object does not share data with this one.
        segmap = SegmentationMapOnImage(self.arr, shape=self.shape, nb_classes=self.nb_classes)
        # Restore the original input-format record (the constructor would
        # otherwise record the internal float representation).
        segmap.input_was = self.input_was
        return segmap
############################
# Background augmentation
############################
class Batch(object):
    """
    Encapsulates one batch of augmentables before and after augmentation.

    Parameters
    ----------
    images : None or (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
        The images to augment.
    heatmaps : None or list of HeatmapsOnImage
        The heatmaps to augment.
    segmentation_maps : None or list of SegmentationMapOnImage
        The segmentation maps to augment.
    keypoints : None or list of KeypointOnImage
        The keypoints to augment.
    bounding_boxes : None or list of BoundingBoxesOnImage
        The bounding boxes to augment.
    data : anything
        Additional data that is saved in the batch and may be read out
        after augmentation. This could e.g. contain filepaths to each image
        in `images`. As this object is usually used for background
        augmentation with multiple processes, the augmented Batch objects might
        not be returned in the original order, making this information useful.
    """

    def __init__(self, images=None, heatmaps=None, segmentation_maps=None, keypoints=None, bounding_boxes=None, data=None):
        # Every augmentable is stored twice: the input ("<name>") and its
        # augmented counterpart ("<name>_aug"), which starts out as None.
        augmentables = [
            ("images", images),
            ("heatmaps", heatmaps),
            ("segmentation_maps", segmentation_maps),
            ("keypoints", keypoints),
            ("bounding_boxes", bounding_boxes),
        ]
        for name, value in augmentables:
            setattr(self, name, value)
            setattr(self, name + "_aug", None)
        self.data = data
class BatchLoader(object):
    """
    Class to load batches in the background.

    Loaded batches can be accessed using `BatchLoader.queue`.

    Parameters
    ----------
    load_batch_func : callable
        Function that yields Batch objects (i.e. expected to be a generator).
        Background loading automatically stops when the last batch was yielded.
    queue_size : int, optional(default=50)
        Maximum number of batches to store in the queue. May be set higher
        for small images and/or small batches.
    nb_workers : int, optional(default=1)
        Number of workers to run in the background.
    threaded : bool, optional(default=True)
        Whether to run the background processes using threads (true) or
        full processes (false).
    """

    def __init__(self, load_batch_func, queue_size=50, nb_workers=1, threaded=True):
        do_assert(queue_size > 0)
        do_assert(nb_workers >= 1)
        # multiprocessing primitives are used even in threaded mode so that both
        # modes share one queue/signal implementation.
        self.queue = multiprocessing.Queue(queue_size)
        self.join_signal = multiprocessing.Event()
        self.finished_signals = []
        self.workers = []
        self.threaded = threaded
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            finished_signal = multiprocessing.Event()
            self.finished_signals.append(finished_signal)
            if threaded:
                # Threads get seedval=None: they share the parent's RNG state.
                worker = threading.Thread(target=self._load_batches, args=(load_batch_func, self.queue, finished_signal, self.join_signal, None))
            else:
                # Each process gets its own seed so workers don't produce
                # identical random sequences.
                worker = multiprocessing.Process(target=self._load_batches, args=(load_batch_func, self.queue, finished_signal, self.join_signal, seeds[i]))
            # Daemonize so workers don't keep the interpreter alive on exit.
            worker.daemon = True
            worker.start()
            self.workers.append(worker)

    def all_finished(self):
        """
        Determine whether the workers have finished the loading process.

        Returns
        -------
        out : bool
            True if all workers have finished. Else False.
        """
        return all([event.is_set() for event in self.finished_signals])

    def _load_batches(self, load_batch_func, queue, finished_signal, join_signal, seedval):
        # Worker body: pull batches from the generator and push them (pickled)
        # into the shared queue until exhausted or told to join.
        if seedval is not None:
            random.seed(seedval)
            np.random.seed(seedval)
            seed(seedval)
        try:
            for batch in load_batch_func():
                do_assert(isinstance(batch, Batch), "Expected batch returned by lambda function to be of class imgaug.Batch, got %s." % (type(batch),))
                batch_pickled = pickle.dumps(batch, protocol=-1)
                # Retry with a short timeout so a full queue doesn't block us
                # past a join request.
                while not join_signal.is_set():
                    try:
                        queue.put(batch_pickled, timeout=0.001)
                        break
                    except QueueFull:
                        pass
                if join_signal.is_set():
                    break
        except Exception:
            traceback.print_exc()
        finally:
            # Always mark this worker as finished, even after an error.
            finished_signal.set()

    def terminate(self):
        """
        Stop all workers.
        """
        self.join_signal.set()
        # give minimal time to put generated batches in queue and gracefully shut down
        time.sleep(0.002)
        # clean the queue, this reportedly prevents hanging threads
        while True:
            try:
                self.queue.get(timeout=0.005)
            except QueueEmpty:
                break
        if self.threaded:
            for worker in self.workers:
                worker.join()
            # we don't have to set the finished_signals here, because threads always finish
            # gracefully
        else:
            for worker in self.workers:
                worker.terminate()
                worker.join()
            # wait here a tiny bit to really make sure that everything is killed before setting
            # the finished_signals. calling set() and is_set() (via a subprocess) on them at the
            # same time apparently results in a deadlock (at least in python 2).
            #time.sleep(0.02)
            for finished_signal in self.finished_signals:
                finished_signal.set()
        self.queue.close()
class BackgroundAugmenter(object):
"""
Class to augment batches in the background (while training on the GPU).
This is a wrapper around the multiprocessing module.
Parameters
----------
batch_loader : BatchLoader
BatchLoader object to load data in the
background.
augseq : Augmenter
An augmenter to apply to all loaded images.
This may be e.g. a Sequential to apply multiple augmenters.
queue_size : int
Size of the queue that is used to temporarily save the augmentation
results. Larger values offer the background processes more room
to save results when the main process doesn't load much, i.e. they
can lead to smoother and faster training. For large images, high
values can block a lot of RAM though.
nb_workers : "auto" or int
Number of background workers to spawn. If auto, it will be set
to C-1, where C is the number of CPU cores.
"""
def __init__(self, batch_loader, augseq, queue_size=50, nb_workers="auto"):
    """
    Initialize the background augmenter and spawn its worker processes.

    Parameters
    ----------
    batch_loader : BatchLoader
        BatchLoader object whose queue supplies the input batches; its
        finished_signals tell the workers when the source is exhausted.
    augseq : Augmenter
        Augmenter (e.g. a Sequential) applied to all loaded batches.
    queue_size : int, optional(default=50)
        Size of the result queue for augmented batches.
    nb_workers : "auto" or int, optional(default="auto")
        Number of worker processes. "auto" uses cpu_count() - 1 (at least 1).
    """
    do_assert(queue_size > 0)
    self.augseq = augseq
    self.source_finished_signals = batch_loader.finished_signals
    self.queue_source = batch_loader.queue
    self.queue_result = multiprocessing.Queue(queue_size)
    if nb_workers == "auto":
        try:
            nb_workers = multiprocessing.cpu_count()
        except (ImportError, NotImplementedError):
            # cpu_count() may be unavailable on some platforms.
            nb_workers = 1
        # try to reserve at least one core for the main process
        nb_workers = max(1, nb_workers - 1)
    else:
        do_assert(nb_workers >= 1)
    #print("Starting %d background processes" % (nb_workers,))
    self.nb_workers = nb_workers
    self.workers = []
    # Counts workers that have signalled completion via a None batch.
    self.nb_workers_finished = 0
    self.augment_images = True
    self.augment_keypoints = True
    # One seed per worker so the processes produce different augmentations.
    seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
    for i in range(nb_workers):
        worker = multiprocessing.Process(target=self._augment_images_worker, args=(augseq, self.queue_source, self.queue_result, self.source_finished_signals, seeds[i]))
        # Daemonize so workers don't keep the interpreter alive on exit.
        worker.daemon = True
        worker.start()
        self.workers.append(worker)
def get_batch(self):
"""
Returns a batch from the queue of augmented batches.
If workers are still running and there are no batches in the queue,
it will automatically wait for the next batch.
Returns
-------
out : None or ia.Batch
One batch or None if all workers have finished.
"""
batch_str = self.queue_result.get()
batch = pickle.loads(batch_str)
if batch is not None:
return batch
else:
self.nb_workers_finished += 1
if self.nb_workers_finished == self.nb_workers:
return None
else:
return self.get_batch()
def _augment_images_worker(self, augseq, queue_source, queue_result, source_finished_signals, seedval):
"""
Worker function that endlessly queries the source queue (input
batches), augments batches in it and sends the result to the output
queue.
"""
np.random.seed(seedval)
random.seed(seedval)
augseq.reseed(seedval)
seed(seedval)
while True:
# wait for a new batch in the source queue and load it
try:
batch_str = queue_source.get(timeout=0.1)
batch = pickle.loads(batch_str)
"""
# augment the batch
batch_augment_images = batch.images is not None and self.augment_images
batch_augment_keypoints = batch.keypoints is not None and self.augment_keypoints
if batch_augment_images and batch_augment_keypoints:
augseq_det = augseq.to_deterministic() if not augseq.deterministic else augseq
batch.images_aug = augseq_det.augment_images(batch.images)
batch.keypoints_aug = augseq_det.augment_keypoints(batch.keypoints)
elif batch_augment_images:
batch.images_aug = augseq.augment_images(batch.images)
elif batch_augment_keypoints:
batch.keypoints_aug = augseq.augment_keypoints(batch.keypoints)
"""
batch_aug = list(augseq.augment_batches([batch], background=False))[0]
# send augmented batch to output queue
batch_str = pickle.dumps(batch, protocol=-1)
queue_result.put(batch_str)
except QueueEmpty:
if all([signal.is_set() for signal in source_finished_signals]):
queue_result.put(pickle.dumps(None, protocol=-1))
return
def terminate(self):
"""
Terminates all background processes immediately.
This will also free their RAM.
"""
for worker in self.workers:
worker.terminate()
self.queue_result.close()
# Remove dead code
from __future__ import print_function, division, absolute_import
import random
import numpy as np
import copy
import numbers
import cv2
import math
import imageio
import multiprocessing
import threading
import traceback
import sys
import six
import six.moves as sm
import os
import skimage.draw
import skimage.measure
import collections
import time
import json
import matplotlib.pyplot as plt
if sys.version_info[0] == 2:
import cPickle as pickle
from Queue import Empty as QueueEmpty, Full as QueueFull
elif sys.version_info[0] == 3:
import pickle
from queue import Empty as QueueEmpty, Full as QueueFull
xrange = range
# Generic sentinel string; presumably means "apply to everything" -- its
# consumers are not visible in this part of the file, confirm before relying on it.
ALL = "ALL"
# directory this module file lives in; used to locate bundled assets below
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
# filepaths to the bundled quokka example image, its annotations and depth map
QUOKKA_FP = os.path.join(FILE_DIR, "quokka.jpg")
QUOKKA_ANNOTATIONS_FP = os.path.join(FILE_DIR, "quokka_annotations.json")
QUOKKA_DEPTH_MAP_HALFRES_FP = os.path.join(FILE_DIR, "quokka_depth_map_halfres.png")
# font bundled with the library, used by draw_text()
DEFAULT_FONT_FP = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    "DejaVuSans.ttf"
)
# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)
def is_np_array(val):
    """
    Check whether a variable is a numpy array.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a numpy array. Otherwise False.
    """
    # np.generic is deliberately not tested here, as it would also match
    # numpy scalar values, which are not arrays.
    is_array = isinstance(val, np.ndarray)
    return is_array
def is_single_integer(val):
    """
    Check whether a variable is an integer.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is an integer. Otherwise False.
    """
    # bool is a subclass of int in Python, so it must be excluded explicitly
    if isinstance(val, bool):
        return False
    return isinstance(val, numbers.Integral)
def is_single_float(val):
    """
    Check whether a variable is a float.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a float. Otherwise False.
    """
    # exclude bools (subclass of int) and integers, then accept any real number
    if isinstance(val, bool):
        return False
    if isinstance(val, numbers.Integral):
        return False
    return isinstance(val, numbers.Real)
def is_single_number(val):
    """
    Check whether a variable is a number, i.e. an integer or float.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a number. Otherwise False.
    """
    # numbers.Integral is a subclass of numbers.Real in the numeric tower, so
    # a single Real check covers both ints and floats; bools are excluded.
    if isinstance(val, bool):
        return False
    return isinstance(val, numbers.Real)
def is_iterable(val):
    """
    Check whether a variable is iterable.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is an iterable. Otherwise False.
    """
    # collections.Iterable moved to collections.abc in Python 3.3 and was
    # removed from the collections namespace in Python 3.10, so import it
    # from collections.abc with a fallback for Python 2.
    try:
        from collections.abc import Iterable  # Python 3.3+
    except ImportError:
        from collections import Iterable  # Python 2
    return isinstance(val, Iterable)
# TODO convert to is_single_string() or rename is_single_integer/float/number()
def is_string(val):
    """
    Check whether a variable is a string.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a string. Otherwise False.
    """
    # six.string_types covers both str and unicode on Python 2
    string_types = six.string_types
    return isinstance(val, string_types)
def is_integer_array(val):
    """
    Check whether a variable is a numpy integer array.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a numpy integer array. Otherwise False.
    """
    if not is_np_array(val):
        return False
    return issubclass(val.dtype.type, np.integer)
def is_float_array(val):
    """
    Check whether a variable is a numpy float array.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a numpy float array. Otherwise False.
    """
    if not is_np_array(val):
        return False
    return issubclass(val.dtype.type, np.floating)
def is_callable(val):
    """
    Check whether a variable is a callable, e.g. a function.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a callable. Otherwise False.
    """
    py_major, py_minor = sys.version_info[0], sys.version_info[1]
    if py_major != 3 or py_minor > 2:
        return callable(val)
    # Python 3.0-3.2 lacked the callable() builtin
    return hasattr(val, '__call__')
def caller_name():
    """
    Return the name of the caller, e.g. a function.

    Returns
    -------
    name : str
        The name of the caller as a string
    """
    # frame 1 is the frame of whoever called this function
    frame = sys._getframe(1)
    return frame.f_code.co_name
def seed(seedval):
    """
    Set the seed used by the global random state and thereby all randomness
    in the library.

    This random state is used by default by all augmenters. Under special
    circumstances (e.g. when an augmenter is switched to deterministic mode),
    the global random state is replaced by another -- local -- one.
    The replacement is dependent on the global random state.

    Parameters
    ----------
    seedval : int
        The seed to use.
    """
    CURRENT_RANDOM_STATE.seed(seedval)
def current_random_state():
    """
    Return the current/global random state of the library.

    Returns
    -------
    out : np.random.RandomState
        The current/global random state.
    """
    return CURRENT_RANDOM_STATE
def new_random_state(seed=None, fully_random=False):
    """
    Return a new random state.

    Parameters
    ----------
    seed : None or int, optional(default=None)
        Optional seed value to use.
        The same datatypes are allowed as for np.random.RandomState(seed).

    fully_random : bool, optional(default=False)
        Whether to use numpy's random initialization for the
        RandomState (used if set to True). If False, a seed is sampled from
        the global random state, which is a bit faster and hence the default.

    Returns
    -------
    out : np.random.RandomState
        The new random state.
    """
    if seed is None and not fully_random:
        # sampling a seed from the global state is noticeably faster than
        # letting RandomState() seed itself from the OS entropy source
        seed = CURRENT_RANDOM_STATE.randint(0, 10**6, 1)[0]
    return np.random.RandomState(seed)
def dummy_random_state():
    """
    Return a dummy random state that is always based on a seed of 1.

    Returns
    -------
    out : np.random.RandomState
        The new random state.
    """
    return np.random.RandomState(seed=1)
def copy_random_state(random_state, force_copy=False):
    """
    Create a copy of a random state.

    Parameters
    ----------
    random_state : np.random.RandomState
        The random state to copy.

    force_copy : bool, optional(default=False)
        If True, this function will always create a copy of every random
        state. If False, it will not copy numpy's default random state,
        but all other random states.

    Returns
    -------
    rs_copy : np.random.RandomState
        The copied random state.
    """
    # the global np.random module is shared anyway, so copying it is pointless
    # unless explicitly forced
    if not force_copy and random_state == np.random:
        return random_state
    rs_copy = np.random.RandomState(1)  # dummy seed, overwritten right below
    rs_copy.set_state(random_state.get_state())
    return rs_copy
def derive_random_state(random_state):
    """
    Create a new random state based on an existing random state or seed.

    Note that this advances the input random state (one sample is drawn
    from it by derive_random_states()).

    Parameters
    ----------
    random_state : np.random.RandomState
        Random state or seed from which to derive the new random state.

    Returns
    -------
    result : np.random.RandomState
        Derived random state.
    """
    return derive_random_states(random_state, n=1)[0]
# TODO use this everywhere instead of manual seed + create
def derive_random_states(random_state, n=1):
    """
    Create N new random states based on an existing random state or seed.

    Parameters
    ----------
    random_state : np.random.RandomState
        Random state or seed from which to derive new random states.

    n : int, optional(default=1)
        Number of random states to derive.

    Returns
    -------
    result : list of np.random.RandomState
        Derived random states.
    """
    # one base seed is sampled; the derived states use consecutive offsets
    seed_base = random_state.randint(0, 10**6, 1)[0]
    states = []
    for offset in sm.xrange(n):
        states.append(new_random_state(seed_base + offset))
    return states
def forward_random_state(random_state):
    """
    Forward the internal state of a random state.

    This makes sure that future calls to the random_state will produce new random values.

    Parameters
    ----------
    random_state : np.random.RandomState
        Random state to forward.
    """
    # drawing (and discarding) a single sample advances the internal state
    random_state.uniform()
# TODO
# def from_json(json_str):
# pass
def _quokka_normalize_extract(extract):
    """
    Generate a normalized rectangle to be extracted from the standard quokka image.

    Parameters
    ----------
    extract : "square" or tuple of number or BoundingBox or BoundingBoxesOnImage
        Unnormalized representation of the image subarea to be extracted.
            * If string "square", then a squared area (x: 0-643, y: 0-643) will be extracted from
              the image.
            * If a tuple, then expected to contain four numbers denoting x1, y1, x2 and y2.
            * If a BoundingBox, then that bounding box's area will be extracted from the image.
            * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
              and a shape matching the full image dimensions (i.e. (643, 960, *)). Then the
              one bounding box will be used similar to BoundingBox.

    Returns
    -------
    bb : BoundingBox
        Normalized representation of the area to extract from the standard quokka image.

    Raises
    ------
    Exception
        If `extract` has none of the supported types.
    """
    if extract == "square":
        bb = BoundingBox(x1=0, y1=0, x2=643, y2=643)
    elif isinstance(extract, tuple) and len(extract) == 4:
        bb = BoundingBox(x1=extract[0], y1=extract[1], x2=extract[2], y2=extract[3])
    elif isinstance(extract, BoundingBox):
        bb = extract
    elif isinstance(extract, BoundingBoxesOnImage):
        # bug fix: this previously checked len() of the *class* attribute
        # (BoundingBoxesOnImage.bounding_boxes) instead of the instance
        do_assert(len(extract.bounding_boxes) == 1)
        do_assert(extract.shape[0:2] == (643, 960))
        bb = extract.bounding_boxes[0]
    else:
        raise Exception(
            "Expected None or tuple of four entries or BoundingBox or BoundingBoxesOnImage "
            "for parameter 'extract', got %s." % (type(extract),)
        )
    return bb
def _compute_resized_shape(from_shape, to_shape):
    """
    Compute the intended new shape of an image-like array after resizing.

    Parameters
    ----------
    from_shape : tuple or ndarray
        Old shape of the array. Usually expected to be a tuple of form (H, W) or (H, W, C) or
        alternatively an array with two or three dimensions.

    to_shape : None or tuple of ints or tuple of floats or int or float
        New shape of the array.
            * If None, then `from_shape` will be used as the new shape.
            * If an int V, then the new shape will be (V, V, [C]), where C will be added if it
              is part of from_shape.
            * If a float V, then the new shape will be (H*V, W*V, [C]), where H and W are the
              old height/width.
            * If a tuple (H', W', [C']) of ints, then H' and W' will be used as the new height
              and width.
            * If a tuple (H', W', [C']) of floats (except C), then H' and W' will be used as
              the relative new height and width.

    Returns
    -------
    to_shape_computed : tuple of int
        New shape.
    """
    # accept arrays in place of shape tuples
    if is_np_array(from_shape):
        from_shape = from_shape.shape
    if is_np_array(to_shape):
        to_shape = to_shape.shape

    computed = list(from_shape)
    if to_shape is None:
        pass  # keep the old shape as-is
    elif isinstance(to_shape, tuple):
        if len(to_shape) == 3:
            if len(from_shape) == 3:
                # channel counts must agree when both shapes carry channels
                do_assert(from_shape[2] == to_shape[2])
            else:
                # target shape introduces a channel axis
                computed.append(to_shape[2])
        hw_target = to_shape[0:2]
        if all([is_single_integer(v) for v in hw_target]):
            # absolute pixel sizes
            computed[0] = to_shape[0]
            computed[1] = to_shape[1]
        elif all([is_single_float(v) for v in hw_target]):
            # relative sizes (fractions of the old height/width)
            if to_shape[0] is not None:
                computed[0] = int(round(from_shape[0] * to_shape[0]))
            else:
                computed[0] = from_shape[0]
            if to_shape[1] is not None:
                computed[1] = int(round(from_shape[1] * to_shape[1]))
            else:
                computed[1] = from_shape[1]
    elif is_single_integer(to_shape) or is_single_float(to_shape):
        # a single number is interpreted as a symmetric target for both axes
        computed = _compute_resized_shape(from_shape, (to_shape, to_shape))
    else:
        raise Exception("Expected to_shape to be None or ndarray or tuple of floats or tuple of ints or single int or single float, got %s." % (type(to_shape),))
    return computed
def quokka(size=None, extract=None):
    """
    Return an image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        Size of the output image. Input into `imgaug.imresize_single_image()`.
        Usually expected to be a tuple (H, W), where H is the desired height
        and W is the width. If None, then the image will not be resized.

    extract : None or "square" or tuple of four numbers or BoundingBox or BoundingBoxesOnImage
        Subarea of the quokka image to extract:
            * If None, then the whole image will be used.
            * If string "square", then a squared area (x: 0-643, y: 0-643) will be extracted
              from the image.
            * If a tuple, then expected to contain four numbers denoting x1, y1, x2 and y2.
            * If a BoundingBox, then that bounding box's area will be extracted from the image.
            * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
              and a shape matching the full image dimensions (i.e. (643, 960, *)). Then the
              one bounding box will be used similar to BoundingBox.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    image = imageio.imread(QUOKKA_FP, pilmode="RGB")
    if extract is not None:
        image = _quokka_normalize_extract(extract).extract_from_image(image)
    if size is not None:
        new_shape = _compute_resized_shape(image.shape, size)
        image = imresize_single_image(image, new_shape[0:2])
    return image
def quokka_square(size=None):
    """
    Return a (square) image of a quokka as a numpy array.

    Convenience wrapper around `quokka()` with a fixed square extraction area.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        Size of the output image. Input into `imgaug.imresize_single_image()`.
        Usually expected to be a tuple (H, W), where H is the desired height
        and W is the width. If None, then the image will not be resized.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    return quokka(extract="square", size=size)
def quokka_heatmap(size=None, extract=None):
    """
    Return a heatmap (here: depth map) for the standard example quokka image.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        See `quokka()`.

    extract : None or "square" or tuple of four numbers or BoundingBox or BoundingBoxesOnImage
        See `quokka()`.

    Returns
    -------
    result : HeatmapsOnImage
        Depth map as an heatmap object. Values close to 0.0 denote objects that are close to
        the camera. Values close to 1.0 denote objects that are furthest away (among all shown
        objects).
    """
    img = imageio.imread(QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode="RGB")
    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img = bb.extract_from_image(img)
    if size is None:
        # the depth map is stored at half resolution; default to the
        # full-resolution quokka image size
        size = (643, 960)
    shape_resized = _compute_resized_shape(img.shape, size)
    img = imresize_single_image(img, shape_resized[0:2])
    img_0to1 = img.astype(np.float32) / 255.0
    img_0to1 = 1 - img_0to1  # depth map was saved as 0 being furthest away
    # NOTE(review): `shape` is hard-coded to the full-resolution image even
    # when extract/size changed the array's size -- confirm whether it should
    # follow the processed image's shape instead.
    return HeatmapsOnImage(img_0to1, shape=(643, 960, 3))
def quokka_segmentation_map(size=None, extract=None):
    """
    Return a segmentation map for the standard example quokka image.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        See `quokka()`.

    extract : None or "square" or tuple of four numbers or BoundingBox or BoundingBoxesOnImage
        See `quokka()`.

    Returns
    -------
    result : SegmentationMapOnImage
        Segmentation map object.
    """
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)

    # collect the polygon outline of the quokka from the annotation file
    xx = []
    yy = []
    for kp_dict in json_dict["polygons"][0]["keypoints"]:
        x = kp_dict["x"]
        y = kp_dict["y"]
        xx.append(x)
        yy.append(y)

    # rasterize the polygon into a binary (0.0/1.0) mask
    # NOTE(review): a 3-dim shape is passed to skimage.draw.polygon here --
    # presumably only the first two entries are used for clipping; confirm.
    img_seg = np.zeros((643, 960, 1), dtype=np.float32)
    rr, cc = skimage.draw.polygon(np.array(yy), np.array(xx), shape=img_seg.shape)
    img_seg[rr, cc] = 1.0

    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img_seg = bb.extract_from_image(img_seg)

    # NOTE(review): `shape` is hard-coded to the full-resolution image even
    # after an extract was applied -- confirm whether it should follow
    # img_seg.shape instead.
    segmap = SegmentationMapOnImage(img_seg, shape=(643, 960, 3))

    if size is not None:
        shape_resized = _compute_resized_shape(img_seg.shape, size)
        segmap = segmap.scale(shape_resized[0:2])
    return segmap
def quokka_keypoints(size=None, extract=None):
    """
    Return example keypoints on the standard example quokka image.

    The keypoints cover the eyes, ears, nose and paws.

    Parameters
    ----------
    size : None or float or tuple of two ints or tuple of two floats, optional(default=None)
        Size of the output image on which the keypoints are placed. If None, then the keypoints
        are not projected to any new size (positions on the original image are used).
        Floats lead to relative size changes, ints to absolute sizes in pixels.

    extract : None or "square" or tuple of number or BoundingBox or BoundingBoxesOnImage
        Subarea to extract from the image. See `quokka()`.

    Returns
    -------
    kpsoi : KeypointsOnImage
        Example keypoints on the quokka image.
    """
    # keypoint coordinates are stored relative to the full image; shift them
    # by the extraction area's top-left corner if one was given
    offset_x, offset_y = 0, 0
    bb_extract = None
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1

    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    keypoints = [
        Keypoint(x=kp_dict["x"] - offset_x, y=kp_dict["y"] - offset_y)
        for kp_dict in json_dict["keypoints"]
    ]

    if bb_extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)
    kpsoi = KeypointsOnImage(keypoints, shape=shape)
    if size is not None:
        kpsoi = kpsoi.on(_compute_resized_shape(shape, size))
    return kpsoi
def quokka_bounding_boxes(size=None, extract=None):
    """
    Return example bounding boxes on the standard example quokka image.

    Currently only a single bounding box is returned that covers the quokka.

    Parameters
    ----------
    size : None or float or tuple of two ints or tuple of two floats, optional(default=None)
        Size of the output image on which the BBs are placed. If None, then the BBs
        are not projected to any new size (positions on the original image are used).
        Floats lead to relative size changes, ints to absolute sizes in pixels.

    extract : None or "square" or tuple of number or BoundingBox or BoundingBoxesOnImage
        Subarea to extract from the image. See `quokka()`.

    Returns
    -------
    bbsoi : BoundingBoxesOnImage
        Example BBs on the quokka image.
    """
    # BB coordinates are stored relative to the full image; shift them by the
    # extraction area's top-left corner if one was given
    offset_x, offset_y = 0, 0
    bb_extract = None
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1

    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    bbs = [
        BoundingBox(
            x1=bb_dict["x1"] - offset_x,
            y1=bb_dict["y1"] - offset_y,
            x2=bb_dict["x2"] - offset_x,
            y2=bb_dict["y2"] - offset_y
        )
        for bb_dict in json_dict["bounding_boxes"]
    ]

    if bb_extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)
    bbsoi = BoundingBoxesOnImage(bbs, shape=shape)
    if size is not None:
        bbsoi = bbsoi.on(_compute_resized_shape(shape, size))
    return bbsoi
def angle_between_vectors(v1, v2):
    """
    Return the angle in radians between vectors `v1` and `v2`.

    From http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python

    Parameters
    ----------
    {v1, v2} : (N,) ndarray
        Input vectors.

    Returns
    -------
    out : float
        Angle in radians.

    Examples
    --------
    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([0, 1, 0]))
    1.5707963267948966

    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([1, 0, 0]))
    0.0

    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([-1, 0, 0]))
    3.141592653589793
    """
    unit1 = v1 / np.linalg.norm(v1)
    unit2 = v2 / np.linalg.norm(v2)
    # clip guards against tiny float errors pushing the dot product out of
    # arccos' valid domain [-1, 1]
    cos_angle = np.clip(np.dot(unit1, unit2), -1.0, 1.0)
    return np.arccos(cos_angle)
def draw_text(img, y, x, text, color=[0, 255, 0], size=25): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
    """
    Draw text on an image.

    This uses by default DejaVuSans as its font, which is included in the
    library.

    Parameters
    ----------
    img : (H,W,3) ndarray
        The image array to draw text on.
        Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).

    {y, x} : int
        x- and y- coordinate of the top left corner of the text.

    color : iterable of 3 ints, optional(default=[0, 255, 0])
        Color of the text to draw. For RGB-images this is expected to be
        an RGB color. Floats are interpreted as fractions of 255.

    size : int, optional(default=25)
        Font size of the text to draw.

    Returns
    -------
    img_np : (H,W,3) ndarray
        Input image with text drawn on it.
    """
    # keeping PIL here so that it is not a dependency of the library right now
    from PIL import Image, ImageDraw, ImageFont

    do_assert(img.dtype in [np.uint8, np.float32])
    input_dtype = img.dtype
    if img.dtype == np.float32:
        img = img.astype(np.uint8)

    # Bug fix: normalize the color WITHOUT mutating the caller's list -- the
    # mutable default argument is shared between calls and in-place writes
    # would also leak into caller-provided lists.
    color_normalized = []
    for val in color:
        if isinstance(val, float):
            val = int(val * 255)
        color_normalized.append(np.clip(val, 0, 255))

    img = Image.fromarray(img)
    font = ImageFont.truetype(DEFAULT_FONT_FP, size)
    context = ImageDraw.Draw(img)
    context.text((x, y), text, fill=tuple(color_normalized), font=font)
    img_np = np.asarray(img)
    img_np.setflags(write=True)  # PIL/asarray returns read only array

    if img_np.dtype != input_dtype:
        img_np = img_np.astype(input_dtype)

    return img_np
# TODO rename sizes to size?
def imresize_many_images(images, sizes=None, interpolation=None):
    """
    Resize many images to a specified size.

    Parameters
    ----------
    images : (N,H,W,C) ndarray
        Array of the images to resize.
        Expected to usually be of dtype uint8.

    sizes : float or iterable of two ints or iterable of two floats
        The new size of the images, given either as a fraction (a single float) or as
        a (height, width) tuple of two integers or as a (height fraction, width fraction)
        tuple of two floats.

    interpolation : None or string or int, optional(default=None)
        The interpolation to use during resize.
        If int, then expected to be one of:
            * cv2.INTER_NEAREST (nearest neighbour interpolation)
            * cv2.INTER_LINEAR (linear interpolation)
            * cv2.INTER_AREA (area interpolation)
            * cv2.INTER_CUBIC (cubic interpolation)
        If string, then expected to be one of:
            * "nearest" (identical to cv2.INTER_NEAREST)
            * "linear" (identical to cv2.INTER_LINEAR)
            * "area" (identical to cv2.INTER_AREA)
            * "cubic" (identical to cv2.INTER_CUBIC)
        If None, the interpolation will be chosen automatically. For size
        increases, area interpolation will be picked and for size decreases,
        linear interpolation will be picked.

    Returns
    -------
    result : (N,H',W',C) ndarray
        Array of the resized images.

    Examples
    --------
    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), 2.0)
    Converts 2 RGB images of height and width 16 to images of height and width 16*2 = 32.

    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (16, 32))
    Converts 2 RGB images of height and width 16 to images of height 16 and width 32.

    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (2.0, 4.0))
    Converts 2 RGB images of height and width 16 to images of height 32 and width 64.
    """
    shape = images.shape
    do_assert(images.ndim == 4, "Expected array of shape (N, H, W, C), got shape %s" % (str(shape),))
    nb_images = shape[0]
    im_height, im_width = shape[1], shape[2]
    nb_channels = shape[3]
    # resolve `sizes` into absolute target height/width
    if is_single_float(sizes):
        do_assert(sizes > 0.0)
        height = int(round(im_height * sizes))
        width = int(round(im_width * sizes))
    else:
        do_assert(len(sizes) == 2)
        all_int = all([is_single_integer(size) for size in sizes])
        all_float = all([is_single_float(size) for size in sizes])
        do_assert(all_int or all_float)
        if all_int:
            height, width = sizes[0], sizes[1]
        else:
            # floats are fractions of the current height/width
            height = int(round(im_height * sizes[0]))
            width = int(round(im_width * sizes[1]))
    # no-op resize: return a copy so callers can always mutate the result
    if height == im_height and width == im_width:
        return np.copy(images)
    ip = interpolation
    do_assert(ip is None or ip in ["nearest", "linear", "area", "cubic", cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC])
    if ip is None:
        # NOTE(review): AREA for upscaling / LINEAR for downscaling matches
        # the docstring above, but is the opposite of OpenCV's usual
        # recommendation (AREA for shrinking) -- confirm this is intended.
        if height > im_height or width > im_width:
            ip = cv2.INTER_AREA
        else:
            ip = cv2.INTER_LINEAR
    elif ip in ["nearest", cv2.INTER_NEAREST]:
        ip = cv2.INTER_NEAREST
    elif ip in ["linear", cv2.INTER_LINEAR]:
        ip = cv2.INTER_LINEAR
    elif ip in ["area", cv2.INTER_AREA]:
        ip = cv2.INTER_AREA
    else: # if ip in ["cubic", cv2.INTER_CUBIC]:
        ip = cv2.INTER_CUBIC
    result = np.zeros((nb_images, height, width, nb_channels), dtype=images.dtype)
    for img_idx in sm.xrange(nb_images):
        # TODO fallback to scipy here if image isn't uint8
        result_img = cv2.resize(images[img_idx], (width, height), interpolation=ip)
        # cv2 drops a singleton channel axis; restore it
        if len(result_img.shape) == 2:
            result_img = result_img[:, :, np.newaxis]
        result[img_idx] = result_img.astype(images.dtype)
    return result
def imresize_single_image(image, sizes, interpolation=None):
    """
    Resize a single image.

    Parameters
    ----------
    image : (H,W,C) ndarray or (H,W) ndarray
        Array of the image to resize.
        Expected to usually be of dtype uint8.

    sizes : float or iterable of two ints or iterable of two floats
        See `imresize_many_images()`.

    interpolation : None or string or int, optional(default=None)
        See `imresize_many_images()`.

    Returns
    -------
    out : (H',W',C) ndarray or (H',W') ndarray
        The resized image.
    """
    # temporarily add a channel axis for grayscale inputs, then delegate to
    # the batch implementation and strip the axes again afterwards
    had_two_dims = (image.ndim == 2)
    if had_two_dims:
        image = image[:, :, np.newaxis]
    do_assert(len(image.shape) == 3, image.shape)
    resized = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
    if had_two_dims:
        return np.squeeze(resized[0, :, :, 0])
    return resized[0, ...]
def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
    """
    Pad an image-like array on its top/right/bottom/left side.

    This function is a wrapper around `numpy.pad()`.

    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pad.

    top : int, optional(default=0)
        Amount of pixels to add at the top side of the image. Must be 0 or greater.

    right : int, optional(default=0)
        Amount of pixels to add at the right side of the image. Must be 0 or greater.

    bottom : int, optional(default=0)
        Amount of pixels to add at the bottom side of the image. Must be 0 or greater.

    left : int, optional(default=0)
        Amount of pixels to add at the left side of the image. Must be 0 or greater.

    mode : string, optional(default="constant")
        Padding mode to use. See `numpy.pad()` for details.

    cval : number, optional(default=0)
        Value to use for padding if mode="constant". See `numpy.pad()` for details.

    Returns
    -------
    arr_pad : (H',W') or (H',W',C) ndarray
        Padded array with height H'=H+top+bottom and width W'=W+left+right.
    """
    do_assert(arr.ndim in [2, 3])
    do_assert(top >= 0)
    do_assert(right >= 0)
    do_assert(bottom >= 0)
    do_assert(left >= 0)

    # nothing to pad: still return a copy so callers can always mutate freely
    if top == 0 and right == 0 and bottom == 0 and left == 0:
        return np.copy(arr)

    paddings = [(top, bottom), (left, right)]
    if arr.ndim == 3:
        paddings.append((0, 0))  # never pad the channel axis
    if mode == "constant":
        return np.pad(arr, paddings, mode=mode, constant_values=cval)
    return np.pad(arr, paddings, mode=mode)
def compute_paddings_for_aspect_ratio(arr, aspect_ratio):
    """
    Compute the amount of pixels by which an array has to be padded to fulfill an aspect ratio.

    The aspect ratio is given as width/height.
    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.

    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array for which to compute pad amounts.

    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.

    Returns
    -------
    result : tuple of ints
        Required padding amounts to reach the target aspect ratio, given as a tuple
        of the form (top, right, bottom, left).
    """
    do_assert(arr.ndim in [2, 3])
    do_assert(aspect_ratio > 0)
    height, width = arr.shape[0:2]
    do_assert(height > 0)

    current_ratio = width / height
    pad_amounts = {"top": 0, "right": 0, "bottom": 0, "left": 0}
    if current_ratio < aspect_ratio:
        # too narrow for the target ratio -> pad left/right
        missing = (aspect_ratio * height) - width
        pad_amounts["right"] = int(np.ceil(missing / 2))
        pad_amounts["left"] = int(np.floor(missing / 2))
    elif current_ratio > aspect_ratio:
        # too wide for the target ratio -> pad top/bottom
        missing = ((1/aspect_ratio) * width) - height
        pad_amounts["top"] = int(np.ceil(missing / 2))
        pad_amounts["bottom"] = int(np.floor(missing / 2))
    return (pad_amounts["top"], pad_amounts["right"], pad_amounts["bottom"], pad_amounts["left"])
def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0, return_pad_amounts=False):
    """
    Pad an image-like array on its sides so that it matches a target aspect ratio.

    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.

    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pad.

    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.

    mode : string, optional(default="constant")
        Padding mode to use. See `numpy.pad()` for details.

    cval : number, optional(default=0)
        Value to use for padding if mode="constant". See `numpy.pad()` for details.

    return_pad_amounts : bool, optional(default=False)
        If False, then only the padded image will be returned. If True, a tuple with two
        entries will be returned, where the first entry is the padded image and the second
        entry are the amounts by which each image side was padded. These amounts are again a
        tuple of the form (top, right, bottom, left), with each value being an integer.

    Returns
    -------
    result : tuple
        First tuple entry: Padded image as (H',W') or (H',W',C) ndarray, fulfilling the given
        aspect_ratio.
        Second tuple entry: Amounts by which the image was padded on each side, given
        as a tuple (top, right, bottom, left).
        If return_pad_amounts is False, then only the image is returned.
    """
    pad_amounts = compute_paddings_for_aspect_ratio(arr, aspect_ratio)
    arr_padded = pad(
        arr,
        top=pad_amounts[0],
        right=pad_amounts[1],
        bottom=pad_amounts[2],
        left=pad_amounts[3],
        mode=mode,
        cval=cval
    )
    if return_pad_amounts:
        return arr_padded, pad_amounts
    return arr_padded
def pool(arr, block_size, func, cval=0, preserve_dtype=True):
    """
    Rescale an array by pooling values within blocks.

    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pool. Ideally of datatype np.float64.

    block_size : int or tuple of two ints or tuple of three ints
        Spatial size of each group of values to pool, aka kernel size.
        If a single integer, then a symmetric block of that size along height and width will
        be used.
        If a tuple of two values, it is assumed to be the block size along height and width
        of the image-like, with pooling happening per channel.
        If a tuple of three values, it is assumed to be the block size along height, width and
        channels.

    func : callable
        Function to apply to a given block in order to convert it to a single number,
        e.g. np.average, np.min, np.max.

    cval : number, optional(default=0)
        Value to use in order to pad the array along its border if the array cannot be divided
        by block_size without remainder.

    preserve_dtype : bool, optional(default=True)
        Whether to convert the array back to the input datatype if it is changed away from
        that in the pooling process.

    Returns
    -------
    arr_reduced : (H',W') or (H',W',C') ndarray
        Array after pooling.
    """
    do_assert(arr.ndim in [2, 3])
    is_valid_int = is_single_integer(block_size) and block_size >= 1
    # Bug fix: the per-entry checks were previously collected into a list and
    # that list itself was used as the condition -- a non-empty list is always
    # truthy, so invalid tuples (e.g. (0, 0)) slipped through validation.
    is_valid_tuple = (
        is_iterable(block_size)
        and len(block_size) in [2, 3]
        and all([is_single_integer(val) and val >= 1 for val in block_size])
    )
    do_assert(is_valid_int or is_valid_tuple)

    if is_single_integer(block_size):
        block_size = [block_size, block_size]
    if len(block_size) < arr.ndim:
        # no channel block size given -> pool per channel
        block_size = list(block_size) + [1]

    input_dtype = arr.dtype
    arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func, cval=cval)
    if preserve_dtype and arr_reduced.dtype.type != input_dtype:
        arr_reduced = arr_reduced.astype(input_dtype)
    return arr_reduced
def avg_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Rescale an array using average pooling.

    Thin convenience wrapper around `pool()` with the reduction function
    fixed to np.average.

    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pool. See `pool()` for details.
    block_size : int or tuple of two ints or tuple of three ints
        Size of each block of values to pool. See `pool()` for details.
    cval : number, optional(default=0)
        Padding value. See `pool()` for details.
    preserve_dtype : bool, optional(default=True)
        Whether to preserve the input array dtype. See `pool()` for details.

    Returns
    -------
    arr_reduced : (H',W') or (H',W',C') ndarray
        Array after average pooling.
    """
    reducer = np.average
    return pool(arr, block_size, reducer, cval=cval, preserve_dtype=preserve_dtype)
def max_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Rescale an array using max-pooling.

    Thin convenience wrapper around `pool()` with the reduction function
    fixed to np.max.

    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pool. See `pool()` for details.
    block_size : int or tuple of two ints or tuple of three ints
        Size of each block of values to pool. See `pool()` for details.
    cval : number, optional(default=0)
        Padding value. See `pool()` for details.
    preserve_dtype : bool, optional(default=True)
        Whether to preserve the input array dtype. See `pool()` for details.

    Returns
    -------
    arr_reduced : (H',W') or (H',W',C') ndarray
        Array after max-pooling.
    """
    reducer = np.max
    return pool(arr, block_size, reducer, cval=cval, preserve_dtype=preserve_dtype)
def draw_grid(images, rows=None, cols=None):
    """
    Converts multiple input images into a single image showing them in a grid.

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        The input images to convert to a grid.
        Expected to be RGB and have dtype uint8.
    rows : None or int, optional(default=None)
        The number of rows to show in the grid.
        If None, it will be automatically derived.
    cols : None or int, optional(default=None)
        The number of cols to show in the grid.
        If None, it will be automatically derived.

    Returns
    -------
    grid : (H',W',3) ndarray
        Image of the generated grid.
    """
    if is_np_array(images):
        do_assert(images.ndim == 4)
    else:
        # only the first image is validated here; the remaining entries are
        # assumed to also be (H,W,C) ndarrays
        do_assert(is_iterable(images) and is_np_array(images[0]) and images[0].ndim == 3)
    nb_images = len(images)
    do_assert(nb_images > 0)
    # each grid cell is sized to fit the largest image
    cell_height = max([image.shape[0] for image in images])
    cell_width = max([image.shape[1] for image in images])
    channels = set([image.shape[2] for image in images])
    do_assert(len(channels) == 1, "All images are expected to have the same number of channels, but got channel set %s with length %d instead." % (str(channels), len(channels)))
    nb_channels = list(channels)[0]
    if rows is None and cols is None:
        rows = cols = int(math.ceil(math.sqrt(nb_images)))
    elif rows is not None:
        # BUGFIX: force float division so that math.ceil has an effect even
        # under python2-style integer division (this module targets py2/3 via
        # six); otherwise cols could be derived too small and the assert
        # below would fail.
        cols = int(math.ceil(nb_images / float(rows)))
    elif cols is not None:
        rows = int(math.ceil(nb_images / float(cols)))
    do_assert(rows * cols >= nb_images)
    width = cell_width * cols
    height = cell_height * rows
    grid = np.zeros((height, width, nb_channels), dtype=np.uint8)
    cell_idx = 0
    for row_idx in sm.xrange(rows):
        for col_idx in sm.xrange(cols):
            if cell_idx < nb_images:
                image = images[cell_idx]
                # paste each image into the top left corner of its cell;
                # smaller images leave the remainder of the cell black
                cell_y1 = cell_height * row_idx
                cell_y2 = cell_y1 + image.shape[0]
                cell_x1 = cell_width * col_idx
                cell_x2 = cell_x1 + image.shape[1]
                grid[cell_y1:cell_y2, cell_x1:cell_x2, :] = image
            cell_idx += 1
    return grid
def show_grid(images, rows=None, cols=None):
    """
    Converts the input images to a grid image and shows it in a new window.

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        See `draw_grid()`.
    rows : None or int, optional(default=None)
        See `draw_grid()`.
    cols : None or int, optional(default=None)
        See `draw_grid()`.
    """
    # build the grid image first, then hand it to the viewer
    imshow(draw_grid(images, rows=rows, cols=cols))
def imshow(image):
    """
    Shows an image in a window.

    NOTE(review): `plt` is presumably matplotlib.pyplot, in which case
    `show()` blocks until the window is closed — confirm against the
    module's imports.

    Parameters
    ----------
    image : (H,W,3) ndarray
        Image to show.
    """
    plt.imshow(image)
    plt.show()
def do_assert(condition, message="Assertion failed."):
    """
    Raise an AssertionError if `condition` does not hold.

    Unlike the `assert` statement, this check is not stripped from
    optimized (`-O`) bytecode, which is why the library uses it in place
    of plain asserts that must survive optimization.

    Parameters
    ----------
    condition : bool
        If False, an exception is raised.
    message : string, optional(default="Assertion failed.")
        Error message passed to the raised exception.
    """
    if condition:
        return
    raise AssertionError(str(message))
class HooksImages(object):
    """
    Container of callbacks that intervene in image augmentation runs.

    Useful e.g. to dynamically deactivate specific augmenters.

    Parameters
    ----------
    activator : None or callable, optional(default=None)
        Callback `f(images, augmenter, parents, default)` that decides
        whether an augmenter may run. `images` are the inputs to augment,
        `augmenter` is the augmenter instance, `parents` are the previously
        executed augmenters and `default` is the value to return when the
        callback does not want to make a decision for the given inputs.
    propagator : None or callable, optional(default=None)
        Callback with the same interface as `activator` that decides whether
        augmentation may be propagated to the augmenter's children. This
        happens after the activator; if the activator returned False, the
        propagation step is never executed. (An augmenter may, in theory,
        augment images itself and afterwards run its children.)
    preprocessor : None or callable, optional(default=None)
        Callback `f(images, augmenter, parents)` invoked before an augmenter
        performs any augmentation; expected to return the input images,
        optionally modified.
    postprocessor : None or callable, optional(default=None)
        Callback with the same interface as `preprocessor`, invoked after an
        augmenter performed its augmentations.

    Examples
    --------
    >>> seq = iaa.Sequential([
    >>>     iaa.GaussianBlur(3.0, name="blur"),
    >>>     iaa.Dropout(0.05, name="dropout"),
    >>>     iaa.Affine(translate_px=-5, name="affine")
    >>> ])
    >>>
    >>> def activator(images, augmenter, parents, default):
    >>>     return False if augmenter.name in ["blur", "dropout"] else default
    >>>
    >>> seq_det = seq.to_deterministic()
    >>> images_aug = seq_det.augment_images(images)
    >>> heatmaps_aug = seq_det.augment_images(
    >>>     heatmaps,
    >>>     hooks=ia.HooksImages(activator=activator)
    >>> )

    This augments images and their respective heatmaps in the same way,
    but the heatmaps are only modified by Affine, not by GaussianBlur or
    Dropout.
    """

    def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None):
        self.activator = activator
        self.propagator = propagator
        self.preprocessor = preprocessor
        self.postprocessor = postprocessor

    def is_activated(self, images, augmenter, parents, default):
        """
        Decide whether an augmenter may be executed.

        Returns
        -------
        out : bool
            True if the augmenter may be executed, False otherwise.
        """
        return default if self.activator is None else self.activator(images, augmenter, parents, default)

    # TODO is a propagating hook necessary? seems to be covered by the
    # activated hook already
    def is_propagating(self, images, augmenter, parents, default):
        """
        Decide whether an augmenter may call its children to augment an
        image. This is independent of the augmenter itself possibly changing
        the image without calling its children.

        Returns
        -------
        out : bool
            True if propagation to the children is allowed, False otherwise.
        """
        return default if self.propagator is None else self.propagator(images, augmenter, parents, default)

    def preprocess(self, images, augmenter, parents):
        """
        Run the preprocessor callback (if any) before an augmenter starts.

        Returns
        -------
        out : (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.
        """
        return images if self.preprocessor is None else self.preprocessor(images, augmenter, parents)

    def postprocess(self, images, augmenter, parents):
        """
        Run the postprocessor callback (if any) after an augmenter finished.

        Returns
        -------
        out : (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.
        """
        return images if self.postprocessor is None else self.postprocessor(images, augmenter, parents)
class HooksHeatmaps(HooksImages):
    """
    Class to intervene with heatmap augmentation runs.

    Useful e.g. to dynamically deactivate some augmenters. Behaves
    currently exactly like the image hooks class; this may or may not
    change in the future.
    """
    pass
class HooksKeypoints(HooksImages):
    """
    Class to intervene with keypoint augmentation runs.

    Useful e.g. to dynamically deactivate some augmenters. Behaves
    currently exactly like the image hooks class; this may or may not
    change in the future.
    """
    pass
class Keypoint(object):
"""
A single keypoint (aka landmark) on an image.
Parameters
----------
x : number
Coordinate of the keypoint on the x axis.
y : number
Coordinate of the keypoint on the y axis.
"""
def __init__(self, x, y):
self.x = x
self.y = y
@property
def x_int(self):
"""
Return the keypoint's x-coordinate, rounded to the closest integer.
Returns
-------
result : int
Keypoint's x-coordinate, rounded to the closest integer.
"""
return int(round(self.x))
@property
def y_int(self):
"""
Return the keypoint's y-coordinate, rounded to the closest integer.
Returns
-------
result : int
Keypoint's y-coordinate, rounded to the closest integer.
"""
return int(round(self.y))
def project(self, from_shape, to_shape):
"""
Project the keypoint onto a new position on a new image.
E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
and y=(20 of 100 pixels) and is projected onto a new image with
size (width=200, height=200), its new position will be (20, 40).
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple
Shape of the original image. (Before resize.)
to_shape : tuple
Shape of the new image. (After resize.)
Returns
-------
out : Keypoint
Keypoint object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return Keypoint(x=self.x, y=self.y)
else:
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
x = (self.x / from_width) * to_width
y = (self.y / from_height) * to_height
return Keypoint(x=x, y=y)
def shift(self, x=0, y=0):
"""
Move the keypoint around on an image.
Parameters
----------
x : number, optional(default=0)
Move by this value on the x axis.
y : number, optional(default=0)
Move by this value on the y axis.
Returns
-------
out : Keypoint
Keypoint object with new coordinates.
"""
return Keypoint(self.x + x, self.y + y)
def __repr__(self):
return self.__str__()
def __str__(self):
return "Keypoint(x=%.8f, y=%.8f)" % (self.x, self.y)
class KeypointsOnImage(object):
    """
    Object that represents all keypoints on a single image.

    Parameters
    ----------
    keypoints : list of Keypoint
        List of keypoints on the image.
    shape : tuple of int
        The shape of the image on which the keypoints are placed.
        An ndarray may also be passed, in which case its `.shape` is used.

    Examples
    --------
    >>> kps = [Keypoint(x=10, y=20), Keypoint(x=34, y=60)]
    >>> kps_oi = KeypointsOnImage(kps, shape=image.shape)
    """
    def __init__(self, keypoints, shape):
        self.keypoints = keypoints
        # allow passing the image itself in place of its shape
        if is_np_array(shape):
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)

    @property
    def height(self):
        # height of the underlying image
        return self.shape[0]

    @property
    def width(self):
        # width of the underlying image
        return self.shape[1]

    @property
    def empty(self):
        """
        Returns whether this object contains zero keypoints.

        Returns
        -------
        result : bool
            True if this object contains zero keypoints.
        """
        return len(self.keypoints) == 0

    def on(self, image):
        """
        Project keypoints from one image to a new one.

        Parameters
        ----------
        image : ndarray or tuple
            New image onto which the keypoints are to be projected.
            May also simply be that new image's shape tuple.

        Returns
        -------
        keypoints : KeypointsOnImage
            Object containing all projected keypoints.
        """
        if is_np_array(image):
            shape = image.shape
        else:
            shape = image
        # same spatial size -> no projection necessary
        if shape[0:2] == self.shape[0:2]:
            return self.deepcopy()
        else:
            keypoints = [kp.project(self.shape, shape) for kp in self.keypoints]
            return KeypointsOnImage(keypoints, shape)

    # NOTE: mutable default for `color` is deliberate here (matches the
    # original API); the list is never mutated inside this method.
    def draw_on_image(self, image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False):  # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Draw all keypoints onto a given image. Each keypoint is marked by a
        square of a chosen color and size.

        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the keypoints.
            This image should usually have the same shape as
            set in KeypointsOnImage.shape.
        color : int or list of ints or tuple of ints or (3,) ndarray, optional(default=[0, 255, 0])
            The RGB color of all keypoints. If a single int `C`, then that is
            equivalent to (C,C,C).
        size : int, optional(default=3)
            The size of each point. If set to C, each square will have
            size CxC.
        copy : bool, optional(default=True)
            Whether to copy the image before drawing the points.
        raise_if_out_of_image : bool, optional(default=False)
            Whether to raise an exception if any keypoint is outside of the
            image.

        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn keypoints.
        """
        if copy:
            image = np.copy(image)
        height, width = image.shape[0:2]
        for keypoint in self.keypoints:
            y, x = keypoint.y_int, keypoint.x_int
            if 0 <= y < height and 0 <= x < width:
                # draw a size x size square centered on the keypoint,
                # clipped to the image borders
                x1 = max(x - size//2, 0)
                x2 = min(x + 1 + size//2, width)
                y1 = max(y - size//2, 0)
                y2 = min(y + 1 + size//2, height)
                image[y1:y2, x1:x2] = color
            else:
                if raise_if_out_of_image:
                    # BUGFIX: x and y were previously passed in swapped order,
                    # producing a misleading error message.
                    raise Exception("Cannot draw keypoint x=%.8f, y=%.8f on image with shape %s." % (x, y, image.shape))
        return image

    def shift(self, x=0, y=0):
        """
        Move the keypoints around on an image.

        Parameters
        ----------
        x : number, optional(default=0)
            Move each keypoint by this value on the x axis.
        y : number, optional(default=0)
            Move each keypoint by this value on the y axis.

        Returns
        -------
        out : KeypointsOnImage
            Keypoints after moving them.
        """
        keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]
        return KeypointsOnImage(keypoints, self.shape)

    def get_coords_array(self):
        """
        Convert the coordinates of all keypoints in this object to
        an array of shape (N,2).

        Returns
        -------
        result : (N, 2) ndarray
            Where N is the number of keypoints. Each first value is the
            x coordinate, each second value is the y coordinate.
        """
        result = np.zeros((len(self.keypoints), 2), np.float32)
        for i, keypoint in enumerate(self.keypoints):
            result[i, 0] = keypoint.x
            result[i, 1] = keypoint.y
        return result

    @staticmethod
    def from_coords_array(coords, shape):
        """
        Convert an array (N,2) with a given image shape to a KeypointsOnImage
        object.

        Parameters
        ----------
        coords : (N, 2) ndarray
            Coordinates of N keypoints on the original image.
            Each first entry (i, 0) is expected to be the x coordinate.
            Each second entry (i, 1) is expected to be the y coordinate.
        shape : tuple
            Shape tuple of the image on which the keypoints are placed.

        Returns
        -------
        out : KeypointsOnImage
            KeypointsOnImage object that contains all keypoints from the array.
        """
        keypoints = [Keypoint(x=coords[i, 0], y=coords[i, 1]) for i in sm.xrange(coords.shape[0])]
        return KeypointsOnImage(keypoints, shape)

    def to_keypoint_image(self, size=1):
        """
        Draws a new black image of shape (H,W,N) in which all keypoint coordinates
        are set to 255.
        (H=shape height, W=shape width, N=number of keypoints)

        This function can be used as a helper when augmenting keypoints with
        a method that only supports the augmentation of images.

        Parameters
        ----------
        size : int
            Size of each (squared) point. Must be odd, so that the square
            can be centered on the keypoint pixel.

        Returns
        -------
        image : (H,W,N) ndarray
            Image in which the keypoints are marked. H is the height,
            defined in KeypointsOnImage.shape[0] (analogous W). N is the
            number of keypoints.
        """
        do_assert(len(self.keypoints) > 0)
        height, width = self.shape[0:2]
        image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
        do_assert(size % 2 != 0)
        sizeh = max(0, (size-1)//2)
        for i, keypoint in enumerate(self.keypoints):
            # TODO for float values spread activation over several cells
            # here and do voting at the end
            y = keypoint.y_int
            x = keypoint.x_int
            # square of intensity 128 around the keypoint, clipped to the
            # image; the exact keypoint pixel is marked with 255
            x1 = np.clip(x - sizeh, 0, width-1)
            x2 = np.clip(x + sizeh + 1, 0, width)
            y1 = np.clip(y - sizeh, 0, height-1)
            y2 = np.clip(y + sizeh + 1, 0, height)
            if x1 < x2 and y1 < y2:
                image[y1:y2, x1:x2, i] = 128
            if 0 <= y < height and 0 <= x < width:
                image[y, x, i] = 255
        return image

    # NOTE: mutable default for `if_not_found_coords` is deliberate (matches
    # the original API); the dict is only read, never mutated.
    @staticmethod
    def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1, nb_channels=None):  # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Converts an image generated by `to_keypoint_image()` back to
        an KeypointsOnImage object.

        Parameters
        ----------
        image : (H,W,N) ndarray
            The keypoints image. N is the number of
            keypoints.
        if_not_found_coords : tuple or list or dict or None
            Coordinates to use for keypoints that cannot be found in `image`.
            If this is a list/tuple, it must have two integer values. If it
            is a dictionary, it must have the keys "x" and "y". If this
            is None, then the keypoint will not be added to the final
            KeypointsOnImage object.
        threshold : int
            The search for keypoints works by searching for the argmax in
            each channel. This parameters contains the minimum value that
            the max must have in order to be viewed as a keypoint.
        nb_channels : None or int
            Number of channels of the image on which the keypoints are placed.
            Some keypoint augmenters require that information.
            If set to None, the keypoint's shape will be set
            to `(height, width)`, otherwise `(height, width, nb_channels)`.

        Returns
        -------
        out : KeypointsOnImage
            The extracted keypoints.

        Raises
        ------
        Exception
            If `if_not_found_coords` is neither None nor tuple/list/dict.
        """
        do_assert(len(image.shape) == 3)
        height, width, nb_keypoints = image.shape

        # determine fallback coordinates for undetectable keypoints
        drop_if_not_found = False
        if if_not_found_coords is None:
            drop_if_not_found = True
            if_not_found_x = -1
            if_not_found_y = -1
        elif isinstance(if_not_found_coords, (tuple, list)):
            do_assert(len(if_not_found_coords) == 2)
            if_not_found_x = if_not_found_coords[0]
            if_not_found_y = if_not_found_coords[1]
        elif isinstance(if_not_found_coords, dict):
            if_not_found_x = if_not_found_coords["x"]
            if_not_found_y = if_not_found_coords["y"]
        else:
            raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (type(if_not_found_coords),))

        keypoints = []
        for i in sm.xrange(nb_keypoints):
            # keypoint position = argmax within the channel, if it reaches
            # the threshold
            maxidx_flat = np.argmax(image[..., i])
            maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
            found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
            if found:
                keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
            else:
                if drop_if_not_found:
                    pass  # dont add the keypoint to the result list, i.e. drop it
                else:
                    keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))

        out_shape = (height, width)
        if nb_channels is not None:
            out_shape += (nb_channels,)
        return KeypointsOnImage(keypoints, shape=out_shape)

    def copy(self):
        """
        Create a shallow copy of the KeypointsOnImage object.

        Returns
        -------
        out : KeypointsOnImage
            Shallow copy.
        """
        return copy.copy(self)

    def deepcopy(self):
        """
        Create a deep copy of the KeypointsOnImage object.

        Returns
        -------
        out : KeypointsOnImage
            Deep copy.
        """
        # for some reason deepcopy is way slower here than manual copy
        kps = [Keypoint(x=kp.x, y=kp.y) for kp in self.keypoints]
        return KeypointsOnImage(kps, tuple(self.shape))

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "KeypointsOnImage(%s, shape=%s)" % (str(self.keypoints), self.shape)
# TODO functions: square(), to_aspect_ratio(), extend()/add_border(), contains_point()
class BoundingBox(object):
"""
Class representing bounding boxes.
Each bounding box is parameterized by its top left and bottom right corners. Both are given
as x and y-coordinates. The corners are intended to lie inside the bounding box area.
As a result, a bounding box that lies completely inside the image but has maximum extensions
would have coordinates `(0.0, 0.0)` and `(W - epsilon, H - epsilon)`. Note that coordinates
are saved internally as floats.
Parameters
----------
x1 : number
X-coordinate of the top left of the bounding box.
y1 : number
Y-coordinate of the top left of the bounding box.
x2 : number
X-coordinate of the bottom right of the bounding box.
y2 : number
Y-coordinate of the bottom right of the bounding box.
label : None or string, optional(default=None)
Label of the bounding box, e.g. a string representing the class.
"""
def __init__(self, x1, y1, x2, y2, label=None):
"""Create a new BoundingBox instance."""
if x1 > x2:
x2, x1 = x1, x2
do_assert(x2 > x1)
if y1 > y2:
y2, y1 = y1, y2
do_assert(y2 > y1)
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.label = label
@property
def x1_int(self):
"""
Return the x-coordinate of the top left corner as an integer.
Returns
-------
result : int
X-coordinate of the top left corner, rounded to the closest integer.
"""
return int(round(self.x1))
@property
def y1_int(self):
"""
Return the y-coordinate of the top left corner as an integer.
Returns
-------
result : int
Y-coordinate of the top left corner, rounded to the closest integer.
"""
return int(round(self.y1))
@property
def x2_int(self):
"""
Return the x-coordinate of the bottom left corner as an integer.
Returns
-------
result : int
X-coordinate of the bottom left corner, rounded to the closest integer.
"""
return int(round(self.x2))
@property
def y2_int(self):
"""
Return the y-coordinate of the bottom left corner as an integer.
Returns
-------
result : int
Y-coordinate of the bottom left corner, rounded to the closest integer.
"""
return int(round(self.y2))
@property
def height(self):
"""
Estimate the height of the bounding box.
Returns
-------
result : number
Height of the bounding box.
"""
return self.y2 - self.y1
@property
def width(self):
"""
Estimate the width of the bounding box.
Returns
-------
result : number
Width of the bounding box.
"""
return self.x2 - self.x1
@property
def center_x(self):
"""
Estimate the x-coordinate of the center point of the bounding box.
Returns
-------
result : number
X-coordinate of the center point of the bounding box.
"""
return self.x1 + self.width/2
@property
def center_y(self):
"""
Estimate the y-coordinate of the center point of the bounding box.
Returns
-------
result : number
Y-coordinate of the center point of the bounding box.
"""
return self.y1 + self.height/2
@property
def area(self):
"""
Estimate the area of the bounding box.
Returns
-------
result : number
Area of the bounding box, i.e. `height * width`.
"""
return self.height * self.width
def project(self, from_shape, to_shape):
"""
Project the bounding box onto a new position on a new image.
E.g. if the bounding box is on its original image at
x1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto
a new image with size (width=200, height=200), its new position will
be (x1=20, y1=40). (Analogous for x2/y2.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple
Shape of the original image. (Before resize.)
to_shape : tuple
Shape of the new image. (After resize.)
Returns
-------
out : BoundingBox
BoundingBox object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return self.copy()
else:
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
do_assert(from_height > 0)
do_assert(from_width > 0)
do_assert(to_height > 0)
do_assert(to_width > 0)
x1 = (self.x1 / from_width) * to_width
y1 = (self.y1 / from_height) * to_height
x2 = (self.x2 / from_width) * to_width
y2 = (self.y2 / from_height) * to_height
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
"""
Extend the size of the bounding box along its sides.
Parameters
----------
all_sides : number, optional(default=0)
Value by which to extend the bounding box size along all sides.
top : number, optional(default=0)
Value by which to extend the bounding box size along its top side.
right : number, optional(default=0)
Value by which to extend the bounding box size along its right side.
bottom : number, optional(default=0)
Value by which to extend the bounding box size along its bottom side.
left : number, optional(default=0)
Value by which to extend the bounding box size along its left side.
Returns
-------
result : BoundingBox
Extended bounding box.
"""
return BoundingBox(
x1=self.x1 - all_sides - left,
x2=self.x2 + all_sides + right,
y1=self.y1 - all_sides - top,
y2=self.y2 + all_sides + bottom
)
def intersection(self, other, default=None):
"""
Compute the intersection bounding box of this bounding box and another one.
Parameters
----------
other : BoundingBox
Other bounding box with which to generate the intersection.
Returns
-------
result : BoundingBox
Intersection bounding box of the two bounding boxes.
"""
x1_i = max(self.x1, other.x1)
y1_i = max(self.y1, other.y1)
x2_i = min(self.x2, other.x2)
y2_i = min(self.y2, other.y2)
if x1_i >= x2_i or y1_i >= y2_i:
return default
else:
return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)
def union(self, other):
"""
Compute the union bounding box of this bounding box and another one.
This is equivalent to drawing a bounding box around all corners points of both
bounding boxes.
Parameters
----------
other : BoundingBox
Other bounding box with which to generate the union.
Returns
-------
result : BoundingBox
Union bounding box of the two bounding boxes.
"""
return BoundingBox(
x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2),
)
def iou(self, other):
"""
Compute the IoU of this bounding box with another one.
IoU is the intersection over union, defined as:
area(intersection(A, B)) / area(union(A, B))
= area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))
Parameters
----------
other : BoundingBox
Other bounding box with which to compare.
Returns
-------
result : float
IoU between the two bounding boxes.
"""
inters = self.intersection(other)
if inters is None:
return 0
else:
return inters.area / (self.area + other.area - inters.area)
def is_fully_within_image(self, image):
"""
Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of at least two ints
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape.
Returns
-------
result : bool
True if the bounding box is fully inside the image area.
False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
return self.x1 >= 0 and self.x2 < width and self.y1 >= 0 and self.y2 < height
def is_partly_within_image(self, image):
"""
Estimate whether the bounding box is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of at least two ints
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape.
Returns
-------
result : bool
True if the bounding box is at least partially inside the image area.
False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
eps = np.finfo(np.float32).eps
img_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps)
return self.intersection(img_bb) is not None
def is_out_of_image(self, image, fully=True, partly=False):
"""
Estimate whether the bounding box is partially or fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of ints
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape and must contain at least two integers.
fully : bool, optional(default=True)
Whether to return True if the bounding box is fully outside fo the image area.
partly : bool, optional(default=False)
Whether to return True if the bounding box is at least partially outside fo the
image area.
Returns
-------
result : bool
True if the bounding box is partially/fully outside of the image area, depending
on defined parameters. False otherwise.
"""
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
else:
return fully
def cut_out_of_image(self, image):
"""
Cut off all parts of the bounding box that are outside of the image.
Parameters
----------
image : (H,W,...) ndarray or tuple of at least two ints
Image dimensions to use for the clipping of the bounding box. If an ndarray, its
shape will be used. If a tuple, it is assumed to represent the image shape.
Returns
-------
result : BoundingBox
Bounding box, clipped to fall within the image dimensions.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
do_assert(height > 0)
do_assert(width > 0)
eps = np.finfo(np.float32).eps
x1 = np.clip(self.x1, 0, width - eps)
x2 = np.clip(self.x2, 0, width - eps)
y1 = np.clip(self.y1, 0, height - eps)
y2 = np.clip(self.y2, 0, height - eps)
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift the bounding box from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional(default=None)
Amount of pixels by which to shift the bounding box from the top.
right : None or int, optional(default=None)
Amount of pixels by which to shift the bounding box from the right.
bottom : None or int, optional(default=None)
Amount of pixels by which to shift the bounding box from the bottom.
left : None or int, optional(default=None)
Amount of pixels by which to shift the bounding box from the left.
Returns
-------
result : BoundingBox
Shifted bounding box.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
)
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
    """
    Draw the bounding box on an image.

    Parameters
    ----------
    image : (H,W,C) ndarray(uint8)
        The image onto which to draw the bounding box.
    color : iterable of int, optional(default=(0, 255, 0))
        The color to use, corresponding to the channel layout of the image. Usually RGB.
        (The default is a tuple instead of a list, because a mutable default
        argument could be accidentally modified by callers.)
    alpha : float, optional(default=1.0)
        The transparency of the drawn bounding box, where 1.0 denotes no transparency and
        0.0 is invisible.
    thickness : int, optional(default=1)
        The thickness of the bounding box in pixels. If the value is larger than 1, then
        additional pixels will be added around the bounding box (i.e. extension towards the
        outside).
    copy : bool, optional(default=True)
        Whether to copy the input image or change it in-place.
    raise_if_out_of_image : bool, optional(default=False)
        Whether to raise an error if the bounding box is partially/fully outside of the
        image. If set to False, no error will be raised and only the parts inside the image
        will be drawn.

    Returns
    -------
    result : (H,W,C) ndarray(uint8)
        Image with bounding box drawn on it.
    """
    if raise_if_out_of_image and self.is_out_of_image(image):
        raise Exception("Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s." % (self.x1, self.y1, self.x2, self.y2, image.shape))
    result = np.copy(image) if copy else image
    if isinstance(color, (tuple, list)):
        color = np.uint8(color)
    # The corner coordinates are invariant across the thickness loop, so
    # compute them once instead of once per iteration (pure hoisting).
    y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int
    # When y values get into the range (H-0.5, H), the *_int functions round them to H.
    # That is technically sensible, but in the case of drawing means that the border lies
    # just barely outside of the image, making the border disappear, even though the BB
    # is fully inside the image. Here we correct for that because of beauty reasons.
    # Same is the case for x coordinates.
    if self.is_fully_within_image(image):
        y1 = np.clip(y1, 0, image.shape[0]-1)
        y2 = np.clip(y2, 0, image.shape[0]-1)
        x1 = np.clip(x1, 0, image.shape[1]-1)
        x2 = np.clip(x2, 0, image.shape[1]-1)
    for i in range(thickness):
        # rectangle perimeter, grown by i pixels towards the outside
        y = [y1-i, y1-i, y2+i, y2+i]
        x = [x1-i, x2+i, x2+i, x1-i]
        rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
        if alpha >= 0.99:
            # practically opaque: skip the blending math
            result[rr, cc, :] = color
        else:
            if is_float_array(result):
                result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                result = np.clip(result, 0, 255)
            else:
                # blend in float space, then restore the original integer dtype
                input_dtype = result.dtype
                result = result.astype(np.float32)
                result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                result = np.clip(result, 0, 255).astype(input_dtype)
    return result
def extract_from_image(self, image):
    """
    Extract the image pixels within the bounding box.
    This function will zero-pad the image if the bounding box is partially/fully outside of
    the image.
    Parameters
    ----------
    image : (H,W) or (H,W,C) ndarray
        The image from which to extract the pixels within the bounding box.
    Returns
    -------
    result : (H',W') or (H',W',C) ndarray
        Pixels within the bounding box. Zero-padded if the bounding box is partially/fully
        outside of the image.
    """
    # Per-side padding amounts; filled in below wherever the box extends
    # beyond the corresponding image border.
    pad_top = 0
    pad_right = 0
    pad_bottom = 0
    pad_left = 0
    height, width = image.shape[0], image.shape[1]
    # NOTE(review): x2_int/y2_int appear to be used as exclusive slice ends
    # (one past the last pixel) in the final slicing -- confirm against the
    # *_int property definitions.
    x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
    # When y values get into the range (H-0.5, H), the *_int functions round them to H.
    # That is technically sensible, but in the case of extraction leads to a black border,
    # which is both ugly and unexpected after calling cut_out_of_image(). Here we correct for
    # that because of beauty reasons.
    # Same is the case for x coordinates.
    if self.is_fully_within_image(image):
        y1 = np.clip(y1, 0, image.shape[0]-1)
        y2 = np.clip(y2, 0, image.shape[0]-1)
        x1 = np.clip(x1, 0, image.shape[1]-1)
        x2 = np.clip(x2, 0, image.shape[1]-1)
    # if the bb is outside of the image area, the following pads the image
    # first with black pixels until the bb is inside the image
    # and only then extracts the image area
    # TODO probably more efficient to initialize an array of zeros
    # and copy only the portions of the bb into that array that are
    # natively inside the image area
    if x1 < 0:
        # shift the box right so that it starts at column 0; the cut-off
        # part is compensated by left padding
        pad_left = abs(x1)
        x2 = x2 + abs(x1)
        x1 = 0
    if y1 < 0:
        # shift the box down so that it starts at row 0; the cut-off part
        # is compensated by top padding
        pad_top = abs(y1)
        y2 = y2 + abs(y1)
        y1 = 0
    if x2 >= width:
        pad_right = x2 - (width - 1)
    if y2 >= height:
        pad_bottom = y2 - (height - 1)
    if any([val > 0 for val in [pad_top, pad_right, pad_bottom, pad_left]]):
        # zero-pad (mode="constant" defaults to value 0) a 2D or 3D image
        if len(image.shape) == 2:
            image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right)), mode="constant")
        else:
            image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), mode="constant")
    return image[y1:y2, x1:x2]
# TODO also add to_heatmap
# TODO add this to BoundingBoxesOnImage
def to_keypoints(self):
    """
    Convert the corners of the bounding box to keypoints (clockwise, starting at top left).

    Returns
    -------
    result : list of Keypoint
        Corners of the bounding box as keypoints.
    """
    # corner order: top-left, top-right, bottom-right, bottom-left
    corners = (
        (self.x1, self.y1),
        (self.x2, self.y1),
        (self.x2, self.y2),
        (self.x1, self.y2),
    )
    return [Keypoint(x=corner_x, y=corner_y) for corner_x, corner_y in corners]
def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
    """
    Create a shallow copy of the BoundingBox object.

    Parameters
    ----------
    x1 : None or number
        If not None, the x1 coordinate of the copied object is set to this value.
    y1 : None or number
        If not None, the y1 coordinate of the copied object is set to this value.
    x2 : None or number
        If not None, the x2 coordinate of the copied object is set to this value.
    y2 : None or number
        If not None, the y2 coordinate of the copied object is set to this value.
    label : None or string
        If not None, the label of the copied object is set to this value.

    Returns
    -------
    result : BoundingBox
        Shallow copy.
    """
    # For every override left at None, fall back to this box's own attribute.
    overrides = (("x1", x1), ("y1", y1), ("x2", x2), ("y2", y2), ("label", label))
    kwargs = {
        name: getattr(self, name) if value is None else value
        for name, value in overrides
    }
    return BoundingBox(**kwargs)
def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
    """
    Create a deep copy of the BoundingBox object.
    Parameters
    ----------
    x1, y1, x2, y2 : None or number
        Optional coordinate overrides for the copied object; values left at
        None are taken from this bounding box (same semantics as in copy()).
    label : None or string
        Optional label override for the copied object.
    Returns
    -------
    out : BoundingBox
        Deep copy.
    """
    # Delegating to copy() suffices because the coordinates are plain
    # numbers; assumes label is immutable (e.g. a string) -- TODO confirm.
    return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)
def __repr__(self):
    """Return the same human-readable representation as ``__str__``."""
    return str(self)
def __str__(self):
    """Return a human-readable representation, coordinates at 4 decimals."""
    template = "BoundingBox(x1=%.4f, y1=%.4f, x2=%.4f, y2=%.4f, label=%s)"
    return template % (self.x1, self.y1, self.x2, self.y2, self.label)
class BoundingBoxesOnImage(object):
    """
    Object that represents all bounding boxes on a single image.

    Parameters
    ----------
    bounding_boxes : list of BoundingBox
        List of bounding boxes on the image.
    shape : tuple of int or ndarray
        The shape of the image on which the bounding boxes are placed.
        May also be the image itself, in which case its shape is used.

    Examples
    --------
    >>> bbs = [
    >>>     BoundingBox(x1=10, y1=20, x2=20, y2=30),
    >>>     BoundingBox(x1=25, y1=50, x2=30, y2=70)
    >>> ]
    >>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
    """

    def __init__(self, bounding_boxes, shape):
        self.bounding_boxes = bounding_boxes
        if is_np_array(shape):
            # an image was handed in instead of a shape tuple
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)

    @property
    def height(self):
        """
        Get the height of the image on which the bounding boxes fall.

        Returns
        -------
        result : int
            Image height.
        """
        return self.shape[0]

    @property
    def width(self):
        """
        Get the width of the image on which the bounding boxes fall.

        Returns
        -------
        result : int
            Image width.
        """
        return self.shape[1]

    @property
    def empty(self):
        """
        Returns whether this object contains zero bounding boxes.

        Returns
        -------
        result : bool
            True if this object contains zero bounding boxes.
        """
        return len(self.bounding_boxes) == 0

    def on(self, image):
        """
        Project bounding boxes from one image to a new one.

        Parameters
        ----------
        image : ndarray or tuple
            New image onto which the bounding boxes are to be projected.
            May also simply be that new image's shape tuple.

        Returns
        -------
        bounding_boxes : BoundingBoxesOnImage
            Object containing all projected bounding boxes.
        """
        shape = image.shape if is_np_array(image) else image
        if shape[0:2] == self.shape[0:2]:
            # same image size, nothing to project
            return self.deepcopy()
        bounding_boxes = [bb.project(self.shape, shape) for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bounding_boxes, shape)

    def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
        """
        Draw all bounding boxes onto a given image.

        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the bounding boxes.
            This image should usually have the same shape as
            set in BoundingBoxesOnImage.shape.
        color : int or list of ints or tuple of ints or (3,) ndarray, optional(default=(0, 255, 0))
            The RGB color of all bounding boxes. If a single int `C`, then that is
            equivalent to (C,C,C). (The default is a tuple instead of a list, as a
            mutable default argument could be accidentally modified by callers.)
        alpha : float, optional(default=1.0)
            Alpha/transparency of the bounding boxes, where 1.0 denotes no
            transparency. (The original docstring documented this parameter
            under the nonexistent name ``size``.)
        thickness : int, optional(default=1)
            Thickness in pixels.
        copy : bool, optional(default=True)
            Whether to copy the image before drawing the bounding boxes.
        raise_if_out_of_image : bool, optional(default=False)
            Whether to raise an exception if any bounding box is outside of the
            image.

        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn bounding boxes.
        """
        # The first drawn box copies the image (if requested); subsequent
        # boxes then draw into that already-copied array.
        for bb in self.bounding_boxes:
            image = bb.draw_on_image(
                image,
                color=color,
                alpha=alpha,
                thickness=thickness,
                copy=copy,
                raise_if_out_of_image=raise_if_out_of_image
            )
        return image

    def remove_out_of_image(self, fully=True, partly=False):
        """
        Remove all bounding boxes that are fully or partially outside of the image.

        Parameters
        ----------
        fully : bool, optional(default=True)
            Whether to remove bounding boxes that are fully outside of the image.
        partly : bool, optional(default=False)
            Whether to remove bounding boxes that are partially outside of the image.

        Returns
        -------
        result : BoundingBoxesOnImage
            Reduced set of bounding boxes, with those that were fully/partially
            outside of the image removed.
        """
        bbs_clean = [
            bb
            for bb in self.bounding_boxes
            if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)
        ]
        return BoundingBoxesOnImage(bbs_clean, shape=self.shape)

    def cut_out_of_image(self):
        """
        Cut off all parts from all bounding boxes that are outside of the image.

        Bounding boxes that are fully outside of the image are dropped
        entirely, as there is nothing left to clip.

        Returns
        -------
        result : BoundingBoxesOnImage
            Bounding boxes, clipped to fall within the image dimensions.
        """
        bbs_cut = [
            bb.cut_out_of_image(self.shape)
            for bb in self.bounding_boxes
            if bb.is_partly_within_image(self.shape)
        ]
        return BoundingBoxesOnImage(bbs_cut, shape=self.shape)

    def shift(self, top=None, right=None, bottom=None, left=None):
        """
        Shift all bounding boxes from one or more image sides, i.e. move them on the x/y-axis.

        Parameters
        ----------
        top : None or int, optional(default=None)
            Amount of pixels by which to shift all bounding boxes from the top.
        right : None or int, optional(default=None)
            Amount of pixels by which to shift all bounding boxes from the right.
        bottom : None or int, optional(default=None)
            Amount of pixels by which to shift all bounding boxes from the bottom.
        left : None or int, optional(default=None)
            Amount of pixels by which to shift all bounding boxes from the left.

        Returns
        -------
        result : BoundingBoxesOnImage
            Shifted bounding boxes.
        """
        bbs_new = [bb.shift(top=top, right=right, bottom=bottom, left=left) for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs_new, shape=self.shape)

    def copy(self):
        """
        Create a shallow copy of the BoundingBoxesOnImage object.

        Returns
        -------
        out : BoundingBoxesOnImage
            Shallow copy.
        """
        return copy.copy(self)

    def deepcopy(self):
        """
        Create a deep copy of the BoundingBoxesOnImage object.

        Returns
        -------
        out : BoundingBoxesOnImage
            Deep copy.
        """
        # Manual copy is far faster than copy.deepcopy for this class
        # (same rationale as for KeypointsOnImage).
        bbs = [bb.deepcopy() for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs, tuple(self.shape))

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "BoundingBoxesOnImage(%s, shape=%s)" % (str(self.bounding_boxes), self.shape)
class HeatmapsOnImage(object):
    """
    Object representing heatmaps on images.
    Internally the heatmap array is always stored normalized to the value
    range [0.0, 1.0] (attribute ``arr_0to1``); ``min_value``/``max_value``
    only describe the external value range used by get_arr().
    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray(float32)
        Array representing the heatmap(s). If multiple heatmaps, then C is expected to denote
        their number.
    shape : tuple of ints
        Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
        heatmap(s) array, unless it is identical to the image shape (note the likely
        difference between the arrays in the number of channels).
        If there is not a corresponding image, use the shape of the heatmaps array.
    min_value : float, optional(default=0.0)
        Minimum value for the heatmaps that `arr` represents. This will usually
        be 0.0.
    max_value : float, optional(default=1.0)
        Maximum value for the heatmaps that `arr` represents. This will usually
        be 1.0.
    """
    def __init__(self, arr, shape, min_value=0.0, max_value=1.0):
        """Construct a new HeatmapsOnImage object."""
        do_assert(arr.dtype.type in [np.float32])
        do_assert(arr.ndim in [2, 3])
        do_assert(len(shape) in [2, 3])
        do_assert(min_value < max_value)
        # Only a sample of the array (first 50 values) is range-checked,
        # presumably for speed -- a full check would be O(H*W*C).
        do_assert(np.min(arr.flat[0:50]) >= min_value - np.finfo(arr.dtype).eps)
        do_assert(np.max(arr.flat[0:50]) <= max_value + np.finfo(arr.dtype).eps)
        if arr.ndim == 2:
            arr = arr[..., np.newaxis]
            self.arr_was_2d = True
        else:
            self.arr_was_2d = False
        # Normalize to [0.0, 1.0] unless the input range already is (0, 1)
        # (compared with eps tolerance to be robust against float noise).
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < min_value < 0.0 + eps
        max_is_one = 1.0 - eps < max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            self.arr_0to1 = arr
        else:
            self.arr_0to1 = (arr - min_value) / (max_value - min_value)
        self.shape = shape
        self.min_value = min_value
        self.max_value = max_value
    def get_arr(self):
        """
        Get the heatmap array in the desired value range.
        The HeatmapsOnImage object saves heatmaps internally in the value range (min=0.0, max=1.0).
        This function converts the internal representation to (min=min_value, max=max_value),
        where min_value and max_value are provided upon instantiation of the object.
        Returns
        -------
        result : (H,W) or (H,W,C) ndarray(float32)
            Heatmap array.
        """
        # restore the original 2D shape if the input was 2D
        if self.arr_was_2d and self.arr_0to1.shape[2] == 1:
            arr = self.arr_0to1[:, :, 0]
        else:
            arr = self.arr_0to1
        # If the external range is (0, 1), no denormalization is needed;
        # still copy so callers cannot mutate the internal array.
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < self.min_value < 0.0 + eps
        max_is_one = 1.0 - eps < self.max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            return np.copy(arr)
        else:
            diff = self.max_value - self.min_value
            return self.min_value + diff * arr
    # TODO
    #def find_global_maxima(self):
    #    raise NotImplementedError()
    def draw(self, size=None, cmap="jet"):
        """
        Render the heatmaps as RGB images.
        Parameters
        ----------
        size : None or float or iterable of two ints or iterable of two floats, optional(default=None)
            Size of the rendered RGB image as (height, width).
            See `imresize_single_image()` for details.
            If set to None, no resizing is performed and the size of the heatmaps array is used.
        cmap : string or None, optional(default="jet")
            Color map of matplotlib to use in order to convert the heatmaps into RGB images.
            If set to None, no color map will be used and the heatmaps will be converted
            as simple intensity maps.
        Returns
        -------
        heatmaps_drawn : list of (H,W,3) ndarray(uint8)
            Rendered heatmaps, one per heatmap array channel.
        """
        heatmaps_uint8 = self.to_uint8()
        heatmaps_drawn = []
        for c in sm.xrange(heatmaps_uint8.shape[2]):
            # c:c+1 here, because the additional axis is needed by imresize_single_image
            heatmap_c = heatmaps_uint8[..., c:c+1]
            if size is not None:
                heatmap_c_rs = imresize_single_image(heatmap_c, size,
                                                     interpolation="nearest")
            else:
                heatmap_c_rs = heatmap_c
            # back to a 2D float map in [0.0, 1.0] for the color map lookup
            heatmap_c_rs = np.squeeze(heatmap_c_rs).astype(np.float32) / 255.0
            if cmap is not None:
                cmap_func = plt.get_cmap(cmap)
                heatmap_cmapped = cmap_func(heatmap_c_rs)
                # drop the alpha channel that matplotlib color maps produce
                heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
            else:
                # no color map: replicate the intensities over 3 channels
                heatmap_cmapped = np.tile(heatmap_c_rs[..., np.newaxis], (1, 1, 3))
            heatmap_cmapped = np.clip(heatmap_cmapped * 255, 0, 255).astype(np.uint8)
            heatmaps_drawn.append(heatmap_cmapped)
        return heatmaps_drawn
    def draw_on_image(self, image, alpha=0.75, cmap="jet", resize="heatmaps"):
        """
        Draw the heatmaps as overlays over an image.
        Parameters
        ----------
        image : (H,W,3) ndarray(uint8)
            Image onto which to draw the heatmaps.
        alpha : float, optional(default=0.75)
            Alpha/opacity value to use for the mixing of image and heatmaps.
            Higher values mean that the heatmaps will be more visible and the image less visible.
        cmap : string or None, optional(default="jet")
            Color map to use. See `HeatmapsOnImage.draw()` for details.
        resize : "heatmaps" or "image", optional(default="heatmaps")
            In case of size differences between the image and heatmaps, either the image or
            the heatmaps can be resized. This parameter controls which of the two will be resized
            to the other's size.
        Returns
        -------
        mix : list of (H,W,3) ndarray(uint8)
            Rendered overlays, one per heatmap array channel.
        """
        # assert RGB image
        do_assert(image.ndim == 3)
        do_assert(image.shape[2] == 3)
        do_assert(image.dtype.type == np.uint8)
        do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
        do_assert(resize in ["heatmaps", "image"])
        if resize == "image":
            image = imresize_single_image(image, self.arr_0to1.shape[0:2], interpolation="cubic")
        heatmaps_drawn = self.draw(
            size=image.shape[0:2] if resize == "heatmaps" else None,
            cmap=cmap
        )
        # alpha-blend each rendered heatmap channel over the image
        mix = [
            np.clip((1-alpha) * image + alpha * heatmap_i, 0, 255).astype(np.uint8)
            for heatmap_i
            in heatmaps_drawn
        ]
        return mix
    def invert(self):
        """
        Inverts each value in the heatmap, shifting low towards high values and vice versa.
        This changes each value to::
            v' = max - (v - min)
        where `v` is the value at some spatial location, `min` is the minimum value in the heatmap
        and `max` is the maximum value.
        As the heatmap uses internally a 0.0 to 1.0 representation, this simply
        becomes `v' = 1.0 - v`.
        Note that the attributes `min_value` and `max_value` are not switched. They both keep their
        values.
        This function can be useful e.g. when working with depth maps, where algorithms might have
        an easier time representing the furthest away points with zeros, requiring an inverted
        depth map.
        Returns
        -------
        result : HeatmapsOnImage
            Inverted heatmap.
        """
        arr_inv = HeatmapsOnImage.from_0to1(1 - self.arr_0to1, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
        arr_inv.arr_was_2d = self.arr_was_2d
        return arr_inv
    def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
        """
        Pad the heatmaps on their top/right/bottom/left side.
        Parameters
        ----------
        top : int, optional(default=0)
            Amount of pixels to add at the top side of the heatmaps. Must be 0 or greater.
        right : int, optional(default=0)
            Amount of pixels to add at the right side of the heatmaps. Must be 0 or greater.
        bottom : int, optional(default=0)
            Amount of pixels to add at the bottom side of the heatmaps. Must be 0 or greater.
        left : int, optional(default=0)
            Amount of pixels to add at the left side of the heatmaps. Must be 0 or greater.
        mode : string, optional(default="constant")
            Padding mode to use. See `numpy.pad()` for details.
        cval : number, optional(default=0.0)
            Value to use for padding if mode="constant". See `numpy.pad()` for details.
        Returns
        -------
        result : HeatmapsOnImage
            Padded heatmaps of height H'=H+top+bottom and width W'=W+left+right.
        """
        arr_0to1_padded = pad(self.arr_0to1, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
        return HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
    def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
        """
        Pad the heatmaps on their sides so that they match a target aspect ratio.
        Depending on which dimension is smaller (height or width), only the corresponding
        sides (left/right or top/bottom) will be padded. In each case, both of the sides will
        be padded equally.
        Parameters
        ----------
        aspect_ratio : float
            Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
            as much width as height.
        mode : string, optional(default="constant")
            Padding mode to use. See `numpy.pad()` for details.
        cval : number, optional(default=0.0)
            Value to use for padding if mode="constant". See `numpy.pad()` for details.
        return_pad_amounts : bool, optional(default=False)
            If False, then only the padded image will be returned. If True, a tuple with two
            entries will be returned, where the first entry is the padded image and the second
            entry are the amounts by which each image side was padded. These amounts are again a
            tuple of the form (top, right, bottom, left), with each value being an integer.
        Returns
        -------
        result : tuple
            First tuple entry: Padded heatmaps as HeatmapsOnImage object.
            Second tuple entry: Amounts by which the heatmaps were padded on each side, given
            as a tuple (top, right, bottom, left).
            If return_pad_amounts is False, then only the heatmaps object is returned.
        """
        arr_0to1_padded, pad_amounts = pad_to_aspect_ratio(self.arr_0to1, aspect_ratio=aspect_ratio, mode=mode, cval=cval, return_pad_amounts=True)
        heatmaps = HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
        if return_pad_amounts:
            return heatmaps, pad_amounts
        else:
            return heatmaps
    def avg_pool(self, block_size):
        """
        Rescale the heatmap(s) array using average pooling of a given block/kernel size.
        Parameters
        ----------
        block_size : int or tuple of two ints or tuple of three ints
            Size of each block of values to pool, aka kernel size. See `imgaug.pool()` for details.
        Returns
        -------
        result : HeatmapsOnImage
            Heatmaps after average pooling.
        """
        arr_0to1_reduced = avg_pool(self.arr_0to1, block_size, cval=0.0)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
    def max_pool(self, block_size):
        """
        Rescale the heatmap(s) array using max-pooling of a given block/kernel size.
        Parameters
        ----------
        block_size : int or tuple of two ints or tuple of three ints
            Size of each block of values to pool, aka kernel size. See `imgaug.pool()` for details.
        Returns
        -------
        result : HeatmapsOnImage
            Heatmaps after max-pooling.
        """
        arr_0to1_reduced = max_pool(self.arr_0to1, block_size)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
    def scale(self, sizes, interpolation="cubic"):
        """
        Rescale the heatmap(s) array to the provided size given the provided interpolation.
        Parameters
        ----------
        sizes : float or iterable of two ints or iterable of two floats
            New size of the array in (height, width). See `imresize_single_image()` for details.
        interpolation : None or string or int, optional(default="cubic")
            The interpolation to use during resize. See `imresize_single_image()` for details.
        Returns
        -------
        result : HeatmapsOnImage
            Rescaled heatmaps object.
        """
        arr_0to1_rescaled = imresize_single_image(self.arr_0to1, sizes, interpolation=interpolation)
        # cubic interpolation can lead to values outside of [0.0, 1.0],
        # see https://github.com/opencv/opencv/issues/7195
        # TODO area interpolation too?
        arr_0to1_rescaled = np.clip(arr_0to1_rescaled, 0.0, 1.0)
        return HeatmapsOnImage.from_0to1(arr_0to1_rescaled, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
    def to_uint8(self):
        """
        Convert this heatmaps object to a 0-to-255 array.
        Returns
        -------
        arr_uint8 : (H,W,C) ndarray(uint8)
            Heatmap as a 0-to-255 array.
        """
        # TODO this always returns (H,W,C), even if input ndarray was originall (H,W)
        # does it make sense here to also return (H,W) if self.arr_was_2d?
        arr_0to255 = np.clip(np.round(self.arr_0to1 * 255), 0, 255)
        arr_uint8 = arr_0to255.astype(np.uint8)
        return arr_uint8
    @staticmethod
    def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from an heatmap array containing values ranging from 0 to 255.
        Parameters
        ----------
        arr_uint8 : (H,W) or (H,W,C) ndarray(uint8)
            Heatmap(s) array, where H=height, W=width, C=heatmap channels.
        shape : tuple of ints
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.
        min_value : float, optional(default=0.0)
            Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
            be 0.0. It is used when calling `HeatmapsOnImage.get_arr()`, which converts the
            underlying (0, 255) array to value range (min_value, max_value).
        max_value : float, optional(default=1.0)
            Maximum value for the heatmaps that 0-to-255 array represents.
            See parameter min_value for details.
        Returns
        -------
        heatmaps : HeatmapsOnImage
            Heatmaps object.
        """
        arr_0to1 = arr_uint8.astype(np.float32) / 255.0
        return HeatmapsOnImage.from_0to1(arr_0to1, shape, min_value=min_value, max_value=max_value)
    @staticmethod
    def from_0to1(arr_0to1, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from an heatmap array containing values ranging from 0.0 to 1.0.
        Parameters
        ----------
        arr_0to1 : (H,W) or (H,W,C) ndarray(float32)
            Heatmap(s) array, where H=height, W=width, C=heatmap channels.
        shape : tuple of ints
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.
        min_value : float, optional(default=0.0)
            Minimum value for the heatmaps that the 0-to-1 array represents. This will usually
            be 0.0. It is used when calling `HeatmapsOnImage.get_arr()`, which converts the
            underlying (0.0, 1.0) array to value range (min_value, max_value).
            E.g. if you started with heatmaps in the range (-1.0, 1.0) and projected these
            to (0.0, 1.0), you should call this function with min_value=-1.0, max_value=1.0
            so that `get_arr()` returns heatmap arrays having value range (-1.0, 1.0).
        max_value : float, optional(default=1.0)
            Maximum value for the heatmaps that the 0-to-1 array represents.
            See parameter min_value for details.
        Returns
        -------
        heatmaps : HeatmapsOnImage
            Heatmaps object.
        """
        # Construct with range (0, 1) so that __init__ skips renormalization,
        # then store the externally visible value range directly.
        heatmaps = HeatmapsOnImage(arr_0to1, shape, min_value=0.0, max_value=1.0)
        heatmaps.min_value = min_value
        heatmaps.max_value = max_value
        return heatmaps
    @staticmethod
    def change_normalization(arr, source, target):
        """
        Change the value range of a heatmap from one min-max to another min-max.
        E.g. the value range may be changed from min=0.0, max=1.0 to min=-1.0, max=1.0.
        Parameters
        ----------
        arr : ndarray
            Heatmap array to modify.
        source : tuple of two floats or HeatmapsOnImage
            Current value range of the input array, given as (min, max), where both are float
            values. A HeatmapsOnImage may also be given, in which case its
            min_value/max_value are used.
        target : tuple of two floats or HeatmapsOnImage
            Desired output value range of the array, given as (min, max), where both are float
            values. A HeatmapsOnImage may also be given, in which case its
            min_value/max_value are used.
        Returns
        -------
        arr_target : ndarray
            Input array, with value range projected to the desired target value range.
        """
        do_assert(is_np_array(arr))
        if isinstance(source, HeatmapsOnImage):
            source = (source.min_value, source.max_value)
        else:
            do_assert(isinstance(source, tuple))
            do_assert(len(source) == 2)
            do_assert(source[0] < source[1])
        if isinstance(target, HeatmapsOnImage):
            target = (target.min_value, target.max_value)
        else:
            do_assert(isinstance(target, tuple))
            do_assert(len(target) == 2)
            do_assert(target[0] < target[1])
        # Check if source and target are the same (with a tiny bit of tolerance)
        # if so, avoid the computation and just copy the array instead.
        # This is reasonable, as source and target will often both be (0.0, 1.0).
        eps = np.finfo(arr.dtype).eps
        mins_same = source[0] - 10*eps < target[0] < source[0] + 10*eps
        maxs_same = source[1] - 10*eps < target[1] < source[1] + 10*eps
        if mins_same and maxs_same:
            return np.copy(arr)
        min_source, max_source = source
        min_target, max_target = target
        diff_source = max_source - min_source
        diff_target = max_target - min_target
        arr_0to1 = (arr - min_source) / diff_source
        arr_target = min_target + arr_0to1 * diff_target
        return arr_target
    def copy(self):
        """
        Create a copy of the Heatmaps object.
        NOTE: despite the name, this delegates to deepcopy(), so the returned
        object does NOT share its heatmap array with this one.
        Returns
        -------
        out : HeatmapsOnImage
            Copy (deep, see note above).
        """
        return self.deepcopy()
    def deepcopy(self):
        """
        Create a deep copy of the Heatmaps object.
        Returns
        -------
        out : HeatmapsOnImage
            Deep copy.
        """
        # get_arr() already returns a fresh array in the external value
        # range, so re-instantiating from it yields an independent copy.
        return HeatmapsOnImage(self.get_arr(), shape=self.shape, min_value=self.min_value, max_value=self.max_value)
class SegmentationMapOnImage(object):
"""
Object representing a segmentation map associated with an image.
Attributes
----------
DEFAULT_SEGMENT_COLORS : list of tuple of int
Standard RGB colors to use during drawing, ordered by class index.
Parameters
----------
arr : (H,W) ndarray or (H,W,1) ndarray or (H,W,C) ndarray
Array representing the segmentation map. May have datatypes bool, integer or float.
* If bool: Assumed to be of shape (H,W), (H,W,1) or (H,W,C). If (H,W) or (H,W,1) it
is assumed to be for the case of having a single class (where any False denotes
background). Otherwise there are assumed to be C channels, one for each class,
with each of them containing a mask for that class. The masks may overlap.
* If integer: Assumed to be of shape (H,W) or (H,W,1). Each pixel is assumed to
contain an integer denoting the class index. Classes are assumed to be
non-overlapping. The number of classes cannot be guessed from this input, hence
nb_classes must be set.
* If float: Assumed to be of shape (H,W), (H,W,1) or (H,W,C) with meanings being
similar to the case of `bool`. Values are expected to fall always in the range
0.0 to 1.0 and are usually expected to be either 0.0 or 1.0 upon instantiation
of a new segmentation map. Classes may overlap.
shape : iterable of int
Shape of the corresponding image (NOT the segmentation map array). This is expected
to be (H, W) or (H, W, C) with C usually being 3. If there is no corresponding image,
then use the segmentation map's shape instead.
nb_classes : int or None
Total number of unique classes that may appear in an segmentation map, i.e. the max
class index. This may be None if the input array is of type bool or float. The number
of classes however must be provided if the input array is of type int, as then the
number of classes cannot be guessed.
"""
DEFAULT_SEGMENT_COLORS = [
(0, 0, 0), # black
(230, 25, 75), # red
(60, 180, 75), # green
(255, 225, 25), # yellow
(0, 130, 200), # blue
(245, 130, 48), # orange
(145, 30, 180), # purple
(70, 240, 240), # cyan
(240, 50, 230), # magenta
(210, 245, 60), # lime
(250, 190, 190), # pink
(0, 128, 128), # teal
(230, 190, 255), # lavender
(170, 110, 40), # brown
(255, 250, 200), # beige
(128, 0, 0), # maroon
(170, 255, 195), # mint
(128, 128, 0), # olive
(255, 215, 180), # coral
(0, 0, 128), # navy
(128, 128, 128), # grey
(255, 255, 255), # white
# --
(115, 12, 37), # dark red
(30, 90, 37), # dark green
(127, 112, 12), # dark yellow
(0, 65, 100), # dark blue
(122, 65, 24), # dark orange
(72, 15, 90), # dark purple
(35, 120, 120), # dark cyan
(120, 25, 115), # dark magenta
(105, 122, 30), # dark lime
(125, 95, 95), # dark pink
(0, 64, 64), # dark teal
(115, 95, 127), # dark lavender
(85, 55, 20), # dark brown
(127, 125, 100), # dark beige
(64, 0, 0), # dark maroon
(85, 127, 97), # dark mint
(64, 64, 0), # dark olive
(127, 107, 90), # dark coral
(0, 0, 64), # dark navy
(64, 64, 64), # dark grey
]
def __init__(self, arr, shape, nb_classes=None):
    """
    Construct a new SegmentationMapOnImage object.

    Parameters
    ----------
    arr : (H,W) or (H,W,1) or (H,W,C) ndarray
        Segmentation map array. Bool arrays are treated as per-class masks,
        integer arrays as class-index maps (requires nb_classes), float
        arrays as per-class heatmaps. See the class docstring for details.
    shape : iterable of int
        Shape of the corresponding *image* (not of the segmentation map array).
    nb_classes : int or None, optional(default=None)
        Total number of classes. Required for integer inputs; derived from
        the channel count otherwise.
    """
    # np.bool_ is the actual numpy boolean scalar type. The old alias
    # np.bool (== builtin bool) was deprecated in numpy 1.20 and removed
    # in 1.24, and never matched arr.dtype.type anyway, so using it here
    # was a bug.
    if arr.dtype.type == np.bool_:
        do_assert(arr.ndim in [2, 3])
        self.input_was = ("bool", arr.ndim)
        if arr.ndim == 2:
            arr = arr[..., np.newaxis]
        arr = arr.astype(np.float32)
    elif arr.dtype.type in [np.uint8, np.uint32, np.int8, np.int16, np.int32]:
        do_assert(arr.ndim == 2 or (arr.ndim == 3 and arr.shape[2] == 1))
        do_assert(nb_classes is not None)
        do_assert(nb_classes > 0)
        # only a sample of the array is validated, presumably for speed
        do_assert(np.min(arr.flat[0:100]) >= 0)
        do_assert(np.max(arr.flat[0:100]) <= nb_classes)
        self.input_was = ("int", arr.dtype.type, arr.ndim)
        if arr.ndim == 3:
            arr = arr[..., 0]
        arr = np.eye(nb_classes)[arr]  # from class indices to one hot
        arr = arr.astype(np.float32)
    elif arr.dtype.type in [np.float16, np.float32]:
        # NOTE(review): the class docstring says float inputs may also be
        # (H,W), but this assertion only accepts (H,W,C) -- confirm which
        # is intended before relaxing it.
        do_assert(arr.ndim == 3)
        self.input_was = ("float", arr.dtype.type, arr.ndim)
        arr = arr.astype(np.float32)
    else:
        dt = str(arr.dtype) if is_np_array(arr) else "<no ndarray>"
        raise Exception("Input was expected to be an ndarray of dtype bool, uint8, uint32 "
                        "int8, int16, int32 or float32. Got type %s with dtype %s." % (type(arr), dt))
    # internal canonical representation: float32 one-hot/heatmap channels
    do_assert(arr.ndim == 3)
    do_assert(arr.dtype.type == np.float32)
    self.arr = arr
    self.shape = shape
    # one channel per class when nb_classes was not given explicitly
    self.nb_classes = nb_classes if nb_classes is not None else arr.shape[2]
#@property
#def nb_classes(self):
# return self.arr.shape[2]
def get_arr_int(self, background_threshold=0.01, background_class_id=None):
"""
Get the segmentation map array as an integer array of shape (H, W).
Each pixel in that array contains an integer value representing the pixel's class.
If multiple classes overlap, the one with the highest local float value is picked.
If that highest local value is below `background_threshold`, the method instead uses
the background class id as the pixel's class value.
By default, class id 0 is the background class. This may only be changed if the original
input to the segmentation map object was an integer map.
Parameters
----------
background_threshold : float, optional(default=0.01)
At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
class-heatmaps has a value above this threshold, the method uses the background class
id instead.
background_class_id : None or int, optional(default=None)
Class id to fall back to if no class-heatmap passes the threshold at a spatial
location. May only be provided if the original input was an integer mask and in these
cases defaults to 0. If the input were float or boolean masks, the background class id
may not be set as it is assumed that the background is implicitly defined
as 'any spatial location that has zero-like values in all masks'.
Returns
-------
result : (H,W) ndarray(int)
Segmentation map array.
If the original input consisted of boolean or float masks, then the highest possible
class id is `1+C`, where `C` is the number of provided float/boolean masks. The value
`0` in the integer mask then denotes the background class.
"""
if self.input_was[0] in ["bool", "float"]:
do_assert(background_class_id is None, "The background class id may only be changed if the original input to SegmentationMapOnImage was an *integer* based segmentation map.")
if background_class_id is None:
background_class_id = 0
channelwise_max_idx = np.argmax(self.arr, axis=2)
# for bool and float input masks, we assume that the background is implicitly given,
# i.e. anything where all masks/channels have zero-like values
# for int, we assume that the background class is explicitly given and has the index 0
if self.input_was[0] in ["bool", "float"]:
result = 1 + channelwise_max_idx
else: # integer mask was provided
result = channelwise_max_idx
if background_threshold is not None and background_threshold > 0:
probs = np.amax(self.arr, axis=2)
result[probs < background_threshold] = background_class_id
return result.astype(np.int32)
#def get_arr_bool(self, allow_overlapping=False, threshold=0.5, background_threshold=0.01, background_class_id=0):
# # TODO
# raise NotImplementedError()
def draw(self, size=None, background_threshold=0.01, background_class_id=None, colors=None, return_foreground_mask=False):
"""
Render the segmentation map as an RGB image.
Parameters
----------
size : None or float or iterable of two ints or iterable of two floats, optional(default=None)
Size of the rendered RGB image as (height, width).
See `imresize_single_image()` for details.
If set to None, no resizing is performed and the size of the segmentation map array is
used.
background_threshold : float, optional(default=0.01)
See `SegmentationMapOnImage.get_arr_int()`.
background_class_id : None or int, optional(default=None)
See `SegmentationMapOnImage.get_arr_int()`.
colors : None or list of tuple of int, optional(default=None)
Colors to use. One for each class to draw. If None, then default colors will be used.
return_foreground_mask : bool, optional(default=False)
Whether to return a mask of the same size as the drawn segmentation map, containing
True at any spatial location that is not the background class and False everywhere
else.
Returns
-------
segmap_drawn : (H,W,3) ndarray(uint8)
Rendered segmentation map.
foreground_mask : (H,W) ndarray(bool)
Mask indicating the locations of foreground classes. Only returned if
return_foreground_mask is True.
"""
arr = self.get_arr_int(background_threshold=background_threshold, background_class_id=background_class_id)
nb_classes = 1 + np.max(arr)
segmap_drawn = np.zeros((arr.shape[0], arr.shape[1], 3), dtype=np.uint8)
if colors is None:
colors = SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS
do_assert(nb_classes <= len(colors), "Can't draw all %d classes as it would exceed the maximum number of %d available colors." % (nb_classes, len(colors),))
ids_in_map = np.unique(arr)
for c, color in zip(sm.xrange(nb_classes), colors):
if c in ids_in_map:
class_mask = (arr == c)
segmap_drawn[class_mask] = color
if return_foreground_mask:
background_class_id = 0 if background_class_id is None else background_class_id
foreground_mask = (arr != background_class_id)
else:
foreground_mask = None
if size is not None:
segmap_drawn = imresize_single_image(segmap_drawn, size, interpolation="nearest")
if foreground_mask is not None:
foreground_mask = imresize_single_image(foreground_mask.astype(np.uint8), size, interpolation="nearest") > 0
if foreground_mask is not None:
return segmap_drawn, foreground_mask
return segmap_drawn
    def draw_on_image(self, image, alpha=0.5, resize="segmentation_map", background_threshold=0.01, background_class_id=None, colors=None, draw_background=False):
        """
        Draw the segmentation map as an overlay over an image.
        Parameters
        ----------
        image : (H,W,3) ndarray(uint8)
            Image onto which to draw the segmentation map.
        alpha : float, optional(default=0.5)
            Alpha/opacity value to use for the mixing of image and segmentation map.
            Higher values mean that the segmentation map will be more visible and the image less
            visible.
        resize : "segmentation_map" or "image", optional(default="segmentation_map")
            In case of size differences between the image and segmentation map, either the image or
            the segmentation map can be resized. This parameter controls which of the two will be
            resized to the other's size.
        background_threshold : float, optional(default=0.01)
            See `SegmentationMapOnImage.get_arr_int()`.
        background_class_id : None or int, optional(default=None)
            See `SegmentationMapOnImage.get_arr_int()`.
        colors : None or list of tuple of int, optional(default=None)
            Colors to use. One for each class to draw. If None, then default colors will be used.
        draw_background : bool, optional(default=False)
            If True, the background will be drawn like any other class.
            If False, the background will not be drawn, i.e. the respective background pixels
            will be identical with the image's RGB color at the corresponding spatial location
            and no color overlay will be applied.
        Returns
        -------
        mix : (H,W,3) ndarray(uint8)
            Rendered overlays.
        """
        # assert RGB image
        do_assert(image.ndim == 3)
        do_assert(image.shape[2] == 3)
        do_assert(image.dtype.type == np.uint8)
        # small epsilon tolerates float rounding at exactly 0.0 / 1.0
        do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
        do_assert(resize in ["segmentation_map", "image"])
        if resize == "image":
            image = imresize_single_image(image, self.arr.shape[0:2], interpolation="cubic")
        segmap_drawn, foreground_mask = self.draw(
            background_threshold=background_threshold,
            background_class_id=background_class_id,
            size=image.shape[0:2] if resize == "segmentation_map" else None,
            colors=colors,
            return_foreground_mask=True
        )
        if draw_background:
            # blend everywhere, background included
            mix = np.clip(
                (1-alpha) * image + alpha * segmap_drawn,
                0,
                255
            ).astype(np.uint8)
        else:
            # blend only at foreground pixels; background keeps the raw image
            foreground_mask = foreground_mask[..., np.newaxis]
            mix = np.zeros_like(image)
            mix += (~foreground_mask).astype(np.uint8) * image
            mix += foreground_mask.astype(np.uint8) * np.clip(
                (1-alpha) * image + alpha * segmap_drawn,
                0,
                255
            ).astype(np.uint8)
        return mix
def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
"""
Pad the segmentation map on its top/right/bottom/left side.
Parameters
----------
top : int, optional(default=0)
Amount of pixels to add at the top side of the segmentation map. Must be 0 or
greater.
right : int, optional(default=0)
Amount of pixels to add at the right side of the segmentation map. Must be 0 or
greater.
bottom : int, optional(default=0)
Amount of pixels to add at the bottom side of the segmentation map. Must be 0 or
greater.
left : int, optional(default=0)
Amount of pixels to add at the left side of the segmentation map. Must be 0 or
greater.
mode : string, optional(default="constant")
Padding mode to use. See `numpy.pad()` for details.
cval : number, optional(default=0.0)
Value to use for padding if mode="constant". See `numpy.pad()` for details.
Returns
-------
segmap : SegmentationMapOnImage
Padded segmentation map of height H'=H+top+bottom and width W'=W+left+right.
"""
arr_padded = pad(self.arr, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)
segmap.input_was = self.input_was
return segmap
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
"""
Pad the segmentation map on its sides so that its matches a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : string, optional(default="constant")
Padding mode to use. See `numpy.pad()` for details.
cval : number, optional(default=0.0)
Value to use for padding if mode="constant". See `numpy.pad()` for details.
return_pad_amounts : bool, optional(default=False)
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
segmap : tuple
First tuple entry: Padded segmentation map as SegmentationMapOnImage object.
Second tuple entry: Amounts by which the segmentation map was padded on each side,
given as a tuple (top, right, bottom, left).
If return_pad_amounts is False, then only the segmentation map object is returned.
"""
arr_padded, pad_amounts = pad_to_aspect_ratio(self.arr, aspect_ratio=aspect_ratio, mode=mode, cval=cval, return_pad_amounts=True)
segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)
segmap.input_was = self.input_was
if return_pad_amounts:
return segmap, pad_amounts
else:
return segmap
def scale(self, sizes, interpolation="cubic"):
"""
Rescale the segmentation map array to the provided size given the provided interpolation.
Parameters
----------
sizes : float or iterable of two ints or iterable of two floats
New size of the array in (height, width). See `imresize_single_image()` for details.
interpolation : None or string or int, optional(default="cubic")
The interpolation to use during resize. See `imresize_single_image()` for details.
Note: The segmentation map is internally stored as multiple float-based heatmaps,
making smooth interpolations potentially more reasonable than nearest neighbour
interpolation.
Returns
-------
segmap : SegmentationMapOnImage
Rescaled segmentation map object.
"""
arr_rescaled = imresize_single_image(self.arr, sizes, interpolation=interpolation)
# cubic interpolation can lead to values outside of [0.0, 1.0],
# see https://github.com/opencv/opencv/issues/7195
# TODO area interpolation too?
arr_rescaled = np.clip(arr_rescaled, 0.0, 1.0)
segmap = SegmentationMapOnImage(arr_rescaled, shape=self.shape)
segmap.input_was = self.input_was
return segmap
def to_heatmaps(self, only_nonempty=False, not_none_if_no_nonempty=False):
"""
Convert segmentation map to heatmaps object.
Each segmentation map class will be represented as a single heatmap channel.
Parameters
----------
only_nonempty : bool, optional(default=False)
If True, then only heatmaps for classes that appear in the segmentation map will be
generated. Additionally, a list of these class ids will be returned.
not_none_if_no_nonempty : bool, optional(default=False)
If `only_nonempty` is True and for a segmentation map no channel was non-empty,
this function usually returns None as the heatmaps object. If however this parameter
is set to True, a heatmaps object with one channel (representing class 0)
will be returned as a fallback in these cases.
Returns
-------
result : HeatmapsOnImage or None
Segmentation map as heatmaps.
If `only_nonempty` was set to True and no class appeared in the segmentation map,
then this is None.
class_indices : list of int
Class ids (0 to C-1) of the classes that were actually added to the heatmaps.
Only returned if `only_nonempty` was set to True.
"""
if not only_nonempty:
return HeatmapsOnImage.from_0to1(self.arr, self.shape, min_value=0.0, max_value=1.0)
else:
nonempty_mask = np.sum(self.arr, axis=(0, 1)) > 0 + 1e-4
if np.sum(nonempty_mask) == 0:
if not_none_if_no_nonempty:
nonempty_mask[0] = True
else:
return None, []
class_indices = np.arange(self.arr.shape[2])[nonempty_mask]
channels = self.arr[..., class_indices]
return HeatmapsOnImage(channels, self.shape, min_value=0.0, max_value=1.0), class_indices
@staticmethod
def from_heatmaps(heatmaps, class_indices=None, nb_classes=None):
"""
Convert heatmaps to segmentation map.
Assumes that each class is represented as a single heatmap channel.
Parameters
----------
heatmaps : HeatmapsOnImage
Heatmaps to convert.
class_indices : None or list of int, optional(default=None)
List of class indices represented by each heatmap channel. See also the
secondary output of `to_heatmap()`. If this is provided, it must have the same
length as the number of heatmap channels.
nb_classes : None or int, optional(default=None)
Number of classes. Must be provided if class_indices is set.
Returns
-------
result : SegmentationMapOnImage
Segmentation map derived from heatmaps.
"""
if class_indices is None:
return SegmentationMapOnImage(heatmaps.arr_0to1, shape=heatmaps.shape)
else:
do_assert(nb_classes is not None)
do_assert(min(class_indices) >= 0)
do_assert(max(class_indices) < nb_classes)
do_assert(len(class_indices) == heatmaps.arr_0to1.shape[2])
arr_0to1 = heatmaps.arr_0to1
arr_0to1_full = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1], nb_classes), dtype=np.float32)
#empty_channel = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1]), dtype=np.float32)
class_indices_set = set(class_indices)
heatmap_channel = 0
for c in sm.xrange(nb_classes):
if c in class_indices_set:
arr_0to1_full[:, :, c] = arr_0to1[:, :, heatmap_channel]
heatmap_channel += 1
return SegmentationMapOnImage(arr_0to1_full, shape=heatmaps.shape)
    def copy(self):
        """
        Create a copy of the segmentation map object.

        NOTE: despite the name, this currently delegates to ``deepcopy()``,
        so the returned object does not share its array with ``self``.

        Returns
        -------
        out : SegmentationMapOnImage
            Copy (currently a deep copy).
        """
        return self.deepcopy()
    def deepcopy(self):
        """
        Create a deep copy of the segmentation map object.

        Returns
        -------
        out : SegmentationMapOnImage
            Deep copy.
        """
        # __init__ re-applies astype(np.float32), which returns a new array
        # by default, so the copy owns its data; input_was is restored so
        # conversions (e.g. get_arr_int) behave like on the original.
        segmap = SegmentationMapOnImage(self.arr, shape=self.shape, nb_classes=self.nb_classes)
        segmap.input_was = self.input_was
        return segmap
############################
# Background augmentation
############################
class Batch(object):
    """
    Container for one batch before and after augmentation.

    Parameters
    ----------
    images : None or (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
        The images to augment.
    heatmaps : None or list of HeatmapsOnImage
        The heatmaps to augment.
    segmentation_maps : None or list of SegmentationMapOnImage
        The segmentation maps to augment.
    keypoints : None or list of KeypointOnImage
        The keypoints to augment.
    bounding_boxes : None or list of BoundingBoxesOnImage
        The bounding boxes to augment.
    data : anything
        Additional data carried through augmentation, e.g. filepaths of the
        images. Useful because background augmentation may return batches
        out of order.
    """
    def __init__(self, images=None, heatmaps=None, segmentation_maps=None, keypoints=None, bounding_boxes=None, data=None):
        # pre-augmentation inputs
        self.images = images
        self.heatmaps = heatmaps
        self.segmentation_maps = segmentation_maps
        self.keypoints = keypoints
        self.bounding_boxes = bounding_boxes
        # post-augmentation outputs, filled in by the augmentation pipeline
        self.images_aug = None
        self.heatmaps_aug = None
        self.segmentation_maps_aug = None
        self.keypoints_aug = None
        self.bounding_boxes_aug = None
        # arbitrary user payload, passed through untouched
        self.data = data
class BatchLoader(object):
    """
    Class to load batches in the background.

    Loaded batches can be accessed using `BatchLoader.queue`.

    Parameters
    ----------
    load_batch_func : callable
        Function that yields Batch objects (i.e. expected to be a generator).
        Background loading automatically stops when the last batch was yielded.
    queue_size : int, optional(default=50)
        Maximum number of batches to store in the queue. May be set higher
        for small images and/or small batches.
    nb_workers : int, optional(default=1)
        Number of workers to run in the background.
    threaded : bool, optional(default=True)
        Whether to run the background processes using threads (true) or
        full processes (false).
    """
    def __init__(self, load_batch_func, queue_size=50, nb_workers=1, threaded=True):
        do_assert(queue_size > 0)
        do_assert(nb_workers >= 1)
        # shared queue into which all workers put pickled batches
        self.queue = multiprocessing.Queue(queue_size)
        # set once by terminate() to ask all workers to stop
        self.join_signal = multiprocessing.Event()
        self.finished_signals = []
        self.workers = []
        self.threaded = threaded
        # one seed per worker so process-based workers do not all inherit
        # the same RNG state
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            finished_signal = multiprocessing.Event()
            self.finished_signals.append(finished_signal)
            if threaded:
                # threads get seedval=None, i.e. they keep the parent's RNG
                # state instead of reseeding
                worker = threading.Thread(target=self._load_batches, args=(load_batch_func, self.queue, finished_signal, self.join_signal, None))
            else:
                worker = multiprocessing.Process(target=self._load_batches, args=(load_batch_func, self.queue, finished_signal, self.join_signal, seeds[i]))
            worker.daemon = True
            worker.start()
            self.workers.append(worker)
    def all_finished(self):
        """
        Determine whether the workers have finished the loading process.

        Returns
        -------
        out : bool
            True if all workers have finished. Else False.
        """
        return all([event.is_set() for event in self.finished_signals])
    def _load_batches(self, load_batch_func, queue, finished_signal, join_signal, seedval):
        # Worker loop: pull batches from the generator, pickle them and push
        # them into the shared queue until exhausted or asked to join.
        if seedval is not None:
            random.seed(seedval)
            np.random.seed(seedval)
            seed(seedval)
        try:
            for batch in load_batch_func():
                do_assert(isinstance(batch, Batch), "Expected batch returned by lambda function to be of class imgaug.Batch, got %s." % (type(batch),))
                batch_pickled = pickle.dumps(batch, protocol=-1)
                # retry with a short timeout so a full queue never blocks
                # the worker past a terminate() request
                while not join_signal.is_set():
                    try:
                        queue.put(batch_pickled, timeout=0.001)
                        break
                    except QueueFull:
                        pass
                if join_signal.is_set():
                    break
        except Exception:
            traceback.print_exc()
        finally:
            # always announce completion, even after errors, so consumers
            # polling all_finished() do not wait forever
            finished_signal.set()
    def terminate(self):
        """
        Stop all workers.
        """
        self.join_signal.set()
        # give minimal time to put generated batches in queue and gracefully shut down
        time.sleep(0.002)
        # clean the queue, this reportedly prevents hanging threads
        while True:
            try:
                self.queue.get(timeout=0.005)
            except QueueEmpty:
                break
        if self.threaded:
            for worker in self.workers:
                worker.join()
            # we don't have to set the finished_signals here, because threads always finish
            # gracefully
        else:
            for worker in self.workers:
                worker.terminate()
                worker.join()
            # wait here a tiny bit to really make sure that everything is killed before setting
            # the finished_signals. calling set() and is_set() (via a subprocess) on them at the
            # same time apparently results in a deadlock (at least in python 2).
            #time.sleep(0.02)
            for finished_signal in self.finished_signals:
                finished_signal.set()
        self.queue.close()
class BackgroundAugmenter(object):
    """
    Class to augment batches in the background (while training on the GPU).

    This is a wrapper around the multiprocessing module.

    Parameters
    ----------
    batch_loader : BatchLoader
        BatchLoader object to load data in the background.
    augseq : Augmenter
        An augmenter to apply to all loaded images.
        This may be e.g. a Sequential to apply multiple augmenters.
    queue_size : int
        Size of the queue that is used to temporarily save the augmentation
        results. Larger values offer the background processes more room
        to save results when the main process doesn't load much, i.e. they
        can lead to smoother and faster training. For large images, high
        values can block a lot of RAM though.
    nb_workers : "auto" or int
        Number of background workers to spawn. If auto, it will be set
        to C-1, where C is the number of CPU cores.
    """
    def __init__(self, batch_loader, augseq, queue_size=50, nb_workers="auto"):
        do_assert(queue_size > 0)
        self.augseq = augseq
        self.source_finished_signals = batch_loader.finished_signals
        self.queue_source = batch_loader.queue
        self.queue_result = multiprocessing.Queue(queue_size)
        if nb_workers == "auto":
            try:
                nb_workers = multiprocessing.cpu_count()
            except (ImportError, NotImplementedError):
                nb_workers = 1
            # try to reserve at least one core for the main process
            nb_workers = max(1, nb_workers - 1)
        else:
            do_assert(nb_workers >= 1)
        self.nb_workers = nb_workers
        self.workers = []
        self.nb_workers_finished = 0
        self.augment_images = True
        self.augment_keypoints = True
        # one seed per worker so augmentations differ between processes
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            worker = multiprocessing.Process(target=self._augment_images_worker, args=(augseq, self.queue_source, self.queue_result, self.source_finished_signals, seeds[i]))
            worker.daemon = True
            worker.start()
            self.workers.append(worker)
    def get_batch(self):
        """
        Returns a batch from the queue of augmented batches.

        If workers are still running and there are no batches in the queue,
        it will automatically wait for the next batch.

        Returns
        -------
        out : None or ia.Batch
            One batch or None if all workers have finished.
        """
        # loop (instead of recursing) over worker-finished sentinels until
        # a real batch arrives or every worker has announced completion
        while True:
            batch = pickle.loads(self.queue_result.get())
            if batch is not None:
                return batch
            self.nb_workers_finished += 1
            if self.nb_workers_finished == self.nb_workers:
                return None
    def _augment_images_worker(self, augseq, queue_source, queue_result, source_finished_signals, seedval):
        """
        Worker function that endlessly queries the source queue (input
        batches), augments batches in it and sends the result to the output
        queue.
        """
        np.random.seed(seedval)
        random.seed(seedval)
        augseq.reseed(seedval)
        seed(seedval)
        while True:
            # wait for a new batch in the source queue and load it
            try:
                batch_str = queue_source.get(timeout=0.1)
                batch = pickle.loads(batch_str)
                batch_aug = list(augseq.augment_batches([batch], background=False))[0]
                # send the AUGMENTED batch to the output queue
                # (bugfix: previously the un-augmented input batch was
                # pickled here, silently discarding the augmentation result)
                queue_result.put(pickle.dumps(batch_aug, protocol=-1))
            except QueueEmpty:
                # source queue drained: terminate once all loaders are done,
                # signalling completion with a pickled None sentinel
                if all([signal.is_set() for signal in source_finished_signals]):
                    queue_result.put(pickle.dumps(None, protocol=-1))
                    return
    def terminate(self):
        """
        Terminates all background processes immediately.

        This will also free their RAM.
        """
        for worker in self.workers:
            worker.terminate()
        self.queue_result.close()
|
# coding: utf-8
import datetime
from django.core.management.base import BaseCommand
from couchdbkit.exceptions import ResourceNotFound, ResourceConflict
from manoseimas.legal_acts.models import LegalAct
from manoseimas.legislation.models import Law, LawChange, LawProject
from manoseimas.legislation.utils import normalize, split_law_name
from sboard.models import couch
from sboard.utils import get_node_id
class SyncException(Exception):
    """Raised when a legal act document cannot be synchronized."""
class SyncProcessor(object):
    """
    Synchronizes raw legal act documents into django-sboard nodes.

    Law documents become Law nodes; amendments and projects become
    LawChange/LawProject nodes linked to their parent laws.
    """
    def __init__(self):
        self.relations = {}
    def get_by_number(self, number):
        """Return the node with the given legal act number, or None."""
        try:
            return couch.view('legislation/by_number', key=number).first()
        except ResourceNotFound:
            return None
    def get_by_cleaned_name(self, cleaned_name):
        """Return the node with the given normalized name, or None."""
        try:
            return couch.view('legislation/by_cleaned_name',
                              key=cleaned_name).first()
        except ResourceNotFound:
            return None
    def get_parents(self, split=None):
        """Map normalized parent law names to existing node ids.

        Names without a matching node are silently skipped.
        """
        parents = []
        split = split or []
        for cleaned_name in split:
            node = self.get_by_cleaned_name(cleaned_name)
            if node is not None:
                parents.append(node._id)
        return parents
    def date_to_datetime(self, date):
        """Convert a date to a datetime at midnight."""
        return datetime.datetime.combine(date, datetime.time())
    def update_node(self, cls, legal_act, node_id, split=None):
        """Create or refresh the node of type `cls` for `legal_act`."""
        node = self.get_by_number(legal_act.number)
        if node is None:
            node = cls()
            node._id = node_id
        node.number = legal_act.number
        node.title = legal_act.name
        node.cleaned_name = normalize(legal_act.name)
        node.created = self.date_to_datetime(legal_act.date)
        node.parents = self.get_parents(split)
        # XXX: temporary, for cleaning database.
        if 'body' in node:
            del node.body
        try:
            node.save()
        except ResourceConflict:
            # Concurrent update won; skip attaching the body since our save
            # did not go through. (Bugfix: set_body() previously ran
            # unconditionally, writing a body for a never-saved revision.)
            pass
        else:
            # TODO: Some legal acts can have the same name, but different
            # numbers. This must somehow be handled.
            #
            # Possible solution - sync all legal acts starting from oldest, if
            # a legal act with same name exists, then track it as history node
            # with an update that comes with newer legal act with same name.
            node.set_body(legal_act.current_version(), 'text/html')
        print('Node: %s' % node._id)
    def process_law(self, legal_act):
        """Sync a primary law; node id is derived from the law name."""
        node_id = get_node_id(legal_act.name)
        self.update_node(Law, legal_act, node_id)
    def process_law_change(self, legal_act, split):
        """Sync a law amendment; gets an auto-generated node id."""
        node_id = get_node_id()
        self.update_node(LawChange, legal_act, node_id, split)
    def process_law_project(self, legal_act, split):
        """Sync a law project; gets an auto-generated node id."""
        node_id = get_node_id()
        self.update_node(LawProject, legal_act, node_id, split)
    def process(self, legal_act):
        """Dispatch one legal act document by its 'kind' attribute.

        Documents lacking 'kind' or 'name' are skipped silently for now.
        """
        if 'kind' not in legal_act:
            return
            # TODO: examine documents, that does not have 'kind' attribute
            #raise SyncException("Document does not have 'kind' attribute.")
        if 'name' not in legal_act:
            return
            # TODO: examine documents, that does not have 'name' attribute
            #raise SyncException("Document does not have 'name' attribute.")
        split = split_law_name(legal_act.name)
        law_kinds = (u'įstatymas', u'konstitucija', u'statutas', u'kodeksas')
        if legal_act.kind == u'įstatymo projektas':
            self.process_law_project(legal_act, split)
        elif legal_act.kind in law_kinds:
            if split:
                # a split name means this act amends an existing law
                self.process_law_change(legal_act, split)
            else:
                self.process_law(legal_act)
        else:
            raise SyncException("Unknown 'kind' attribute: %s" % legal_act.kind)
    def sync(self, view):
        """Process every legal act document in the given couch view."""
        for legal_act in view:
            self.process(legal_act)
class Command(BaseCommand):
    help = "Synchronize raw legal acts data with django-sboard nodes."
    def handle(self, *args, **options):
        # Fetch every stored LegalAct document (include_docs avoids a second
        # request per row) and sync each one into sboard nodes.
        view = LegalAct.view('_all_docs', include_docs=True)
        processor = SyncProcessor()
        processor.sync(view)
Fixed a bug in synclegalacts management command.
# coding: utf-8
import datetime
from django.core.management.base import BaseCommand
from couchdbkit.exceptions import ResourceNotFound, ResourceConflict
from manoseimas.legal_acts.models import LegalAct
from manoseimas.legislation.models import Law, LawChange, LawProject
from manoseimas.legislation.utils import normalize, split_law_name
from sboard.models import couch
from sboard.utils import get_node_id
class SyncException(Exception):
    """Raised when a legal act document cannot be synchronized."""
    pass
class SyncProcessor(object):
    """
    Synchronizes raw legal act documents into django-sboard nodes.

    Law documents become Law nodes; amendments and projects become
    LawChange/LawProject nodes linked to their parent laws.
    """
    def __init__(self):
        self.relations = {}
    def get_by_number(self, number):
        """Return the node with the given legal act number, or None."""
        try:
            return couch.view('legislation/by_number', key=number).first()
        except ResourceNotFound:
            return None
    def get_by_cleaned_name(self, cleaned_name):
        """Return the node with the given normalized name, or None."""
        try:
            return couch.view('legislation/by_cleaned_name',
                              key=cleaned_name).first()
        except ResourceNotFound:
            return None
    def get_parents(self, split=None):
        """Map normalized parent law names to existing node ids.

        Names without a matching node are silently skipped.
        """
        parents = []
        split = split or []
        for cleaned_name in split:
            node = self.get_by_cleaned_name(cleaned_name)
            if node is not None:
                parents.append(node._id)
        return parents
    def date_to_datetime(self, date):
        """Convert a date to a datetime at midnight."""
        return datetime.datetime.combine(date, datetime.time())
    def update_node(self, cls, legal_act, node_id, split=None):
        """Create or refresh the node of type `cls` for `legal_act`."""
        node = self.get_by_number(legal_act.number)
        if node is None:
            node = cls()
            node._id = node_id
        node.number = legal_act.number
        node.title = legal_act.name
        node.cleaned_name = normalize(legal_act.name)
        node.created = self.date_to_datetime(legal_act.date)
        node.parents = self.get_parents(split)
        # XXX: temporary, for cleaning database.
        if 'body' in node:
            del node.body
        try:
            node.save()
        except ResourceConflict:
            # concurrent update won; skip attaching the body since our save
            # did not go through
            pass
        # TODO: Some legal acts can have the same name, but different
        # numbers. This must somehow be handled.
        #
        # Possible solution - sync all legal acts starting from oldest, if
        # a legal act with same name exists, then track it as history node
        # with an update that comes with newer legal act with same name.
        else:
            node.set_body(legal_act.current_version(), 'text/html')
        print('Node: %s' % node._id)
    def process_law(self, legal_act):
        """Sync a primary law; node id is derived from the law name."""
        node_id = get_node_id(legal_act.name)
        self.update_node(Law, legal_act, node_id)
    def process_law_change(self, legal_act, split):
        """Sync a law amendment; gets an auto-generated node id."""
        node_id = get_node_id()
        self.update_node(LawChange, legal_act, node_id, split)
    def process_law_project(self, legal_act, split):
        """Sync a law project; gets an auto-generated node id."""
        node_id = get_node_id()
        self.update_node(LawProject, legal_act, node_id, split)
    def process(self, legal_act):
        """Dispatch one legal act document by its 'kind' attribute.

        Documents lacking 'kind' or 'name' are skipped silently for now.
        """
        if 'kind' not in legal_act:
            return
            # TODO: examine documents, that does not have 'kind' attribute
            #raise SyncException("Document does not have 'kind' attribute.")
        if 'name' not in legal_act:
            return
            # TODO: examine documents, that does not have 'name' attribute
            #raise SyncException("Document does not have 'name' attribute.")
        split = split_law_name(legal_act.name)
        law_kinds = (u'įstatymas', u'konstitucija', u'statutas', u'kodeksas')
        if legal_act.kind == u'įstatymo projektas':
            self.process_law_project(legal_act, split)
        elif legal_act.kind in law_kinds:
            if split:
                # a split name means this act amends an existing law
                self.process_law_change(legal_act, split)
            else:
                self.process_law(legal_act)
        else:
            raise SyncException("Unknown 'kind' attribute: %s" % legal_act.kind)
    def sync(self, view):
        """Process every legal act document in the given couch view."""
        for legal_act in view:
            self.process(legal_act)
class Command(BaseCommand):
    help = "Synchronize raw legal acts data with django-sboard nodes."
    def handle(self, *args, **options):
        """Sync every stored LegalAct document into sboard nodes."""
        all_docs = LegalAct.view('_all_docs', include_docs=True)
        SyncProcessor().sync(all_docs)
|
# -*- coding: utf-8 -*-
'''
SPACK package manager
'''
from __future__ import absolute_import
# Import python libs
import copy
import logging
# Import salt libs
import salt.utils
from salt.exceptions import CommandExecutionError, MinionError
log = logging.getLogger(__name__)
def spack_directory():
    """Return the location of the spack checkout.

    Reads pillar key 'spack:directory', falling back to ~/spack.
    Specialized (rather than going through defaults()) to avoid infinite
    recurrence.
    """
    from os.path import join as path_join
    fallback = path_join(__grains__['userhome'], 'spack')
    return __salt__['pillar.get']('spack:directory', fallback)
def defaults(key=None, value=None):
    """Default pillar values.

    Returns the dict of spack-related defaults, or a single entry if `key`
    is given. If both `key` and `value` are passed, `value` is returned
    unchanged, which lets callers use this as a pillar-override helper.
    """
    from os.path import join
    try:
        # prefer spack's own scope/canonicalization helpers when importable
        _init_spack()
        from spack.cmd import default_list_scope as dls
        from spack.repository import canonicalize_path
    except ImportError:
        # fall back to stand-ins when spack is not available yet
        dls = "spack"
        def canonicalize_path(x):
            from os.path import expanduser, expandvars, abspath
            return abspath(expanduser(expandvars(x)))
    if key is not None and value is not None:
        # explicit caller-provided value wins over pillar lookups
        return value
    home = __grains__['userhome']
    config_dir = join(home, '.spack')
    repo_prefix = join(home, '.spack_repos')
    values = {
        'directory': spack_directory(),
        'config_dir':
            __salt__['pillar.get']('spack:config_location', config_dir),
        'repo_prefix':
            __salt__['pillar.get']('spack:repo_prefix', repo_prefix),
        'scope':
            __salt__['pillar.get']('spack:default_config_location', dls)
    }
    # expand ~ and environment variables in the path-valued entries
    values['config_dir'] = canonicalize_path(values['config_dir'])
    values['repo_prefix'] = canonicalize_path(values['repo_prefix'])
    return values[key] if key is not None else values
def module_name(name, compiler=None):
    """Figure out environment-module name(s) from spack specs.

    `name` is a spack spec string; `compiler`, if given, is inserted into
    the spec as a "%<compiler>" constraint after the first token. Raises
    ValueError when a spec matches zero or more than one installed package.
    """
    _init_spack()
    from spack.modules import module_types
    from spack import installed_db
    # tcl is the only module system handled here
    mt = module_types['tcl']
    if compiler is not None:
        # splice the compiler constraint in right after the package token
        names = name.split()
        names.insert(1, "%" + compiler.rstrip().lstrip())
        name = ' '.join(names)
    specs = parse_specs(name, concretize=True, normalize=True)
    result = []
    for spec in specs:
        mods = installed_db.query(spec)
        if len(mods) == 0:
            raise ValueError("No module found for %s." % spec)
        elif len(mods) > 1:
            raise ValueError(
                "More than one module matches %s (%s)." % (spec, mods))
        result.append(mt(mods[0]).use_name)
    return result
def _init_spack():
    """Make the spack python libraries importable and configure spack.

    Appends spack's lib directories to sys.path (only once) before
    importing the spack package itself.
    """
    # dropped unused imports (expanduser, getcwd) from the original
    from os.path import join
    from sys import path
    spackdir = spack_directory()
    libdir = join(spackdir, 'lib', 'spack')
    if libdir not in path:
        path.append(libdir)
        path.append(join(libdir, 'external'))
    import spack
    spack.debug = False
    # reuse the directory already resolved above instead of a second
    # pillar lookup
    spack.spack_working_dir = spackdir
def repo_exists(path, scope=None, prefix=None):
    """ Checks whether input is a known repo

    :param path: repository path (absolute, or relative to the repo
        prefix)
    :param scope: optional config scope to look in
    :param prefix: optional repo prefix overriding the pillar default
    :return: True if the repository is already registered
    """
    _init_spack()
    from spack.repository import Repo
    from spack.config import get_config
    # Bug fix: the prefix argument was accepted but never forwarded to
    # repo_path(), so non-default prefixes were silently ignored.
    cannon = repo_path(path, prefix)
    repos = get_config('repos', defaults('scope', scope))
    repo = Repo(cannon)
    return repo.root in repos or path in repos
def repo_path(path="", prefix=None):
    """ Canonical filesystem location of a repository.

    Relative paths are anchored at the repo prefix; paths starting with
    '/', '$' or '~' are taken as given and only canonicalized.
    """
    _init_spack()
    from os.path import join
    from spack.repository import canonicalize_path
    anchored = path
    if not path:
        anchored = defaults('repo_prefix', prefix)
    elif not path.startswith(('/', '$', '~')):
        anchored = join(defaults('repo_prefix', prefix), path)
    return canonicalize_path(anchored)
def add_repo(path, prefix=None, scope=None):
    """ Adds path to spack repos

    :param path: repository path (absolute or relative to the prefix)
    :param prefix: optional repo prefix overriding the pillar default
    :param scope: optional config scope to modify
    :return: True if the repo was added, False if already registered
    """
    _init_spack()
    # Removed unused imports (collections.namedtuple,
    # spack.cmd.default_list_scope) from the original.
    from spack.repository import Repo
    from spack.config import get_config, update_config
    cannon = repo_path(path, prefix)
    repos = get_config('repos', defaults('scope', scope)) or []
    repo = Repo(cannon)
    if repo.root in repos or path in repos:
        return False
    # New repos take precedence, hence insert at the front.
    repos.insert(0, cannon)
    update_config('repos', repos, defaults('scope', scope))
    return True
def parse_specs(specs, concretize=False, normalize=False):
    """ Thin wrapper delegating to ``spack.cmd.parse_specs``. """
    _init_spack()
    from spack.cmd import parse_specs as _parse
    return _parse(specs, concretize=concretize, normalize=normalize)
def package_prefix(specs):
    """ Return the installation prefix of exactly one package.

    :param specs: spack spec string matching a single package
    :raises RuntimeError: if the specs match zero or several packages
    """
    _init_spack()
    from spack.cmd import parse_specs
    matches = parse_specs(specs, concretize=True)
    if not matches:
        raise RuntimeError("No package found")
    if len(matches) > 1:
        raise RuntimeError("Specs correspond to more than one package")
    return matches[0].prefix
def is_installed(name):
    """ True when every package matching the spec ``name`` is installed. """
    _init_spack()
    from spack import repo
    from spack.cmd import parse_specs
    return all(
        repo.get(spec).installed
        for spec in parse_specs(name, concretize=True)
    )
def install(name, keep_prefix=False, keep_stage=False, ignore_deps=False, environs=None, compiler=None):
    """ Install the packages described by the spec string ``name``.

    :param name: spack spec string
    :param keep_prefix: keep the install prefix on failure
    :param keep_stage: keep the build stage after install
    :param ignore_deps: skip installing dependencies
    :param environs: optional dict merged into ``os.environ`` before
        building
    :param compiler: optional compiler spec spliced into the spec as a
        ``%compiler`` constraint
    :return: tuple ``(installed, failed)`` of package name lists
    """
    _init_spack()
    from spack import repo, installed_db
    from spack.cmd import parse_specs
    from os import environ
    if environs is not None:
        # NOTE(review): this mutates the process environment for the
        # rest of the minion's lifetime, not just for this build.
        environ.update(environs)
    if compiler is not None:
        # Splice "%<compiler>" right after the package name.
        names = name.split()
        names.insert(1, "%" + compiler.rstrip().lstrip())
        name = ' '.join(names)
    specs = parse_specs(name, concretize=True)
    packages = [repo.get(spec) for spec in specs]
    # Only packages that are not installed yet are built.
    new_pkgs = [u for u in packages if not u.installed]
    for package in new_pkgs:
        # One write transaction per package install.
        with installed_db.write_transaction():
            package.do_install(
                keep_prefix=keep_prefix,
                keep_stage=keep_stage,
                ignore_deps=ignore_deps
            )
    return [p.name for p in new_pkgs if p.installed], \
        [p.name for p in new_pkgs if not p.installed]
Spack does not take a list of packages anymore
# -*- coding: utf-8 -*-
'''
SPACK package manager
'''
from __future__ import absolute_import
# Import python libs
import copy
import logging
# Import salt libs
import salt.utils
from salt.exceptions import CommandExecutionError, MinionError
log = logging.getLogger(__name__)
def spack_directory():
    """ Specialized to avoid infinite recurrence

    Returns the spack checkout location: the ``spack:directory`` pillar
    value, defaulting to ``<userhome>/spack``.  Kept out of
    :func:`defaults` because that function initializes spack and would
    recurse through this lookup.
    """
    from os.path import join
    default = join(__grains__['userhome'], 'spack')
    return __salt__['pillar.get']('spack:directory', default)
def defaults(key=None, value=None):
    """ Default pillar values

    Resolves the spack-related pillar settings, falling back to paths
    under the user's home directory when no pillar data is set.

    :param key: optional name of a single setting to return
        ('directory', 'config_dir', 'repo_prefix' or 'scope')
    :param value: optional override; when both key and value are given,
        value is returned unchanged
    :return: the single requested value, or a dict of all settings
    """
    from os.path import join
    try:
        # Prefer spack's own helpers when the checkout is importable.
        _init_spack()
        from spack.cmd import default_list_scope as dls
        from spack.repository import canonicalize_path
    except ImportError:
        # Spack not importable yet (e.g. first provisioning run): fall
        # back to a literal scope name and a local path expansion.
        dls = "spack"
        def canonicalize_path(x):
            from os.path import expanduser, expandvars, abspath
            return abspath(expanduser(expandvars(x)))
    # An explicit override short-circuits all pillar lookups.
    if key is not None and value is not None:
        return value
    home = __grains__['userhome']
    config_dir = join(home, '.spack')
    repo_prefix = join(home, '.spack_repos')
    values = {
        'directory': spack_directory(),
        'config_dir':
        __salt__['pillar.get']('spack:config_location', config_dir),
        'repo_prefix':
        __salt__['pillar.get']('spack:repo_prefix', repo_prefix),
        'scope':
        __salt__['pillar.get']('spack:default_config_location', dls)
    }
    # Normalize user-supplied paths ('~', '$VAR', relative forms).
    values['config_dir'] = canonicalize_path(values['config_dir'])
    values['repo_prefix'] = canonicalize_path(values['repo_prefix'])
    return values[key] if key is not None else values
def module_name(name, compiler=None):
    """ Figures out module name(s) from specs

    :param name: spack spec string (may contain several specs)
    :param compiler: optional compiler spec; when given it is spliced
        into the spec as a ``%compiler`` constraint
    :return: list of tcl module names, one per concretized spec
    :raises ValueError: if a spec matches zero or several installed
        packages
    """
    _init_spack()
    from spack.modules import module_types
    from spack import installed_db
    mt = module_types['tcl']
    if compiler is not None:
        # Splice "%<compiler>" right after the package name.
        # strip() replaces the redundant rstrip().lstrip() chain.
        names = name.split()
        names.insert(1, "%" + compiler.strip())
        name = ' '.join(names)
    specs = parse_specs(name, concretize=True, normalize=True)
    result = []
    for spec in specs:
        mods = installed_db.query(spec)
        if len(mods) == 0:
            raise ValueError("No module found for %s." % spec)
        elif len(mods) > 1:
            raise ValueError(
                "More than one module matches %s (%s)." % (spec, mods))
        result.append(mt(mods[0]).use_name)
    return result
def _init_spack():
    """ Make the spack checkout importable and configure it.

    Appends spack's library directories to ``sys.path`` (only once),
    disables debug output and pins the working directory to the
    checkout.
    """
    from os.path import join
    from sys import path
    # Removed unused imports (expanduser, getcwd) from the original.
    spackdir = spack_directory()
    libdir = join(spackdir, 'lib', 'spack')
    if libdir not in path:
        path.append(libdir)
        path.append(join(libdir, 'external'))
    import spack
    spack.debug = False
    # Reuse spackdir instead of a second spack_directory() pillar lookup.
    spack.spack_working_dir = spackdir
def repo_exists(path, scope=None, prefix=None):
    """ Checks whether input is a known repo

    :param path: repository path (absolute, or relative to the repo
        prefix)
    :param scope: optional config scope to look in
    :param prefix: optional repo prefix overriding the pillar default
    :return: True if the repository is already registered
    """
    _init_spack()
    from spack.repository import Repo
    from spack.config import get_config
    # Bug fix: the prefix argument was accepted but never forwarded to
    # repo_path(), so non-default prefixes were silently ignored.
    cannon = repo_path(path, prefix)
    repos = get_config('repos', defaults('scope', scope))
    repo = Repo(cannon)
    return repo.root in repos or path in repos
def repo_path(path="", prefix=None):
    """ Return the canonical filesystem path of a repository.

    Empty paths resolve to the repo prefix; relative paths (not
    starting with '/', '$' or '~') are joined onto the repo prefix.
    """
    _init_spack()
    from os.path import join
    from spack.repository import canonicalize_path
    if len(path) == 0:
        path = defaults('repo_prefix', prefix)
    elif path[0] not in ['/', '$', '~']:
        path = join(defaults('repo_prefix', prefix), path)
    return canonicalize_path(path)
def add_repo(path, prefix=None, scope=None):
    """ Adds path to spack repos

    :param path: repository path (absolute or relative to the prefix)
    :param prefix: optional repo prefix overriding the pillar default
    :param scope: optional config scope to modify
    :return: True if the repo was added, False if already registered
    """
    _init_spack()
    # Removed unused imports (collections.namedtuple,
    # spack.cmd.default_list_scope) from the original.
    from spack.repository import Repo
    from spack.config import get_config, update_config
    cannon = repo_path(path, prefix)
    repos = get_config('repos', defaults('scope', scope)) or []
    repo = Repo(cannon)
    if repo.root in repos or path in repos:
        return False
    # New repos take precedence, hence insert at the front.
    repos.insert(0, cannon)
    update_config('repos', repos, defaults('scope', scope))
    return True
def parse_specs(specs, concretize=False, normalize=False):
    """ Thin wrapper delegating to ``spack.cmd.parse_specs``.

    Returns spack Spec objects for the given spec string(s).
    """
    _init_spack()
    from spack.cmd import parse_specs
    return parse_specs(specs, concretize=concretize, normalize=normalize)
def package_prefix(specs):
    """ Return package prefix

    :param specs: spack spec string matching exactly one package
    :return: installation prefix of the matched package
    :raises RuntimeError: if zero or more than one package matches
    """
    _init_spack()
    from spack.cmd import parse_specs
    packages = parse_specs(specs, concretize=True)
    if len(packages) == 0:
        raise RuntimeError("No package found")
    elif len(packages) > 1:
        raise RuntimeError("Specs correspond to more than one package")
    return packages[0].prefix
def is_installed(name):
    """ True when every package matching ``name`` is installed.

    :param name: a spec string, or an iterable of spec strings
    """
    _init_spack()
    from spack import repo
    from spack.cmd import parse_specs
    # Accept both a single spec string and a list of them.
    names = [name] if isinstance(name, str) else name
    for spec_string in names:
        # The original wrapped this in ``try: ... except: raise``, a
        # bare except that only re-raised; the no-op wrapper is dropped.
        for spec in parse_specs(spec_string, concretize=True):
            if not repo.get(spec).installed:
                return False
    return True
def install(name, keep_prefix=False, keep_stage=False, ignore_deps=False, environs=None, compiler=None):
    """ Install the packages described by ``name``.

    :param name: a spec string, or an iterable of spec strings (each
        handled by a recursive call)
    :param keep_prefix: keep the install prefix on failure
    :param keep_stage: keep the build stage after install
    :param ignore_deps: skip installing dependencies
    :param environs: optional dict merged into ``os.environ`` before
        building
    :param compiler: optional compiler spec spliced into the spec as a
        ``%compiler`` constraint
    :return: tuple ``(installed, failed)`` of package name lists
    """
    _init_spack()
    from spack import repo, installed_db
    from spack.cmd import parse_specs
    from os import environ
    if not isinstance(name, str):
        # List input: install each spec separately and merge results.
        results = [], []
        for pkg in name:
            a, b = install(pkg, keep_prefix=keep_prefix, keep_stage=keep_stage,
                           ignore_deps=ignore_deps, environs=environs,
                           compiler=compiler)
            results[0].extend(a)
            results[1].extend(b)
        return results
    if environs is not None:
        # NOTE(review): this mutates the process environment for the
        # rest of the minion's lifetime, not just for this build.
        environ.update(environs)
    if compiler is not None:
        # Splice "%<compiler>" right after the package name.
        names = name.split()
        names.insert(1, "%" + compiler.rstrip().lstrip())
        name = ' '.join(names)
    specs = parse_specs(name, concretize=True)
    packages = [repo.get(spec) for spec in specs]
    # Only packages that are not installed yet are built.
    new_pkgs = [u for u in packages if not u.installed]
    for package in new_pkgs:
        # One write transaction per package install.
        with installed_db.write_transaction():
            package.do_install(
                keep_prefix=keep_prefix,
                keep_stage=keep_stage,
                ignore_deps=ignore_deps
            )
    return [p.name for p in new_pkgs if p.installed], \
        [p.name for p in new_pkgs if not p.installed]
|
from __future__ import print_function, division, absolute_import
import random
import math
import copy
import numbers
import multiprocessing
import threading
import traceback
import sys
import os
import time
import json
import types
import numpy as np
import cv2
import imageio
import scipy.spatial.distance
import six
import six.moves as sm
import skimage.draw
import skimage.measure
import collections
import matplotlib.pyplot as plt
import shapely
import shapely.geometry
import shapely.ops
from PIL import Image as PIL_Image, ImageDraw as PIL_ImageDraw, ImageFont as PIL_ImageFont
if sys.version_info[0] == 2:
import cPickle as pickle
from Queue import Empty as QueueEmpty, Full as QueueFull
import socket
BrokenPipeError = socket.error
elif sys.version_info[0] == 3:
import pickle
from queue import Empty as QueueEmpty, Full as QueueFull
xrange = range
# sentinel used by augmenters to mean "apply to all channels/items"
ALL = "ALL"
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
# filepath to the quokka image, its annotations and depth map
QUOKKA_FP = os.path.join(FILE_DIR, "quokka.jpg")
QUOKKA_ANNOTATIONS_FP = os.path.join(FILE_DIR, "quokka_annotations.json")
QUOKKA_DEPTH_MAP_HALFRES_FP = os.path.join(FILE_DIR, "quokka_depth_map_halfres.png")
# default truetype font shipped with the library, used by draw_text()
DEFAULT_FONT_FP = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    "DejaVuSans.ttf"
)
# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)
# numpy scalar dtype families, used for dtype checks throughout the library
NP_FLOAT_TYPES = set(np.sctypes["float"])
NP_INT_TYPES = set(np.sctypes["int"])
NP_UINT_TYPES = set(np.sctypes["uint"])
IMSHOW_BACKEND_DEFAULT = "matplotlib"
# interpolation names/flags accepted by the imresize functions
IMRESIZE_VALID_INTERPOLATIONS = ["nearest", "linear", "area", "cubic",
    cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
def is_np_array(val):
    """
    Check whether a variable is a numpy array.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a numpy array, otherwise False.

    """
    # Deliberately NOT isinstance(val, (np.ndarray, np.generic)):
    # np.generic would also match numpy scalar values, which are not arrays.
    return isinstance(val, np.ndarray)
def is_single_integer(val):
    """
    Check whether a variable is a single integer (booleans excluded).

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is an integer, otherwise False.

    """
    if isinstance(val, bool):
        # bool is a subclass of int, but is not treated as a number here.
        return False
    return isinstance(val, numbers.Integral)
def is_single_float(val):
    """
    Check whether a variable is a single float (ints and booleans excluded).

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a float, otherwise False.

    """
    # Equivalent to the original
    # `Real and not is_single_integer(val) and not bool`,
    # with the integer check inlined.
    if isinstance(val, bool) or isinstance(val, numbers.Integral):
        return False
    return isinstance(val, numbers.Real)
def is_single_number(val):
    """
    Check whether a variable is a single number, i.e. an integer or float.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a number, otherwise False.

    """
    # Inlined union of is_single_integer() and is_single_float():
    # any non-bool Integral or Real value counts as a number.
    if isinstance(val, bool):
        return False
    return isinstance(val, numbers.Integral) or isinstance(val, numbers.Real)
def is_iterable(val):
    """
    Check whether a variable is iterable.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is an iterable, otherwise False.

    """
    # collections.Iterable was a deprecated alias removed in python 3.10;
    # the supported location is collections.abc.Iterable. Fall back to
    # the old name on python 2, which has no collections.abc.
    try:
        from collections.abc import Iterable
    except ImportError:  # python 2
        from collections import Iterable
    return isinstance(val, Iterable)
# TODO convert to is_single_string() or rename is_single_integer/float/number()
def is_string(val):
    """
    Check whether a variable is a string.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a string, otherwise False.

    """
    # six.string_types covers str on python 3 and str/unicode on python 2.
    string_classes = six.string_types
    return isinstance(val, string_classes)
def is_single_bool(val):
    """
    Check whether a variable is a boolean.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a boolean, otherwise False.

    """
    # isinstance() is the idiomatic check; bool cannot be subclassed in
    # python, so this is equivalent to the previous type(val) == type(True).
    # Note that numpy bools (np.bool_) are not python bools and return False.
    return isinstance(val, bool)
def is_integer_array(val):
    """
    Check whether a variable is a numpy array with an integer dtype.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a numpy integer array, otherwise False.

    """
    # Inlined is_np_array(): must be an ndarray before dtype is inspected.
    if not isinstance(val, np.ndarray):
        return False
    return issubclass(val.dtype.type, np.integer)
def is_float_array(val):
    """
    Check whether a variable is a numpy array with a floating point dtype.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a numpy float array, otherwise False.

    """
    # Inlined is_np_array(): must be an ndarray before dtype is inspected.
    if not isinstance(val, np.ndarray):
        return False
    return issubclass(val.dtype.type, np.floating)
def is_callable(val):
    """
    Check whether a variable is a callable, e.g. a function.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a callable, otherwise False.

    """
    version = sys.version_info
    # Early python 3 (<= 3.2) did not support callable(), apparently;
    # fall back to probing for __call__ there.
    if version[0] == 3 and version[1] <= 2:
        return hasattr(val, '__call__')
    return callable(val)
def is_generator(val):
    """
    Check whether a variable is a generator.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a generator, otherwise False.

    """
    # Matches generator objects only, not arbitrary iterators.
    return isinstance(val, types.GeneratorType)
def caller_name():
    """
    Return the name of the calling function.

    Returns
    -------
    str
        The name of the function that called this one.

    """
    # Frame 1 is the direct caller of this function.
    caller_frame = sys._getframe(1)
    return caller_frame.f_code.co_name
def seed(seedval):
    """
    Set the seed used by the global random state and thereby all randomness
    in the library.

    This random state is by default used by all augmenters. Under special
    circumstances (e.g. when an augmenter is switched to deterministic mode),
    the global random state is replaced by another -- local -- one.
    The replacement is dependent on the global random state.

    Parameters
    ----------
    seedval : int
        The seed to use.

    """
    # Re-seeds the module-level RandomState in place, so references held
    # elsewhere keep pointing at the (now re-seeded) object.
    CURRENT_RANDOM_STATE.seed(seedval)
def current_random_state():
    """
    Returns the current/global random state of the library.

    Returns
    -------
    numpy.random.RandomState
        The current/global random state.

    """
    return CURRENT_RANDOM_STATE
def new_random_state(seed=None, fully_random=False):
    """
    Create a new numpy random state.

    Parameters
    ----------
    seed : None or int, optional
        Optional seed value to use.
        The same datatypes are allowed as for ``numpy.random.RandomState(seed)``.

    fully_random : bool, optional
        Whether to use numpy's random initialization for the
        RandomState (used if set to True). If False, a seed is sampled from
        the global random state, which is a bit faster and hence the default.

    Returns
    -------
    numpy.random.RandomState
        The new random state.

    """
    if seed is None and not fully_random:
        # Sampling a seed from the global state and seeding explicitly
        # is considerably faster than RandomState() with OS entropy.
        seed = CURRENT_RANDOM_STATE.randint(0, 10**6, 1)[0]
    return np.random.RandomState(seed)
def dummy_random_state():
    """
    Create a dummy random state, always seeded with 1.

    Returns
    -------
    numpy.random.RandomState
        The new random state.

    """
    return np.random.RandomState(seed=1)
def copy_random_state(random_state, force_copy=False):
    """
    Creates a copy of a random state.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        The random state to copy.

    force_copy : bool, optional
        If True, this function will always create a copy of every random
        state. If False, it will not copy numpy's default random state,
        but all other random states.

    Returns
    -------
    rs_copy : numpy.random.RandomState
        The copied random state.

    """
    # Identity check ("is") instead of "==": np.random is a module, not
    # a RandomState, so equality could only ever hold by identity anyway.
    if random_state is np.random and not force_copy:
        return random_state
    # The seed is irrelevant here; the state is overwritten below.
    rs_copy = np.random.RandomState(1)
    rs_copy.set_state(random_state.get_state())
    return rs_copy
def derive_random_state(random_state):
    """
    Derive a single new random state from an existing random state or seed.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state or seed from which to derive the new random state.

    Returns
    -------
    numpy.random.RandomState
        Derived random state.

    """
    derived = derive_random_states(random_state, n=1)
    return derived[0]
# TODO use this everywhere instead of manual seed + create
def derive_random_states(random_state, n=1):
    """
    Derive N new random states from an existing random state or seed.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state or seed from which to derive new random states.

    n : int, optional
        Number of random states to derive.

    Returns
    -------
    list of numpy.random.RandomState
        Derived random states.

    """
    # One base seed is drawn; the i-th derived state is seeded with base+i.
    seed_base = random_state.randint(0, 10**6, 1)[0]
    return [new_random_state(seed_base + i) for i in sm.xrange(n)]
def forward_random_state(random_state):
    """
    Advance the internal state of a random state.

    This makes sure that future calls to the random_state will produce
    new random values.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state to forward.

    """
    # Drawing (and discarding) a single sample advances the state.
    random_state.uniform()
def _quokka_normalize_extract(extract):
    """
    Generate a normalized rectangle to be extracted from the standard quokka image.

    Parameters
    ----------
    extract : 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Unnormalized representation of the image subarea to be extracted.

            * If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)``
              will be extracted from the image.
            * If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
              and ``y2``.
            * If a BoundingBox, then that bounding box's area will be extracted from the image.
            * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
              and a shape matching the full image dimensions (i.e. (643, 960, *)). Then the
              one bounding box will be used similar to BoundingBox.

    Returns
    -------
    bb : imgaug.BoundingBox
        Normalized representation of the area to extract from the standard quokka image.

    """
    # Dispatch on the accepted input forms via guard clauses.
    if extract == "square":
        return BoundingBox(x1=0, y1=0, x2=643, y2=643)
    if isinstance(extract, tuple) and len(extract) == 4:
        x1, y1, x2, y2 = extract
        return BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2)
    if isinstance(extract, BoundingBox):
        return extract
    if isinstance(extract, BoundingBoxesOnImage):
        do_assert(len(extract.bounding_boxes) == 1)
        do_assert(extract.shape[0:2] == (643, 960))
        return extract.bounding_boxes[0]
    raise Exception(
        "Expected 'square' or tuple of four entries or BoundingBox or BoundingBoxesOnImage "
        + "for parameter 'extract', got %s." % (type(extract),)
    )
def _compute_resized_shape(from_shape, to_shape):
    """
    Computes the intended new shape of an image-like array after resizing.

    Parameters
    ----------
    from_shape : tuple or ndarray
        Old shape of the array. Usually expected to be a tuple of form ``(H, W)`` or ``(H, W, C)`` or
        alternatively an array with two or three dimensions.

    to_shape : None or tuple of ints or tuple of floats or int or float or ndarray
        New shape of the array.

            * If None, then `from_shape` will be used as the new shape.
            * If an int ``V``, then the new shape will be ``(V, V, [C])``, where ``C`` will be added if it
              is part of `from_shape`.
            * If a float ``V``, then the new shape will be ``(H*V, W*V, [C])``, where ``H`` and ``W`` are the old
              height/width.
            * If a tuple ``(H', W', [C'])`` of ints, then ``H'`` and ``W'`` will be used as the new height
              and width.
            * If a tuple ``(H', W', [C'])`` of floats (except ``C``), then ``H'`` and ``W'`` will
              be used as the new height and width.
            * If a numpy array, then the array's shape will be used.

    Returns
    -------
    to_shape_computed : tuple of int
        New shape.

    """
    # Arrays are reduced to their shape tuples first.
    if is_np_array(from_shape):
        from_shape = from_shape.shape
    if is_np_array(to_shape):
        to_shape = to_shape.shape
    # Start from the old shape; entries are overwritten below.
    to_shape_computed = list(from_shape)
    if to_shape is None:
        pass
    elif isinstance(to_shape, tuple):
        do_assert(len(from_shape) in [2, 3])
        do_assert(len(to_shape) in [2, 3])
        # Channel counts must agree when both shapes carry one; a channel
        # entry only present in to_shape is appended to the result.
        if len(from_shape) == 3 and len(to_shape) == 3:
            do_assert(from_shape[2] == to_shape[2])
        elif len(to_shape) == 3:
            to_shape_computed.append(to_shape[2])
        do_assert(all([v is None or is_single_number(v) for v in to_shape[0:2]]),
                  "Expected the first two entries in to_shape to be None or numbers, "
                  + "got types %s." % (str([type(v) for v in to_shape[0:2]]),))
        # Per-axis: None keeps the old size, ints are absolute sizes,
        # floats are relative scale factors (rounded to int).
        for i, from_shape_i in enumerate(from_shape[0:2]):
            if to_shape[i] is None:
                to_shape_computed[i] = from_shape_i
            elif is_single_integer(to_shape[i]):
                to_shape_computed[i] = to_shape[i]
            else: # float
                to_shape_computed[i] = int(np.round(from_shape_i * to_shape[i]))
    elif is_single_integer(to_shape) or is_single_float(to_shape):
        # Scalar input: apply the same size/factor to both axes.
        to_shape_computed = _compute_resized_shape(from_shape, (to_shape, to_shape))
    else:
        raise Exception("Expected to_shape to be None or ndarray or tuple of floats or tuple of ints or single int "
                        + "or single float, got %s." % (type(to_shape),))
    return tuple(to_shape_computed)
def quokka(size=None, extract=None):
    """
    Returns an image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        Size of the output image. Input into :func:`imgaug.imresize_single_image`.
        Usually expected to be a tuple ``(H, W)``, where ``H`` is the desired height
        and ``W`` is the width. If None, then the image will not be resized.

    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea of the quokka image to extract:

            * If None, then the whole image will be used.
            * If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)`` will
              be extracted from the image.
            * If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
              and ``y2``.
            * If a BoundingBox, then that bounding box's area will be extracted from the image.
            * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
              and a shape matching the full image dimensions (i.e. ``(643, 960, *)``). Then the
              one bounding box will be used similar to BoundingBox.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.

    """
    img = imageio.imread(QUOKKA_FP, pilmode="RGB")
    # Extract first, then resize, so `size` refers to the cropped area.
    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img = bb.extract_from_image(img)
    if size is not None:
        shape_resized = _compute_resized_shape(img.shape, size)
        img = imresize_single_image(img, shape_resized[0:2])
    return img
def quokka_square(size=None):
    """
    Returns a square image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        Size of the output image. Input into :func:`imgaug.imresize_single_image`.
        Usually expected to be a tuple ``(H, W)``, where ``H`` is the desired height
        and ``W`` is the width. If None, then the image will not be resized.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.

    """
    # Convenience wrapper around quokka() with the square crop preset.
    return quokka(extract="square", size=size)
def quokka_heatmap(size=None, extract=None):
    """
    Returns a heatmap (here: depth map) for the standard example quokka image.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        See :func:`imgaug.quokka`.

    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        See :func:`imgaug.quokka`.

    Returns
    -------
    result : imgaug.HeatmapsOnImage
        Depth map as an heatmap object. Values close to 0.0 denote objects that are close to
        the camera. Values close to 1.0 denote objects that are furthest away (among all shown
        objects).

    """
    img = imageio.imread(QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode="RGB")
    # The depth map is stored at half resolution; upscale to the full
    # 643x960 quokka image size first.
    img = imresize_single_image(img, (643, 960), interpolation="cubic")
    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img = bb.extract_from_image(img)
    # Unlike quokka(), a resize always happens here; size=None keeps the
    # current (possibly cropped) shape.
    if size is None:
        size = img.shape[0:2]
    shape_resized = _compute_resized_shape(img.shape, size)
    img = imresize_single_image(img, shape_resized[0:2])
    img_0to1 = img[..., 0] # depth map was saved as 3-channel RGB
    img_0to1 = img_0to1.astype(np.float32) / 255.0
    img_0to1 = 1 - img_0to1 # depth map was saved as 0 being furthest away
    # The recorded shape is that of the corresponding 3-channel image.
    return HeatmapsOnImage(img_0to1, shape=img_0to1.shape[0:2] + (3,))
def quokka_segmentation_map(size=None, extract=None):
    """
    Returns a segmentation map for the standard example quokka image.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        See :func:`imgaug.quokka`.

    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        See :func:`imgaug.quokka`.

    Returns
    -------
    result : imgaug.SegmentationMapOnImage
        Segmentation map object.

    """
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    # Collect the polygon outline of the quokka from the annotations.
    xx = []
    yy = []
    for kp_dict in json_dict["polygons"][0]["keypoints"]:
        x = kp_dict["x"]
        y = kp_dict["y"]
        xx.append(x)
        yy.append(y)
    # Rasterize the polygon into a binary (0.0/1.0) single-channel map.
    img_seg = np.zeros((643, 960, 1), dtype=np.float32)
    rr, cc = skimage.draw.polygon(np.array(yy), np.array(xx), shape=img_seg.shape)
    img_seg[rr, cc] = 1.0
    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img_seg = bb.extract_from_image(img_seg)
    # The recorded shape is that of the corresponding 3-channel image.
    segmap = SegmentationMapOnImage(img_seg, shape=img_seg.shape[0:2] + (3,))
    if size is not None:
        shape_resized = _compute_resized_shape(img_seg.shape, size)
        segmap = segmap.scale(shape_resized[0:2])
        segmap.shape = tuple(shape_resized[0:2]) + (3,)
    return segmap
def quokka_keypoints(size=None, extract=None):
    """
    Returns example keypoints on the standard example quokka image.

    The keypoints cover the eyes, ears, nose and paws.

    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the keypoints are placed. If None, then the keypoints
        are not projected to any new size (positions on the original image are used).
        Floats lead to relative size changes, ints to absolute sizes in pixels.

    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.

    Returns
    -------
    kpsoi : imgaug.KeypointsOnImage
        Example keypoints on the quokka image.

    """
    # Offset of the extracted subarea; keypoint coordinates are shifted
    # by this amount so they stay relative to the crop.
    left, top = 0, 0
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        left = bb_extract.x1
        top = bb_extract.y1
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    keypoints = []
    for kp_dict in json_dict["keypoints"]:
        keypoints.append(Keypoint(x=kp_dict["x"] - left, y=kp_dict["y"] - top))
    if extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)  # full quokka image size
    kpsoi = KeypointsOnImage(keypoints, shape=shape)
    if size is not None:
        shape_resized = _compute_resized_shape(shape, size)
        kpsoi = kpsoi.on(shape_resized)
    return kpsoi
def quokka_bounding_boxes(size=None, extract=None):
    """
    Returns example bounding boxes on the standard example quokka image.

    Currently only a single bounding box is returned that covers the quokka.

    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the BBs are placed. If None, then the BBs
        are not projected to any new size (positions on the original image are used).
        Floats lead to relative size changes, ints to absolute sizes in pixels.

    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.

    Returns
    -------
    bbsoi : imgaug.BoundingBoxesOnImage
        Example BBs on the quokka image.

    """
    # Offset of the extracted subarea; BB coordinates are shifted by this
    # amount so they stay relative to the crop.
    left, top = 0, 0
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        left = bb_extract.x1
        top = bb_extract.y1
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    bbs = []
    for bb_dict in json_dict["bounding_boxes"]:
        bbs.append(
            BoundingBox(
                x1=bb_dict["x1"] - left,
                y1=bb_dict["y1"] - top,
                x2=bb_dict["x2"] - left,
                y2=bb_dict["y2"] - top
            )
        )
    if extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)  # full quokka image size
    bbsoi = BoundingBoxesOnImage(bbs, shape=shape)
    if size is not None:
        shape_resized = _compute_resized_shape(shape, size)
        bbsoi = bbsoi.on(shape_resized)
    return bbsoi
def angle_between_vectors(v1, v2):
    """
    Returns the angle in radians between vectors `v1` and `v2`.

    From http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python

    Parameters
    ----------
    v1 : (N,) ndarray
        First vector.

    v2 : (N,) ndarray
        Second vector.

    Returns
    -------
    out : float
        Angle in radians.

    Examples
    --------
    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([0, 1, 0]))
    1.5707963267948966

    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([1, 0, 0]))
    0.0

    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([-1, 0, 0]))
    3.141592653589793

    """
    # Normalize both vectors, then take the arccos of their dot product.
    # The clip guards against tiny float excursions outside [-1, 1].
    unit1 = v1 / np.linalg.norm(v1)
    unit2 = v2 / np.linalg.norm(v2)
    cos_angle = np.clip(np.dot(unit1, unit2), -1.0, 1.0)
    return np.arccos(cos_angle)
# TODO is this used anywhere?
def compute_line_intersection_point(x1, y1, x2, y2, x3, y3, x4, y4):
    """
    Compute the intersection point of two infinite lines.

    Taken from https://stackoverflow.com/a/20679579 .

    Parameters
    ----------
    x1, y1 : number
        Coordinates of the first point on line 1. (The lines extend beyond this point.)

    x2, y2 : number
        Coordinates of the second point on line 1. (The lines extend beyond this point.)

    x3, y3 : number
        Coordinates of the first point on line 2. (The lines extend beyond this point.)

    x4, y4 : number
        Coordinates of the second point on line 2. (The lines extend beyond this point.)

    Returns
    -------
    tuple of number or bool
        The coordinate of the intersection point as a tuple ``(x, y)``.
        If the lines are parallel (no intersection point or an infinite number of them), the result is False.

    """
    # Represent each line in the homogeneous form A*x + B*y = C,
    # stored here as (a, b, c) with c = -C.
    a1 = y1 - y2
    b1 = x2 - x1
    c1 = -(x1 * y2 - x2 * y1)
    a2 = y3 - y4
    b2 = x4 - x3
    c2 = -(x3 * y4 - x4 * y3)
    # Cramer's rule: a zero determinant means parallel/identical lines.
    det = a1 * b2 - b1 * a2
    if det == 0:
        return False
    det_x = c1 * b2 - b1 * c2
    det_y = a1 * c2 - c1 * a2
    return det_x / det, det_y / det
def draw_text(img, y, x, text, color=(0, 255, 0), size=25):
    """
    Draw text on an image.

    This uses by default DejaVuSans as its font, which is included in the
    library.

    Parameters
    ----------
    img : (H,W,3) ndarray
        The image array to draw text on.
        Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).

    y : int
        y-coordinate of the top left corner of the text.

    x : int
        x-coordinate of the top left corner of the text.

    text : str
        The text to draw.

    color : iterable of int, optional
        Color of the text to draw. For RGB-images this is expected to be an RGB color.

    size : int, optional
        Font size of the text to draw.

    Returns
    -------
    img_np : (H,W,3) ndarray
        Input image with text drawn on it.

    """
    do_assert(img.dtype in [np.uint8, np.float32])
    input_dtype = img.dtype
    if img.dtype == np.float32:
        # No rescaling: float32 inputs are assumed to already be in the
        # 0-255 range (see docstring); the cast truncates to uint8.
        img = img.astype(np.uint8)
    img = PIL_Image.fromarray(img)
    font = PIL_ImageFont.truetype(DEFAULT_FONT_FP, size)
    context = PIL_ImageDraw.Draw(img)
    context.text((x, y), text, fill=tuple(color), font=font)
    img_np = np.asarray(img)
    img_np.setflags(write=True)  # PIL/asarray returns read only array
    if img_np.dtype != input_dtype:
        # Cast back so float32 inputs yield float32 outputs.
        img_np = img_np.astype(input_dtype)
    return img_np
# TODO rename sizes to size?
def imresize_many_images(images, sizes=None, interpolation=None):
    """
    Resize many images to a specified size.

    Parameters
    ----------
    images : (N,H,W,[C]) ndarray or list of (H,W,[C]) ndarray
        Array of the images to resize.
        Expected to usually be of dtype uint8.

    sizes : float or iterable of int or iterable of float
        The new size of the images, given either as a fraction (a single float) or as
        a ``(height, width)`` tuple of two integers or as a ``(height fraction, width fraction)``
        tuple of two floats.

    interpolation : None or str or int, optional
        The interpolation to use during resize.
        If int, then expected to be one of ``cv2.INTER_NEAREST``,
        ``cv2.INTER_LINEAR``, ``cv2.INTER_AREA`` or ``cv2.INTER_CUBIC``.
        If string, then expected to be one of ``nearest``, ``linear``,
        ``area`` or ``cubic`` (each mapping to the corresponding cv2 constant).
        If None, the interpolation will be chosen automatically: linear
        interpolation for size increases and area interpolation for size
        decreases (the latter is recommended for decimation by the OpenCV
        documentation).

    Returns
    -------
    result : (N,H',W',[C]) ndarray
        Array of the resized images.

    Examples
    --------
    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), 2.0)

    Converts 2 RGB images of height and width 16 to images of height and width 16*2 = 32.

    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (16, 32))

    Converts 2 RGB images of height and width 16 to images of height 16 and width 32.

    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (2.0, 4.0))

    Converts 2 RGB images of height and width 16 to images of height 32 and width 64.

    """
    # we just do nothing if the input contains zero images
    # one could also argue that an exception would be appropriate here
    if len(images) == 0:
        return images
    # verify that all input images have height/width > 0
    do_assert(
        all([image.shape[0] > 0 and image.shape[1] > 0 for image in images]),
        ("Cannot resize images, because at least one image has a height and/or width of zero. "
         + "Observed shapes were: %s.") % (str([image.shape for image in images]),)
    )
    # verify that sizes contains only values >0
    if is_single_number(sizes) and sizes <= 0:
        raise Exception(
            "Cannot resize to the target size %.8f, because the value is zero or lower than zero." % (sizes,))
    elif isinstance(sizes, tuple) and (sizes[0] <= 0 or sizes[1] <= 0):
        sizes_str = [
            "int %d" % (sizes[0],) if is_single_integer(sizes[0]) else "float %.8f" % (sizes[0],),
            "int %d" % (sizes[1],) if is_single_integer(sizes[1]) else "float %.8f" % (sizes[1],),
        ]
        sizes_str = "(%s, %s)" % (sizes_str[0], sizes_str[1])
        raise Exception(
            "Cannot resize to the target sizes %s. At least one value is zero or lower than zero." % (sizes_str,))
    # change afterward the validation to make the above error messages match the original input
    if is_single_number(sizes):
        sizes = (sizes, sizes)
    else:
        do_assert(len(sizes) == 2, "Expected tuple with exactly two entries, got %d entries." % (len(sizes),))
        do_assert(all([is_single_number(val) for val in sizes]),
                  "Expected tuple with two ints or floats, got types %s." % (str([type(val) for val in sizes]),))
    # if input is a list, call this function N times for N images
    # but check beforehand if all images have the same shape, then just convert to a single array and de-convert
    # afterwards
    if isinstance(images, list):
        nb_shapes = len(set([image.shape for image in images]))
        if nb_shapes == 1:
            return list(imresize_many_images(np.array(images), sizes=sizes, interpolation=interpolation))
        else:
            return [imresize_many_images(image[np.newaxis, ...], sizes=sizes, interpolation=interpolation)[0, ...]
                    for image in images]
    shape = images.shape
    do_assert(images.ndim in [3, 4], "Expected array of shape (N, H, W, [C]), got shape %s" % (str(shape),))
    nb_images = shape[0]
    im_height, im_width = shape[1], shape[2]
    nb_channels = shape[3] if images.ndim > 3 else None
    # fractions are interpreted relative to the current image size
    height, width = sizes[0], sizes[1]
    height = int(np.round(im_height * height)) if is_single_float(height) else height
    width = int(np.round(im_width * width)) if is_single_float(width) else width
    if height == im_height and width == im_width:
        return np.copy(images)
    ip = interpolation
    do_assert(ip is None or ip in IMRESIZE_VALID_INTERPOLATIONS)
    if ip is None:
        # BUGFIX: the automatic choice previously used INTER_AREA for size
        # increases and INTER_LINEAR for decreases, which is backwards.
        # Per the OpenCV documentation, INTER_AREA is the preferred method
        # for image decimation (downscaling).
        if height > im_height or width > im_width:
            ip = cv2.INTER_LINEAR
        else:
            ip = cv2.INTER_AREA
    elif ip in ["nearest", cv2.INTER_NEAREST]:
        ip = cv2.INTER_NEAREST
    elif ip in ["linear", cv2.INTER_LINEAR]:
        ip = cv2.INTER_LINEAR
    elif ip in ["area", cv2.INTER_AREA]:
        ip = cv2.INTER_AREA
    else:  # if ip in ["cubic", cv2.INTER_CUBIC]:
        ip = cv2.INTER_CUBIC
    result_shape = (nb_images, height, width)
    if nb_channels is not None:
        result_shape = result_shape + (nb_channels,)
    result = np.zeros(result_shape, dtype=images.dtype)
    for img_idx in sm.xrange(nb_images):
        # TODO fallback to scipy here if image isn't uint8
        result_img = cv2.resize(images[img_idx], (width, height), interpolation=ip)
        # cv2 removes the channel axis if input was (H, W, 1)
        # we re-add it (but only if input was not (H, W))
        if len(result_img.shape) == 2 and nb_channels is not None and nb_channels == 1:
            result_img = result_img[:, :, np.newaxis]
        result[img_idx] = result_img.astype(images.dtype)
    return result
def imresize_single_image(image, sizes, interpolation=None):
    """
    Resize a single image.

    Parameters
    ----------
    image : (H,W,C) ndarray or (H,W) ndarray
        Array of the image to resize.
        Expected to usually be of dtype uint8.

    sizes : float or iterable of int or iterable of float
        See :func:`imgaug.imresize_many_images`.

    interpolation : None or str or int, optional
        See :func:`imgaug.imresize_many_images`.

    Returns
    -------
    out : (H',W',C) ndarray or (H',W') ndarray
        The resized image.

    """
    # remember whether the input was 2d, so the channel axis added here
    # can be removed again before returning
    input_was_2d = (image.ndim == 2)
    if input_was_2d:
        image = image[:, :, np.newaxis]
    do_assert(len(image.shape) == 3, image.shape)
    resized = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
    if input_was_2d:
        return np.squeeze(resized[0, :, :, 0])
    return resized[0, ...]
def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
    """
    Pad an image-like array on its top/right/bottom/left side.

    This function is a wrapper around :func:`numpy.pad`.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pad.

    top : int, optional
        Amount of pixels to add at the top side of the image. Must be 0 or greater.

    right : int, optional
        Amount of pixels to add at the right side of the image. Must be 0 or greater.

    bottom : int, optional
        Amount of pixels to add at the bottom side of the image. Must be 0 or greater.

    left : int, optional
        Amount of pixels to add at the left side of the image. Must be 0 or greater.

    mode : str, optional
        Padding mode to use. See :func:`numpy.pad` for details.
        For mode ``constant``, `cval` is forwarded as ``constant_values``;
        for mode ``linear_ramp``, it is forwarded as ``end_values``.

    cval : number, optional
        Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

    Returns
    -------
    arr_pad : (H',W') ndarray or (H',W',C) ndarray
        Padded array with height ``H'=H+top+bottom`` and width ``W'=W+left+right``.

    """
    do_assert(arr.ndim in [2, 3])
    do_assert(top >= 0)
    do_assert(right >= 0)
    do_assert(bottom >= 0)
    do_assert(left >= 0)
    if top == 0 and right == 0 and bottom == 0 and left == 0:
        # nothing to pad; return an unconnected copy for consistency
        return np.copy(arr)
    pad_widths = [(top, bottom), (left, right)]
    if arr.ndim == 3:
        pad_widths.append((0, 0))  # never pad the channel axis
    if mode == "constant":
        return np.pad(arr, pad_widths, mode=mode, constant_values=cval)
    if mode == "linear_ramp":
        return np.pad(arr, pad_widths, mode=mode, end_values=cval)
    return np.pad(arr, pad_widths, mode=mode)
def compute_paddings_for_aspect_ratio(arr, aspect_ratio):
    """
    Compute the amount of pixels by which an array has to be padded to fulfill an aspect ratio.

    The aspect ratio is given as width/height.
    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array for which to compute pad amounts.

    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.

    Returns
    -------
    result : tuple of int
        Required padding amounts to reach the target aspect ratio, given as a tuple
        of the form ``(top, right, bottom, left)``.

    """
    do_assert(arr.ndim in [2, 3])
    do_assert(aspect_ratio > 0)
    height, width = arr.shape[0:2]
    do_assert(height > 0)
    ratio_now = width / height
    top, right, bottom, left = 0, 0, 0, 0
    if ratio_now < aspect_ratio:
        # image too narrow -> pad left/right
        missing = (aspect_ratio * height) - width
        right = int(np.ceil(missing / 2))
        left = int(np.floor(missing / 2))
    elif ratio_now > aspect_ratio:
        # image too wide -> pad top/bottom
        missing = ((1 / aspect_ratio) * width) - height
        top = int(np.floor(missing / 2))
        bottom = int(np.ceil(missing / 2))
    return top, right, bottom, left
def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0, return_pad_amounts=False):
    """
    Pad an image-like array on its sides so that it matches a target aspect ratio.

    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pad.

    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.

    mode : str, optional
        Padding mode to use. See :func:`numpy.pad` for details.

    cval : number, optional
        Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

    return_pad_amounts : bool, optional
        If False, then only the padded image will be returned. If True, a tuple with two
        entries will be returned, where the first entry is the padded image and the second
        entry are the amounts by which each image side was padded. These amounts are again a
        tuple of the form ``(top, right, bottom, left)``, with each value being an integer.

    Returns
    -------
    arr_padded : (H',W') ndarray or (H',W',C) ndarray
        Padded image, fulfilling the given aspect ratio.

    tuple of int
        Amounts by which the image was padded on each side, given as a tuple ``(top, right, bottom, left)``.
        This tuple is only returned if `return_pad_amounts` was set to True.

    """
    paddings = compute_paddings_for_aspect_ratio(arr, aspect_ratio)
    top, right, bottom, left = paddings
    arr_padded = pad(arr, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
    if return_pad_amounts:
        return arr_padded, paddings
    return arr_padded
def pool(arr, block_size, func, cval=0, preserve_dtype=True):
    """
    Rescale an array by pooling values within blocks.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. Ideally of datatype ``numpy.float64``.

    block_size : int or tuple of int
        Spatial size of each group of values to pool, aka kernel size.
        If a single integer, then a symmetric block of that size along height and width will be used.
        If a tuple of two values, it is assumed to be the block size along height and width of the image-like,
        with pooling happening per channel.
        If a tuple of three values, it is assumed to be the block size along height, width and channels.

    func : callable
        Function to apply to a given block in order to convert it to a single number,
        e.g. :func:`numpy.average`, :func:`numpy.min`, :func:`numpy.max`.

    cval : number, optional
        Value to use in order to pad the array along its border if the array cannot be divided
        by `block_size` without remainder.

    preserve_dtype : bool, optional
        Whether to convert the array back to the input datatype if it is changed away from
        that in the pooling process.

    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after pooling.

    """
    do_assert(arr.ndim in [2, 3])
    is_valid_int = is_single_integer(block_size) and block_size >= 1
    # BUGFIX: the per-entry checks must be combined with all(); previously a
    # bare list comprehension was used, which is truthy for any non-empty
    # block_size and therefore never rejected invalid entries.
    is_valid_tuple = is_iterable(block_size) and len(block_size) in [2, 3] \
        and all([is_single_integer(val) and val >= 1 for val in block_size])
    do_assert(is_valid_int or is_valid_tuple)
    if is_single_integer(block_size):
        block_size = [block_size, block_size]
    if len(block_size) < arr.ndim:
        # pool per channel if no channel block size was given
        block_size = list(block_size) + [1]
    input_dtype = arr.dtype
    arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func, cval=cval)
    if preserve_dtype and arr_reduced.dtype.type != input_dtype:
        arr_reduced = arr_reduced.astype(input_dtype)
    return arr_reduced
def avg_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Rescale an array using average pooling.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. See :func:`imgaug.pool` for details.

    block_size : int or tuple of int
        Size of each block of values to pool. See :func:`imgaug.pool` for details.

    cval : number, optional
        Padding value. See :func:`imgaug.pool` for details.

    preserve_dtype : bool, optional
        Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.

    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after average pooling.

    """
    # thin wrapper around pool() with np.average as the reduction
    return pool(arr, block_size, np.average, cval=cval, preserve_dtype=preserve_dtype)
def max_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Rescale an array using max-pooling.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. See :func:`imgaug.pool` for details.

    block_size : int or tuple of int
        Size of each block of values to pool. See :func:`imgaug.pool` for details.

    cval : number, optional
        Padding value. See :func:`imgaug.pool` for details.

    preserve_dtype : bool, optional
        Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.

    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after max-pooling.

    """
    # thin wrapper around pool() with np.max as the reduction
    return pool(arr, block_size, np.max, cval=cval, preserve_dtype=preserve_dtype)
def draw_grid(images, rows=None, cols=None):
    """
    Convert multiple input images into a single image showing them in a grid.

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        The input images to convert to a grid.
        Expected to be RGB and have dtype uint8.

    rows : None or int, optional
        The number of rows to show in the grid.
        If None, it will be automatically derived.

    cols : None or int, optional
        The number of cols to show in the grid.
        If None, it will be automatically derived.

    Returns
    -------
    grid : (H',W',3) ndarray
        Image of the generated grid.

    """
    if is_np_array(images):
        do_assert(images.ndim == 4)
    else:
        do_assert(is_iterable(images) and is_np_array(images[0]) and images[0].ndim == 3)
    nb_images = len(images)
    do_assert(nb_images > 0)

    # each grid cell is as large as the largest image
    cell_height = max(image.shape[0] for image in images)
    cell_width = max(image.shape[1] for image in images)
    channels = set(image.shape[2] for image in images)
    do_assert(
        len(channels) == 1,
        "All images are expected to have the same number of channels, "
        + "but got channel set %s with length %d instead." % (str(channels), len(channels))
    )
    nb_channels = list(channels)[0]

    # derive missing grid dimensions
    if rows is None and cols is None:
        rows = cols = int(math.ceil(math.sqrt(nb_images)))
    elif rows is not None:
        cols = int(math.ceil(nb_images / rows))
    else:
        rows = int(math.ceil(nb_images / cols))
    do_assert(rows * cols >= nb_images)

    grid = np.zeros((cell_height * rows, cell_width * cols, nb_channels), dtype=np.uint8)
    # paste images in row-major order; smaller images leave their cell
    # partially black
    for cell_idx, image in enumerate(images):
        row_idx = cell_idx // cols
        col_idx = cell_idx % cols
        cell_y = cell_height * row_idx
        cell_x = cell_width * col_idx
        grid[cell_y:cell_y + image.shape[0], cell_x:cell_x + image.shape[1], :] = image
    return grid
def show_grid(images, rows=None, cols=None):
    """
    Convert the input images to a grid image and show it in a new window.

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        See :func:`imgaug.draw_grid`.

    rows : None or int, optional
        See :func:`imgaug.draw_grid`.

    cols : None or int, optional
        See :func:`imgaug.draw_grid`.

    """
    imshow(draw_grid(images, rows=rows, cols=cols))
def imshow(image, backend=IMSHOW_BACKEND_DEFAULT):
    """
    Show an image in a window.

    Parameters
    ----------
    image : (H,W,3) ndarray
        Image to show.

    backend : {'matplotlib', 'cv2'}, optional
        Library to use to show the image. May be either matplotlib or OpenCV ('cv2').
        OpenCV tends to be faster, but apparently causes more technical issues.

    """
    do_assert(backend in ["matplotlib", "cv2"], "Expected backend 'matplotlib' or 'cv2', got %s." % (backend,))
    if backend == "cv2":
        # cv2 expects BGR channel order; drop a potential alpha channel and
        # reverse the channel axis for RGB(A) inputs
        needs_flip = (image.ndim == 3 and image.shape[2] in [3, 4])
        image_bgr = image[..., 0:3][..., ::-1] if needs_flip else image
        window_name = "imgaug-default-window"
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.imshow(window_name, image_bgr)
        cv2.waitKey(0)  # block until a key is pressed
        cv2.destroyWindow(window_name)
    else:
        plt.imshow(image, cmap="gray")
        plt.gcf().canvas.set_window_title("imgaug.imshow(%s)" % (image.shape,))
        plt.show()
def do_assert(condition, message="Assertion failed."):
    """
    Raise an AssertionError when `condition` is not met.

    This behaves like an `assert` statement, but is kept even in optimized
    code (``python -O`` strips `assert` statements), which is why the
    library uses it for input validation.

    Parameters
    ----------
    condition : bool
        If False, an AssertionError is raised.

    message : str, optional
        Error message attached to the raised exception.

    """
    if condition:
        return
    raise AssertionError(str(message))
class HooksImages(object):
    """
    Class to intervene with image augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.

    Parameters
    ----------
    activator : None or callable, optional
        A function that gives permission to execute an augmenter.
        The expected interface is ``f(images, augmenter, parents, default)``,
        where ``images`` are the input images to augment, ``augmenter`` is the
        instance of the augmenter to execute, ``parents`` are previously
        executed augmenters and ``default`` is an expected default value to be
        returned if the activator function does not plan to make a decision
        for the given inputs.

    propagator : None or callable, optional
        A function that gives permission to propagate the augmentation further
        to the children of an augmenter. This happens after the activator.
        If the activator returned False, the propagation step
        will never be executed.
        The expected interface is ``f(images, augmenter, parents, default)``,
        with all arguments having identical meaning to the activator.

    preprocessor : None or callable, optional
        A function to call before an augmenter performed any augmentations.
        The interface is ``f(images, augmenter, parents)``,
        with all arguments having identical meaning to the activator.
        It is expected to return the input images, optionally modified.

    postprocessor : None or callable, optional
        A function to call after an augmenter performed augmentations.
        The interface is the same as for the preprocessor.

    Examples
    --------
    >>> seq = iaa.Sequential([
    >>>     iaa.GaussianBlur(3.0, name="blur"),
    >>>     iaa.Dropout(0.05, name="dropout"),
    >>>     iaa.Affine(translate_px=-5, name="affine")
    >>> ])
    >>>
    >>> def activator(images, augmenter, parents, default):
    >>>     return False if augmenter.name in ["blur", "dropout"] else default
    >>>
    >>> seq_det = seq.to_deterministic()
    >>> images_aug = seq_det.augment_images(images)
    >>> heatmaps_aug = seq_det.augment_images(
    >>>     heatmaps,
    >>>     hooks=ia.HooksImages(activator=activator)
    >>> )

    This augments images and their respective heatmaps in the same way.
    The heatmaps however are only modified by Affine, not by GaussianBlur or
    Dropout.

    """

    def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None):
        self.activator = activator
        self.propagator = propagator
        self.preprocessor = preprocessor
        self.postprocessor = postprocessor

    def is_activated(self, images, augmenter, parents, default):
        """
        Return whether an augmenter may be executed.

        Returns
        -------
        bool
            If True, the augmenter may be executed. If False, it may not be executed.

        """
        if self.activator is None:
            return default
        return self.activator(images, augmenter, parents, default)

    def is_propagating(self, images, augmenter, parents, default):
        """
        Return whether an augmenter may call its children to augment an image.

        This is independent of the augmenter itself possibly changing
        the image without calling its children.

        Returns
        -------
        bool
            If True, the augmenter may propagate to its children. If False, it may not.

        """
        if self.propagator is None:
            return default
        return self.propagator(images, augmenter, parents, default)

    def preprocess(self, images, augmenter, parents):
        """
        Call the preprocessor before the augmentation of images starts (per augmenter).

        Returns
        -------
        (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.

        """
        if self.preprocessor is None:
            return images
        return self.preprocessor(images, augmenter, parents)

    def postprocess(self, images, augmenter, parents):
        """
        Call the postprocessor after the augmentation of images was performed.

        Returns
        -------
        (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.

        """
        if self.postprocessor is None:
            return images
        return self.postprocessor(images, augmenter, parents)
class HooksHeatmaps(HooksImages):
    """
    Class to intervene with heatmap augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.

    This class is currently identical to :class:`HooksImages`; it exists as
    a separate name so heatmap-specific behavior can be added later without
    changing callers.
    """
    pass
class HooksKeypoints(HooksImages):
    """
    Class to intervene with keypoint augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.

    This class is currently identical to :class:`HooksImages`; it exists as
    a separate name so keypoint-specific behavior can be added later without
    changing callers.
    """
    pass
def compute_geometric_median(X, eps=1e-5):
    """
    Estimate the geometric median of points in 2D (Weiszfeld-style iteration).

    Code from https://stackoverflow.com/a/30305181

    Parameters
    ----------
    X : (N,2) ndarray
        Points in 2D. Second axis must be given in xy-form.

    eps : float, optional
        Distance threshold when to return the median.

    Returns
    -------
    (2,) ndarray
        Geometric median as xy-coordinate.

    """
    estimate = np.mean(X, 0)
    while True:
        dists = scipy.spatial.distance.cdist(X, [estimate])
        mask_nonzero = (dists != 0)[:, 0]
        inv_dists = 1 / dists[mask_nonzero]
        sum_inv = np.sum(inv_dists)
        weights = inv_dists / sum_inv
        weighted_mean = np.sum(weights * X[mask_nonzero], 0)
        nb_coincident = len(X) - np.sum(mask_nonzero)
        if nb_coincident == 0:
            candidate = weighted_mean
        elif nb_coincident == len(X):
            # all points coincide with the current estimate
            return estimate
        else:
            # pull the estimate partially towards coinciding points
            residual = (weighted_mean - estimate) * sum_inv
            res_norm = np.linalg.norm(residual)
            inv_res = 0 if res_norm == 0 else nb_coincident / res_norm
            candidate = max(0, 1 - inv_res) * weighted_mean + min(1, inv_res) * estimate
        if scipy.spatial.distance.euclidean(estimate, candidate) < eps:
            return candidate
        estimate = candidate
class Keypoint(object):
    """
    A single keypoint (aka landmark) on an image.

    Parameters
    ----------
    x : number
        Coordinate of the keypoint on the x axis.

    y : number
        Coordinate of the keypoint on the y axis.

    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    @property
    def x_int(self):
        """
        The keypoint's x-coordinate, rounded to the closest integer.

        Returns
        -------
        result : int
            Keypoint's x-coordinate, rounded to the closest integer.

        """
        return int(np.round(self.x))

    @property
    def y_int(self):
        """
        The keypoint's y-coordinate, rounded to the closest integer.

        Returns
        -------
        result : int
            Keypoint's y-coordinate, rounded to the closest integer.

        """
        return int(np.round(self.y))

    def project(self, from_shape, to_shape):
        """
        Project the keypoint onto a new position on a new image.

        E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
        and y=(20 of 100 pixels) and is projected onto a new image with
        size (width=200, height=200), its new position will be (20, 40).

        This is intended for cases where the original image is resized.
        It cannot be used for more complex changes (e.g. padding, cropping).

        Parameters
        ----------
        from_shape : tuple of int
            Shape of the original image. (Before resize.)

        to_shape : tuple of int
            Shape of the new image. (After resize.)

        Returns
        -------
        imgaug.Keypoint
            Keypoint object with new coordinates.

        """
        if from_shape[0:2] == to_shape[0:2]:
            # same spatial size -> return an unconnected copy
            return Keypoint(x=self.x, y=self.y)
        old_height, old_width = from_shape[0:2]
        new_height, new_width = to_shape[0:2]
        return Keypoint(
            x=(self.x / old_width) * new_width,
            y=(self.y / old_height) * new_height
        )

    def shift(self, x=0, y=0):
        """
        Move the keypoint around on an image.

        Parameters
        ----------
        x : number, optional
            Move by this value on the x axis.

        y : number, optional
            Move by this value on the y axis.

        Returns
        -------
        imgaug.Keypoint
            Keypoint object with new coordinates.

        """
        return Keypoint(self.x + x, self.y + y)

    def generate_similar_points_manhattan(self, nb_steps, step_size, return_array=False):
        """
        Generate nearby points to this keypoint based on manhattan distance.

        To generate the first neighbouring points, a distance of S (step size) is moved from the
        center point (this keypoint) to the top, right, bottom and left, resulting in four new
        points. From these new points, the pattern is repeated. Overlapping points are ignored.
        The resulting points have a shape similar to a square rotated by 45 degrees.

        Parameters
        ----------
        nb_steps : int
            The number of steps to move from the center point. nb_steps=1 results in a total of
            5 output points (1 center point + 4 neighbours).

        step_size : number
            The step size to move from every point to its neighbours.

        return_array : bool, optional
            Whether to return the generated points as a list of keypoints or an array
            of shape ``(N,2)``, where ``N`` is the number of generated points and the second axis
            contains the x- (first value) and y- (second value) coordinates.

        Returns
        -------
        points : list of imgaug.Keypoint or (N,2) ndarray
            If return_array was False, then a list of Keypoint.
            Otherwise a numpy array of shape ``(N,2)``, where ``N`` is the number of generated
            points and the second axis contains the x- (first value) and y- (second value)
            coordinates. The center keypoint (the one on which this function was called) is
            always included.

        """
        # TODO add test
        # The diamond's center row contains S+1+S = 1+2*S points; the rows
        # above/below it shrink by 2 points per row. Summing all rows gives a
        # total of S+1+S + 2*(S^2) points.
        nb_points = nb_steps + 1 + nb_steps + 2 * (nb_steps ** 2)
        points = np.zeros((nb_points, 2), dtype=np.float32)

        # row y-coordinates, from the bottom-most row to the top-most row
        yy = np.linspace(self.y - nb_steps * step_size, self.y + nb_steps * step_size, nb_steps + 1 + nb_steps)

        write_idx = 0
        row_width = 1  # the bottom-most row holds a single point
        for row_idx, row_y in enumerate(yy):
            if row_width == 1:
                row_xx = [self.x]
            else:
                half_span = (row_width - 1) // 2 * step_size
                row_xx = np.linspace(self.x - half_span, self.x + half_span, row_width)
            for row_x in row_xx:
                points[write_idx] = [row_x, row_y]
                write_idx += 1
            # rows widen up to the center row, then narrow again
            row_width += 2 if row_idx < nb_steps else -2

        if return_array:
            return points
        return [Keypoint(x=points[i, 0], y=points[i, 1]) for i in sm.xrange(points.shape[0])]

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "Keypoint(x=%.8f, y=%.8f)" % (self.x, self.y)
class KeypointsOnImage(object):
"""
Object that represents all keypoints on a single image.
Parameters
----------
keypoints : list of imgaug.Keypoint
List of keypoints on the image.
shape : tuple of int
The shape of the image on which the keypoints are placed.
Examples
--------
>>> kps = [Keypoint(x=10, y=20), Keypoint(x=34, y=60)]
>>> kps_oi = KeypointsOnImage(kps, shape=image.shape)
"""
def __init__(self, keypoints, shape):
    # `shape` may also be the image array itself, in which case its
    # ``.shape`` attribute is used; otherwise a tuple/list is required.
    self.keypoints = keypoints
    if is_np_array(shape):
        self.shape = shape.shape
    else:
        do_assert(isinstance(shape, (tuple, list)))
        self.shape = tuple(shape)
@property
def height(self):
    """Height (``shape[0]``) of the image the keypoints are placed on."""
    return self.shape[0]
@property
def width(self):
    """Width (``shape[1]``) of the image the keypoints are placed on."""
    return self.shape[1]
@property
def empty(self):
"""
Returns whether this object contains zero keypoints.
Returns
-------
result : bool
True if this object contains zero keypoints.
"""
return len(self.keypoints) == 0
def on(self, image):
    """
    Project the keypoints from one image (shape) to a new one.

    Parameters
    ----------
    image : ndarray or tuple of int
        New image onto which the keypoints are to be projected.
        May also simply be that new image's shape tuple.

    Returns
    -------
    keypoints : imgaug.KeypointsOnImage
        Object containing all projected keypoints.

    """
    shape = image.shape if is_np_array(image) else image
    if shape[0:2] == self.shape[0:2]:
        # same spatial size -> no projection needed, just copy
        return self.deepcopy()
    projected = [kp.project(self.shape, shape) for kp in self.keypoints]
    return KeypointsOnImage(projected, shape)
def draw_on_image(self, image, color=(0, 255, 0), size=3, copy=True, raise_if_out_of_image=False):
"""
Draw all keypoints onto a given image. Each keypoint is marked by a square of a chosen color and size.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the keypoints.
This image should usually have the same shape as
set in KeypointsOnImage.shape.
color : int or list of int or tuple of int or (3,) ndarray, optional
The RGB color of all keypoints. If a single int ``C``, then that is
equivalent to ``(C,C,C)``.
size : int, optional
The size of each point. If set to ``C``, each square will have size ``C x C``.
copy : bool, optional
Whether to copy the image before drawing the points.
raise_if_out_of_image : bool, optional
Whether to raise an exception if any keypoint is outside of the image.
Returns
-------
image : (H,W,3) ndarray
Image with drawn keypoints.
"""
if copy:
image = np.copy(image)
height, width = image.shape[0:2]
for keypoint in self.keypoints:
y, x = keypoint.y_int, keypoint.x_int
if 0 <= y < height and 0 <= x < width:
x1 = max(x - size//2, 0)
x2 = min(x + 1 + size//2, width)
y1 = max(y - size//2, 0)
y2 = min(y + 1 + size//2, height)
image[y1:y2, x1:x2] = color
else:
if raise_if_out_of_image:
raise Exception("Cannot draw keypoint x=%.8f, y=%.8f on image with shape %s." % (y, x, image.shape))
return image
def shift(self, x=0, y=0):
    """
    Move all keypoints around on an image.

    Parameters
    ----------
    x : number, optional
        Move each keypoint by this value on the x axis.

    y : number, optional
        Move each keypoint by this value on the y axis.

    Returns
    -------
    out : KeypointsOnImage
        Keypoints after moving them.

    """
    shifted = [kp.shift(x=x, y=y) for kp in self.keypoints]
    return KeypointsOnImage(shifted, self.shape)
def get_coords_array(self):
"""
Convert the coordinates of all keypoints in this object to an array of shape (N,2).
Returns
-------
result : (N, 2) ndarray
Where N is the number of keypoints. Each first value is the
x coordinate, each second value is the y coordinate.
"""
result = np.zeros((len(self.keypoints), 2), np.float32)
for i, keypoint in enumerate(self.keypoints):
result[i, 0] = keypoint.x
result[i, 1] = keypoint.y
return result
@staticmethod
def from_coords_array(coords, shape):
    """
    Convert an array (N,2) with a given image shape to a KeypointsOnImage object.

    Parameters
    ----------
    coords : (N, 2) ndarray
        Coordinates of ``N`` keypoints on the original image.
        Each first entry ``coords[i, 0]`` is expected to be the x coordinate.
        Each second entry ``coords[i, 1]`` is expected to be the y coordinate.

    shape : tuple
        Shape tuple of the image on which the keypoints are placed.

    Returns
    -------
    out : KeypointsOnImage
        KeypointsOnImage object that contains all keypoints from the array.

    """
    keypoints = [Keypoint(x=coord_x, y=coord_y) for coord_x, coord_y in coords]
    return KeypointsOnImage(keypoints, shape)
def to_keypoint_image(self, size=1):
"""
Draws a new black image of shape ``(H,W,N)`` in which all keypoint coordinates are set to 255.
(H=shape height, W=shape width, N=number of keypoints)
This function can be used as a helper when augmenting keypoints with a method that only supports the
augmentation of images.
Parameters
-------
size : int
Size of each (squared) point.
Returns
-------
image : (H,W,N) ndarray
Image in which the keypoints are marked. H is the height,
defined in KeypointsOnImage.shape[0] (analogous W). N is the
number of keypoints.
"""
do_assert(len(self.keypoints) > 0)
height, width = self.shape[0:2]
image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
do_assert(size % 2 != 0)
sizeh = max(0, (size-1)//2)
for i, keypoint in enumerate(self.keypoints):
# TODO for float values spread activation over several cells
# here and do voting at the end
y = keypoint.y_int
x = keypoint.x_int
x1 = np.clip(x - sizeh, 0, width-1)
x2 = np.clip(x + sizeh + 1, 0, width)
y1 = np.clip(y - sizeh, 0, height-1)
y2 = np.clip(y + sizeh + 1, 0, height)
if x1 < x2 and y1 < y2:
image[y1:y2, x1:x2, i] = 128
if 0 <= y < height and 0 <= x < width:
image[y, x, i] = 255
return image
@staticmethod
def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1, nb_channels=None): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
"""
Converts an image generated by ``to_keypoint_image()`` back to a KeypointsOnImage object.
Parameters
----------
image : (H,W,N) ndarray
The keypoints image. N is the number of keypoints.
if_not_found_coords : tuple or list or dict or None, optional
Coordinates to use for keypoints that cannot be found in `image`.
If this is a list/tuple, it must have two integer values.
If it is a dictionary, it must have the keys ``x`` and ``y`` with
each containing one integer value.
If this is None, then the keypoint will not be added to the final
KeypointsOnImage object.
threshold : int, optional
The search for keypoints works by searching for the argmax in
each channel. This parameters contains the minimum value that
the max must have in order to be viewed as a keypoint.
nb_channels : None or int, optional
Number of channels of the image on which the keypoints are placed.
Some keypoint augmenters require that information.
If set to None, the keypoint's shape will be set
to ``(height, width)``, otherwise ``(height, width, nb_channels)``.
Returns
-------
out : KeypointsOnImage
The extracted keypoints.
"""
do_assert(len(image.shape) == 3)
height, width, nb_keypoints = image.shape
drop_if_not_found = False
if if_not_found_coords is None:
drop_if_not_found = True
if_not_found_x = -1
if_not_found_y = -1
elif isinstance(if_not_found_coords, (tuple, list)):
do_assert(len(if_not_found_coords) == 2)
if_not_found_x = if_not_found_coords[0]
if_not_found_y = if_not_found_coords[1]
elif isinstance(if_not_found_coords, dict):
if_not_found_x = if_not_found_coords["x"]
if_not_found_y = if_not_found_coords["y"]
else:
raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
type(if_not_found_coords),))
keypoints = []
for i in sm.xrange(nb_keypoints):
maxidx_flat = np.argmax(image[..., i])
maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
if found:
keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
else:
if drop_if_not_found:
pass # dont add the keypoint to the result list, i.e. drop it
else:
keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))
out_shape = (height, width)
if nb_channels is not None:
out_shape += (nb_channels,)
return KeypointsOnImage(keypoints, shape=out_shape)
def to_distance_maps(self, inverted=False):
"""
Generates a ``(H,W,K)`` output containing ``K`` distance maps for ``K`` keypoints.
The k-th distance map contains at every location ``(y, x)`` the euclidean distance to the k-th keypoint.
This function can be used as a helper when augmenting keypoints with a method that only supports
the augmentation of images.
Parameters
-------
inverted : bool, optional
If True, inverted distance maps are returned where each distance value d is replaced
by ``d/(d+1)``, i.e. the distance maps have values in the range ``(0.0, 1.0]`` with 1.0
denoting exactly the position of the respective keypoint.
Returns
-------
distance_maps : (H,W,K) ndarray
A ``float32`` array containing ``K`` distance maps for ``K`` keypoints. Each location
``(y, x, k)`` in the array denotes the euclidean distance at ``(y, x)`` to the ``k``-th keypoint.
In inverted mode the distance ``d`` is replaced by ``d/(d+1)``. The height and width
of the array match the height and width in ``KeypointsOnImage.shape``.
"""
do_assert(len(self.keypoints) > 0)
height, width = self.shape[0:2]
distance_maps = np.zeros((height, width, len(self.keypoints)), dtype=np.float32)
yy = np.arange(0, height)
xx = np.arange(0, width)
grid_xx, grid_yy = np.meshgrid(xx, yy)
for i, keypoint in enumerate(self.keypoints):
y, x = keypoint.y, keypoint.x
distance_maps[:, :, i] = (grid_xx - x) ** 2 + (grid_yy - y) ** 2
distance_maps = np.sqrt(distance_maps)
if inverted:
return 1/(distance_maps+1)
return distance_maps
# TODO add option to if_not_found_coords to reuse old keypoint coords
@staticmethod
def from_distance_maps(distance_maps, inverted=False, if_not_found_coords={"x": -1, "y": -1}, threshold=None, # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
nb_channels=None):
"""
Converts maps generated by ``to_distance_maps()`` back to a KeypointsOnImage object.
Parameters
----------
distance_maps : (H,W,N) ndarray
The distance maps. N is the number of keypoints.
inverted : bool, optional
Whether the given distance maps were generated in inverted or normal mode.
if_not_found_coords : tuple or list or dict or None, optional
Coordinates to use for keypoints that cannot be found in ``distance_maps``.
If this is a list/tuple, it must have two integer values.
If it is a dictionary, it must have the keys ``x`` and ``y``, with each
containing one integer value.
If this is None, then the keypoint will not be added to the final
KeypointsOnImage object.
threshold : float, optional
The search for keypoints works by searching for the argmin (non-inverted) or
argmax (inverted) in each channel. This parameters contains the maximum (non-inverted)
or minimum (inverted) value to accept in order to view a hit as a keypoint.
Use None to use no min/max.
nb_channels : None or int, optional
Number of channels of the image on which the keypoints are placed.
Some keypoint augmenters require that information.
If set to None, the keypoint's shape will be set
to ``(height, width)``, otherwise ``(height, width, nb_channels)``.
Returns
-------
imgaug.KeypointsOnImage
The extracted keypoints.
"""
do_assert(len(distance_maps.shape) == 3)
height, width, nb_keypoints = distance_maps.shape
drop_if_not_found = False
if if_not_found_coords is None:
drop_if_not_found = True
if_not_found_x = -1
if_not_found_y = -1
elif isinstance(if_not_found_coords, (tuple, list)):
do_assert(len(if_not_found_coords) == 2)
if_not_found_x = if_not_found_coords[0]
if_not_found_y = if_not_found_coords[1]
elif isinstance(if_not_found_coords, dict):
if_not_found_x = if_not_found_coords["x"]
if_not_found_y = if_not_found_coords["y"]
else:
raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
type(if_not_found_coords),))
keypoints = []
for i in sm.xrange(nb_keypoints):
# TODO introduce voting here among all distance values that have min/max values
if inverted:
hitidx_flat = np.argmax(distance_maps[..., i])
else:
hitidx_flat = np.argmin(distance_maps[..., i])
hitidx_ndim = np.unravel_index(hitidx_flat, (height, width))
if not inverted and threshold is not None:
found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] < threshold)
elif inverted and threshold is not None:
found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] >= threshold)
else:
found = True
if found:
keypoints.append(Keypoint(x=hitidx_ndim[1], y=hitidx_ndim[0]))
else:
if drop_if_not_found:
pass # dont add the keypoint to the result list, i.e. drop it
else:
keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))
out_shape = (height, width)
if nb_channels is not None:
out_shape += (nb_channels,)
return KeypointsOnImage(keypoints, shape=out_shape)
def copy(self):
"""
Create a shallow copy of the KeypointsOnImage object.
Returns
-------
imgaug.KeypointsOnImage
Shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Create a deep copy of the KeypointsOnImage object.
Returns
-------
imgaug.KeypointsOnImage
Deep copy.
"""
# for some reason deepcopy is way slower here than manual copy
kps = [Keypoint(x=kp.x, y=kp.y) for kp in self.keypoints]
return KeypointsOnImage(kps, tuple(self.shape))
def __repr__(self):
return self.__str__()
def __str__(self):
return "KeypointsOnImage(%s, shape=%s)" % (str(self.keypoints), self.shape)
# TODO functions: square(), to_aspect_ratio(), contains_point()
class BoundingBox(object):
"""
Class representing bounding boxes.
Each bounding box is parameterized by its top left and bottom right corners. Both are given
as x and y-coordinates. The corners are intended to lie inside the bounding box area.
As a result, a bounding box that lies completely inside the image but has maximum extensions
would have coordinates ``(0.0, 0.0)`` and ``(W - epsilon, H - epsilon)``. Note that coordinates
are saved internally as floats.
Parameters
----------
x1 : number
X-coordinate of the top left of the bounding box.
y1 : number
Y-coordinate of the top left of the bounding box.
x2 : number
X-coordinate of the bottom right of the bounding box.
y2 : number
Y-coordinate of the bottom right of the bounding box.
label : None or str, optional
Label of the bounding box, e.g. a string representing the class.
"""
def __init__(self, x1, y1, x2, y2, label=None):
"""Create a new BoundingBox instance."""
if x1 > x2:
x2, x1 = x1, x2
do_assert(x2 >= x1)
if y1 > y2:
y2, y1 = y1, y2
do_assert(y2 >= y1)
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.label = label
@property
def x1_int(self):
"""
Return the x-coordinate of the top left corner as an integer.
Returns
-------
int
X-coordinate of the top left corner, rounded to the closest integer.
"""
return int(np.round(self.x1)) # use numpy's round to have consistent behaviour between python versions
@property
def y1_int(self):
"""
Return the y-coordinate of the top left corner as an integer.
Returns
-------
int
Y-coordinate of the top left corner, rounded to the closest integer.
"""
return int(np.round(self.y1)) # use numpy's round to have consistent behaviour between python versions
@property
def x2_int(self):
"""
Return the x-coordinate of the bottom left corner as an integer.
Returns
-------
int
X-coordinate of the bottom left corner, rounded to the closest integer.
"""
return int(np.round(self.x2)) # use numpy's round to have consistent behaviour between python versions
@property
def y2_int(self):
"""
Return the y-coordinate of the bottom left corner as an integer.
Returns
-------
int
Y-coordinate of the bottom left corner, rounded to the closest integer.
"""
return int(np.round(self.y2)) # use numpy's round to have consistent behaviour between python versions
@property
def height(self):
"""
Estimate the height of the bounding box.
Returns
-------
number
Height of the bounding box.
"""
return self.y2 - self.y1
@property
def width(self):
"""
Estimate the width of the bounding box.
Returns
-------
number
Width of the bounding box.
"""
return self.x2 - self.x1
@property
def center_x(self):
"""
Estimate the x-coordinate of the center point of the bounding box.
Returns
-------
number
X-coordinate of the center point of the bounding box.
"""
return self.x1 + self.width/2
@property
def center_y(self):
"""
Estimate the y-coordinate of the center point of the bounding box.
Returns
-------
number
Y-coordinate of the center point of the bounding box.
"""
return self.y1 + self.height/2
@property
def area(self):
"""
Estimate the area of the bounding box.
Returns
-------
number
Area of the bounding box, i.e. `height * width`.
"""
return self.height * self.width
def contains(self, other):
"""
Estimate whether the bounding box contains a point.
Parameters
----------
other : imgaug.Keypoint
Point to check for.
Returns
-------
bool
True if the point is contained in the bounding box, False otherwise.
"""
x, y = other.x, other.y
return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2
def project(self, from_shape, to_shape):
"""
Project the bounding box onto a new position on a new image.
E.g. if the bounding box is on its original image at
x1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto
a new image with size (width=200, height=200), its new position will
be (x1=20, y1=40). (Analogous for x2/y2.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
out : imgaug.BoundingBox
BoundingBox object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return self.copy()
else:
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
do_assert(from_height > 0)
do_assert(from_width > 0)
do_assert(to_height > 0)
do_assert(to_width > 0)
x1 = (self.x1 / from_width) * to_width
y1 = (self.y1 / from_height) * to_height
x2 = (self.x2 / from_width) * to_width
y2 = (self.y2 / from_height) * to_height
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
"""
Extend the size of the bounding box along its sides.
Parameters
----------
all_sides : number, optional
Value by which to extend the bounding box size along all sides.
top : number, optional
Value by which to extend the bounding box size along its top side.
right : number, optional
Value by which to extend the bounding box size along its right side.
bottom : number, optional
Value by which to extend the bounding box size along its bottom side.
left : number, optional
Value by which to extend the bounding box size along its left side.
Returns
-------
imgaug.BoundingBox
Extended bounding box.
"""
return BoundingBox(
x1=self.x1 - all_sides - left,
x2=self.x2 + all_sides + right,
y1=self.y1 - all_sides - top,
y2=self.y2 + all_sides + bottom
)
def intersection(self, other, default=None):
"""
Compute the intersection bounding box of this bounding box and another one.
Note that in extreme cases, the intersection can be a single point, meaning that the intersection bounding box
will exist, but then also has a height and width of zero.
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to generate the intersection.
default : any, optional
Default value to return if there is no intersection.
Returns
-------
imgaug.BoundingBox or any
Intersection bounding box of the two bounding boxes if there is an intersection.
If there is no intersection, the default value will be returned, which can by anything.
"""
x1_i = max(self.x1, other.x1)
y1_i = max(self.y1, other.y1)
x2_i = min(self.x2, other.x2)
y2_i = min(self.y2, other.y2)
if x1_i > x2_i or y1_i > y2_i:
return default
else:
return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)
def union(self, other):
"""
Compute the union bounding box of this bounding box and another one.
This is equivalent to drawing a bounding box around all corners points of both
bounding boxes.
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to generate the union.
Returns
-------
imgaug.BoundingBox
Union bounding box of the two bounding boxes.
"""
return BoundingBox(
x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2),
)
def iou(self, other):
"""
Compute the IoU of this bounding box with another one.
IoU is the intersection over union, defined as::
``area(intersection(A, B)) / area(union(A, B))``
``= area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))``
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to compare.
Returns
-------
float
IoU between the two bounding boxes.
"""
inters = self.intersection(other)
if inters is None:
return 0.0
else:
area_union = self.area + other.area - inters.area
return inters.area / area_union if area_union > 0 else 0.0
def is_fully_within_image(self, image):
"""
Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is fully inside the image area. False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
return self.x1 >= 0 and self.x2 < width and self.y1 >= 0 and self.y2 < height
def is_partly_within_image(self, image):
"""
Estimate whether the bounding box is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is at least partially inside the image area. False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
eps = np.finfo(np.float32).eps
img_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps)
return self.intersection(img_bb) is not None
def is_out_of_image(self, image, fully=True, partly=False):
"""
Estimate whether the bounding box is partially or fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape and must contain at least two integers.
fully : bool, optional
Whether to return True if the bounding box is fully outside fo the image area.
partly : bool, optional
Whether to return True if the bounding box is at least partially outside fo the
image area.
Returns
-------
bool
True if the bounding box is partially/fully outside of the image area, depending
on defined parameters. False otherwise.
"""
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
else:
return fully
def cut_out_of_image(self, image):
"""
Cut off all parts of the bounding box that are outside of the image.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use for the clipping of the bounding box.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
result : imgaug.BoundingBox
Bounding box, clipped to fall within the image dimensions.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
do_assert(height > 0)
do_assert(width > 0)
eps = np.finfo(np.float32).eps
x1 = np.clip(self.x1, 0, width - eps)
x2 = np.clip(self.x2, 0, width - eps)
y1 = np.clip(self.y1, 0, height - eps)
y2 = np.clip(self.y2, 0, height - eps)
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift the bounding box from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the bounding box from the top.
right : None or int, optional
Amount of pixels by which to shift the bounding box from the right.
bottom : None or int, optional
Amount of pixels by which to shift the bounding box from the bottom.
left : None or int, optional
Amount of pixels by which to shift the bounding box from the left.
Returns
-------
result : imgaug.BoundingBox
Shifted bounding box.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
)
# TODO add explicit test for zero-sized BBs (worked when tested by hand)
    def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
        """
        Draw the bounding box on an image.

        Parameters
        ----------
        image : (H,W,C) ndarray(uint8)
            The image onto which to draw the bounding box.

        color : iterable of int, optional
            The color to use, corresponding to the channel layout of the image. Usually RGB.

        alpha : float, optional
            The transparency of the drawn bounding box, where 1.0 denotes no transparency and
            0.0 is invisible.

        thickness : int, optional
            The thickness of the bounding box in pixels. If the value is larger than 1, then
            additional pixels will be added around the bounding box (i.e. extension towards the
            outside).

        copy : bool, optional
            Whether to copy the input image or change it in-place.

        raise_if_out_of_image : bool, optional
            Whether to raise an error if the bounding box is partially/fully outside of the
            image. If set to False, no error will be raised and only the parts inside the image
            will be drawn.

        Returns
        -------
        result : (H,W,C) ndarray(uint8)
            Image with bounding box drawn on it.
        """
        if raise_if_out_of_image and self.is_out_of_image(image):
            raise Exception("Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s." % (
                self.x1, self.y1, self.x2, self.y2, image.shape))
        result = np.copy(image) if copy else image
        # convert tuple/list colors to ndarray so the blending below can
        # broadcast per channel
        if isinstance(color, (tuple, list)):
            color = np.uint8(color)
        # draw one rectangle perimeter per thickness step, each growing
        # outwards by i pixels
        for i in range(thickness):
            y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int
            # When y values get into the range (H-0.5, H), the *_int functions round them to H.
            # That is technically sensible, but in the case of drawing means that the border lies
            # just barely outside of the image, making the border disappear, even though the BB
            # is fully inside the image. Here we correct for that because of beauty reasons.
            # Same is the case for x coordinates.
            if self.is_fully_within_image(image):
                y1 = np.clip(y1, 0, image.shape[0]-1)
                y2 = np.clip(y2, 0, image.shape[0]-1)
                x1 = np.clip(x1, 0, image.shape[1]-1)
                x2 = np.clip(x2, 0, image.shape[1]-1)
            # rectangle corners (clockwise); polygon_perimeter clips to the
            # image shape, so out-of-image parts are silently omitted
            y = [y1-i, y1-i, y2+i, y2+i]
            x = [x1-i, x2+i, x2+i, x1-i]
            rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
            if alpha >= 0.99:
                # (nearly) opaque: overwrite the border pixels directly
                result[rr, cc, :] = color
            else:
                if is_float_array(result):
                    # float image: blend in-place, then clamp to uint8 value range
                    result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                    result = np.clip(result, 0, 255)
                else:
                    # integer image: blend in float32, then cast back to the input dtype
                    input_dtype = result.dtype
                    result = result.astype(np.float32)
                    result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                    result = np.clip(result, 0, 255).astype(input_dtype)
        return result
    def extract_from_image(self, image, prevent_zero_size=True):
        """
        Extract the image pixels within the bounding box.

        This function will zero-pad the image if the bounding box is partially/fully outside of
        the image.

        Parameters
        ----------
        image : (H,W) ndarray or (H,W,C) ndarray
            The image from which to extract the pixels within the bounding box.

        prevent_zero_size : bool, optional
            Whether to prevent height or width of the extracted image from becoming zero.
            If this is set to True and height or width of the bounding box is below 1, the height/width will
            be increased to 1. This can be useful to prevent problems, e.g. with image saving or plotting.
            If it is set to False, images will be returned as ``(H', W')`` or ``(H', W', 3)`` with ``H`` or
            ``W`` potentially being 0.

        Returns
        -------
        image : (H',W') ndarray or (H',W',C) ndarray
            Pixels within the bounding box. Zero-padded if the bounding box is partially/fully
            outside of the image. If prevent_zero_size is activated, it is guarantueed that ``H'>0``
            and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``.
        """
        # padding applied below when the box reaches outside of the image
        pad_top = 0
        pad_right = 0
        pad_bottom = 0
        pad_left = 0
        height, width = image.shape[0], image.shape[1]
        x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
        # When y values get into the range (H-0.5, H), the *_int functions round them to H.
        # That is technically sensible, but in the case of extraction leads to a black border,
        # which is both ugly and unexpected after calling cut_out_of_image(). Here we correct for
        # that because of beauty reasons.
        # Same is the case for x coordinates.
        if self.is_fully_within_image(image):
            y1 = np.clip(y1, 0, image.shape[0]-1)
            y2 = np.clip(y2, 0, image.shape[0]-1)
            x1 = np.clip(x1, 0, image.shape[1]-1)
            x2 = np.clip(x2, 0, image.shape[1]-1)
        # TODO add test
        # guarantee at least a 1x1 extraction area if requested
        if prevent_zero_size:
            if abs(x2 - x1) < 1:
                x2 = x1 + 1
            if abs(y2 - y1) < 1:
                y2 = y1 + 1
        # if the bb is outside of the image area, the following pads the image
        # first with black pixels until the bb is inside the image
        # and only then extracts the image area
        # TODO probably more efficient to initialize an array of zeros
        # and copy only the portions of the bb into that array that are
        # natively inside the image area
        if x1 < 0:
            # shift both x coordinates right so that x1 lands on 0
            pad_left = abs(x1)
            x2 = x2 + abs(x1)
            x1 = 0
        if y1 < 0:
            # shift both y coordinates down so that y1 lands on 0
            pad_top = abs(y1)
            y2 = y2 + abs(y1)
            y1 = 0
        if x2 >= width:
            pad_right = x2 - (width - 1)
        if y2 >= height:
            pad_bottom = y2 - (height - 1)
        if any([val > 0 for val in [pad_top, pad_right, pad_bottom, pad_left]]):
            # zero-pad; 2D (grayscale) and 3D (multi-channel) images need
            # different pad-width tuples
            if len(image.shape) == 2:
                image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right)), mode="constant")
            else:
                image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), mode="constant")
        return image[y1:y2, x1:x2]
# TODO also add to_heatmap
# TODO add this to BoundingBoxesOnImage
def to_keypoints(self):
"""
Convert the corners of the bounding box to keypoints (clockwise, starting at top left).
Returns
-------
list of imgaug.Keypoint
Corners of the bounding box as keypoints.
"""
return [
Keypoint(x=self.x1, y=self.y1),
Keypoint(x=self.x2, y=self.y1),
Keypoint(x=self.x2, y=self.y2),
Keypoint(x=self.x1, y=self.y2)
]
def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a shallow copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not None, then the x1 coordinate of the copied object will be set to this value.
y1 : None or number
If not None, then the y1 coordinate of the copied object will be set to this value.
x2 : None or number
If not None, then the x2 coordinate of the copied object will be set to this value.
y2 : None or number
If not None, then the y2 coordinate of the copied object will be set to this value.
label : None or string
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.BoundingBox
Shallow copy.
"""
return BoundingBox(
x1=self.x1 if x1 is None else x1,
x2=self.x2 if x2 is None else x2,
y1=self.y1 if y1 is None else y1,
y2=self.y2 if y2 is None else y2,
label=self.label if label is None else label
)
def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a deep copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not None, then the x1 coordinate of the copied object will be set to this value.
y1 : None or number
If not None, then the y1 coordinate of the copied object will be set to this value.
x2 : None or number
If not None, then the x2 coordinate of the copied object will be set to this value.
y2 : None or number
If not None, then the y2 coordinate of the copied object will be set to this value.
label : None or string
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.BoundingBox
Deep copy.
"""
return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)
def __repr__(self):
return self.__str__()
def __str__(self):
return "BoundingBox(x1=%.4f, y1=%.4f, x2=%.4f, y2=%.4f, label=%s)" % (
self.x1, self.y1, self.x2, self.y2, self.label)
class BoundingBoxesOnImage(object):
"""
Object that represents all bounding boxes on a single image.
Parameters
----------
bounding_boxes : list of imgaug.BoundingBox
List of bounding boxes on the image.
shape : tuple of int
The shape of the image on which the bounding boxes are placed.
Examples
--------
>>> bbs = [
>>> BoundingBox(x1=10, y1=20, x2=20, y2=30),
>>> BoundingBox(x1=25, y1=50, x2=30, y2=70)
>>> ]
>>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
"""
def __init__(self, bounding_boxes, shape):
self.bounding_boxes = bounding_boxes
if is_np_array(shape):
self.shape = shape.shape
else:
do_assert(isinstance(shape, (tuple, list)))
self.shape = tuple(shape)
# TODO remove this? here it is image height at BoundingBox it is bounding box height
@property
def height(self):
"""
Get the height of the image on which the bounding boxes fall.
Returns
-------
int
Image height.
"""
return self.shape[0]
# TODO remove this? here it is image width at BoundingBox it is bounding box width
@property
def width(self):
"""
Get the width of the image on which the bounding boxes fall.
Returns
-------
int
Image width.
"""
return self.shape[1]
@property
def empty(self):
"""
Returns whether this object contains zero bounding boxes.
Returns
-------
bool
True if this object contains zero bounding boxes.
"""
return len(self.bounding_boxes) == 0
def on(self, image):
"""
Project bounding boxes from one image to a new one.
Parameters
----------
image : ndarray or tuple of int
New image onto which the bounding boxes are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
bounding_boxes : imgaug.BoundingBoxesOnImage
Object containing all projected bounding boxes.
"""
if is_np_array(image):
shape = image.shape
else:
shape = image
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
else:
bounding_boxes = [bb.project(self.shape, shape) for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bounding_boxes, shape)
@classmethod
def from_xyxy_array(cls, xyxy, shape):
"""
Convert an (N,4) ndarray to a BoundingBoxesOnImage object.
This is the inverse of :func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xyxy : (N,4) ndarray
Array containing the corner coordinates (top-left, bottom-right) of ``N`` bounding boxes
in the form ``(x1, y1, x2, y2)``. Should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.BoundingBoxesOnImage
Object containing a list of BoundingBox objects following the provided corner coordinates.
"""
do_assert(xyxy.shape[1] == 4, "Expected input array of shape (N, 4), got shape %s." % (xyxy.shape,))
boxes = [BoundingBox(*row) for row in xyxy]
return cls(boxes, shape)
def to_xyxy_array(self, dtype=np.float32):
"""
Convert the BoundingBoxesOnImage object to an (N,4) ndarray.
This is the inverse of :func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`.
Parameters
----------
dtype : numpy.dtype, optional
Desired output datatype of the ndarray.
Returns
-------
ndarray
(N,4) ndarray array, where ``N`` denotes the number of bounding boxes and ``4`` denotes the
top-left and bottom-right bounding box corner coordinates in form ``(x1, y1, x2, y2)``.
"""
xyxy_array = np.zeros((len(self.bounding_boxes), 4), dtype=np.float32)
for i, box in enumerate(self.bounding_boxes):
xyxy_array[i] = [box.x1, box.y1, box.x2, box.y2]
return xyxy_array.astype(dtype)
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
    """
    Draw all bounding boxes onto a given image.

    Parameters
    ----------
    image : (H,W,3) ndarray
        The image onto which to draw the bounding boxes.
        This image should usually have the same shape as set in
        ``BoundingBoxesOnImage.shape``.

    color : int or list of int or tuple of int or (3,) ndarray, optional
        The RGB color of all bounding boxes. If a single int ``C``, then that is
        equivalent to ``(C,C,C)``.

    alpha : float, optional
        Alpha/transparency of the bounding box.

    thickness : int, optional
        Thickness in pixels.

    copy : bool, optional
        Whether to copy the image before drawing the points.

    raise_if_out_of_image : bool, optional
        Whether to raise an exception if any bounding box is outside of the image.

    Returns
    -------
    image : (H,W,3) ndarray
        Image with drawn bounding boxes.
    """
    # TODO improve efficiency here by copying only once
    # Draw each box in turn, feeding the result of one draw call into the next.
    result = image
    for box in self.bounding_boxes:
        result = box.draw_on_image(
            result,
            color=color,
            alpha=alpha,
            thickness=thickness,
            copy=copy,
            raise_if_out_of_image=raise_if_out_of_image
        )
    return result
def remove_out_of_image(self, fully=True, partly=False):
    """
    Remove all bounding boxes that are fully or partially outside of the image.

    Parameters
    ----------
    fully : bool, optional
        Whether to remove bounding boxes that are fully outside of the image.

    partly : bool, optional
        Whether to remove bounding boxes that are partially outside of the image.

    Returns
    -------
    imgaug.BoundingBoxesOnImage
        Reduced set of bounding boxes, with those that were fully/partially
        outside of the image removed.
    """
    kept = []
    for bb in self.bounding_boxes:
        # Keep only boxes that are not flagged as out of image under the
        # requested fully/partly policy.
        if not bb.is_out_of_image(self.shape, fully=fully, partly=partly):
            kept.append(bb)
    return BoundingBoxesOnImage(kept, shape=self.shape)
def cut_out_of_image(self):
    """
    Cut off all parts of all bounding boxes that are outside of the image.

    Returns
    -------
    imgaug.BoundingBoxesOnImage
        Bounding boxes, clipped to fall within the image dimensions.
        Boxes that are fully outside of the image are dropped entirely.
    """
    # Fully-out-of-image boxes are discarded; the remainder is clipped.
    within = (bb for bb in self.bounding_boxes if bb.is_partly_within_image(self.shape))
    clipped = [bb.cut_out_of_image(self.shape) for bb in within]
    return BoundingBoxesOnImage(clipped, shape=self.shape)
def shift(self, top=None, right=None, bottom=None, left=None):
    """
    Shift all bounding boxes from one or more image sides, i.e. move them on the x/y-axis.

    Parameters
    ----------
    top : None or int, optional
        Amount of pixels by which to shift all bounding boxes from the top.

    right : None or int, optional
        Amount of pixels by which to shift all bounding boxes from the right.

    bottom : None or int, optional
        Amount of pixels by which to shift all bounding boxes from the bottom.

    left : None or int, optional
        Amount of pixels by which to shift all bounding boxes from the left.

    Returns
    -------
    imgaug.BoundingBoxesOnImage
        Shifted bounding boxes.
    """
    shifted = []
    for bb in self.bounding_boxes:
        # Delegate the per-box shifting to BoundingBox.shift().
        shifted.append(bb.shift(top=top, right=right, bottom=bottom, left=left))
    return BoundingBoxesOnImage(shifted, shape=self.shape)
def copy(self):
    """
    Create a shallow copy of this BoundingBoxesOnImage instance.

    The copied object shares its list of bounding boxes with the original.

    Returns
    -------
    imgaug.BoundingBoxesOnImage
        Shallow copy.
    """
    # `copy` here refers to the stdlib module imported at file level, not this method.
    return copy.copy(self)
def deepcopy(self):
    """
    Create a deep copy of this BoundingBoxesOnImage instance.

    Returns
    -------
    imgaug.BoundingBoxesOnImage
        Deep copy.
    """
    # A manual per-box copy is far faster than copy.deepcopy() (same reasoning
    # as for KeypointsOnImage). tuple() detaches the shape from the original.
    copied_boxes = [bb.deepcopy() for bb in self.bounding_boxes]
    return BoundingBoxesOnImage(copied_boxes, tuple(self.shape))
def __repr__(self):
    """Return the same text as ``__str__`` so repr() and str() always agree."""
    return str(self)
def __str__(self):
    """Return a human-readable representation listing all boxes and the image shape."""
    return "BoundingBoxesOnImage({}, shape={})".format(str(self.bounding_boxes), self.shape)
# TODO somehow merge with BoundingBox
# TODO add functions: simplify() (eg via shapely.ops.simplify()),
# extend(all_sides=0, top=0, right=0, bottom=0, left=0),
# intersection(other, default=None), union(other), iou(other), to_heatmap, to_mask
class Polygon(object):
    """
    Class representing polygons.

    Each polygon is parameterized by its corner points, given as absolute x- and y-coordinates
    with sub-pixel accuracy.

    Parameters
    ----------
    exterior : list of imgaug.Keypoint or list of tuple of float or (N,2) ndarray
        List of points defining the polygon. May be either a list of Keypoint objects or a list of tuples in xy-form
        or a numpy array of shape (N,2) for N points in xy-form.
        All coordinates are expected to be the absolute coordinates in the image, given as floats, e.g. x=10.7
        and y=3.4 for a point at coordinates (10.7, 3.4). Their order is expected to be clock-wise. They are expected
        to not be closed (i.e. first and last coordinate differ).

    label : None or str, optional
        Label of the polygon, e.g. a string representing the class.

    """

    def __init__(self, exterior, label=None):
        """Create a new Polygon instance."""
        if isinstance(exterior, list):
            if not exterior:
                # for empty lists, make sure that the shape is (0, 2) and not (0,) as that is also expected when the
                # input is a numpy array
                self.exterior = np.zeros((0, 2), dtype=np.float32)
            elif isinstance(exterior[0], Keypoint):
                # list of Keypoint
                self.exterior = np.float32([[point.x, point.y] for point in exterior])
            else:
                # list of tuples (x, y)
                self.exterior = np.float32([[point[0], point[1]] for point in exterior])
        else:
            do_assert(is_np_array(exterior))
            do_assert(exterior.ndim == 2)
            do_assert(exterior.shape[1] == 2)
            self.exterior = np.float32(exterior)

        # Remove last point if it is essentially the same as the first point (polygons are always assumed to be
        # closed anyways). This also prevents problems with shapely, which seems to add the last point automatically.
        if len(self.exterior) >= 2 and np.allclose(self.exterior[0, :], self.exterior[-1, :]):
            self.exterior = self.exterior[:-1]

        self.label = label

    @property
    def xx(self):
        """
        Return the x-coordinates of all points in the exterior.

        Returns
        -------
        (N,) ndarray
            X-coordinates of all points in the exterior as a float32 ndarray.
        """
        return self.exterior[:, 0]

    @property
    def yy(self):
        """
        Return the y-coordinates of all points in the exterior.

        Returns
        -------
        (N,) ndarray
            Y-coordinates of all points in the exterior as a float32 ndarray.
        """
        return self.exterior[:, 1]

    @property
    def xx_int(self):
        """
        Return the x-coordinates of all points in the exterior, rounded to the closest integer value.

        Returns
        -------
        (N,) ndarray
            X-coordinates of all points in the exterior, rounded to the closest integer value.
            Result dtype is int32.
        """
        return np.int32(np.round(self.xx))

    @property
    def yy_int(self):
        """
        Return the y-coordinates of all points in the exterior, rounded to the closest integer value.

        Returns
        -------
        (N,) ndarray
            Y-coordinates of all points in the exterior, rounded to the closest integer value.
            Result dtype is int32.
        """
        return np.int32(np.round(self.yy))

    @property
    def is_valid(self):
        """
        Estimate whether the polygon has a valid shape.

        To be considered valid, the polygon must be made up of at least 3 points and its
        geometry must be valid according to shapely (e.g. not self-intersecting).
        Multiple consecutive points are allowed to have the same coordinates.

        Returns
        -------
        bool
            True if polygon has at least 3 points and a shapely-valid geometry, otherwise False.
        """
        if len(self.exterior) < 3:
            return False
        return self.to_shapely_polygon().is_valid

    @property
    def area(self):
        """
        Estimate the area of the polygon.

        Returns
        -------
        number
            Area of the polygon.
        """
        if len(self.exterior) < 3:
            raise Exception("Cannot compute the polygon's area because it contains less than three points.")
        poly = self.to_shapely_polygon()
        return poly.area

    def project(self, from_shape, to_shape):
        """
        Project the polygon onto an image with different shape.

        The relative coordinates of all points remain the same.
        E.g. a point at (x=20, y=20) on an image (width=100, height=200) will be
        projected on a new image (width=200, height=100) to (x=40, y=10).

        This is intended for cases where the original image is resized.
        It cannot be used for more complex changes (e.g. padding, cropping).

        Parameters
        ----------
        from_shape : tuple of int
            Shape of the original image. (Before resize.)

        to_shape : tuple of int
            Shape of the new image. (After resize.)

        Returns
        -------
        imgaug.Polygon
            Polygon object with new coordinates.
        """
        if from_shape[0:2] == to_shape[0:2]:
            return self.copy()
        exterior = [Keypoint(x=x, y=y).project(from_shape, to_shape) for x, y in self.exterior]
        return self.copy(exterior=exterior)

    def find_closest_point_index(self, x, y, return_distance=False):
        """
        Find the index of the point within the exterior that is closest to the given coordinates.

        "Closeness" is here defined based on euclidean distance.
        This method will raise an AssertionError if the exterior contains no points.

        Parameters
        ----------
        x : number
            X-coordinate around which to search for close points.

        y : number
            Y-coordinate around which to search for close points.

        return_distance : bool, optional
            Whether to also return the distance of the closest point.

        Returns
        -------
        int
            Index of the closest point.

        number
            Euclidean distance to the closest point.
            This value is only returned if `return_distance` was set to True.
        """
        do_assert(len(self.exterior) > 0)
        distances = []
        for x2, y2 in self.exterior:
            d = (x2 - x) ** 2 + (y2 - y) ** 2
            distances.append(d)
        distances = np.sqrt(distances)
        closest_idx = np.argmin(distances)
        if return_distance:
            return closest_idx, distances[closest_idx]
        return closest_idx

    def _compute_inside_image_point_mask(self, image):
        # Boolean mask over exterior points, True where the point lies inside
        # the image plane (0 <= x < W and 0 <= y < H).
        if isinstance(image, tuple):
            shape = image
        else:
            shape = image.shape
        h, w = shape[0:2]
        return np.logical_and(
            np.logical_and(0 <= self.exterior[:, 0], self.exterior[:, 0] < w),
            np.logical_and(0 <= self.exterior[:, 1], self.exterior[:, 1] < h)
        )

    # TODO keep this method? it is almost an alias for is_out_of_image()
    def is_fully_within_image(self, image):
        """
        Estimate whether the polygon is fully inside the image area.

        Parameters
        ----------
        image : (H,W,...) ndarray or tuple of int
            Image dimensions to use.
            If an ndarray, its shape will be used.
            If a tuple, it is assumed to represent the image shape and must contain at least two integers.

        Returns
        -------
        bool
            True if the polygon is fully inside the image area.
            False otherwise.
        """
        return not self.is_out_of_image(image, fully=True, partly=True)

    # TODO keep this method? it is almost an alias for is_out_of_image()
    def is_partly_within_image(self, image):
        """
        Estimate whether the polygon is at least partially inside the image area.

        Parameters
        ----------
        image : (H,W,...) ndarray or tuple of int
            Image dimensions to use.
            If an ndarray, its shape will be used.
            If a tuple, it is assumed to represent the image shape and must contain at least two integers.

        Returns
        -------
        bool
            True if the polygon is at least partially inside the image area.
            False otherwise.
        """
        return not self.is_out_of_image(image, fully=True, partly=False)

    def is_out_of_image(self, image, fully=True, partly=False):
        """
        Estimate whether the polygon is partially or fully outside of the image area.

        Parameters
        ----------
        image : (H,W,...) ndarray or tuple of int
            Image dimensions to use.
            If an ndarray, its shape will be used.
            If a tuple, it is assumed to represent the image shape and must contain at least two integers.

        fully : bool, optional
            Whether to return True if the polygon is fully outside of the image area.

        partly : bool, optional
            Whether to return True if the polygon is at least partially outside of the image area.

        Returns
        -------
        bool
            True if the polygon is partially/fully outside of the image area, depending
            on defined parameters. False otherwise.
        """
        if len(self.exterior) == 0:
            raise Exception("Cannot determine whether the polygon is inside the image, because it contains no points.")
        inside = self._compute_inside_image_point_mask(image)
        nb_inside = sum(inside)
        if nb_inside == len(inside):
            return False
        elif nb_inside > 0:
            return partly
        else:
            return fully

    # TODO mark as deprecated
    # TODO rename cut_* to clip_* in BoundingBox
    def cut_out_of_image(self, image):
        # Deprecated alias for clip_out_of_image().
        return self.clip_out_of_image(image)

    def clip_out_of_image(self, image):
        """
        Cut off all parts of the polygon that are outside of the image.

        This operation may lead to new points being created.
        As a single polygon may be split into multiple new polygons, the result is a MultiPolygon.

        Parameters
        ----------
        image : (H,W,...) ndarray or tuple of int
            Image dimensions to use for the clipping of the polygon.
            If an ndarray, its shape will be used.
            If a tuple, it is assumed to represent the image shape and must contain at least two integers.

        Returns
        -------
        imgaug.MultiPolygon
            Polygon, clipped to fall within the image dimensions.
            Returned as MultiPolygon, because the clipping can split the polygon into multiple parts.
        """
        # if fully out of image, clip everything away, nothing remaining
        if self.is_out_of_image(image, fully=True, partly=False):
            return MultiPolygon([])

        # BUGFIX: the docstring (and all sibling methods) allow `image` to be a
        # shape tuple, but the old code unconditionally accessed `image.shape`
        # and crashed for tuples.
        if isinstance(image, tuple):
            h, w = image[0:2]
        else:
            h, w = image.shape[0:2]
        poly_shapely = self.to_shapely_polygon()
        poly_image = shapely.geometry.Polygon([(0, 0), (w, 0), (w, h), (0, h)])
        multipoly_inter_shapely = poly_shapely.intersection(poly_image)
        if not isinstance(multipoly_inter_shapely, shapely.geometry.MultiPolygon):
            do_assert(isinstance(multipoly_inter_shapely, shapely.geometry.Polygon))
            multipoly_inter_shapely = shapely.geometry.MultiPolygon([multipoly_inter_shapely])

        polygons = []
        for poly_inter_shapely in multipoly_inter_shapely.geoms:
            polygons.append(Polygon.from_shapely(poly_inter_shapely, label=self.label))

        # shapely changes the order of points, we try here to preserve it as good as possible
        polygons_reordered = []
        for polygon in polygons:
            found = False
            for x, y in self.exterior:
                closest_idx, dist = polygon.find_closest_point_index(x=x, y=y, return_distance=True)
                if dist < 1e-6:
                    polygon_reordered = polygon.change_first_point_by_index(closest_idx)
                    polygons_reordered.append(polygon_reordered)
                    found = True
                    break
            do_assert(found)  # could only not find closest points if new polys are empty

        return MultiPolygon(polygons_reordered)

    def shift(self, top=None, right=None, bottom=None, left=None):
        """
        Shift the polygon from one or more image sides, i.e. move it on the x/y-axis.

        Parameters
        ----------
        top : None or int, optional
            Amount of pixels by which to shift the polygon from the top.

        right : None or int, optional
            Amount of pixels by which to shift the polygon from the right.

        bottom : None or int, optional
            Amount of pixels by which to shift the polygon from the bottom.

        left : None or int, optional
            Amount of pixels by which to shift the polygon from the left.

        Returns
        -------
        imgaug.Polygon
            Shifted polygon.
        """
        top = top if top is not None else 0
        right = right if right is not None else 0
        bottom = bottom if bottom is not None else 0
        left = left if left is not None else 0
        exterior = np.copy(self.exterior)
        exterior[:, 0] += (left - right)
        exterior[:, 1] += (top - bottom)
        return self.deepcopy(exterior=exterior)

    # TODO add boundary thickness
    def draw_on_image(self,
                      image,
                      color=(0, 255, 0), color_perimeter=(0, 128, 0),
                      alpha=0.5, alpha_perimeter=1.0,
                      raise_if_out_of_image=False):
        """
        Draw the polygon on an image.

        Parameters
        ----------
        image : (H,W,C) ndarray
            The image onto which to draw the polygon. Usually expected to be of dtype uint8, though other dtypes
            are also handled.

        color : iterable of int, optional
            The color to use for the polygon (excluding perimeter). Must correspond to the channel layout of the
            image. Usually RGB.

        color_perimeter : iterable of int, optional
            The color to use for the perimeter/border of the polygon. Must correspond to the channel layout of the
            image. Usually RGB.

        alpha : float, optional
            The transparency of the polygon (excluding the perimeter), where 1.0 denotes no transparency and 0.0 is
            invisible.

        alpha_perimeter : float, optional
            The transparency of the polygon's perimeter/border, where 1.0 denotes no transparency and 0.0 is
            invisible.

        raise_if_out_of_image : bool, optional
            Whether to raise an error if the polygon is partially/fully outside of the
            image. If set to False, no error will be raised and only the parts inside the image
            will be drawn.

        Returns
        -------
        result : (H,W,C) ndarray
            Image with polygon drawn on it. Result dtype is the same as the input dtype.
        """
        # TODO separate this into draw_face_on_image() and draw_border_on_image()

        if raise_if_out_of_image and self.is_out_of_image(image):
            raise Exception("Cannot draw polygon %s on image with shape %s." % (
                str(self), image.shape
            ))

        xx = self.xx_int
        yy = self.yy_int

        # TODO np.clip to image plane if is_fully_within_image(), similar to how it is done for bounding boxes
        # TODO improve efficiency by only drawing in rectangle that covers poly instead of drawing in the whole image
        # TODO for a rectangular polygon, the face coordinates include the top/left boundary but not the right/bottom
        # boundary. This may be unintuitive when not drawing the boundary. Maybe somehow remove the boundary
        # coordinates from the face coordinates after generating both?
        rr, cc = skimage.draw.polygon(yy, xx, shape=image.shape)
        rr_perimeter, cc_perimeter = skimage.draw.polygon_perimeter(yy, xx, shape=image.shape)

        params_face = (rr, cc, color, alpha)
        params_perimeter = (rr_perimeter, cc_perimeter, color_perimeter, alpha_perimeter)

        input_dtype = image.dtype
        result = image.astype(np.float32)

        # Distinct loop-variable names avoid shadowing the `rr`/`cc`/`color`/`alpha`
        # bindings above (the old code reused those names).
        for rr_draw, cc_draw, color_draw, alpha_draw in [params_face, params_perimeter]:
            color_draw = np.float32(color_draw)

            if alpha_draw >= 0.99:
                result[rr_draw, cc_draw, :] = color_draw
            elif alpha_draw < 1e-4:
                pass  # invisible, do nothing
            else:
                result[rr_draw, cc_draw, :] = (1 - alpha_draw) * result[rr_draw, cc_draw, :] \
                    + alpha_draw * color_draw

        if input_dtype.type == np.uint8:
            result = np.clip(result, 0, 255).astype(input_dtype)  # TODO make clipping more flexible
        else:
            result = result.astype(input_dtype)

        return result

    def extract_from_image(self, image):
        """
        Extract the image pixels within the polygon.

        This function will zero-pad the image if the polygon is partially/fully outside of
        the image.

        Parameters
        ----------
        image : (H,W) ndarray or (H,W,C) ndarray
            The image from which to extract the pixels within the polygon.

        Returns
        -------
        result : (H',W') ndarray or (H',W',C) ndarray
            Pixels within the polygon. Zero-padded if the polygon is partially/fully
            outside of the image.
        """
        do_assert(image.ndim in [2, 3])
        if len(self.exterior) <= 2:
            raise Exception("Polygon must be made up of at least 3 points to extract its area from an image.")

        bb = self.to_bounding_box()
        bb_area = bb.extract_from_image(image)
        if self.is_out_of_image(image, fully=True, partly=False):
            return bb_area

        xx = self.xx_int
        yy = self.yy_int
        xx_mask = xx - np.min(xx)
        yy_mask = yy - np.min(yy)
        height_mask = np.max(yy_mask)
        width_mask = np.max(xx_mask)

        # NOTE(review): the mask shape uses max(...) rather than max(...)+1, so the
        # bottom-most row / right-most column of the polygon is excluded — verify that
        # this matches the size of bb.extract_from_image()'s output.
        rr_face, cc_face = skimage.draw.polygon(yy_mask, xx_mask, shape=(height_mask, width_mask))

        # BUGFIX: use the builtin `bool` instead of the deprecated `np.bool` alias
        # (removed in recent numpy versions; behavior is identical).
        mask = np.zeros((height_mask, width_mask), dtype=bool)
        mask[rr_face, cc_face] = True

        if image.ndim == 3:
            mask = np.tile(mask[:, :, np.newaxis], (1, 1, image.shape[2]))

        return bb_area * mask

    def change_first_point_by_coords(self, x, y, max_distance=1e-4):
        """
        Set the first point of the exterior to the given point based on its coordinates.

        If multiple points are found, the closest one will be picked.
        If no matching points are found, an exception is raised.

        Note: This method does *not* work in-place.

        Parameters
        ----------
        x : number
            X-coordinate of the point.

        y : number
            Y-coordinate of the point.

        max_distance : number
            Maximum distance past which possible matches are ignored.

        Returns
        -------
        imgaug.Polygon
            Copy of this polygon with the new point order.
        """
        if len(self.exterior) == 0:
            raise Exception("Cannot reorder polygon points, because it contains no points.")

        closest_idx, closest_dist = self.find_closest_point_index(x=x, y=y, return_distance=True)
        if max_distance is not None and closest_dist > max_distance:
            closest_point = self.exterior[closest_idx, :]
            # BUGFIX: the old message printed the distance in place of max_distance
            # and was grammatically garbled ("exceeds ... exceeded").
            raise Exception(
                "Closest found point (%.9f, %.9f) with distance %.9f exceeded max_distance of %.9f" % (
                    closest_point[0], closest_point[1], closest_dist, max_distance)
            )
        return self.change_first_point_by_index(closest_idx)

    def change_first_point_by_index(self, point_idx):
        """
        Set the first point of the exterior to the given point based on its index.

        Note: This method does *not* work in-place.

        Parameters
        ----------
        point_idx : int
            Index of the desired starting point.

        Returns
        -------
        imgaug.Polygon
            Copy of this polygon with the new point order.
        """
        do_assert(0 <= point_idx < len(self.exterior))
        if point_idx == 0:
            return self.deepcopy()
        exterior = np.concatenate(
            (self.exterior[point_idx:, :], self.exterior[:point_idx, :]),
            axis=0
        )
        return self.deepcopy(exterior=exterior)

    def to_shapely_polygon(self):
        """
        Convert this polygon to a Shapely polygon.

        Returns
        -------
        shapely.geometry.Polygon
            The Shapely polygon matching this polygon's exterior.
        """
        return shapely.geometry.Polygon([(point[0], point[1]) for point in self.exterior])

    def to_shapely_line_string(self, closed=False, interpolate=0):
        """
        Convert this polygon to a Shapely LineString object.

        Parameters
        ----------
        closed : bool, optional
            Whether to return the line string with the last point being identical to the first point.

        interpolate : int, optional
            Number of points to interpolate between any pair of two consecutive points. These points are added
            to the final line string.

        Returns
        -------
        shapely.geometry.LineString
            The Shapely LineString matching the polygon's exterior.
        """
        return _convert_points_to_shapely_line_string(self.exterior, closed=closed, interpolate=interpolate)

    def to_bounding_box(self):
        """
        Convert this polygon to a bounding box tightly containing the whole polygon.

        Returns
        -------
        imgaug.BoundingBox
            The bounding box tightly containing the polygon.
        """
        xx = self.xx
        yy = self.yy
        return BoundingBox(x1=min(xx), x2=max(xx), y1=min(yy), y2=max(yy), label=self.label)

    @staticmethod
    def from_shapely(polygon_shapely, label=None):
        """
        Create a polygon from a Shapely polygon.

        Note: This will remove any holes in the Shapely polygon.

        Parameters
        ----------
        polygon_shapely : shapely.geometry.Polygon
            The shapely polygon.

        label : None or str, optional
            The label of the new polygon.

        Returns
        -------
        imgaug.Polygon
            A polygon with the same exterior as the Shapely polygon.
        """
        do_assert(isinstance(polygon_shapely, shapely.geometry.Polygon))
        # polygon_shapely.exterior can be None if the polygon was instantiated without points
        if polygon_shapely.exterior is None or len(polygon_shapely.exterior.coords) == 0:
            return Polygon([], label=label)
        exterior = np.float32([[x, y] for (x, y) in polygon_shapely.exterior.coords])
        return Polygon(exterior, label=label)

    def exterior_almost_equals(self, other_polygon, max_distance=1e-6, interpolate=8):
        """
        Estimate whether the geometry of the exterior of this polygon and another polygon are comparable.

        The two exteriors can have different numbers of points, but any point randomly sampled on the exterior
        of one polygon should be close to the closest point on the exterior of the other polygon.

        Note that this method works approximately. One can come up with polygons with fairly different shapes that
        will still be estimated as equal by this method. In practice however this should be unlikely to be the case.
        The probability for something like that goes down as the interpolation parameter is increased.

        Parameters
        ----------
        other_polygon : imgaug.Polygon or (N,2) ndarray
            The other polygon with which to compare the exterior.
            If this is an ndarray, it is assumed to represent an exterior.
            It must then have dtype float32 and shape (N,2) with the second dimension denoting xy-coordinates.

        max_distance : number
            The maximum euclidean distance between a point on one polygon and the closest point on the other polygon.
            If the distance is exceeded for any such pair, the two exteriors are not viewed as equal.
            The points are either the points contained in the polygon's exterior ndarray or interpolated points
            between these.

        interpolate : int
            How many points to interpolate between the points of the polygon's exteriors.
            If this is set to zero, then only the points given by the polygon's exterior ndarrays will be used.
            Higher values make it less likely that unequal polygons are evaluated as equal.

        Returns
        -------
        bool
            Whether the two polygon's exteriors can be viewed as equal (approximate test).
        """
        atol = max_distance

        ext_a = self.exterior
        ext_b = other_polygon.exterior if not is_np_array(other_polygon) else other_polygon
        len_a = len(ext_a)
        len_b = len(ext_b)

        if len_a == 0 and len_b == 0:
            return True
        elif len_a == 0 and len_b > 0:
            return False
        elif len_a > 0 and len_b == 0:
            return False

        # neither A nor B is zero-sized at this point

        # if A or B only contain points identical to the first point, merge them to one point
        if len_a > 1:
            if all([np.allclose(ext_a[0, :], ext_a[1 + i, :], rtol=0, atol=atol) for i in sm.xrange(len_a - 1)]):
                ext_a = ext_a[0:1, :]
                len_a = 1
        if len_b > 1:
            if all([np.allclose(ext_b[0, :], ext_b[1 + i, :], rtol=0, atol=atol) for i in sm.xrange(len_b - 1)]):
                ext_b = ext_b[0:1, :]
                len_b = 1

        # handle polygons that contain a single point
        if len_a == 1 and len_b == 1:
            return np.allclose(ext_a[0, :], ext_b[0, :], rtol=0, atol=atol)
        elif len_a == 1:
            return all([np.allclose(ext_a[0, :], ext_b[i, :], rtol=0, atol=atol) for i in sm.xrange(len_b)])
        elif len_b == 1:
            return all([np.allclose(ext_b[0, :], ext_a[i, :], rtol=0, atol=atol) for i in sm.xrange(len_a)])

        # After this point, both polygons have at least 2 points, i.e. LineStrings can be used.
        # We can also safely go back to the original exteriors (before close points were merged).
        ls_a = self.to_shapely_line_string(closed=True, interpolate=interpolate)
        ls_b = other_polygon.to_shapely_line_string(closed=True, interpolate=interpolate) \
            if not is_np_array(other_polygon) \
            else _convert_points_to_shapely_line_string(other_polygon, closed=True, interpolate=interpolate)

        # Measure the distance from each point in A to LineString B and vice versa.
        # Make sure that no point violates the tolerance.
        # Note that we can't just use LineString.almost_equals(LineString) -- that seems to expect the same number
        # and order of points in both LineStrings (failed with duplicated points).
        for x, y in ls_a.coords:
            point = shapely.geometry.Point(x, y)
            if not ls_b.distance(point) <= max_distance:
                return False

        for x, y in ls_b.coords:
            point = shapely.geometry.Point(x, y)
            if not ls_a.distance(point) <= max_distance:
                return False

        return True

    def almost_equals(self, other, max_distance=1e-6, interpolate=8):
        """
        Compare this polygon with another one and estimate whether they can be viewed as equal.

        This is the same as :func:`imgaug.Polygon.exterior_almost_equals` but additionally compares the labels.

        Parameters
        ----------
        other
            The object to compare against. If not a Polygon, then False will be returned.

        max_distance : float
            See :func:`imgaug.Polygon.exterior_almost_equals`.

        interpolate : int
            See :func:`imgaug.Polygon.exterior_almost_equals`.

        Returns
        -------
        bool
            Whether the two polygons can be viewed as equal. In the case of the exteriors this is an approximate test.
        """
        if not isinstance(other, Polygon):
            return False
        if self.label is not None or other.label is not None:
            if self.label is None:
                return False
            if other.label is None:
                return False
            if self.label != other.label:
                return False
        return self.exterior_almost_equals(other, max_distance=max_distance, interpolate=interpolate)

    def copy(self, exterior=None, label=None):
        """
        Create a shallow copy of the Polygon object.

        Parameters
        ----------
        exterior : list of imgaug.Keypoint or list of tuple or (N,2) ndarray, optional
            List of points defining the polygon. See :func:`imgaug.Polygon.__init__` for details.

        label : None or str, optional
            If not None, then the label of the copied object will be set to this value.

        Returns
        -------
        imgaug.Polygon
            Shallow copy.
        """
        return self.deepcopy(exterior=exterior, label=label)

    def deepcopy(self, exterior=None, label=None):
        """
        Create a deep copy of the Polygon object.

        Parameters
        ----------
        exterior : list of Keypoint or list of tuple or (N,2) ndarray, optional
            List of points defining the polygon. See `imgaug.Polygon.__init__` for details.

        label : None or str
            If not None, then the label of the copied object will be set to this value.

        Returns
        -------
        imgaug.Polygon
            Deep copy.
        """
        return Polygon(
            exterior=np.copy(self.exterior) if exterior is None else exterior,
            label=self.label if label is None else label
        )

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        points_str = ", ".join(["(x=%.3f, y=%.3f)" % (point[0], point[1]) for point in self.exterior])
        return "Polygon([%s] (%d points), label=%s)" % (points_str, len(self.exterior), self.label)
def _convert_points_to_shapely_line_string(points, closed=False, interpolate=0):
    # Build a shapely LineString from a sequence of xy points, optionally
    # interpolating extra points between neighbors and/or closing the line.
    if len(points) <= 1:
        raise Exception(
            ("Conversion to shapely line string requires at least two points, but points input contains "
             "only %d points.") % (len(points),)
        )

    coords = [(p[0], p[1]) for p in points]

    # interpolate points between each consecutive pair of points
    if interpolate > 0:
        coords = _interpolate_points(coords, interpolate)

    # close if requested and not yet closed
    if closed and len(points) > 1:  # here intentionally used points instead of coords
        coords.append(coords[0])

    return shapely.geometry.LineString(coords)
def _interpolate_point_pair(point_a, point_b, nb_steps):
    # Return `nb_steps` evenly spaced points strictly between point_a and
    # point_b (endpoints excluded), as a list of xy tuples.
    if nb_steps < 1:
        return []
    ax, ay = point_a
    bx, by = point_b
    # Step vector in float32, matching the coordinate dtype used elsewhere.
    step = np.float32([bx - ax, by - ay]) / (1 + nb_steps)
    return [(ax + k * step[0], ay + k * step[1]) for k in range(1, nb_steps + 1)]
def _interpolate_points(points, nb_steps, closed=True):
    # Insert `nb_steps` interpolated points between each consecutive pair of
    # points. For closed=True the segment from the last back to the first
    # point is interpolated too (the duplicated first point itself is not
    # included in the output).
    if len(points) <= 1:
        return points
    path = list(points)
    if closed:
        path = path + [path[0]]
    result = []
    for start, end in zip(path[:-1], path[1:]):
        result.append(start)
        result.extend(_interpolate_point_pair(start, end, nb_steps))
    if not closed:
        result.append(path[-1])
    return result
def _interpolate_points_by_max_distance(points, max_distance, closed=True):
    # Insert as many interpolated points between each consecutive pair of
    # points as needed so that neighboring points are at most roughly
    # `max_distance` apart.
    do_assert(max_distance > 0, "max_distance must have value greater than 0, got %.8f" % (max_distance,))
    if len(points) <= 1:
        return points
    path = list(points)
    if closed:
        path = path + [path[0]]
    result = []
    for start, end in zip(path[:-1], path[1:]):
        gap = np.sqrt((start[0] - end[0]) ** 2 + (start[1] - end[1]) ** 2)
        nb_steps = int((gap / max_distance) - 1)
        result.append(start)
        result.extend(_interpolate_point_pair(start, end, nb_steps))
    if not closed:
        result.append(path[-1])
    return result
class MultiPolygon(object):
    """
    Class that represents several polygons.

    Parameters
    ----------
    geoms : list of imgaug.Polygon
        List of the polygons.

    """

    def __init__(self, geoms):
        """Create a new MultiPolygon instance."""
        # all([]) is True, so an empty list passes this check too.
        do_assert(all([isinstance(g, Polygon) for g in geoms]))
        self.geoms = geoms

    @staticmethod
    def from_shapely(geometry, label=None):
        """
        Create a MultiPolygon from a Shapely MultiPolygon, a Shapely Polygon or a Shapely GeometryCollection.

        This also creates all necessary Polygons contained by this MultiPolygon.

        Parameters
        ----------
        geometry : shapely.geometry.MultiPolygon or shapely.geometry.Polygon\
                   or shapely.geometry.collection.GeometryCollection
            The object to convert to a MultiPolygon.

        label : None or str, optional
            A label assigned to all Polygons within the MultiPolygon.

        Returns
        -------
        imgaug.MultiPolygon
            The derived MultiPolygon.
        """
        if isinstance(geometry, shapely.geometry.Polygon):
            # A single polygon becomes a one-element MultiPolygon.
            return MultiPolygon([Polygon.from_shapely(geometry, label=label)])
        if isinstance(geometry, shapely.geometry.collection.GeometryCollection):
            # Collections may only contain polygons here.
            do_assert(all([isinstance(g, shapely.geometry.Polygon) for g in geometry.geoms]))
            return MultiPolygon([Polygon.from_shapely(g, label=label) for g in geometry.geoms])
        if isinstance(geometry, shapely.geometry.MultiPolygon):
            return MultiPolygon([Polygon.from_shapely(g, label=label) for g in geometry.geoms])
        raise Exception("Unknown datatype '%s'. Expected shapely.geometry.Polygon or "
                        "shapely.geometry.MultiPolygon or "
                        "shapely.geometry.collections.GeometryCollection." % (type(geometry),))
class HeatmapsOnImage(object):
    """
    Object representing heatmaps on images.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Array representing the heatmap(s).
        Must be of dtype float32.
        If multiple heatmaps are provided, then ``C`` is expected to denote their number.
    shape : tuple of int
        Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
        heatmap(s) array, unless it is identical to the image shape (note the likely
        difference between the arrays in the number of channels).
        If there is not a corresponding image, use the shape of the heatmaps array.
    min_value : float, optional
        Minimum value for the heatmaps that `arr` represents. This will usually be ``0.0``.
    max_value : float, optional
        Maximum value for the heatmaps that `arr` represents. This will usually be ``1.0``.
    """
    def __init__(self, arr, shape, min_value=0.0, max_value=1.0):
        """Construct a new HeatmapsOnImage object."""
        do_assert(is_np_array(arr), "Expected numpy array as heatmap input array, got type %s" % (type(arr),))
        do_assert(arr.dtype.type in [np.float32],
                  "Heatmap input array expected to be of dtype float32, got dtype %s." % (arr.dtype,))
        do_assert(arr.ndim in [2, 3], "Heatmap input array must be 2d or 3d, got shape %s." % (arr.shape,))
        do_assert(len(shape) in [2, 3],
                  "Argument 'shape' in HeatmapsOnImage expected to be 2d or 3d, got shape %s." % (shape,))
        do_assert(min_value < max_value)
        # Only the first 50 array values are sampled for the value range check,
        # keeping the validation cheap for large heatmaps.
        do_assert(np.min(arr.flat[0:50]) >= min_value - np.finfo(arr.dtype).eps,
                  ("Value range of heatmap was chosen to be (%.8f, %.8f), but found value below minimum in first "
                   + "50 heatmap array values.") % (min_value, max_value))
        do_assert(np.max(arr.flat[0:50]) <= max_value + np.finfo(arr.dtype).eps,
                  ("Value range of heatmap was chosen to be (%.8f, %.8f), but found value above maximum in first "
                   + "50 heatmap array values.") % (min_value, max_value))
        if arr.ndim == 2:
            # Remember the 2d-ness so get_arr() can return the original shape later on.
            arr = arr[..., np.newaxis]
            self.arr_was_2d = True
        else:
            self.arr_was_2d = False
        # Heatmaps are stored internally normalized to the value range (0.0, 1.0).
        # Skip the normalization if the provided range already is (approximately) that.
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < min_value < 0.0 + eps
        max_is_one = 1.0 - eps < max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            self.arr_0to1 = arr
        else:
            self.arr_0to1 = (arr - min_value) / (max_value - min_value)
        self.shape = shape
        self.min_value = min_value
        self.max_value = max_value
    def get_arr(self):
        """
        Get the heatmap's array within the value range originally provided in ``__init__()``.

        The HeatmapsOnImage object saves heatmaps internally in the value range ``(min=0.0, max=1.0)``.
        This function converts the internal representation to ``(min=min_value, max=max_value)``,
        where ``min_value`` and ``max_value`` are provided upon instantiation of the object.

        Returns
        -------
        result : (H,W) ndarray or (H,W,C) ndarray
            Heatmap array. Dtype is float32.
        """
        # Restore the original 2d shape if the input array was 2d.
        if self.arr_was_2d and self.arr_0to1.shape[2] == 1:
            arr = self.arr_0to1[:, :, 0]
        else:
            arr = self.arr_0to1
        # If the original range was (approximately) (0.0, 1.0), no denormalization
        # is needed; return a copy so the internal array cannot be modified.
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < self.min_value < 0.0 + eps
        max_is_one = 1.0 - eps < self.max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            return np.copy(arr)
        else:
            diff = self.max_value - self.min_value
            return self.min_value + diff * arr
    # TODO
    # def find_global_maxima(self):
    #     raise NotImplementedError()
    def draw(self, size=None, cmap="jet"):
        """
        Render the heatmaps as RGB images.

        Parameters
        ----------
        size : None or float or iterable of int or iterable of float, optional
            Size of the rendered RGB image as ``(height, width)``.
            See :func:`imgaug.imresize_single_image` for details.
            If set to None, no resizing is performed and the size of the heatmaps array is used.
        cmap : str or None, optional
            Color map of ``matplotlib`` to use in order to convert the heatmaps to RGB images.
            If set to None, no color map will be used and the heatmaps will be converted
            to simple intensity maps.

        Returns
        -------
        heatmaps_drawn : list of (H,W,3) ndarray
            Rendered heatmaps. One per heatmap array channel. Dtype is uint8.
        """
        heatmaps_uint8 = self.to_uint8()
        heatmaps_drawn = []
        for c in sm.xrange(heatmaps_uint8.shape[2]):
            # c:c+1 here, because the additional axis is needed by imresize_single_image
            heatmap_c = heatmaps_uint8[..., c:c+1]
            if size is not None:
                heatmap_c_rs = imresize_single_image(heatmap_c, size, interpolation="nearest")
            else:
                heatmap_c_rs = heatmap_c
            # back to 2d and to the (0.0, 1.0) value range expected by the colormap
            heatmap_c_rs = np.squeeze(heatmap_c_rs).astype(np.float32) / 255.0
            if cmap is not None:
                cmap_func = plt.get_cmap(cmap)
                heatmap_cmapped = cmap_func(heatmap_c_rs)
                # drop the alpha channel produced by the colormap (RGBA -> RGB)
                heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
            else:
                # no colormap: replicate the intensities over three channels
                heatmap_cmapped = np.tile(heatmap_c_rs[..., np.newaxis], (1, 1, 3))
            heatmap_cmapped = np.clip(heatmap_cmapped * 255, 0, 255).astype(np.uint8)
            heatmaps_drawn.append(heatmap_cmapped)
        return heatmaps_drawn
    def draw_on_image(self, image, alpha=0.75, cmap="jet", resize="heatmaps"):
        """
        Draw the heatmaps as overlays over an image.

        Parameters
        ----------
        image : (H,W,3) ndarray
            Image onto which to draw the heatmaps. Expected to be of dtype uint8.
        alpha : float, optional
            Alpha/opacity value to use for the mixing of image and heatmaps.
            Higher values mean that the heatmaps will be more visible and the image less visible.
        cmap : str or None, optional
            Color map to use. See :func:`imgaug.HeatmapsOnImage.draw` for details.
        resize : {'heatmaps', 'image'}, optional
            In case of size differences between the image and heatmaps, either the image or
            the heatmaps can be resized. This parameter controls which of the two will be resized
            to the other's size.

        Returns
        -------
        mix : list of (H,W,3) ndarray
            Rendered overlays. One per heatmap array channel. Dtype is uint8.
        """
        # assert RGB image
        do_assert(image.ndim == 3)
        do_assert(image.shape[2] == 3)
        do_assert(image.dtype.type == np.uint8)
        do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
        do_assert(resize in ["heatmaps", "image"])
        if resize == "image":
            image = imresize_single_image(image, self.arr_0to1.shape[0:2], interpolation="cubic")
        heatmaps_drawn = self.draw(
            size=image.shape[0:2] if resize == "heatmaps" else None,
            cmap=cmap
        )
        # alpha-blend the image with each rendered heatmap channel
        mix = [
            np.clip((1-alpha) * image + alpha * heatmap_i, 0, 255).astype(np.uint8)
            for heatmap_i
            in heatmaps_drawn
        ]
        return mix
    def invert(self):
        """
        Invert each value in the heatmap, shifting low towards high values and vice versa.

        This changes each value to::

            v' = max - (v - min)

        where ``v`` is the value at some spatial location, ``min`` is the minimum value in the heatmap
        and ``max`` is the maximum value.
        As the heatmap uses internally a 0.0 to 1.0 representation, this simply becomes ``v' = 1.0 - v``.
        Note that the attributes ``min_value`` and ``max_value`` are not switched. They both keep their values.

        This function can be useful e.g. when working with depth maps, where algorithms might have
        an easier time representing the furthest away points with zeros, requiring an inverted
        depth map.

        Returns
        -------
        arr_inv : imgaug.HeatmapsOnImage
            Inverted heatmap.
        """
        arr_inv = HeatmapsOnImage.from_0to1(1 - self.arr_0to1, shape=self.shape, min_value=self.min_value,
                                            max_value=self.max_value)
        # keep the 2d-ness flag so get_arr() behaves like on the source heatmap
        arr_inv.arr_was_2d = self.arr_was_2d
        return arr_inv
    def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
        """
        Pad the heatmaps on their top/right/bottom/left side.

        Parameters
        ----------
        top : int, optional
            Amount of pixels to add at the top side of the heatmaps. Must be 0 or greater.
        right : int, optional
            Amount of pixels to add at the right side of the heatmaps. Must be 0 or greater.
        bottom : int, optional
            Amount of pixels to add at the bottom side of the heatmaps. Must be 0 or greater.
        left : int, optional
            Amount of pixels to add at the left side of the heatmaps. Must be 0 or greater.
        mode : string, optional
            Padding mode to use. See :func:`numpy.pad` for details.
        cval : number, optional
            Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Padded heatmaps of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
        """
        arr_0to1_padded = pad(self.arr_0to1, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
        return HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
        """
        Pad the heatmaps on their sides so that they match a target aspect ratio.

        Depending on which dimension is smaller (height or width), only the corresponding
        sides (left/right or top/bottom) will be padded. In each case, both of the sides will
        be padded equally.

        Parameters
        ----------
        aspect_ratio : float
            Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
            as much width as height.
        mode : str, optional
            Padding mode to use. See :func:`numpy.pad` for details.
        cval : number, optional
            Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
        return_pad_amounts : bool, optional
            If False, then only the padded image will be returned. If True, a tuple with two
            entries will be returned, where the first entry is the padded image and the second
            entry are the amounts by which each image side was padded. These amounts are again a
            tuple of the form (top, right, bottom, left), with each value being an integer.

        Returns
        -------
        heatmaps : imgaug.HeatmapsOnImage
            Padded heatmaps as HeatmapsOnImage object.
        pad_amounts : tuple of int
            Amounts by which the heatmaps were padded on each side, given as a tuple ``(top, right, bottom, left)``.
            This tuple is only returned if `return_pad_amounts` was set to True.
        """
        arr_0to1_padded, pad_amounts = pad_to_aspect_ratio(self.arr_0to1, aspect_ratio=aspect_ratio, mode=mode,
                                                           cval=cval, return_pad_amounts=True)
        heatmaps = HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,
                                             max_value=self.max_value)
        if return_pad_amounts:
            return heatmaps, pad_amounts
        else:
            return heatmaps
    def avg_pool(self, block_size):
        """
        Rescale the heatmap(s) array using average pooling of a given block/kernel size.

        Parameters
        ----------
        block_size : int or tuple of int
            Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps after average pooling.
        """
        arr_0to1_reduced = avg_pool(self.arr_0to1, block_size, cval=0.0)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def max_pool(self, block_size):
        """
        Rescale the heatmap(s) array using max-pooling of a given block/kernel size.

        Parameters
        ----------
        block_size : int or tuple of int
            Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps after max-pooling.
        """
        arr_0to1_reduced = max_pool(self.arr_0to1, block_size)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def scale(self, sizes, interpolation="cubic"):
        """
        Rescale the heatmap(s) array to the provided size given the provided interpolation.

        Parameters
        ----------
        sizes : float or iterable of int or iterable of float
            New size of the array in ``(height, width)``. See :func:`imgaug.imresize_single_image` for details.
        interpolation : None or str or int, optional
            The interpolation to use during resize. See :func:`imgaug.imresize_single_image` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Rescaled heatmaps object.
        """
        arr_0to1_rescaled = imresize_single_image(self.arr_0to1, sizes, interpolation=interpolation)
        # cubic interpolation can lead to values outside of [0.0, 1.0],
        # see https://github.com/opencv/opencv/issues/7195
        # TODO area interpolation too?
        arr_0to1_rescaled = np.clip(arr_0to1_rescaled, 0.0, 1.0)
        return HeatmapsOnImage.from_0to1(arr_0to1_rescaled, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def to_uint8(self):
        """
        Convert this heatmaps object to a 0-to-255 array.

        Returns
        -------
        arr_uint8 : (H,W,C) ndarray
            Heatmap as a 0-to-255 array (dtype is uint8).
        """
        # TODO this always returns (H,W,C), even if input ndarray was originally (H,W)
        # does it make sense here to also return (H,W) if self.arr_was_2d?
        arr_0to255 = np.clip(np.round(self.arr_0to1 * 255), 0, 255)
        arr_uint8 = arr_0to255.astype(np.uint8)
        return arr_uint8
    @staticmethod
    def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from a heatmap array containing values ranging from 0 to 255.

        Parameters
        ----------
        arr_uint8 : (H,W) ndarray or (H,W,C) ndarray
            Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
            Expected dtype is uint8.
        shape : tuple of int
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.
        min_value : float, optional
            Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
            be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
            underlying ``(0, 255)`` array to value range ``(min_value, max_value)``.
        max_value : float, optional
            Maximum value for the heatmaps that the 0-to-255 array represents.
            See parameter `min_value` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps object.
        """
        arr_0to1 = arr_uint8.astype(np.float32) / 255.0
        return HeatmapsOnImage.from_0to1(arr_0to1, shape, min_value=min_value, max_value=max_value)
    @staticmethod
    def from_0to1(arr_0to1, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from a heatmap array containing values ranging from 0.0 to 1.0.

        Parameters
        ----------
        arr_0to1 : (H,W) or (H,W,C) ndarray
            Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
            Expected dtype is float32.
        shape : tuple of ints
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.
        min_value : float, optional
            Minimum value for the heatmaps that the 0-to-1 array represents. This will usually
            be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
            underlying ``(0.0, 1.0)`` array to value range ``(min_value, max_value)``.
            E.g. if you started with heatmaps in the range ``(-1.0, 1.0)`` and projected these
            to (0.0, 1.0), you should call this function with ``min_value=-1.0``, ``max_value=1.0``
            so that :func:`imgaug.HeatmapsOnImage.get_arr` returns heatmap arrays having value
            range (-1.0, 1.0).
        max_value : float, optional
            Maximum value for the heatmaps that the 0-to-1 array represents.
            See parameter `min_value` for details.

        Returns
        -------
        heatmaps : imgaug.HeatmapsOnImage
            Heatmaps object.
        """
        # Construct with range (0.0, 1.0) so that __init__ skips normalization
        # (the input is already normalized), then record the original value
        # range by overwriting the attributes afterwards.
        heatmaps = HeatmapsOnImage(arr_0to1, shape, min_value=0.0, max_value=1.0)
        heatmaps.min_value = min_value
        heatmaps.max_value = max_value
        return heatmaps
    @classmethod
    def change_normalization(cls, arr, source, target):
        """
        Change the value range of a heatmap from one min-max to another min-max.

        E.g. the value range may be changed from min=0.0, max=1.0 to min=-1.0, max=1.0.

        Parameters
        ----------
        arr : ndarray
            Heatmap array to modify.
        source : tuple of float or imgaug.HeatmapsOnImage
            Current value range of the input array, given as (min, max), where both are float values.
            A HeatmapsOnImage may be given instead, in which case its min_value/max_value are used.
        target : tuple of float or imgaug.HeatmapsOnImage
            Desired output value range of the array, given as (min, max), where both are float values.
            A HeatmapsOnImage may be given instead, in which case its min_value/max_value are used.

        Returns
        -------
        arr_target : ndarray
            Input array, with value range projected to the desired target value range.
        """
        do_assert(is_np_array(arr))
        if isinstance(source, HeatmapsOnImage):
            source = (source.min_value, source.max_value)
        else:
            do_assert(isinstance(source, tuple))
            do_assert(len(source) == 2)
            do_assert(source[0] < source[1])
        if isinstance(target, HeatmapsOnImage):
            target = (target.min_value, target.max_value)
        else:
            do_assert(isinstance(target, tuple))
            do_assert(len(target) == 2)
            do_assert(target[0] < target[1])
        # Check if source and target are the same (with a tiny bit of tolerance)
        # if so, evade computation and just copy the array instead.
        # This is reasonable, as source and target will often both be (0.0, 1.0).
        eps = np.finfo(arr.dtype).eps
        mins_same = source[0] - 10*eps < target[0] < source[0] + 10*eps
        maxs_same = source[1] - 10*eps < target[1] < source[1] + 10*eps
        if mins_same and maxs_same:
            return np.copy(arr)
        min_source, max_source = source
        min_target, max_target = target
        diff_source = max_source - min_source
        diff_target = max_target - min_target
        # project to (0.0, 1.0) first, then to the target range
        arr_0to1 = (arr - min_source) / diff_source
        arr_target = min_target + arr_0to1 * diff_target
        return arr_target
    def copy(self):
        """
        Create a shallow copy of the Heatmaps object.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Shallow copy.
        """
        # NOTE: currently implemented via deepcopy()
        return self.deepcopy()
    def deepcopy(self):
        """
        Create a deep copy of the Heatmaps object.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Deep copy.
        """
        return HeatmapsOnImage(self.get_arr(), shape=self.shape, min_value=self.min_value, max_value=self.max_value)
class SegmentationMapOnImage(object):
    """
    Object representing a segmentation map associated with an image.

    Attributes
    ----------
    DEFAULT_SEGMENT_COLORS : list of tuple of int
        Standard RGB colors to use during drawing, ordered by class index.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,1) ndarray or (H,W,C) ndarray
        Array representing the segmentation map. May have datatypes bool, integer or float.

            * If bool: Assumed to be of shape (H,W), (H,W,1) or (H,W,C). If (H,W) or (H,W,1) it
              is assumed to be for the case of having a single class (where any False denotes
              background). Otherwise there are assumed to be C channels, one for each class,
              with each of them containing a mask for that class. The masks may overlap.
            * If integer: Assumed to be of shape (H,W) or (H,W,1). Each pixel is assumed to
              contain an integer denoting the class index. Classes are assumed to be
              non-overlapping. The number of classes cannot be guessed from this input, hence
              nb_classes must be set.
            * If float: Assumed to be of shape (H,W), (H,W,1) or (H,W,C) with meanings being
              similar to the case of `bool`. Values are expected to fall always in the range
              0.0 to 1.0 and are usually expected to be either 0.0 or 1.0 upon instantiation
              of a new segmentation map. Classes may overlap.

    shape : iterable of int
        Shape of the corresponding image (NOT the segmentation map array). This is expected
        to be ``(H, W)`` or ``(H, W, C)`` with ``C`` usually being 3. If there is no corresponding image,
        then use the segmentation map's shape instead.
    nb_classes : int or None
        Total number of unique classes that may appear in an segmentation map, i.e. the max
        class index. This may be None if the input array is of type bool or float. The number
        of classes however must be provided if the input array is of type int, as then the
        number of classes cannot be guessed.
    """
    # Colors are ordered by class index; the second half of the list repeats
    # the first half in darker shades.
    DEFAULT_SEGMENT_COLORS = [
        (0, 0, 0),  # black
        (230, 25, 75),  # red
        (60, 180, 75),  # green
        (255, 225, 25),  # yellow
        (0, 130, 200),  # blue
        (245, 130, 48),  # orange
        (145, 30, 180),  # purple
        (70, 240, 240),  # cyan
        (240, 50, 230),  # magenta
        (210, 245, 60),  # lime
        (250, 190, 190),  # pink
        (0, 128, 128),  # teal
        (230, 190, 255),  # lavender
        (170, 110, 40),  # brown
        (255, 250, 200),  # beige
        (128, 0, 0),  # maroon
        (170, 255, 195),  # mint
        (128, 128, 0),  # olive
        (255, 215, 180),  # coral
        (0, 0, 128),  # navy
        (128, 128, 128),  # grey
        (255, 255, 255),  # white
        # --
        (115, 12, 37),  # dark red
        (30, 90, 37),  # dark green
        (127, 112, 12),  # dark yellow
        (0, 65, 100),  # dark blue
        (122, 65, 24),  # dark orange
        (72, 15, 90),  # dark purple
        (35, 120, 120),  # dark cyan
        (120, 25, 115),  # dark magenta
        (105, 122, 30),  # dark lime
        (125, 95, 95),  # dark pink
        (0, 64, 64),  # dark teal
        (115, 95, 127),  # dark lavender
        (85, 55, 20),  # dark brown
        (127, 125, 100),  # dark beige
        (64, 0, 0),  # dark maroon
        (85, 127, 97),  # dark mint
        (64, 64, 0),  # dark olive
        (127, 107, 90),  # dark coral
        (0, 0, 64),  # dark navy
        (64, 64, 64),  # dark grey
    ]
    def __init__(self, arr, shape, nb_classes=None):
        """Construct a new SegmentationMapOnImage object (see class docstring for parameters)."""
        do_assert(is_np_array(arr), "Expected to get numpy array, got %s." % (type(arr),))
        if arr.dtype.type == np.bool_:
            do_assert(arr.ndim in [2, 3])
            # remember the original input form (dtype kind and ndim) so it can
            # be propagated to derived maps and interpreted by other methods
            self.input_was = ("bool", arr.ndim)
            if arr.ndim == 2:
                arr = arr[..., np.newaxis]
            arr = arr.astype(np.float32)
        elif arr.dtype.type in NP_INT_TYPES.union(NP_UINT_TYPES):
            do_assert(arr.ndim == 2 or (arr.ndim == 3 and arr.shape[2] == 1))
            do_assert(nb_classes is not None)
            do_assert(nb_classes > 0)
            # only the first 100 values are checked to keep validation cheap
            do_assert(np.min(arr.flat[0:100]) >= 0)
            do_assert(np.max(arr.flat[0:100]) <= nb_classes)
            self.input_was = ("int", arr.dtype.type, arr.ndim)
            if arr.ndim == 3:
                arr = arr[..., 0]
            # TODO improve efficiency here by building only sub-heatmaps for classes actually
            # present in the image. This would also get rid of nb_classes.
            arr = np.eye(nb_classes)[arr]  # from class indices to one hot
            arr = arr.astype(np.float32)
        elif arr.dtype.type in NP_FLOAT_TYPES:
            do_assert(arr.ndim == 3)
            self.input_was = ("float", arr.dtype.type, arr.ndim)
            arr = arr.astype(np.float32)
        else:
            raise Exception(("Input was expected to be an ndarray of dtype bool or any dtype in %s or any dtype in %s. "
                             "Got dtype %s.") % (
                                 str(NP_INT_TYPES.union(NP_UINT_TYPES)), str(NP_FLOAT_TYPES), str(arr.dtype)))
        # internal representation: one float32 heatmap channel per class, shape (H,W,C)
        do_assert(arr.ndim == 3)
        do_assert(arr.dtype.type == np.float32)
        self.arr = arr
        self.shape = shape
        self.nb_classes = nb_classes if nb_classes is not None else arr.shape[2]
def get_arr_int(self, background_threshold=0.01, background_class_id=None):
"""
Get the segmentation map array as an integer array of shape (H, W).
Each pixel in that array contains an integer value representing the pixel's class.
If multiple classes overlap, the one with the highest local float value is picked.
If that highest local value is below `background_threshold`, the method instead uses
the background class id as the pixel's class value.
By default, class id 0 is the background class. This may only be changed if the original
input to the segmentation map object was an integer map.
Parameters
----------
background_threshold : float, optional
At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
class-heatmaps has a value above this threshold, the method uses the background class
id instead.
background_class_id : None or int, optional
Class id to fall back to if no class-heatmap passes the threshold at a spatial
location. May only be provided if the original input was an integer mask and in these
cases defaults to 0. If the input were float or boolean masks, the background class id
may not be set as it is assumed that the background is implicitly defined
as 'any spatial location that has zero-like values in all masks'.
Returns
-------
result : (H,W) ndarray
Segmentation map array (int32).
If the original input consisted of boolean or float masks, then the highest possible
class id is ``1+C``, where ``C`` is the number of provided float/boolean masks. The value
``0`` in the integer mask then denotes the background class.
"""
if self.input_was[0] in ["bool", "float"]:
do_assert(background_class_id is None,
"The background class id may only be changed if the original input to SegmentationMapOnImage "
+ "was an *integer* based segmentation map.")
if background_class_id is None:
background_class_id = 0
channelwise_max_idx = np.argmax(self.arr, axis=2)
# for bool and float input masks, we assume that the background is implicitly given,
# i.e. anything where all masks/channels have zero-like values
# for int, we assume that the background class is explicitly given and has the index 0
if self.input_was[0] in ["bool", "float"]:
result = 1 + channelwise_max_idx
else: # integer mask was provided
result = channelwise_max_idx
if background_threshold is not None and background_threshold > 0:
probs = np.amax(self.arr, axis=2)
result[probs < background_threshold] = background_class_id
return result.astype(np.int32)
# TODO
# def get_arr_bool(self, allow_overlapping=False, threshold=0.5, background_threshold=0.01, background_class_id=0):
# raise NotImplementedError()
    def draw(self, size=None, background_threshold=0.01, background_class_id=None, colors=None,
             return_foreground_mask=False):
        """
        Render the segmentation map as an RGB image.

        Parameters
        ----------
        size : None or float or iterable of int or iterable of float, optional
            Size of the rendered RGB image as ``(height, width)``.
            See :func:`imgaug.imresize_single_image` for details.
            If set to None, no resizing is performed and the size of the segmentation map array is used.
        background_threshold : float, optional
            See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
        background_class_id : None or int, optional
            See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
        colors : None or list of tuple of int, optional
            Colors to use. One for each class to draw. If None, then default colors will be used.
        return_foreground_mask : bool, optional
            Whether to return a mask of the same size as the drawn segmentation map, containing
            True at any spatial location that is not the background class and False everywhere else.

        Returns
        -------
        segmap_drawn : (H,W,3) ndarray
            Rendered segmentation map (dtype is uint8).
        foreground_mask : (H,W) ndarray
            Mask indicating the locations of foreground classes (dtype is bool).
            This value is only returned if `return_foreground_mask` is True.
        """
        # flatten the per-class heatmaps to a single integer class map first
        arr = self.get_arr_int(background_threshold=background_threshold, background_class_id=background_class_id)
        # highest class id present determines how many colors are required
        nb_classes = 1 + np.max(arr)
        segmap_drawn = np.zeros((arr.shape[0], arr.shape[1], 3), dtype=np.uint8)
        if colors is None:
            colors = SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS
        do_assert(nb_classes <= len(colors),
                  "Can't draw all %d classes as it would exceed the maximum number of %d available colors." % (
                      nb_classes, len(colors),))
        # only iterate classes that actually occur in the map
        ids_in_map = np.unique(arr)
        for c, color in zip(sm.xrange(nb_classes), colors):
            if c in ids_in_map:
                class_mask = (arr == c)
                segmap_drawn[class_mask] = color
        if return_foreground_mask:
            background_class_id = 0 if background_class_id is None else background_class_id
            foreground_mask = (arr != background_class_id)
        else:
            foreground_mask = None
        if size is not None:
            segmap_drawn = imresize_single_image(segmap_drawn, size, interpolation="nearest")
            if foreground_mask is not None:
                # resize the bool mask via uint8, then threshold back to bool
                foreground_mask = imresize_single_image(
                    foreground_mask.astype(np.uint8), size, interpolation="nearest") > 0
        if foreground_mask is not None:
            return segmap_drawn, foreground_mask
        return segmap_drawn
    def draw_on_image(self, image, alpha=0.75, resize="segmentation_map", background_threshold=0.01,
                      background_class_id=None, colors=None, draw_background=False):
        """
        Draw the segmentation map as an overlay over an image.

        Parameters
        ----------
        image : (H,W,3) ndarray
            Image onto which to draw the segmentation map. Dtype is expected to be uint8.
        alpha : float, optional
            Alpha/opacity value to use for the mixing of image and segmentation map.
            Higher values mean that the segmentation map will be more visible and the image less visible.
        resize : {'segmentation_map', 'image'}, optional
            In case of size differences between the image and segmentation map, either the image or
            the segmentation map can be resized. This parameter controls which of the two will be
            resized to the other's size.
        background_threshold : float, optional
            See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
        background_class_id : None or int, optional
            See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
        colors : None or list of tuple of int, optional
            Colors to use. One for each class to draw. If None, then default colors will be used.
        draw_background : bool, optional
            If True, the background will be drawn like any other class.
            If False, the background will not be drawn, i.e. the respective background pixels
            will be identical with the image's RGB color at the corresponding spatial location
            and no color overlay will be applied.

        Returns
        -------
        mix : (H,W,3) ndarray
            Rendered overlays (dtype is uint8).
        """
        # assert RGB image
        do_assert(image.ndim == 3)
        do_assert(image.shape[2] == 3)
        do_assert(image.dtype.type == np.uint8)
        do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
        do_assert(resize in ["segmentation_map", "image"])
        if resize == "image":
            image = imresize_single_image(image, self.arr.shape[0:2], interpolation="cubic")
        segmap_drawn, foreground_mask = self.draw(
            background_threshold=background_threshold,
            background_class_id=background_class_id,
            size=image.shape[0:2] if resize == "segmentation_map" else None,
            colors=colors,
            return_foreground_mask=True
        )
        if draw_background:
            # blend everywhere, including background pixels
            mix = np.clip(
                (1-alpha) * image + alpha * segmap_drawn,
                0,
                255
            ).astype(np.uint8)
        else:
            # blend only at foreground locations; background pixels keep the
            # unmodified image values
            foreground_mask = foreground_mask[..., np.newaxis]
            mix = np.zeros_like(image)
            mix += (~foreground_mask).astype(np.uint8) * image
            mix += foreground_mask.astype(np.uint8) * np.clip(
                (1-alpha) * image + alpha * segmap_drawn,
                0,
                255
            ).astype(np.uint8)
        return mix
def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
"""
Pad the segmentation map on its top/right/bottom/left side.
Parameters
----------
top : int, optional
Amount of pixels to add at the top side of the segmentation map. Must be 0 or greater.
right : int, optional
Amount of pixels to add at the right side of the segmentation map. Must be 0 or greater.
bottom : int, optional
Amount of pixels to add at the bottom side of the segmentation map. Must be 0 or greater.
left : int, optional
Amount of pixels to add at the left side of the segmentation map. Must be 0 or greater.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
Returns
-------
segmap : imgaug.SegmentationMapOnImage
Padded segmentation map of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
"""
arr_padded = pad(self.arr, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)
segmap.input_was = self.input_was
return segmap
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
"""
Pad the segmentation map on its sides so that its matches a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
return_pad_amounts : bool, optional
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
segmap : imgaug.SegmentationMapOnImage
Padded segmentation map as SegmentationMapOnImage object.
pad_amounts : tuple of int
Amounts by which the segmentation map was padded on each side, given as a
tuple ``(top, right, bottom, left)``.
This tuple is only returned if `return_pad_amounts` was set to True.
"""
arr_padded, pad_amounts = pad_to_aspect_ratio(self.arr, aspect_ratio=aspect_ratio, mode=mode, cval=cval,
return_pad_amounts=True)
segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)
segmap.input_was = self.input_was
if return_pad_amounts:
return segmap, pad_amounts
else:
return segmap
def scale(self, sizes, interpolation="cubic"):
"""
Rescale the segmentation map array to the provided size given the provided interpolation.
Parameters
----------
sizes : float or iterable of int or iterable of float
New size of the array in ``(height, width)``. See :func:`imgaug.imresize_single_image` for details.
interpolation : None or str or int, optional
The interpolation to use during resize. See :func:`imgaug.imresize_single_image` for details.
Note: The segmentation map is internally stored as multiple float-based heatmaps,
making smooth interpolations potentially more reasonable than nearest neighbour
interpolation.
Returns
-------
segmap : imgaug.SegmentationMapOnImage
Rescaled segmentation map object.
"""
arr_rescaled = imresize_single_image(self.arr, sizes, interpolation=interpolation)
# cubic interpolation can lead to values outside of [0.0, 1.0],
# see https://github.com/opencv/opencv/issues/7195
# TODO area interpolation too?
arr_rescaled = np.clip(arr_rescaled, 0.0, 1.0)
segmap = SegmentationMapOnImage(arr_rescaled, shape=self.shape)
segmap.input_was = self.input_was
return segmap
def to_heatmaps(self, only_nonempty=False, not_none_if_no_nonempty=False):
"""
Convert segmentation map to heatmaps object.
Each segmentation map class will be represented as a single heatmap channel.
Parameters
----------
only_nonempty : bool, optional
If True, then only heatmaps for classes that appear in the segmentation map will be
generated. Additionally, a list of these class ids will be returned.
not_none_if_no_nonempty : bool, optional
If `only_nonempty` is True and for a segmentation map no channel was non-empty,
this function usually returns None as the heatmaps object. If however this parameter
is set to True, a heatmaps object with one channel (representing class 0)
will be returned as a fallback in these cases.
Returns
-------
imgaug.HeatmapsOnImage or None
Segmentation map as a heatmaps object.
If `only_nonempty` was set to True and no class appeared in the segmentation map,
then this is None.
class_indices : list of int
Class ids (0 to C-1) of the classes that were actually added to the heatmaps.
Only returned if `only_nonempty` was set to True.
"""
if not only_nonempty:
return HeatmapsOnImage.from_0to1(self.arr, self.shape, min_value=0.0, max_value=1.0)
else:
nonempty_mask = np.sum(self.arr, axis=(0, 1)) > 0 + 1e-4
if np.sum(nonempty_mask) == 0:
if not_none_if_no_nonempty:
nonempty_mask[0] = True
else:
return None, []
class_indices = np.arange(self.arr.shape[2])[nonempty_mask]
channels = self.arr[..., class_indices]
return HeatmapsOnImage(channels, self.shape, min_value=0.0, max_value=1.0), class_indices
@staticmethod
def from_heatmaps(heatmaps, class_indices=None, nb_classes=None):
"""
Convert heatmaps to segmentation map.
Assumes that each class is represented as a single heatmap channel.
Parameters
----------
heatmaps : imgaug.HeatmapsOnImage
Heatmaps to convert.
class_indices : None or list of int, optional
List of class indices represented by each heatmap channel. See also the
secondary output of :func:`imgaug.SegmentationMapOnImage.to_heatmap`.
If this is provided, it must have the same length as the number of heatmap channels.
nb_classes : None or int, optional
Number of classes. Must be provided if class_indices is set.
Returns
-------
imgaug.SegmentationMapOnImage
Segmentation map derived from heatmaps.
"""
if class_indices is None:
return SegmentationMapOnImage(heatmaps.arr_0to1, shape=heatmaps.shape)
else:
do_assert(nb_classes is not None)
do_assert(min(class_indices) >= 0)
do_assert(max(class_indices) < nb_classes)
do_assert(len(class_indices) == heatmaps.arr_0to1.shape[2])
arr_0to1 = heatmaps.arr_0to1
arr_0to1_full = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1], nb_classes), dtype=np.float32)
class_indices_set = set(class_indices)
heatmap_channel = 0
for c in sm.xrange(nb_classes):
if c in class_indices_set:
arr_0to1_full[:, :, c] = arr_0to1[:, :, heatmap_channel]
heatmap_channel += 1
return SegmentationMapOnImage(arr_0to1_full, shape=heatmaps.shape)
    def copy(self):
        """
        Create a shallow copy of the segmentation map object.

        NOTE(review): currently implemented as an alias for :func:`deepcopy`,
        so the returned object does not share its array with the original.

        Returns
        -------
        imgaug.SegmentationMapOnImage
            Shallow copy.

        """
        return self.deepcopy()
def deepcopy(self):
"""
Create a deep copy of the segmentation map object.
Returns
-------
imgaug.SegmentationMapOnImage
Deep copy.
"""
segmap = SegmentationMapOnImage(self.arr, shape=self.shape, nb_classes=self.nb_classes)
segmap.input_was = self.input_was
return segmap
############################
# Background augmentation
############################
class Batch(object):
    """
    Class encapsulating a batch before and after augmentation.

    Parameters
    ----------
    images : None or (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
        The images to augment.

    heatmaps : None or list of imgaug.HeatmapsOnImage
        The heatmaps to augment.

    segmentation_maps : None or list of SegmentationMapOnImage
        The segmentation maps to augment.

    keypoints : None or list of KeypointOnImage
        The keypoints to augment.

    bounding_boxes : None or list of BoundingBoxesOnImage
        The bounding boxes to augment.

    data
        Additional data that is saved in the batch and may be read out
        after augmentation. This could e.g. contain filepaths to each image
        in `images`. As this object is usually used for background
        augmentation with multiple processes, the augmented Batch objects might
        not be returned in the original order, making this information useful.

    """

    def __init__(self, images=None, heatmaps=None, segmentation_maps=None, keypoints=None, bounding_boxes=None,
                 data=None):
        # the *_aug attributes hold the augmented counterparts and are filled
        # in during augmentation
        self.images = images
        self.images_aug = None
        self.heatmaps = heatmaps
        self.heatmaps_aug = None
        self.segmentation_maps = segmentation_maps
        self.segmentation_maps_aug = None
        self.keypoints = keypoints
        self.keypoints_aug = None
        self.bounding_boxes = bounding_boxes
        self.bounding_boxes_aug = None
        self.data = data

    def deepcopy(self):
        """Create a deep copy of this batch, including all augmented data."""
        def _clone_images(images):
            # images may be None, a single ndarray or a list of ndarrays
            if images is None:
                return None
            if is_np_array(images):
                return np.copy(images)
            do_assert(is_iterable(images))
            do_assert(all([is_np_array(image) for image in images]))
            return [np.copy(image) for image in images]

        def _clone_augmentables(augmentables, expected_class):
            # augmentables may be None or a list of `expected_class` instances
            if augmentables is None:
                return None
            do_assert(is_iterable(augmentables))
            do_assert(all([isinstance(augmentable, expected_class) for augmentable in augmentables]))
            return [augmentable.deepcopy() for augmentable in augmentables]

        batch = Batch(
            images=_clone_images(self.images),
            heatmaps=_clone_augmentables(self.heatmaps, HeatmapsOnImage),
            segmentation_maps=_clone_augmentables(self.segmentation_maps, SegmentationMapOnImage),
            keypoints=_clone_augmentables(self.keypoints, KeypointsOnImage),
            bounding_boxes=_clone_augmentables(self.bounding_boxes, BoundingBoxesOnImage),
            data=copy.deepcopy(self.data)
        )
        batch.images_aug = _clone_images(self.images_aug)
        batch.heatmaps_aug = _clone_augmentables(self.heatmaps_aug, HeatmapsOnImage)
        batch.segmentation_maps_aug = _clone_augmentables(self.segmentation_maps_aug, SegmentationMapOnImage)
        batch.keypoints_aug = _clone_augmentables(self.keypoints_aug, KeypointsOnImage)
        batch.bounding_boxes_aug = _clone_augmentables(self.bounding_boxes_aug, BoundingBoxesOnImage)
        return batch
class BatchLoader(object):
    """
    Class to load batches in the background.

    Loaded batches can be accessed using :attr:`imgaug.BatchLoader.queue`.

    Parameters
    ----------
    load_batch_func : callable or generator
        Generator or generator function (i.e. function that yields Batch objects)
        or a function that returns a list of Batch objects.
        Background loading automatically stops when the last batch was yielded or the
        last batch in the list was reached.

    queue_size : int, optional
        Maximum number of batches to store in the queue. May be set higher
        for small images and/or small batches.

    nb_workers : int, optional
        Number of workers to run in the background.

    threaded : bool, optional
        Whether to run the background processes using threads (True) or full processes (False).

    """

    def __init__(self, load_batch_func, queue_size=50, nb_workers=1, threaded=True):
        do_assert(queue_size >= 2, "Queue size for BatchLoader must be at least 2, got %d." % (queue_size,))
        do_assert(nb_workers >= 1, "Number of workers for BatchLoader must be at least 1, got %d" % (nb_workers,))
        # Batches travel: worker -> _queue_internal -> main worker thread -> queue.
        # The requested total capacity is split between the two queues.
        self._queue_internal = multiprocessing.Queue(queue_size//2)
        self.queue = multiprocessing.Queue(queue_size//2)
        self.join_signal = multiprocessing.Event()
        self.workers = []
        self.threaded = threaded
        # per-worker seeds; only used in process mode (threads share the
        # parent's random state, see args below)
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            if threaded:
                worker = threading.Thread(
                    target=self._load_batches,
                    args=(load_batch_func, self._queue_internal, self.join_signal, None)
                )
            else:
                worker = multiprocessing.Process(
                    target=self._load_batches,
                    args=(load_batch_func, self._queue_internal, self.join_signal, seeds[i])
                )
            worker.daemon = True
            worker.start()
            self.workers.append(worker)
        # single thread that forwards batches from the internal to the public queue
        self.main_worker_thread = threading.Thread(
            target=self._main_worker,
            args=()
        )
        self.main_worker_thread.daemon = True
        self.main_worker_thread.start()

    def count_workers_alive(self):
        # Number of loader workers (threads or processes) still running.
        return sum([int(worker.is_alive()) for worker in self.workers])

    def all_finished(self):
        """
        Determine whether the workers have finished the loading process.

        Returns
        -------
        out : bool
            True if all workers have finished. Else False.

        """
        return self.count_workers_alive() == 0

    def _main_worker(self):
        # Forwards pickled batches from the internal queue to the public one.
        # Workers signal their own shutdown by putting an empty string.
        workers_running = self.count_workers_alive()
        while workers_running > 0 and not self.join_signal.is_set():
            # wait for a new batch in the source queue and load it
            try:
                batch_str = self._queue_internal.get(timeout=0.1)
                if batch_str == "":
                    workers_running -= 1
                else:
                    self.queue.put(batch_str)
            except QueueEmpty:
                time.sleep(0.01)
            except (EOFError, BrokenPipeError):
                break
            workers_running = self.count_workers_alive()
        # All workers have finished, move the remaining entries from internal to external queue
        while True:
            try:
                batch_str = self._queue_internal.get(timeout=0.005)
                if batch_str != "":
                    self.queue.put(batch_str)
            except QueueEmpty:
                break
            except (EOFError, BrokenPipeError):
                break
        # a pickled None marks the end of the stream for consumers
        self.queue.put(pickle.dumps(None, protocol=-1))
        time.sleep(0.01)

    def _load_batches(self, load_batch_func, queue_internal, join_signal, seedval):
        # Worker loop: generate batches and push them (pickled) into the
        # internal queue until the source is exhausted or join_signal is set.
        if seedval is not None:
            random.seed(seedval)
            np.random.seed(seedval)
            seed(seedval)
        try:
            gen = load_batch_func() if not is_generator(load_batch_func) else load_batch_func
            for batch in gen:
                do_assert(isinstance(batch, Batch),
                          "Expected batch returned by load_batch_func to be of class imgaug.Batch, got %s." % (
                              type(batch),))
                batch_pickled = pickle.dumps(batch, protocol=-1)
                # retry with a short timeout so a set join_signal is noticed
                # even while the queue is full
                while not join_signal.is_set():
                    try:
                        queue_internal.put(batch_pickled, timeout=0.005)
                        break
                    except QueueFull:
                        pass
                if join_signal.is_set():
                    break
        except Exception:
            traceback.print_exc()
        finally:
            # an empty string tells _main_worker that this worker is done
            queue_internal.put("")
        time.sleep(0.01)

    def terminate(self):
        """Stop all workers."""
        if not self.join_signal.is_set():
            self.join_signal.set()
        # give minimal time to put generated batches in queue and gracefully shut down
        time.sleep(0.01)
        if self.main_worker_thread.is_alive():
            self.main_worker_thread.join()
        if self.threaded:
            for worker in self.workers:
                if worker.is_alive():
                    worker.join()
        else:
            for worker in self.workers:
                if worker.is_alive():
                    worker.terminate()
                    worker.join()
            # wait until all workers are fully terminated
            while not self.all_finished():
                time.sleep(0.001)
        # empty queue until at least one element can be added and place None as signal that BL finished
        if self.queue.full():
            self.queue.get()
        self.queue.put(pickle.dumps(None, protocol=-1))
        time.sleep(0.01)
        # clean the queue, this reportedly prevents hanging threads
        while True:
            try:
                self._queue_internal.get(timeout=0.005)
            except QueueEmpty:
                break
        # NOTE(review): _closed is a private attribute of multiprocessing.Queue;
        # relies on CPython implementation details
        if not self._queue_internal._closed:
            self._queue_internal.close()
        if not self.queue._closed:
            self.queue.close()
        self._queue_internal.join_thread()
        self.queue.join_thread()
        time.sleep(0.025)

    def __del__(self):
        # ask workers to stop if the loader is garbage collected
        if not self.join_signal.is_set():
            self.join_signal.set()
class BackgroundAugmenter(object):
    """
    Class to augment batches in the background (while training on the GPU).

    This is a wrapper around the multiprocessing module.

    Parameters
    ----------
    batch_loader : BatchLoader or multiprocessing.Queue
        BatchLoader object that loads the data fed into the BackgroundAugmenter, or alternatively a Queue.
        If a Queue, then it must be made sure that a final ``None`` in the Queue signals that the loading is
        finished and no more batches will follow. Otherwise the BackgroundAugmenter will wait forever for the next
        batch.

    augseq : Augmenter
        An augmenter to apply to all loaded images.
        This may be e.g. a Sequential to apply multiple augmenters.

    queue_size : int
        Size of the queue that is used to temporarily save the augmentation
        results. Larger values offer the background processes more room
        to save results when the main process doesn't load much, i.e. they
        can lead to smoother and faster training. For large images, high
        values can block a lot of RAM though.

    nb_workers : 'auto' or int
        Number of background workers to spawn.
        If ``auto``, it will be set to ``C-1``, where ``C`` is the number of CPU cores.

    """

    def __init__(self, batch_loader, augseq, queue_size=50, nb_workers="auto"):
        do_assert(queue_size > 0)
        self.augseq = augseq
        self.queue_source = batch_loader if isinstance(batch_loader, multiprocessing.queues.Queue) else batch_loader.queue
        self.queue_result = multiprocessing.Queue(queue_size)
        if nb_workers == "auto":
            try:
                nb_workers = multiprocessing.cpu_count()
            except (ImportError, NotImplementedError):
                # cpu_count() can fail on some platforms; fall back to one worker
                nb_workers = 1
            # try to reserve at least one core for the main process
            nb_workers = max(1, nb_workers - 1)
        else:
            do_assert(nb_workers >= 1)
        self.nb_workers = nb_workers
        self.workers = []
        # counts workers that signalled end-of-stream via a pickled None
        self.nb_workers_finished = 0
        # one deterministic seed per worker process
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            worker = multiprocessing.Process(
                target=self._augment_images_worker,
                args=(augseq, self.queue_source, self.queue_result, seeds[i])
            )
            worker.daemon = True
            worker.start()
            self.workers.append(worker)

    def all_finished(self):
        # True once every worker has signalled completion.
        return self.nb_workers_finished == self.nb_workers

    def get_batch(self):
        """
        Returns a batch from the queue of augmented batches.

        If workers are still running and there are no batches in the queue,
        it will automatically wait for the next batch.

        Returns
        -------
        out : None or imgaug.Batch
            One batch or None if all workers have finished.

        """
        if self.all_finished():
            return None
        batch_str = self.queue_result.get()
        batch = pickle.loads(batch_str)
        if batch is not None:
            return batch
        else:
            # a None marks one worker's end of stream
            self.nb_workers_finished += 1
            if self.nb_workers_finished >= self.nb_workers:
                try:
                    self.queue_source.get(timeout=0.001)  # remove the None from the source queue
                except QueueEmpty:
                    pass
                return None
            else:
                # other workers are still running; recurse until a real batch
                # or the overall end of the stream is found
                return self.get_batch()

    def _augment_images_worker(self, augseq, queue_source, queue_result, seedval):
        """
        Augment endlessly images in the source queue.

        This is a worker function for that endlessly queries the source queue (input batches),
        augments batches in it and sends the result to the output queue.

        """
        # seed every involved random number source for reproducibility
        np.random.seed(seedval)
        random.seed(seedval)
        augseq.reseed(seedval)
        seed(seedval)
        loader_finished = False
        while not loader_finished:
            # wait for a new batch in the source queue and load it
            try:
                batch_str = queue_source.get(timeout=0.1)
                batch = pickle.loads(batch_str)
                if batch is None:
                    loader_finished = True
                    # put it back in so that other workers know that the loading queue is finished
                    queue_source.put(pickle.dumps(None, protocol=-1))
                else:
                    batch_aug = list(augseq.augment_batches([batch], background=False))[0]
                    # send augmented batch to output queue
                    batch_str = pickle.dumps(batch_aug, protocol=-1)
                    queue_result.put(batch_str)
            except QueueEmpty:
                time.sleep(0.01)
        # a pickled None tells the consumer that this worker is done
        queue_result.put(pickle.dumps(None, protocol=-1))
        time.sleep(0.01)

    def terminate(self):
        """
        Terminates all background processes immediately.

        This will also free their RAM.

        """
        for worker in self.workers:
            if worker.is_alive():
                worker.terminate()
        # mark everything as finished so get_batch() returns None immediately
        self.nb_workers_finished = len(self.workers)
        # NOTE(review): _closed is a private attribute of multiprocessing.Queue;
        # relies on CPython implementation details
        if not self.queue_result._closed:
            self.queue_result.close()
        time.sleep(0.01)

    def __del__(self):
        # short grace period so in-flight queue operations can complete
        time.sleep(0.1)
        self.terminate()
# Add height/width asserts to heatmap init
from __future__ import print_function, division, absolute_import
import random
import math
import copy
import numbers
import multiprocessing
import threading
import traceback
import sys
import os
import time
import json
import types
import numpy as np
import cv2
import imageio
import scipy.spatial.distance
import six
import six.moves as sm
import skimage.draw
import skimage.measure
import collections
import matplotlib.pyplot as plt
import shapely
import shapely.geometry
import shapely.ops
from PIL import Image as PIL_Image, ImageDraw as PIL_ImageDraw, ImageFont as PIL_ImageFont
if sys.version_info[0] == 2:
import cPickle as pickle
from Queue import Empty as QueueEmpty, Full as QueueFull
import socket
BrokenPipeError = socket.error
elif sys.version_info[0] == 3:
import pickle
from queue import Empty as QueueEmpty, Full as QueueFull
xrange = range
# sentinel used by augmenters to denote "all channels"/"all options"
ALL = "ALL"

# directory containing this file; used to locate the bundled asset files below
FILE_DIR = os.path.dirname(os.path.abspath(__file__))

# filepath to the quokka image, its annotations and depth map
QUOKKA_FP = os.path.join(FILE_DIR, "quokka.jpg")
QUOKKA_ANNOTATIONS_FP = os.path.join(FILE_DIR, "quokka_annotations.json")
QUOKKA_DEPTH_MAP_HALFRES_FP = os.path.join(FILE_DIR, "quokka_depth_map_halfres.png")

# filepath to the font shipped with the library
DEFAULT_FONT_FP = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    "DejaVuSans.ttf"
)

# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)

# numpy dtype families, grouped for dtype checks
NP_FLOAT_TYPES = set(np.sctypes["float"])
NP_INT_TYPES = set(np.sctypes["int"])
NP_UINT_TYPES = set(np.sctypes["uint"])

IMSHOW_BACKEND_DEFAULT = "matplotlib"

# interpolation identifiers accepted by the image resizing helpers
IMRESIZE_VALID_INTERPOLATIONS = ["nearest", "linear", "area", "cubic",
                                 cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
def is_np_array(val):
    """
    Check whether a variable is a numpy array.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a numpy array. Otherwise False.

    """
    # np.generic is deliberately not included in the check, as that would also
    # match scalar numpy values, which are not arrays.
    if isinstance(val, np.ndarray):
        return True
    return False
def is_single_integer(val):
    """
    Check whether a variable is a single integer.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is an integer. Otherwise False.

    """
    # bool is a subclass of int, so it must be excluded explicitly
    if isinstance(val, bool):
        return False
    return isinstance(val, numbers.Integral)
def is_single_float(val):
    """
    Check whether a variable is a single float.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a float. Otherwise False.

    """
    # floats are Reals that are neither booleans nor integers
    if isinstance(val, bool):
        return False
    if isinstance(val, numbers.Integral):
        return False
    return isinstance(val, numbers.Real)
def is_single_number(val):
    """
    Check whether a variable is a single number, i.e. an integer or a float.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a number. Otherwise False.

    """
    if is_single_integer(val):
        return True
    return is_single_float(val)
def is_iterable(val):
    """
    Check whether a variable is iterable.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is an iterable. Otherwise False.

    """
    # collections.Iterable was deprecated in Python 3.3 and removed in 3.10;
    # import it from collections.abc where available, with a fallback for
    # Python 2 compatibility.
    try:
        from collections.abc import Iterable
    except ImportError:  # Python 2
        from collections import Iterable
    return isinstance(val, Iterable)
# TODO convert to is_single_string() or rename is_single_integer/float/number()
def is_string(val):
    """
    Check whether a variable is a string.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a string. Otherwise False.

    """
    # six.string_types covers str and, on Python 2, unicode as well
    if isinstance(val, six.string_types):
        return True
    return False
def is_single_bool(val):
    """
    Check whether a variable is a boolean.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a boolean. Otherwise False.

    """
    # isinstance() is the idiomatic type check; since bool cannot be
    # subclassed in Python, this is exactly equivalent to the previous
    # `type(val) == type(True)` comparison.
    return isinstance(val, bool)
def is_integer_array(val):
    """
    Check whether a variable is a numpy integer array.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a numpy integer array. Otherwise False.

    """
    if not isinstance(val, np.ndarray):
        return False
    return issubclass(val.dtype.type, np.integer)
def is_float_array(val):
    """
    Check whether a variable is a numpy float array.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a numpy float array. Otherwise False.

    """
    if not isinstance(val, np.ndarray):
        return False
    return issubclass(val.dtype.type, np.floating)
def is_callable(val):
    """
    Check whether a variable is a callable, e.g. a function.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a callable. Otherwise False.

    """
    # callable() was removed in Python 3.0/3.1 and reintroduced in 3.2;
    # fall back to checking for __call__ on those versions
    if sys.version_info[0] == 3 and sys.version_info[1] <= 2:
        return hasattr(val, '__call__')
    return callable(val)
def is_generator(val):
    """
    Check whether a variable is a generator.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a generator. Otherwise False.

    """
    if isinstance(val, types.GeneratorType):
        return True
    return False
def caller_name():
    """
    Return the name of the caller, e.g. a function.

    Returns
    -------
    str
        The name of the caller as a string.

    """
    # frame 1 is the direct caller of this function
    calling_frame = sys._getframe(1)
    return calling_frame.f_code.co_name
def seed(seedval):
    """
    Set the seed used by the global random state and thereby all randomness
    in the library.

    This random state is by default used by all augmenters. Under special
    circumstances (e.g. when an augmenter is switched to deterministic mode),
    the global random state is replaced by another -- local -- one.
    The replacement is dependent on the global random state.

    Parameters
    ----------
    seedval : int
        The seed to use.

    """
    CURRENT_RANDOM_STATE.seed(seedval)
def current_random_state():
    """
    Return the current/global random state of the library.

    Returns
    -------
    numpy.random.RandomState
        The current/global random state.

    """
    return CURRENT_RANDOM_STATE
def new_random_state(seed=None, fully_random=False):
    """
    Return a new random state.

    Parameters
    ----------
    seed : None or int, optional
        Optional seed value to use.
        The same datatypes are allowed as for ``numpy.random.RandomState(seed)``.

    fully_random : bool, optional
        Whether to use numpy's random initialization for the
        RandomState (used if set to True). If False, a seed is sampled from
        the global random state, which is a bit faster and hence the default.

    Returns
    -------
    numpy.random.RandomState
        The new random state.

    """
    if seed is None and not fully_random:
        # Sampling a seed from the global random state and seeding a new
        # RandomState with it is noticeably faster than plain RandomState().
        seed = CURRENT_RANDOM_STATE.randint(0, 10**6, 1)[0]
    return np.random.RandomState(seed)
def dummy_random_state():
    """
    Return a placeholder random state that is always seeded with 1.

    Returns
    -------
    numpy.random.RandomState
        The new random state.

    """
    return np.random.RandomState(seed=1)
def copy_random_state(random_state, force_copy=False):
    """
    Create a copy of a random state.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        The random state to copy.

    force_copy : bool, optional
        If True, this function will always create a copy of every random
        state. If False, it will not copy numpy's default random state,
        but all other random states.

    Returns
    -------
    rs_copy : numpy.random.RandomState
        The copied random state.

    """
    # Use an identity check for the np.random module singleton; `is` states
    # the intent directly instead of relying on default __eq__ semantics.
    if random_state is np.random and not force_copy:
        return random_state
    rs_copy = dummy_random_state()
    orig_state = random_state.get_state()
    rs_copy.set_state(orig_state)
    return rs_copy
def derive_random_state(random_state):
    """
    Create a new random state based on an existing random state or seed.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state or seed from which to derive the new random state.

    Returns
    -------
    numpy.random.RandomState
        Derived random state.

    """
    # convenience wrapper around derive_random_states() for the n=1 case
    return derive_random_states(random_state, n=1)[0]
# TODO use this everywhere instead of manual seed + create
def derive_random_states(random_state, n=1):
    """
    Create N new random states based on an existing random state or seed.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state or seed from which to derive new random states.

    n : int, optional
        Number of random states to derive.

    Returns
    -------
    list of numpy.random.RandomState
        Derived random states.

    """
    # draw one base seed, then offset it per derived state
    base_seed = random_state.randint(0, 10**6, 1)[0]
    states = []
    for offset in sm.xrange(n):
        states.append(new_random_state(base_seed + offset))
    return states
def forward_random_state(random_state):
    """
    Advance the internal state of a random state.

    This makes sure that future calls to the random_state will produce new random values.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state to forward.

    """
    # drawing a single sample is enough to advance the internal state
    random_state.uniform()
def _quokka_normalize_extract(extract):
    """
    Generate a normalized rectangle to be extracted from the standard quokka image.

    Parameters
    ----------
    extract : 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Unnormalized representation of the image subarea to be extracted.

            * If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)``
              will be extracted from the image.
            * If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
              and ``y2``.
            * If a BoundingBox, then that bounding box's area will be extracted from the image.
            * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
              and a shape matching the full image dimensions (i.e. (643, 960, *)). Then the
              one bounding box will be used similar to BoundingBox.

    Returns
    -------
    bb : imgaug.BoundingBox
        Normalized representation of the area to extract from the standard quokka image.

    """
    if extract == "square":
        return BoundingBox(x1=0, y1=0, x2=643, y2=643)
    if isinstance(extract, tuple) and len(extract) == 4:
        x1, y1, x2, y2 = extract
        return BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2)
    if isinstance(extract, BoundingBox):
        return extract
    if isinstance(extract, BoundingBoxesOnImage):
        do_assert(len(extract.bounding_boxes) == 1)
        do_assert(extract.shape[0:2] == (643, 960))
        return extract.bounding_boxes[0]
    raise Exception(
        "Expected 'square' or tuple of four entries or BoundingBox or BoundingBoxesOnImage "
        + "for parameter 'extract', got %s." % (type(extract),)
    )
def _compute_resized_shape(from_shape, to_shape):
    """
    Computes the intended new shape of an image-like array after resizing.

    Parameters
    ----------
    from_shape : tuple or ndarray
        Old shape of the array. Usually expected to be a tuple of form ``(H, W)`` or ``(H, W, C)`` or
        alternatively an array with two or three dimensions.

    to_shape : None or tuple of ints or tuple of floats or int or float or ndarray
        New shape of the array.

            * If None, then `from_shape` will be used as the new shape.
            * If an int ``V``, then the new shape will be ``(V, V, [C])``, where ``C`` will be added if it
              is part of `from_shape`.
            * If a float ``V``, then the new shape will be ``(H*V, W*V, [C])``, where ``H`` and ``W`` are the old
              height/width.
            * If a tuple ``(H', W', [C'])`` of ints, then ``H'`` and ``W'`` will be used as the new height
              and width.
            * If a tuple ``(H', W', [C'])`` of floats (except ``C``), then ``H'`` and ``W'`` will
              be used as the new height and width.
            * If a numpy array, then the array's shape will be used.

    Returns
    -------
    to_shape_computed : tuple of int
        New shape.

    """
    # arrays may be passed instead of shape tuples; use their shapes then
    if is_np_array(from_shape):
        from_shape = from_shape.shape
    if is_np_array(to_shape):
        to_shape = to_shape.shape
    to_shape_computed = list(from_shape)
    if to_shape is None:
        pass
    elif isinstance(to_shape, tuple):
        do_assert(len(from_shape) in [2, 3])
        do_assert(len(to_shape) in [2, 3])
        # if both shapes carry a channel axis it must match; if only the
        # target does, adopt its channel count
        if len(from_shape) == 3 and len(to_shape) == 3:
            do_assert(from_shape[2] == to_shape[2])
        elif len(to_shape) == 3:
            to_shape_computed.append(to_shape[2])
        do_assert(all([v is None or is_single_number(v) for v in to_shape[0:2]]),
                  "Expected the first two entries in to_shape to be None or numbers, "
                  + "got types %s." % (str([type(v) for v in to_shape[0:2]]),))
        for i, from_shape_i in enumerate(from_shape[0:2]):
            if to_shape[i] is None:
                # None keeps the original size of that axis
                to_shape_computed[i] = from_shape_i
            elif is_single_integer(to_shape[i]):
                # ints are absolute target sizes
                to_shape_computed[i] = to_shape[i]
            else:  # float, i.e. a relative scale factor
                to_shape_computed[i] = int(np.round(from_shape_i * to_shape[i]))
    elif is_single_integer(to_shape) or is_single_float(to_shape):
        # a single number applies to both height and width
        to_shape_computed = _compute_resized_shape(from_shape, (to_shape, to_shape))
    else:
        raise Exception("Expected to_shape to be None or ndarray or tuple of floats or tuple of ints or single int "
                        + "or single float, got %s." % (type(to_shape),))
    return tuple(to_shape_computed)
def quokka(size=None, extract=None):
    """
    Return an image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        Size of the output image. Input into :func:`imgaug.imresize_single_image`.
        Usually expected to be a tuple ``(H, W)``, where ``H`` is the desired height
        and ``W`` is the width. If None, then the image will not be resized.

    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea of the quokka image to extract:

            * If None, then the whole image will be used.
            * If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)`` will
              be extracted from the image.
            * If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
              and ``y2``.
            * If a BoundingBox, then that bounding box's area will be extracted from the image.
            * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
              and a shape matching the full image dimensions (i.e. ``(643, 960, *)``). Then the
              one bounding box will be used similar to BoundingBox.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.

    """
    image = imageio.imread(QUOKKA_FP, pilmode="RGB")
    if extract is not None:
        region = _quokka_normalize_extract(extract)
        image = region.extract_from_image(image)
    if size is not None:
        target_shape = _compute_resized_shape(image.shape, size)
        image = imresize_single_image(image, target_shape[0:2])
    return image
def quokka_square(size=None):
    """
    Return a (square) image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        Size of the output image. Input into :func:`imgaug.imresize_single_image`.
        Usually expected to be a tuple ``(H, W)``, where ``H`` is the desired height
        and ``W`` is the width. If None, then the image will not be resized.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.

    """
    # simply the standard quokka image with the square subarea extracted
    return quokka(size=size, extract="square")
def quokka_heatmap(size=None, extract=None):
    """
    Return a heatmap (here: depth map) for the standard example quokka image.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        See :func:`imgaug.quokka`.

    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        See :func:`imgaug.quokka`.

    Returns
    -------
    result : imgaug.HeatmapsOnImage
        Depth map as an heatmap object. Values close to 0.0 denote objects that are close to
        the camera. Values close to 1.0 denote objects that are furthest away (among all shown
        objects).

    """
    depth = imageio.imread(QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode="RGB")
    # the stored depth map has half the resolution of the quokka image
    depth = imresize_single_image(depth, (643, 960), interpolation="cubic")
    if extract is not None:
        region = _quokka_normalize_extract(extract)
        depth = region.extract_from_image(depth)
    target_size = size if size is not None else depth.shape[0:2]
    resized_shape = _compute_resized_shape(depth.shape, target_size)
    depth = imresize_single_image(depth, resized_shape[0:2])
    # depth map was saved as 3-channel RGB; take one channel
    depth_0to1 = depth[..., 0].astype(np.float32) / 255.0
    # depth map was saved with 0 being furthest away; invert so that close
    # objects map to ~0.0 and far objects to ~1.0
    depth_0to1 = 1 - depth_0to1
    return HeatmapsOnImage(depth_0to1, shape=depth_0to1.shape[0:2] + (3,))
def quokka_segmentation_map(size=None, extract=None):
    """
    Returns a segmentation map for the standard example quokka image.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        See :func:`imgaug.quokka`.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        See :func:`imgaug.quokka`.

    Returns
    -------
    result : imgaug.SegmentationMapOnImage
        Segmentation map object.

    """
    # The quokka's outline is stored as a polygon (list of xy-keypoints)
    # in the annotations JSON file.
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    xx = []
    yy = []
    for kp_dict in json_dict["polygons"][0]["keypoints"]:
        x = kp_dict["x"]
        y = kp_dict["y"]
        xx.append(x)
        yy.append(y)
    # Rasterize the polygon into a float mask at full image resolution
    # (643x960); pixels inside the polygon get value 1.0.
    img_seg = np.zeros((643, 960, 1), dtype=np.float32)
    rr, cc = skimage.draw.polygon(np.array(yy), np.array(xx), shape=img_seg.shape)
    img_seg[rr, cc] = 1.0
    # Extract BEFORE building the segmentation map object, so the map's
    # array and its associated image shape stay in sync.
    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img_seg = bb.extract_from_image(img_seg)
    # `shape` describes the corresponding *image* (3 channels), not the map.
    segmap = SegmentationMapOnImage(img_seg, shape=img_seg.shape[0:2] + (3,))
    if size is not None:
        # Resize the map itself, then patch the stored image shape to match.
        shape_resized = _compute_resized_shape(img_seg.shape, size)
        segmap = segmap.scale(shape_resized[0:2])
        segmap.shape = tuple(shape_resized[0:2]) + (3,)
    return segmap
def quokka_keypoints(size=None, extract=None):
    """
    Returns example keypoints on the standard example quokka image.

    The keypoints cover the eyes, ears, nose and paws.

    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the keypoints are placed. If None, then the keypoints
        are not projected to any new size (positions on the original image are used).
        Floats lead to relative size changes, ints to absolute sizes in pixels.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.

    Returns
    -------
    kpsoi : imgaug.KeypointsOnImage
        Example keypoints on the quokka image.

    """
    # When a subarea is extracted, all coordinates shift by its top-left corner.
    bb_extract = None
    offset_x = 0
    offset_y = 0
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1

    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)

    keypoints = [
        Keypoint(x=kp_dict["x"] - offset_x, y=kp_dict["y"] - offset_y)
        for kp_dict in json_dict["keypoints"]
    ]

    if bb_extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)  # full quokka image size
    kpsoi = KeypointsOnImage(keypoints, shape=shape)
    if size is not None:
        kpsoi = kpsoi.on(_compute_resized_shape(shape, size))
    return kpsoi
def quokka_bounding_boxes(size=None, extract=None):
    """
    Returns example bounding boxes on the standard example quokka image.

    Currently only a single bounding box is returned that covers the quokka.

    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the BBs are placed. If None, then the BBs
        are not projected to any new size (positions on the original image are used).
        Floats lead to relative size changes, ints to absolute sizes in pixels.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.

    Returns
    -------
    bbsoi : imgaug.BoundingBoxesOnImage
        Example BBs on the quokka image.

    """
    # When a subarea is extracted, all coordinates shift by its top-left corner.
    bb_extract = None
    offset_x = 0
    offset_y = 0
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1

    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)

    bbs = [
        BoundingBox(
            x1=bb_dict["x1"] - offset_x,
            y1=bb_dict["y1"] - offset_y,
            x2=bb_dict["x2"] - offset_x,
            y2=bb_dict["y2"] - offset_y
        )
        for bb_dict in json_dict["bounding_boxes"]
    ]

    if bb_extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)  # full quokka image size
    bbsoi = BoundingBoxesOnImage(bbs, shape=shape)
    if size is not None:
        bbsoi = bbsoi.on(_compute_resized_shape(shape, size))
    return bbsoi
def angle_between_vectors(v1, v2):
    """
    Returns the angle in radians between vectors `v1` and `v2`.

    From http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python

    Parameters
    ----------
    v1 : (N,) ndarray
        First vector.
    v2 : (N,) ndarray
        Second vector.

    Returns
    -------
    out : float
        Angle in radians.

    Examples
    --------
    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([0, 1, 0]))
    1.5707963267948966

    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([1, 0, 0]))
    0.0

    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([-1, 0, 0]))
    3.141592653589793

    """
    # Normalize both vectors to unit length, then take the arccos of their
    # dot product. The clip guards against values marginally outside
    # [-1, 1] caused by floating point error.
    unit1 = v1 / np.linalg.norm(v1)
    unit2 = v2 / np.linalg.norm(v2)
    cos_angle = np.clip(np.dot(unit1, unit2), -1.0, 1.0)
    return np.arccos(cos_angle)
# TODO is this used anywhere?
def compute_line_intersection_point(x1, y1, x2, y2, x3, y3, x4, y4):
    """
    Compute the intersection point of two lines.

    Taken from https://stackoverflow.com/a/20679579 .

    Parameters
    ----------
    x1 : number
        x coordinate of the first point on line 1. (The lines extends beyond this point.)
    y1 : number
        y coordinate of the first point on line 1. (The lines extends beyond this point.)
    x2 : number
        x coordinate of the second point on line 1. (The lines extends beyond this point.)
    y2 : number
        y coordinate of the second point on line 1. (The lines extends beyond this point.)
    x3 : number
        x coordinate of the first point on line 2. (The lines extends beyond this point.)
    y3 : number
        y coordinate of the first point on line 2. (The lines extends beyond this point.)
    x4 : number
        x coordinate of the second point on line 2. (The lines extends beyond this point.)
    y4 : number
        y coordinate of the second point on line 2. (The lines extends beyond this point.)

    Returns
    -------
    tuple of number or bool
        The coordinate of the intersection point as a tuple ``(x, y)``.
        If the lines are parallel (no intersection point or an infinite number of them), the result is False.

    """
    def _line_coeffs(p, q):
        # Represent the line through p and q as coefficients (a, b, c)
        # of the equation a*x + b*y = c.
        a = p[1] - q[1]
        b = q[0] - p[0]
        c = p[0] * q[1] - q[0] * p[1]
        return a, b, -c

    line_a = _line_coeffs((x1, y1), (x2, y2))
    line_b = _line_coeffs((x3, y3), (x4, y4))

    # Solve the 2x2 linear system via Cramer's rule; a zero determinant
    # means the lines are parallel.
    det = line_a[0] * line_b[1] - line_a[1] * line_b[0]
    if det == 0:
        return False
    det_x = line_a[2] * line_b[1] - line_a[1] * line_b[2]
    det_y = line_a[0] * line_b[2] - line_a[2] * line_b[0]
    return det_x / det, det_y / det
def draw_text(img, y, x, text, color=(0, 255, 0), size=25):
    """
    Draw text on an image.

    This uses by default DejaVuSans as its font, which is included in the
    library.

    Parameters
    ----------
    img : (H,W,3) ndarray
        The image array to draw text on.
        Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
    y : int
        y-coordinate of the top left corner of the text.
    x : int
        x-coordinate of the top left corner of the text.
    text : str
        The text to draw.
    color : iterable of int, optional
        Color of the text to draw. For RGB-images this is expected to be an RGB color.
    size : int, optional
        Font size of the text to draw.

    Returns
    -------
    img_np : (H,W,3) ndarray
        Input image with text drawn on it.

    """
    do_assert(img.dtype in [np.uint8, np.float32])
    input_dtype = img.dtype
    if img.dtype == np.float32:
        # NOTE(review): values are assumed to already be in [0.0, 255.0];
        # this cast truncates and does NOT clip -- confirm callers obey that.
        img = img.astype(np.uint8)
    # PIL does the actual text rendering; note PIL's text() takes (x, y).
    img = PIL_Image.fromarray(img)
    font = PIL_ImageFont.truetype(DEFAULT_FONT_FP, size)
    context = PIL_ImageDraw.Draw(img)
    context.text((x, y), text, fill=tuple(color), font=font)
    img_np = np.asarray(img)
    img_np.setflags(write=True)  # PIL/asarray returns read only array
    # Convert back to the caller's dtype (e.g. float32 input stays float32).
    if img_np.dtype != input_dtype:
        img_np = img_np.astype(input_dtype)
    return img_np
# TODO rename sizes to size?
def imresize_many_images(images, sizes=None, interpolation=None):
    """
    Resize many images to a specified size.

    Parameters
    ----------
    images : (N,H,W,[C]) ndarray or list of (H,W,[C]) ndarray
        Array of the images to resize.
        Expected to usually be of dtype uint8.
    sizes : float or iterable of int or iterable of float
        The new size of the images, given either as a fraction (a single float) or as
        a ``(height, width)`` tuple of two integers or as a ``(height fraction, width fraction)``
        tuple of two floats.
    interpolation : None or str or int, optional
        The interpolation to use during resize.
        If int, then expected to be one of:

            * ``cv2.INTER_NEAREST`` (nearest neighbour interpolation)
            * ``cv2.INTER_LINEAR`` (linear interpolation)
            * ``cv2.INTER_AREA`` (area interpolation)
            * ``cv2.INTER_CUBIC`` (cubic interpolation)

        If string, then expected to be one of:

            * ``nearest`` (identical to ``cv2.INTER_NEAREST``)
            * ``linear`` (identical to ``cv2.INTER_LINEAR``)
            * ``area`` (identical to ``cv2.INTER_AREA``)
            * ``cubic`` (identical to ``cv2.INTER_CUBIC``)

        If None, the interpolation will be chosen automatically. For size
        increases, area interpolation will be picked and for size decreases,
        linear interpolation will be picked.

    Returns
    -------
    result : (N,H',W',[C]) ndarray
        Array of the resized images.

    Examples
    --------
    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), 2.0)

    Converts 2 RGB images of height and width 16 to images of height and width 16*2 = 32.

    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (16, 32))

    Converts 2 RGB images of height and width 16 to images of height 16 and width 32.

    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (2.0, 4.0))

    Converts 2 RGB images of height and width 16 to images of height 32 and width 64.

    """
    # we just do nothing if the input contains zero images
    # one could also argue that an exception would be appropriate here
    if len(images) == 0:
        return images

    # verify that all input images have height/width > 0
    do_assert(
        all([image.shape[0] > 0 and image.shape[1] > 0 for image in images]),
        ("Cannot resize images, because at least one image has a height and/or width of zero. "
         + "Observed shapes were: %s.") % (str([image.shape for image in images]),)
    )

    # verify that sizes contains only values >0
    if is_single_number(sizes) and sizes <= 0:
        raise Exception(
            "Cannot resize to the target size %.8f, because the value is zero or lower than zero." % (sizes,))
    elif isinstance(sizes, tuple) and (sizes[0] <= 0 or sizes[1] <= 0):
        sizes_str = [
            "int %d" % (sizes[0],) if is_single_integer(sizes[0]) else "float %.8f" % (sizes[0],),
            "int %d" % (sizes[1],) if is_single_integer(sizes[1]) else "float %.8f" % (sizes[1],),
        ]
        sizes_str = "(%s, %s)" % (sizes_str[0], sizes_str[1])
        raise Exception(
            "Cannot resize to the target sizes %s. At least one value is zero or lower than zero." % (sizes_str,))

    # change afterward the validation to make the above error messages match the original input
    if is_single_number(sizes):
        sizes = (sizes, sizes)
    else:
        do_assert(len(sizes) == 2, "Expected tuple with exactly two entries, got %d entries." % (len(sizes),))
        do_assert(all([is_single_number(val) for val in sizes]),
                  "Expected tuple with two ints or floats, got types %s." % (str([type(val) for val in sizes]),))

    # if input is a list, call this function N times for N images
    # but check beforehand if all images have the same shape, then just convert to a single array and de-convert
    # afterwards
    if isinstance(images, list):
        nb_shapes = len(set([image.shape for image in images]))
        if nb_shapes == 1:
            return list(imresize_many_images(np.array(images), sizes=sizes, interpolation=interpolation))
        else:
            return [imresize_many_images(image[np.newaxis, ...], sizes=sizes, interpolation=interpolation)[0, ...]
                    for image in images]

    shape = images.shape
    do_assert(images.ndim in [3, 4], "Expected array of shape (N, H, W, [C]), got shape %s" % (str(shape),))
    nb_images = shape[0]
    im_height, im_width = shape[1], shape[2]
    nb_channels = shape[3] if images.ndim > 3 else None

    # fractional sizes are interpreted relative to the input height/width
    height, width = sizes[0], sizes[1]
    height = int(np.round(im_height * height)) if is_single_float(height) else height
    width = int(np.round(im_width * width)) if is_single_float(width) else width

    # nothing to do if the target size equals the current size
    if height == im_height and width == im_width:
        return np.copy(images)

    ip = interpolation
    do_assert(ip is None or ip in IMRESIZE_VALID_INTERPOLATIONS)
    if ip is None:
        # NOTE(review): this picks INTER_AREA for upscaling and INTER_LINEAR
        # for downscaling, consistent with the docstring above, although
        # OpenCV's own guidance recommends the opposite pairing -- confirm
        # this is intentional before changing it.
        if height > im_height or width > im_width:
            ip = cv2.INTER_AREA
        else:
            ip = cv2.INTER_LINEAR
    elif ip in ["nearest", cv2.INTER_NEAREST]:
        ip = cv2.INTER_NEAREST
    elif ip in ["linear", cv2.INTER_LINEAR]:
        ip = cv2.INTER_LINEAR
    elif ip in ["area", cv2.INTER_AREA]:
        ip = cv2.INTER_AREA
    else:  # if ip in ["cubic", cv2.INTER_CUBIC]:
        ip = cv2.INTER_CUBIC

    result_shape = (nb_images, height, width)
    if nb_channels is not None:
        result_shape = result_shape + (nb_channels,)
    result = np.zeros(result_shape, dtype=images.dtype)
    for img_idx in sm.xrange(nb_images):
        # TODO fallback to scipy here if image isn't uint8
        # note that cv2.resize() takes its target size as (width, height)
        result_img = cv2.resize(images[img_idx], (width, height), interpolation=ip)
        # cv2 removes the channel axis if input was (H, W, 1)
        # we re-add it (but only if input was not (H, W))
        if len(result_img.shape) == 2 and nb_channels is not None and nb_channels == 1:
            result_img = result_img[:, :, np.newaxis]
        result[img_idx] = result_img.astype(images.dtype)
    return result
def imresize_single_image(image, sizes, interpolation=None):
    """
    Resizes a single image.

    Parameters
    ----------
    image : (H,W,C) ndarray or (H,W) ndarray
        Array of the image to resize.
        Expected to usually be of dtype uint8.
    sizes : float or iterable of int or iterable of float
        See :func:`imgaug.imresize_many_images`.
    interpolation : None or str or int, optional
        See :func:`imgaug.imresize_many_images`.

    Returns
    -------
    out : (H',W',C) ndarray or (H',W') ndarray
        The resized image.

    """
    # Temporarily add a channel axis for (H,W) inputs so that the batched
    # resize function can handle the image; remove it again afterwards.
    had_no_channels = (image.ndim == 2)
    if had_no_channels:
        image = image[:, :, np.newaxis]
    do_assert(len(image.shape) == 3, image.shape)
    batch = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
    if had_no_channels:
        return np.squeeze(batch[0, :, :, 0])
    return batch[0, ...]
def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
    """
    Pad an image-like array on its top/right/bottom/left side.

    This function is a wrapper around :func:`numpy.pad`.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pad.
    top : int, optional
        Amount of pixels to add at the top side of the image. Must be 0 or greater.
    right : int, optional
        Amount of pixels to add at the right side of the image. Must be 0 or greater.
    bottom : int, optional
        Amount of pixels to add at the bottom side of the image. Must be 0 or greater.
    left : int, optional
        Amount of pixels to add at the left side of the image. Must be 0 or greater.
    mode : str, optional
        Padding mode to use. See :func:`numpy.pad` for details.
        In case of mode ``constant``, the parameter `cval` will be used as the ``constant_values``
        parameter to :func:`numpy.pad`.
        In case of mode ``linear_ramp``, the parameter `cval` will be used as the ``end_values``
        parameter to :func:`numpy.pad`.
    cval : number, optional
        Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

    Returns
    -------
    arr_pad : (H',W') ndarray or (H',W',C) ndarray
        Padded array with height ``H'=H+top+bottom`` and width ``W'=W+left+right``.

    """
    do_assert(arr.ndim in [2, 3])
    do_assert(top >= 0)
    do_assert(right >= 0)
    do_assert(bottom >= 0)
    do_assert(left >= 0)

    # Nothing to pad: return a copy to keep the "always a new array" contract.
    if top == 0 and right == 0 and bottom == 0 and left == 0:
        return np.copy(arr)

    pad_widths = [(top, bottom), (left, right)]
    if arr.ndim == 3:
        pad_widths.append((0, 0))  # never pad the channel axis

    # numpy.pad spells its fill-value argument differently per mode.
    if mode == "constant":
        return np.pad(arr, pad_widths, mode=mode, constant_values=cval)
    if mode == "linear_ramp":
        return np.pad(arr, pad_widths, mode=mode, end_values=cval)
    return np.pad(arr, pad_widths, mode=mode)
def compute_paddings_for_aspect_ratio(arr, aspect_ratio):
    """
    Compute the amount of pixels by which an array has to be padded to fulfill an aspect ratio.

    The aspect ratio is given as width/height.
    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array for which to compute pad amounts.
    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.

    Returns
    -------
    result : tuple of int
        Required padding amounts to reach the target aspect ratio, given as a tuple
        of the form ``(top, right, bottom, left)``.

    """
    do_assert(arr.ndim in [2, 3])
    do_assert(aspect_ratio > 0)
    height, width = arr.shape[0:2]
    do_assert(height > 0)
    ratio_current = width / height

    pad_top = pad_right = pad_bottom = pad_left = 0
    if ratio_current < aspect_ratio:
        # image is too narrow for the target ratio -> pad left/right;
        # the odd pixel (if any) goes to the right side
        missing = (aspect_ratio * height) - width
        pad_right = int(np.ceil(missing / 2))
        pad_left = int(np.floor(missing / 2))
    elif ratio_current > aspect_ratio:
        # image is too wide for the target ratio -> pad top/bottom;
        # the odd pixel (if any) goes to the bottom side
        missing = ((1/aspect_ratio) * width) - height
        pad_top = int(np.floor(missing / 2))
        pad_bottom = int(np.ceil(missing / 2))

    return pad_top, pad_right, pad_bottom, pad_left
def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0, return_pad_amounts=False):
    """
    Pad an image-like array on its sides so that it matches a target aspect ratio.

    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pad.
    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.
    mode : str, optional
        Padding mode to use. See :func:`numpy.pad` for details.
    cval : number, optional
        Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
    return_pad_amounts : bool, optional
        If False, then only the padded image will be returned. If True, a tuple with two
        entries will be returned, where the first entry is the padded image and the second
        entry are the amounts by which each image side was padded. These amounts are again a
        tuple of the form (top, right, bottom, left), with each value being an integer.

    Returns
    -------
    arr_padded : (H',W') ndarray or (H',W',C) ndarray
        Padded image as (H',W') or (H',W',C) ndarray, fulfilling the given aspect_ratio.
    tuple of int
        Amounts by which the image was padded on each side, given as a tuple ``(top, right, bottom, left)``.
        This tuple is only returned if `return_pad_amounts` was set to True.
        Otherwise only ``arr_padded`` is returned.

    """
    # Derive the per-side padding and apply it via the generic pad() helper.
    paddings = compute_paddings_for_aspect_ratio(arr, aspect_ratio)
    arr_padded = pad(
        arr,
        top=paddings[0],
        right=paddings[1],
        bottom=paddings[2],
        left=paddings[3],
        mode=mode,
        cval=cval
    )
    if return_pad_amounts:
        return arr_padded, paddings
    return arr_padded
def pool(arr, block_size, func, cval=0, preserve_dtype=True):
    """
    Rescale an array by pooling values within blocks.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. Ideally of datatype ``numpy.float64``.
    block_size : int or tuple of int
        Spatial size of each group of values to pool, aka kernel size.
        If a single integer, then a symmetric block of that size along height and width will be used.
        If a tuple of two values, it is assumed to be the block size along height and width of the image-like,
        with pooling happening per channel.
        If a tuple of three values, it is assumed to be the block size along height, width and channels.
    func : callable
        Function to apply to a given block in order to convert it to a single number,
        e.g. :func:`numpy.average`, :func:`numpy.min`, :func:`numpy.max`.
    cval : number, optional
        Value to use in order to pad the array along its border if the array cannot be divided
        by `block_size` without remainder.
    preserve_dtype : bool, optional
        Whether to convert the array back to the input datatype if it is changed away from
        that in the pooling process.

    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after pooling.

    """
    do_assert(arr.ndim in [2, 3])

    is_valid_int = is_single_integer(block_size) and block_size >= 1
    # BUGFIX: the per-element check was previously a bare list comprehension
    # (always truthy for non-empty tuples), so invalid entries such as 0 or
    # floats were never rejected; all() makes the validation effective.
    is_valid_tuple = is_iterable(block_size) and len(block_size) in [2, 3] \
        and all([is_single_integer(val) and val >= 1 for val in block_size])
    do_assert(is_valid_int or is_valid_tuple)

    if is_single_integer(block_size):
        block_size = [block_size, block_size]
    if len(block_size) < arr.ndim:
        # pool only spatially by default; keep the channel axis intact
        block_size = list(block_size) + [1]

    input_dtype = arr.dtype
    arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func, cval=cval)
    # block_reduce may promote the dtype (e.g. uint8 -> float64 for averages)
    if preserve_dtype and arr_reduced.dtype.type != input_dtype:
        arr_reduced = arr_reduced.astype(input_dtype)
    return arr_reduced
def avg_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Rescale an array using average pooling.

    Thin convenience wrapper around :func:`imgaug.pool` with
    :func:`numpy.average` as the reduction function.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. See :func:`imgaug.pool` for details.
    block_size : int or tuple of int or tuple of int
        Size of each block of values to pool. See :func:`imgaug.pool` for details.
    cval : number, optional
        Padding value. See :func:`imgaug.pool` for details.
    preserve_dtype : bool, optional
        Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.

    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after average pooling.

    """
    return pool(arr, block_size, func=np.average, cval=cval, preserve_dtype=preserve_dtype)
def max_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Rescale an array using max-pooling.

    Thin convenience wrapper around :func:`imgaug.pool` with
    :func:`numpy.max` as the reduction function.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. See :func:`imgaug.pool` for details.
    block_size : int or tuple of int or tuple of int
        Size of each block of values to pool. See :func:`imgaug.pool` for details.
    cval : number, optional
        Padding value. See :func:`imgaug.pool` for details.
    preserve_dtype : bool, optional
        Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.

    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after max-pooling.

    """
    return pool(arr, block_size, func=np.max, cval=cval, preserve_dtype=preserve_dtype)
def draw_grid(images, rows=None, cols=None):
    """
    Converts multiple input images into a single image showing them in a grid.

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        The input images to convert to a grid.
        Expected to be RGB and have dtype uint8.
    rows : None or int, optional
        The number of rows to show in the grid.
        If None, it will be automatically derived.
    cols : None or int, optional
        The number of cols to show in the grid.
        If None, it will be automatically derived.

    Returns
    -------
    grid : (H',W',3) ndarray
        Image of the generated grid.

    """
    if is_np_array(images):
        do_assert(images.ndim == 4)
    else:
        do_assert(is_iterable(images) and is_np_array(images[0]) and images[0].ndim == 3)

    nb_images = len(images)
    do_assert(nb_images > 0)

    # every grid cell is as large as the largest input image
    cell_height = max([image.shape[0] for image in images])
    cell_width = max([image.shape[1] for image in images])
    channels = set([image.shape[2] for image in images])
    do_assert(
        len(channels) == 1,
        "All images are expected to have the same number of channels, "
        + "but got channel set %s with length %d instead." % (str(channels), len(channels))
    )
    nb_channels = list(channels)[0]

    # derive missing grid dimensions; default is a roughly square layout
    if rows is None and cols is None:
        rows = cols = int(math.ceil(math.sqrt(nb_images)))
    elif rows is not None:
        cols = int(math.ceil(nb_images / rows))
    elif cols is not None:
        rows = int(math.ceil(nb_images / cols))
    do_assert(rows * cols >= nb_images)

    grid = np.zeros((cell_height * rows, cell_width * cols, nb_channels), dtype=np.uint8)
    # paste each image into the top-left corner of its grid cell; remaining
    # cells (and the unfilled part of smaller cells) stay black
    for cell_idx, image in enumerate(images):
        row_idx = cell_idx // cols
        col_idx = cell_idx % cols
        cell_y1 = cell_height * row_idx
        cell_x1 = cell_width * col_idx
        grid[cell_y1:cell_y1 + image.shape[0], cell_x1:cell_x1 + image.shape[1], :] = image

    return grid
def show_grid(images, rows=None, cols=None):
    """
    Converts the input images to a grid image and shows it in a new window.

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        See :func:`imgaug.draw_grid`.
    rows : None or int, optional
        See :func:`imgaug.draw_grid`.
    cols : None or int, optional
        See :func:`imgaug.draw_grid`.

    """
    # Compose the grid image, then delegate display to imshow().
    imshow(draw_grid(images, rows=rows, cols=cols))
def imshow(image, backend=IMSHOW_BACKEND_DEFAULT):
    """
    Shows an image in a window.

    Parameters
    ----------
    image : (H,W,3) ndarray
        Image to show.
    backend : {'matplotlib', 'cv2'}, optional
        Library to use to show the image. May be either matplotlib or OpenCV ('cv2').
        OpenCV tends to be faster, but apparently causes more technical issues.

    """
    do_assert(backend in ["matplotlib", "cv2"], "Expected backend 'matplotlib' or 'cv2', got %s." % (backend,))
    if backend == "cv2":
        image_bgr = image
        if image.ndim == 3 and image.shape[2] in [3, 4]:
            # OpenCV expects BGR channel order: drop any alpha channel and
            # reverse the channel axis (RGB -> BGR).
            image_bgr = image[..., 0:3][..., ::-1]
        win_name = "imgaug-default-window"
        cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
        cv2.imshow(win_name, image_bgr)
        # Block until any key is pressed, then close the window again.
        cv2.waitKey(0)
        cv2.destroyWindow(win_name)
    else:
        # cmap is ignored by matplotlib for RGB input; it only affects
        # single-channel images.
        plt.imshow(image, cmap="gray")
        plt.gcf().canvas.set_window_title("imgaug.imshow(%s)" % (image.shape,))
        plt.show()
def do_assert(condition, message="Assertion failed."):
    """
    Raise an ``AssertionError`` if `condition` is False.

    This exists because plain `assert` statements are removed in optimized
    code (``python -O``); checks in this library that must survive
    optimization use this function instead.

    Parameters
    ----------
    condition : bool
        If False, an exception is raised.
    message : str, optional
        Error message.

    """
    if condition:
        return
    raise AssertionError(str(message))
class HooksImages(object):
    """
    Class to intervene with image augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.

    Each hook is an optional callable; a hook left at None falls back to
    a neutral default (the provided `default` value or the unchanged images).

    Parameters
    ----------
    activator : None or callable, optional
        A function that gives permission to execute an augmenter.
        The expected interface is ``f(images, augmenter, parents, default)``,
        where ``images`` are the input images to augment, ``augmenter`` is the
        instance of the augmenter to execute, ``parents`` are previously
        executed augmenters and ``default`` is an expected default value to be
        returned if the activator function does not plan to make a decision
        for the given inputs.
    propagator : None or callable, optional
        A function that gives permission to propagate the augmentation further
        to the children of an augmenter. This happens after the activator.
        If the activator returned False, the propagation step will never be
        executed. The expected interface is ``f(images, augmenter, parents, default)``,
        with all arguments having identical meaning to the activator.
    preprocessor : None or callable, optional
        A function to call before an augmenter performed any augmentations.
        The interface is ``f(images, augmenter, parents)``. It is expected to
        return the input images, optionally modified.
    postprocessor : None or callable, optional
        A function to call after an augmenter performed augmentations.
        The interface is the same as for the preprocessor.

    Examples
    --------
    >>> seq = iaa.Sequential([
    >>>     iaa.GaussianBlur(3.0, name="blur"),
    >>>     iaa.Dropout(0.05, name="dropout"),
    >>>     iaa.Affine(translate_px=-5, name="affine")
    >>> ])
    >>>
    >>> def activator(images, augmenter, parents, default):
    >>>     return False if augmenter.name in ["blur", "dropout"] else default
    >>>
    >>> seq_det = seq.to_deterministic()
    >>> images_aug = seq_det.augment_images(images)
    >>> heatmaps_aug = seq_det.augment_images(
    >>>     heatmaps,
    >>>     hooks=ia.HooksImages(activator=activator)
    >>> )

    This augments images and their respective heatmaps in the same way.
    The heatmaps however are only modified by Affine, not by GaussianBlur or
    Dropout.

    """

    def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None):
        self.activator = activator
        self.propagator = propagator
        self.preprocessor = preprocessor
        self.postprocessor = postprocessor

    def is_activated(self, images, augmenter, parents, default):
        """
        Returns whether an augmenter may be executed.

        Returns
        -------
        bool
            If True, the augmenter may be executed. If False, it may not be executed.

        """
        if self.activator is None:
            return default
        return self.activator(images, augmenter, parents, default)

    def is_propagating(self, images, augmenter, parents, default):
        """
        Returns whether an augmenter may call its children to augment an
        image. This is independent of the augmenter itself possibly changing
        the image without calling its children.

        Returns
        -------
        bool
            If True, the augmenter may be propagate to its children. If False, it may not.

        """
        if self.propagator is None:
            return default
        return self.propagator(images, augmenter, parents, default)

    def preprocess(self, images, augmenter, parents):
        """
        A function to be called before the augmentation of images starts (per augmenter).

        Returns
        -------
        (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.

        """
        if self.preprocessor is None:
            return images
        return self.preprocessor(images, augmenter, parents)

    def postprocess(self, images, augmenter, parents):
        """
        A function to be called after the augmentation of images was
        performed.

        Returns
        -------
        (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.

        """
        if self.postprocessor is None:
            return images
        return self.postprocessor(images, augmenter, parents)
class HooksHeatmaps(HooksImages):
    """
    Class to intervene with heatmap augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.

    This class is currently the same as the one for images. This may or may
    not change in the future.

    """
    # Intentionally empty: inherits all behavior from HooksImages.
    pass
class HooksKeypoints(HooksImages):
    """
    Class to intervene with keypoint augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.

    This class is currently the same as the one for images. This may or may
    not change in the future.

    """
    # Intentionally empty: inherits all behavior from HooksImages.
    pass
def compute_geometric_median(X, eps=1e-5):
    """
    Estimate the geometric median of points in 2D.

    Implements a variant of Weiszfeld's iterative algorithm.
    Code from https://stackoverflow.com/a/30305181

    Parameters
    ----------
    X : (N,2) ndarray
        Points in 2D. Second axis must be given in xy-form.
    eps : float, optional
        Distance threshold when to return the median.

    Returns
    -------
    (2,) ndarray
        Geometric median as xy-coordinate.

    """
    # Start from the centroid, then iterate until the estimate moves by
    # less than `eps` between two steps.
    y = np.mean(X, 0)
    while True:
        # distances of all input points to the current estimate
        D = scipy.spatial.distance.cdist(X, [y])
        nonzeros = (D != 0)[:, 0]
        # inverse-distance weights; points coinciding exactly with the
        # estimate are excluded here and handled separately below
        Dinv = 1 / D[nonzeros]
        Dinvs = np.sum(Dinv)
        W = Dinv / Dinvs
        # weighted mean of the non-coinciding points (Weiszfeld update)
        T = np.sum(W * X[nonzeros], 0)
        num_zeros = len(X) - np.sum(nonzeros)
        if num_zeros == 0:
            # no point coincides with the estimate: plain update step
            y1 = T
        elif num_zeros == len(X):
            # all points coincide with the estimate: it is the exact median
            return y
        else:
            # blend the update with the current estimate to account for
            # the points lying exactly at the estimate
            R = (T - y) * Dinvs
            r = np.linalg.norm(R)
            rinv = 0 if r == 0 else num_zeros/r
            y1 = max(0, 1-rinv)*T + min(1, rinv)*y
        if scipy.spatial.distance.euclidean(y, y1) < eps:
            return y1
        y = y1
class Keypoint(object):
"""
A single keypoint (aka landmark) on an image.
Parameters
----------
x : number
Coordinate of the keypoint on the x axis.
y : number
Coordinate of the keypoint on the y axis.
"""
    def __init__(self, x, y):
        # Coordinates may be ints or floats; they are stored unmodified.
        self.x = x
        self.y = y
    @property
    def x_int(self):
        """
        Return the keypoint's x-coordinate, rounded to the closest integer.

        Note that ``numpy.round`` rounds half-way values to the nearest
        even integer (e.g. ``0.5`` becomes ``0``).

        Returns
        -------
        result : int
            Keypoint's x-coordinate, rounded to the closest integer.

        """
        return int(np.round(self.x))
    @property
    def y_int(self):
        """
        Return the keypoint's y-coordinate, rounded to the closest integer.

        Note that ``numpy.round`` rounds half-way values to the nearest
        even integer (e.g. ``0.5`` becomes ``0``).

        Returns
        -------
        result : int
            Keypoint's y-coordinate, rounded to the closest integer.

        """
        return int(np.round(self.y))
def project(self, from_shape, to_shape):
"""
Project the keypoint onto a new position on a new image.
E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
and y=(20 of 100 pixels) and is projected onto a new image with
size (width=200, height=200), its new position will be (20, 40).
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
imgaug.Keypoint
Keypoint object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return Keypoint(x=self.x, y=self.y)
else:
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
x = (self.x / from_width) * to_width
y = (self.y / from_height) * to_height
return Keypoint(x=x, y=y)
def shift(self, x=0, y=0):
"""
Move the keypoint around on an image.
Parameters
----------
x : number, optional
Move by this value on the x axis.
y : number, optional
Move by this value on the y axis.
Returns
-------
imgaug.Keypoint
Keypoint object with new coordinates.
"""
return Keypoint(self.x + x, self.y + y)
def generate_similar_points_manhattan(self, nb_steps, step_size, return_array=False):
"""
Generate nearby points to this keypoint based on manhattan distance.
To generate the first neighbouring points, a distance of S (step size) is moved from the
center point (this keypoint) to the top, right, bottom and left, resulting in four new
points. From these new points, the pattern is repeated. Overlapping points are ignored.
The resulting points have a shape similar to a square rotated by 45 degrees.
Parameters
----------
nb_steps : int
The number of steps to move from the center point. nb_steps=1 results in a total of
5 output points (1 center point + 4 neighbours).
step_size : number
The step size to move from every point to its neighbours.
return_array : bool, optional
Whether to return the generated points as a list of keypoints or an array
of shape ``(N,2)``, where ``N`` is the number of generated points and the second axis contains
the x- (first value) and y- (second value) coordinates.
Returns
-------
points : list of imgaug.Keypoint or (N,2) ndarray
If return_array was False, then a list of Keypoint.
Otherwise a numpy array of shape ``(N,2)``, where ``N`` is the number of generated points and
the second axis contains the x- (first value) and y- (second value) coordinates.
The center keypoint (the one on which this function was called) is always included.
"""
# TODO add test
# Points generates in manhattan style with S steps have a shape similar to a 45deg rotated
# square. The center line with the origin point has S+1+S = 1+2*S points (S to the left,
# S to the right). The lines above contain (S+1+S)-2 + (S+1+S)-2-2 + ... + 1 points. E.g.
# for S=2 it would be 3+1=4 and for S=3 it would be 5+3+1=9. Same for the lines below the
# center. Hence the total number of points is S+1+S + 2*(S^2).
points = np.zeros((nb_steps + 1 + nb_steps + 2*(nb_steps**2), 2), dtype=np.float32)
# we start at the bottom-most line and move towards the top-most line
yy = np.linspace(self.y - nb_steps * step_size, self.y + nb_steps * step_size, nb_steps + 1 + nb_steps)
# bottom-most line contains only one point
width = 1
nth_point = 0
for i_y, y in enumerate(yy):
if width == 1:
xx = [self.x]
else:
xx = np.linspace(self.x - (width-1)//2 * step_size, self.x + (width-1)//2 * step_size, width)
for x in xx:
points[nth_point] = [x, y]
nth_point += 1
if i_y < nb_steps:
width += 2
else:
width -= 2
if return_array:
return points
return [Keypoint(x=points[i, 0], y=points[i, 1]) for i in sm.xrange(points.shape[0])]
def __repr__(self):
return self.__str__()
def __str__(self):
return "Keypoint(x=%.8f, y=%.8f)" % (self.x, self.y)
class KeypointsOnImage(object):
    """
    Object that represents all keypoints on a single image.

    Parameters
    ----------
    keypoints : list of imgaug.Keypoint
        List of keypoints on the image.

    shape : tuple of int
        The shape of the image on which the keypoints are placed.

    Examples
    --------
    >>> kps = [Keypoint(x=10, y=20), Keypoint(x=34, y=60)]
    >>> kps_oi = KeypointsOnImage(kps, shape=image.shape)

    """
    def __init__(self, keypoints, shape):
        self.keypoints = keypoints
        if is_np_array(shape):
            # an image was passed instead of its shape tuple
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)

    @property
    def height(self):
        # Height of the image on which the keypoints are placed.
        return self.shape[0]

    @property
    def width(self):
        # Width of the image on which the keypoints are placed.
        return self.shape[1]

    @property
    def empty(self):
        """
        Returns whether this object contains zero keypoints.

        Returns
        -------
        result : bool
            True if this object contains zero keypoints.

        """
        return len(self.keypoints) == 0

    def on(self, image):
        """
        Project keypoints from one image to a new one.

        Parameters
        ----------
        image : ndarray or tuple of int
            New image onto which the keypoints are to be projected.
            May also simply be that new image's shape tuple.

        Returns
        -------
        keypoints : imgaug.KeypointsOnImage
            Object containing all projected keypoints.

        """
        if is_np_array(image):
            shape = image.shape
        else:
            shape = image

        if shape[0:2] == self.shape[0:2]:
            return self.deepcopy()
        else:
            keypoints = [kp.project(self.shape, shape) for kp in self.keypoints]
            return KeypointsOnImage(keypoints, shape)

    def draw_on_image(self, image, color=(0, 255, 0), size=3, copy=True, raise_if_out_of_image=False):
        """
        Draw all keypoints onto a given image. Each keypoint is marked by a square of a chosen color and size.

        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the keypoints.
            This image should usually have the same shape as
            set in KeypointsOnImage.shape.

        color : int or list of int or tuple of int or (3,) ndarray, optional
            The RGB color of all keypoints. If a single int ``C``, then that is
            equivalent to ``(C,C,C)``.

        size : int, optional
            The size of each point. If set to ``C``, each square will have size ``C x C``.

        copy : bool, optional
            Whether to copy the image before drawing the points.

        raise_if_out_of_image : bool, optional
            Whether to raise an exception if any keypoint is outside of the image.

        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn keypoints.

        """
        if copy:
            image = np.copy(image)

        height, width = image.shape[0:2]

        for keypoint in self.keypoints:
            y, x = keypoint.y_int, keypoint.x_int
            if 0 <= y < height and 0 <= x < width:
                # draw a size x size square centered on the keypoint,
                # clipped to the image area
                x1 = max(x - size//2, 0)
                x2 = min(x + 1 + size//2, width)
                y1 = max(y - size//2, 0)
                y2 = min(y + 1 + size//2, height)
                image[y1:y2, x1:x2] = color
            else:
                if raise_if_out_of_image:
                    # bugfix: the format arguments were previously (y, x),
                    # i.e. swapped w.r.t. the x=..., y=... placeholders
                    raise Exception("Cannot draw keypoint x=%.8f, y=%.8f on image with shape %s." % (x, y, image.shape))

        return image

    def shift(self, x=0, y=0):
        """
        Move the keypoints around on an image.

        Parameters
        ----------
        x : number, optional
            Move each keypoint by this value on the x axis.

        y : number, optional
            Move each keypoint by this value on the y axis.

        Returns
        -------
        out : KeypointsOnImage
            Keypoints after moving them.

        """
        keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]
        return KeypointsOnImage(keypoints, self.shape)

    def get_coords_array(self):
        """
        Convert the coordinates of all keypoints in this object to an array of shape (N,2).

        Returns
        -------
        result : (N, 2) ndarray
            Where N is the number of keypoints. Each first value is the
            x coordinate, each second value is the y coordinate.

        """
        result = np.zeros((len(self.keypoints), 2), np.float32)
        for i, keypoint in enumerate(self.keypoints):
            result[i, 0] = keypoint.x
            result[i, 1] = keypoint.y
        return result

    @staticmethod
    def from_coords_array(coords, shape):
        """
        Convert an array (N,2) with a given image shape to a KeypointsOnImage object.

        Parameters
        ----------
        coords : (N, 2) ndarray
            Coordinates of ``N`` keypoints on the original image.
            Each first entry ``coords[i, 0]`` is expected to be the x coordinate.
            Each second entry ``coords[i, 1]`` is expected to be the y coordinate.

        shape : tuple
            Shape tuple of the image on which the keypoints are placed.

        Returns
        -------
        out : KeypointsOnImage
            KeypointsOnImage object that contains all keypoints from the array.

        """
        keypoints = [Keypoint(x=coords[i, 0], y=coords[i, 1]) for i in sm.xrange(coords.shape[0])]
        return KeypointsOnImage(keypoints, shape)

    def to_keypoint_image(self, size=1):
        """
        Draws a new black image of shape ``(H,W,N)`` in which all keypoint coordinates are set to 255.
        (H=shape height, W=shape width, N=number of keypoints)

        This function can be used as a helper when augmenting keypoints with a method that only supports the
        augmentation of images.

        Parameters
        ----------
        size : int
            Size of each (squared) point.

        Returns
        -------
        image : (H,W,N) ndarray
            Image in which the keypoints are marked. H is the height,
            defined in KeypointsOnImage.shape[0] (analogous W). N is the
            number of keypoints.

        """
        do_assert(len(self.keypoints) > 0)
        height, width = self.shape[0:2]
        image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
        do_assert(size % 2 != 0)
        sizeh = max(0, (size-1)//2)
        for i, keypoint in enumerate(self.keypoints):
            # TODO for float values spread activation over several cells
            # here and do voting at the end
            y = keypoint.y_int
            x = keypoint.x_int

            # a size x size neighbourhood gets value 128, the exact
            # keypoint pixel (if inside the image) gets 255
            x1 = np.clip(x - sizeh, 0, width-1)
            x2 = np.clip(x + sizeh + 1, 0, width)
            y1 = np.clip(y - sizeh, 0, height-1)
            y2 = np.clip(y + sizeh + 1, 0, height)

            if x1 < x2 and y1 < y2:
                image[y1:y2, x1:x2, i] = 128
            if 0 <= y < height and 0 <= x < width:
                image[y, x, i] = 255
        return image

    @staticmethod
    def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1, nb_channels=None): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Converts an image generated by ``to_keypoint_image()`` back to a KeypointsOnImage object.

        Parameters
        ----------
        image : (H,W,N) ndarray
            The keypoints image. N is the number of keypoints.

        if_not_found_coords : tuple or list or dict or None, optional
            Coordinates to use for keypoints that cannot be found in `image`.
            If this is a list/tuple, it must have two integer values.
            If it is a dictionary, it must have the keys ``x`` and ``y`` with
            each containing one integer value.
            If this is None, then the keypoint will not be added to the final
            KeypointsOnImage object.
            (The default dict is never mutated, so the mutable default is safe.)

        threshold : int, optional
            The search for keypoints works by searching for the argmax in
            each channel. This parameters contains the minimum value that
            the max must have in order to be viewed as a keypoint.

        nb_channels : None or int, optional
            Number of channels of the image on which the keypoints are placed.
            Some keypoint augmenters require that information.
            If set to None, the keypoint's shape will be set
            to ``(height, width)``, otherwise ``(height, width, nb_channels)``.

        Returns
        -------
        out : KeypointsOnImage
            The extracted keypoints.

        """
        do_assert(len(image.shape) == 3)
        height, width, nb_keypoints = image.shape

        drop_if_not_found = False
        if if_not_found_coords is None:
            drop_if_not_found = True
            if_not_found_x = -1
            if_not_found_y = -1
        elif isinstance(if_not_found_coords, (tuple, list)):
            do_assert(len(if_not_found_coords) == 2)
            if_not_found_x = if_not_found_coords[0]
            if_not_found_y = if_not_found_coords[1]
        elif isinstance(if_not_found_coords, dict):
            if_not_found_x = if_not_found_coords["x"]
            if_not_found_y = if_not_found_coords["y"]
        else:
            raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
                type(if_not_found_coords),))

        keypoints = []
        for i in sm.xrange(nb_keypoints):
            maxidx_flat = np.argmax(image[..., i])
            maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
            found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
            if found:
                keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
            else:
                if drop_if_not_found:
                    pass # dont add the keypoint to the result list, i.e. drop it
                else:
                    keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))

        out_shape = (height, width)
        if nb_channels is not None:
            out_shape += (nb_channels,)
        return KeypointsOnImage(keypoints, shape=out_shape)

    def to_distance_maps(self, inverted=False):
        """
        Generates a ``(H,W,K)`` output containing ``K`` distance maps for ``K`` keypoints.

        The k-th distance map contains at every location ``(y, x)`` the euclidean distance to the k-th keypoint.

        This function can be used as a helper when augmenting keypoints with a method that only supports
        the augmentation of images.

        Parameters
        ----------
        inverted : bool, optional
            If True, inverted distance maps are returned where each distance value d is replaced
            by ``d/(d+1)``, i.e. the distance maps have values in the range ``(0.0, 1.0]`` with 1.0
            denoting exactly the position of the respective keypoint.

        Returns
        -------
        distance_maps : (H,W,K) ndarray
            A ``float32`` array containing ``K`` distance maps for ``K`` keypoints. Each location
            ``(y, x, k)`` in the array denotes the euclidean distance at ``(y, x)`` to the ``k``-th keypoint.
            In inverted mode the distance ``d`` is replaced by ``d/(d+1)``. The height and width
            of the array match the height and width in ``KeypointsOnImage.shape``.

        """
        do_assert(len(self.keypoints) > 0)
        height, width = self.shape[0:2]
        distance_maps = np.zeros((height, width, len(self.keypoints)), dtype=np.float32)

        yy = np.arange(0, height)
        xx = np.arange(0, width)
        grid_xx, grid_yy = np.meshgrid(xx, yy)

        for i, keypoint in enumerate(self.keypoints):
            y, x = keypoint.y, keypoint.x
            # squared distances first, sqrt once over the whole array below
            distance_maps[:, :, i] = (grid_xx - x) ** 2 + (grid_yy - y) ** 2
        distance_maps = np.sqrt(distance_maps)
        if inverted:
            return 1/(distance_maps+1)
        return distance_maps

    # TODO add option to if_not_found_coords to reuse old keypoint coords
    @staticmethod
    def from_distance_maps(distance_maps, inverted=False, if_not_found_coords={"x": -1, "y": -1}, threshold=None, # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
                           nb_channels=None):
        """
        Converts maps generated by ``to_distance_maps()`` back to a KeypointsOnImage object.

        Parameters
        ----------
        distance_maps : (H,W,N) ndarray
            The distance maps. N is the number of keypoints.

        inverted : bool, optional
            Whether the given distance maps were generated in inverted or normal mode.

        if_not_found_coords : tuple or list or dict or None, optional
            Coordinates to use for keypoints that cannot be found in ``distance_maps``.
            If this is a list/tuple, it must have two integer values.
            If it is a dictionary, it must have the keys ``x`` and ``y``, with each
            containing one integer value.
            If this is None, then the keypoint will not be added to the final
            KeypointsOnImage object.
            (The default dict is never mutated, so the mutable default is safe.)

        threshold : float, optional
            The search for keypoints works by searching for the argmin (non-inverted) or
            argmax (inverted) in each channel. This parameters contains the maximum (non-inverted)
            or minimum (inverted) value to accept in order to view a hit as a keypoint.
            Use None to use no min/max.

        nb_channels : None or int, optional
            Number of channels of the image on which the keypoints are placed.
            Some keypoint augmenters require that information.
            If set to None, the keypoint's shape will be set
            to ``(height, width)``, otherwise ``(height, width, nb_channels)``.

        Returns
        -------
        imgaug.KeypointsOnImage
            The extracted keypoints.

        """
        do_assert(len(distance_maps.shape) == 3)
        height, width, nb_keypoints = distance_maps.shape

        drop_if_not_found = False
        if if_not_found_coords is None:
            drop_if_not_found = True
            if_not_found_x = -1
            if_not_found_y = -1
        elif isinstance(if_not_found_coords, (tuple, list)):
            do_assert(len(if_not_found_coords) == 2)
            if_not_found_x = if_not_found_coords[0]
            if_not_found_y = if_not_found_coords[1]
        elif isinstance(if_not_found_coords, dict):
            if_not_found_x = if_not_found_coords["x"]
            if_not_found_y = if_not_found_coords["y"]
        else:
            raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
                type(if_not_found_coords),))

        keypoints = []
        for i in sm.xrange(nb_keypoints):
            # TODO introduce voting here among all distance values that have min/max values
            if inverted:
                hitidx_flat = np.argmax(distance_maps[..., i])
            else:
                hitidx_flat = np.argmin(distance_maps[..., i])
            hitidx_ndim = np.unravel_index(hitidx_flat, (height, width))
            if not inverted and threshold is not None:
                found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] < threshold)
            elif inverted and threshold is not None:
                found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] >= threshold)
            else:
                found = True
            if found:
                keypoints.append(Keypoint(x=hitidx_ndim[1], y=hitidx_ndim[0]))
            else:
                if drop_if_not_found:
                    pass # dont add the keypoint to the result list, i.e. drop it
                else:
                    keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))

        out_shape = (height, width)
        if nb_channels is not None:
            out_shape += (nb_channels,)
        return KeypointsOnImage(keypoints, shape=out_shape)

    def copy(self):
        """
        Create a shallow copy of the KeypointsOnImage object.

        Returns
        -------
        imgaug.KeypointsOnImage
            Shallow copy.

        """
        return copy.copy(self)

    def deepcopy(self):
        """
        Create a deep copy of the KeypointsOnImage object.

        Returns
        -------
        imgaug.KeypointsOnImage
            Deep copy.

        """
        # for some reason deepcopy is way slower here than manual copy
        kps = [Keypoint(x=kp.x, y=kp.y) for kp in self.keypoints]
        return KeypointsOnImage(kps, tuple(self.shape))

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "KeypointsOnImage(%s, shape=%s)" % (str(self.keypoints), self.shape)
# TODO functions: square(), to_aspect_ratio(), contains_point()
class BoundingBox(object):
"""
Class representing bounding boxes.
Each bounding box is parameterized by its top left and bottom right corners. Both are given
as x and y-coordinates. The corners are intended to lie inside the bounding box area.
As a result, a bounding box that lies completely inside the image but has maximum extensions
would have coordinates ``(0.0, 0.0)`` and ``(W - epsilon, H - epsilon)``. Note that coordinates
are saved internally as floats.
Parameters
----------
x1 : number
X-coordinate of the top left of the bounding box.
y1 : number
Y-coordinate of the top left of the bounding box.
x2 : number
X-coordinate of the bottom right of the bounding box.
y2 : number
Y-coordinate of the bottom right of the bounding box.
label : None or str, optional
Label of the bounding box, e.g. a string representing the class.
"""
def __init__(self, x1, y1, x2, y2, label=None):
"""Create a new BoundingBox instance."""
if x1 > x2:
x2, x1 = x1, x2
do_assert(x2 >= x1)
if y1 > y2:
y2, y1 = y1, y2
do_assert(y2 >= y1)
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.label = label
@property
def x1_int(self):
"""
Return the x-coordinate of the top left corner as an integer.
Returns
-------
int
X-coordinate of the top left corner, rounded to the closest integer.
"""
return int(np.round(self.x1)) # use numpy's round to have consistent behaviour between python versions
@property
def y1_int(self):
"""
Return the y-coordinate of the top left corner as an integer.
Returns
-------
int
Y-coordinate of the top left corner, rounded to the closest integer.
"""
return int(np.round(self.y1)) # use numpy's round to have consistent behaviour between python versions
@property
def x2_int(self):
"""
Return the x-coordinate of the bottom left corner as an integer.
Returns
-------
int
X-coordinate of the bottom left corner, rounded to the closest integer.
"""
return int(np.round(self.x2)) # use numpy's round to have consistent behaviour between python versions
@property
def y2_int(self):
"""
Return the y-coordinate of the bottom left corner as an integer.
Returns
-------
int
Y-coordinate of the bottom left corner, rounded to the closest integer.
"""
return int(np.round(self.y2)) # use numpy's round to have consistent behaviour between python versions
@property
def height(self):
"""
Estimate the height of the bounding box.
Returns
-------
number
Height of the bounding box.
"""
return self.y2 - self.y1
@property
def width(self):
"""
Estimate the width of the bounding box.
Returns
-------
number
Width of the bounding box.
"""
return self.x2 - self.x1
@property
def center_x(self):
"""
Estimate the x-coordinate of the center point of the bounding box.
Returns
-------
number
X-coordinate of the center point of the bounding box.
"""
return self.x1 + self.width/2
@property
def center_y(self):
"""
Estimate the y-coordinate of the center point of the bounding box.
Returns
-------
number
Y-coordinate of the center point of the bounding box.
"""
return self.y1 + self.height/2
@property
def area(self):
"""
Estimate the area of the bounding box.
Returns
-------
number
Area of the bounding box, i.e. `height * width`.
"""
return self.height * self.width
def contains(self, other):
"""
Estimate whether the bounding box contains a point.
Parameters
----------
other : imgaug.Keypoint
Point to check for.
Returns
-------
bool
True if the point is contained in the bounding box, False otherwise.
"""
x, y = other.x, other.y
return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2
def project(self, from_shape, to_shape):
"""
Project the bounding box onto a new position on a new image.
E.g. if the bounding box is on its original image at
x1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto
a new image with size (width=200, height=200), its new position will
be (x1=20, y1=40). (Analogous for x2/y2.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
out : imgaug.BoundingBox
BoundingBox object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return self.copy()
else:
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
do_assert(from_height > 0)
do_assert(from_width > 0)
do_assert(to_height > 0)
do_assert(to_width > 0)
x1 = (self.x1 / from_width) * to_width
y1 = (self.y1 / from_height) * to_height
x2 = (self.x2 / from_width) * to_width
y2 = (self.y2 / from_height) * to_height
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
"""
Extend the size of the bounding box along its sides.
Parameters
----------
all_sides : number, optional
Value by which to extend the bounding box size along all sides.
top : number, optional
Value by which to extend the bounding box size along its top side.
right : number, optional
Value by which to extend the bounding box size along its right side.
bottom : number, optional
Value by which to extend the bounding box size along its bottom side.
left : number, optional
Value by which to extend the bounding box size along its left side.
Returns
-------
imgaug.BoundingBox
Extended bounding box.
"""
return BoundingBox(
x1=self.x1 - all_sides - left,
x2=self.x2 + all_sides + right,
y1=self.y1 - all_sides - top,
y2=self.y2 + all_sides + bottom
)
def intersection(self, other, default=None):
"""
Compute the intersection bounding box of this bounding box and another one.
Note that in extreme cases, the intersection can be a single point, meaning that the intersection bounding box
will exist, but then also has a height and width of zero.
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to generate the intersection.
default : any, optional
Default value to return if there is no intersection.
Returns
-------
imgaug.BoundingBox or any
Intersection bounding box of the two bounding boxes if there is an intersection.
If there is no intersection, the default value will be returned, which can by anything.
"""
x1_i = max(self.x1, other.x1)
y1_i = max(self.y1, other.y1)
x2_i = min(self.x2, other.x2)
y2_i = min(self.y2, other.y2)
if x1_i > x2_i or y1_i > y2_i:
return default
else:
return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)
def union(self, other):
"""
Compute the union bounding box of this bounding box and another one.
This is equivalent to drawing a bounding box around all corners points of both
bounding boxes.
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to generate the union.
Returns
-------
imgaug.BoundingBox
Union bounding box of the two bounding boxes.
"""
return BoundingBox(
x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2),
)
def iou(self, other):
"""
Compute the IoU of this bounding box with another one.
IoU is the intersection over union, defined as::
``area(intersection(A, B)) / area(union(A, B))``
``= area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))``
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to compare.
Returns
-------
float
IoU between the two bounding boxes.
"""
inters = self.intersection(other)
if inters is None:
return 0.0
else:
area_union = self.area + other.area - inters.area
return inters.area / area_union if area_union > 0 else 0.0
def is_fully_within_image(self, image):
"""
Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is fully inside the image area. False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
return self.x1 >= 0 and self.x2 < width and self.y1 >= 0 and self.y2 < height
def is_partly_within_image(self, image):
"""
Estimate whether the bounding box is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is at least partially inside the image area. False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
eps = np.finfo(np.float32).eps
img_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps)
return self.intersection(img_bb) is not None
def is_out_of_image(self, image, fully=True, partly=False):
"""
Estimate whether the bounding box is partially or fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape and must contain at least two integers.
fully : bool, optional
Whether to return True if the bounding box is fully outside fo the image area.
partly : bool, optional
Whether to return True if the bounding box is at least partially outside fo the
image area.
Returns
-------
bool
True if the bounding box is partially/fully outside of the image area, depending
on defined parameters. False otherwise.
"""
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
else:
return fully
def cut_out_of_image(self, image):
"""
Cut off all parts of the bounding box that are outside of the image.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use for the clipping of the bounding box.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
result : imgaug.BoundingBox
Bounding box, clipped to fall within the image dimensions.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
do_assert(height > 0)
do_assert(width > 0)
eps = np.finfo(np.float32).eps
x1 = np.clip(self.x1, 0, width - eps)
x2 = np.clip(self.x2, 0, width - eps)
y1 = np.clip(self.y1, 0, height - eps)
y2 = np.clip(self.y2, 0, height - eps)
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift the bounding box from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the bounding box from the top.
right : None or int, optional
Amount of pixels by which to shift the bounding box from the right.
bottom : None or int, optional
Amount of pixels by which to shift the bounding box from the bottom.
left : None or int, optional
Amount of pixels by which to shift the bounding box from the left.
Returns
-------
result : imgaug.BoundingBox
Shifted bounding box.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
)
# TODO add explicit test for zero-sized BBs (worked when tested by hand)
    def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
        """
        Draw the bounding box on an image.

        Parameters
        ----------
        image : (H,W,C) ndarray(uint8)
            The image onto which to draw the bounding box.

        color : iterable of int, optional
            The color to use, corresponding to the channel layout of the image. Usually RGB.

        alpha : float, optional
            The transparency of the drawn bounding box, where 1.0 denotes no transparency and
            0.0 is invisible.

        thickness : int, optional
            The thickness of the bounding box in pixels. If the value is larger than 1, then
            additional pixels will be added around the bounding box (i.e. extension towards the
            outside).

        copy : bool, optional
            Whether to copy the input image or change it in-place.

        raise_if_out_of_image : bool, optional
            Whether to raise an error if the bounding box is partially/fully outside of the
            image. If set to False, no error will be raised and only the parts inside the image
            will be drawn.

        Returns
        -------
        result : (H,W,C) ndarray(uint8)
            Image with bounding box drawn on it.

        """
        if raise_if_out_of_image and self.is_out_of_image(image):
            raise Exception("Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s." % (
                self.x1, self.y1, self.x2, self.y2, image.shape))

        result = np.copy(image) if copy else image

        # normalize color to an ndarray so that the alpha blending below
        # works elementwise per channel
        if isinstance(color, (tuple, list)):
            color = np.uint8(color)

        # each thickness level i draws one more perimeter ring, extended by
        # i pixels outwards from the base rectangle
        for i in range(thickness):
            y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int

            # When y values get into the range (H-0.5, H), the *_int functions round them to H.
            # That is technically sensible, but in the case of drawing means that the border lies
            # just barely outside of the image, making the border disappear, even though the BB
            # is fully inside the image. Here we correct for that because of beauty reasons.
            # Same is the case for x coordinates.
            if self.is_fully_within_image(image):
                y1 = np.clip(y1, 0, image.shape[0]-1)
                y2 = np.clip(y2, 0, image.shape[0]-1)
                x1 = np.clip(x1, 0, image.shape[1]-1)
                x2 = np.clip(x2, 0, image.shape[1]-1)

            # rectangle corners in drawing order (top-left, top-right,
            # bottom-right, bottom-left); polygon_perimeter clips them to
            # the image shape
            y = [y1-i, y1-i, y2+i, y2+i]
            x = [x1-i, x2+i, x2+i, x1-i]
            rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
            if alpha >= 0.99:
                # opaque: simply overwrite the perimeter pixels
                result[rr, cc, :] = color
            else:
                if is_float_array(result):
                    result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                    # NOTE(review): np.clip returns a new array here, so for
                    # copy=False the caller's array no longer receives the
                    # clipped result once blending runs — confirm intent.
                    result = np.clip(result, 0, 255)
                else:
                    # blend in float space, then restore the input dtype
                    input_dtype = result.dtype
                    result = result.astype(np.float32)
                    result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                    result = np.clip(result, 0, 255).astype(input_dtype)

        return result
    def extract_from_image(self, image, prevent_zero_size=True):
        """
        Extract the image pixels within the bounding box.

        This function will zero-pad the image if the bounding box is partially/fully outside of
        the image.

        Parameters
        ----------
        image : (H,W) ndarray or (H,W,C) ndarray
            The image from which to extract the pixels within the bounding box.

        prevent_zero_size : bool, optional
            Whether to prevent height or width of the extracted image from becoming zero.
            If this is set to True and height or width of the bounding box is below 1, the height/width will
            be increased to 1. This can be useful to prevent problems, e.g. with image saving or plotting.
            If it is set to False, images will be returned as ``(H', W')`` or ``(H', W', 3)`` with ``H`` or
            ``W`` potentially being 0.

        Returns
        -------
        image : (H',W') ndarray or (H',W',C) ndarray
            Pixels within the bounding box. Zero-padded if the bounding box is partially/fully
            outside of the image. If prevent_zero_size is activated, it is guaranteed that ``H'>0``
            and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``.
        """
        # Padding amounts (in pixels) applied per side when the box extends
        # beyond the image; all stay 0 for a fully-inside box.
        pad_top = 0
        pad_right = 0
        pad_bottom = 0
        pad_left = 0
        height, width = image.shape[0], image.shape[1]
        x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
        # When y values get into the range (H-0.5, H), the *_int functions round them to H.
        # That is technically sensible, but in the case of extraction leads to a black border,
        # which is both ugly and unexpected after calling cut_out_of_image(). Here we correct for
        # that because of beauty reasons.
        # Same is the case for x coordinates.
        if self.is_fully_within_image(image):
            y1 = np.clip(y1, 0, image.shape[0]-1)
            y2 = np.clip(y2, 0, image.shape[0]-1)
            x1 = np.clip(x1, 0, image.shape[1]-1)
            x2 = np.clip(x2, 0, image.shape[1]-1)
        # TODO add test
        # Guarantee at least a 1x1 extraction area (see docstring).
        if prevent_zero_size:
            if abs(x2 - x1) < 1:
                x2 = x1 + 1
            if abs(y2 - y1) < 1:
                y2 = y1 + 1
        # if the bb is outside of the image area, the following pads the image
        # first with black pixels until the bb is inside the image
        # and only then extracts the image area
        # TODO probably more efficient to initialize an array of zeros
        # and copy only the portions of the bb into that array that are
        # natively inside the image area
        if x1 < 0:
            # Box starts left of the image: pad left and shift x-coords into
            # the padded coordinate system.
            pad_left = abs(x1)
            x2 = x2 + abs(x1)
            x1 = 0
        if y1 < 0:
            # Box starts above the image: pad top and shift y-coords.
            pad_top = abs(y1)
            y2 = y2 + abs(y1)
            y1 = 0
        if x2 >= width:
            pad_right = x2 - (width - 1)
        if y2 >= height:
            pad_bottom = y2 - (height - 1)
        if any([val > 0 for val in [pad_top, pad_right, pad_bottom, pad_left]]):
            # Zero-pad ("constant" mode) so the slice below is fully in-bounds.
            if len(image.shape) == 2:
                image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right)), mode="constant")
            else:
                image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), mode="constant")
        return image[y1:y2, x1:x2]
# TODO also add to_heatmap
# TODO add this to BoundingBoxesOnImage
def to_keypoints(self):
"""
Convert the corners of the bounding box to keypoints (clockwise, starting at top left).
Returns
-------
list of imgaug.Keypoint
Corners of the bounding box as keypoints.
"""
return [
Keypoint(x=self.x1, y=self.y1),
Keypoint(x=self.x2, y=self.y1),
Keypoint(x=self.x2, y=self.y2),
Keypoint(x=self.x1, y=self.y2)
]
def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a shallow copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not None, then the x1 coordinate of the copied object will be set to this value.
y1 : None or number
If not None, then the y1 coordinate of the copied object will be set to this value.
x2 : None or number
If not None, then the x2 coordinate of the copied object will be set to this value.
y2 : None or number
If not None, then the y2 coordinate of the copied object will be set to this value.
label : None or string
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.BoundingBox
Shallow copy.
"""
return BoundingBox(
x1=self.x1 if x1 is None else x1,
x2=self.x2 if x2 is None else x2,
y1=self.y1 if y1 is None else y1,
y2=self.y2 if y2 is None else y2,
label=self.label if label is None else label
)
def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a deep copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not None, then the x1 coordinate of the copied object will be set to this value.
y1 : None or number
If not None, then the y1 coordinate of the copied object will be set to this value.
x2 : None or number
If not None, then the x2 coordinate of the copied object will be set to this value.
y2 : None or number
If not None, then the y2 coordinate of the copied object will be set to this value.
label : None or string
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.BoundingBox
Deep copy.
"""
return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)
def __repr__(self):
return self.__str__()
def __str__(self):
return "BoundingBox(x1=%.4f, y1=%.4f, x2=%.4f, y2=%.4f, label=%s)" % (
self.x1, self.y1, self.x2, self.y2, self.label)
class BoundingBoxesOnImage(object):
    """
    Object that represents all bounding boxes on a single image.

    Parameters
    ----------
    bounding_boxes : list of imgaug.BoundingBox
        List of bounding boxes on the image.

    shape : tuple of int
        The shape of the image on which the bounding boxes are placed.

    Examples
    --------
    >>> bbs = [
    >>>     BoundingBox(x1=10, y1=20, x2=20, y2=30),
    >>>     BoundingBox(x1=25, y1=50, x2=30, y2=70)
    >>> ]
    >>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)

    """

    def __init__(self, bounding_boxes, shape):
        self.bounding_boxes = bounding_boxes
        if is_np_array(shape):
            # An image array was passed instead of a shape tuple; use its shape.
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)

    # TODO remove this? here it is image height at BoundingBox it is bounding box height
    @property
    def height(self):
        """
        Get the height of the image on which the bounding boxes fall.

        Returns
        -------
        int
            Image height.
        """
        return self.shape[0]

    # TODO remove this? here it is image width at BoundingBox it is bounding box width
    @property
    def width(self):
        """
        Get the width of the image on which the bounding boxes fall.

        Returns
        -------
        int
            Image width.
        """
        return self.shape[1]

    @property
    def empty(self):
        """
        Returns whether this object contains zero bounding boxes.

        Returns
        -------
        bool
            True if this object contains zero bounding boxes.
        """
        return len(self.bounding_boxes) == 0

    def on(self, image):
        """
        Project bounding boxes from one image to a new one.

        Parameters
        ----------
        image : ndarray or tuple of int
            New image onto which the bounding boxes are to be projected.
            May also simply be that new image's shape tuple.

        Returns
        -------
        bounding_boxes : imgaug.BoundingBoxesOnImage
            Object containing all projected bounding boxes.
        """
        shape = image.shape if is_np_array(image) else image
        if shape[0:2] == self.shape[0:2]:
            # Same spatial size; no reprojection necessary.
            return self.deepcopy()
        bounding_boxes = [bb.project(self.shape, shape) for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bounding_boxes, shape)

    @classmethod
    def from_xyxy_array(cls, xyxy, shape):
        """
        Convert an (N,4) ndarray to a BoundingBoxesOnImage object.

        This is the inverse of :func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.

        Parameters
        ----------
        xyxy : (N,4) ndarray
            Array containing the corner coordinates (top-left, bottom-right) of ``N`` bounding boxes
            in the form ``(x1, y1, x2, y2)``. Should usually be of dtype ``float32``.

        shape : tuple of int
            Shape of the image on which the bounding boxes are placed.
            Should usually be ``(H, W, C)`` or ``(H, W)``.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Object containing a list of BoundingBox objects following the provided corner coordinates.
        """
        do_assert(xyxy.shape[1] == 4, "Expected input array of shape (N, 4), got shape %s." % (xyxy.shape,))
        boxes = [BoundingBox(*row) for row in xyxy]
        return cls(boxes, shape)

    def to_xyxy_array(self, dtype=np.float32):
        """
        Convert the BoundingBoxesOnImage object to an (N,4) ndarray.

        This is the inverse of :func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`.

        Parameters
        ----------
        dtype : numpy.dtype, optional
            Desired output datatype of the ndarray.

        Returns
        -------
        ndarray
            (N,4) ndarray array, where ``N`` denotes the number of bounding boxes and ``4`` denotes the
            top-left and bottom-right bounding box corner coordinates in form ``(x1, y1, x2, y2)``.
        """
        xyxy_array = np.zeros((len(self.bounding_boxes), 4), dtype=np.float32)
        for i, box in enumerate(self.bounding_boxes):
            xyxy_array[i] = [box.x1, box.y1, box.x2, box.y2]
        return xyxy_array.astype(dtype)

    def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
        """
        Draw all bounding boxes onto a given image.

        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the bounding boxes.
            This image should usually have the same shape as
            set in BoundingBoxesOnImage.shape.

        color : int or list of int or tuple of int or (3,) ndarray, optional
            The RGB color of all bounding boxes. If a single int ``C``, then that is
            equivalent to ``(C,C,C)``.

        alpha : float, optional
            Alpha/transparency of the bounding box.

        thickness : int, optional
            Thickness in pixels.

        copy : bool, optional
            Whether to copy the image before drawing the points.

        raise_if_out_of_image : bool, optional
            Whether to raise an exception if any bounding box is outside of the image.

        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn bounding boxes.
        """
        # Copy the image at most once. Previously `copy` was forwarded to each
        # per-box draw call, copying the intermediate result once per bounding
        # box (see old TODO about copying only once).
        if copy and self.bounding_boxes:
            image = np.copy(image)
        for bb in self.bounding_boxes:
            image = bb.draw_on_image(
                image,
                color=color,
                alpha=alpha,
                thickness=thickness,
                copy=False,
                raise_if_out_of_image=raise_if_out_of_image
            )
        return image

    def remove_out_of_image(self, fully=True, partly=False):
        """
        Remove all bounding boxes that are fully or partially outside of the image.

        Parameters
        ----------
        fully : bool, optional
            Whether to remove bounding boxes that are fully outside of the image.

        partly : bool, optional
            Whether to remove bounding boxes that are partially outside of the image.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Reduced set of bounding boxes, with those that were fully/partially outside of
            the image removed.
        """
        bbs_clean = [bb for bb in self.bounding_boxes
                     if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
        return BoundingBoxesOnImage(bbs_clean, shape=self.shape)

    def cut_out_of_image(self):
        """
        Cut off all parts from all bounding boxes that are outside of the image.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Bounding boxes, clipped to fall within the image dimensions.
            Boxes fully outside of the image are dropped.
        """
        bbs_cut = [bb.cut_out_of_image(self.shape)
                   for bb in self.bounding_boxes if bb.is_partly_within_image(self.shape)]
        return BoundingBoxesOnImage(bbs_cut, shape=self.shape)

    def shift(self, top=None, right=None, bottom=None, left=None):
        """
        Shift all bounding boxes from one or more image sides, i.e. move them on the x/y-axis.

        Parameters
        ----------
        top : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the top.

        right : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the right.

        bottom : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the bottom.

        left : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the left.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Shifted bounding boxes.
        """
        bbs_new = [bb.shift(top=top, right=right, bottom=bottom, left=left) for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs_new, shape=self.shape)

    def copy(self):
        """
        Create a shallow copy of the BoundingBoxesOnImage object.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Shallow copy.
        """
        return copy.copy(self)

    def deepcopy(self):
        """
        Create a deep copy of the BoundingBoxesOnImage object.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Deep copy.
        """
        # Manual copy is far faster than deepcopy for KeypointsOnImage,
        # so use manual copy here too
        bbs = [bb.deepcopy() for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs, tuple(self.shape))

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "BoundingBoxesOnImage(%s, shape=%s)" % (str(self.bounding_boxes), self.shape)
# TODO somehow merge with BoundingBox
# TODO add functions: simplify() (eg via shapely.ops.simplify()),
# extend(all_sides=0, top=0, right=0, bottom=0, left=0),
# intersection(other, default=None), union(other), iou(other), to_heatmap, to_mask
class Polygon(object):
"""
Class representing polygons.
Each polygon is parameterized by its corner points, given as absolute x- and y-coordinates
with sub-pixel accuracy.
Parameters
----------
exterior : list of imgaug.Keypoint or list of tuple of float or (N,2) ndarray
List of points defining the polygon. May be either a list of Keypoint objects or a list of tuples in xy-form
or a numpy array of shape (N,2) for N points in xy-form.
All coordinates are expected to be the absolute coordinates in the image, given as floats, e.g. x=10.7
and y=3.4 for a point at coordinates (10.7, 3.4). Their order is expected to be clock-wise. They are expected
to not be closed (i.e. first and last coordinate differ).
label : None or str, optional
Label of the polygon, e.g. a string representing the class.
"""
    def __init__(self, exterior, label=None):
        """
        Create a new Polygon instance.

        Normalizes `exterior` into a float32 ndarray of shape (N,2) in
        xy-form, accepting a list of Keypoint, a list of (x, y) tuples or an
        (N,2) ndarray. A trailing point identical to the first one is dropped.
        """
        if isinstance(exterior, list):
            if not exterior:
                # for empty lists, make sure that the shape is (0, 2) and not (0,) as that is also expected when the
                # input is a numpy array
                self.exterior = np.zeros((0, 2), dtype=np.float32)
            elif isinstance(exterior[0], Keypoint):
                # list of Keypoint
                self.exterior = np.float32([[point.x, point.y] for point in exterior])
            else:
                # list of tuples (x, y)
                self.exterior = np.float32([[point[0], point[1]] for point in exterior])
        else:
            # ndarray input: must already be (N,2) in xy-form
            do_assert(is_np_array(exterior))
            do_assert(exterior.ndim == 2)
            do_assert(exterior.shape[1] == 2)
            self.exterior = np.float32(exterior)
        # Remove last point if it is essentially the same as the first point (polygons are always assumed to be
        # closed anyways). This also prevents problems with shapely, which seems to add the last point automatically.
        if len(self.exterior) >= 2 and np.allclose(self.exterior[0, :], self.exterior[-1, :]):
            self.exterior = self.exterior[:-1]
        # Optional class label of the polygon (e.g. object category name).
        self.label = label
@property
def xx(self):
"""
Return the x-coordinates of all points in the exterior.
Returns
-------
(N,2) ndarray
X-coordinates of all points in the exterior as a float32 ndarray.
"""
return self.exterior[:, 0]
@property
def yy(self):
"""
Return the y-coordinates of all points in the exterior.
Returns
-------
(N,2) ndarray
Y-coordinates of all points in the exterior as a float32 ndarray.
"""
return self.exterior[:, 1]
@property
def xx_int(self):
"""
Return the x-coordinates of all points in the exterior, rounded to the closest integer value.
Returns
-------
(N,2) ndarray
X-coordinates of all points in the exterior, rounded to the closest integer value.
Result dtype is int32.
"""
return np.int32(np.round(self.xx))
@property
def yy_int(self):
"""
Return the y-coordinates of all points in the exterior, rounded to the closest integer value.
Returns
-------
(N,2) ndarray
Y-coordinates of all points in the exterior, rounded to the closest integer value.
Result dtype is int32.
"""
return np.int32(np.round(self.yy))
@property
def is_valid(self):
"""
Estimate whether the polygon has a valid shape.
To to be considered valid, the polygons must be made up of at least 3 points and have concave shape.
Multiple consecutive points are allowed to have the same coordinates.
Returns
-------
bool
True if polygon has at least 3 points and is concave, otherwise False.
"""
if len(self.exterior) < 3:
return False
return self.to_shapely_polygon().is_valid
@property
def area(self):
"""
Estimate the area of the polygon.
Returns
-------
number
Area of the polygon.
"""
if len(self.exterior) < 3:
raise Exception("Cannot compute the polygon's area because it contains less than three points.")
poly = self.to_shapely_polygon()
return poly.area
def project(self, from_shape, to_shape):
"""
Project the polygon onto an image with different shape.
The relative coordinates of all points remain the same.
E.g. a point at (x=20, y=20) on an image (width=100, height=200) will be
projected on a new image (width=200, height=100) to (x=40, y=10).
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
imgaug.Polygon
Polygon object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return self.copy()
exterior = [Keypoint(x=x, y=y).project(from_shape, to_shape) for x, y in self.exterior]
return self.copy(exterior=exterior)
def find_closest_point_index(self, x, y, return_distance=False):
"""
Find the index of the point within the exterior that is closest to the given coordinates.
"Closeness" is here defined based on euclidean distance.
This method will raise an AssertionError if the exterior contains no points.
Parameters
----------
x : number
X-coordinate around which to search for close points.
y : number
Y-coordinate around which to search for close points.
return_distance : bool, optional
Whether to also return the distance of the closest point.
Returns
-------
int
Index of the closest point.
number
Euclidean distance to the the closest point.
This value is only returned if `return_distance` was set to True.
"""
do_assert(len(self.exterior) > 0)
distances = []
for x2, y2 in self.exterior:
d = (x2 - x) ** 2 + (y2 - y) ** 2
distances.append(d)
distances = np.sqrt(distances)
closest_idx = np.argmin(distances)
if return_distance:
return closest_idx, distances[closest_idx]
return closest_idx
def _compute_inside_image_point_mask(self, image):
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
h, w = shape[0:2]
return np.logical_and(
np.logical_and(0 <= self.exterior[:, 0], self.exterior[:, 0] < w),
np.logical_and(0 <= self.exterior[:, 1], self.exterior[:, 1] < h)
)
# TODO keep this method? it is almost an alias for is_out_of_image()
def is_fully_within_image(self, image):
"""
Estimate whether the polygon is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
bool
True if the polygon is fully inside the image area.
False otherwise.
"""
return not self.is_out_of_image(image, fully=True, partly=True)
# TODO keep this method? it is almost an alias for is_out_of_image()
def is_partly_within_image(self, image):
"""
Estimate whether the polygon is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
bool
True if the polygon is at least partially inside the image area.
False otherwise.
"""
return not self.is_out_of_image(image, fully=True, partly=False)
def is_out_of_image(self, image, fully=True, partly=False):
"""
Estimate whether the polygon is partially or fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
fully : bool, optional
Whether to return True if the polygon is fully outside fo the image area.
partly : bool, optional
Whether to return True if the polygon is at least partially outside fo the image area.
Returns
-------
bool
True if the polygon is partially/fully outside of the image area, depending
on defined parameters. False otherwise.
"""
if len(self.exterior) == 0:
raise Exception("Cannot determine whether the polygon is inside the image, because it contains no points.")
inside = self._compute_inside_image_point_mask(image)
nb_inside = sum(inside)
if nb_inside == len(inside):
return False
elif nb_inside > 0:
return partly
else:
return fully
# TODO mark as deprecated
# TODO rename cut_* to clip_* in BoundingBox
    def cut_out_of_image(self, image):
        """
        Deprecated alias for :func:`Polygon.clip_out_of_image`.

        Cuts off all parts of the polygon that are outside of the image;
        see ``clip_out_of_image()`` for details.
        """
        return self.clip_out_of_image(image)
def clip_out_of_image(self, image):
"""
Cut off all parts of the polygon that are outside of the image.
This operation may lead to new points being created.
As a single polygon may be split into multiple new polygons, the result is a MultiPolygon.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use for the clipping of the polygon.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
imgaug.MultiPolygon
Polygon, clipped to fall within the image dimensions.
Returned as MultiPolygon, because the clipping can split the polygon into multiple parts.
"""
# if fully out of image, clip everything away, nothing remaining
if self.is_out_of_image(image, fully=True, partly=False):
return MultiPolygon([])
h, w = image.shape[0:2]
poly_shapely = self.to_shapely_polygon()
poly_image = shapely.geometry.Polygon([(0, 0), (w, 0), (w, h), (0, h)])
multipoly_inter_shapely = poly_shapely.intersection(poly_image)
if not isinstance(multipoly_inter_shapely, shapely.geometry.MultiPolygon):
do_assert(isinstance(multipoly_inter_shapely, shapely.geometry.Polygon))
multipoly_inter_shapely = shapely.geometry.MultiPolygon([multipoly_inter_shapely])
polygons = []
for poly_inter_shapely in multipoly_inter_shapely.geoms:
polygons.append(Polygon.from_shapely(poly_inter_shapely, label=self.label))
# shapely changes the order of points, we try here to preserve it as good as possible
polygons_reordered = []
for polygon in polygons:
found = False
for x, y in self.exterior:
closest_idx, dist = polygon.find_closest_point_index(x=x, y=y, return_distance=True)
if dist < 1e-6:
polygon_reordered = polygon.change_first_point_by_index(closest_idx)
polygons_reordered.append(polygon_reordered)
found = True
break
do_assert(found) # could only not find closest points if new polys are empty
return MultiPolygon(polygons_reordered)
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift the polygon from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the polygon from the top.
right : None or int, optional
Amount of pixels by which to shift the polygon from the right.
bottom : None or int, optional
Amount of pixels by which to shift the polygon from the bottom.
left : None or int, optional
Amount of pixels by which to shift the polygon from the left.
Returns
-------
imgaug.Polygon
Shifted polygon.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
exterior = np.copy(self.exterior)
exterior[:, 0] += (left - right)
exterior[:, 1] += (top - bottom)
return self.deepcopy(exterior=exterior)
# TODO add boundary thickness
    def draw_on_image(self,
                      image,
                      color=(0, 255, 0), color_perimeter=(0, 128, 0),
                      alpha=0.5, alpha_perimeter=1.0,
                      raise_if_out_of_image=False):
        """
        Draw the polygon on an image.

        Parameters
        ----------
        image : (H,W,C) ndarray
            The image onto which to draw the polygon. Usually expected to be of dtype uint8, though other dtypes
            are also handled.

        color : iterable of int, optional
            The color to use for the polygon (excluding perimeter). Must correspond to the channel layout of the
            image. Usually RGB.

        color_perimeter : iterable of int, optional
            The color to use for the perimeter/border of the polygon. Must correspond to the channel layout of the
            image. Usually RGB.

        alpha : float, optional
            The transparency of the polygon (excluding the perimeter), where 1.0 denotes no transparency and 0.0 is
            invisible.

        alpha_perimeter : float, optional
            The transparency of the polygon's perimeter/border, where 1.0 denotes no transparency and 0.0 is
            invisible.

        raise_if_out_of_image : bool, optional
            Whether to raise an error if the polygon is partially/fully outside of the
            image. If set to False, no error will be raised and only the parts inside the image
            will be drawn.

        Returns
        -------
        result : (H,W,C) ndarray
            Image with polygon drawn on it. Result dtype is the same as the input dtype.
        """
        # TODO separate this into draw_face_on_image() and draw_border_on_image()
        if raise_if_out_of_image and self.is_out_of_image(image):
            raise Exception("Cannot draw polygon %s on image with shape %s." % (
                str(self), image.shape
            ))
        xx = self.xx_int
        yy = self.yy_int
        # TODO np.clip to image plane if is_fully_within_image(), similar to how it is done for bounding boxes
        # TODO improve efficiency by only drawing in rectangle that covers poly instead of drawing in the whole image
        # TODO for a rectangular polygon, the face coordinates include the top/left boundary but not the right/bottom
        # boundary. This may be unintuitive when not drawing the boundary. Maybe somehow remove the boundary
        # coordinates from the face coordinates after generating both?
        # Rasterize face (filled interior) and perimeter separately, as each
        # has its own color and alpha.
        rr, cc = skimage.draw.polygon(yy, xx, shape=image.shape)
        rr_perimeter, cc_perimeter = skimage.draw.polygon_perimeter(yy, xx, shape=image.shape)
        params = (rr, cc, color, alpha)
        params_perimeter = (rr_perimeter, cc_perimeter, color_perimeter, alpha_perimeter)
        # Blend in float32, then convert back to the input dtype at the end.
        input_dtype = image.dtype
        result = image.astype(np.float32)
        for rr, cc, color, alpha in [params, params_perimeter]:
            color = np.float32(color)
            if alpha >= 0.99:
                # effectively opaque: plain overwrite
                result[rr, cc, :] = color
            elif alpha < 1e-4:
                pass  # invisible, do nothing
            else:
                # standard alpha blending
                result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
        if input_dtype.type == np.uint8:
            result = np.clip(result, 0, 255).astype(input_dtype)  # TODO make clipping more flexible
        else:
            result = result.astype(input_dtype)
        return result
def extract_from_image(self, image):
"""
Extract the image pixels within the polygon.
This function will zero-pad the image if the polygon is partially/fully outside of
the image.
Parameters
----------
image : (H,W) ndarray or (H,W,C) ndarray
The image from which to extract the pixels within the polygon.
Returns
-------
result : (H',W') ndarray or (H',W',C) ndarray
Pixels within the polygon. Zero-padded if the polygon is partially/fully
outside of the image.
"""
do_assert(image.ndim in [2, 3])
if len(self.exterior) <= 2:
raise Exception("Polygon must be made up of at least 3 points to extract its area from an image.")
bb = self.to_bounding_box()
bb_area = bb.extract_from_image(image)
if self.is_out_of_image(image, fully=True, partly=False):
return bb_area
xx = self.xx_int
yy = self.yy_int
xx_mask = xx - np.min(xx)
yy_mask = yy - np.min(yy)
height_mask = np.max(yy_mask)
width_mask = np.max(xx_mask)
rr_face, cc_face = skimage.draw.polygon(yy_mask, xx_mask, shape=(height_mask, width_mask))
mask = np.zeros((height_mask, width_mask), dtype=np.bool)
mask[rr_face, cc_face] = True
if image.ndim == 3:
mask = np.tile(mask[:, :, np.newaxis], (1, 1, image.shape[2]))
return bb_area * mask
def change_first_point_by_coords(self, x, y, max_distance=1e-4):
"""
Set the first point of the exterior to the given point based on its coordinates.
If multiple points are found, the closest one will be picked.
If no matching points are found, an exception is raised.
Note: This method does *not* work in-place.
Parameters
----------
x : number
X-coordinate of the point.
y : number
Y-coordinate of the point.
max_distance : number
Maximum distance past which possible matches are ignored.
Returns
-------
imgaug.Polygon
Copy of this polygon with the new point order.
"""
if len(self.exterior) == 0:
raise Exception("Cannot reorder polygon points, because it contains no points.")
closest_idx, closest_dist = self.find_closest_point_index(x=x, y=y, return_distance=True)
if max_distance is not None and closest_dist > max_distance:
closest_point = self.exterior[closest_idx, :]
raise Exception(
"Closest found point (%.9f, %.9f) exceeds max_distance of %.9f exceeded" % (
closest_point[0], closest_point[1], closest_dist)
)
return self.change_first_point_by_index(closest_idx)
def change_first_point_by_index(self, point_idx):
"""
Set the first point of the exterior to the given point based on its index.
Note: This method does *not* work in-place.
Parameters
----------
point_idx : int
Index of the desired starting point.
Returns
-------
imgaug.Polygon
Copy of this polygon with the new point order.
"""
do_assert(0 <= point_idx < len(self.exterior))
if point_idx == 0:
return self.deepcopy()
exterior = np.concatenate(
(self.exterior[point_idx:, :], self.exterior[:point_idx, :]),
axis=0
)
return self.deepcopy(exterior=exterior)
def to_shapely_polygon(self):
"""
Convert this polygon to a Shapely polygon.
Returns
-------
shapely.geometry.Polygon
The Shapely polygon matching this polygon's exterior.
"""
return shapely.geometry.Polygon([(point[0], point[1]) for point in self.exterior])
def to_shapely_line_string(self, closed=False, interpolate=0):
"""
Convert this polygon to a Shapely LineString object.
Parameters
----------
closed : bool, optional
Whether to return the line string with the last point being identical to the first point.
interpolate : int, optional
Number of points to interpolate between any pair of two consecutive points. These points are added
to the final line string.
Returns
-------
shapely.geometry.LineString
The Shapely LineString matching the polygon's exterior.
"""
return _convert_points_to_shapely_line_string(self.exterior, closed=closed, interpolate=interpolate)
def to_bounding_box(self):
"""
Convert this polygon to a bounding box tightly containing the whole polygon.
Returns
-------
imgaug.BoundingBox
The bounding box tightly containing the polygon.
"""
xx = self.xx
yy = self.yy
return BoundingBox(x1=min(xx), x2=max(xx), y1=min(yy), y2=max(yy), label=self.label)
@staticmethod
def from_shapely(polygon_shapely, label=None):
"""
Create a polygon from a Shapely polygon.
Note: This will remove any holes in the Shapely polygon.
Parameters
----------
polygon_shapely : shapely.geometry.Polygon
The shapely polygon.
label : None or str, optional
The label of the new polygon.
Returns
-------
imgaug.Polygon
A polygon with the same exterior as the Shapely polygon.
"""
do_assert(isinstance(polygon_shapely, shapely.geometry.Polygon))
# polygon_shapely.exterior can be None if the polygon was instantiated without points
if polygon_shapely.exterior is None or len(polygon_shapely.exterior.coords) == 0:
return Polygon([], label=label)
exterior = np.float32([[x, y] for (x, y) in polygon_shapely.exterior.coords])
return Polygon(exterior, label=label)
def exterior_almost_equals(self, other_polygon, max_distance=1e-6, interpolate=8):
    """
    Estimate whether the geometry of the exterior of this polygon and another polygon are comparable.

    The two exteriors can have different numbers of points, but any point randomly sampled on the exterior
    of one polygon should be close to the closest point on the exterior of the other polygon.

    Note that this method works approximately. One can come up with polygons with fairly different shapes that
    will still be estimated as equal by this method. In practice however this should be unlikely to be the case.
    The probability for something like that goes down as the interpolation parameter is increased.

    Parameters
    ----------
    other_polygon : imgaug.Polygon or (N,2) ndarray
        The other polygon with which to compare the exterior.
        If this is an ndarray, it is assumed to represent an exterior.
        It must then have dtype float32 and shape (N,2) with the second dimension denoting xy-coordinates.

    max_distance : number
        The maximum euclidean distance between a point on one polygon and the closest point on the other polygon.
        If the distance is exceeded for any such pair, the two exteriors are not viewed as equal.
        The points are either the points contained in the polygon's exterior ndarray or interpolated points
        between these.

    interpolate : int
        How many points to interpolate between the points of the polygon's exteriors.
        If this is set to zero, then only the points given by the polygon's exterior ndarrays will be used.
        Higher values make it less likely that unequal polygons are evaluated as equal.

    Returns
    -------
    bool
        Whether the two polygon's exteriors can be viewed as equal (approximate test).
    """
    atol = max_distance
    ext_a = self.exterior
    # other_polygon may be a raw (N,2) ndarray instead of a Polygon
    ext_b = other_polygon.exterior if not is_np_array(other_polygon) else other_polygon
    len_a = len(ext_a)
    len_b = len(ext_b)
    # trivially decide the empty cases: equal only if both are empty
    if len_a == 0 and len_b == 0:
        return True
    elif len_a == 0 and len_b > 0:
        return False
    elif len_a > 0 and len_b == 0:
        return False
    # neither A nor B is zero-sized at this point
    # if A or B only contain points identical to the first point, merge them to one point
    if len_a > 1:
        if all([np.allclose(ext_a[0, :], ext_a[1 + i, :], rtol=0, atol=atol) for i in sm.xrange(len_a - 1)]):
            ext_a = ext_a[0:1, :]
            len_a = 1
    if len_b > 1:
        if all([np.allclose(ext_b[0, :], ext_b[1 + i, :], rtol=0, atol=atol) for i in sm.xrange(len_b - 1)]):
            ext_b = ext_b[0:1, :]
            len_b = 1
    # handle polygons that contain a single point: compare that point against
    # every point of the other polygon directly (no LineString possible)
    if len_a == 1 and len_b == 1:
        return np.allclose(ext_a[0, :], ext_b[0, :], rtol=0, atol=atol)
    elif len_a == 1:
        return all([np.allclose(ext_a[0, :], ext_b[i, :], rtol=0, atol=atol) for i in sm.xrange(len_b)])
    elif len_b == 1:
        return all([np.allclose(ext_b[0, :], ext_a[i, :], rtol=0, atol=atol) for i in sm.xrange(len_a)])
    # After this point, both polygons have at least 2 points, i.e. LineStrings can be used.
    # We can also safely go back to the original exteriors (before close points were merged).
    ls_a = self.to_shapely_line_string(closed=True, interpolate=interpolate)
    ls_b = other_polygon.to_shapely_line_string(closed=True, interpolate=interpolate) \
        if not is_np_array(other_polygon) \
        else _convert_points_to_shapely_line_string(other_polygon, closed=True, interpolate=interpolate)
    # Measure the distance from each point in A to LineString B and vice versa.
    # Make sure that no point violates the tolerance.
    # Note that we can't just use LineString.almost_equals(LineString) -- that seems to expect the same number
    # and order of points in both LineStrings (failed with duplicated points).
    for x, y in ls_a.coords:
        point = shapely.geometry.Point(x, y)
        if not ls_b.distance(point) <= max_distance:
            return False
    for x, y in ls_b.coords:
        point = shapely.geometry.Point(x, y)
        if not ls_a.distance(point) <= max_distance:
            return False
    return True
def almost_equals(self, other, max_distance=1e-6, interpolate=8):
    """
    Compare this polygon with another one and estimate whether they can be viewed as equal.

    This is the same as :func:`imgaug.Polygon.exterior_almost_equals` but additionally compares the labels.

    Parameters
    ----------
    other
        The object to compare against. If not a Polygon, then False will be returned.

    max_distance : float
        See :func:`imgaug.Polygon.exterior_almost_equals`.

    interpolate : int
        See :func:`imgaug.Polygon.exterior_almost_equals`.

    Returns
    -------
    bool
        Whether the two polygons can be viewed as equal. In the case of the exteriors this is an approximate test.
    """
    if not isinstance(other, Polygon):
        return False
    # labels must match exactly; None on both sides counts as a match
    if self.label != other.label:
        return False
    return self.exterior_almost_equals(other, max_distance=max_distance, interpolate=interpolate)
def copy(self, exterior=None, label=None):
    """
    Create a shallow copy of the Polygon object.

    Parameters
    ----------
    exterior : list of imgaug.Keypoint or list of tuple or (N,2) ndarray, optional
        List of points defining the polygon. See :func:`imgaug.Polygon.__init__` for details.

    label : None or str, optional
        If not None, then the label of the copied object will be set to this value.

    Returns
    -------
    imgaug.Polygon
        Shallow copy.
    """
    # the underlying data is small, so the shallow copy simply delegates to deepcopy()
    return self.deepcopy(exterior=exterior, label=label)
def deepcopy(self, exterior=None, label=None):
    """
    Create a deep copy of the Polygon object.

    Parameters
    ----------
    exterior : list of Keypoint or list of tuple or (N,2) ndarray, optional
        List of points defining the polygon. See `imgaug.Polygon.__init__` for details.

    label : None or str
        If not None, then the label of the copied object will be set to this value.

    Returns
    -------
    imgaug.Polygon
        Deep copy.
    """
    if exterior is None:
        # copy the coordinate array so the new polygon does not share memory
        exterior = np.copy(self.exterior)
    if label is None:
        label = self.label
    return Polygon(exterior=exterior, label=label)
def __repr__(self):
    # repr and str are intentionally identical
    return self.__str__()

def __str__(self):
    coord_strs = ["(x=%.3f, y=%.3f)" % (pt[0], pt[1]) for pt in self.exterior]
    return "Polygon([%s] (%d points), label=%s)" % (
        ", ".join(coord_strs), len(self.exterior), self.label)
def _convert_points_to_shapely_line_string(points, closed=False, interpolate=0):
    # Build a shapely LineString from an iterable of xy-points, optionally
    # interpolating extra points and/or closing the ring.
    if len(points) <= 1:
        raise Exception(
            ("Conversion to shapely line string requires at least two points, but points input contains "
             "only %d points.") % (len(points),)
        )
    tuples = [(pt[0], pt[1]) for pt in points]

    # interpolate points between each consecutive pair of points
    if interpolate > 0:
        tuples = _interpolate_points(tuples, interpolate)

    # close if requested and not yet closed; the length check intentionally
    # uses the original (non-interpolated) point list
    if closed and len(points) > 1:
        tuples.append(tuples[0])
    return shapely.geometry.LineString(tuples)
def _interpolate_point_pair(point_a, point_b, nb_steps):
    # Return nb_steps points placed at even intervals between point_a and
    # point_b (both endpoints excluded). Returns an empty list for nb_steps < 1.
    if nb_steps < 1:
        return []
    xa, ya = point_a
    xb, yb = point_b
    # one step vector; nb_steps interior points split the segment into nb_steps+1 parts
    delta = np.float32([xb - xa, yb - ya]) / (1 + nb_steps)
    result = []
    for i in sm.xrange(nb_steps):
        result.append((xa + (i + 1) * delta[0], ya + (i + 1) * delta[1]))
    return result
def _interpolate_points(points, nb_steps, closed=True):
    # Insert nb_steps interpolated points between every pair of consecutive
    # points. If closed is True, the segment from the last back to the first
    # point is interpolated as well (without duplicating the first point at
    # the end of the result).
    if len(points) <= 1:
        return points
    seq = list(points) + [points[0]] if closed else points
    result = []
    for start, end in zip(seq[:-1], seq[1:]):
        result.append(start)
        result.extend(_interpolate_point_pair(start, end, nb_steps))
    if not closed:
        # the open case must re-append the final point, which the loop above
        # only used as a segment end
        result.append(seq[-1])
    return result
def _interpolate_points_by_max_distance(points, max_distance, closed=True):
    # Insert interpolated points between consecutive points so that no two
    # neighbouring points end up further apart than max_distance.
    do_assert(max_distance > 0, "max_distance must have value greater than 0, got %.8f" % (max_distance,))
    if len(points) <= 1:
        return points
    seq = list(points) + [points[0]] if closed else points
    result = []
    for start, end in zip(seq[:-1], seq[1:]):
        dist = np.sqrt((start[0] - end[0]) ** 2 + (start[1] - end[1]) ** 2)
        # number of interior points needed so that step size <= max_distance
        nb_steps = int((dist / max_distance) - 1)
        result.append(start)
        result.extend(_interpolate_point_pair(start, end, nb_steps))
    if not closed:
        result.append(seq[-1])
    return result
class MultiPolygon(object):
    """
    Class that represents several polygons.

    Parameters
    ----------
    geoms : list of imgaug.Polygon
        List of the polygons.
    """

    def __init__(self, geoms):
        """Create a new MultiPolygon instance."""
        do_assert(len(geoms) == 0 or all([isinstance(el, Polygon) for el in geoms]))
        self.geoms = geoms

    @staticmethod
    def from_shapely(geometry, label=None):
        """
        Create a MultiPolygon from a Shapely MultiPolygon, a Shapely Polygon or a Shapely GeometryCollection.

        This also creates all necessary Polygons contained by this MultiPolygon.

        Parameters
        ----------
        geometry : shapely.geometry.MultiPolygon or shapely.geometry.Polygon\
                   or shapely.geometry.collection.GeometryCollection
            The object to convert to a MultiPolygon.

        label : None or str, optional
            A label assigned to all Polygons within the MultiPolygon.

        Returns
        -------
        imgaug.MultiPolygon
            The derived MultiPolygon.
        """
        # a single Polygon becomes a one-element MultiPolygon
        if isinstance(geometry, shapely.geometry.Polygon):
            return MultiPolygon([Polygon.from_shapely(geometry, label=label)])
        elif isinstance(geometry, shapely.geometry.MultiPolygon):
            return MultiPolygon([Polygon.from_shapely(poly, label=label) for poly in geometry.geoms])
        elif isinstance(geometry, shapely.geometry.collection.GeometryCollection):
            # collections are only accepted if every member is a Polygon
            do_assert(all([isinstance(poly, shapely.geometry.Polygon) for poly in geometry.geoms]))
            return MultiPolygon([Polygon.from_shapely(poly, label=label) for poly in geometry.geoms])
        else:
            raise Exception("Unknown datatype '%s'. Expected shapely.geometry.Polygon or "
                            "shapely.geometry.MultiPolygon or "
                            "shapely.geometry.collections.GeometryCollection." % (type(geometry),))
class HeatmapsOnImage(object):
    """
    Object representing heatmaps on images.

    Internally, the heatmap values are always stored in an (H,W,C) float32
    array normalized to the value range (0.0, 1.0); ``min_value``/``max_value``
    only describe the user-facing value range used by :func:`get_arr`.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Array representing the heatmap(s).
        Must be of dtype float32.
        If multiple heatmaps are provided, then ``C`` is expected to denote their number.

    shape : tuple of int
        Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
        heatmap(s) array, unless it is identical to the image shape (note the likely
        difference between the arrays in the number of channels).
        If there is not a corresponding image, use the shape of the heatmaps array.

    min_value : float, optional
        Minimum value for the heatmaps that `arr` represents. This will usually be ``0.0``.

    max_value : float, optional
        Maximum value for the heatmaps that `arr` represents. This will usually be ``1.0``.
    """

    def __init__(self, arr, shape, min_value=0.0, max_value=1.0):
        """Construct a new HeatmapsOnImage object."""
        do_assert(is_np_array(arr), "Expected numpy array as heatmap input array, got type %s" % (type(arr),))
        # TODO maybe allow 0-sized heatmaps? in that case the min() and max() must be adjusted
        do_assert(arr.shape[0] > 0 and arr.shape[1] > 0,
                  "Expected numpy array as heatmap with height and width greater than 0, got shape %s." % (arr.shape,))
        do_assert(arr.dtype.type in [np.float32],
                  "Heatmap input array expected to be of dtype float32, got dtype %s." % (arr.dtype,))
        do_assert(arr.ndim in [2, 3], "Heatmap input array must be 2d or 3d, got shape %s." % (arr.shape,))
        do_assert(len(shape) in [2, 3],
                  "Argument 'shape' in HeatmapsOnImage expected to be 2d or 3d, got shape %s." % (shape,))
        do_assert(min_value < max_value)
        # only the first 50 array values are validated against the declared
        # value range, presumably for performance reasons
        do_assert(np.min(arr.flat[0:50]) >= min_value - np.finfo(arr.dtype).eps,
                  ("Value range of heatmap was chosen to be (%.8f, %.8f), but found value below minimum in first "
                   + "50 heatmap array values.") % (min_value, max_value))
        do_assert(np.max(arr.flat[0:50]) <= max_value + np.finfo(arr.dtype).eps,
                  ("Value range of heatmap was chosen to be (%.8f, %.8f), but found value above maximum in first "
                   + "50 heatmap array values.") % (min_value, max_value))
        if arr.ndim == 2:
            # remember 2d inputs so that get_arr() can return the same shape
            arr = arr[..., np.newaxis]
            self.arr_was_2d = True
        else:
            self.arr_was_2d = False
        # project the input to the internal (0.0, 1.0) representation, unless
        # the declared range already is (approximately) (0.0, 1.0)
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < min_value < 0.0 + eps
        max_is_one = 1.0 - eps < max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            self.arr_0to1 = arr
        else:
            self.arr_0to1 = (arr - min_value) / (max_value - min_value)
        self.shape = shape
        self.min_value = min_value
        self.max_value = max_value

    def get_arr(self):
        """
        Get the heatmap's array within the value range originally provided in ``__init__()``.

        The HeatmapsOnImage object saves heatmaps internally in the value range ``(min=0.0, max=1.0)``.
        This function converts the internal representation to ``(min=min_value, max=max_value)``,
        where ``min_value`` and ``max_value`` are provided upon instantiation of the object.

        Returns
        -------
        result : (H,W) ndarray or (H,W,C) ndarray
            Heatmap array. Dtype is float32.
        """
        # restore the original 2d shape if the input was 2d
        if self.arr_was_2d and self.arr_0to1.shape[2] == 1:
            arr = self.arr_0to1[:, :, 0]
        else:
            arr = self.arr_0to1
        # skip the projection when the target range already is (0.0, 1.0)
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < self.min_value < 0.0 + eps
        max_is_one = 1.0 - eps < self.max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            return np.copy(arr)
        else:
            diff = self.max_value - self.min_value
            return self.min_value + diff * arr

    # TODO
    # def find_global_maxima(self):
    #    raise NotImplementedError()

    def draw(self, size=None, cmap="jet"):
        """
        Render the heatmaps as RGB images.

        Parameters
        ----------
        size : None or float or iterable of int or iterable of float, optional
            Size of the rendered RGB image as ``(height, width)``.
            See :func:`imgaug.imresize_single_image` for details.
            If set to None, no resizing is performed and the size of the heatmaps array is used.

        cmap : str or None, optional
            Color map of ``matplotlib`` to use in order to convert the heatmaps to RGB images.
            If set to None, no color map will be used and the heatmaps will be converted
            to simple intensity maps.

        Returns
        -------
        heatmaps_drawn : list of (H,W,3) ndarray
            Rendered heatmaps. One per heatmap array channel. Dtype is uint8.
        """
        heatmaps_uint8 = self.to_uint8()
        heatmaps_drawn = []
        # one output image per heatmap channel
        for c in sm.xrange(heatmaps_uint8.shape[2]):
            # c:c+1 here, because the additional axis is needed by imresize_single_image
            heatmap_c = heatmaps_uint8[..., c:c+1]
            if size is not None:
                heatmap_c_rs = imresize_single_image(heatmap_c, size, interpolation="nearest")
            else:
                heatmap_c_rs = heatmap_c
            heatmap_c_rs = np.squeeze(heatmap_c_rs).astype(np.float32) / 255.0
            if cmap is not None:
                cmap_func = plt.get_cmap(cmap)
                heatmap_cmapped = cmap_func(heatmap_c_rs)
                # the colormap returns RGBA; drop the alpha channel
                heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
            else:
                # no colormap: replicate the intensity into three channels
                heatmap_cmapped = np.tile(heatmap_c_rs[..., np.newaxis], (1, 1, 3))
            heatmap_cmapped = np.clip(heatmap_cmapped * 255, 0, 255).astype(np.uint8)
            heatmaps_drawn.append(heatmap_cmapped)
        return heatmaps_drawn

    def draw_on_image(self, image, alpha=0.75, cmap="jet", resize="heatmaps"):
        """
        Draw the heatmaps as overlays over an image.

        Parameters
        ----------
        image : (H,W,3) ndarray
            Image onto which to draw the heatmaps. Expected to be of dtype uint8.

        alpha : float, optional
            Alpha/opacity value to use for the mixing of image and heatmaps.
            Higher values mean that the heatmaps will be more visible and the image less visible.

        cmap : str or None, optional
            Color map to use. See :func:`imgaug.HeatmapsOnImage.draw` for details.

        resize : {'heatmaps', 'image'}, optional
            In case of size differences between the image and heatmaps, either the image or
            the heatmaps can be resized. This parameter controls which of the two will be resized
            to the other's size.

        Returns
        -------
        mix : list of (H,W,3) ndarray
            Rendered overlays. One per heatmap array channel. Dtype is uint8.
        """
        # assert RGB image
        do_assert(image.ndim == 3)
        do_assert(image.shape[2] == 3)
        do_assert(image.dtype.type == np.uint8)

        do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
        do_assert(resize in ["heatmaps", "image"])

        if resize == "image":
            image = imresize_single_image(image, self.arr_0to1.shape[0:2], interpolation="cubic")

        heatmaps_drawn = self.draw(
            size=image.shape[0:2] if resize == "heatmaps" else None,
            cmap=cmap
        )

        # alpha-blend each rendered heatmap over the image
        mix = [
            np.clip((1-alpha) * image + alpha * heatmap_i, 0, 255).astype(np.uint8)
            for heatmap_i
            in heatmaps_drawn
        ]

        return mix

    def invert(self):
        """
        Inverts each value in the heatmap, shifting low towards high values and vice versa.

        This changes each value to::

            v' = max - (v - min)

        where ``v`` is the value at some spatial location, ``min`` is the minimum value in the heatmap
        and ``max`` is the maximum value.
        As the heatmap uses internally a 0.0 to 1.0 representation, this simply becomes ``v' = 1.0 - v``.

        Note that the attributes ``min_value`` and ``max_value`` are not switched. They both keep their values.

        This function can be useful e.g. when working with depth maps, where algorithms might have
        an easier time representing the furthest away points with zeros, requiring an inverted
        depth map.

        Returns
        -------
        arr_inv : imgaug.HeatmapsOnImage
            Inverted heatmap.
        """
        arr_inv = HeatmapsOnImage.from_0to1(1 - self.arr_0to1, shape=self.shape, min_value=self.min_value,
                                            max_value=self.max_value)
        # preserve the original 2d-ness so get_arr() keeps returning 2d arrays
        arr_inv.arr_was_2d = self.arr_was_2d
        return arr_inv

    def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
        """
        Pad the heatmaps on their top/right/bottom/left side.

        Parameters
        ----------
        top : int, optional
            Amount of pixels to add at the top side of the heatmaps. Must be 0 or greater.

        right : int, optional
            Amount of pixels to add at the right side of the heatmaps. Must be 0 or greater.

        bottom : int, optional
            Amount of pixels to add at the bottom side of the heatmaps. Must be 0 or greater.

        left : int, optional
            Amount of pixels to add at the left side of the heatmaps. Must be 0 or greater.

        mode : string, optional
            Padding mode to use. See :func:`numpy.pad` for details.

        cval : number, optional
            Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Padded heatmaps of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
        """
        arr_0to1_padded = pad(self.arr_0to1, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
        return HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)

    def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
        """
        Pad the heatmaps on their sides so that they match a target aspect ratio.

        Depending on which dimension is smaller (height or width), only the corresponding
        sides (left/right or top/bottom) will be padded. In each case, both of the sides will
        be padded equally.

        Parameters
        ----------
        aspect_ratio : float
            Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
            as much width as height.

        mode : str, optional
            Padding mode to use. See :func:`numpy.pad` for details.

        cval : number, optional
            Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

        return_pad_amounts : bool, optional
            If False, then only the padded image will be returned. If True, a tuple with two
            entries will be returned, where the first entry is the padded image and the second
            entry are the amounts by which each image side was padded. These amounts are again a
            tuple of the form (top, right, bottom, left), with each value being an integer.

        Returns
        -------
        heatmaps : imgaug.HeatmapsOnImage
            Padded heatmaps as HeatmapsOnImage object.

        pad_amounts : tuple of int
            Amounts by which the heatmaps were padded on each side, given as a tuple ``(top, right, bottom, left)``.
            This tuple is only returned if `return_pad_amounts` was set to True.
        """
        arr_0to1_padded, pad_amounts = pad_to_aspect_ratio(self.arr_0to1, aspect_ratio=aspect_ratio, mode=mode,
                                                           cval=cval, return_pad_amounts=True)
        heatmaps = HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,
                                             max_value=self.max_value)
        if return_pad_amounts:
            return heatmaps, pad_amounts
        else:
            return heatmaps

    def avg_pool(self, block_size):
        """
        Rescale the heatmap(s) array using average pooling of a given block/kernel size.

        Parameters
        ----------
        block_size : int or tuple of int
            Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps after average pooling.
        """
        arr_0to1_reduced = avg_pool(self.arr_0to1, block_size, cval=0.0)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)

    def max_pool(self, block_size):
        """
        Rescale the heatmap(s) array using max-pooling of a given block/kernel size.

        Parameters
        ----------
        block_size : int or tuple of int
            Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps after max-pooling.
        """
        arr_0to1_reduced = max_pool(self.arr_0to1, block_size)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)

    def scale(self, sizes, interpolation="cubic"):
        """
        Rescale the heatmap(s) array to the provided size given the provided interpolation.

        Parameters
        ----------
        sizes : float or iterable of int or iterable of float
            New size of the array in ``(height, width)``. See :func:`imgaug.imresize_single_image` for details.

        interpolation : None or str or int, optional
            The interpolation to use during resize. See :func:`imgaug.imresize_single_image` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Rescaled heatmaps object.
        """
        arr_0to1_rescaled = imresize_single_image(self.arr_0to1, sizes, interpolation=interpolation)

        # cubic interpolation can lead to values outside of [0.0, 1.0],
        # see https://github.com/opencv/opencv/issues/7195
        # TODO area interpolation too?
        arr_0to1_rescaled = np.clip(arr_0to1_rescaled, 0.0, 1.0)

        return HeatmapsOnImage.from_0to1(arr_0to1_rescaled, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)

    def to_uint8(self):
        """
        Convert this heatmaps object to a 0-to-255 array.

        Returns
        -------
        arr_uint8 : (H,W,C) ndarray
            Heatmap as a 0-to-255 array (dtype is uint8).
        """
        # TODO this always returns (H,W,C), even if input ndarray was originally (H,W)
        # does it make sense here to also return (H,W) if self.arr_was_2d?
        arr_0to255 = np.clip(np.round(self.arr_0to1 * 255), 0, 255)
        arr_uint8 = arr_0to255.astype(np.uint8)
        return arr_uint8

    @staticmethod
    def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from a heatmap array containing values ranging from 0 to 255.

        Parameters
        ----------
        arr_uint8 : (H,W) ndarray or (H,W,C) ndarray
            Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
            Expected dtype is uint8.

        shape : tuple of int
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.

        min_value : float, optional
            Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
            be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
            underlying ``(0, 255)`` array to value range ``(min_value, max_value)``.

        max_value : float, optional
            Maximum value for the heatmaps that 0-to-255 array represents.
            See parameter `min_value` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps object.
        """
        arr_0to1 = arr_uint8.astype(np.float32) / 255.0
        return HeatmapsOnImage.from_0to1(arr_0to1, shape, min_value=min_value, max_value=max_value)

    @staticmethod
    def from_0to1(arr_0to1, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from a heatmap array containing values ranging from 0.0 to 1.0.

        Parameters
        ----------
        arr_0to1 : (H,W) or (H,W,C) ndarray
            Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
            Expected dtype is float32.

        shape : tuple of ints
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.

        min_value : float, optional
            Minimum value for the heatmaps that the 0-to-1 array represents. This will usually
            be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
            underlying ``(0.0, 1.0)`` array to value range ``(min_value, max_value)``.
            E.g. if you started with heatmaps in the range ``(-1.0, 1.0)`` and projected these
            to (0.0, 1.0), you should call this function with ``min_value=-1.0``, ``max_value=1.0``
            so that :func:`imgaug.HeatmapsOnImage.get_arr` returns heatmap arrays having value
            range (-1.0, 1.0).

        max_value : float, optional
            Maximum value for the heatmaps that the 0-to-1 array represents.
            See parameter min_value for details.

        Returns
        -------
        heatmaps : imgaug.HeatmapsOnImage
            Heatmaps object.
        """
        # instantiate with range (0.0, 1.0) to skip the projection in
        # __init__, then patch in the user-facing value range afterwards
        heatmaps = HeatmapsOnImage(arr_0to1, shape, min_value=0.0, max_value=1.0)
        heatmaps.min_value = min_value
        heatmaps.max_value = max_value
        return heatmaps

    @classmethod
    def change_normalization(cls, arr, source, target):
        """
        Change the value range of a heatmap from one min-max to another min-max.

        E.g. the value range may be changed from min=0.0, max=1.0 to min=-1.0, max=1.0.

        Parameters
        ----------
        arr : ndarray
            Heatmap array to modify.

        source : tuple of float or imgaug.HeatmapsOnImage
            Current value range of the input array, given as (min, max), where both are float values.
            A HeatmapsOnImage may also be given, in which case its min_value/max_value are used.

        target : tuple of float or imgaug.HeatmapsOnImage
            Desired output value range of the array, given as (min, max), where both are float values.
            A HeatmapsOnImage may also be given, in which case its min_value/max_value are used.

        Returns
        -------
        arr_target : ndarray
            Input array, with value range projected to the desired target value range.
        """
        do_assert(is_np_array(arr))

        if isinstance(source, HeatmapsOnImage):
            source = (source.min_value, source.max_value)
        else:
            do_assert(isinstance(source, tuple))
            do_assert(len(source) == 2)
            do_assert(source[0] < source[1])

        if isinstance(target, HeatmapsOnImage):
            target = (target.min_value, target.max_value)
        else:
            do_assert(isinstance(target, tuple))
            do_assert(len(target) == 2)
            do_assert(target[0] < target[1])

        # Check if source and target are the same (with a tiny bit of tolerance)
        # if so, evade computation and just copy the array instead.
        # This is reasonable, as source and target will often both be (0.0, 1.0).
        eps = np.finfo(arr.dtype).eps
        mins_same = source[0] - 10*eps < target[0] < source[0] + 10*eps
        maxs_same = source[1] - 10*eps < target[1] < source[1] + 10*eps
        if mins_same and maxs_same:
            return np.copy(arr)

        min_source, max_source = source
        min_target, max_target = target

        diff_source = max_source - min_source
        diff_target = max_target - min_target

        # project source range to (0, 1), then stretch/shift to target range
        arr_0to1 = (arr - min_source) / diff_source
        arr_target = min_target + arr_0to1 * diff_target

        return arr_target

    def copy(self):
        """
        Create a shallow copy of the Heatmaps object.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Shallow copy.
        """
        return self.deepcopy()

    def deepcopy(self):
        """
        Create a deep copy of the Heatmaps object.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Deep copy.
        """
        return HeatmapsOnImage(self.get_arr(), shape=self.shape, min_value=self.min_value, max_value=self.max_value)
class SegmentationMapOnImage(object):
"""
Object representing a segmentation map associated with an image.
Attributes
----------
DEFAULT_SEGMENT_COLORS : list of tuple of int
Standard RGB colors to use during drawing, ordered by class index.
Parameters
----------
arr : (H,W) ndarray or (H,W,1) ndarray or (H,W,C) ndarray
Array representing the segmentation map. May have datatypes bool, integer or float.
* If bool: Assumed to be of shape (H,W), (H,W,1) or (H,W,C). If (H,W) or (H,W,1) it
is assumed to be for the case of having a single class (where any False denotes
background). Otherwise there are assumed to be C channels, one for each class,
with each of them containing a mask for that class. The masks may overlap.
* If integer: Assumed to be of shape (H,W) or (H,W,1). Each pixel is assumed to
contain an integer denoting the class index. Classes are assumed to be
non-overlapping. The number of classes cannot be guessed from this input, hence
nb_classes must be set.
* If float: Assumed to b eof shape (H,W), (H,W,1) or (H,W,C) with meanings being
similar to the case of `bool`. Values are expected to fall always in the range
0.0 to 1.0 and are usually expected to be either 0.0 or 1.0 upon instantiation
of a new segmentation map. Classes may overlap.
shape : iterable of int
Shape of the corresponding image (NOT the segmentation map array). This is expected
to be ``(H, W)`` or ``(H, W, C)`` with ``C`` usually being 3. If there is no corresponding image,
then use the segmentation map's shape instead.
nb_classes : int or None
Total number of unique classes that may appear in an segmentation map, i.e. the max
class index. This may be None if the input array is of type bool or float. The number
of classes however must be provided if the input array is of type int, as then the
number of classes cannot be guessed.
"""
DEFAULT_SEGMENT_COLORS = [
(0, 0, 0), # black
(230, 25, 75), # red
(60, 180, 75), # green
(255, 225, 25), # yellow
(0, 130, 200), # blue
(245, 130, 48), # orange
(145, 30, 180), # purple
(70, 240, 240), # cyan
(240, 50, 230), # magenta
(210, 245, 60), # lime
(250, 190, 190), # pink
(0, 128, 128), # teal
(230, 190, 255), # lavender
(170, 110, 40), # brown
(255, 250, 200), # beige
(128, 0, 0), # maroon
(170, 255, 195), # mint
(128, 128, 0), # olive
(255, 215, 180), # coral
(0, 0, 128), # navy
(128, 128, 128), # grey
(255, 255, 255), # white
# --
(115, 12, 37), # dark red
(30, 90, 37), # dark green
(127, 112, 12), # dark yellow
(0, 65, 100), # dark blue
(122, 65, 24), # dark orange
(72, 15, 90), # dark purple
(35, 120, 120), # dark cyan
(120, 25, 115), # dark magenta
(105, 122, 30), # dark lime
(125, 95, 95), # dark pink
(0, 64, 64), # dark teal
(115, 95, 127), # dark lavender
(85, 55, 20), # dark brown
(127, 125, 100), # dark beige
(64, 0, 0), # dark maroon
(85, 127, 97), # dark mint
(64, 64, 0), # dark olive
(127, 107, 90), # dark coral
(0, 0, 64), # dark navy
(64, 64, 64), # dark grey
]
def __init__(self, arr, shape, nb_classes=None):
    # Internally the map is always stored as a float32 (H,W,C) array of
    # per-class masks; `input_was` records the original dtype/ndim so that the
    # original output form can be reconstructed later.
    do_assert(is_np_array(arr), "Expected to get numpy array, got %s." % (type(arr),))

    if arr.dtype.type == np.bool_:
        # boolean masks: (H,W)/(H,W,1) single foreground mask or (H,W,C)
        # with one channel per class
        do_assert(arr.ndim in [2, 3])
        self.input_was = ("bool", arr.ndim)
        if arr.ndim == 2:
            arr = arr[..., np.newaxis]
        arr = arr.astype(np.float32)
    elif arr.dtype.type in NP_INT_TYPES.union(NP_UINT_TYPES):
        # integer class-index map: converted to one-hot channels below,
        # hence nb_classes must be provided
        do_assert(arr.ndim == 2 or (arr.ndim == 3 and arr.shape[2] == 1))
        do_assert(nb_classes is not None)
        do_assert(nb_classes > 0)
        # only the first 100 values are validated, presumably for performance;
        # NOTE(review): an index equal to nb_classes passes this check but would
        # break the one-hot conversion via np.eye below -- confirm intended bound
        do_assert(np.min(arr.flat[0:100]) >= 0)
        do_assert(np.max(arr.flat[0:100]) <= nb_classes)
        self.input_was = ("int", arr.dtype.type, arr.ndim)
        if arr.ndim == 3:
            arr = arr[..., 0]
        # TODO improve efficiency here by building only sub-heatmaps for classes actually
        # present in the image. This would also get rid of nb_classes.
        arr = np.eye(nb_classes)[arr]  # from class indices to one hot
        arr = arr.astype(np.float32)
    elif arr.dtype.type in NP_FLOAT_TYPES:
        # float masks: already channelwise; docstring expects values in [0.0, 1.0]
        do_assert(arr.ndim == 3)
        self.input_was = ("float", arr.dtype.type, arr.ndim)
        arr = arr.astype(np.float32)
    else:
        raise Exception(("Input was expected to be an ndarray of dtype bool or any dtype in %s or any dtype in %s. "
                         "Got dtype %s.") % (
                             str(NP_INT_TYPES.union(NP_UINT_TYPES)), str(NP_FLOAT_TYPES), str(arr.dtype)))

    do_assert(arr.ndim == 3)
    do_assert(arr.dtype.type == np.float32)
    self.arr = arr
    self.shape = shape
    # if nb_classes was not given (bool/float input), derive it from the channel count
    self.nb_classes = nb_classes if nb_classes is not None else arr.shape[2]
def get_arr_int(self, background_threshold=0.01, background_class_id=None):
"""
Get the segmentation map array as an integer array of shape (H, W).
Each pixel in that array contains an integer value representing the pixel's class.
If multiple classes overlap, the one with the highest local float value is picked.
If that highest local value is below `background_threshold`, the method instead uses
the background class id as the pixel's class value.
By default, class id 0 is the background class. This may only be changed if the original
input to the segmentation map object was an integer map.
Parameters
----------
background_threshold : float, optional
At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
class-heatmaps has a value above this threshold, the method uses the background class
id instead.
background_class_id : None or int, optional
Class id to fall back to if no class-heatmap passes the threshold at a spatial
location. May only be provided if the original input was an integer mask and in these
cases defaults to 0. If the input were float or boolean masks, the background class id
may not be set as it is assumed that the background is implicitly defined
as 'any spatial location that has zero-like values in all masks'.
Returns
-------
result : (H,W) ndarray
Segmentation map array (int32).
If the original input consisted of boolean or float masks, then the highest possible
class id is ``1+C``, where ``C`` is the number of provided float/boolean masks. The value
``0`` in the integer mask then denotes the background class.
"""
if self.input_was[0] in ["bool", "float"]:
do_assert(background_class_id is None,
"The background class id may only be changed if the original input to SegmentationMapOnImage "
+ "was an *integer* based segmentation map.")
if background_class_id is None:
background_class_id = 0
channelwise_max_idx = np.argmax(self.arr, axis=2)
# for bool and float input masks, we assume that the background is implicitly given,
# i.e. anything where all masks/channels have zero-like values
# for int, we assume that the background class is explicitly given and has the index 0
if self.input_was[0] in ["bool", "float"]:
result = 1 + channelwise_max_idx
else: # integer mask was provided
result = channelwise_max_idx
if background_threshold is not None and background_threshold > 0:
probs = np.amax(self.arr, axis=2)
result[probs < background_threshold] = background_class_id
return result.astype(np.int32)
# TODO
# def get_arr_bool(self, allow_overlapping=False, threshold=0.5, background_threshold=0.01, background_class_id=0):
# raise NotImplementedError()
def draw(self, size=None, background_threshold=0.01, background_class_id=None, colors=None,
return_foreground_mask=False):
"""
Render the segmentation map as an RGB image.
Parameters
----------
size : None or float or iterable of int or iterable of float, optional
Size of the rendered RGB image as ``(height, width)``.
See :func:`imgaug.imresize_single_image` for details.
If set to None, no resizing is performed and the size of the segmentation map array is used.
background_threshold : float, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
background_class_id : None or int, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
colors : None or list of tuple of int, optional
Colors to use. One for each class to draw. If None, then default colors will be used.
return_foreground_mask : bool, optional
Whether to return a mask of the same size as the drawn segmentation map, containing
True at any spatial location that is not the background class and False everywhere else.
Returns
-------
segmap_drawn : (H,W,3) ndarray
Rendered segmentation map (dtype is uint8).
foreground_mask : (H,W) ndarray
Mask indicating the locations of foreground classes (dtype is bool).
This value is only returned if `return_foreground_mask` is True.
"""
arr = self.get_arr_int(background_threshold=background_threshold, background_class_id=background_class_id)
nb_classes = 1 + np.max(arr)
segmap_drawn = np.zeros((arr.shape[0], arr.shape[1], 3), dtype=np.uint8)
if colors is None:
colors = SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS
do_assert(nb_classes <= len(colors),
"Can't draw all %d classes as it would exceed the maximum number of %d available colors." % (
nb_classes, len(colors),))
ids_in_map = np.unique(arr)
for c, color in zip(sm.xrange(nb_classes), colors):
if c in ids_in_map:
class_mask = (arr == c)
segmap_drawn[class_mask] = color
if return_foreground_mask:
background_class_id = 0 if background_class_id is None else background_class_id
foreground_mask = (arr != background_class_id)
else:
foreground_mask = None
if size is not None:
segmap_drawn = imresize_single_image(segmap_drawn, size, interpolation="nearest")
if foreground_mask is not None:
foreground_mask = imresize_single_image(
foreground_mask.astype(np.uint8), size, interpolation="nearest") > 0
if foreground_mask is not None:
return segmap_drawn, foreground_mask
return segmap_drawn
def draw_on_image(self, image, alpha=0.75, resize="segmentation_map", background_threshold=0.01,
background_class_id=None, colors=None, draw_background=False):
"""
Draw the segmentation map as an overlay over an image.
Parameters
----------
image : (H,W,3) ndarray
Image onto which to draw the segmentation map. Dtype is expected to be uint8.
alpha : float, optional
Alpha/opacity value to use for the mixing of image and segmentation map.
Higher values mean that the segmentation map will be more visible and the image less visible.
resize : {'segmentation_map', 'image'}, optional
In case of size differences between the image and segmentation map, either the image or
the segmentation map can be resized. This parameter controls which of the two will be
resized to the other's size.
background_threshold : float, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
background_class_id : None or int, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
colors : None or list of tuple of int, optional
Colors to use. One for each class to draw. If None, then default colors will be used.
draw_background : bool, optional
If True, the background will be drawn like any other class.
If False, the background will not be drawn, i.e. the respective background pixels
will be identical with the image's RGB color at the corresponding spatial location
and no color overlay will be applied.
Returns
-------
mix : (H,W,3) ndarray
Rendered overlays (dtype is uint8).
"""
# assert RGB image
do_assert(image.ndim == 3)
do_assert(image.shape[2] == 3)
do_assert(image.dtype.type == np.uint8)
do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
do_assert(resize in ["segmentation_map", "image"])
if resize == "image":
image = imresize_single_image(image, self.arr.shape[0:2], interpolation="cubic")
segmap_drawn, foreground_mask = self.draw(
background_threshold=background_threshold,
background_class_id=background_class_id,
size=image.shape[0:2] if resize == "segmentation_map" else None,
colors=colors,
return_foreground_mask=True
)
if draw_background:
mix = np.clip(
(1-alpha) * image + alpha * segmap_drawn,
0,
255
).astype(np.uint8)
else:
foreground_mask = foreground_mask[..., np.newaxis]
mix = np.zeros_like(image)
mix += (~foreground_mask).astype(np.uint8) * image
mix += foreground_mask.astype(np.uint8) * np.clip(
(1-alpha) * image + alpha * segmap_drawn,
0,
255
).astype(np.uint8)
return mix
def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
"""
Pad the segmentation map on its top/right/bottom/left side.
Parameters
----------
top : int, optional
Amount of pixels to add at the top side of the segmentation map. Must be 0 or greater.
right : int, optional
Amount of pixels to add at the right side of the segmentation map. Must be 0 or greater.
bottom : int, optional
Amount of pixels to add at the bottom side of the segmentation map. Must be 0 or greater.
left : int, optional
Amount of pixels to add at the left side of the segmentation map. Must be 0 or greater.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
Returns
-------
segmap : imgaug.SegmentationMapOnImage
Padded segmentation map of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
"""
arr_padded = pad(self.arr, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)
segmap.input_was = self.input_was
return segmap
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
"""
Pad the segmentation map on its sides so that its matches a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
return_pad_amounts : bool, optional
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
segmap : imgaug.SegmentationMapOnImage
Padded segmentation map as SegmentationMapOnImage object.
pad_amounts : tuple of int
Amounts by which the segmentation map was padded on each side, given as a
tuple ``(top, right, bottom, left)``.
This tuple is only returned if `return_pad_amounts` was set to True.
"""
arr_padded, pad_amounts = pad_to_aspect_ratio(self.arr, aspect_ratio=aspect_ratio, mode=mode, cval=cval,
return_pad_amounts=True)
segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)
segmap.input_was = self.input_was
if return_pad_amounts:
return segmap, pad_amounts
else:
return segmap
def scale(self, sizes, interpolation="cubic"):
"""
Rescale the segmentation map array to the provided size given the provided interpolation.
Parameters
----------
sizes : float or iterable of int or iterable of float
New size of the array in ``(height, width)``. See :func:`imgaug.imresize_single_image` for details.
interpolation : None or str or int, optional
The interpolation to use during resize. See :func:`imgaug.imresize_single_image` for details.
Note: The segmentation map is internally stored as multiple float-based heatmaps,
making smooth interpolations potentially more reasonable than nearest neighbour
interpolation.
Returns
-------
segmap : imgaug.SegmentationMapOnImage
Rescaled segmentation map object.
"""
arr_rescaled = imresize_single_image(self.arr, sizes, interpolation=interpolation)
# cubic interpolation can lead to values outside of [0.0, 1.0],
# see https://github.com/opencv/opencv/issues/7195
# TODO area interpolation too?
arr_rescaled = np.clip(arr_rescaled, 0.0, 1.0)
segmap = SegmentationMapOnImage(arr_rescaled, shape=self.shape)
segmap.input_was = self.input_was
return segmap
def to_heatmaps(self, only_nonempty=False, not_none_if_no_nonempty=False):
"""
Convert segmentation map to heatmaps object.
Each segmentation map class will be represented as a single heatmap channel.
Parameters
----------
only_nonempty : bool, optional
If True, then only heatmaps for classes that appear in the segmentation map will be
generated. Additionally, a list of these class ids will be returned.
not_none_if_no_nonempty : bool, optional
If `only_nonempty` is True and for a segmentation map no channel was non-empty,
this function usually returns None as the heatmaps object. If however this parameter
is set to True, a heatmaps object with one channel (representing class 0)
will be returned as a fallback in these cases.
Returns
-------
imgaug.HeatmapsOnImage or None
Segmentation map as a heatmaps object.
If `only_nonempty` was set to True and no class appeared in the segmentation map,
then this is None.
class_indices : list of int
Class ids (0 to C-1) of the classes that were actually added to the heatmaps.
Only returned if `only_nonempty` was set to True.
"""
if not only_nonempty:
return HeatmapsOnImage.from_0to1(self.arr, self.shape, min_value=0.0, max_value=1.0)
else:
nonempty_mask = np.sum(self.arr, axis=(0, 1)) > 0 + 1e-4
if np.sum(nonempty_mask) == 0:
if not_none_if_no_nonempty:
nonempty_mask[0] = True
else:
return None, []
class_indices = np.arange(self.arr.shape[2])[nonempty_mask]
channels = self.arr[..., class_indices]
return HeatmapsOnImage(channels, self.shape, min_value=0.0, max_value=1.0), class_indices
@staticmethod
def from_heatmaps(heatmaps, class_indices=None, nb_classes=None):
"""
Convert heatmaps to segmentation map.
Assumes that each class is represented as a single heatmap channel.
Parameters
----------
heatmaps : imgaug.HeatmapsOnImage
Heatmaps to convert.
class_indices : None or list of int, optional
List of class indices represented by each heatmap channel. See also the
secondary output of :func:`imgaug.SegmentationMapOnImage.to_heatmap`.
If this is provided, it must have the same length as the number of heatmap channels.
nb_classes : None or int, optional
Number of classes. Must be provided if class_indices is set.
Returns
-------
imgaug.SegmentationMapOnImage
Segmentation map derived from heatmaps.
"""
if class_indices is None:
return SegmentationMapOnImage(heatmaps.arr_0to1, shape=heatmaps.shape)
else:
do_assert(nb_classes is not None)
do_assert(min(class_indices) >= 0)
do_assert(max(class_indices) < nb_classes)
do_assert(len(class_indices) == heatmaps.arr_0to1.shape[2])
arr_0to1 = heatmaps.arr_0to1
arr_0to1_full = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1], nb_classes), dtype=np.float32)
class_indices_set = set(class_indices)
heatmap_channel = 0
for c in sm.xrange(nb_classes):
if c in class_indices_set:
arr_0to1_full[:, :, c] = arr_0to1[:, :, heatmap_channel]
heatmap_channel += 1
return SegmentationMapOnImage(arr_0to1_full, shape=heatmaps.shape)
    def copy(self):
        """
        Create a shallow copy of the segmentation map object.

        Currently delegates to :func:`SegmentationMapOnImage.deepcopy`, so the
        result is in fact a deep copy.

        Returns
        -------
        imgaug.SegmentationMapOnImage
            Shallow copy.
        """
        return self.deepcopy()
def deepcopy(self):
"""
Create a deep copy of the segmentation map object.
Returns
-------
imgaug.SegmentationMapOnImage
Deep copy.
"""
segmap = SegmentationMapOnImage(self.arr, shape=self.shape, nb_classes=self.nb_classes)
segmap.input_was = self.input_was
return segmap
############################
# Background augmentation
############################
class Batch(object):
    """
    Container bundling data of one batch before and after augmentation.

    Parameters
    ----------
    images : None or (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
        The images to augment.
    heatmaps : None or list of imgaug.HeatmapsOnImage
        The heatmaps to augment.
    segmentation_maps : None or list of SegmentationMapOnImage
        The segmentation maps to augment.
    keypoints : None or list of KeypointOnImage
        The keypoints to augment.
    bounding_boxes : None or list of BoundingBoxesOnImage
        The bounding boxes to augment.
    data
        Arbitrary additional data carried along with the batch, e.g. the
        filepaths of the images. Useful with background augmentation, where
        augmented batches may come back out of order.
    """
    def __init__(self, images=None, heatmaps=None, segmentation_maps=None, keypoints=None, bounding_boxes=None,
                 data=None):
        self.images = images
        self.heatmaps = heatmaps
        self.segmentation_maps = segmentation_maps
        self.keypoints = keypoints
        self.bounding_boxes = bounding_boxes
        # augmented counterparts, filled in after augmentation
        self.images_aug = None
        self.heatmaps_aug = None
        self.segmentation_maps_aug = None
        self.keypoints_aug = None
        self.bounding_boxes_aug = None
        self.data = data
    def deepcopy(self):
        """Return a deep copy of this batch, including augmented results."""
        def _clone_images(images):
            # images may be a single ndarray or an iterable of ndarrays
            if images is None:
                return None
            if is_np_array(images):
                return np.copy(images)
            do_assert(is_iterable(images))
            do_assert(all([is_np_array(image) for image in images]))
            return [np.copy(image) for image in images]
        def _clone_objs(objs, expected_class):
            # deep-copy a list of augmentable objects after type-checking them
            if objs is None:
                return None
            do_assert(is_iterable(objs))
            do_assert(all([isinstance(obj, expected_class) for obj in objs]))
            return [obj.deepcopy() for obj in objs]
        result = Batch(
            images=_clone_images(self.images),
            heatmaps=_clone_objs(self.heatmaps, HeatmapsOnImage),
            segmentation_maps=_clone_objs(self.segmentation_maps, SegmentationMapOnImage),
            keypoints=_clone_objs(self.keypoints, KeypointsOnImage),
            bounding_boxes=_clone_objs(self.bounding_boxes, BoundingBoxesOnImage),
            data=copy.deepcopy(self.data)
        )
        result.images_aug = _clone_images(self.images_aug)
        result.heatmaps_aug = _clone_objs(self.heatmaps_aug, HeatmapsOnImage)
        result.segmentation_maps_aug = _clone_objs(self.segmentation_maps_aug, SegmentationMapOnImage)
        result.keypoints_aug = _clone_objs(self.keypoints_aug, KeypointsOnImage)
        result.bounding_boxes_aug = _clone_objs(self.bounding_boxes_aug, BoundingBoxesOnImage)
        return result
class BatchLoader(object):
    """
    Class to load batches in the background.
    Loaded batches can be accessed using :attr:`imgaug.BatchLoader.queue`.
    Parameters
    ----------
    load_batch_func : callable or generator
        Generator or generator function (i.e. function that yields Batch objects)
        or a function that returns a list of Batch objects.
        Background loading automatically stops when the last batch was yielded or the
        last batch in the list was reached.
    queue_size : int, optional
        Maximum number of batches to store in the queue. May be set higher
        for small images and/or small batches.
    nb_workers : int, optional
        Number of workers to run in the background.
    threaded : bool, optional
        Whether to run the background processes using threads (True) or full processes (False).
    """
    def __init__(self, load_batch_func, queue_size=50, nb_workers=1, threaded=True):
        """Start the loader workers and the internal transfer thread."""
        do_assert(queue_size >= 2, "Queue size for BatchLoader must be at least 2, got %d." % (queue_size,))
        do_assert(nb_workers >= 1, "Number of workers for BatchLoader must be at least 1, got %d" % (nb_workers,))
        # Batches flow worker -> _queue_internal -> _main_worker -> queue
        # (user-facing); the requested capacity is split between both queues.
        self._queue_internal = multiprocessing.Queue(queue_size//2)
        self.queue = multiprocessing.Queue(queue_size//2)
        # event used to request shutdown of all workers
        self.join_signal = multiprocessing.Event()
        self.workers = []
        self.threaded = threaded
        # per-worker RNG seeds; only applied for process-based workers
        # (thread-based workers share the parent's RNG state and get None)
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            if threaded:
                worker = threading.Thread(
                    target=self._load_batches,
                    args=(load_batch_func, self._queue_internal, self.join_signal, None)
                )
            else:
                worker = multiprocessing.Process(
                    target=self._load_batches,
                    args=(load_batch_func, self._queue_internal, self.join_signal, seeds[i])
                )
            # daemonize so stray workers don't keep the interpreter alive
            worker.daemon = True
            worker.start()
            self.workers.append(worker)
        # thread moving pickled batches from the internal queue to the public one
        self.main_worker_thread = threading.Thread(
            target=self._main_worker,
            args=()
        )
        self.main_worker_thread.daemon = True
        self.main_worker_thread.start()
    def count_workers_alive(self):
        """Return the number of loader workers that are still running."""
        return sum([int(worker.is_alive()) for worker in self.workers])
    def all_finished(self):
        """
        Determine whether the workers have finished the loading process.
        Returns
        -------
        out : bool
            True if all workers have finished. Else False.
        """
        return self.count_workers_alive() == 0
    def _main_worker(self):
        """Move batches from the internal to the public queue until all workers are done."""
        workers_running = self.count_workers_alive()
        while workers_running > 0 and not self.join_signal.is_set():
            # wait for a new batch in the source queue and load it
            try:
                batch_str = self._queue_internal.get(timeout=0.1)
                if batch_str == "":
                    # the empty string is the per-worker "finished" sentinel
                    workers_running -= 1
                else:
                    self.queue.put(batch_str)
            except QueueEmpty:
                time.sleep(0.01)
            except (EOFError, BrokenPipeError):
                break
            # re-sync with the actual worker liveness (supersedes the sentinel count)
            workers_running = self.count_workers_alive()
        # All workers have finished, move the remaining entries from internal to external queue
        while True:
            try:
                batch_str = self._queue_internal.get(timeout=0.005)
                if batch_str != "":
                    self.queue.put(batch_str)
            except QueueEmpty:
                break
            except (EOFError, BrokenPipeError):
                break
        # a pickled None marks the end of the stream for consumers
        self.queue.put(pickle.dumps(None, protocol=-1))
        time.sleep(0.01)
    def _load_batches(self, load_batch_func, queue_internal, join_signal, seedval):
        """Worker loop: pull batches from `load_batch_func`, pickle them into the internal queue."""
        if seedval is not None:
            random.seed(seedval)
            np.random.seed(seedval)
            seed(seedval)
        try:
            # accept both a generator and a function returning an iterable
            gen = load_batch_func() if not is_generator(load_batch_func) else load_batch_func
            for batch in gen:
                do_assert(isinstance(batch, Batch),
                          "Expected batch returned by load_batch_func to be of class imgaug.Batch, got %s." % (
                              type(batch),))
                batch_pickled = pickle.dumps(batch, protocol=-1)
                # retry until the queue accepts the batch or a shutdown is requested
                while not join_signal.is_set():
                    try:
                        queue_internal.put(batch_pickled, timeout=0.005)
                        break
                    except QueueFull:
                        pass
                if join_signal.is_set():
                    break
        except Exception:
            traceback.print_exc()
        finally:
            # empty-string sentinel tells _main_worker that this worker is done
            queue_internal.put("")
        time.sleep(0.01)
    def terminate(self):
        """Stop all workers."""
        if not self.join_signal.is_set():
            self.join_signal.set()
        # give minimal time to put generated batches in queue and gracefully shut down
        time.sleep(0.01)
        if self.main_worker_thread.is_alive():
            self.main_worker_thread.join()
        if self.threaded:
            for worker in self.workers:
                if worker.is_alive():
                    worker.join()
        else:
            for worker in self.workers:
                if worker.is_alive():
                    worker.terminate()
                    worker.join()
        # wait until all workers are fully terminated
        while not self.all_finished():
            time.sleep(0.001)
        # empty queue until at least one element can be added and place None as signal that BL finished
        if self.queue.full():
            self.queue.get()
        self.queue.put(pickle.dumps(None, protocol=-1))
        time.sleep(0.01)
        # clean the queue, this reportedly prevents hanging threads
        while True:
            try:
                self._queue_internal.get(timeout=0.005)
            except QueueEmpty:
                break
        # NOTE(review): relies on the private Queue._closed attribute; may break
        # on other Python implementations/versions — confirm before upgrading.
        if not self._queue_internal._closed:
            self._queue_internal.close()
        if not self.queue._closed:
            self.queue.close()
        self._queue_internal.join_thread()
        self.queue.join_thread()
        time.sleep(0.025)
    def __del__(self):
        # only request shutdown here; terminate() must be called for full cleanup
        if not self.join_signal.is_set():
            self.join_signal.set()
class BackgroundAugmenter(object):
    """
    Class to augment batches in the background (while training on the GPU).
    This is a wrapper around the multiprocessing module.
    Parameters
    ----------
    batch_loader : BatchLoader or multiprocessing.Queue
        BatchLoader object that loads the data fed into the BackgroundAugmenter, or alternatively a Queue.
        If a Queue, then it must be made sure that a final ``None`` in the Queue signals that the loading is
        finished and no more batches will follow. Otherwise the BackgroundAugmenter will wait forever for the next
        batch.
    augseq : Augmenter
        An augmenter to apply to all loaded images.
        This may be e.g. a Sequential to apply multiple augmenters.
    queue_size : int
        Size of the queue that is used to temporarily save the augmentation
        results. Larger values offer the background processes more room
        to save results when the main process doesn't load much, i.e. they
        can lead to smoother and faster training. For large images, high
        values can block a lot of RAM though.
    nb_workers : 'auto' or int
        Number of background workers to spawn.
        If ``auto``, it will be set to ``C-1``, where ``C`` is the number of CPU cores.
    """
    def __init__(self, batch_loader, augseq, queue_size=50, nb_workers="auto"):
        """Spawn the augmentation worker processes."""
        do_assert(queue_size > 0)
        self.augseq = augseq
        # accept either a BatchLoader (use its public queue) or a raw Queue
        self.queue_source = batch_loader if isinstance(batch_loader, multiprocessing.queues.Queue) else batch_loader.queue
        self.queue_result = multiprocessing.Queue(queue_size)
        if nb_workers == "auto":
            try:
                nb_workers = multiprocessing.cpu_count()
            except (ImportError, NotImplementedError):
                nb_workers = 1
            # try to reserve at least one core for the main process
            nb_workers = max(1, nb_workers - 1)
        else:
            do_assert(nb_workers >= 1)
        self.nb_workers = nb_workers
        self.workers = []
        # counts workers that have sent their final None sentinel
        self.nb_workers_finished = 0
        # distinct seed per worker so augmentations differ between processes
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            worker = multiprocessing.Process(
                target=self._augment_images_worker,
                args=(augseq, self.queue_source, self.queue_result, seeds[i])
            )
            worker.daemon = True
            worker.start()
            self.workers.append(worker)
    def all_finished(self):
        """Return True if all worker processes have signaled completion."""
        return self.nb_workers_finished == self.nb_workers
    def get_batch(self):
        """
        Returns a batch from the queue of augmented batches.
        If workers are still running and there are no batches in the queue,
        it will automatically wait for the next batch.
        Returns
        -------
        out : None or imgaug.Batch
            One batch or None if all workers have finished.
        """
        if self.all_finished():
            return None
        batch_str = self.queue_result.get()
        batch = pickle.loads(batch_str)
        if batch is not None:
            return batch
        else:
            # a pickled None marks one more worker as finished
            self.nb_workers_finished += 1
            if self.nb_workers_finished >= self.nb_workers:
                try:
                    self.queue_source.get(timeout=0.001)  # remove the None from the source queue
                except QueueEmpty:
                    pass
                return None
            else:
                # other workers may still produce batches; recurse to wait for one
                return self.get_batch()
    def _augment_images_worker(self, augseq, queue_source, queue_result, seedval):
        """
        Augment endlessly images in the source queue.
        This is a worker function for that endlessly queries the source queue (input batches),
        augments batches in it and sends the result to the output queue.
        """
        # seed all RNGs so each worker produces a distinct, reproducible stream
        np.random.seed(seedval)
        random.seed(seedval)
        augseq.reseed(seedval)
        seed(seedval)
        loader_finished = False
        while not loader_finished:
            # wait for a new batch in the source queue and load it
            try:
                batch_str = queue_source.get(timeout=0.1)
                batch = pickle.loads(batch_str)
                if batch is None:
                    loader_finished = True
                    # put it back in so that other workers know that the loading queue is finished
                    queue_source.put(pickle.dumps(None, protocol=-1))
                else:
                    batch_aug = list(augseq.augment_batches([batch], background=False))[0]
                    # send augmented batch to output queue
                    batch_str = pickle.dumps(batch_aug, protocol=-1)
                    queue_result.put(batch_str)
            except QueueEmpty:
                time.sleep(0.01)
        # signal the consumer that this worker is done
        queue_result.put(pickle.dumps(None, protocol=-1))
        time.sleep(0.01)
    def terminate(self):
        """
        Terminates all background processes immediately.
        This will also free their RAM.
        """
        for worker in self.workers:
            if worker.is_alive():
                worker.terminate()
        # mark everything finished so get_batch() immediately returns None
        self.nb_workers_finished = len(self.workers)
        # NOTE(review): relies on the private Queue._closed attribute — confirm
        # before upgrading Python versions.
        if not self.queue_result._closed:
            self.queue_result.close()
        time.sleep(0.01)
    def __del__(self):
        # brief grace period, then hard-terminate any remaining workers
        time.sleep(0.1)
        self.terminate()
|
"""Support for Microsoft face recognition."""
import asyncio
import json
import logging
import aiohttp
from aiohttp.hdrs import CONTENT_TYPE
import async_timeout
import voluptuous as vol
from homeassistant.const import ATTR_NAME, CONF_API_KEY, CONF_TIMEOUT
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
_LOGGER = logging.getLogger(__name__)
# Attribute names accepted by the services registered in async_setup().
ATTR_CAMERA_ENTITY = "camera_entity"
ATTR_GROUP = "group"
ATTR_PERSON = "person"
# Configuration key selecting the Azure region of the Face resource.
CONF_AZURE_REGION = "azure_region"
# Key under which the MicrosoftFace API object is stored in hass.data.
DATA_MICROSOFT_FACE = "microsoft_face"
# Default request timeout in seconds.
DEFAULT_TIMEOUT = 10
DOMAIN = "microsoft_face"
# Host path template: the Azure region is prepended, {0} is the API function path.
FACE_API_URL = "api.cognitive.microsoft.com/face/v1.0/{0}"
# Names of the services registered by async_setup().
SERVICE_CREATE_GROUP = "create_group"
SERVICE_CREATE_PERSON = "create_person"
SERVICE_DELETE_GROUP = "delete_group"
SERVICE_DELETE_PERSON = "delete_person"
SERVICE_FACE_PERSON = "face_person"
SERVICE_TRAIN_GROUP = "train_group"
# Schema for the component's configuration.yaml entry.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_API_KEY): cv.string,
                vol.Optional(CONF_AZURE_REGION, default="westus"): cv.string,
                vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
# Schemas validating the payloads of the individual services.
SCHEMA_GROUP_SERVICE = vol.Schema({vol.Required(ATTR_NAME): cv.string})
SCHEMA_PERSON_SERVICE = SCHEMA_GROUP_SERVICE.extend(
    {vol.Required(ATTR_GROUP): cv.slugify}
)
SCHEMA_FACE_SERVICE = vol.Schema(
    {
        vol.Required(ATTR_PERSON): cv.string,
        vol.Required(ATTR_GROUP): cv.slugify,
        vol.Required(ATTR_CAMERA_ENTITY): cv.entity_id,
    }
)
SCHEMA_TRAIN_SERVICE = vol.Schema({vol.Required(ATTR_GROUP): cv.slugify})
async def async_setup(hass, config):
    """Set up Microsoft Face: load cloud state and register all services.

    Returns False (aborting setup) if the initial data load from the Face API
    fails, True otherwise.
    """
    entities = {}
    face = MicrosoftFace(
        hass,
        config[DOMAIN].get(CONF_AZURE_REGION),
        config[DOMAIN].get(CONF_API_KEY),
        config[DOMAIN].get(CONF_TIMEOUT),
        entities,
    )
    try:
        # read existing groups/persons from the cloud and create entities
        await face.update_store()
    except HomeAssistantError as err:
        _LOGGER.error("Can't load data from face api: %s", err)
        return False
    hass.data[DATA_MICROSOFT_FACE] = face

    async def async_create_group(service):
        """Create a new person group."""
        name = service.data[ATTR_NAME]
        g_id = slugify(name)
        try:
            await face.call_api("put", f"persongroups/{g_id}", {"name": name})
            face.store[g_id] = {}
            entities[g_id] = MicrosoftFaceGroupEntity(hass, face, g_id, name)
            entities[g_id].async_write_ha_state()
        except HomeAssistantError as err:
            _LOGGER.error("Can't create group '%s' with error: %s", g_id, err)

    hass.services.async_register(
        DOMAIN, SERVICE_CREATE_GROUP, async_create_group, schema=SCHEMA_GROUP_SERVICE
    )

    async def async_delete_group(service):
        """Delete a person group."""
        g_id = slugify(service.data[ATTR_NAME])
        try:
            await face.call_api("delete", f"persongroups/{g_id}")
            face.store.pop(g_id)
            entity = entities.pop(g_id)
            hass.states.async_remove(entity.entity_id, service.context)
        except HomeAssistantError as err:
            _LOGGER.error("Can't delete group '%s' with error: %s", g_id, err)

    hass.services.async_register(
        DOMAIN, SERVICE_DELETE_GROUP, async_delete_group, schema=SCHEMA_GROUP_SERVICE
    )

    async def async_train_group(service):
        """Train a person group."""
        g_id = service.data[ATTR_GROUP]
        try:
            await face.call_api("post", f"persongroups/{g_id}/train")
        except HomeAssistantError as err:
            _LOGGER.error("Can't train group '%s' with error: %s", g_id, err)

    hass.services.async_register(
        DOMAIN, SERVICE_TRAIN_GROUP, async_train_group, schema=SCHEMA_TRAIN_SERVICE
    )

    async def async_create_person(service):
        """Create a person in a group."""
        name = service.data[ATTR_NAME]
        g_id = service.data[ATTR_GROUP]
        try:
            user_data = await face.call_api(
                "post", f"persongroups/{g_id}/persons", {"name": name}
            )
            face.store[g_id][name] = user_data["personId"]
            entities[g_id].async_write_ha_state()
        except HomeAssistantError as err:
            _LOGGER.error("Can't create person '%s' with error: %s", name, err)

    hass.services.async_register(
        DOMAIN, SERVICE_CREATE_PERSON, async_create_person, schema=SCHEMA_PERSON_SERVICE
    )

    async def async_delete_person(service):
        """Delete a person in a group."""
        name = service.data[ATTR_NAME]
        g_id = service.data[ATTR_GROUP]
        p_id = face.store[g_id].get(name)
        try:
            await face.call_api("delete", f"persongroups/{g_id}/persons/{p_id}")
            face.store[g_id].pop(name)
            entities[g_id].async_write_ha_state()
        except HomeAssistantError as err:
            _LOGGER.error("Can't delete person '%s' with error: %s", p_id, err)

    hass.services.async_register(
        DOMAIN, SERVICE_DELETE_PERSON, async_delete_person, schema=SCHEMA_PERSON_SERVICE
    )

    async def async_face_person(service):
        """Add a new face picture to a person."""
        g_id = service.data[ATTR_GROUP]
        p_id = face.store[g_id].get(service.data[ATTR_PERSON])
        camera_entity = service.data[ATTR_CAMERA_ENTITY]
        camera = hass.components.camera
        try:
            image = await camera.async_get_image(hass, camera_entity)
            await face.call_api(
                "post",
                f"persongroups/{g_id}/persons/{p_id}/persistedFaces",
                image.content,
                binary=True,
            )
        except HomeAssistantError as err:
            # fixed copy-paste error: this service adds a face, it does not delete a person
            _LOGGER.error("Can't add face to person '%s' with error: %s", p_id, err)

    hass.services.async_register(
        DOMAIN, SERVICE_FACE_PERSON, async_face_person, schema=SCHEMA_FACE_SERVICE
    )

    return True
class MicrosoftFaceGroupEntity(Entity):
    """Entity exposing the state and members of a single person group."""

    def __init__(self, hass, api, g_id, name):
        """Initialize person/group entity."""
        self.hass = hass
        self._api = api
        self._id = g_id
        self._name = name

    @property
    def name(self):
        """Return the name of the entity."""
        return self._name

    @property
    def entity_id(self):
        """Return entity id."""
        return f"{DOMAIN}.{self._id}"

    @property
    def state(self):
        """Return the state of the entity."""
        # state is the number of persons known in this group
        return len(self._api.store[self._id])

    @property
    def should_poll(self):
        """Return True if entity has to be polled for state."""
        return False

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        # expose the person-name -> person-id mapping of this group
        return dict(self._api.store[self._id])
class MicrosoftFace:
    """Microsoft Face api for Home Assistant.

    Thin async wrapper around the Azure Face REST API that also caches
    group/person data locally and creates group entities.
    """

    def __init__(self, hass, server_loc, api_key, timeout, entities):
        """Initialize Microsoft Face api."""
        self.hass = hass
        self.websession = async_get_clientsession(hass)
        self.timeout = timeout
        self._api_key = api_key
        # FACE_API_URL keeps a '{0}' placeholder for the API function path.
        self._server_url = f"https://{server_loc}.{FACE_API_URL}"
        self._store = {}
        self._entities = entities

    @property
    def store(self):
        """Store group/person data and IDs."""
        return self._store

    async def update_store(self):
        """Load all group/person data into local store."""
        groups = await self.call_api("get", "persongroups")
        tasks = []
        for group in groups:
            g_id = group["personGroupId"]
            self._store[g_id] = {}
            self._entities[g_id] = MicrosoftFaceGroupEntity(
                self.hass, self, g_id, group["name"]
            )
            persons = await self.call_api("get", f"persongroups/{g_id}/persons")
            for person in persons:
                self._store[g_id][person["name"]] = person["personId"]
            tasks.append(self._entities[g_id].async_update_ha_state())
        if tasks:
            await asyncio.wait(tasks)

    async def call_api(self, method, function, data=None, binary=False, params=None):
        """Make an api call.

        Returns the decoded JSON answer for HTTP status < 300; raises
        HomeAssistantError on API errors, connection failures or timeouts.
        """
        headers = {"Ocp-Apim-Subscription-Key": self._api_key}
        url = self._server_url.format(function)
        payload = None
        if binary:
            headers[CONTENT_TYPE] = "application/octet-stream"
            payload = data
        else:
            headers[CONTENT_TYPE] = "application/json"
            if data is not None:
                payload = json.dumps(data).encode()
        try:
            with async_timeout.timeout(self.timeout):
                response = await getattr(self.websession, method)(
                    url, data=payload, headers=headers, params=params
                )
                answer = await response.json()
            _LOGGER.debug("Read from microsoft face api: %s", answer)
            if response.status < 300:
                return answer
            _LOGGER.warning(
                "Error %d microsoft face api %s", response.status, response.url
            )
            raise HomeAssistantError(answer["error"]["message"])
        except aiohttp.ClientError:
            _LOGGER.warning("Can't connect to microsoft face api")
        except asyncio.TimeoutError:
            # Bug fix: log the request url instead of response.url —
            # `response` is unbound when the timeout fires before the
            # request completes, which raised NameError here.
            _LOGGER.warning("Timeout from microsoft face api %s", url)
        raise HomeAssistantError("Network error on microsoft face api.")
Correct error message in Microsoft Face (#35096)
"""Support for Microsoft face recognition."""
import asyncio
import json
import logging
import aiohttp
from aiohttp.hdrs import CONTENT_TYPE
import async_timeout
import voluptuous as vol
from homeassistant.const import ATTR_NAME, CONF_API_KEY, CONF_TIMEOUT
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
_LOGGER = logging.getLogger(__name__)

# Attributes accepted in service call payloads.
ATTR_CAMERA_ENTITY = "camera_entity"
ATTR_GROUP = "group"
ATTR_PERSON = "person"

CONF_AZURE_REGION = "azure_region"

DATA_MICROSOFT_FACE = "microsoft_face"

# Default API request timeout in seconds.
DEFAULT_TIMEOUT = 10
DOMAIN = "microsoft_face"

# '{0}' is filled with the API function path (see MicrosoftFace.call_api).
FACE_API_URL = "api.cognitive.microsoft.com/face/v1.0/{0}"

SERVICE_CREATE_GROUP = "create_group"
SERVICE_CREATE_PERSON = "create_person"
SERVICE_DELETE_GROUP = "delete_group"
SERVICE_DELETE_PERSON = "delete_person"
SERVICE_FACE_PERSON = "face_person"
SERVICE_TRAIN_GROUP = "train_group"

CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_API_KEY): cv.string,
                vol.Optional(CONF_AZURE_REGION, default="westus"): cv.string,
                vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)

# Per-service payload schemas.
SCHEMA_GROUP_SERVICE = vol.Schema({vol.Required(ATTR_NAME): cv.string})

SCHEMA_PERSON_SERVICE = SCHEMA_GROUP_SERVICE.extend(
    {vol.Required(ATTR_GROUP): cv.slugify}
)

SCHEMA_FACE_SERVICE = vol.Schema(
    {
        vol.Required(ATTR_PERSON): cv.string,
        vol.Required(ATTR_GROUP): cv.slugify,
        vol.Required(ATTR_CAMERA_ENTITY): cv.entity_id,
    }
)

SCHEMA_TRAIN_SERVICE = vol.Schema({vol.Required(ATTR_GROUP): cv.slugify})
async def async_setup(hass, config):
    """Set up Microsoft Face.

    Creates the MicrosoftFace API wrapper, loads the existing person
    groups from the cloud (creating a group entity per group) and
    registers the group/person/face services.
    """
    entities = {}
    face = MicrosoftFace(
        hass,
        config[DOMAIN].get(CONF_AZURE_REGION),
        config[DOMAIN].get(CONF_API_KEY),
        config[DOMAIN].get(CONF_TIMEOUT),
        entities,
    )
    try:
        # read exists group/person from cloud and create entities
        await face.update_store()
    except HomeAssistantError as err:
        _LOGGER.error("Can't load data from face api: %s", err)
        return False
    hass.data[DATA_MICROSOFT_FACE] = face

    async def async_create_group(service):
        """Create a new person group."""
        name = service.data[ATTR_NAME]
        # The slugified name doubles as the cloud-side group id.
        g_id = slugify(name)
        try:
            await face.call_api("put", f"persongroups/{g_id}", {"name": name})
            face.store[g_id] = {}
            entities[g_id] = MicrosoftFaceGroupEntity(hass, face, g_id, name)
            entities[g_id].async_write_ha_state()
        except HomeAssistantError as err:
            _LOGGER.error("Can't create group '%s' with error: %s", g_id, err)

    hass.services.async_register(
        DOMAIN, SERVICE_CREATE_GROUP, async_create_group, schema=SCHEMA_GROUP_SERVICE
    )

    async def async_delete_group(service):
        """Delete a person group."""
        g_id = slugify(service.data[ATTR_NAME])
        try:
            await face.call_api("delete", f"persongroups/{g_id}")
            face.store.pop(g_id)
            # Remove the entity state as well, since the group is gone.
            entity = entities.pop(g_id)
            hass.states.async_remove(entity.entity_id, service.context)
        except HomeAssistantError as err:
            _LOGGER.error("Can't delete group '%s' with error: %s", g_id, err)

    hass.services.async_register(
        DOMAIN, SERVICE_DELETE_GROUP, async_delete_group, schema=SCHEMA_GROUP_SERVICE
    )

    async def async_train_group(service):
        """Train a person group."""
        g_id = service.data[ATTR_GROUP]
        try:
            await face.call_api("post", f"persongroups/{g_id}/train")
        except HomeAssistantError as err:
            _LOGGER.error("Can't train group '%s' with error: %s", g_id, err)

    hass.services.async_register(
        DOMAIN, SERVICE_TRAIN_GROUP, async_train_group, schema=SCHEMA_TRAIN_SERVICE
    )

    async def async_create_person(service):
        """Create a person in a group."""
        name = service.data[ATTR_NAME]
        g_id = service.data[ATTR_GROUP]
        try:
            user_data = await face.call_api(
                "post", f"persongroups/{g_id}/persons", {"name": name}
            )
            # Cache the cloud-assigned personId for later lookups by name.
            face.store[g_id][name] = user_data["personId"]
            entities[g_id].async_write_ha_state()
        except HomeAssistantError as err:
            _LOGGER.error("Can't create person '%s' with error: %s", name, err)

    hass.services.async_register(
        DOMAIN, SERVICE_CREATE_PERSON, async_create_person, schema=SCHEMA_PERSON_SERVICE
    )

    async def async_delete_person(service):
        """Delete a person in a group."""
        name = service.data[ATTR_NAME]
        g_id = service.data[ATTR_GROUP]
        p_id = face.store[g_id].get(name)
        try:
            await face.call_api("delete", f"persongroups/{g_id}/persons/{p_id}")
            face.store[g_id].pop(name)
            entities[g_id].async_write_ha_state()
        except HomeAssistantError as err:
            _LOGGER.error("Can't delete person '%s' with error: %s", p_id, err)

    hass.services.async_register(
        DOMAIN, SERVICE_DELETE_PERSON, async_delete_person, schema=SCHEMA_PERSON_SERVICE
    )

    async def async_face_person(service):
        """Add a new face picture to a person."""
        g_id = service.data[ATTR_GROUP]
        p_id = face.store[g_id].get(service.data[ATTR_PERSON])
        camera_entity = service.data[ATTR_CAMERA_ENTITY]
        camera = hass.components.camera
        try:
            # Take a snapshot from the camera and upload it as a
            # persisted face of the given person.
            image = await camera.async_get_image(hass, camera_entity)
            await face.call_api(
                "post",
                f"persongroups/{g_id}/persons/{p_id}/persistedFaces",
                image.content,
                binary=True,
            )
        except HomeAssistantError as err:
            _LOGGER.error(
                "Can't add an image of a person '%s' with error: %s", p_id, err
            )

    hass.services.async_register(
        DOMAIN, SERVICE_FACE_PERSON, async_face_person, schema=SCHEMA_FACE_SERVICE
    )

    return True
class MicrosoftFaceGroupEntity(Entity):
    """Entity representing one Microsoft Face person group.

    The state is the number of persons registered in the group; the
    attributes expose each person's name mapped to its cloud personId.
    """

    def __init__(self, hass, api, g_id, name):
        """Set up the group entity."""
        self.hass = hass
        self._api = api
        self._id = g_id
        self._name = name

    @property
    def name(self):
        """Name of the entity."""
        return self._name

    @property
    def entity_id(self):
        """Entity id, derived from the slugified group id."""
        return f"{DOMAIN}.{self._id}"

    @property
    def state(self):
        """Number of persons currently registered in this group."""
        group_members = self._api.store[self._id]
        return len(group_members)

    @property
    def should_poll(self):
        """No polling: state updates are pushed when the store changes."""
        return False

    @property
    def device_state_attributes(self):
        """Map each person name in this group to its personId."""
        return dict(self._api.store[self._id])
class MicrosoftFace:
    """Microsoft Face api for Home Assistant.

    Thin async wrapper around the Azure Face REST API that also caches
    group/person data locally and creates group entities.
    """

    def __init__(self, hass, server_loc, api_key, timeout, entities):
        """Initialize Microsoft Face api."""
        self.hass = hass
        self.websession = async_get_clientsession(hass)
        self.timeout = timeout
        self._api_key = api_key
        # FACE_API_URL keeps a '{0}' placeholder for the API function path.
        self._server_url = f"https://{server_loc}.{FACE_API_URL}"
        self._store = {}
        self._entities = entities

    @property
    def store(self):
        """Store group/person data and IDs."""
        return self._store

    async def update_store(self):
        """Load all group/person data into local store."""
        groups = await self.call_api("get", "persongroups")
        tasks = []
        for group in groups:
            g_id = group["personGroupId"]
            self._store[g_id] = {}
            self._entities[g_id] = MicrosoftFaceGroupEntity(
                self.hass, self, g_id, group["name"]
            )
            persons = await self.call_api("get", f"persongroups/{g_id}/persons")
            for person in persons:
                self._store[g_id][person["name"]] = person["personId"]
            tasks.append(self._entities[g_id].async_update_ha_state())
        if tasks:
            await asyncio.wait(tasks)

    async def call_api(self, method, function, data=None, binary=False, params=None):
        """Make an api call.

        Returns the decoded JSON answer for HTTP status < 300; raises
        HomeAssistantError on API errors, connection failures or timeouts.
        """
        headers = {"Ocp-Apim-Subscription-Key": self._api_key}
        url = self._server_url.format(function)
        payload = None
        if binary:
            headers[CONTENT_TYPE] = "application/octet-stream"
            payload = data
        else:
            headers[CONTENT_TYPE] = "application/json"
            if data is not None:
                payload = json.dumps(data).encode()
        try:
            with async_timeout.timeout(self.timeout):
                response = await getattr(self.websession, method)(
                    url, data=payload, headers=headers, params=params
                )
                answer = await response.json()
            _LOGGER.debug("Read from microsoft face api: %s", answer)
            if response.status < 300:
                return answer
            _LOGGER.warning(
                "Error %d microsoft face api %s", response.status, response.url
            )
            raise HomeAssistantError(answer["error"]["message"])
        except aiohttp.ClientError:
            _LOGGER.warning("Can't connect to microsoft face api")
        except asyncio.TimeoutError:
            # Bug fix: log the request url instead of response.url —
            # `response` is unbound when the timeout fires before the
            # request completes, which raised NameError here.
            _LOGGER.warning("Timeout from microsoft face api %s", url)
        raise HomeAssistantError("Network error on microsoft face api.")
|
from __future__ import absolute_import, unicode_literals
import io
import json
import os
import re
import shutil
import subprocess
import tarfile
import pandas as pd
import numpy as np
from django.db import transaction
from django.utils import timezone
from typing import Dict, List
from data_refinery_common.job_lookup import Downloaders
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
OrganismIndex,
ComputationalResult,
ComputationalResultAnnotation,
ComputedFile,
Processor,
Pipeline,
Experiment,
ExperimentSampleAssociation,
SampleResultAssociation,
Sample,
SampleComputedFileAssociation
)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers._version import __version__
from data_refinery_workers.processors import utils
logger = get_and_configure_logger(__name__)
JOB_DIR_PREFIX = "processor_job_"
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
SKIP_PROCESSED = get_env_variable("SKIP_PROCESSED", True)
def _set_job_prefix(job_context: Dict) -> Dict:
    """Attach the per-job working-directory prefix to the job context.

    Stores it under the "job_dir_prefix" key and returns the context.
    """
    job_id = job_context["job_id"]
    job_context["job_dir_prefix"] = "{}{}".format(JOB_DIR_PREFIX, job_id)
    return job_context
def _prepare_files(job_context: Dict) -> Dict:
    """Moves the file(s) from the raw directory to the temp directory.

    Also adds the keys "input_file_path" and "output_directory" to
    job_context so everything is prepared for processing. If the reads
    are paired then there will also be an "input_file_path_2" key
    added to job_context for the second read. Additionally sets
    "sample", "organism", "qc_input_directory", "qc_directory" and
    "output_archive" on the context.
    """
    logger.debug("Preparing files..")
    original_files = job_context["original_files"]
    job_context["input_file_path"] = original_files[0].get_synced_file_path()
    # Two original files means paired-end reads.
    if len(original_files) == 2:
        job_context["input_file_path_2"] = original_files[1].get_synced_file_path()
    # There should only ever be one per Salmon run
    job_context['sample'] = job_context['original_files'][0].samples.first()
    job_context['organism'] = job_context['sample'].organism
    job_context["success"] = True
    # The paths of original_files are in this format:
    # <experiment_accession_code>/raw/<filename>
    # Salmon outputs an entire directory of files, so create a temp
    # directory to output it to until we can zip it to.
    # The path of temp directory is in this format:
    # <experiment_accession_code>/<sample_accession_code>/processed/
    pre_part = '/'.join(original_files[0].absolute_file_path.split('/')[:-2])
    sample_accession = job_context['sample'].accession_code
    job_context["output_directory"] = pre_part + "/" + sample_accession + '/processed/'
    os.makedirs(job_context["output_directory"], exist_ok=True)
    job_context["qc_input_directory"] = pre_part + '/'
    job_context["qc_directory"] = pre_part + '/qc/'
    os.makedirs(job_context["qc_directory"], exist_ok=True)
    # Timestamp (whole seconds) keeps archive names unique per run.
    timestamp = str(timezone.now().timestamp()).split('.')[0]
    job_context["output_archive"] = pre_part + '/result-' + timestamp + '.tar.gz'
    # NOTE(review): duplicate of the makedirs call above — harmless but
    # likely unintentional.
    os.makedirs(job_context["output_directory"], exist_ok=True)
    return job_context
def _determine_index_length(job_context: Dict) -> Dict:
    """Determines whether to use the long or short salmon index.

    Adds the key 'index_length' to the job_context with a value of
    "short" if the mean read length is 75 base pairs or less, and
    "long" otherwise. For more information on index length see the
    _create_index function of the transcriptome_index processor.
    (The previous docstring mentioned 'kmer_size'/'23'/'31', which this
    code does not set.) Also records 'index_length_raw' for regression
    testing and sets 'success' to False when no reads can be counted.
    """
    logger.debug("Determining index length..")
    total_base_pairs = 0
    number_of_reads = 0
    counter = 1
    # zcat unzips the file provided and dumps the output to STDOUT.
    # It is installed by default in Debian so it should be included
    # in every docker image already.
    with subprocess.Popen(['zcat', job_context["input_file_path"]], stdout=subprocess.PIPE,
                          universal_newlines=True) as process:
        for line in process.stdout:
            # In the FASTQ file format, there are 4 lines for each
            # read. Three of these contain metadata about the
            # read. The string representing the read itself is found
            # on the second line of each quartet.
            if counter % 4 == 2:
                total_base_pairs += len(line.replace("\n", ""))
                number_of_reads += 1
            counter += 1
    if "input_file_path_2" in job_context:
        with subprocess.Popen(['zcat', job_context["input_file_path_2"]], stdout=subprocess.PIPE,
                              universal_newlines=True) as process:
            for line in process.stdout:
                if counter % 4 == 2:
                    total_base_pairs += len(line.replace("\n", ""))
                    number_of_reads += 1
                counter += 1
    if number_of_reads == 0:
        logger.error("Unable to determine number_of_reads for job.",
                     input_file_1=job_context["input_file_path"],
                     # Bug fix: use .get() — single-read jobs have no
                     # "input_file_path_2" key, and indexing raised a
                     # KeyError while trying to report this error.
                     input_file_2=job_context.get("input_file_path_2"),
                     job_id=job_context['job'].id
                     )
        job_context['job'].failure_reason = "Unable to determine number_of_reads."
        job_context['success'] = False
        return job_context
    index_length_raw = total_base_pairs / number_of_reads
    # Put the raw index length into the job context in a new field for regression testing purposes
    job_context["index_length_raw"] = index_length_raw
    if index_length_raw > 75:
        job_context["index_length"] = "long"
    else:
        job_context["index_length"] = "short"
    return job_context
def _download_index(job_context: Dict) -> Dict:
    """Downloads the appropriate Salmon Index for this experiment.

    Salmon documentation states:
    "If you want to use Salmon in quasi-mapping-based mode, then you
    first have to build an Salmon index for your transcriptome."
    We have used the Data Refinery to build these indices already,
    this function retrieves the correct index for the organism and
    read length from Permanent Storage. Sets "index_unpacked",
    "index_directory" and "genes_to_transcripts_path" on job_context,
    or "success" = False when no index exists for the organism.
    """
    logger.debug("Fetching and installing index..")
    index_type = "TRANSCRIPTOME_" + job_context["index_length"].upper()
    # Most recently created index for this organism/length wins.
    index_object = OrganismIndex.objects.filter(organism=job_context['organism'],
                                                index_type=index_type).order_by('-created_at').first()
    if not index_object:
        logger.error("Could not run Salmon processor without index for organism",
                     organism=job_context['organism'],
                     processor_job=job_context["job_id"]
                     )
        job_context["job"].failure_reason = "Missing transcriptome index."
        job_context["success"] = False
        return job_context
    result = index_object.result
    file = ComputedFile.objects.get(result=result)
    job_context["index_unpacked"] = '/'.join(file.get_synced_file_path().split('/')[:-1])
    job_context["index_directory"] = job_context["index_unpacked"] + "/index"
    job_context["genes_to_transcripts_path"] = os.path.join(
        job_context["index_directory"], "genes_to_transcripts.txt")
    # versionInfo.json doubles as a marker that the tarball was already
    # extracted on a previous run.
    if not os.path.exists(job_context["index_directory"] + '/versionInfo.json'):
        with tarfile.open(file.get_synced_file_path(), "r:gz") as tarball:
            # NOTE(review): extractall() can write outside the target
            # directory for a malicious tarball — assumed safe because the
            # index tarballs are produced internally; confirm.
            tarball.extractall(job_context["index_unpacked"])
    else:
        logger.info("Index already installed", processor_job=job_context["job_id"])
    job_context["success"] = True
    return job_context
def _count_samples_processed_by_salmon(experiment):
"""Count the number of salmon-quant-processed samples in an experiment."""
counter = 0
salmon_cmd_str = 'salmon --no-version-check quant'
for sample in experiment.samples.all():
cmd_found = False
for result in sample.results.all():
for command in result.commands:
if command.startswith(salmon_cmd_str):
counter += 1
cmd_found = True
break
if cmd_found:
break
return counter
def _get_salmon_completed_exp_dirs(job_context: Dict) -> List[str]:
    """Return a list of directory names of experiments whose samples
    have all been processed by `salmon quant` command.

    Considers every experiment associated with the current job's sample.
    """
    # All experiments that share the current job's sample.
    experiments_set = ExperimentSampleAssociation.objects.filter(
        sample=job_context['sample']).values_list('experiment')
    experiments = Experiment.objects.filter(pk__in=experiments_set)
    salmon_completed_exp_dirs = []
    for experiment in experiments:
        if _count_samples_processed_by_salmon(experiment) == experiment.samples.count():
            # Remove the last two parts from the path of job_context['input_file_path']
            # (which is "<experiment_accession_code>/raw/<filename>")
            # to get the experiment directory name.
            # NOTE(review): the directory is derived from the *current
            # job's* input path for every matching experiment — confirm
            # that is intended when a sample belongs to several experiments.
            tokens = job_context["input_file_path"].split('/')[:-2]
            experiment_dir = '/'.join(tokens)
            salmon_completed_exp_dirs.append(experiment_dir)
    return salmon_completed_exp_dirs
def _tximport(job_context: Dict, experiment_dir: str) -> Dict:
    """Run tximport R script based on input experiment_dir and the path
    of genes_to_transcripts.txt.

    Registers a ComputationalResult, records the RDS output file and
    splits the gene_lengthScaledTPM.tsv matrix into one smashable TSV
    per sample. On R failure sets job_context["success"] to False.
    """
    logger.info("Running tximport!", processor_job=job_context['job_id'], ex_dir=experiment_dir)
    result = ComputationalResult()
    cmd_tokens = [
        "/usr/bin/Rscript", "--vanilla",
        "/home/user/data_refinery_workers/processors/tximport.R",
        "--exp_dir", experiment_dir,
        "--gene2txmap", job_context["genes_to_transcripts_path"]
    ]
    result.time_start = timezone.now()
    try:
        subprocess.run(cmd_tokens, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except Exception as e:
        error_template = ("Encountered error in R code while running tximport.R"
                          " pipeline during processing of {0}: {1}")
        # Bug fix: this line previously referenced the undefined name
        # `exp_dir`, so a NameError masked the real R failure.
        error_message = error_template.format(experiment_dir, str(e))
        logger.error(error_message, processor_job=job_context["job_id"])
        job_context["success"] = False
        return job_context
    result.time_end = timezone.now()
    result.commands.append(" ".join(cmd_tokens))
    result.is_ccdl = True
    result.pipeline = "tximport"  # TODO: should be removed
    try:
        processor_key = "TXIMPORT"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        return utils.handle_processor_exception(job_context, processor_key, e)
    result.save()
    job_context['pipeline'].steps.append(result.id)
    # Associate this result with all samples in this experiment.
    # TODO: This may not be completely sensible, because `tximport` is
    # done at experiment level, not at sample level.
    experiment_accession = experiment_dir.split('/')[-1]
    current_experiment = Experiment.objects.get(accession_code=experiment_accession)
    for sample in current_experiment.samples.all():
        s_r = SampleResultAssociation(sample=sample, result=result)
        s_r.save()
    # Record the RDS object produced by tximport.R.
    rds_file = ComputedFile()
    rds_file.absolute_file_path = experiment_dir + '/txi_out.RDS'
    rds_file.filename = 'txi_out.RDS'
    rds_file.result = result
    rds_file.is_smashable = False
    rds_file.is_qc = False
    rds_file.is_public = True
    rds_file.calculate_sha1()
    rds_file.calculate_size()
    rds_file.save()
    job_context['computed_files'].append(rds_file)
    # Split the tximport result into smashable subfiles — one single-column
    # TSV per sample (column) of the gene expression matrix.
    big_tsv = experiment_dir + '/gene_lengthScaledTPM.tsv'
    data = pd.read_csv(big_tsv, sep='\t', header=0, index_col=0)
    individual_files = []
    frames = np.split(data, len(data.columns), axis=1)
    for frame in frames:
        # Column name is the sample accession code.
        frame_path = os.path.join(experiment_dir, frame.columns.values[0]) + '_gene_lengthScaledTPM.tsv'
        frame.to_csv(frame_path, sep='\t', encoding='utf-8')
        sample = Sample.objects.get(accession_code=frame.columns.values[0])
        computed_file = ComputedFile()
        computed_file.absolute_file_path = frame_path
        computed_file.filename = frame_path.split('/')[-1]
        computed_file.result = result
        computed_file.is_smashable = True
        computed_file.is_qc = False
        computed_file.is_public = True
        computed_file.calculate_sha1()
        computed_file.calculate_size()
        computed_file.save()
        job_context['computed_files'].append(computed_file)
        SampleResultAssociation.objects.get_or_create(
            sample=sample,
            result=result)
        SampleComputedFileAssociation.objects.get_or_create(
            sample=sample,
            computed_file=computed_file)
        individual_files.append(computed_file)
    job_context['tximported'] = True
    job_context['individual_files'] = individual_files
    return job_context
def _run_salmon(job_context: Dict, skip_processed=SKIP_PROCESSED) -> Dict:
    """Run `salmon quant` on the prepared input file(s).

    Builds a single- or paired-read command, records the invocation and
    timing as a ComputationalResult, attaches lib_format_counts.json and
    aux_info/meta_info.json as result annotations, and triggers tximport
    for any related experiment whose samples are now all quantified.
    When *skip_processed* is set and a quant.sf already exists, the
    salmon invocation itself is skipped.
    """
    logger.debug("Running Salmon..")
    skip = False
    if skip_processed and os.path.exists(os.path.join(job_context['output_directory'] + 'quant.sf')):
        logger.info("Skipping pre-processed Salmon run!")
        skip = True
    # Salmon needs to be run differently for different sample types.
    # XXX: TODO: We need to tune the -p/--numThreads to the machines this process will run on.
    # It's possible we want to remove -p entirely and have Salmon figure out for itself.
    # Related: https://github.com/COMBINE-lab/salmon/commit/95866337bde0feb57a0c3231efdfa26c847ba141
    if "input_file_path_2" in job_context:
        # Paired-end reads: supply both files via -1/-2 and enable --gcBias.
        second_read_str = " -2 {}".format(job_context["input_file_path_2"])
        command_str = ("salmon --no-version-check quant -l A --biasSpeedSamp 5 -i {index}"
                       " -1 {input_one}{second_read_str}"
                       " -p 20 -o {output_directory} --seqBias --gcBias --dumpEq --writeUnmappedNames")
        formatted_command = command_str.format(index=job_context["index_directory"],
                                               input_one=job_context["input_file_path"],
                                               second_read_str=second_read_str,
                                               output_directory=job_context["output_directory"])
    else:
        # Related: https://github.com/COMBINE-lab/salmon/issues/83
        command_str = ("salmon --no-version-check quant -l A -i {index}"
                       " -r {input_one}"
                       " -p 20 -o {output_directory} --seqBias --dumpEq --writeUnmappedNames")
        formatted_command = command_str.format(index=job_context["index_directory"],
                                               input_one=job_context["input_file_path"],
                                               output_directory=job_context["output_directory"])
    logger.info("Running Salmon Quant using the following shell command: %s",
                formatted_command,
                processor_job=job_context["job_id"])
    job_context['time_start'] = timezone.now()
    if not skip:
        completed_command = subprocess.run(formatted_command.split(),
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
    job_context['time_end'] = timezone.now()
    ## To me, this looks broken: error codes are anything non-zero.
    ## However, Salmon (seems) to output with negative status codes
    ## even with successful executions.
    ## Possibly related: https://github.com/COMBINE-lab/salmon/issues/55
    if not skip and completed_command.returncode == 1:
        stderr = completed_command.stderr.decode().strip()
        # Trim everything before the first "Error:" marker, if present.
        error_start = stderr.find("Error:")
        error_start = error_start if error_start != -1 else 0
        logger.error("Shell call to salmon failed with error message: %s",
                     stderr[error_start:],
                     processor_job=job_context["job_id"])
        # The failure_reason column is only 256 characters wide.
        error_end = error_start + 200
        job_context["job"].failure_reason = ("Shell call to salmon failed because: "
                                             + stderr[error_start:error_end])
        job_context["success"] = False
    else:
        result = ComputationalResult()
        result.commands.append(formatted_command)
        result.time_start = job_context['time_start']
        result.time_end = job_context['time_end']
        result.pipeline = "Salmon"  # TODO: should be removed
        try:
            processor_key = "SALMON_QUANT"
            result.processor = utils.find_processor(processor_key)
        except Exception as e:
            return utils.handle_processor_exception(job_context, processor_key, e)
        result.is_ccdl = True
        # Here select_for_update() is used as a mutex that forces multiple
        # jobs to execute this block of code in serial manner. See:
        # https://docs.djangoproject.com/en/1.11/ref/models/querysets/#select-for-update
        # Theorectically any rows in any table can be locked here, we're
        # locking all existing rows in ComputationalResult table.
        with transaction.atomic():
            ComputationalResult.objects.select_for_update()
            result.save()
            job_context['pipeline'].steps.append(result.id)
            SampleResultAssociation.objects.get_or_create(sample=job_context['sample'],
                                                          result=result)
            salmon_completed_exp_dirs = _get_salmon_completed_exp_dirs(job_context)
        # tximport analysis is done outside of the transaction so that
        # the mutex wouldn't hold the other jobs too long.
        for experiment_dir in salmon_completed_exp_dirs:
            _tximport(job_context, experiment_dir)
            # If `tximport` on any related experiment fails, exit immediately.
            if not job_context["success"]:
                return job_context
        # Attach salmon's library-format counts as a result annotation.
        with open(os.path.join(job_context['output_directory'], 'lib_format_counts.json')) as lfc_file:
            format_count_data = json.load(lfc_file)
            kv = ComputationalResultAnnotation()
            kv.data = format_count_data
            kv.result = result
            kv.is_public = True
            kv.save()
        # Attach salmon's auxiliary meta info as a result annotation.
        with open(os.path.join(job_context['output_directory'], 'aux_info', 'meta_info.json')) as mi_file:
            meta_info = json.load(mi_file)
            kv = ComputationalResultAnnotation()
            kv.data = meta_info
            kv.result = result
            kv.is_public = True
            kv.save()
        job_context["result"] = result
        job_context["success"] = True
    return job_context
def _run_multiqc(job_context: Dict) -> Dict:
    """Runs the `MultiQC` package to generate the QC report.

    Registers a ComputationalResult for the run and records the zipped
    data directory and HTML report as ComputedFiles (also stored under
    job_context['qc_files']).
    """
    command_str = ("multiqc {input_directory} --outdir {qc_directory} --zip-data-dir")
    formatted_command = command_str.format(input_directory=job_context["qc_input_directory"],
                                           qc_directory=job_context["qc_directory"])
    logger.info("Running MultiQC using the following shell command: %s",
                formatted_command,
                processor_job=job_context["job_id"])
    # MultiQC requires a UTF-8 locale to run.
    qc_env = os.environ.copy()
    qc_env["LC_ALL"] = "C.UTF-8"
    qc_env["LANG"] = "C.UTF-8"
    time_start = timezone.now()
    completed_command = subprocess.run(formatted_command.split(),
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       env=qc_env)
    time_end = timezone.now()
    if completed_command.returncode != 0:
        stderr = str(completed_command.stderr)
        # Trim everything before the first "Error:" marker, if present.
        error_start = stderr.find("Error:")
        error_start = error_start if error_start != -1 else 0
        logger.error("Shell call to MultiQC failed with error message: %s",
                     stderr[error_start:],
                     processor_job=job_context["job_id"])
        # The failure_reason column is only 256 characters wide.
        error_end = error_start + 200
        job_context["job"].failure_reason = ("Shell call to MultiQC failed because: "
                                             + stderr[error_start:error_end])
        job_context["success"] = False
    # NOTE(review): execution continues and records a result/files even
    # after the failure branch above — confirm that is intended.
    result = ComputationalResult()
    result.commands.append(formatted_command)
    result.time_start = time_start
    result.time_end = time_end
    result.is_ccdl = True
    result.pipeline = "MultiQC"  # TODO: should be removed
    try:
        processor_key = "MULTIQC"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        return utils.handle_processor_exception(job_context, processor_key, e)
    result.save()
    job_context['pipeline'].steps.append(result.id)
    assoc = SampleResultAssociation()
    assoc.sample = job_context["sample"]
    assoc.result = result
    assoc.save()
    job_context['qc_result'] = result
    data_file = ComputedFile()
    data_file.filename = "multiqc_data.zip"  # This is deterministic
    data_file.absolute_file_path = os.path.join(job_context["qc_directory"], data_file.filename)
    data_file.calculate_sha1()
    data_file.calculate_size()
    data_file.is_public = True
    data_file.result = job_context['qc_result']
    data_file.is_smashable = False
    data_file.is_qc = True
    data_file.save()
    job_context['computed_files'].append(data_file)
    report_file = ComputedFile()
    report_file.filename = "multiqc_report.html"  # This is deterministic
    report_file.absolute_file_path = os.path.join(job_context["qc_directory"], report_file.filename)
    report_file.calculate_sha1()
    report_file.calculate_size()
    report_file.is_public = True
    report_file.is_smashable = False
    report_file.is_qc = True
    report_file.result = job_context['qc_result']
    report_file.save()
    job_context['computed_files'].append(report_file)
    job_context['qc_files'] = [data_file, report_file]
    return job_context
def _run_fastqc(job_context: Dict) -> Dict:
    """Generate a QC report for the job's input files with `FastQC`.

    On failure, records the reason on the job and flags the context as
    unsuccessful; no ComputationalResult is created here because MultiQC
    reads the FastQC output files in later.
    """
    # We could use --noextract here, but MultiQC wants extracted files.
    input_paths = ' '.join(
        original_file.get_synced_file_path()
        for original_file in job_context['original_files']
    )
    formatted_command = "./FastQC/fastqc --outdir={qc_directory} {files}".format(
        qc_directory=job_context["qc_directory"], files=input_paths)
    logger.info("Running FastQC using the following shell command: %s",
                formatted_command,
                processor_job=job_context["job_id"])
    run_result = subprocess.run(formatted_command.split(),
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    # Java returns a 0 error code for runtime-related errors and FastQC puts
    # progress information in stderr rather than stdout, so handle both.
    run_failed = run_result.returncode != 0
    if run_failed or b"complete for" not in run_result.stderr:
        stderr = str(run_result.stderr)
        logger.error("Shell call to FastQC failed with error message: %s",
                     stderr,
                     processor_job=job_context["job_id"])
        # The failure_reason column is only 256 characters wide.
        job_context["job"].failure_reason = stderr[0:255]
        job_context["success"] = False
    # No ComputationalResult is needed here because MultiQC will read
    # these files in as well.
    return job_context
def _run_salmontools(job_context: Dict, skip_processed=SKIP_PROCESSED) -> Dict:
    """ Run Salmontools to extract unmapped genes.

    Uses the unmapped_names.txt file produced by `salmon quant`
    (--writeUnmappedNames) as input.  On success records a
    ComputationalResult and sets job_context["result"]; on failure sets
    job_context["success"] = False with a truncated failure reason.
    """
    logger.debug("Running SalmonTools ...")
    skip = False
    unmapped_filename = job_context['output_directory'] + 'aux_info/unmapped_names.txt'
    # If the unmapped-names file already exists a previous run got this
    # far, so the whole step can be skipped.
    if skip_processed and os.path.exists(unmapped_filename):
        logger.info("Skipping pre-processed SalmonTools run!")
        skip = True
    if skip: # If this procedure should be skipped, return immediately
        return job_context
    command_str = "salmontools extract-unmapped -u {unmapped_file} -o {output} "
    output_prefix = job_context["output_directory"] + "unmapped_by_salmon"
    command_str = command_str.format(unmapped_file=unmapped_filename,
                                     output=output_prefix)
    # Paired-end samples pass both reads; single-end samples pass -r.
    if "input_file_path_2" in job_context:
        command_str += "-1 {input_1} -2 {input_2}"
        command_str = command_str.format(input_1=job_context["input_file_path"],
                                         input_2=job_context["input_file_path_2"])
    else:
        command_str += "-r {input_1}"
        command_str= command_str.format(input_1=job_context["input_file_path"])
    start_time = timezone.now()
    logger.info("Running the following SalmonTools command: %s",
                command_str,
                processor_job=job_context["job_id"])
    completed_command = subprocess.run(command_str.split(),
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
    end_time = timezone.now()
    # As of SalmonTools 0.1.0, completed_command.returncode is always 0,
    # (even if error happens). completed_command.stderr is not totally
    # reliable either, because it will output the following line even
    # when the execution succeeds:
    #   "There were <N> unmapped reads\n"
    # in which "<N>" is the number of lines in input unmapped_names.txt.
    #
    # As a workaround, we are using a regular expression here to test
    # the status of SalmonTools execution.  Any text in stderr that is
    # not in the above format is treated as error message.
    status_str = completed_command.stderr.decode().strip()
    success_pattern = r'^There were \d+ unmapped reads$'
    if re.match(success_pattern, status_str):
        result = ComputationalResult()
        result.commands.append(command_str)
        result.time_start = start_time
        result.time_end = end_time
        result.is_ccdl = True
        result.pipeline = "Salmontools"  # TODO: should be removed
        try:
            processor_key = "SALMONTOOLS"
            result.processor = utils.find_processor(processor_key)
        except Exception as e:
            return utils.handle_processor_exception(job_context, processor_key, e)
        result.save()
        job_context['pipeline'].steps.append(result.id)
        assoc = SampleResultAssociation()
        assoc.sample = job_context["sample"]
        assoc.result = result
        assoc.save()
        job_context["result"] = result
        job_context["success"] = True
    else:   # error in salmontools
        logger.error("Shell call to salmontools failed with error message: %s",
                     status_str,
                     processor_job=job_context["job_id"])
        # NOTE(review): the prefix plus 256 chars of stderr can exceed the
        # 256-character failure_reason column — confirm truncation upstream.
        job_context["job"].failure_reason = ("Shell call to salmontools failed because: "
                                             + status_str[0:256])
        job_context["success"] = False
    return job_context
def _zip_and_upload(job_context: Dict) -> Dict:
    """Zips the directory output by Salmon into a single file and uploads it.

    Adds the 'success' key to job_context because this function is the
    last in the job.  The archive is registered as a smashable
    ComputedFile attached to the salmon quant result.
    """
    try:
        # arcname=os.sep flattens the archive so its contents sit at the
        # tarball root rather than under the full host path.
        with tarfile.open(job_context['output_archive'], "w:gz") as tar:
            tar.add(job_context["output_directory"], arcname=os.sep)
    except Exception:
        logger.exception("Exception caught while zipping processed directory %s",
                         job_context["output_directory"],
                         processor_job=job_context["job_id"]
                         )
        failure_template = "Exception caught while zipping processed directory {}"
        job_context["job"].failure_reason = failure_template.format(job_context['output_archive'])
        job_context["success"] = False
        return job_context
    computed_file = ComputedFile()
    computed_file.absolute_file_path = job_context["output_archive"]
    computed_file.filename = os.path.split(job_context["output_archive"])[-1]
    computed_file.calculate_sha1()
    computed_file.calculate_size()
    computed_file.is_public = True
    computed_file.result = job_context['result']
    computed_file.is_smashable = True
    computed_file.is_qc = False
    computed_file.save()
    job_context['computed_files'].append(computed_file)
    job_context["success"] = True
    return job_context
def salmon(job_id: int) -> None:
    """Main processor function for the Salmon Processor.

    Runs salmon quant command line tool, specifying either a long or
    short read length.  The steps below run in order; each receives and
    returns the shared job_context dict.
    """
    pipeline = Pipeline(name=utils.PipelineEnum.SALMON.value)
    utils.run_pipeline({"job_id": job_id, "pipeline": pipeline},
                       [utils.start_job,
                        _set_job_prefix,
                        _prepare_files,
                        _determine_index_length,
                        _download_index,
                        _run_fastqc,
                        _run_salmon,
                        _run_salmontools,
                        _run_multiqc,
                        _zip_and_upload,
                        utils.end_job])
Optimize Salmon core usage, remove XXX note
from __future__ import absolute_import, unicode_literals
import io
import json
import os
import re
import shutil
import subprocess
import tarfile
import pandas as pd
import numpy as np
from django.db import transaction
from django.utils import timezone
from typing import Dict, List
from data_refinery_common.job_lookup import Downloaders
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
OrganismIndex,
ComputationalResult,
ComputationalResultAnnotation,
ComputedFile,
Processor,
Pipeline,
Experiment,
ExperimentSampleAssociation,
SampleResultAssociation,
Sample,
SampleComputedFileAssociation
)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers._version import __version__
from data_refinery_workers.processors import utils
logger = get_and_configure_logger(__name__)
JOB_DIR_PREFIX = "processor_job_"
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
SKIP_PROCESSED = get_env_variable("SKIP_PROCESSED", True)
def _set_job_prefix(job_context: Dict) -> Dict:
    """Store this processor job's directory prefix in the context.

    The prefix is the shared JOB_DIR_PREFIX constant followed by the
    numeric job id, e.g. "processor_job_42".
    """
    job_context["job_dir_prefix"] = "{}{}".format(JOB_DIR_PREFIX, job_context["job_id"])
    return job_context
def _prepare_files(job_context: Dict) -> Dict:
    """Moves the file(s) from the raw directory to the temp directory.

    Also adds the keys "input_file_path" and "output_directory" to
    job_context so everything is prepared for processing. If the reads
    are paired then there will also be an "input_file_path_2" key
    added to job_context for the second read.

    Fix: the redundant second os.makedirs(output_directory) call was
    removed — the directory is already created (with exist_ok=True) a
    few lines above.
    """
    logger.debug("Preparing files..")

    original_files = job_context["original_files"]
    job_context["input_file_path"] = original_files[0].get_synced_file_path()
    if len(original_files) == 2:
        job_context["input_file_path_2"] = original_files[1].get_synced_file_path()

    # There should only ever be one per Salmon run
    job_context['sample'] = job_context['original_files'][0].samples.first()
    job_context['organism'] = job_context['sample'].organism
    job_context["success"] = True

    # The paths of original_files are in this format:
    #   <experiment_accession_code>/raw/<filename>
    # Salmon outputs an entire directory of files, so create a temp
    # directory to output it to until we can zip it to.
    # The path of temp directory is in this format:
    #   <experiment_accession_code>/<sample_accession_code>/processed/
    pre_part = '/'.join(original_files[0].absolute_file_path.split('/')[:-2])
    sample_accession = job_context['sample'].accession_code
    job_context["output_directory"] = pre_part + "/" + sample_accession + '/processed/'
    # exist_ok=True keeps this idempotent across job retries.
    os.makedirs(job_context["output_directory"], exist_ok=True)
    job_context["qc_input_directory"] = pre_part + '/'
    job_context["qc_directory"] = pre_part + '/qc/'
    os.makedirs(job_context["qc_directory"], exist_ok=True)
    timestamp = str(timezone.now().timestamp()).split('.')[0]
    job_context["output_archive"] = pre_part + '/result-' + timestamp + '.tar.gz'
    return job_context
def _determine_index_length(job_context: Dict) -> Dict:
    """Determines whether to use the long or short salmon index.

    Adds the key 'index_length' to the job_context with a value of
    'short' if the average read length is 75 base pairs or less,
    otherwise 'long'. For more information on index length see the
    _create_index function of the transcriptome_index processor.

    Fix: the zero-reads error path previously accessed
    job_context["input_file_path_2"] unconditionally, raising a KeyError
    for single-read samples and masking the real failure; it now uses
    .get() so the log line works either way.
    """
    logger.debug("Determining index length..")
    total_base_pairs = 0
    number_of_reads = 0
    counter = 1

    # zcat unzips the file provided and dumps the output to STDOUT.
    # It is installed by default in Debian so it should be included
    # in every docker image already.
    with subprocess.Popen(['zcat', job_context["input_file_path"]], stdout=subprocess.PIPE,
                          universal_newlines=True) as process:
        for line in process.stdout:
            # In the FASTQ file format, there are 4 lines for each
            # read. Three of these contain metadata about the
            # read. The string representing the read itself is found
            # on the second line of each quartet.
            if counter % 4 == 2:
                total_base_pairs += len(line.replace("\n", ""))
                number_of_reads += 1
            counter += 1

    if "input_file_path_2" in job_context:
        with subprocess.Popen(['zcat', job_context["input_file_path_2"]], stdout=subprocess.PIPE,
                              universal_newlines=True) as process:
            for line in process.stdout:
                if counter % 4 == 2:
                    total_base_pairs += len(line.replace("\n", ""))
                    number_of_reads += 1
                counter += 1

    if number_of_reads == 0:
        logger.error("Unable to determine number_of_reads for job.",
                     input_file_1=job_context["input_file_path"],
                     # .get() because single-read samples have no second file.
                     input_file_2=job_context.get("input_file_path_2"),
                     job_id=job_context['job'].id
                     )
        job_context['job'].failure_reason = "Unable to determine number_of_reads."
        job_context['success'] = False
        return job_context

    index_length_raw = total_base_pairs / number_of_reads

    # Put the raw index length into the job context in a new field for regression testing purposes
    job_context["index_length_raw"] = index_length_raw

    if index_length_raw > 75:
        job_context["index_length"] = "long"
    else:
        job_context["index_length"] = "short"

    return job_context
def _download_index(job_context: Dict) -> Dict:
    """Downloads the appropriate Salmon Index for this experiment.

    Salmon documentation states:
    "If you want to use Salmon in quasi-mapping-based mode, then you
    first have to build an Salmon index for your transcriptome."
    We have used the Data Refinery to build these indices already,
    this function retrieves the correct index for the organism and
    read length from Permanent Storage.
    """
    logger.debug("Fetching and installing index..")
    index_type = "TRANSCRIPTOME_" + job_context["index_length"].upper()
    # Pick the most recently created index for this organism/length.
    index_object = OrganismIndex.objects.filter(organism=job_context['organism'],
                                                index_type=index_type).order_by('-created_at').first()
    if not index_object:
        logger.error("Could not run Salmon processor without index for organism",
                     organism=job_context['organism'],
                     processor_job=job_context["job_id"]
                     )
        job_context["job"].failure_reason = "Missing transcriptome index."
        job_context["success"] = False
        return job_context
    result = index_object.result
    file = ComputedFile.objects.get(result=result)
    job_context["index_unpacked"] = '/'.join(file.get_synced_file_path().split('/')[:-1])
    job_context["index_directory"] = job_context["index_unpacked"] + "/index"
    job_context["genes_to_transcripts_path"] = os.path.join(
        job_context["index_directory"], "genes_to_transcripts.txt")
    # versionInfo.json is the last file extracted, so its presence means a
    # previous job already unpacked this index on the same host.
    if not os.path.exists(job_context["index_directory"] + '/versionInfo.json'):
        with tarfile.open(file.get_synced_file_path(), "r:gz") as tarball:
            tarball.extractall(job_context["index_unpacked"])
    else:
        logger.info("Index already installed", processor_job=job_context["job_id"])
    job_context["success"] = True
    return job_context
def _count_samples_processed_by_salmon(experiment):
"""Count the number of salmon-quant-processed samples in an experiment."""
counter = 0
salmon_cmd_str = 'salmon --no-version-check quant'
for sample in experiment.samples.all():
cmd_found = False
for result in sample.results.all():
for command in result.commands:
if command.startswith(salmon_cmd_str):
counter += 1
cmd_found = True
break
if cmd_found:
break
return counter
def _get_salmon_completed_exp_dirs(job_context: Dict) -> List[str]:
    """Return a list of directory names of experiments whose samples
    have all been processed by `salmon quant` command.
    """
    experiment_ids = ExperimentSampleAssociation.objects.filter(
        sample=job_context['sample']).values_list('experiment')
    related_experiments = Experiment.objects.filter(pk__in=experiment_ids)

    # job_context['input_file_path'] has the shape
    # "<experiment_accession_code>/raw/<filename>"; dropping the last two
    # path components yields the experiment directory.  It is the same
    # for every experiment in the loop, so compute it once.
    experiment_dir = '/'.join(job_context["input_file_path"].split('/')[:-2])

    completed_dirs = []
    for experiment in related_experiments:
        if _count_samples_processed_by_salmon(experiment) == experiment.samples.count():
            completed_dirs.append(experiment_dir)
    return completed_dirs
def _tximport(job_context: Dict, experiment_dir: str) -> Dict:
    """Run the tximport R script on an experiment directory.

    Aggregates the `salmon quant` outputs under `experiment_dir` to gene
    level using the genes_to_transcripts.txt mapping, records a
    ComputationalResult, registers the RDS output, and splits the
    gene_lengthScaledTPM.tsv matrix into one smashable per-sample TSV.

    :param job_context: processor job context; must contain
        "genes_to_transcripts_path", "job_id", "pipeline" and
        "computed_files".
    :param experiment_dir: directory holding quant output for every
        sample in the experiment; its basename is the experiment
        accession code.

    Fix: the exception handler referenced the undefined name `exp_dir`,
    which raised a NameError and masked the real error — it now uses
    `experiment_dir`.
    """
    logger.info("Running tximport!", processor_job=job_context['job_id'], ex_dir=experiment_dir)
    result = ComputationalResult()

    cmd_tokens = [
        "/usr/bin/Rscript", "--vanilla",
        "/home/user/data_refinery_workers/processors/tximport.R",
        "--exp_dir", experiment_dir,
        "--gene2txmap", job_context["genes_to_transcripts_path"]
    ]
    result.time_start = timezone.now()
    try:
        subprocess.run(cmd_tokens, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except Exception as e:
        error_template = ("Encountered error in R code while running tximport.R"
                          " pipeline during processing of {0}: {1}")
        error_message = error_template.format(experiment_dir, str(e))
        logger.error(error_message, processor_job=job_context["job_id"])
        job_context["success"] = False
        return job_context
    result.time_end = timezone.now()
    result.commands.append(" ".join(cmd_tokens))
    result.is_ccdl = True
    result.pipeline = "tximport"  # TODO: should be removed
    try:
        processor_key = "TXIMPORT"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        return utils.handle_processor_exception(job_context, processor_key, e)
    result.save()
    job_context['pipeline'].steps.append(result.id)

    # Associate this result with all samples in this experiment.
    # TODO: This may not be completely sensible, because `tximport` is
    # done at experiment level, not at sample level.
    experiment_accession = experiment_dir.split('/')[-1]
    current_experiment = Experiment.objects.get(accession_code=experiment_accession)
    for sample in current_experiment.samples.all():
        s_r = SampleResultAssociation(sample=sample, result=result)
        s_r.save()

    # Register the raw tximport RDS output (not smashable).
    rds_file = ComputedFile()
    rds_file.absolute_file_path = experiment_dir + '/txi_out.RDS'
    rds_file.filename = 'txi_out.RDS'
    rds_file.result = result
    rds_file.is_smashable = False
    rds_file.is_qc = False
    rds_file.is_public = True
    rds_file.calculate_sha1()
    rds_file.calculate_size()
    rds_file.save()
    job_context['computed_files'].append(rds_file)

    # Split the tximport result into smashable subfiles, one column
    # (sample accession) per file.
    big_tsv = experiment_dir + '/gene_lengthScaledTPM.tsv'
    data = pd.read_csv(big_tsv, sep='\t', header=0, index_col=0)
    individual_files = []
    frames = np.split(data, len(data.columns), axis=1)
    for frame in frames:
        frame_path = os.path.join(experiment_dir, frame.columns.values[0]) + '_gene_lengthScaledTPM.tsv'
        frame.to_csv(frame_path, sep='\t', encoding='utf-8')

        sample = Sample.objects.get(accession_code=frame.columns.values[0])

        computed_file = ComputedFile()
        computed_file.absolute_file_path = frame_path
        computed_file.filename = frame_path.split('/')[-1]
        computed_file.result = result
        computed_file.is_smashable = True
        computed_file.is_qc = False
        computed_file.is_public = True
        computed_file.calculate_sha1()
        computed_file.calculate_size()
        computed_file.save()
        job_context['computed_files'].append(computed_file)

        SampleResultAssociation.objects.get_or_create(
            sample=sample,
            result=result)
        SampleComputedFileAssociation.objects.get_or_create(
            sample=sample,
            computed_file=computed_file)
        individual_files.append(computed_file)

    job_context['tximported'] = True
    job_context['individual_files'] = individual_files
    return job_context
def _run_salmon(job_context: Dict, skip_processed=SKIP_PROCESSED) -> Dict:
    """Run `salmon quant` on the prepared input file(s).

    Records a ComputationalResult plus annotations from the quant output,
    and — inside a serializing transaction — triggers tximport for any
    related experiment whose samples are now all quantified.  When
    skip_processed is set and quant.sf already exists, the quant run
    itself is skipped but the bookkeeping still happens.
    """
    logger.debug("Running Salmon..")
    skip = False
    # NOTE(review): os.path.join with a single pre-concatenated argument
    # is a no-op; the intent appears to be joining the two pieces.
    if skip_processed and os.path.exists(os.path.join(job_context['output_directory'] + 'quant.sf')):
        logger.info("Skipping pre-processed Salmon run!")
        skip = True
    # Salmon needs to be run differently for different sample types.
    if "input_file_path_2" in job_context:
        second_read_str = " -2 {}".format(job_context["input_file_path_2"])
        # Rob recommends 16 threads/process, which fits snugly on an x1 at 8GB RAM per Salmon container:
        # (2 threads/core * 16 cores/socket * 64 vCPU) / (1TB/18GB) = ~17
        command_str = ("salmon --no-version-check quant -l A --biasSpeedSamp 5 -i {index}"
                       " -1 {input_one}{second_read_str}"
                       " -p 16 -o {output_directory} --seqBias --gcBias --dumpEq --writeUnmappedNames")
        formatted_command = command_str.format(index=job_context["index_directory"],
                                               input_one=job_context["input_file_path"],
                                               second_read_str=second_read_str,
                                               output_directory=job_context["output_directory"])
    else:
        # Related: https://github.com/COMBINE-lab/salmon/issues/83
        command_str = ("salmon --no-version-check quant -l A -i {index}"
                       " -r {input_one}"
                       " -p 16 -o {output_directory} --seqBias --dumpEq --writeUnmappedNames")
        formatted_command = command_str.format(index=job_context["index_directory"],
                                               input_one=job_context["input_file_path"],
                                               output_directory=job_context["output_directory"])
    logger.info("Running Salmon Quant using the following shell command: %s",
                formatted_command,
                processor_job=job_context["job_id"])
    job_context['time_start'] = timezone.now()
    if not skip:
        completed_command = subprocess.run(formatted_command.split(),
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
    job_context['time_end'] = timezone.now()
    ## To me, this looks broken: error codes are anything non-zero.
    ## However, Salmon (seems) to output with negative status codes
    ## even with successful executions.
    ## Possibly related: https://github.com/COMBINE-lab/salmon/issues/55
    if not skip and completed_command.returncode == 1:
        stderr = completed_command.stderr.decode().strip()
        error_start = stderr.find("Error:")
        error_start = error_start if error_start != -1 else 0
        logger.error("Shell call to salmon failed with error message: %s",
                     stderr[error_start:],
                     processor_job=job_context["job_id"])
        # The failure_reason column is only 256 characters wide.
        error_end = error_start + 200
        job_context["job"].failure_reason = ("Shell call to salmon failed because: "
                                             + stderr[error_start:error_end])
        job_context["success"] = False
    else:
        result = ComputationalResult()
        result.commands.append(formatted_command)
        result.time_start = job_context['time_start']
        result.time_end = job_context['time_end']
        result.pipeline = "Salmon"  # TODO: should be removed
        try:
            processor_key = "SALMON_QUANT"
            result.processor = utils.find_processor(processor_key)
        except Exception as e:
            return utils.handle_processor_exception(job_context, processor_key, e)
        result.is_ccdl = True
        # Here select_for_update() is used as a mutex that forces multiple
        # jobs to execute this block of code in serial manner. See:
        # https://docs.djangoproject.com/en/1.11/ref/models/querysets/#select-for-update
        # Theorectically any rows in any table can be locked here, we're
        # locking all existing rows in ComputationalResult table.
        with transaction.atomic():
            ComputationalResult.objects.select_for_update()
            result.save()
            job_context['pipeline'].steps.append(result.id)
            SampleResultAssociation.objects.get_or_create(sample=job_context['sample'],
                                                          result=result)
            salmon_completed_exp_dirs = _get_salmon_completed_exp_dirs(job_context)
        # tximport analysis is done outside of the transaction so that
        # the mutex wouldn't hold the other jobs too long.
        for experiment_dir in salmon_completed_exp_dirs:
            _tximport(job_context, experiment_dir)
            # If `tximport` on any related experiment fails, exit immediately.
            if not job_context["success"]:
                return job_context
        # Persist salmon's own QC/metadata JSON as result annotations.
        with open(os.path.join(job_context['output_directory'], 'lib_format_counts.json')) as lfc_file:
            format_count_data = json.load(lfc_file)
        kv = ComputationalResultAnnotation()
        kv.data = format_count_data
        kv.result = result
        kv.is_public = True
        kv.save()
        with open(os.path.join(job_context['output_directory'], 'aux_info', 'meta_info.json')) as mi_file:
            meta_info = json.load(mi_file)
        kv = ComputationalResultAnnotation()
        kv.data = meta_info
        kv.result = result
        kv.is_public = True
        kv.save()
        job_context["result"] = result
        job_context["success"] = True
    return job_context
def _run_multiqc(job_context: Dict) -> Dict:
    """Runs the `MultiQC` package to generate the QC report.

    On success records a ComputationalResult and registers the zipped
    data directory and the HTML report as QC ComputedFiles.

    Fix: on a non-zero return code the function now returns immediately.
    Previously it fell through and tried to register output files that
    MultiQC never produced, which would raise while hashing them.
    """
    command_str = ("multiqc {input_directory} --outdir {qc_directory} --zip-data-dir")
    formatted_command = command_str.format(input_directory=job_context["qc_input_directory"],
                                           qc_directory=job_context["qc_directory"])

    logger.info("Running MultiQC using the following shell command: %s",
                formatted_command,
                processor_job=job_context["job_id"])

    qc_env = os.environ.copy()
    # MultiQC requires a UTF-8 capable locale to run.
    qc_env["LC_ALL"] = "C.UTF-8"
    qc_env["LANG"] = "C.UTF-8"

    time_start = timezone.now()
    completed_command = subprocess.run(formatted_command.split(),
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       env=qc_env)
    time_end = timezone.now()

    if completed_command.returncode != 0:
        stderr = str(completed_command.stderr)
        error_start = stderr.find("Error:")
        error_start = error_start if error_start != -1 else 0
        logger.error("Shell call to MultiQC failed with error message: %s",
                     stderr[error_start:],
                     processor_job=job_context["job_id"])
        # The failure_reason column is only 256 characters wide.
        error_end = error_start + 200
        job_context["job"].failure_reason = ("Shell call to MultiQC failed because: "
                                             + stderr[error_start:error_end])
        job_context["success"] = False
        return job_context

    result = ComputationalResult()
    result.commands.append(formatted_command)
    result.time_start = time_start
    result.time_end = time_end
    result.is_ccdl = True
    result.pipeline = "MultiQC"  # TODO: should be removed
    try:
        processor_key = "MULTIQC"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        return utils.handle_processor_exception(job_context, processor_key, e)
    result.save()
    job_context['pipeline'].steps.append(result.id)

    assoc = SampleResultAssociation()
    assoc.sample = job_context["sample"]
    assoc.result = result
    assoc.save()

    job_context['qc_result'] = result

    # MultiQC's output filenames are deterministic, so the two files can
    # be registered directly.
    data_file = ComputedFile()
    data_file.filename = "multiqc_data.zip"  # This is deterministic
    data_file.absolute_file_path = os.path.join(job_context["qc_directory"], data_file.filename)
    data_file.calculate_sha1()
    data_file.calculate_size()
    data_file.is_public = True
    data_file.result = job_context['qc_result']
    data_file.is_smashable = False
    data_file.is_qc = True
    data_file.save()
    job_context['computed_files'].append(data_file)

    report_file = ComputedFile()
    report_file.filename = "multiqc_report.html"  # This is deterministic
    report_file.absolute_file_path = os.path.join(job_context["qc_directory"], report_file.filename)
    report_file.calculate_sha1()
    report_file.calculate_size()
    report_file.is_public = True
    report_file.is_smashable = False
    report_file.is_qc = True
    report_file.result = job_context['qc_result']
    report_file.save()
    job_context['computed_files'].append(report_file)

    job_context['qc_files'] = [data_file, report_file]
    return job_context
def _run_fastqc(job_context: Dict) -> Dict:
    """Generate per-file QC reports by running FastQC on the input files.

    Reports land in job_context["qc_directory"], where MultiQC reads them
    later — so no ComputationalResult is created in this step.
    """
    # We could use --noextract here, but MultiQC wants extracted files.
    input_paths = [original_file.get_synced_file_path()
                   for original_file in job_context['original_files']]
    formatted_command = "./FastQC/fastqc --outdir={qc_directory} {files}".format(
        qc_directory=job_context["qc_directory"],
        files=' '.join(input_paths))

    logger.info("Running FastQC using the following shell command: %s",
                formatted_command,
                processor_job=job_context["job_id"])

    completed_command = subprocess.run(formatted_command.split(),
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)

    # Java returns a 0 error code for runtime-related errors and FastQC puts progress
    # information in stderr rather than stdout, so handle both.
    failed = (completed_command.returncode != 0
              or b"complete for" not in completed_command.stderr)
    if failed:
        stderr = str(completed_command.stderr)
        logger.error("Shell call to FastQC failed with error message: %s",
                     stderr,
                     processor_job=job_context["job_id"])
        # The failure_reason column is only 256 characters wide.
        job_context["job"].failure_reason = stderr[0:255]
        job_context["success"] = False

    # We don't need to make a ComputationalResult here because
    # MultiQC will read these files in as well.
    return job_context
def _run_salmontools(job_context: Dict, skip_processed=SKIP_PROCESSED) -> Dict:
    """ Run Salmontools to extract unmapped genes.

    Uses the unmapped_names.txt produced by `salmon quant`
    (--writeUnmappedNames).  On success records a ComputationalResult;
    on failure stores a truncated failure reason.

    Fix: the failure_reason was previously built as a 42-character
    prefix plus up to 256 characters of stderr, overflowing the
    256-character failure_reason column; the combined message is now
    truncated to 256 characters.
    """
    logger.debug("Running SalmonTools ...")
    skip = False
    unmapped_filename = job_context['output_directory'] + 'aux_info/unmapped_names.txt'
    # The presence of the unmapped-names output means a previous run
    # already completed this step.
    if skip_processed and os.path.exists(unmapped_filename):
        logger.info("Skipping pre-processed SalmonTools run!")
        skip = True
    if skip:  # If this procedure should be skipped, return immediately
        return job_context

    command_str = "salmontools extract-unmapped -u {unmapped_file} -o {output} "
    output_prefix = job_context["output_directory"] + "unmapped_by_salmon"
    command_str = command_str.format(unmapped_file=unmapped_filename,
                                     output=output_prefix)
    # Paired-end samples pass both reads with -1/-2; single-end uses -r.
    if "input_file_path_2" in job_context:
        command_str += "-1 {input_1} -2 {input_2}"
        command_str = command_str.format(input_1=job_context["input_file_path"],
                                         input_2=job_context["input_file_path_2"])
    else:
        command_str += "-r {input_1}"
        command_str = command_str.format(input_1=job_context["input_file_path"])

    start_time = timezone.now()
    logger.info("Running the following SalmonTools command: %s",
                command_str,
                processor_job=job_context["job_id"])
    completed_command = subprocess.run(command_str.split(),
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
    end_time = timezone.now()

    # As of SalmonTools 0.1.0, completed_command.returncode is always 0,
    # (even if error happens). completed_command.stderr is not totally
    # reliable either, because it will output the following line even
    # when the execution succeeds:
    #   "There were <N> unmapped reads\n"
    # in which "<N>" is the number of lines in input unmapped_names.txt.
    #
    # As a workaround, we are using a regular expression here to test
    # the status of SalmonTools execution. Any text in stderr that is
    # not in the above format is treated as error message.
    status_str = completed_command.stderr.decode().strip()
    success_pattern = r'^There were \d+ unmapped reads$'
    if re.match(success_pattern, status_str):
        result = ComputationalResult()
        result.commands.append(command_str)
        result.time_start = start_time
        result.time_end = end_time
        result.is_ccdl = True
        result.pipeline = "Salmontools"  # TODO: should be removed
        try:
            processor_key = "SALMONTOOLS"
            result.processor = utils.find_processor(processor_key)
        except Exception as e:
            return utils.handle_processor_exception(job_context, processor_key, e)
        result.save()
        job_context['pipeline'].steps.append(result.id)

        assoc = SampleResultAssociation()
        assoc.sample = job_context["sample"]
        assoc.result = result
        assoc.save()

        job_context["result"] = result
        job_context["success"] = True
    else:  # error in salmontools
        logger.error("Shell call to salmontools failed with error message: %s",
                     status_str,
                     processor_job=job_context["job_id"])
        # Truncate the whole message so it fits the 256-character-wide
        # failure_reason column.
        job_context["job"].failure_reason = ("Shell call to salmontools failed because: "
                                             + status_str)[0:256]
        job_context["success"] = False
    return job_context
def _zip_and_upload(job_context: Dict) -> Dict:
    """Tar-gzip the Salmon output directory and register the archive.

    This is the last step of the job, so it also sets the 'success' key
    in job_context.
    """
    archive_path = job_context['output_archive']
    try:
        # arcname=os.sep puts the directory contents at the tarball root.
        with tarfile.open(archive_path, "w:gz") as tar:
            tar.add(job_context["output_directory"], arcname=os.sep)
    except Exception:
        logger.exception("Exception caught while zipping processed directory %s",
                         job_context["output_directory"],
                         processor_job=job_context["job_id"]
                         )
        failure_template = "Exception caught while zipping processed directory {}"
        job_context["job"].failure_reason = failure_template.format(archive_path)
        job_context["success"] = False
        return job_context

    archive_file = ComputedFile()
    archive_file.absolute_file_path = archive_path
    archive_file.filename = os.path.split(archive_path)[-1]
    archive_file.calculate_sha1()
    archive_file.calculate_size()
    archive_file.is_public = True
    archive_file.result = job_context['result']
    archive_file.is_smashable = True
    archive_file.is_qc = False
    archive_file.save()
    job_context['computed_files'].append(archive_file)

    job_context["success"] = True
    return job_context
def salmon(job_id: int) -> None:
    """Main processor function for the Salmon Processor.

    Runs salmon quant command line tool, specifying either a long or
    short read length.
    """
    pipeline = Pipeline(name=utils.PipelineEnum.SALMON.value)
    # The steps run in order, threading the shared job_context through.
    steps = [
        utils.start_job,
        _set_job_prefix,
        _prepare_files,
        _determine_index_length,
        _download_index,
        _run_fastqc,
        _run_salmon,
        _run_salmontools,
        _run_multiqc,
        _zip_and_upload,
        utils.end_job,
    ]
    utils.run_pipeline({"job_id": job_id, "pipeline": pipeline}, steps)
|
# Copyright (c) 2015 Intracom S.A. Telecom Solutions. All rights reserved.
#
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution,
# and is available at http://www.eclipse.org/legal/epl-v10.html
""" General network utilities """
import logging
import os
import paramiko
import select
import socket
import stat
import time
def ssh_connect_or_return(ipaddr, user, passwd, maxretries, remote_port=22):
    """Opens a connection and returns a connection object.

    Retries up to maxretries times, sleeping 2 seconds between attempts.

    Fixes: the docstring previously claimed -1 was returned on failure
    while the code returns None; the bare ``except:`` was narrowed to
    ``except Exception`` so KeyboardInterrupt/SystemExit still propagate.

    :param ipaddr: Ip adress of the remote machine
    :param user: username of the remote user
    :param passwd: password of the remote user
    :param maxretries: maximum number of times to connect
    :param remote_port: SSH port of the remote machine
    :returns: an ssh connection handle, or None when all attempts fail
    :rtype: paramiko.SSHClient (or None when failure)
    :type ipaddr: str
    :type user: str
    :type passwd: str
    :type maxretries: int
    :type remote_port: int
    """
    retries = 1
    while retries <= maxretries:
        logging.info(
            '[netutil] Trying to connect to {0}:{1} ({2}/{3})'.
            format(ipaddr, remote_port, retries, maxretries))
        try:
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(hostname=ipaddr, port=remote_port,
                        username=user, password=passwd)
            logging.info('[netutil] Connected to {0} '.format(ipaddr))
            return ssh
        except paramiko.AuthenticationException:
            logging.error(
                '[netutil] Authentication failed when connecting to {0}'.
                format(ipaddr))
        except Exception:
            # Any other failure (host down, refused, timeout): log and retry.
            logging.error(
                '[netutil] Could not SSH to {0}, waiting for it to start'.
                format(ipaddr))
        retries += 1
        time.sleep(2)

    # All attempts failed.
    logging.info('[netutil] Could not connect to {0}. Returning'
                 .format(ipaddr))
    return None
def ssh_copy_file_to_target(ipaddr, user, passwd, local_file, remote_file,
                            remote_port=22):
    """Copies local file on a remote machine target.

    Fix: the SFTP session and transport are now closed in ``finally``
    blocks, so they are not leaked when the transfer raises.

    :param ipaddr: Ip adress of the remote machine
    :param user: username of the remote user
    :param passwd: password of the remote user
    :param local_file: file from local machine to copy,full location required
    :param remote_file: remote destination, full location required
                        i.e /tmp/foo.txt
    :param remote_port: port to perform sftp from
    :type ipaddr: str
    :type user: str
    :type passwd: str
    :type local_file: str
    :type remote_file: str
    :type remote_port: int
    """
    transport_layer = paramiko.Transport((ipaddr, remote_port))
    try:
        transport_layer.connect(username=user, password=passwd)
        sftp = paramiko.SFTPClient.from_transport(transport_layer)
        try:
            sftp.put(local_file, remote_file)
        finally:
            sftp.close()
    finally:
        # Always release the transport, even on a failed transfer.
        transport_layer.close()
def copy_directory_to_target(ipaddr, user, passwd, local_path, remote_path,
                             remote_port=22):
    """Recursively copy a local directory tree onto a remote machine over SFTP.

    :param ipaddr: IP address of the remote machine
    :param user: username of the remote user
    :param passwd: password of the remote user
    :param local_path: directory path from local machine to copy, full location
        required
    :param remote_path: remote destination, full location required
    :param remote_port: port to perform sftp from
    :type ipaddr: str
    :type user: str
    :type passwd: str
    :type local_path: str
    :type remote_path: str
    :type remote_port: int
    """
    # recursively upload a full directory
    if local_path.endswith('/'):
        local_path = local_path[:-1]
    transport_layer = paramiko.Transport((ipaddr, remote_port))
    transport_layer.connect(username=user, password=passwd)
    sftp = paramiko.SFTPClient.from_transport(transport_layer)
    # NOTE(review): os.chdir changes the working directory of the whole
    # process so that os.walk yields paths relative to local_path's parent.
    # This side effect is visible to every other thread -- confirm callers
    # tolerate it.
    os.chdir(os.path.split(local_path)[0])
    parent = os.path.split(local_path)[1]
    for walker in os.walk(parent):
        try:
            folder_to_make = os.path.join(remote_path, walker[0])
            sftp.mkdir(folder_to_make)
        except:
            # Bare except: mkdir fails when the directory already exists,
            # but any other SFTP error is silently swallowed here too.
            pass
        # walker[2] is the list of plain files inside directory walker[0].
        for curr_file in walker[2]:
            local_file = os.path.join(walker[0], curr_file)
            remote_file = os.path.join(remote_path, walker[0], curr_file)
            sftp.put(local_file, remote_file)
    sftp.close()
    transport_layer.close()
def make_remote_file_executable(ipaddr, user, passwd, remote_file,
                                remote_port=22):
    """Makes a remote file executable (owner read/write/execute).

    :param ipaddr: IP address of the remote machine
    :param user: username of the remote user
    :param passwd: password of the remote user
    :param remote_file: remote file to make executable
    :param remote_port: port to perform sftp from
    :type ipaddr: str
    :type user: str
    :type passwd: str
    :type remote_file: str
    :type remote_port: int
    """
    transport_layer = paramiko.Transport((ipaddr, remote_port))
    try:
        transport_layer.connect(username=user, password=passwd)
        sftp = paramiko.SFTPClient.from_transport(transport_layer)
        try:
            # Owner rwx; group/other permission bits are cleared.
            sftp.chmod(remote_file,
                       stat.S_IEXEC | stat.S_IREAD | stat.S_IWRITE)
        finally:
            sftp.close()
    finally:
        # Bug fix: release the transport even when connect/chmod raises,
        # instead of leaking the socket.
        transport_layer.close()
def create_remote_directory(ipaddr, user, passwd, remote_path, remote_port=22):
    """Creates a directory on a remote machine if it does not already exist.

    :param ipaddr: IP address of the remote machine
    :param user: username of the remote user
    :param passwd: password of the remote user
    :param remote_path: directory to create on the remote machine
        (docstring previously said "maximum number of times to connect",
        a copy-paste error)
    :param remote_port: port to perform sftp from
    :type ipaddr: str
    :type user: str
    :type passwd: str
    :type remote_path: str
    :type remote_port: int
    """
    transport_layer = paramiko.Transport((ipaddr, remote_port))
    try:
        transport_layer.connect(username=user, password=passwd)
        sftp = paramiko.SFTPClient.from_transport(transport_layer)
        try:
            try:
                # Test if remote_path exists
                sftp.chdir(remote_path)
            except IOError:
                # Create remote_path
                sftp.mkdir(remote_path)
                sftp.chdir(remote_path)
        finally:
            sftp.close()
    finally:
        # Bug fix: close the transport on every path, not only on success.
        transport_layer.close()
def isdir(path, sftp):
    """Tell whether a remote path refers to a directory.

    :param path: A string with the full path we want to check
    :param sftp: An sftp connection object (paramiko)
    :returns: True if the given path is a directory false otherwise.
    :rtype: bool
    :type path: str
    :type sftp: paramiko.SFTPClient
    """
    try:
        attributes = sftp.stat(path)
    except IOError:
        # stat failed (typically: path does not exist) -> not a directory.
        return False
    return stat.S_ISDIR(attributes.st_mode)
def remove_remote_directory(ipaddr, user, passwd, path, remote_port=22):
    """Removes recursively remote directories (removes all files and
    other sub-directories).

    :param ipaddr: IP address of the remote machine
    :param user: username of the remote user
    :param passwd: password of the remote user
    :param path: remote directory to remove (docstring previously
        documented a non-existent ``remote_file`` parameter)
    :param remote_port: port to perform sftp from
    :type ipaddr: str
    :type user: str
    :type passwd: str
    :type path: str
    :type remote_port: int
    """
    transport_layer = paramiko.Transport((ipaddr, remote_port))
    try:
        transport_layer.connect(username=user, password=passwd)
        sftp = paramiko.SFTPClient.from_transport(transport_layer)
        try:
            for file_item in sftp.listdir(path=path):
                filepath = os.path.join(path, file_item)
                if isdir(filepath, sftp):
                    # Bug fix: propagate remote_port. The recursive call
                    # previously omitted it, so nested sub-directories were
                    # always re-contacted on the default port 22.
                    remove_remote_directory(ipaddr, user, passwd, filepath,
                                            remote_port)
                else:
                    sftp.remove(filepath)
            sftp.rmdir(path)
        finally:
            sftp.close()
    finally:
        # Close the transport even when listdir/remove/rmdir raises.
        transport_layer.close()
# TODO - To be removed
def ssh_run_command_old(ssh_client, command_to_run):
    """Runs the specified command on a remote machine

    :param ssh_client : SSH client provided by paramiko to run the command
    :param command_to_run: Command to execute
    :returns: the output of the remotely executed command
    :rtype: tuple (stdin, stdout, stderr)
    :type ssh_client: paramiko.SSHClient
    :type command_to_run: str
    """
    # Thin pass-through kept only for backwards compatibility; see the
    # newer ssh_run_command() for the maintained implementation.
    streams = ssh_client.exec_command(command_to_run)
    return streams
def ssh_run_command(ssh_client, command_to_run, prefix='', lines_queue=None,
                    print_flag=True, block_flag=True):
    """Runs the specified command on a remote machine

    :param ssh_client : SSH client provided by paramiko to run the command
    :param command_to_run: Command to execute
    :param prefix: text prepended to each debug-logged chunk of output
    :param lines_queue: Queue datastructure to buffer the result of execution
    :param print_flag: Flag that defines if the output of the command will be
        printed on screen
    :param block_flag: Defines if we block execution waiting for the running
        command to return its exit status
    :returns: the exit code of the command to be executed remotely and the
        combined stdout - stderr of the executed command. NOTE: when
        block_flag is False a bare int 0 is returned instead of a tuple.
    :rtype: tuple<int, str> (int when block_flag is False)
    :type ssh_client: paramiko.SSHClient
    :type command_to_run: str
    :type prefix: str
    :type lines_queue: queue<str>
    :type print_flag: bool
    :type block_flag: bool
    :exception SSHException: Raised when fails to open a channel from
        ssh_client object
    :exception UnicodeDecodeError: Raised when it fails to decode received
        data into UTF-8
    :exception socket.timeout: When the channel remains idle for a timeout
        period (in sec) defined in implementation of the function
    """
    channel = ssh_client.get_transport().open_session()
    bufferSize = 4*1024
    # Idle timeout (seconds): recv() raises socket.timeout past this.
    channel_timeout = 300
    channel.setblocking(1)
    # Merge stderr into stdout so one recv loop captures both streams.
    channel.set_combine_stderr(True)
    channel.settimeout(channel_timeout)
    channel.exec_command(command_to_run)
    if not block_flag:
        # Fire-and-forget: the channel is intentionally left open.
        return 0
    channel_output = ''
    while not channel.exit_status_ready():
        data = ''
        data = channel.recv(bufferSize).decode('utf-8')
        while data:
            channel_output += data
            if print_flag:
                logging.debug('{0} {1}'.format(prefix, data).rstrip())
            if lines_queue is not None:
                for line in data.splitlines():
                    lines_queue.put(line)
            data = channel.recv(bufferSize).decode('utf-8')
    # NOTE(review): output arriving between the last recv() and
    # exit_status_ready() turning true is never drained, so the tail of a
    # command's output can be lost -- confirm whether callers depend on it.
    channel_exit_status = channel.recv_exit_status()
    channel.close()
    return (channel_exit_status, channel_output)
def ssh_delete_file_if_exists(ipaddr, user, passwd, remote_file,
                              remote_port=22):
    """Deletes a file on a remote machine, if it exists.

    :param ipaddr: IP address of the remote machine
    :param user: username of the remote user
    :param passwd: password of the remote user
    :param remote_file: remote file to remove, full path must be used.
    :param remote_port: port to perform sftp from
    :type ipaddr: str
    :type user: str
    :type passwd: str
    :type remote_file: str
    :type remote_port: int
    """
    transport_layer = paramiko.Transport((ipaddr, remote_port))
    try:
        transport_layer.connect(username=user, password=passwd)
        sftp = paramiko.SFTPClient.from_transport(transport_layer)
        try:
            sftp.remove(remote_file)
            logging.info('[netutil] [delete_file_if_exists]: file {0} removed'.
                         format(remote_file))
        except IOError as err:
            # Bug fix: the original formatted IOError.strerror on the CLASS
            # (a descriptor object), never the actual error of this failure.
            logging.error(
                '[netutil] [delete_file_if_exists] IOError: The given '
                'remote_file is not valid. Error message: {0}'.format(
                    err.strerror))
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            logging.error(
                '[netutil] [delete_file_if_exists] Error: Unknown Error '
                'occured while was trying to remove remote file.')
        finally:
            sftp.close()
    finally:
        transport_layer.close()
    # Bug fix: this is a normal progress message; it was logged at ERROR.
    logging.info(
        '[netutil] [ssh_delete_file_if_exists]: transport layer closed')
# Remove ssh_run_command_old from netutil
# Copyright (c) 2015 Intracom S.A. Telecom Solutions. All rights reserved.
#
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution,
# and is available at http://www.eclipse.org/legal/epl-v10.html
""" General network utilities """
import logging
import os
import paramiko
import select
import socket
import stat
import time
def ssh_connect_or_return(ipaddr, user, passwd, maxretries, remote_port=22):
    """Opens an SSH connection and returns a connection object; retries up
    to maxretries times before giving up.

    :param ipaddr: IP address of the remote machine
    :param user: username of the remote user
    :param passwd: password of the remote user
    :param maxretries: maximum number of times to connect
    :param remote_port: SSH port of the remote machine
    :returns: a connected ssh handle, or None on failure (the previous
        docstring claimed -1, but the code has always returned None)
    :rtype: paramiko.SSHClient or None
    :type ipaddr: str
    :type user: str
    :type passwd: str
    :type maxretries: int
    :type remote_port: int
    """
    for retries in range(1, maxretries + 1):
        logging.info(
            '[netutil] Trying to connect to {0}:{1} ({2}/{3})'.
            format(ipaddr, remote_port, retries, maxretries))
        try:
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(hostname=ipaddr, port=remote_port,
                        username=user, password=passwd)
            logging.info('[netutil] Connected to {0} '.format(ipaddr))
            return ssh
        except paramiko.AuthenticationException:
            logging.error(
                '[netutil] Authentication failed when connecting to {0}'.
                format(ipaddr))
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt and
            # SystemExit can still abort the retry loop.
            logging.error(
                '[netutil] Could not SSH to {0}, waiting for it to start'.
                format(ipaddr))
        time.sleep(2)
    # All attempts exhausted without a connection.
    logging.info('[netutil] Could not connect to {0}. Returning'
                 .format(ipaddr))
    return None
def ssh_copy_file_to_target(ipaddr, user, passwd, local_file, remote_file,
                            remote_port=22):
    """Copies a local file onto a remote machine over SFTP.

    :param ipaddr: IP address of the remote machine
    :param user: username of the remote user
    :param passwd: password of the remote user
    :param local_file: file from local machine to copy, full location required
    :param remote_file: remote destination, full location required
        i.e /tmp/foo.txt
    :param remote_port: port to perform sftp from
    :type ipaddr: str
    :type user: str
    :type passwd: str
    :type local_file: str
    :type remote_file: str
    :type remote_port: int
    """
    transport_layer = paramiko.Transport((ipaddr, remote_port))
    try:
        transport_layer.connect(username=user, password=passwd)
        sftp = paramiko.SFTPClient.from_transport(transport_layer)
        try:
            sftp.put(local_file, remote_file)
        finally:
            sftp.close()
    finally:
        # Bug fix: previously the transport was only closed on the success
        # path, leaking the socket whenever connect() or put() raised.
        transport_layer.close()
def copy_directory_to_target(ipaddr, user, passwd, local_path, remote_path,
                             remote_port=22):
    """Recursively copy a local directory tree onto a remote machine over SFTP.

    :param ipaddr: IP address of the remote machine
    :param user: username of the remote user
    :param passwd: password of the remote user
    :param local_path: directory path from local machine to copy, full location
        required
    :param remote_path: remote destination, full location required
    :param remote_port: port to perform sftp from
    :type ipaddr: str
    :type user: str
    :type passwd: str
    :type local_path: str
    :type remote_path: str
    :type remote_port: int
    """
    # recursively upload a full directory
    if local_path.endswith('/'):
        local_path = local_path[:-1]
    transport_layer = paramiko.Transport((ipaddr, remote_port))
    transport_layer.connect(username=user, password=passwd)
    sftp = paramiko.SFTPClient.from_transport(transport_layer)
    # NOTE(review): os.chdir changes the working directory of the whole
    # process so that os.walk yields paths relative to local_path's parent.
    # This side effect is visible to every other thread -- confirm callers
    # tolerate it.
    os.chdir(os.path.split(local_path)[0])
    parent = os.path.split(local_path)[1]
    for walker in os.walk(parent):
        try:
            folder_to_make = os.path.join(remote_path, walker[0])
            sftp.mkdir(folder_to_make)
        except:
            # Bare except: mkdir fails when the directory already exists,
            # but any other SFTP error is silently swallowed here too.
            pass
        # walker[2] is the list of plain files inside directory walker[0].
        for curr_file in walker[2]:
            local_file = os.path.join(walker[0], curr_file)
            remote_file = os.path.join(remote_path, walker[0], curr_file)
            sftp.put(local_file, remote_file)
    sftp.close()
    transport_layer.close()
def make_remote_file_executable(ipaddr, user, passwd, remote_file,
                                remote_port=22):
    """Makes a remote file executable (owner read/write/execute).

    :param ipaddr: IP address of the remote machine
    :param user: username of the remote user
    :param passwd: password of the remote user
    :param remote_file: remote file to make executable
    :param remote_port: port to perform sftp from
    :type ipaddr: str
    :type user: str
    :type passwd: str
    :type remote_file: str
    :type remote_port: int
    """
    transport_layer = paramiko.Transport((ipaddr, remote_port))
    try:
        transport_layer.connect(username=user, password=passwd)
        sftp = paramiko.SFTPClient.from_transport(transport_layer)
        try:
            # Owner rwx; group/other permission bits are cleared.
            sftp.chmod(remote_file,
                       stat.S_IEXEC | stat.S_IREAD | stat.S_IWRITE)
        finally:
            sftp.close()
    finally:
        # Bug fix: release the transport even when connect/chmod raises,
        # instead of leaking the socket.
        transport_layer.close()
def create_remote_directory(ipaddr, user, passwd, remote_path, remote_port=22):
    """Creates a directory on a remote machine if it does not already exist.

    :param ipaddr: IP address of the remote machine
    :param user: username of the remote user
    :param passwd: password of the remote user
    :param remote_path: directory to create on the remote machine
        (docstring previously said "maximum number of times to connect",
        a copy-paste error)
    :param remote_port: port to perform sftp from
    :type ipaddr: str
    :type user: str
    :type passwd: str
    :type remote_path: str
    :type remote_port: int
    """
    transport_layer = paramiko.Transport((ipaddr, remote_port))
    try:
        transport_layer.connect(username=user, password=passwd)
        sftp = paramiko.SFTPClient.from_transport(transport_layer)
        try:
            try:
                # Test if remote_path exists
                sftp.chdir(remote_path)
            except IOError:
                # Create remote_path
                sftp.mkdir(remote_path)
                sftp.chdir(remote_path)
        finally:
            sftp.close()
    finally:
        # Bug fix: close the transport on every path, not only on success.
        transport_layer.close()
def isdir(path, sftp):
    """Tell whether a remote path refers to a directory.

    :param path: A string with the full path we want to check
    :param sftp: An sftp connection object (paramiko)
    :returns: True if the given path is a directory false otherwise.
    :rtype: bool
    :type path: str
    :type sftp: paramiko.SFTPClient
    """
    try:
        attributes = sftp.stat(path)
    except IOError:
        # stat failed (typically: path does not exist) -> not a directory.
        return False
    return stat.S_ISDIR(attributes.st_mode)
def remove_remote_directory(ipaddr, user, passwd, path, remote_port=22):
    """Removes recursively remote directories (removes all files and
    other sub-directories).

    :param ipaddr: IP address of the remote machine
    :param user: username of the remote user
    :param passwd: password of the remote user
    :param path: remote directory to remove (docstring previously
        documented a non-existent ``remote_file`` parameter)
    :param remote_port: port to perform sftp from
    :type ipaddr: str
    :type user: str
    :type passwd: str
    :type path: str
    :type remote_port: int
    """
    transport_layer = paramiko.Transport((ipaddr, remote_port))
    try:
        transport_layer.connect(username=user, password=passwd)
        sftp = paramiko.SFTPClient.from_transport(transport_layer)
        try:
            for file_item in sftp.listdir(path=path):
                filepath = os.path.join(path, file_item)
                if isdir(filepath, sftp):
                    # Bug fix: propagate remote_port. The recursive call
                    # previously omitted it, so nested sub-directories were
                    # always re-contacted on the default port 22.
                    remove_remote_directory(ipaddr, user, passwd, filepath,
                                            remote_port)
                else:
                    sftp.remove(filepath)
            sftp.rmdir(path)
        finally:
            sftp.close()
    finally:
        # Close the transport even when listdir/remove/rmdir raises.
        transport_layer.close()
def ssh_run_command(ssh_client, command_to_run, prefix='', lines_queue=None,
                    print_flag=True, block_flag=True):
    """Runs the specified command on a remote machine

    :param ssh_client : SSH client provided by paramiko to run the command
    :param command_to_run: Command to execute
    :param prefix: text prepended to each debug-logged chunk of output
    :param lines_queue: Queue datastructure to buffer the result of execution
    :param print_flag: Flag that defines if the output of the command will be
        printed on screen
    :param block_flag: Defines if we block execution waiting for the running
        command to return its exit status
    :returns: the exit code of the command to be executed remotely and the
        combined stdout - stderr of the executed command. NOTE: when
        block_flag is False a bare int 0 is returned instead of a tuple.
    :rtype: tuple<int, str> (int when block_flag is False)
    :type ssh_client: paramiko.SSHClient
    :type command_to_run: str
    :type prefix: str
    :type lines_queue: queue<str>
    :type print_flag: bool
    :type block_flag: bool
    :exception SSHException: Raised when fails to open a channel from
        ssh_client object
    :exception UnicodeDecodeError: Raised when it fails to decode received
        data into UTF-8
    :exception socket.timeout: When the channel remains idle for a timeout
        period (in sec) defined in implementation of the function
    """
    channel = ssh_client.get_transport().open_session()
    bufferSize = 4*1024
    # Idle timeout (seconds): recv() raises socket.timeout past this.
    channel_timeout = 300
    channel.setblocking(1)
    # Merge stderr into stdout so one recv loop captures both streams.
    channel.set_combine_stderr(True)
    channel.settimeout(channel_timeout)
    channel.exec_command(command_to_run)
    if not block_flag:
        # Fire-and-forget: the channel is intentionally left open.
        return 0
    channel_output = ''
    while not channel.exit_status_ready():
        data = ''
        data = channel.recv(bufferSize).decode('utf-8')
        while data:
            channel_output += data
            if print_flag:
                logging.debug('{0} {1}'.format(prefix, data).rstrip())
            if lines_queue is not None:
                for line in data.splitlines():
                    lines_queue.put(line)
            data = channel.recv(bufferSize).decode('utf-8')
    # NOTE(review): output arriving between the last recv() and
    # exit_status_ready() turning true is never drained, so the tail of a
    # command's output can be lost -- confirm whether callers depend on it.
    channel_exit_status = channel.recv_exit_status()
    channel.close()
    return (channel_exit_status, channel_output)
def ssh_delete_file_if_exists(ipaddr, user, passwd, remote_file,
                              remote_port=22):
    """Deletes a file on a remote machine, if it exists.

    :param ipaddr: IP address of the remote machine
    :param user: username of the remote user
    :param passwd: password of the remote user
    :param remote_file: remote file to remove, full path must be used.
    :param remote_port: port to perform sftp from
    :type ipaddr: str
    :type user: str
    :type passwd: str
    :type remote_file: str
    :type remote_port: int
    """
    transport_layer = paramiko.Transport((ipaddr, remote_port))
    try:
        transport_layer.connect(username=user, password=passwd)
        sftp = paramiko.SFTPClient.from_transport(transport_layer)
        try:
            sftp.remove(remote_file)
            logging.info('[netutil] [delete_file_if_exists]: file {0} removed'.
                         format(remote_file))
        except IOError as err:
            # Bug fix: the original formatted IOError.strerror on the CLASS
            # (a descriptor object), never the actual error of this failure.
            logging.error(
                '[netutil] [delete_file_if_exists] IOError: The given '
                'remote_file is not valid. Error message: {0}'.format(
                    err.strerror))
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            logging.error(
                '[netutil] [delete_file_if_exists] Error: Unknown Error '
                'occured while was trying to remove remote file.')
        finally:
            sftp.close()
    finally:
        transport_layer.close()
    # Bug fix: this is a normal progress message; it was logged at ERROR.
    logging.info(
        '[netutil] [ssh_delete_file_if_exists]: transport layer closed')
# --- end of netutil module ---
"""
Tag definitions.
"""
DEFAULT_STOP_TAG = 'UNDEF'
def make_string(seq):
    """Render a sequence of character codes as a printable string.

    Codes outside 32..255 are screened out rather than raising; if no
    printable code remains, the plain repr of the sequence is returned.

    :param seq: iterable of integer character codes (e.g. bytes or a list)
    :returns: printable string, or str(seq) when nothing was printable
    """
    # ''.join() replaces the original quadratic "string += chr(c)" loop.
    string = ''.join(chr(c) for c in seq if 32 <= c < 256)
    # If no printing chars
    if not string:
        return str(seq)
    return string
def make_string_uc(seq):
    """
    Special version to deal with the code in the first 8 bytes of a user comment.
    First 8 bytes gives coding system e.g. ASCII vs. JIS vs Unicode.
    """
    #code = seq[0:8]
    # Drop the 8-byte character-code header and decode the remainder.
    seq = seq[8:]
    # Of course, this is only correct if ASCII, and the standard explicitly
    # allows JIS and Unicode.
    # NOTE(review): make_string() is applied twice; the inner call returns a
    # str, so the outer call compares ints against characters. Under Python 3
    # that raises TypeError -- confirm the intended Python version and whether
    # a single make_string(seq) call was meant.
    return make_string( make_string(seq) )
# field type descriptions as (length, abbreviation, full name) tuples
# Index into this tuple is the EXIF field-type code; entry 0 is a filler
# since type codes start at 1.
FIELD_TYPES = (
    (0, 'X', 'Proprietary'), # no such type
    (1, 'B', 'Byte'),
    (1, 'A', 'ASCII'),
    (2, 'S', 'Short'),
    (4, 'L', 'Long'),
    (8, 'R', 'Ratio'),
    (1, 'SB', 'Signed Byte'),
    (1, 'U', 'Undefined'),
    (2, 'SS', 'Signed Short'),
    (4, 'SL', 'Signed Long'),
    (8, 'SR', 'Signed Ratio'),
    )
# dictionary of main EXIF tag names
# first element of tuple is tag name, optional second element is
# another dictionary giving names to values
# (the optional second element may also be a callable, e.g. make_string,
# used to post-process the raw value)
EXIF_TAGS = {
    0x0100: ('ImageWidth', ),
    0x0101: ('ImageLength', ),
    0x0102: ('BitsPerSample', ),
    0x0103: ('Compression',
             {1: 'Uncompressed',
              2: 'CCITT 1D',
              3: 'T4/Group 3 Fax',
              4: 'T6/Group 4 Fax',
              5: 'LZW',
              6: 'JPEG (old-style)',
              7: 'JPEG',
              8: 'Adobe Deflate',
              9: 'JBIG B&W',
              10: 'JBIG Color',
              32766: 'Next',
              32769: 'Epson ERF Compressed',
              32771: 'CCIRLEW',
              32773: 'PackBits',
              32809: 'Thunderscan',
              32895: 'IT8CTPAD',
              32896: 'IT8LW',
              32897: 'IT8MP',
              32898: 'IT8BL',
              32908: 'PixarFilm',
              32909: 'PixarLog',
              32946: 'Deflate',
              32947: 'DCS',
              34661: 'JBIG',
              34676: 'SGILog',
              34677: 'SGILog24',
              34712: 'JPEG 2000',
              34713: 'Nikon NEF Compressed',
              65000: 'Kodak DCR Compressed',
              65535: 'Pentax PEF Compressed'}),
    0x0106: ('PhotometricInterpretation', ),
    0x0107: ('Thresholding', ),
    0x010A: ('FillOrder', ),
    0x010D: ('DocumentName', ),
    0x010E: ('ImageDescription', ),
    0x010F: ('Make', ),
    0x0110: ('Model', ),
    0x0111: ('StripOffsets', ),
    0x0112: ('Orientation',
             {1: 'Horizontal (normal)',
              2: 'Mirrored horizontal',
              3: 'Rotated 180',
              4: 'Mirrored vertical',
              5: 'Mirrored horizontal then rotated 90 CCW',
              6: 'Rotated 90 CCW',
              7: 'Mirrored horizontal then rotated 90 CW',
              8: 'Rotated 90 CW'}),
    0x0115: ('SamplesPerPixel', ),
    0x0116: ('RowsPerStrip', ),
    0x0117: ('StripByteCounts', ),
    0x011A: ('XResolution', ),
    0x011B: ('YResolution', ),
    0x011C: ('PlanarConfiguration', ),
    0x011D: ('PageName', make_string),
    0x0128: ('ResolutionUnit',
             {1: 'Not Absolute',
              2: 'Pixels/Inch',
              3: 'Pixels/Centimeter'}),
    0x012D: ('TransferFunction', ),
    0x0131: ('Software', ),
    0x0132: ('DateTime', ),
    0x013B: ('Artist', ),
    0x013E: ('WhitePoint', ),
    0x013F: ('PrimaryChromaticities', ),
    0x0156: ('TransferRange', ),
    0x0200: ('JPEGProc', ),
    0x0201: ('JPEGInterchangeFormat', ),
    0x0202: ('JPEGInterchangeFormatLength', ),
    0x0211: ('YCbCrCoefficients', ),
    0x0212: ('YCbCrSubSampling', ),
    0x0213: ('YCbCrPositioning',
             {1: 'Centered',
              2: 'Co-sited'}),
    0x0214: ('ReferenceBlackWhite', ),
    0x4746: ('Rating', ),
    0x828D: ('CFARepeatPatternDim', ),
    0x828E: ('CFAPattern', ),
    0x828F: ('BatteryLevel', ),
    0x8298: ('Copyright', ),
    0x829A: ('ExposureTime', ),
    0x829D: ('FNumber', ),
    0x83BB: ('IPTC/NAA', ),
    0x8769: ('ExifOffset', ),
    0x8773: ('InterColorProfile', ),
    0x8822: ('ExposureProgram',
             {0: 'Unidentified',
              1: 'Manual',
              2: 'Program Normal',
              3: 'Aperture Priority',
              4: 'Shutter Priority',
              5: 'Program Creative',
              6: 'Program Action',
              7: 'Portrait Mode',
              8: 'Landscape Mode'}),
    0x8824: ('SpectralSensitivity', ),
    0x8825: ('GPSInfo', ),
    0x8827: ('ISOSpeedRatings', ),
    0x8828: ('OECF', ),
    0x9000: ('ExifVersion', make_string),
    0x9003: ('DateTimeOriginal', ),
    0x9004: ('DateTimeDigitized', ),
    0x9101: ('ComponentsConfiguration',
             {0: '',
              1: 'Y',
              2: 'Cb',
              3: 'Cr',
              4: 'Red',
              5: 'Green',
              6: 'Blue'}),
    0x9102: ('CompressedBitsPerPixel', ),
    0x9201: ('ShutterSpeedValue', ),
    0x9202: ('ApertureValue', ),
    0x9203: ('BrightnessValue', ),
    0x9204: ('ExposureBiasValue', ),
    0x9205: ('MaxApertureValue', ),
    0x9206: ('SubjectDistance', ),
    0x9207: ('MeteringMode',
             {0: 'Unidentified',
              1: 'Average',
              2: 'CenterWeightedAverage',
              3: 'Spot',
              4: 'MultiSpot',
              5: 'Pattern',
              6: 'Partial',
              255: 'other'}),
    0x9208: ('LightSource',
             {0: 'Unknown',
              1: 'Daylight',
              2: 'Fluorescent',
              3: 'Tungsten (incandescent light)',
              4: 'Flash',
              9: 'Fine weather',
              10: 'Cloudy weather',
              11: 'Shade',
              12: 'Daylight fluorescent (D 5700 - 7100K)',
              13: 'Day white fluorescent (N 4600 - 5400K)',
              14: 'Cool white fluorescent (W 3900 - 4500K)',
              15: 'White fluorescent (WW 3200 - 3700K)',
              17: 'Standard light A',
              18: 'Standard light B',
              19: 'Standard light C',
              20: 'D55',
              21: 'D65',
              22: 'D75',
              23: 'D50',
              24: 'ISO studio tungsten',
              255: 'other light source',}),
    0x9209: ('Flash',
             {0: 'Flash did not fire',
              1: 'Flash fired',
              5: 'Strobe return light not detected',
              7: 'Strobe return light detected',
              9: 'Flash fired, compulsory flash mode',
              13: 'Flash fired, compulsory flash mode, return light not detected',
              15: 'Flash fired, compulsory flash mode, return light detected',
              16: 'Flash did not fire, compulsory flash mode',
              24: 'Flash did not fire, auto mode',
              25: 'Flash fired, auto mode',
              29: 'Flash fired, auto mode, return light not detected',
              31: 'Flash fired, auto mode, return light detected',
              32: 'No flash function',
              65: 'Flash fired, red-eye reduction mode',
              69: 'Flash fired, red-eye reduction mode, return light not detected',
              71: 'Flash fired, red-eye reduction mode, return light detected',
              73: 'Flash fired, compulsory flash mode, red-eye reduction mode',
              77: 'Flash fired, compulsory flash mode, red-eye reduction mode, return light not detected',
              79: 'Flash fired, compulsory flash mode, red-eye reduction mode, return light detected',
              89: 'Flash fired, auto mode, red-eye reduction mode',
              93: 'Flash fired, auto mode, return light not detected, red-eye reduction mode',
              95: 'Flash fired, auto mode, return light detected, red-eye reduction mode'}),
    0x920A: ('FocalLength', ),
    0x9214: ('SubjectArea', ),
    0x927C: ('MakerNote', ),
    0x9286: ('UserComment', make_string_uc),
    0x9290: ('SubSecTime', ),
    0x9291: ('SubSecTimeOriginal', ),
    0x9292: ('SubSecTimeDigitized', ),
    # used by Windows Explorer
    0x9C9B: ('XPTitle', ),
    0x9C9C: ('XPComment', ),
    0x9C9D: ('XPAuthor', ), #(ignored by Windows Explorer if Artist exists)
    0x9C9E: ('XPKeywords', ),
    0x9C9F: ('XPSubject', ),
    0xA000: ('FlashPixVersion', make_string),
    0xA001: ('ColorSpace',
             {1: 'sRGB',
              2: 'Adobe RGB',
              65535: 'Uncalibrated'}),
    0xA002: ('ExifImageWidth', ),
    0xA003: ('ExifImageLength', ),
    0xA005: ('InteroperabilityOffset', ),
    0xA20B: ('FlashEnergy', ),               # 0x920B in TIFF/EP
    0xA20C: ('SpatialFrequencyResponse', ),  # 0x920C
    0xA20E: ('FocalPlaneXResolution', ),     # 0x920E
    0xA20F: ('FocalPlaneYResolution', ),     # 0x920F
    0xA210: ('FocalPlaneResolutionUnit', ),  # 0x9210
    0xA214: ('SubjectLocation', ),           # 0x9214
    0xA215: ('ExposureIndex', ),             # 0x9215
    0xA217: ('SensingMethod',                # 0x9217
             {1: 'Not defined',
              2: 'One-chip color area',
              3: 'Two-chip color area',
              4: 'Three-chip color area',
              5: 'Color sequential area',
              7: 'Trilinear',
              8: 'Color sequential linear'}),
    0xA300: ('FileSource',
             {1: 'Film Scanner',
              2: 'Reflection Print Scanner',
              3: 'Digital Camera'}),
    0xA301: ('SceneType',
             {1: 'Directly Photographed'}),
    0xA302: ('CVAPattern', ),
    0xA401: ('CustomRendered',
             {0: 'Normal',
              1: 'Custom'}),
    0xA402: ('ExposureMode',
             {0: 'Auto Exposure',
              1: 'Manual Exposure',
              2: 'Auto Bracket'}),
    0xA403: ('WhiteBalance',
             {0: 'Auto',
              1: 'Manual'}),
    0xA404: ('DigitalZoomRatio', ),
    0xA405: ('FocalLengthIn35mmFilm', ),
    0xA406: ('SceneCaptureType',
             {0: 'Standard',
              1: 'Landscape',
              2: 'Portrait',
              3: 'Night)'}),
    0xA407: ('GainControl',
             {0: 'None',
              1: 'Low gain up',
              2: 'High gain up',
              3: 'Low gain down',
              4: 'High gain down'}),
    0xA408: ('Contrast',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0xA409: ('Saturation',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0xA40A: ('Sharpness',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0xA40B: ('DeviceSettingDescription', ),
    0xA40C: ('SubjectDistanceRange', ),
    0xA500: ('Gamma', ),
    0xC4A5: ('PrintIM', ),
    0xEA1C: ('Padding', ),
    }
# interoperability tags
# Tags found in the EXIF Interoperability IFD; same (name, [values]) tuple
# layout as EXIF_TAGS.
INTR_TAGS = {
    0x0001: ('InteroperabilityIndex', ),
    0x0002: ('InteroperabilityVersion', ),
    0x1000: ('RelatedImageFileFormat', ),
    0x1001: ('RelatedImageWidth', ),
    0x1002: ('RelatedImageLength', ),
    }
# GPS tags
# Tags found in the GPS IFD; same (name, [values]) tuple layout as EXIF_TAGS.
GPS_TAGS = {
    0x0000: ('GPSVersionID', ),
    0x0001: ('GPSLatitudeRef', ),
    0x0002: ('GPSLatitude', ),
    0x0003: ('GPSLongitudeRef', ),
    0x0004: ('GPSLongitude', ),
    0x0005: ('GPSAltitudeRef', ),
    0x0006: ('GPSAltitude', ),
    0x0007: ('GPSTimeStamp', ),
    0x0008: ('GPSSatellites', ),
    0x0009: ('GPSStatus', ),
    0x000A: ('GPSMeasureMode', ),
    0x000B: ('GPSDOP', ),
    0x000C: ('GPSSpeedRef', ),
    0x000D: ('GPSSpeed', ),
    0x000E: ('GPSTrackRef', ),
    0x000F: ('GPSTrack', ),
    0x0010: ('GPSImgDirectionRef', ),
    0x0011: ('GPSImgDirection', ),
    0x0012: ('GPSMapDatum', ),
    0x0013: ('GPSDestLatitudeRef', ),
    0x0014: ('GPSDestLatitude', ),
    0x0015: ('GPSDestLongitudeRef', ),
    0x0016: ('GPSDestLongitude', ),
    0x0017: ('GPSDestBearingRef', ),
    0x0018: ('GPSDestBearing', ),
    0x0019: ('GPSDestDistanceRef', ),
    0x001A: ('GPSDestDistance', ),
    0x001B: ('GPSProcessingMethod', ),
    0x001C: ('GPSAreaInformation', ),
    0x001D: ('GPSDate', ),
    0x001E: ('GPSDifferential', ),
    }
# Ignore these tags when quick processing
# 0x927C is MakerNote Tags
# 0x9286 is user comment
IGNORE_TAGS = (0x9286, 0x927C)
def nikon_ev_bias(seq):
    """
    First digit seems to be in steps of 1/6 EV.
    Does the third value mean the step size? It is usually 6,
    but it is 12 for the ExposureDifference.
    Check for an error condition that could cause a crash.
    This only happens if something has gone really wrong in
    reading the Nikon MakerNote.
    http://tomtia.plala.jp/DigitalCamera/MakerNote/index.asp

    :param seq: list of at least four integer bytes from the MakerNote
    :returns: human-readable EV bias string, or '' when seq is too short
    """
    if len( seq ) < 4 :
        return ''
    # Common fixed encodings first.
    if seq == [252, 1, 6, 0]:
        return "-2/3 EV"
    if seq == [253, 1, 6, 0]:
        return "-1/2 EV"
    if seq == [254, 1, 6, 0]:
        return "-1/3 EV"
    if seq == [0, 1, 6, 0]:
        return "0 EV"
    if seq == [2, 1, 6, 0]:
        return "+1/3 EV"
    if seq == [3, 1, 6, 0]:
        return "+1/2 EV"
    if seq == [4, 1, 6, 0]:
        return "+2/3 EV"
    # Handle combinations not in the table.
    a = seq[0]
    # Causes headaches for the +/- logic, so special case it.
    if a == 0:
        return "0 EV"
    if a > 127:
        # Two's-complement style negative value.
        a = 256 - a
        ret_str = "-"
    else:
        ret_str = "+"
    b = seq[2]  # Assume third value means the step size
    # Bug fix: use floor division so the whole-EV part stays an integer
    # under Python 3 ("+1 EV", not "+1.0 EV"); identical under Python 2.
    whole = a // b
    a = a % b
    if whole != 0:
        ret_str = ret_str + str(whole) + " "
    if a == 0:
        ret_str = ret_str + "EV"
    else:
        # Ratio is defined elsewhere in this module.
        r = Ratio(a, b)
        ret_str = ret_str + repr(r) + " EV"
    return ret_str
# Nikon E99x MakerNote Tags
# Same (name, [values-or-callable]) tuple layout as EXIF_TAGS.
MAKERNOTE_NIKON_NEWER_TAGS = {
    0x0001: ('MakernoteVersion', make_string),  # Sometimes binary
    0x0002: ('ISOSetting', make_string),
    0x0003: ('ColorMode', ),
    0x0004: ('Quality', ),
    0x0005: ('Whitebalance', ),
    0x0006: ('ImageSharpening', ),
    0x0007: ('FocusMode', ),
    0x0008: ('FlashSetting', ),
    0x0009: ('AutoFlashMode', ),
    0x000B: ('WhiteBalanceBias', ),
    0x000C: ('WhiteBalanceRBCoeff', ),
    0x000D: ('ProgramShift', nikon_ev_bias),
    # Nearly the same as the other EV vals, but step size is 1/12 EV (?)
    0x000E: ('ExposureDifference', nikon_ev_bias),
    0x000F: ('ISOSelection', ),
    0x0010: ('DataDump', ),
    0x0011: ('NikonPreview', ),
    0x0012: ('FlashCompensation', nikon_ev_bias),
    0x0013: ('ISOSpeedRequested', ),
    0x0016: ('PhotoCornerCoordinates', ),
    # 0x0017: Unknown, but most likely an EV value
    0x0018: ('FlashBracketCompensationApplied', nikon_ev_bias),
    0x0019: ('AEBracketCompensationApplied', ),
    0x001A: ('ImageProcessing', ),
    0x001B: ('CropHiSpeed', ),
    0x001D: ('SerialNumber', ),  # Conflict with 0x00A0 ?
    0x001E: ('ColorSpace', ),
    0x001F: ('VRInfo', ),
    0x0020: ('ImageAuthentication', ),
    0x0022: ('ActiveDLighting', ),
    0x0023: ('PictureControl', ),
    0x0024: ('WorldTime', ),
    0x0025: ('ISOInfo', ),
    0x0080: ('ImageAdjustment', ),
    0x0081: ('ToneCompensation', ),
    0x0082: ('AuxiliaryLens', ),
    0x0083: ('LensType', ),
    0x0084: ('LensMinMaxFocalMaxAperture', ),
    0x0085: ('ManualFocusDistance', ),
    0x0086: ('DigitalZoomFactor', ),
    0x0087: ('FlashMode',
             {0x00: 'Did Not Fire',
              0x01: 'Fired, Manual',
              0x07: 'Fired, External',
              0x08: 'Fired, Commander Mode ',
              0x09: 'Fired, TTL Mode'}),
    0x0088: ('AFFocusPosition',
             {0x0000: 'Center',
              0x0100: 'Top',
              0x0200: 'Bottom',
              0x0300: 'Left',
              0x0400: 'Right'}),
    0x0089: ('BracketingMode',
             {0x00: 'Single frame, no bracketing',
              0x01: 'Continuous, no bracketing',
              0x02: 'Timer, no bracketing',
              0x10: 'Single frame, exposure bracketing',
              0x11: 'Continuous, exposure bracketing',
              0x12: 'Timer, exposure bracketing',
              0x40: 'Single frame, white balance bracketing',
              0x41: 'Continuous, white balance bracketing',
              0x42: 'Timer, white balance bracketing'}),
    0x008A: ('AutoBracketRelease', ),
    0x008B: ('LensFStops', ),
    0x008C: ('NEFCurve1', ),  # ExifTool calls this 'ContrastCurve'
    0x008D: ('ColorMode', ),
    0x008F: ('SceneMode', ),
    0x0090: ('LightingType', ),
    0x0091: ('ShotInfo', ),  # First 4 bytes are a version number in ASCII
    0x0092: ('HueAdjustment', ),
    # ExifTool calls this 'NEFCompression', should be 1-4
    0x0093: ('Compression', ),
    0x0094: ('Saturation',
             {-3: 'B&W',
              -2: '-2',
              -1: '-1',
              0: '0',
              1: '1',
              2: '2'}),
    0x0095: ('NoiseReduction', ),
    0x0096: ('NEFCurve2', ),  # ExifTool calls this 'LinearizationTable'
    0x0097: ('ColorBalance', ),  # First 4 bytes are a version number in ASCII
    0x0098: ('LensData', ),  # First 4 bytes are a version number in ASCII
    0x0099: ('RawImageCenter', ),
    0x009A: ('SensorPixelSize', ),
    0x009C: ('Scene Assist', ),
    0x009E: ('RetouchHistory', ),
    0x00A0: ('SerialNumber', ),
    0x00A2: ('ImageDataSize', ),
    # 00A3: unknown - a single byte 0
    # 00A4: In NEF, looks like a 4 byte ASCII version number ('0200')
    0x00A5: ('ImageCount', ),
    0x00A6: ('DeletedImageCount', ),
    0x00A7: ('TotalShutterReleases', ),
    # First 4 bytes are a version number in ASCII, with version specific
    # info to follow.  Its hard to treat it as a string due to embedded nulls.
    0x00A8: ('FlashInfo', ),
    0x00A9: ('ImageOptimization', ),
    0x00AA: ('Saturation', ),
    0x00AB: ('DigitalVariProgram', ),
    0x00AC: ('ImageStabilization', ),
    0x00AD: ('Responsive AF', ),  # 'AFResponse'
    0x00B0: ('MultiExposure', ),
    0x00B1: ('HighISONoiseReduction', ),
    0x00B7: ('AFInfo', ),
    0x00B8: ('FileInfo', ),
    # 00B9: unknown
    0x0100: ('DigitalICE', ),
    0x0103: ('PreviewCompression',
             {1: 'Uncompressed',
              2: 'CCITT 1D',
              3: 'T4/Group 3 Fax',
              4: 'T6/Group 4 Fax',
              5: 'LZW',
              6: 'JPEG (old-style)',
              7: 'JPEG',
              8: 'Adobe Deflate',
              9: 'JBIG B&W',
              10: 'JBIG Color',
              32766: 'Next',
              32769: 'Epson ERF Compressed',
              32771: 'CCIRLEW',
              32773: 'PackBits',
              32809: 'Thunderscan',
              32895: 'IT8CTPAD',
              32896: 'IT8LW',
              32897: 'IT8MP',
              32898: 'IT8BL',
              32908: 'PixarFilm',
              32909: 'PixarLog',
              32946: 'Deflate',
              32947: 'DCS',
              34661: 'JBIG',
              34676: 'SGILog',
              34677: 'SGILog24',
              34712: 'JPEG 2000',
              34713: 'Nikon NEF Compressed',
              65000: 'Kodak DCR Compressed',
              65535: 'Pentax PEF Compressed',}),
    0x0201: ('PreviewImageStart', ),
    0x0202: ('PreviewImageLength', ),
    0x0213: ('PreviewYCbCrPositioning',
             {1: 'Centered',
              2: 'Co-sited'}),
    0x0E09: ('NikonCaptureVersion', ),
    0x0E0E: ('NikonCaptureOffsets', ),
    0x0E10: ('NikonScan', ),
    0x0E22: ('NEFBitDepth', ),
    }
# Tags for older Nikon (E99x-era predecessors) MakerNotes; same layout
# as EXIF_TAGS.
MAKERNOTE_NIKON_OLDER_TAGS = {
    0x0003: ('Quality',
             {1: 'VGA Basic',
              2: 'VGA Normal',
              3: 'VGA Fine',
              4: 'SXGA Basic',
              5: 'SXGA Normal',
              6: 'SXGA Fine'}),
    0x0004: ('ColorMode',
             {1: 'Color',
              2: 'Monochrome'}),
    0x0005: ('ImageAdjustment',
             {0: 'Normal',
              1: 'Bright+',
              2: 'Bright-',
              3: 'Contrast+',
              4: 'Contrast-'}),
    0x0006: ('CCDSpeed',
             {0: 'ISO 80',
              2: 'ISO 160',
              4: 'ISO 320',
              5: 'ISO 100'}),
    0x0007: ('WhiteBalance',
             {0: 'Auto',
              1: 'Preset',
              2: 'Daylight',
              3: 'Incandescent',
              4: 'Fluorescent',
              5: 'Cloudy',
              6: 'Speed Light'}),
    }
def olympus_special_mode(v):
    """Decode the Olympus SpecialMode MakerNote tag value.

    A well-formed value is a sequence of three numbers: shooting mode,
    shot number within the sequence, and panorama direction.  Returns a
    human-readable description string, or the raw value unchanged when it
    cannot be decoded.
    """
    mode1 = {
        0: 'Normal',
        1: 'Unknown',
        2: 'Fast',
        3: 'Panorama'}
    mode2 = {
        0: 'Non-panoramic',
        1: 'Left to right',
        2: 'Right to left',
        3: 'Bottom to top',
        4: 'Top to bottom'}
    # Guard against truncated/malformed MakerNote data: indexing v[2]
    # below would raise IndexError on a short sequence.
    if len(v) < 3:
        return v
    if v[0] not in mode1 or v[2] not in mode2:
        return v
    return '%s - sequence %d - %s' % (mode1[v[0]], v[1], mode2[v[2]])
# Olympus MakerNote tags: tag ID -> (name, optional decoder).  The decoder
# is either a value->description map or a callable applied to the raw value.
MAKERNOTE_OLYMPUS_TAGS = {
    # ah HAH! those sneeeeeaky bastids! this is how they get past the fact
    # that a JPEG thumbnail is not allowed in an uncompressed TIFF file
    0x0100: ('JPEGThumbnail', ),
    0x0200: ('SpecialMode', olympus_special_mode),
    0x0201: ('JPEGQual',
             {1: 'SQ',
              2: 'HQ',
              3: 'SHQ'}),
    0x0202: ('Macro',
             {0: 'Normal',
              1: 'Macro',
              2: 'SuperMacro'}),
    0x0203: ('BWMode',
             {0: 'Off',
              1: 'On'}),
    0x0204: ('DigitalZoom', ),
    0x0205: ('FocalPlaneDiagonal', ),
    0x0206: ('LensDistortionParams', ),
    0x0207: ('SoftwareRelease', ),
    0x0208: ('PictureInfo', ),
    0x0209: ('CameraID', make_string),  # print as string
    0x0F00: ('DataDump', ),
    0x0300: ('PreCaptureFrames', ),
    0x0404: ('SerialNumber', ),
    0x1000: ('ShutterSpeedValue', ),
    0x1001: ('ISOValue', ),
    0x1002: ('ApertureValue', ),
    0x1003: ('BrightnessValue', ),
    # BUGFIX: 0x1004 used to appear twice; the first, bare ('FlashMode', )
    # entry was dead code because the later duplicate key silently won.
    0x1004: ('FlashMode',
             {2: 'On',
              3: 'Off'}),
    0x1005: ('FlashDevice',
             {0: 'None',
              1: 'Internal',
              4: 'External',
              5: 'Internal + External'}),
    0x1006: ('ExposureCompensation', ),
    0x1007: ('SensorTemperature', ),
    0x1008: ('LensTemperature', ),
    0x100b: ('FocusMode',
             {0: 'Auto',
              1: 'Manual'}),
    0x1017: ('RedBalance', ),
    0x1018: ('BlueBalance', ),
    0x101a: ('SerialNumber', ),
    0x1023: ('FlashExposureComp', ),
    0x1026: ('ExternalFlashBounce',
             {0: 'No',
              1: 'Yes'}),
    0x1027: ('ExternalFlashZoom', ),
    0x1028: ('ExternalFlashMode', ),
    0x1029: ('Contrast int16u',
             {0: 'High',
              1: 'Normal',
              2: 'Low'}),
    0x102a: ('SharpnessFactor', ),
    0x102b: ('ColorControl', ),
    0x102c: ('ValidBits', ),
    0x102d: ('CoringFilter', ),
    0x102e: ('OlympusImageWidth', ),
    0x102f: ('OlympusImageHeight', ),
    0x1034: ('CompressionRatio', ),
    0x1035: ('PreviewImageValid',
             {0: 'No',
              1: 'Yes'}),
    0x1036: ('PreviewImageStart', ),
    0x1037: ('PreviewImageLength', ),
    0x1039: ('CCDScanMode',
             {0: 'Interlaced',
              1: 'Progressive'}),
    0x103a: ('NoiseReduction',
             {0: 'Off',
              1: 'On'}),
    0x103b: ('InfinityLensStep', ),
    0x103c: ('NearLensStep', ),
    # TODO - these need extra definitions
    # http://search.cpan.org/src/EXIFTOOL/Image-ExifTool-6.90/html/TagNames/Olympus.html
    0x2010: ('Equipment', ),
    0x2020: ('CameraSettings', ),
    0x2030: ('RawDevelopment', ),
    0x2040: ('ImageProcessing', ),
    0x2050: ('FocusInfo', ),
    0x3000: ('RawInfo ', ),
}
# 0x2020 CameraSettings
MAKERNOTE_OLYMPUS_TAG_0x2020 = {
0x0100: ('PreviewImageValid',
{0: 'No',
1: 'Yes'}),
0x0101: ('PreviewImageStart', ),
0x0102: ('PreviewImageLength', ),
0x0200: ('ExposureMode',
{1: 'Manual',
2: 'Program',
3: 'Aperture-priority AE',
4: 'Shutter speed priority AE',
5: 'Program-shift'}),
0x0201: ('AELock',
{0: 'Off',
1: 'On'}),
0x0202: ('MeteringMode',
{2: 'Center Weighted',
3: 'Spot',
5: 'ESP',
261: 'Pattern+AF',
515: 'Spot+Highlight control',
1027: 'Spot+Shadow control'}),
0x0300: ('MacroMode',
{0: 'Off',
1: 'On'}),
0x0301: ('FocusMode',
{0: 'Single AF',
1: 'Sequential shooting AF',
2: 'Continuous AF',
3: 'Multi AF',
10: 'MF'}),
0x0302: ('FocusProcess',
{0: 'AF Not Used',
1: 'AF Used'}),
0x0303: ('AFSearch',
{0: 'Not Ready',
1: 'Ready'}),
0x0304: ('AFAreas', ),
0x0401: ('FlashExposureCompensation', ),
0x0500: ('WhiteBalance2',
{0: 'Auto',
16: '7500K (Fine Weather with Shade)',
17: '6000K (Cloudy)',
18: '5300K (Fine Weather)',
20: '3000K (Tungsten light)',
21: '3600K (Tungsten light-like)',
33: '6600K (Daylight fluorescent)',
34: '4500K (Neutral white fluorescent)',
35: '4000K (Cool white fluorescent)',
48: '3600K (Tungsten light-like)',
256: 'Custom WB 1',
257: 'Custom WB 2',
258: 'Custom WB 3',
259: 'Custom WB 4',
512: 'Custom WB 5400K',
513: 'Custom WB 2900K',
514: 'Custom WB 8000K', }),
0x0501: ('WhiteBalanceTemperature', ),
0x0502: ('WhiteBalanceBracket', ),
0x0503: ('CustomSaturation', ), # (3 numbers: 1. CS Value, 2. Min, 3. Max)
0x0504: ('ModifiedSaturation',
{0: 'Off',
1: 'CM1 (Red Enhance)',
2: 'CM2 (Green Enhance)',
3: 'CM3 (Blue Enhance)',
4: 'CM4 (Skin Tones)'}),
0x0505: ('ContrastSetting', ), # (3 numbers: 1. Contrast, 2. Min, 3. Max)
0x0506: ('SharpnessSetting', ), # (3 numbers: 1. Sharpness, 2. Min, 3. Max)
0x0507: ('ColorSpace',
{0: 'sRGB',
1: 'Adobe RGB',
2: 'Pro Photo RGB'}),
0x0509: ('SceneMode',
{0: 'Standard',
6: 'Auto',
7: 'Sport',
8: 'Portrait',
9: 'Landscape+Portrait',
10: 'Landscape',
11: 'Night scene',
13: 'Panorama',
16: 'Landscape+Portrait',
17: 'Night+Portrait',
19: 'Fireworks',
20: 'Sunset',
22: 'Macro',
25: 'Documents',
26: 'Museum',
28: 'Beach&Snow',
30: 'Candle',
35: 'Underwater Wide1',
36: 'Underwater Macro',
39: 'High Key',
40: 'Digital Image Stabilization',
44: 'Underwater Wide2',
45: 'Low Key',
46: 'Children',
48: 'Nature Macro'}),
0x050a: ('NoiseReduction',
{0: 'Off',
1: 'Noise Reduction',
2: 'Noise Filter',
3: 'Noise Reduction + Noise Filter',
4: 'Noise Filter (ISO Boost)',
5: 'Noise Reduction + Noise Filter (ISO Boost)'}),
0x050b: ('DistortionCorrection',
{0: 'Off',
1: 'On'}),
0x050c: ('ShadingCompensation',
{0: 'Off',
1: 'On'}),
0x050d: ('CompressionFactor', ),
0x050f: ('Gradation',
{'-1 -1 1': 'Low Key',
'0 -1 1': 'Normal',
'1 -1 1': 'High Key'}),
0x0520: ('PictureMode',
{1: 'Vivid',
2: 'Natural',
3: 'Muted',
256: 'Monotone',
512: 'Sepia'}),
0x0521: ('PictureModeSaturation', ),
0x0522: ('PictureModeHue?', ),
0x0523: ('PictureModeContrast', ),
0x0524: ('PictureModeSharpness', ),
0x0525: ('PictureModeBWFilter',
{0: 'n/a',
1: 'Neutral',
2: 'Yellow',
3: 'Orange',
4: 'Red',
5: 'Green'}),
0x0526: ('PictureModeTone',
{0: 'n/a',
1: 'Neutral',
2: 'Sepia',
3: 'Blue',
4: 'Purple',
5: 'Green'}),
0x0600: ('Sequence', ), # 2 or 3 numbers: 1. Mode, 2. Shot number, 3. Mode bits
0x0601: ('PanoramaMode', ), # (2 numbers: 1. Mode, 2. Shot number)
0x0603: ('ImageQuality2',
{1: 'SQ',
2: 'HQ',
3: 'SHQ',
4: 'RAW'}),
0x0901: ('ManometerReading', ),
}
# Casio MakerNote tags: tag ID -> (name, optional value->description map).
MAKERNOTE_CASIO_TAGS = {
    0x0001: ('RecordingMode',
             {1: 'Single Shutter',
              2: 'Panorama',
              3: 'Night Scene',
              4: 'Portrait',
              5: 'Landscape'}),
    0x0002: ('Quality',
             {1: 'Economy',
              2: 'Normal',
              3: 'Fine'}),
    0x0003: ('FocusingMode',
             {2: 'Macro',
              3: 'Auto Focus',
              4: 'Manual Focus',
              5: 'Infinity'}),
    0x0004: ('FlashMode',
             {1: 'Auto',
              2: 'On',
              3: 'Off',
              4: 'Red Eye Reduction'}),
    0x0005: ('FlashIntensity',
             {11: 'Weak',
              13: 'Normal',
              15: 'Strong'}),
    0x0006: ('Object Distance', ),
    0x0007: ('WhiteBalance',
             {1: 'Auto',
              2: 'Tungsten',
              3: 'Daylight',
              4: 'Fluorescent',
              5: 'Shade',
              129: 'Manual'}),
    0x000B: ('Sharpness',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0x000C: ('Contrast',
             {0: 'Normal',
              1: 'Low',
              2: 'High'}),
    0x000D: ('Saturation',
             {0: 'Normal',
              1: 'Low',
              2: 'High'}),
    0x0014: ('CCDSpeed',
             # Codes 64 and 80 both decode to 'Normal'.
             {64: 'Normal',
              80: 'Normal',
              100: 'High',
              125: '+1.0',
              244: '+3.0',
              250: '+2.0'}),
}
# Fujifilm MakerNote tags: tag ID -> (name, optional decoder), where the
# decoder is either a value->description map or a callable.
MAKERNOTE_FUJIFILM_TAGS = {
    0x0000: ('NoteVersion', make_string),
    0x1000: ('Quality', ),
    0x1001: ('Sharpness',
             # 1/2 and 4/5 deliberately share a coarser label.
             {1: 'Soft',
              2: 'Soft',
              3: 'Normal',
              4: 'Hard',
              5: 'Hard'}),
    0x1002: ('WhiteBalance',
             {0: 'Auto',
              256: 'Daylight',
              512: 'Cloudy',
              768: 'DaylightColor-Fluorescent',
              769: 'DaywhiteColor-Fluorescent',
              770: 'White-Fluorescent',
              1024: 'Incandescent',
              3840: 'Custom'}),
    0x1003: ('Color',
             {0: 'Normal',
              256: 'High',
              512: 'Low'}),
    0x1004: ('Tone',
             {0: 'Normal',
              256: 'High',
              512: 'Low'}),
    0x1010: ('FlashMode',
             {0: 'Auto',
              1: 'On',
              2: 'Off',
              3: 'Red Eye Reduction'}),
    0x1011: ('FlashStrength', ),
    0x1020: ('Macro',
             {0: 'Off',
              1: 'On'}),
    0x1021: ('FocusMode',
             {0: 'Auto',
              1: 'Manual'}),
    0x1030: ('SlowSync',
             {0: 'Off',
              1: 'On'}),
    0x1031: ('PictureMode',
             {0: 'Auto',
              1: 'Portrait',
              2: 'Landscape',
              4: 'Sports',
              5: 'Night',
              6: 'Program AE',
              256: 'Aperture Priority AE',
              512: 'Shutter Priority AE',
              768: 'Manual Exposure'}),
    0x1100: ('MotorOrBracket',
             {0: 'Off',
              1: 'On'}),
    0x1300: ('BlurWarning',
             {0: 'Off',
              1: 'On'}),
    0x1301: ('FocusWarning',
             {0: 'Off',
              1: 'On'}),
    0x1302: ('AEWarning',
             {0: 'Off',
              1: 'On'}),
}
# Canon MakerNote tags with plain scalar/string values (no value map).
MAKERNOTE_CANON_TAGS = {
    0x0006: ('ImageType', ),
    0x0007: ('FirmwareVersion', ),
    0x0008: ('ImageNumber', ),
    0x0009: ('OwnerName', ),
}
# this is in element offset, name, optional value dictionary format
MAKERNOTE_CANON_TAG_0x001 = {
1: ('Macromode',
{1: 'Macro',
2: 'Normal'}),
2: ('SelfTimer', ),
3: ('Quality',
{2: 'Normal',
3: 'Fine',
5: 'Superfine'}),
4: ('FlashMode',
{0: 'Flash Not Fired',
1: 'Auto',
2: 'On',
3: 'Red-Eye Reduction',
4: 'Slow Synchro',
5: 'Auto + Red-Eye Reduction',
6: 'On + Red-Eye Reduction',
16: 'external flash'}),
5: ('ContinuousDriveMode',
{0: 'Single Or Timer',
1: 'Continuous'}),
7: ('FocusMode',
{0: 'One-Shot',
1: 'AI Servo',
2: 'AI Focus',
3: 'MF',
4: 'Single',
5: 'Continuous',
6: 'MF'}),
10: ('ImageSize',
{0: 'Large',
1: 'Medium',
2: 'Small'}),
11: ('EasyShootingMode',
{0: 'Full Auto',
1: 'Manual',
2: 'Landscape',
3: 'Fast Shutter',
4: 'Slow Shutter',
5: 'Night',
6: 'B&W',
7: 'Sepia',
8: 'Portrait',
9: 'Sports',
10: 'Macro/Close-Up',
11: 'Pan Focus'}),
12: ('DigitalZoom',
{0: 'None',
1: '2x',
2: '4x'}),
13: ('Contrast',
{0xFFFF: 'Low',
0: 'Normal',
1: 'High'}),
14: ('Saturation',
{0xFFFF: 'Low',
0: 'Normal',
1: 'High'}),
15: ('Sharpness',
{0xFFFF: 'Low',
0: 'Normal',
1: 'High'}),
16: ('ISO',
{0: 'See ISOSpeedRatings Tag',
15: 'Auto',
16: '50',
17: '100',
18: '200',
19: '400'}),
17: ('MeteringMode',
{3: 'Evaluative',
4: 'Partial',
5: 'Center-weighted'}),
18: ('FocusType',
{0: 'Manual',
1: 'Auto',
3: 'Close-Up (Macro)',
8: 'Locked (Pan Mode)'}),
19: ('AFPointSelected',
{0x3000: 'None (MF)',
0x3001: 'Auto-Selected',
0x3002: 'Right',
0x3003: 'Center',
0x3004: 'Left'}),
20: ('ExposureMode',
{0: 'Easy Shooting',
1: 'Program',
2: 'Tv-priority',
3: 'Av-priority',
4: 'Manual',
5: 'A-DEP'}),
23: ('LongFocalLengthOfLensInFocalUnits', ),
24: ('ShortFocalLengthOfLensInFocalUnits', ),
25: ('FocalUnitsPerMM', ),
28: ('FlashActivity',
{0: 'Did Not Fire',
1: 'Fired'}),
29: ('FlashDetails',
{14: 'External E-TTL',
13: 'Internal Flash',
11: 'FP Sync Used',
7: '2nd("Rear")-Curtain Sync Used',
4: 'FP Sync Enabled'}),
32: ('FocusMode',
{0: 'Single',
1: 'Continuous'}),
}
# Canon MakerNote tag 0x0004: like the 0x0001 table above, keys here are
# element offsets within the tag's value array, not tag IDs.
MAKERNOTE_CANON_TAG_0x004 = {
    7: ('WhiteBalance',
        {0: 'Auto',
         1: 'Sunny',
         2: 'Cloudy',
         3: 'Tungsten',
         4: 'Fluorescent',
         5: 'Flash',
         6: 'Custom'}),
    9: ('SequenceNumber', ),
    14: ('AFPointUsed', ),
    15: ('FlashBias',
         # Negative biases are encoded as 16-bit two's-complement values.
         {0xFFC0: '-2 EV',
          0xFFCC: '-1.67 EV',
          0xFFD0: '-1.50 EV',
          0xFFD4: '-1.33 EV',
          0xFFE0: '-1 EV',
          0xFFEC: '-0.67 EV',
          0xFFF0: '-0.50 EV',
          0xFFF4: '-0.33 EV',
          0x0000: '0 EV',
          0x000C: '0.33 EV',
          0x0010: '0.50 EV',
          0x0014: '0.67 EV',
          0x0020: '1 EV',
          0x002C: '1.33 EV',
          0x0030: '1.50 EV',
          0x0034: '1.67 EV',
          0x0040: '2 EV'}),
    19: ('SubjectDistance', ),
}
# Adding a missing tag from the Exif IFD
"""
Tag definitions.
"""
# Default sentinel for the stop-tag option.  'UNDEF' presumably matches no
# real tag name, so processing does not stop early -- usage of this constant
# is not visible in this chunk; verify against the processing loop.
DEFAULT_STOP_TAG = 'UNDEF'
def make_string(seq):
    """Build a string from a sequence of character codes.

    Codes outside the printable range (32..255) are silently dropped
    instead of raising.  If nothing printable remains, the str() of the
    raw sequence is returned as a fallback.
    """
    # Keep only codes that chr() can render as printable characters.
    printable = ''.join(chr(code) for code in seq if 32 <= code < 256)
    if printable:
        return printable
    # No printing chars at all: fall back to the raw representation.
    return str(seq)
def make_string_uc(seq):
    """Build a string from an EXIF UserComment value.

    The first 8 bytes of a user comment declare the character coding
    (e.g. ASCII vs. JIS vs. Unicode); they are skipped here and the
    payload is decoded as if it were ASCII -- which, of course, is only
    correct for the ASCII case even though the standard explicitly
    allows JIS and Unicode.
    """
    payload = seq[8:]  # drop the 8-byte coding designation
    return make_string(make_string(payload))
# Field type descriptions as (length-in-bytes, abbreviation, full name)
# tuples, indexed by the TIFF/EXIF field type code.  Index 0 is not a real
# type; it stands in for proprietary/unknown codes.
FIELD_TYPES = (
    (0, 'X', 'Proprietary'), # no such type
    (1, 'B', 'Byte'),
    (1, 'A', 'ASCII'),
    (2, 'S', 'Short'),
    (4, 'L', 'Long'),
    (8, 'R', 'Ratio'),
    (1, 'SB', 'Signed Byte'),
    (1, 'U', 'Undefined'),
    (2, 'SS', 'Signed Short'),
    (4, 'SL', 'Signed Long'),
    (8, 'SR', 'Signed Ratio'),
)
# dictionary of main EXIF tag names
# first element of tuple is tag name, optional second element is
# another dictionary giving names to values
EXIF_TAGS = {
0x0100: ('ImageWidth', ),
0x0101: ('ImageLength', ),
0x0102: ('BitsPerSample', ),
0x0103: ('Compression',
{1: 'Uncompressed',
2: 'CCITT 1D',
3: 'T4/Group 3 Fax',
4: 'T6/Group 4 Fax',
5: 'LZW',
6: 'JPEG (old-style)',
7: 'JPEG',
8: 'Adobe Deflate',
9: 'JBIG B&W',
10: 'JBIG Color',
32766: 'Next',
32769: 'Epson ERF Compressed',
32771: 'CCIRLEW',
32773: 'PackBits',
32809: 'Thunderscan',
32895: 'IT8CTPAD',
32896: 'IT8LW',
32897: 'IT8MP',
32898: 'IT8BL',
32908: 'PixarFilm',
32909: 'PixarLog',
32946: 'Deflate',
32947: 'DCS',
34661: 'JBIG',
34676: 'SGILog',
34677: 'SGILog24',
34712: 'JPEG 2000',
34713: 'Nikon NEF Compressed',
65000: 'Kodak DCR Compressed',
65535: 'Pentax PEF Compressed'}),
0x0106: ('PhotometricInterpretation', ),
0x0107: ('Thresholding', ),
0x010A: ('FillOrder', ),
0x010D: ('DocumentName', ),
0x010E: ('ImageDescription', ),
0x010F: ('Make', ),
0x0110: ('Model', ),
0x0111: ('StripOffsets', ),
0x0112: ('Orientation',
{1: 'Horizontal (normal)',
2: 'Mirrored horizontal',
3: 'Rotated 180',
4: 'Mirrored vertical',
5: 'Mirrored horizontal then rotated 90 CCW',
6: 'Rotated 90 CCW',
7: 'Mirrored horizontal then rotated 90 CW',
8: 'Rotated 90 CW'}),
0x0115: ('SamplesPerPixel', ),
0x0116: ('RowsPerStrip', ),
0x0117: ('StripByteCounts', ),
0x011A: ('XResolution', ),
0x011B: ('YResolution', ),
0x011C: ('PlanarConfiguration', ),
0x011D: ('PageName', make_string),
0x0128: ('ResolutionUnit',
{1: 'Not Absolute',
2: 'Pixels/Inch',
3: 'Pixels/Centimeter'}),
0x012D: ('TransferFunction', ),
0x0131: ('Software', ),
0x0132: ('DateTime', ),
0x013B: ('Artist', ),
0x013E: ('WhitePoint', ),
0x013F: ('PrimaryChromaticities', ),
0x0156: ('TransferRange', ),
0x0200: ('JPEGProc', ),
0x0201: ('JPEGInterchangeFormat', ),
0x0202: ('JPEGInterchangeFormatLength', ),
0x0211: ('YCbCrCoefficients', ),
0x0212: ('YCbCrSubSampling', ),
0x0213: ('YCbCrPositioning',
{1: 'Centered',
2: 'Co-sited'}),
0x0214: ('ReferenceBlackWhite', ),
0x4746: ('Rating', ),
0x828D: ('CFARepeatPatternDim', ),
0x828E: ('CFAPattern', ),
0x828F: ('BatteryLevel', ),
0x8298: ('Copyright', ),
0x829A: ('ExposureTime', ),
0x829D: ('FNumber', ),
0x83BB: ('IPTC/NAA', ),
0x8769: ('ExifOffset', ),
0x8773: ('InterColorProfile', ),
0x8822: ('ExposureProgram',
{0: 'Unidentified',
1: 'Manual',
2: 'Program Normal',
3: 'Aperture Priority',
4: 'Shutter Priority',
5: 'Program Creative',
6: 'Program Action',
7: 'Portrait Mode',
8: 'Landscape Mode'}),
0x8824: ('SpectralSensitivity', ),
0x8825: ('GPSInfo', ),
0x8827: ('ISOSpeedRatings', ),
0x8828: ('OECF', ),
0x9000: ('ExifVersion', make_string),
0x9003: ('DateTimeOriginal', ),
0x9004: ('DateTimeDigitized', ),
0x9101: ('ComponentsConfiguration',
{0: '',
1: 'Y',
2: 'Cb',
3: 'Cr',
4: 'Red',
5: 'Green',
6: 'Blue'}),
0x9102: ('CompressedBitsPerPixel', ),
0x9201: ('ShutterSpeedValue', ),
0x9202: ('ApertureValue', ),
0x9203: ('BrightnessValue', ),
0x9204: ('ExposureBiasValue', ),
0x9205: ('MaxApertureValue', ),
0x9206: ('SubjectDistance', ),
0x9207: ('MeteringMode',
{0: 'Unidentified',
1: 'Average',
2: 'CenterWeightedAverage',
3: 'Spot',
4: 'MultiSpot',
5: 'Pattern',
6: 'Partial',
255: 'other'}),
0x9208: ('LightSource',
{0: 'Unknown',
1: 'Daylight',
2: 'Fluorescent',
3: 'Tungsten (incandescent light)',
4: 'Flash',
9: 'Fine weather',
10: 'Cloudy weather',
11: 'Shade',
12: 'Daylight fluorescent (D 5700 - 7100K)',
13: 'Day white fluorescent (N 4600 - 5400K)',
14: 'Cool white fluorescent (W 3900 - 4500K)',
15: 'White fluorescent (WW 3200 - 3700K)',
17: 'Standard light A',
18: 'Standard light B',
19: 'Standard light C',
20: 'D55',
21: 'D65',
22: 'D75',
23: 'D50',
24: 'ISO studio tungsten',
255: 'other light source',}),
0x9209: ('Flash',
{0: 'Flash did not fire',
1: 'Flash fired',
5: 'Strobe return light not detected',
7: 'Strobe return light detected',
9: 'Flash fired, compulsory flash mode',
13: 'Flash fired, compulsory flash mode, return light not detected',
15: 'Flash fired, compulsory flash mode, return light detected',
16: 'Flash did not fire, compulsory flash mode',
24: 'Flash did not fire, auto mode',
25: 'Flash fired, auto mode',
29: 'Flash fired, auto mode, return light not detected',
31: 'Flash fired, auto mode, return light detected',
32: 'No flash function',
65: 'Flash fired, red-eye reduction mode',
69: 'Flash fired, red-eye reduction mode, return light not detected',
71: 'Flash fired, red-eye reduction mode, return light detected',
73: 'Flash fired, compulsory flash mode, red-eye reduction mode',
77: 'Flash fired, compulsory flash mode, red-eye reduction mode, return light not detected',
79: 'Flash fired, compulsory flash mode, red-eye reduction mode, return light detected',
89: 'Flash fired, auto mode, red-eye reduction mode',
93: 'Flash fired, auto mode, return light not detected, red-eye reduction mode',
95: 'Flash fired, auto mode, return light detected, red-eye reduction mode'}),
0x920A: ('FocalLength', ),
0x9214: ('SubjectArea', ),
0x927C: ('MakerNote', ),
0x9286: ('UserComment', make_string_uc),
0x9290: ('SubSecTime', ),
0x9291: ('SubSecTimeOriginal', ),
0x9292: ('SubSecTimeDigitized', ),
# used by Windows Explorer
0x9C9B: ('XPTitle', ),
0x9C9C: ('XPComment', ),
0x9C9D: ('XPAuthor', ), #(ignored by Windows Explorer if Artist exists)
0x9C9E: ('XPKeywords', ),
0x9C9F: ('XPSubject', ),
0xA000: ('FlashPixVersion', make_string),
0xA001: ('ColorSpace',
{1: 'sRGB',
2: 'Adobe RGB',
65535: 'Uncalibrated'}),
0xA002: ('ExifImageWidth', ),
0xA003: ('ExifImageLength', ),
0xA005: ('InteroperabilityOffset', ),
0xA20B: ('FlashEnergy', ), # 0x920B in TIFF/EP
0xA20C: ('SpatialFrequencyResponse', ), # 0x920C
0xA20E: ('FocalPlaneXResolution', ), # 0x920E
0xA20F: ('FocalPlaneYResolution', ), # 0x920F
0xA210: ('FocalPlaneResolutionUnit', ), # 0x9210
0xA214: ('SubjectLocation', ), # 0x9214
0xA215: ('ExposureIndex', ), # 0x9215
0xA217: ('SensingMethod', # 0x9217
{1: 'Not defined',
2: 'One-chip color area',
3: 'Two-chip color area',
4: 'Three-chip color area',
5: 'Color sequential area',
7: 'Trilinear',
8: 'Color sequential linear'}),
0xA300: ('FileSource',
{1: 'Film Scanner',
2: 'Reflection Print Scanner',
3: 'Digital Camera'}),
0xA301: ('SceneType',
{1: 'Directly Photographed'}),
0xA302: ('CVAPattern', ),
0xA401: ('CustomRendered',
{0: 'Normal',
1: 'Custom'}),
0xA402: ('ExposureMode',
{0: 'Auto Exposure',
1: 'Manual Exposure',
2: 'Auto Bracket'}),
0xA403: ('WhiteBalance',
{0: 'Auto',
1: 'Manual'}),
0xA404: ('DigitalZoomRatio', ),
0xA405: ('FocalLengthIn35mmFilm', ),
0xA406: ('SceneCaptureType',
{0: 'Standard',
1: 'Landscape',
2: 'Portrait',
3: 'Night)'}),
0xA407: ('GainControl',
{0: 'None',
1: 'Low gain up',
2: 'High gain up',
3: 'Low gain down',
4: 'High gain down'}),
0xA408: ('Contrast',
{0: 'Normal',
1: 'Soft',
2: 'Hard'}),
0xA409: ('Saturation',
{0: 'Normal',
1: 'Soft',
2: 'Hard'}),
0xA40A: ('Sharpness',
{0: 'Normal',
1: 'Soft',
2: 'Hard'}),
0xA40B: ('DeviceSettingDescription', ),
0xA40C: ('SubjectDistanceRange', ),
0xA420: ('ImageUniqueID', ),
0xA500: ('Gamma', ),
0xC4A5: ('PrintIM', ),
0xEA1C: ('Padding', ),
}
# Interoperability IFD tags: tag ID -> (name, optional value map).
INTR_TAGS = {
    0x0001: ('InteroperabilityIndex', ),
    0x0002: ('InteroperabilityVersion', ),
    0x1000: ('RelatedImageFileFormat', ),
    0x1001: ('RelatedImageWidth', ),
    0x1002: ('RelatedImageLength', ),
}
# GPS IFD tags: tag ID -> (name, ).  None of these carry a value map; the
# raw values (rationals, reference strings, etc.) are reported as-is.
GPS_TAGS = {
    0x0000: ('GPSVersionID', ),
    0x0001: ('GPSLatitudeRef', ),
    0x0002: ('GPSLatitude', ),
    0x0003: ('GPSLongitudeRef', ),
    0x0004: ('GPSLongitude', ),
    0x0005: ('GPSAltitudeRef', ),
    0x0006: ('GPSAltitude', ),
    0x0007: ('GPSTimeStamp', ),
    0x0008: ('GPSSatellites', ),
    0x0009: ('GPSStatus', ),
    0x000A: ('GPSMeasureMode', ),
    0x000B: ('GPSDOP', ),
    0x000C: ('GPSSpeedRef', ),
    0x000D: ('GPSSpeed', ),
    0x000E: ('GPSTrackRef', ),
    0x000F: ('GPSTrack', ),
    0x0010: ('GPSImgDirectionRef', ),
    0x0011: ('GPSImgDirection', ),
    0x0012: ('GPSMapDatum', ),
    0x0013: ('GPSDestLatitudeRef', ),
    0x0014: ('GPSDestLatitude', ),
    0x0015: ('GPSDestLongitudeRef', ),
    0x0016: ('GPSDestLongitude', ),
    0x0017: ('GPSDestBearingRef', ),
    0x0018: ('GPSDestBearing', ),
    0x0019: ('GPSDestDistanceRef', ),
    0x001A: ('GPSDestDistance', ),
    0x001B: ('GPSProcessingMethod', ),
    0x001C: ('GPSAreaInformation', ),
    0x001D: ('GPSDate', ),
    0x001E: ('GPSDifferential', ),
}
# Ignore these tags when quick processing is requested.
# 0x927C is MakerNote Tags
# 0x9286 is user comment
IGNORE_TAGS = (0x9286, 0x927C)
def nikon_ev_bias(seq):
    """Decode a Nikon EV-bias MakerNote value into a string like '+1/3 EV'.

    The first byte is the bias in steps of 1/seq[2] EV; the step divisor
    is usually 6, but it is 12 for the ExposureDifference tag.  Common
    values are table-driven; anything else is reconstructed arithmetically.

    Returns '' for malformed (too short) input.  That only happens if
    something has gone really wrong in reading the Nikon MakerNote.

    http://tomtia.plala.jp/DigitalCamera/MakerNote/index.asp
    """
    # Error condition that could otherwise cause a crash below.
    if len(seq) < 4:
        return ''
    # Fast path: the combinations seen in practice.
    if seq == [252, 1, 6, 0]:
        return "-2/3 EV"
    if seq == [253, 1, 6, 0]:
        return "-1/2 EV"
    if seq == [254, 1, 6, 0]:
        return "-1/3 EV"
    if seq == [0, 1, 6, 0]:
        return "0 EV"
    if seq == [2, 1, 6, 0]:
        return "+1/3 EV"
    if seq == [3, 1, 6, 0]:
        return "+1/2 EV"
    if seq == [4, 1, 6, 0]:
        return "+2/3 EV"
    # Handle combinations not in the table.
    a = seq[0]
    # Zero causes headaches for the +/- logic, so special case it.
    if a == 0:
        return "0 EV"
    if a > 127:
        # Negative biases are stored as a two's-complement byte.
        a = 256 - a
        ret_str = "-"
    else:
        ret_str = "+"
    b = seq[2]  # assume the third value is the step size
    # BUGFIX: use floor division (divmod) rather than 'a / b' so the whole
    # part stays an int under Python 3 too; plain '/' there is true
    # division and would format as e.g. '+1.0 EV'.  Identical result on
    # Python 2 for these non-negative ints.
    whole, a = divmod(a, b)
    if whole != 0:
        ret_str = ret_str + str(whole) + " "
    if a == 0:
        ret_str = ret_str + "EV"
    else:
        r = Ratio(a, b)
        ret_str = ret_str + repr(r) + " EV"
    return ret_str
# Nikon E99x MakerNote Tags
MAKERNOTE_NIKON_NEWER_TAGS = {
0x0001: ('MakernoteVersion', make_string), # Sometimes binary
0x0002: ('ISOSetting', make_string),
0x0003: ('ColorMode', ),
0x0004: ('Quality', ),
0x0005: ('Whitebalance', ),
0x0006: ('ImageSharpening', ),
0x0007: ('FocusMode', ),
0x0008: ('FlashSetting', ),
0x0009: ('AutoFlashMode', ),
0x000B: ('WhiteBalanceBias', ),
0x000C: ('WhiteBalanceRBCoeff', ),
0x000D: ('ProgramShift', nikon_ev_bias),
# Nearly the same as the other EV vals, but step size is 1/12 EV (?)
0x000E: ('ExposureDifference', nikon_ev_bias),
0x000F: ('ISOSelection', ),
0x0010: ('DataDump', ),
0x0011: ('NikonPreview', ),
0x0012: ('FlashCompensation', nikon_ev_bias),
0x0013: ('ISOSpeedRequested', ),
0x0016: ('PhotoCornerCoordinates', ),
# 0x0017: Unknown, but most likely an EV value
0x0018: ('FlashBracketCompensationApplied', nikon_ev_bias),
0x0019: ('AEBracketCompensationApplied', ),
0x001A: ('ImageProcessing', ),
0x001B: ('CropHiSpeed', ),
0x001D: ('SerialNumber', ), # Conflict with 0x00A0 ?
0x001E: ('ColorSpace', ),
0x001F: ('VRInfo', ),
0x0020: ('ImageAuthentication', ),
0x0022: ('ActiveDLighting', ),
0x0023: ('PictureControl', ),
0x0024: ('WorldTime', ),
0x0025: ('ISOInfo', ),
0x0080: ('ImageAdjustment', ),
0x0081: ('ToneCompensation', ),
0x0082: ('AuxiliaryLens', ),
0x0083: ('LensType', ),
0x0084: ('LensMinMaxFocalMaxAperture', ),
0x0085: ('ManualFocusDistance', ),
0x0086: ('DigitalZoomFactor', ),
0x0087: ('FlashMode',
{0x00: 'Did Not Fire',
0x01: 'Fired, Manual',
0x07: 'Fired, External',
0x08: 'Fired, Commander Mode ',
0x09: 'Fired, TTL Mode'}),
0x0088: ('AFFocusPosition',
{0x0000: 'Center',
0x0100: 'Top',
0x0200: 'Bottom',
0x0300: 'Left',
0x0400: 'Right'}),
0x0089: ('BracketingMode',
{0x00: 'Single frame, no bracketing',
0x01: 'Continuous, no bracketing',
0x02: 'Timer, no bracketing',
0x10: 'Single frame, exposure bracketing',
0x11: 'Continuous, exposure bracketing',
0x12: 'Timer, exposure bracketing',
0x40: 'Single frame, white balance bracketing',
0x41: 'Continuous, white balance bracketing',
0x42: 'Timer, white balance bracketing'}),
0x008A: ('AutoBracketRelease', ),
0x008B: ('LensFStops', ),
0x008C: ('NEFCurve1', ), # ExifTool calls this 'ContrastCurve'
0x008D: ('ColorMode', ),
0x008F: ('SceneMode', ),
0x0090: ('LightingType', ),
0x0091: ('ShotInfo', ), # First 4 bytes are a version number in ASCII
0x0092: ('HueAdjustment', ),
# ExifTool calls this 'NEFCompression', should be 1-4
0x0093: ('Compression', ),
0x0094: ('Saturation',
{-3: 'B&W',
-2: '-2',
-1: '-1',
0: '0',
1: '1',
2: '2'}),
0x0095: ('NoiseReduction', ),
0x0096: ('NEFCurve2', ), # ExifTool calls this 'LinearizationTable'
0x0097: ('ColorBalance', ), # First 4 bytes are a version number in ASCII
0x0098: ('LensData', ), # First 4 bytes are a version number in ASCII
0x0099: ('RawImageCenter', ),
0x009A: ('SensorPixelSize', ),
0x009C: ('Scene Assist', ),
0x009E: ('RetouchHistory', ),
0x00A0: ('SerialNumber', ),
0x00A2: ('ImageDataSize', ),
# 00A3: unknown - a single byte 0
# 00A4: In NEF, looks like a 4 byte ASCII version number ('0200')
0x00A5: ('ImageCount', ),
0x00A6: ('DeletedImageCount', ),
0x00A7: ('TotalShutterReleases', ),
# First 4 bytes are a version number in ASCII, with version specific
# info to follow. Its hard to treat it as a string due to embedded nulls.
0x00A8: ('FlashInfo', ),
0x00A9: ('ImageOptimization', ),
0x00AA: ('Saturation', ),
0x00AB: ('DigitalVariProgram', ),
0x00AC: ('ImageStabilization', ),
0x00AD: ('Responsive AF', ), # 'AFResponse'
0x00B0: ('MultiExposure', ),
0x00B1: ('HighISONoiseReduction', ),
0x00B7: ('AFInfo', ),
0x00B8: ('FileInfo', ),
# 00B9: unknown
0x0100: ('DigitalICE', ),
0x0103: ('PreviewCompression',
{1: 'Uncompressed',
2: 'CCITT 1D',
3: 'T4/Group 3 Fax',
4: 'T6/Group 4 Fax',
5: 'LZW',
6: 'JPEG (old-style)',
7: 'JPEG',
8: 'Adobe Deflate',
9: 'JBIG B&W',
10: 'JBIG Color',
32766: 'Next',
32769: 'Epson ERF Compressed',
32771: 'CCIRLEW',
32773: 'PackBits',
32809: 'Thunderscan',
32895: 'IT8CTPAD',
32896: 'IT8LW',
32897: 'IT8MP',
32898: 'IT8BL',
32908: 'PixarFilm',
32909: 'PixarLog',
32946: 'Deflate',
32947: 'DCS',
34661: 'JBIG',
34676: 'SGILog',
34677: 'SGILog24',
34712: 'JPEG 2000',
34713: 'Nikon NEF Compressed',
65000: 'Kodak DCR Compressed',
65535: 'Pentax PEF Compressed',}),
0x0201: ('PreviewImageStart', ),
0x0202: ('PreviewImageLength', ),
0x0213: ('PreviewYCbCrPositioning',
{1: 'Centered',
2: 'Co-sited'}),
0x0E09: ('NikonCaptureVersion', ),
0x0E0E: ('NikonCaptureOffsets', ),
0x0E10: ('NikonScan', ),
0x0E22: ('NEFBitDepth', ),
}
# MakerNote tags used by older Nikon cameras.
# Layout matches the other MAKERNOTE_* tables: tag ID -> (name, optional
# value->description map).
MAKERNOTE_NIKON_OLDER_TAGS = {
    0x0003: ('Quality',
             {1: 'VGA Basic',
              2: 'VGA Normal',
              3: 'VGA Fine',
              4: 'SXGA Basic',
              5: 'SXGA Normal',
              6: 'SXGA Fine'}),
    0x0004: ('ColorMode',
             {1: 'Color',
              2: 'Monochrome'}),
    0x0005: ('ImageAdjustment',
             {0: 'Normal',
              1: 'Bright+',
              2: 'Bright-',
              3: 'Contrast+',
              4: 'Contrast-'}),
    0x0006: ('CCDSpeed',
             # Note the gaps: codes 1 and 3 are not defined in this table.
             {0: 'ISO 80',
              2: 'ISO 160',
              4: 'ISO 320',
              5: 'ISO 100'}),
    0x0007: ('WhiteBalance',
             {0: 'Auto',
              1: 'Preset',
              2: 'Daylight',
              3: 'Incandescent',
              4: 'Fluorescent',
              5: 'Cloudy',
              6: 'Speed Light'}),
}
def olympus_special_mode(v):
    """Return a readable description of an Olympus SpecialMode tag value.

    *v* holds three numbers: shooting mode, shot number and panorama
    direction.  Unrecognised combinations are handed back untouched.
    """
    shooting_modes = {0: 'Normal', 1: 'Unknown', 2: 'Fast', 3: 'Panorama'}
    pan_directions = {
        0: 'Non-panoramic',
        1: 'Left to right',
        2: 'Right to left',
        3: 'Bottom to top',
        4: 'Top to bottom'}
    if v[0] in shooting_modes and v[2] in pan_directions:
        return '%s - sequence %d - %s' % (
            shooting_modes[v[0]], v[1], pan_directions[v[2]])
    # Unknown mode or direction code: return the raw value unchanged.
    return v
# Olympus MakerNote tags: tag ID -> (name, optional decoder).  The decoder
# is either a value->description map or a callable applied to the raw value.
MAKERNOTE_OLYMPUS_TAGS = {
    # ah HAH! those sneeeeeaky bastids! this is how they get past the fact
    # that a JPEG thumbnail is not allowed in an uncompressed TIFF file
    0x0100: ('JPEGThumbnail', ),
    0x0200: ('SpecialMode', olympus_special_mode),
    0x0201: ('JPEGQual',
             {1: 'SQ',
              2: 'HQ',
              3: 'SHQ'}),
    0x0202: ('Macro',
             {0: 'Normal',
              1: 'Macro',
              2: 'SuperMacro'}),
    0x0203: ('BWMode',
             {0: 'Off',
              1: 'On'}),
    0x0204: ('DigitalZoom', ),
    0x0205: ('FocalPlaneDiagonal', ),
    0x0206: ('LensDistortionParams', ),
    0x0207: ('SoftwareRelease', ),
    0x0208: ('PictureInfo', ),
    0x0209: ('CameraID', make_string),  # print as string
    0x0F00: ('DataDump', ),
    0x0300: ('PreCaptureFrames', ),
    0x0404: ('SerialNumber', ),
    0x1000: ('ShutterSpeedValue', ),
    0x1001: ('ISOValue', ),
    0x1002: ('ApertureValue', ),
    0x1003: ('BrightnessValue', ),
    # BUGFIX: 0x1004 used to appear twice; the first, bare ('FlashMode', )
    # entry was dead code because the later duplicate key silently won.
    0x1004: ('FlashMode',
             {2: 'On',
              3: 'Off'}),
    0x1005: ('FlashDevice',
             {0: 'None',
              1: 'Internal',
              4: 'External',
              5: 'Internal + External'}),
    0x1006: ('ExposureCompensation', ),
    0x1007: ('SensorTemperature', ),
    0x1008: ('LensTemperature', ),
    0x100b: ('FocusMode',
             {0: 'Auto',
              1: 'Manual'}),
    0x1017: ('RedBalance', ),
    0x1018: ('BlueBalance', ),
    0x101a: ('SerialNumber', ),
    0x1023: ('FlashExposureComp', ),
    0x1026: ('ExternalFlashBounce',
             {0: 'No',
              1: 'Yes'}),
    0x1027: ('ExternalFlashZoom', ),
    0x1028: ('ExternalFlashMode', ),
    0x1029: ('Contrast int16u',
             {0: 'High',
              1: 'Normal',
              2: 'Low'}),
    0x102a: ('SharpnessFactor', ),
    0x102b: ('ColorControl', ),
    0x102c: ('ValidBits', ),
    0x102d: ('CoringFilter', ),
    0x102e: ('OlympusImageWidth', ),
    0x102f: ('OlympusImageHeight', ),
    0x1034: ('CompressionRatio', ),
    0x1035: ('PreviewImageValid',
             {0: 'No',
              1: 'Yes'}),
    0x1036: ('PreviewImageStart', ),
    0x1037: ('PreviewImageLength', ),
    0x1039: ('CCDScanMode',
             {0: 'Interlaced',
              1: 'Progressive'}),
    0x103a: ('NoiseReduction',
             {0: 'Off',
              1: 'On'}),
    0x103b: ('InfinityLensStep', ),
    0x103c: ('NearLensStep', ),
    # TODO - these need extra definitions
    # http://search.cpan.org/src/EXIFTOOL/Image-ExifTool-6.90/html/TagNames/Olympus.html
    0x2010: ('Equipment', ),
    0x2020: ('CameraSettings', ),
    0x2030: ('RawDevelopment', ),
    0x2040: ('ImageProcessing', ),
    0x2050: ('FocusInfo', ),
    0x3000: ('RawInfo ', ),
}
# 0x2020 CameraSettings
MAKERNOTE_OLYMPUS_TAG_0x2020 = {
0x0100: ('PreviewImageValid',
{0: 'No',
1: 'Yes'}),
0x0101: ('PreviewImageStart', ),
0x0102: ('PreviewImageLength', ),
0x0200: ('ExposureMode',
{1: 'Manual',
2: 'Program',
3: 'Aperture-priority AE',
4: 'Shutter speed priority AE',
5: 'Program-shift'}),
0x0201: ('AELock',
{0: 'Off',
1: 'On'}),
0x0202: ('MeteringMode',
{2: 'Center Weighted',
3: 'Spot',
5: 'ESP',
261: 'Pattern+AF',
515: 'Spot+Highlight control',
1027: 'Spot+Shadow control'}),
0x0300: ('MacroMode',
{0: 'Off',
1: 'On'}),
0x0301: ('FocusMode',
{0: 'Single AF',
1: 'Sequential shooting AF',
2: 'Continuous AF',
3: 'Multi AF',
10: 'MF'}),
0x0302: ('FocusProcess',
{0: 'AF Not Used',
1: 'AF Used'}),
0x0303: ('AFSearch',
{0: 'Not Ready',
1: 'Ready'}),
0x0304: ('AFAreas', ),
0x0401: ('FlashExposureCompensation', ),
0x0500: ('WhiteBalance2',
{0: 'Auto',
16: '7500K (Fine Weather with Shade)',
17: '6000K (Cloudy)',
18: '5300K (Fine Weather)',
20: '3000K (Tungsten light)',
21: '3600K (Tungsten light-like)',
33: '6600K (Daylight fluorescent)',
34: '4500K (Neutral white fluorescent)',
35: '4000K (Cool white fluorescent)',
48: '3600K (Tungsten light-like)',
256: 'Custom WB 1',
257: 'Custom WB 2',
258: 'Custom WB 3',
259: 'Custom WB 4',
512: 'Custom WB 5400K',
513: 'Custom WB 2900K',
514: 'Custom WB 8000K', }),
0x0501: ('WhiteBalanceTemperature', ),
0x0502: ('WhiteBalanceBracket', ),
0x0503: ('CustomSaturation', ), # (3 numbers: 1. CS Value, 2. Min, 3. Max)
0x0504: ('ModifiedSaturation',
{0: 'Off',
1: 'CM1 (Red Enhance)',
2: 'CM2 (Green Enhance)',
3: 'CM3 (Blue Enhance)',
4: 'CM4 (Skin Tones)'}),
0x0505: ('ContrastSetting', ), # (3 numbers: 1. Contrast, 2. Min, 3. Max)
0x0506: ('SharpnessSetting', ), # (3 numbers: 1. Sharpness, 2. Min, 3. Max)
0x0507: ('ColorSpace',
{0: 'sRGB',
1: 'Adobe RGB',
2: 'Pro Photo RGB'}),
0x0509: ('SceneMode',
{0: 'Standard',
6: 'Auto',
7: 'Sport',
8: 'Portrait',
9: 'Landscape+Portrait',
10: 'Landscape',
11: 'Night scene',
13: 'Panorama',
16: 'Landscape+Portrait',
17: 'Night+Portrait',
19: 'Fireworks',
20: 'Sunset',
22: 'Macro',
25: 'Documents',
26: 'Museum',
28: 'Beach&Snow',
30: 'Candle',
35: 'Underwater Wide1',
36: 'Underwater Macro',
39: 'High Key',
40: 'Digital Image Stabilization',
44: 'Underwater Wide2',
45: 'Low Key',
46: 'Children',
48: 'Nature Macro'}),
0x050a: ('NoiseReduction',
{0: 'Off',
1: 'Noise Reduction',
2: 'Noise Filter',
3: 'Noise Reduction + Noise Filter',
4: 'Noise Filter (ISO Boost)',
5: 'Noise Reduction + Noise Filter (ISO Boost)'}),
0x050b: ('DistortionCorrection',
{0: 'Off',
1: 'On'}),
0x050c: ('ShadingCompensation',
{0: 'Off',
1: 'On'}),
0x050d: ('CompressionFactor', ),
0x050f: ('Gradation',
{'-1 -1 1': 'Low Key',
'0 -1 1': 'Normal',
'1 -1 1': 'High Key'}),
0x0520: ('PictureMode',
{1: 'Vivid',
2: 'Natural',
3: 'Muted',
256: 'Monotone',
512: 'Sepia'}),
0x0521: ('PictureModeSaturation', ),
0x0522: ('PictureModeHue?', ),
0x0523: ('PictureModeContrast', ),
0x0524: ('PictureModeSharpness', ),
0x0525: ('PictureModeBWFilter',
{0: 'n/a',
1: 'Neutral',
2: 'Yellow',
3: 'Orange',
4: 'Red',
5: 'Green'}),
0x0526: ('PictureModeTone',
{0: 'n/a',
1: 'Neutral',
2: 'Sepia',
3: 'Blue',
4: 'Purple',
5: 'Green'}),
0x0600: ('Sequence', ), # 2 or 3 numbers: 1. Mode, 2. Shot number, 3. Mode bits
0x0601: ('PanoramaMode', ), # (2 numbers: 1. Mode, 2. Shot number)
0x0603: ('ImageQuality2',
{1: 'SQ',
2: 'HQ',
3: 'SHQ',
4: 'RAW'}),
0x0901: ('ManometerReading', ),
}
# EXIF MakerNote tag definitions for Casio cameras.
# Maps tag ID -> (tag name,) or (tag name, {raw value: human-readable label}).
MAKERNOTE_CASIO_TAGS = {
    0x0001: ('RecordingMode',
             {1: 'Single Shutter',
              2: 'Panorama',
              3: 'Night Scene',
              4: 'Portrait',
              5: 'Landscape'}),
    0x0002: ('Quality',
             {1: 'Economy',
              2: 'Normal',
              3: 'Fine'}),
    0x0003: ('FocusingMode',
             {2: 'Macro',
              3: 'Auto Focus',
              4: 'Manual Focus',
              5: 'Infinity'}),
    0x0004: ('FlashMode',
             {1: 'Auto',
              2: 'On',
              3: 'Off',
              4: 'Red Eye Reduction'}),
    0x0005: ('FlashIntensity',
             {11: 'Weak',
              13: 'Normal',
              15: 'Strong'}),
    0x0006: ('Object Distance', ),
    0x0007: ('WhiteBalance',
             {1: 'Auto',
              2: 'Tungsten',
              3: 'Daylight',
              4: 'Fluorescent',
              5: 'Shade',
              129: 'Manual'}),
    0x000B: ('Sharpness',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0x000C: ('Contrast',
             {0: 'Normal',
              1: 'Low',
              2: 'High'}),
    0x000D: ('Saturation',
             {0: 'Normal',
              1: 'Low',
              2: 'High'}),
    0x0014: ('CCDSpeed',
             {64: 'Normal',
              80: 'Normal',
              100: 'High',
              125: '+1.0',
              244: '+3.0',
              250: '+2.0'}),
    }
# EXIF MakerNote tag definitions for Fujifilm cameras.
# Maps tag ID -> (tag name,) or (tag name, decoder), where the decoder is
# either a {raw value: label} dict or a callable (e.g. make_string, defined
# elsewhere in this module).
MAKERNOTE_FUJIFILM_TAGS = {
    0x0000: ('NoteVersion', make_string),
    0x1000: ('Quality', ),
    0x1001: ('Sharpness',
             {1: 'Soft',
              2: 'Soft',
              3: 'Normal',
              4: 'Hard',
              5: 'Hard'}),
    0x1002: ('WhiteBalance',
             {0: 'Auto',
              256: 'Daylight',
              512: 'Cloudy',
              768: 'DaylightColor-Fluorescent',
              769: 'DaywhiteColor-Fluorescent',
              770: 'White-Fluorescent',
              1024: 'Incandescent',
              3840: 'Custom'}),
    0x1003: ('Color',
             {0: 'Normal',
              256: 'High',
              512: 'Low'}),
    0x1004: ('Tone',
             {0: 'Normal',
              256: 'High',
              512: 'Low'}),
    0x1010: ('FlashMode',
             {0: 'Auto',
              1: 'On',
              2: 'Off',
              3: 'Red Eye Reduction'}),
    0x1011: ('FlashStrength', ),
    0x1020: ('Macro',
             {0: 'Off',
              1: 'On'}),
    0x1021: ('FocusMode',
             {0: 'Auto',
              1: 'Manual'}),
    0x1030: ('SlowSync',
             {0: 'Off',
              1: 'On'}),
    0x1031: ('PictureMode',
             {0: 'Auto',
              1: 'Portrait',
              2: 'Landscape',
              4: 'Sports',
              5: 'Night',
              6: 'Program AE',
              256: 'Aperture Priority AE',
              512: 'Shutter Priority AE',
              768: 'Manual Exposure'}),
    0x1100: ('MotorOrBracket',
             {0: 'Off',
              1: 'On'}),
    0x1300: ('BlurWarning',
             {0: 'Off',
              1: 'On'}),
    0x1301: ('FocusWarning',
             {0: 'Off',
              1: 'On'}),
    0x1302: ('AEWarning',
             {0: 'Off',
              1: 'On'}),
    }
# EXIF MakerNote tag definitions for Canon cameras (simple string-valued
# tags; the packed tags 0x0001 and 0x0004 are decoded via the tables below).
MAKERNOTE_CANON_TAGS = {
    0x0006: ('ImageType', ),
    0x0007: ('FirmwareVersion', ),
    0x0008: ('ImageNumber', ),
    0x0009: ('OwnerName', ),
    }
# Canon MakerNote tag 0x0001 (camera settings).  Unlike the tag-ID tables
# above, this one is keyed by element offset within the tag's value array:
# offset -> (name,) or (name, {raw value: label}).
MAKERNOTE_CANON_TAG_0x001 = {
    1: ('Macromode',
        {1: 'Macro',
         2: 'Normal'}),
    2: ('SelfTimer', ),
    3: ('Quality',
        {2: 'Normal',
         3: 'Fine',
         5: 'Superfine'}),
    4: ('FlashMode',
        {0: 'Flash Not Fired',
         1: 'Auto',
         2: 'On',
         3: 'Red-Eye Reduction',
         4: 'Slow Synchro',
         5: 'Auto + Red-Eye Reduction',
         6: 'On + Red-Eye Reduction',
         16: 'external flash'}),
    5: ('ContinuousDriveMode',
        {0: 'Single Or Timer',
         1: 'Continuous'}),
    7: ('FocusMode',
        {0: 'One-Shot',
         1: 'AI Servo',
         2: 'AI Focus',
         3: 'MF',
         4: 'Single',
         5: 'Continuous',
         6: 'MF'}),
    10: ('ImageSize',
         {0: 'Large',
          1: 'Medium',
          2: 'Small'}),
    11: ('EasyShootingMode',
         {0: 'Full Auto',
          1: 'Manual',
          2: 'Landscape',
          3: 'Fast Shutter',
          4: 'Slow Shutter',
          5: 'Night',
          6: 'B&W',
          7: 'Sepia',
          8: 'Portrait',
          9: 'Sports',
          10: 'Macro/Close-Up',
          11: 'Pan Focus'}),
    12: ('DigitalZoom',
         {0: 'None',
          1: '2x',
          2: '4x'}),
    # 0xFFFF below is presumably -1 stored as an unsigned 16-bit value
    # -- TODO confirm against the Canon MakerNote specification.
    13: ('Contrast',
         {0xFFFF: 'Low',
          0: 'Normal',
          1: 'High'}),
    14: ('Saturation',
         {0xFFFF: 'Low',
          0: 'Normal',
          1: 'High'}),
    15: ('Sharpness',
         {0xFFFF: 'Low',
          0: 'Normal',
          1: 'High'}),
    16: ('ISO',
         {0: 'See ISOSpeedRatings Tag',
          15: 'Auto',
          16: '50',
          17: '100',
          18: '200',
          19: '400'}),
    17: ('MeteringMode',
         {3: 'Evaluative',
          4: 'Partial',
          5: 'Center-weighted'}),
    18: ('FocusType',
         {0: 'Manual',
          1: 'Auto',
          3: 'Close-Up (Macro)',
          8: 'Locked (Pan Mode)'}),
    19: ('AFPointSelected',
         {0x3000: 'None (MF)',
          0x3001: 'Auto-Selected',
          0x3002: 'Right',
          0x3003: 'Center',
          0x3004: 'Left'}),
    20: ('ExposureMode',
         {0: 'Easy Shooting',
          1: 'Program',
          2: 'Tv-priority',
          3: 'Av-priority',
          4: 'Manual',
          5: 'A-DEP'}),
    23: ('LongFocalLengthOfLensInFocalUnits', ),
    24: ('ShortFocalLengthOfLensInFocalUnits', ),
    25: ('FocalUnitsPerMM', ),
    28: ('FlashActivity',
         {0: 'Did Not Fire',
          1: 'Fired'}),
    29: ('FlashDetails',
         {14: 'External E-TTL',
          13: 'Internal Flash',
          11: 'FP Sync Used',
          7: '2nd("Rear")-Curtain Sync Used',
          4: 'FP Sync Enabled'}),
    32: ('FocusMode',
         {0: 'Single',
          1: 'Continuous'}),
    }
# Canon MakerNote tag 0x0004 (shot info), keyed by element offset within the
# tag's value array: offset -> (name,) or (name, {raw value: label}).
MAKERNOTE_CANON_TAG_0x004 = {
    7: ('WhiteBalance',
        {0: 'Auto',
         1: 'Sunny',
         2: 'Cloudy',
         3: 'Tungsten',
         4: 'Fluorescent',
         5: 'Flash',
         6: 'Custom'}),
    9: ('SequenceNumber', ),
    14: ('AFPointUsed', ),
    # Flash exposure compensation; 0xFF.. values are presumably negative
    # steps stored as unsigned 16-bit -- TODO confirm against the spec.
    15: ('FlashBias',
         {0xFFC0: '-2 EV',
          0xFFCC: '-1.67 EV',
          0xFFD0: '-1.50 EV',
          0xFFD4: '-1.33 EV',
          0xFFE0: '-1 EV',
          0xFFEC: '-0.67 EV',
          0xFFF0: '-0.50 EV',
          0xFFF4: '-0.33 EV',
          0x0000: '0 EV',
          0x000C: '0.33 EV',
          0x0010: '0.50 EV',
          0x0014: '0.67 EV',
          0x0020: '1 EV',
          0x002C: '1.33 EV',
          0x0030: '1.50 EV',
          0x0034: '1.67 EV',
          0x0040: '2 EV'}),
    19: ('SubjectDistance', ),
    }
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["PTSampler"]
import numpy as np
import numpy.random as nr
import multiprocessing as multi
from . import autocorr
from .sampler import Sampler
def default_beta_ladder(ndim, ntemps=None, Tmax=None):
    r"""Returns a ladder of :math:`\beta \equiv 1/T` with temperatures
    geometrically spaced with spacing chosen so that a Gaussian
    posterior would have a 0.25 temperature swap acceptance rate.

    :param ndim:
        The number of dimensions in the parameter space.

    :param ntemps: (optional)
        If set, the number of temperatures to use.  If ``None``, the
        ``Tmax`` argument must be given, and the number of
        temperatures is chosen so that the highest temperature is
        greater than ``Tmax``.

    :param Tmax: (optional)
        If ``ntemps`` is not given, this argument controls the number
        of temperatures.  Temperatures are chosen according to the
        spacing criteria until the maximum temperature exceeds
        ``Tmax``.

    :raises ValueError: if neither ``ntemps`` nor ``Tmax`` is given.
    """
    # NOTE: the docstring is a raw string on purpose -- in the original,
    # "\beta" silently contained a backspace escape sequence.
    #
    # Empirically derived geometric temperature steps, indexed by
    # (ndim - 1), tuned for a 0.25 swap acceptance rate.
    tstep = np.array([25.2741, 7., 4.47502, 3.5236, 3.0232,
                      2.71225, 2.49879, 2.34226, 2.22198, 2.12628,
                      2.04807, 1.98276, 1.92728, 1.87946, 1.83774,
                      1.80096, 1.76826, 1.73895, 1.7125, 1.68849,
                      1.66657, 1.64647, 1.62795, 1.61083, 1.59494,
                      1.58014, 1.56632, 1.55338, 1.54123, 1.5298,
                      1.51901, 1.50881, 1.49916, 1.49, 1.4813,
                      1.47302, 1.46512, 1.45759, 1.45039, 1.4435,
                      1.4369, 1.43056, 1.42448, 1.41864, 1.41302,
                      1.40761, 1.40239, 1.39736, 1.3925, 1.38781,
                      1.38327, 1.37888, 1.37463, 1.37051, 1.36652,
                      1.36265, 1.35889, 1.35524, 1.3517, 1.34825,
                      1.3449, 1.34164, 1.33847, 1.33538, 1.33236,
                      1.32943, 1.32656, 1.32377, 1.32104, 1.31838,
                      1.31578, 1.31325, 1.31076, 1.30834, 1.30596,
                      1.30364, 1.30137, 1.29915, 1.29697, 1.29484,
                      1.29275, 1.29071, 1.2887, 1.28673, 1.2848,
                      1.28291, 1.28106, 1.27923, 1.27745, 1.27569,
                      1.27397, 1.27227, 1.27061, 1.26898, 1.26737,
                      1.26579, 1.26424, 1.26271, 1.26121,
                      1.25973])
    dmax = tstep.shape[0]

    if ndim > dmax:
        # An approximation to the temperature step at large dimension.
        tstep = 1.0 + 2.0*np.sqrt(np.log(4.0))/np.sqrt(ndim)
    else:
        tstep = tstep[ndim-1]

    if ntemps is None and Tmax is None:
        raise ValueError('must specify one of ``ntemps`` and ``Tmax``')
    elif ntemps is None:
        # Smallest ladder whose top temperature exceeds Tmax.
        ntemps = int(np.log(Tmax)/np.log(tstep)+2)

    return np.exp(np.linspace(0, -(ntemps-1)*np.log(tstep), ntemps))
class PTLikePrior(object):
    """
    Wrapper class for logl and logp.

    Bundles the log-likelihood and log-prior callables, together with
    their extra positional and keyword arguments, into a single callable
    that can be handed to a pool's ``map``.
    """

    def __init__(self, logl, logp, loglargs=None, logpargs=None,
                 loglkwargs=None, logpkwargs=None):
        self.logl = logl
        self.logp = logp
        # Use None sentinels instead of mutable defaults ([], {}) so that
        # instances never share argument containers.
        self.loglargs = [] if loglargs is None else loglargs
        self.logpargs = [] if logpargs is None else logpargs
        self.loglkwargs = {} if loglkwargs is None else loglkwargs
        self.logpkwargs = {} if logpkwargs is None else logpkwargs

    def __call__(self, x):
        """Return ``(logl(x), logp(x))`` for position ``x``."""
        lp = self.logp(x, *self.logpargs, **self.logpkwargs)

        # Outside the prior support: skip the (possibly expensive)
        # likelihood evaluation entirely.
        if lp == float('-inf'):
            return lp, lp

        return self.logl(x, *self.loglargs, **self.loglkwargs), lp
class PTSampler(Sampler):
    """
    A parallel-tempered ensemble sampler, using :class:`EnsembleSampler`
    for sampling within each parallel chain.

    :param ntemps:
        The number of temperatures.  Can be ``None``, in which case
        the ``Tmax`` argument sets the maximum temperature.

    :param nwalkers:
        The number of ensemble walkers at each temperature.

    :param dim:
        The dimension of parameter space.

    :param logl:
        The log-likelihood function.

    :param logp:
        The log-prior function.

    :param threads: (optional)
        The number of parallel threads to use in sampling.

    :param pool: (optional)
        Alternative to ``threads``.  Any object that implements a
        ``map`` method compatible with the built-in ``map`` will do
        here.  For example, :class:`multi.Pool` will do.

    :param betas: (optional)
        Array giving the inverse temperatures, :math:`\\beta=1/T`,
        used in the ladder.  The default is chosen so that a Gaussian
        posterior in the given number of dimensions will have a 0.25
        tswap acceptance rate.

    :param a: (optional)
        Proposal scale factor.

    :param Tmax: (optional)
        Maximum temperature for the ladder.  If ``ntemps`` is
        ``None``, this argument is used to set the temperature ladder.

    :param loglargs: (optional)
        Positional arguments for the log-likelihood function.

    :param logpargs: (optional)
        Positional arguments for the log-prior function.

    :param loglkwargs: (optional)
        Keyword arguments for the log-likelihood function.

    :param logpkwargs: (optional)
        Keyword arguments for the log-prior function.
    """

    def __init__(self, ntemps, nwalkers, dim, logl, logp, threads=1,
                 pool=None, betas=None, a=2.0, Tmax=None, loglargs=None,
                 logpargs=None, loglkwargs=None, logpkwargs=None):
        self.logl = logl
        self.logp = logp
        self.a = a
        # None sentinels instead of mutable defaults so instances never
        # share the argument containers.
        self.loglargs = [] if loglargs is None else loglargs
        self.logpargs = [] if logpargs is None else logpargs
        self.loglkwargs = {} if loglkwargs is None else loglkwargs
        self.logpkwargs = {} if logpkwargs is None else logpkwargs

        self.nwalkers = nwalkers
        self.dim = dim

        if betas is None:
            self._betas = default_beta_ladder(self.dim, ntemps=ntemps,
                                              Tmax=Tmax)
        else:
            self._betas = betas

        self.ntemps = self.betas.shape[0]

        assert self.nwalkers % 2 == 0, \
            "The number of walkers must be even."
        # Message matches the >= check (the original said "greater than").
        assert self.nwalkers >= 2*self.dim, \
            "The number of walkers must be greater than or equal to " \
            "2*dimension."

        self._chain = None
        self._lnprob = None
        self._lnlikelihood = None

        # ``np.float`` (removed in NumPy 1.24) replaced by the builtin
        # ``float``; both mean the float64 dtype.
        self.nswap = np.zeros(self.ntemps, dtype=float)
        self.nswap_accepted = np.zeros(self.ntemps, dtype=float)

        self.nprop = np.zeros((self.ntemps, self.nwalkers), dtype=float)
        self.nprop_accepted = np.zeros((self.ntemps, self.nwalkers),
                                       dtype=float)

        self.pool = pool
        if threads > 1 and pool is None:
            self.pool = multi.Pool(threads)

    def reset(self):
        """
        Clear the ``chain``, ``lnprobability``, ``lnlikelihood``,
        ``acceptance_fraction``, ``tswap_acceptance_fraction`` stored
        properties.
        """
        self.nswap = np.zeros(self.ntemps, dtype=float)
        self.nswap_accepted = np.zeros(self.ntemps, dtype=float)

        self.nprop = np.zeros((self.ntemps, self.nwalkers), dtype=float)
        self.nprop_accepted = np.zeros((self.ntemps, self.nwalkers),
                                       dtype=float)

        self._chain = None
        self._lnprob = None
        self._lnlikelihood = None

    def sample(self, p0, lnprob0=None, lnlike0=None, iterations=1,
               thin=1, storechain=True):
        """
        Advance the chains ``iterations`` steps as a generator.

        :param p0:
            The initial positions of the walkers.  Shape should be
            ``(ntemps, nwalkers, dim)``.

        :param lnprob0: (optional)
            The initial posterior values for the ensembles.  Shape
            ``(ntemps, nwalkers)``.

        :param lnlike0: (optional)
            The initial likelihood values for the ensembles.  Shape
            ``(ntemps, nwalkers)``.

        :param iterations: (optional)
            The number of iterations to perform.

        :param thin: (optional)
            The number of iterations to perform between saving the
            state to the internal chain.

        :param storechain: (optional)
            If ``True`` store the iterations in the ``chain``
            property.

        At each iteration, this generator yields

        * ``p``, the current position of the walkers.

        * ``lnprob`` the current posterior values for the walkers.

        * ``lnlike`` the current likelihood values for the walkers.
        """
        p = np.copy(np.array(p0))

        # If we have no lnprob or logls, compute them.
        if lnprob0 is None or lnlike0 is None:
            fn = PTLikePrior(self.logl, self.logp, self.loglargs,
                             self.logpargs, self.loglkwargs, self.logpkwargs)
            if self.pool is None:
                results = list(map(fn, p.reshape((-1, self.dim))))
            else:
                results = list(self.pool.map(fn, p.reshape((-1, self.dim))))

            logls = np.array([r[0] for r in results]).reshape((self.ntemps,
                                                               self.nwalkers))
            logps = np.array([r[1] for r in results]).reshape((self.ntemps,
                                                               self.nwalkers))

            lnlike0 = logls
            lnprob0 = logls * self.betas.reshape((self.ntemps, 1)) + logps

        lnprob = lnprob0
        logl = lnlike0

        # Expand the chain in advance of the iterations.
        if storechain:
            nsave = iterations // thin
            if self._chain is None:
                isave = 0
                self._chain = np.zeros((self.ntemps, self.nwalkers, nsave,
                                        self.dim))
                self._lnprob = np.zeros((self.ntemps, self.nwalkers, nsave))
                self._lnlikelihood = np.zeros((self.ntemps, self.nwalkers,
                                               nsave))
            else:
                isave = self._chain.shape[2]
                self._chain = np.concatenate((self._chain,
                                              np.zeros((self.ntemps,
                                                        self.nwalkers,
                                                        nsave, self.dim))),
                                             axis=2)
                self._lnprob = np.concatenate((self._lnprob,
                                               np.zeros((self.ntemps,
                                                         self.nwalkers,
                                                         nsave))),
                                              axis=2)
                self._lnlikelihood = np.concatenate((self._lnlikelihood,
                                                     np.zeros((self.ntemps,
                                                               self.nwalkers,
                                                               nsave))),
                                                    axis=2)

        for i in range(iterations):
            # Stretch-move update of each half-ensemble against the other.
            for j in [0, 1]:
                jupdate = j
                jsample = (j + 1) % 2

                pupdate = p[:, jupdate::2, :]
                psample = p[:, jsample::2, :]

                zs = np.exp(np.random.uniform(low=-np.log(self.a),
                                              high=np.log(self.a),
                                              size=(self.ntemps,
                                                    self.nwalkers//2)))

                qs = np.zeros((self.ntemps, self.nwalkers//2, self.dim))
                for k in range(self.ntemps):
                    js = np.random.randint(0, high=self.nwalkers // 2,
                                           size=self.nwalkers // 2)
                    qs[k, :, :] = psample[k, js, :] + zs[k, :].reshape(
                        (self.nwalkers // 2, 1)) * (pupdate[k, :, :] -
                                                    psample[k, js, :])

                fn = PTLikePrior(self.logl, self.logp, self.loglargs,
                                 self.logpargs, self.loglkwargs,
                                 self.logpkwargs)
                if self.pool is None:
                    results = list(map(fn, qs.reshape((-1, self.dim))))
                else:
                    results = list(self.pool.map(fn, qs.reshape((-1,
                                                                 self.dim))))

                qslogls = np.array([r[0] for r in results]).reshape(
                    (self.ntemps, self.nwalkers//2))
                qslogps = np.array([r[1] for r in results]).reshape(
                    (self.ntemps, self.nwalkers//2))
                qslnprob = qslogls * self.betas.reshape((self.ntemps, 1)) \
                    + qslogps

                logpaccept = self.dim*np.log(zs) + qslnprob \
                    - lnprob[:, jupdate::2]
                logrs = np.log(np.random.uniform(low=0.0, high=1.0,
                                                 size=(self.ntemps,
                                                       self.nwalkers//2)))

                accepts = logrs < logpaccept
                accepts = accepts.flatten()

                # NOTE(review): these reshapes of strided slices appear to
                # yield views (the strides are mergeable), so the fancy
                # assignments update p/lnprob/logl in place -- confirm if
                # the array layouts ever change.
                pupdate.reshape((-1, self.dim))[accepts, :] = \
                    qs.reshape((-1, self.dim))[accepts, :]
                lnprob[:, jupdate::2].reshape((-1,))[accepts] = \
                    qslnprob.reshape((-1,))[accepts]
                logl[:, jupdate::2].reshape((-1,))[accepts] = \
                    qslogls.reshape((-1,))[accepts]

                accepts = accepts.reshape((self.ntemps, self.nwalkers//2))

                self.nprop[:, jupdate::2] += 1.0
                self.nprop_accepted[:, jupdate::2] += accepts

            p, lnprob, logl = self._temperature_swaps(p, lnprob, logl)

            if (i + 1) % thin == 0:
                if storechain:
                    self._chain[:, :, isave, :] = p
                    self._lnprob[:, :, isave] = lnprob
                    self._lnlikelihood[:, :, isave] = logl
                    isave += 1

            yield p, lnprob, logl

    def _temperature_swaps(self, p, lnprob, logl):
        """
        Perform parallel-tempering temperature swaps on the state
        in ``p`` with associated ``lnprob`` and ``logl``.
        """
        ntemps = self.ntemps

        # Sweep from the hottest chain down, proposing swaps between
        # adjacent temperatures.
        for i in range(ntemps - 1, 0, -1):
            bi = self.betas[i]
            bi1 = self.betas[i - 1]

            dbeta = bi1 - bi

            iperm = nr.permutation(self.nwalkers)
            i1perm = nr.permutation(self.nwalkers)

            raccept = np.log(nr.uniform(size=self.nwalkers))
            paccept = dbeta * (logl[i, iperm] - logl[i - 1, i1perm])

            self.nswap[i] += self.nwalkers
            self.nswap[i - 1] += self.nwalkers

            asel = (paccept > raccept)
            nacc = np.sum(asel)

            self.nswap_accepted[i] += nacc
            self.nswap_accepted[i - 1] += nacc

            # Swap positions/likelihoods; posteriors are adjusted by
            # dbeta*logl since only the tempered likelihood term changes.
            ptemp = np.copy(p[i, iperm[asel], :])
            ltemp = np.copy(logl[i, iperm[asel]])
            prtemp = np.copy(lnprob[i, iperm[asel]])

            p[i, iperm[asel], :] = p[i - 1, i1perm[asel], :]
            logl[i, iperm[asel]] = logl[i - 1, i1perm[asel]]
            lnprob[i, iperm[asel]] = lnprob[i - 1, i1perm[asel]] \
                - dbeta * logl[i - 1, i1perm[asel]]

            p[i - 1, i1perm[asel], :] = ptemp
            logl[i - 1, i1perm[asel]] = ltemp
            lnprob[i - 1, i1perm[asel]] = prtemp + dbeta * ltemp

        return p, lnprob, logl

    def thermodynamic_integration_log_evidence(self, logls=None, fburnin=0.1):
        """
        Thermodynamic integration estimate of the evidence.

        :param logls: (optional) The log-likelihoods to use for
            computing the thermodynamic evidence.  If ``None`` (the
            default), use the stored log-likelihoods in the sampler.
            Should be of shape ``(Ntemps, Nwalkers, Nsamples)``.

        :param fburnin: (optional)
            The fraction of the chain to discard as burnin samples; only the
            final ``1-fburnin`` fraction of the samples will be used to
            compute the evidence; the default is ``fburnin = 0.1``.

        :return ``(lnZ, dlnZ)``: Returns an estimate of the
            log-evidence and the error associated with the finite
            number of temperatures at which the posterior has been
            sampled.

        The evidence is the integral of the un-normalized posterior
        over all of parameter space:

        .. math::

            Z \\equiv \\int d\\theta \\, l(\\theta) p(\\theta)

        Thermodynamic integration is a technique for estimating the
        evidence integral using information from the chains at various
        temperatures.  Let

        .. math::

            Z(\\beta) = \\int d\\theta \\, l^\\beta(\\theta) p(\\theta)

        Then

        .. math::

            \\frac{d \\ln Z}{d \\beta}
            = \\frac{1}{Z(\\beta)} \\int d\\theta l^\\beta p \\ln l
            = \\left \\langle \\ln l \\right \\rangle_\\beta

        so

        .. math::

            \\ln Z(\\beta = 1)
            = \\int_0^1 d\\beta \\left \\langle \\ln l \\right\\rangle_\\beta

        By computing the average of the log-likelihood at the
        different temperatures, the sampler can approximate the above
        integral.
        """
        if logls is None:
            return self.thermodynamic_integration_log_evidence(
                logls=self.lnlikelihood, fburnin=fburnin)
        else:
            # The error estimate compares the full ladder against a
            # half-resolution ladder (every other temperature).
            betas = np.concatenate((self.betas, np.array([0])))
            betas2 = np.concatenate((self.betas[::2], np.array([0])))

            istart = int(logls.shape[2] * fburnin + 0.5)

            mean_logls = np.mean(np.mean(logls, axis=1)[:, istart:], axis=1)
            mean_logls2 = mean_logls[::2]

            lnZ = -np.dot(mean_logls, np.diff(betas))
            lnZ2 = -np.dot(mean_logls2, np.diff(betas2))

            return lnZ, np.abs(lnZ - lnZ2)

    @property
    def betas(self):
        """
        Returns the sequence of inverse temperatures in the ladder.
        """
        return self._betas

    @property
    def chain(self):
        """
        Returns the stored chain of samples; shape ``(Ntemps,
        Nwalkers, Nsteps, Ndim)``.
        """
        return self._chain

    @property
    def flatchain(self):
        """Returns the stored chain, but flattened along the walker axis, so
        of shape ``(Ntemps, Nwalkers*Nsteps, Ndim)``.
        """
        s = self.chain.shape
        return self._chain.reshape((s[0], -1, s[3]))

    @property
    def lnprobability(self):
        """
        Matrix of lnprobability values; shape ``(Ntemps, Nwalkers, Nsteps)``.
        """
        return self._lnprob

    @property
    def lnlikelihood(self):
        """
        Matrix of ln-likelihood values; shape ``(Ntemps, Nwalkers, Nsteps)``.
        """
        return self._lnlikelihood

    @property
    def tswap_acceptance_fraction(self):
        """
        Returns an array of accepted temperature swap fractions for
        each temperature; shape ``(ntemps, )``.
        """
        return self.nswap_accepted / self.nswap

    @property
    def acceptance_fraction(self):
        """
        Matrix of shape ``(Ntemps, Nwalkers)`` detailing the
        acceptance fraction for each walker.
        """
        return self.nprop_accepted / self.nprop

    @property
    def acor(self):
        """
        Returns a matrix of autocorrelation lengths for each
        parameter in each temperature of shape ``(Ntemps, Ndim)``.
        """
        return self.get_autocorr_time()

    def get_autocorr_time(self, **kwargs):
        """
        Returns a matrix of autocorrelation lengths for each
        parameter in each temperature of shape ``(Ntemps, Ndim)``.

        Any arguments will be passed to :func:`autocorr.integrated_time`.
        """
        acors = np.zeros((self.ntemps, self.dim))

        for i in range(self.ntemps):
            x = np.mean(self._chain[i, :, :, :], axis=0)
            acors[i, :] = autocorr.integrated_time(x, **kwargs)

        return acors
Minor assertion message change
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["PTSampler"]
import numpy as np
import numpy.random as nr
import multiprocessing as multi
from . import autocorr
from .sampler import Sampler
def default_beta_ladder(ndim, ntemps=None, Tmax=None):
    r"""Returns a ladder of :math:`\beta \equiv 1/T` with temperatures
    geometrically spaced with spacing chosen so that a Gaussian
    posterior would have a 0.25 temperature swap acceptance rate.

    :param ndim:
        The number of dimensions in the parameter space.

    :param ntemps: (optional)
        If set, the number of temperatures to use.  If ``None``, the
        ``Tmax`` argument must be given, and the number of
        temperatures is chosen so that the highest temperature is
        greater than ``Tmax``.

    :param Tmax: (optional)
        If ``ntemps`` is not given, this argument controls the number
        of temperatures.  Temperatures are chosen according to the
        spacing criteria until the maximum temperature exceeds
        ``Tmax``.

    :raises ValueError: if neither ``ntemps`` nor ``Tmax`` is given.
    """
    # NOTE: the docstring is a raw string on purpose -- in the original,
    # "\beta" silently contained a backspace escape sequence.
    #
    # Empirically derived geometric temperature steps, indexed by
    # (ndim - 1), tuned for a 0.25 swap acceptance rate.
    tstep = np.array([25.2741, 7., 4.47502, 3.5236, 3.0232,
                      2.71225, 2.49879, 2.34226, 2.22198, 2.12628,
                      2.04807, 1.98276, 1.92728, 1.87946, 1.83774,
                      1.80096, 1.76826, 1.73895, 1.7125, 1.68849,
                      1.66657, 1.64647, 1.62795, 1.61083, 1.59494,
                      1.58014, 1.56632, 1.55338, 1.54123, 1.5298,
                      1.51901, 1.50881, 1.49916, 1.49, 1.4813,
                      1.47302, 1.46512, 1.45759, 1.45039, 1.4435,
                      1.4369, 1.43056, 1.42448, 1.41864, 1.41302,
                      1.40761, 1.40239, 1.39736, 1.3925, 1.38781,
                      1.38327, 1.37888, 1.37463, 1.37051, 1.36652,
                      1.36265, 1.35889, 1.35524, 1.3517, 1.34825,
                      1.3449, 1.34164, 1.33847, 1.33538, 1.33236,
                      1.32943, 1.32656, 1.32377, 1.32104, 1.31838,
                      1.31578, 1.31325, 1.31076, 1.30834, 1.30596,
                      1.30364, 1.30137, 1.29915, 1.29697, 1.29484,
                      1.29275, 1.29071, 1.2887, 1.28673, 1.2848,
                      1.28291, 1.28106, 1.27923, 1.27745, 1.27569,
                      1.27397, 1.27227, 1.27061, 1.26898, 1.26737,
                      1.26579, 1.26424, 1.26271, 1.26121,
                      1.25973])
    dmax = tstep.shape[0]

    if ndim > dmax:
        # An approximation to the temperature step at large dimension.
        tstep = 1.0 + 2.0*np.sqrt(np.log(4.0))/np.sqrt(ndim)
    else:
        tstep = tstep[ndim-1]

    if ntemps is None and Tmax is None:
        raise ValueError('must specify one of ``ntemps`` and ``Tmax``')
    elif ntemps is None:
        # Smallest ladder whose top temperature exceeds Tmax.
        ntemps = int(np.log(Tmax)/np.log(tstep)+2)

    return np.exp(np.linspace(0, -(ntemps-1)*np.log(tstep), ntemps))
class PTLikePrior(object):
    """
    Wrapper class for logl and logp.

    Bundles the log-likelihood and log-prior callables, together with
    their extra positional and keyword arguments, into a single callable
    that can be handed to a pool's ``map``.
    """

    def __init__(self, logl, logp, loglargs=None, logpargs=None,
                 loglkwargs=None, logpkwargs=None):
        self.logl = logl
        self.logp = logp
        # Use None sentinels instead of mutable defaults ([], {}) so that
        # instances never share argument containers.
        self.loglargs = [] if loglargs is None else loglargs
        self.logpargs = [] if logpargs is None else logpargs
        self.loglkwargs = {} if loglkwargs is None else loglkwargs
        self.logpkwargs = {} if logpkwargs is None else logpkwargs

    def __call__(self, x):
        """Return ``(logl(x), logp(x))`` for position ``x``."""
        lp = self.logp(x, *self.logpargs, **self.logpkwargs)

        # Outside the prior support: skip the (possibly expensive)
        # likelihood evaluation entirely.
        if lp == float('-inf'):
            return lp, lp

        return self.logl(x, *self.loglargs, **self.loglkwargs), lp
class PTSampler(Sampler):
"""
A parallel-tempered ensemble sampler, using :class:`EnsembleSampler`
for sampling within each parallel chain.
:param ntemps:
The number of temperatures. Can be ``None``, in which case
the ``Tmax`` argument sets the maximum temperature.
:param nwalkers:
The number of ensemble walkers at each temperature.
:param dim:
The dimension of parameter space.
:param logl:
The log-likelihood function.
:param logp:
The log-prior function.
:param threads: (optional)
The number of parallel threads to use in sampling.
:param pool: (optional)
Alternative to ``threads``. Any object that implements a
``map`` method compatible with the built-in ``map`` will do
here. For example, :class:`multi.Pool` will do.
:param betas: (optional)
Array giving the inverse temperatures, :math:`\\beta=1/T`,
used in the ladder. The default is chosen so that a Gaussian
posterior in the given number of dimensions will have a 0.25
tswap acceptance rate.
:param a: (optional)
Proposal scale factor.
:param Tmax: (optional)
Maximum temperature for the ladder. If ``ntemps`` is
``None``, this argument is used to set the temperature ladder.
:param loglargs: (optional)
Positional arguments for the log-likelihood function.
:param logpargs: (optional)
Positional arguments for the log-prior function.
:param loglkwargs: (optional)
Keyword arguments for the log-likelihood function.
:param logpkwargs: (optional)
Keyword arguments for the log-prior function.
"""
def __init__(self, ntemps, nwalkers, dim, logl, logp, threads=1,
pool=None, betas=None, a=2.0, Tmax=None, loglargs=[], logpargs=[],
loglkwargs={}, logpkwargs={}):
self.logl = logl
self.logp = logp
self.a = a
self.loglargs = loglargs
self.logpargs = logpargs
self.loglkwargs = loglkwargs
self.logpkwargs = logpkwargs
self.nwalkers = nwalkers
self.dim = dim
if betas is None:
self._betas = default_beta_ladder(self.dim, ntemps=ntemps, Tmax=Tmax)
else:
self._betas = betas
self.ntemps = self.betas.shape[0]
assert self.nwalkers % 2 == 0, \
"The number of walkers must be even."
assert self.nwalkers >= 2*self.dim, \
"The number of walkers must be greater than or equal to 2*dimension."
self._chain = None
self._lnprob = None
self._lnlikelihood = None
self.nswap = np.zeros(self.ntemps, dtype=np.float)
self.nswap_accepted = np.zeros(self.ntemps, dtype=np.float)
self.nprop = np.zeros((self.ntemps, self.nwalkers), dtype=np.float)
self.nprop_accepted = np.zeros((self.ntemps, self.nwalkers),
dtype=np.float)
self.pool = pool
if threads > 1 and pool is None:
self.pool = multi.Pool(threads)
def reset(self):
"""
Clear the ``chain``, ``lnprobability``, ``lnlikelihood``,
``acceptance_fraction``, ``tswap_acceptance_fraction`` stored
properties.
"""
self.nswap = np.zeros(self.ntemps, dtype=np.float)
self.nswap_accepted = np.zeros(self.ntemps, dtype=np.float)
self.nprop = np.zeros((self.ntemps, self.nwalkers), dtype=np.float)
self.nprop_accepted = np.zeros((self.ntemps, self.nwalkers),
dtype=np.float)
self._chain = None
self._lnprob = None
self._lnlikelihood = None
def sample(self, p0, lnprob0=None, lnlike0=None, iterations=1,
thin=1, storechain=True):
"""
Advance the chains ``iterations`` steps as a generator.
:param p0:
The initial positions of the walkers. Shape should be
``(ntemps, nwalkers, dim)``.
:param lnprob0: (optional)
The initial posterior values for the ensembles. Shape
``(ntemps, nwalkers)``.
:param lnlike0: (optional)
The initial likelihood values for the ensembles. Shape
``(ntemps, nwalkers)``.
:param iterations: (optional)
The number of iterations to preform.
:param thin: (optional)
The number of iterations to perform between saving the
state to the internal chain.
:param storechain: (optional)
If ``True`` store the iterations in the ``chain``
property.
At each iteration, this generator yields
* ``p``, the current position of the walkers.
* ``lnprob`` the current posterior values for the walkers.
* ``lnlike`` the current likelihood values for the walkers.
"""
p = np.copy(np.array(p0))
# If we have no lnprob or logls compute them
if lnprob0 is None or lnlike0 is None:
fn = PTLikePrior(self.logl, self.logp, self.loglargs,
self.logpargs, self.loglkwargs, self.logpkwargs)
if self.pool is None:
results = list(map(fn, p.reshape((-1, self.dim))))
else:
results = list(self.pool.map(fn, p.reshape((-1, self.dim))))
logls = np.array([r[0] for r in results]).reshape((self.ntemps,
self.nwalkers))
logps = np.array([r[1] for r in results]).reshape((self.ntemps,
self.nwalkers))
lnlike0 = logls
lnprob0 = logls * self.betas.reshape((self.ntemps, 1)) + logps
lnprob = lnprob0
logl = lnlike0
# Expand the chain in advance of the iterations
if storechain:
nsave = iterations // thin
if self._chain is None:
isave = 0
self._chain = np.zeros((self.ntemps, self.nwalkers, nsave,
self.dim))
self._lnprob = np.zeros((self.ntemps, self.nwalkers, nsave))
self._lnlikelihood = np.zeros((self.ntemps, self.nwalkers,
nsave))
else:
isave = self._chain.shape[2]
self._chain = np.concatenate((self._chain,
np.zeros((self.ntemps,
self.nwalkers,
nsave, self.dim))),
axis=2)
self._lnprob = np.concatenate((self._lnprob,
np.zeros((self.ntemps,
self.nwalkers,
nsave))),
axis=2)
self._lnlikelihood = np.concatenate((self._lnlikelihood,
np.zeros((self.ntemps,
self.nwalkers,
nsave))),
axis=2)
for i in range(iterations):
for j in [0, 1]:
jupdate = j
jsample = (j + 1) % 2
pupdate = p[:, jupdate::2, :]
psample = p[:, jsample::2, :]
zs = np.exp(np.random.uniform(low=-np.log(self.a), high=np.log(self.a), size=(self.ntemps, self.nwalkers//2)))
qs = np.zeros((self.ntemps, self.nwalkers//2, self.dim))
for k in range(self.ntemps):
js = np.random.randint(0, high=self.nwalkers // 2,
size=self.nwalkers // 2)
qs[k, :, :] = psample[k, js, :] + zs[k, :].reshape(
(self.nwalkers // 2, 1)) * (pupdate[k, :, :] -
psample[k, js, :])
fn = PTLikePrior(self.logl, self.logp, self.loglargs,
self.logpargs, self.loglkwargs,
self.logpkwargs)
if self.pool is None:
results = list(map(fn, qs.reshape((-1, self.dim))))
else:
results = list(self.pool.map(fn, qs.reshape((-1,
self.dim))))
qslogls = np.array([r[0] for r in results]).reshape(
(self.ntemps, self.nwalkers//2))
qslogps = np.array([r[1] for r in results]).reshape(
(self.ntemps, self.nwalkers//2))
qslnprob = qslogls * self.betas.reshape((self.ntemps, 1)) \
+ qslogps
logpaccept = self.dim*np.log(zs) + qslnprob \
- lnprob[:, jupdate::2]
logrs = np.log(np.random.uniform(low=0.0, high=1.0,
size=(self.ntemps,
self.nwalkers//2)))
accepts = logrs < logpaccept
accepts = accepts.flatten()
pupdate.reshape((-1, self.dim))[accepts, :] = \
qs.reshape((-1, self.dim))[accepts, :]
lnprob[:, jupdate::2].reshape((-1,))[accepts] = \
qslnprob.reshape((-1,))[accepts]
logl[:, jupdate::2].reshape((-1,))[accepts] = \
qslogls.reshape((-1,))[accepts]
accepts = accepts.reshape((self.ntemps, self.nwalkers//2))
self.nprop[:, jupdate::2] += 1.0
self.nprop_accepted[:, jupdate::2] += accepts
p, lnprob, logl = self._temperature_swaps(p, lnprob, logl)
if (i + 1) % thin == 0:
if storechain:
self._chain[:, :, isave, :] = p
self._lnprob[:, :, isave, ] = lnprob
self._lnlikelihood[:, :, isave] = logl
isave += 1
yield p, lnprob, logl
def _temperature_swaps(self, p, lnprob, logl):
"""
Perform parallel-tempering temperature swaps on the state
in ``p`` with associated ``lnprob`` and ``logl``.
"""
ntemps = self.ntemps
for i in range(ntemps - 1, 0, -1):
bi = self.betas[i]
bi1 = self.betas[i - 1]
dbeta = bi1 - bi
iperm = nr.permutation(self.nwalkers)
i1perm = nr.permutation(self.nwalkers)
raccept = np.log(nr.uniform(size=self.nwalkers))
paccept = dbeta * (logl[i, iperm] - logl[i - 1, i1perm])
self.nswap[i] += self.nwalkers
self.nswap[i - 1] += self.nwalkers
asel = (paccept > raccept)
nacc = np.sum(asel)
self.nswap_accepted[i] += nacc
self.nswap_accepted[i - 1] += nacc
ptemp = np.copy(p[i, iperm[asel], :])
ltemp = np.copy(logl[i, iperm[asel]])
prtemp = np.copy(lnprob[i, iperm[asel]])
p[i, iperm[asel], :] = p[i - 1, i1perm[asel], :]
logl[i, iperm[asel]] = logl[i - 1, i1perm[asel]]
lnprob[i, iperm[asel]] = lnprob[i - 1, i1perm[asel]] \
- dbeta * logl[i - 1, i1perm[asel]]
p[i - 1, i1perm[asel], :] = ptemp
logl[i - 1, i1perm[asel]] = ltemp
lnprob[i - 1, i1perm[asel]] = prtemp + dbeta * ltemp
return p, lnprob, logl
def thermodynamic_integration_log_evidence(self, logls=None, fburnin=0.1):
"""
Thermodynamic integration estimate of the evidence.
:param logls: (optional) The log-likelihoods to use for
computing the thermodynamic evidence. If ``None`` (the
default), use the stored log-likelihoods in the sampler.
Should be of shape ``(Ntemps, Nwalkers, Nsamples)``.
:param fburnin: (optional)
The fraction of the chain to discard as burnin samples; only the
final ``1-fburnin`` fraction of the samples will be used to
compute the evidence; the default is ``fburnin = 0.1``.
:return ``(lnZ, dlnZ)``: Returns an estimate of the
log-evidence and the error associated with the finite
number of temperatures at which the posterior has been
sampled.
The evidence is the integral of the un-normalized posterior
over all of parameter space:
.. math::
Z \\equiv \\int d\\theta \\, l(\\theta) p(\\theta)
Thermodymanic integration is a technique for estimating the
evidence integral using information from the chains at various
temperatures. Let
.. math::
Z(\\beta) = \\int d\\theta \\, l^\\beta(\\theta) p(\\theta)
Then
.. math::
\\frac{d \\ln Z}{d \\beta}
= \\frac{1}{Z(\\beta)} \\int d\\theta l^\\beta p \\ln l
= \\left \\langle \\ln l \\right \\rangle_\\beta
so
.. math::
\\ln Z(\\beta = 1)
= \\int_0^1 d\\beta \\left \\langle \\ln l \\right\\rangle_\\beta
By computing the average of the log-likelihood at the
difference temperatures, the sampler can approximate the above
integral.
"""
if logls is None:
return self.thermodynamic_integration_log_evidence(
logls=self.lnlikelihood, fburnin=fburnin)
else:
betas = np.concatenate((self.betas, np.array([0])))
betas2 = np.concatenate((self.betas[::2], np.array([0])))
istart = int(logls.shape[2] * fburnin + 0.5)
mean_logls = np.mean(np.mean(logls, axis=1)[:, istart:], axis=1)
mean_logls2 = mean_logls[::2]
lnZ = -np.dot(mean_logls, np.diff(betas))
lnZ2 = -np.dot(mean_logls2, np.diff(betas2))
return lnZ, np.abs(lnZ - lnZ2)
    @property
    def betas(self):
        """
        Returns the sequence of inverse temperatures in the ladder.
        """
        # Backing attribute is set elsewhere (constructor not visible here);
        # shape is presumably (Ntemps,) -- confirm against the ladder setup.
        return self._betas
    @property
    def chain(self):
        """
        Returns the stored chain of samples; shape ``(Ntemps,
        Nwalkers, Nsteps, Ndim)``.
        """
        # Raw per-temperature sample storage; read-only view for callers.
        return self._chain
@property
def flatchain(self):
"""Returns the stored chain, but flattened along the walker axis, so
of shape ``(Ntemps, Nwalkers*Nsteps, Ndim)``.
"""
s = self.chain.shape
return self._chain.reshape((s[0], -1, s[3]))
    @property
    def lnprobability(self):
        """
        Matrix of lnprobability values; shape ``(Ntemps, Nwalkers, Nsteps)``.
        """
        # Log-probability values recorded during sampling -- presumably the
        # tempered posterior at each temperature; confirm in the sampling loop.
        return self._lnprob
    @property
    def lnlikelihood(self):
        """
        Matrix of ln-likelihood values; shape ``(Ntemps, Nwalkers, Nsteps)``.
        """
        # Un-tempered log-likelihoods; consumed e.g. by
        # thermodynamic_integration_log_evidence().
        return self._lnlikelihood
@property
def tswap_acceptance_fraction(self):
"""
Returns an array of accepted temperature swap fractions for
each temperature; shape ``(ntemps, )``.
"""
return self.nswap_accepted / self.nswap
@property
def acceptance_fraction(self):
"""
Matrix of shape ``(Ntemps, Nwalkers)`` detailing the
acceptance fraction for each walker.
"""
return self.nprop_accepted / self.nprop
    @property
    def acor(self):
        """
        Returns a matrix of autocorrelation lengths for each
        parameter in each temperature of shape ``(Ntemps, Ndim)``.
        """
        # Convenience alias for get_autocorr_time() with default arguments.
        return self.get_autocorr_time()
def get_autocorr_time(self, **kwargs):
"""
Returns a matrix of autocorrelation lengths for each
parameter in each temperature of shape ``(Ntemps, Ndim)``.
Any arguments will be passed to :func:`autocorr.integrate_time`.
"""
acors = np.zeros((self.ntemps, self.dim))
for i in range(self.ntemps):
x = np.mean(self._chain[i, :, :, :], axis=0)
acors[i, :] = autocorr.integrated_time(x, **kwargs)
return acors
|
from __future__ import print_function, division, absolute_import
import math
import copy
import numbers
import sys
import os
import json
import types
import warnings
import numpy as np
import cv2
import imageio
import scipy.spatial.distance
import six
import six.moves as sm
import skimage.draw
import skimage.measure
import collections
from PIL import Image as PIL_Image, ImageDraw as PIL_ImageDraw, ImageFont as PIL_ImageFont
# Sentinel string meaning "all values" (e.g. all channels) -- presumably
# consumed by augmenters elsewhere in the package; verify at call sites.
ALL = "ALL"
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
# filepath to the quokka image, its annotations and depth map
QUOKKA_FP = os.path.join(FILE_DIR, "quokka.jpg")
QUOKKA_ANNOTATIONS_FP = os.path.join(FILE_DIR, "quokka_annotations.json")
QUOKKA_DEPTH_MAP_HALFRES_FP = os.path.join(FILE_DIR, "quokka_depth_map_halfres.png")
# bundled font, used by draw_text()
DEFAULT_FONT_FP = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    "DejaVuSans.ttf"
)
# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)
# seed value range used when deriving new random states (see new_random_state())
SEED_MIN_VALUE = 0
SEED_MAX_VALUE = 2**31-1 # use 2**31 instead of 2**32 here because 2**31 errored on some systems
# to check if a dtype instance is among these dtypes, use e.g. `dtype.type in NP_FLOAT_TYPES`
# do not just use `dtype in NP_FLOAT_TYPES` as that would fail
NP_FLOAT_TYPES = set(np.sctypes["float"])
NP_INT_TYPES = set(np.sctypes["int"])
NP_UINT_TYPES = set(np.sctypes["uint"])
# default backend name for image display -- presumably read by an imshow()
# helper elsewhere in this module; confirm usage
IMSHOW_BACKEND_DEFAULT = "matplotlib"
# interpolation identifiers accepted by imresize_many_images()
IMRESIZE_VALID_INTERPOLATIONS = ["nearest", "linear", "area", "cubic",
cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
def is_np_array(val):
    """
    Check whether ``val`` is a numpy ``ndarray``.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if ``val`` is a numpy array, False otherwise.
    """
    # Deliberately NOT isinstance(val, (np.ndarray, np.generic)):
    # np.generic would also match scalar numpy values, which are not arrays.
    return isinstance(val, np.ndarray)
def is_single_integer(val):
    """
    Check whether ``val`` is a single (scalar) integer.

    Booleans are explicitly excluded, even though ``bool`` is a
    subclass of ``int`` in Python.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if ``val`` is an integer, False otherwise.
    """
    if isinstance(val, bool):
        return False
    return isinstance(val, numbers.Integral)
def is_single_float(val):
    """
    Check whether ``val`` is a single (scalar) float.

    Integers and booleans are excluded; numpy float scalars count as
    floats.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if ``val`` is a float, False otherwise.
    """
    if isinstance(val, bool):
        return False
    if not isinstance(val, numbers.Real):
        return False
    # real but not integral => float-like
    return not isinstance(val, numbers.Integral)
def is_single_number(val):
    """
    Check whether ``val`` is a single (scalar) number, i.e. an integer
    or a float (booleans excluded).

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if ``val`` is a number, False otherwise.
    """
    # Real numbers split exactly into integers and floats; booleans are
    # excluded, matching is_single_integer()/is_single_float().
    return isinstance(val, numbers.Real) and not isinstance(val, bool)
def is_iterable(val):
    """
    Checks whether a variable is iterable.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is an iterable. Otherwise False.
    """
    # collections.Iterable was deprecated in Python 3.3 and removed in
    # Python 3.10; the ABC lives in collections.abc. Import locally with a
    # fallback so the Python 2 support implied by the __future__ imports
    # at the top of this file is preserved.
    try:
        from collections.abc import Iterable  # Python 3.3+
    except ImportError:  # Python 2
        from collections import Iterable
    return isinstance(val, Iterable)
# TODO convert to is_single_string() or rename is_single_integer/float/number()
def is_string(val):
    """
    Check whether ``val`` is a string.

    Uses ``six.string_types`` so both Python 2 (``basestring``) and
    Python 3 (``str``) are handled.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if ``val`` is a string, False otherwise.
    """
    string_classes = six.string_types
    return isinstance(val, string_classes)
def is_single_bool(val):
    """
    Checks whether a variable is a boolean.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a boolean. Otherwise False.
    """
    # isinstance() is the idiomatic check; since bool cannot be subclassed
    # this is exactly equivalent to the previous `type(val) == type(True)`.
    # Note that numpy bool scalars (np.bool_) are NOT Python bools and
    # hence return False, as before.
    return isinstance(val, bool)
def is_integer_array(val):
    """
    Check whether ``val`` is a numpy array with an integer dtype.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if ``val`` is an integer-dtype numpy array, False otherwise.
    """
    if not isinstance(val, np.ndarray):
        return False
    return issubclass(val.dtype.type, np.integer)
def is_float_array(val):
    """
    Check whether ``val`` is a numpy array with a floating point dtype.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if ``val`` is a float-dtype numpy array, False otherwise.
    """
    if not isinstance(val, np.ndarray):
        return False
    return issubclass(val.dtype.type, np.floating)
def is_callable(val):
    """
    Check whether ``val`` is callable, e.g. a function.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if ``val`` is callable, False otherwise.
    """
    version = sys.version_info
    # Python 3.0-3.2 lack the callable() builtin; fall back to probing
    # for __call__ there.
    if version[0] == 3 and version[1] <= 2:
        return hasattr(val, '__call__')
    return callable(val)
def is_generator(val):
    """
    Check whether ``val`` is a generator object.

    Note that generator *functions* and other iterables (lists, range
    objects, ...) are not generators and return False.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if ``val`` is a generator, False otherwise.
    """
    generator_class = types.GeneratorType
    return isinstance(val, generator_class)
def caller_name():
    """
    Return the name of the function that called the current function.

    Returns
    -------
    str
        The caller's name.
    """
    # frame 0 is caller_name itself, frame 1 is whoever called it
    caller_frame = sys._getframe(1)
    return caller_frame.f_code.co_name
def seed(seedval):
    """
    Set the seed of the library's global random state.

    All randomness in the library is drawn (directly or indirectly)
    from this global random state, so calling this makes results
    reproducible. Under special circumstances (e.g. when an augmenter
    is switched to deterministic mode) the global state is replaced by
    a local one that is itself derived from the global state.

    Parameters
    ----------
    seedval : int
        The seed to use.
    """
    CURRENT_RANDOM_STATE.seed(seedval)
def current_random_state():
    """
    Return the library's current/global random state.

    Returns
    -------
    numpy.random.RandomState
        The current/global random state.
    """
    return CURRENT_RANDOM_STATE
def new_random_state(seed=None, fully_random=False):
    """
    Create and return a new numpy random state.

    Parameters
    ----------
    seed : None or int, optional
        Optional seed value to use. The same datatypes are allowed as
        for ``numpy.random.RandomState(seed)``.
    fully_random : bool, optional
        If True and no seed is given, let numpy initialize the
        RandomState from OS entropy. If False (the default), a seed is
        sampled from the global random state instead, which is faster.

    Returns
    -------
    numpy.random.RandomState
        The new random state.
    """
    if seed is None and not fully_random:
        # Sampling a seed and calling RandomState(seed) is considerably
        # faster than an unseeded RandomState().
        seed = CURRENT_RANDOM_STATE.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, 1)[0]
    return np.random.RandomState(seed)
def dummy_random_state():
    """
    Return a dummy random state, always seeded with ``1``.

    Returns
    -------
    numpy.random.RandomState
        The new random state.
    """
    return np.random.RandomState(1)
def copy_random_state(random_state, force_copy=False):
    """
    Create a copy of a numpy random state.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        The random state to copy.
    force_copy : bool, optional
        If True, always return a copy. If False (the default), numpy's
        global random state (the ``np.random`` module) is returned
        as-is; everything else is copied.

    Returns
    -------
    numpy.random.RandomState
        The copied random state.
    """
    if random_state == np.random and not force_copy:
        # numpy's module-level state is returned uncopied unless forced
        return random_state
    # seed value is irrelevant -- the state is overwritten right away
    copied = np.random.RandomState(1)
    copied.set_state(random_state.get_state())
    return copied
def derive_random_state(random_state):
    """
    Derive a single new random state from an existing random state.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state from which to derive the new random state.

    Returns
    -------
    numpy.random.RandomState
        Derived random state.
    """
    # delegate to the plural variant and unwrap the single result
    return derive_random_states(random_state, n=1)[0]
# TODO use this everywhere instead of manual seed + create
def derive_random_states(random_state, n=1):
    """
    Derive ``n`` new random states from an existing random state.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state from which to derive the new random states.
    n : int, optional
        Number of random states to derive.

    Returns
    -------
    list of numpy.random.RandomState
        Derived random states.
    """
    # one base seed is drawn; the i-th derived state is seeded with base+i
    base_seed = random_state.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, 1)[0]
    states = []
    for offset in sm.xrange(n):
        states.append(new_random_state(base_seed + offset))
    return states
def forward_random_state(random_state):
    """
    Advance the internal state of a random state by one draw.

    This guarantees that subsequent calls on ``random_state`` produce
    new random values.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state to forward.
    """
    # drawing (and discarding) one sample mutates the generator state
    random_state.uniform()
def _quokka_normalize_extract(extract):
    """
    Generate a normalized rectangle to be extracted from the standard
    quokka image.

    Parameters
    ----------
    extract : 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Unnormalized representation of the image subarea to extract.

            * The string ``square`` selects the squared area
              ``(x: 0 to max 643, y: 0 to max 643)``.
            * A tuple is expected to contain four numbers ``x1``, ``y1``,
              ``x2``, ``y2``.
            * A BoundingBox is used as-is.
            * A BoundingBoxesOnImage must contain exactly one bounding
              box and have a shape matching the full image dimensions,
              i.e. ``(643, 960, *)``; its single box is then used.

    Returns
    -------
    bb : imgaug.BoundingBox
        Normalized representation of the area to extract from the
        standard quokka image.
    """
    if extract == "square":
        return BoundingBox(x1=0, y1=0, x2=643, y2=643)
    if isinstance(extract, tuple) and len(extract) == 4:
        x1, y1, x2, y2 = extract
        return BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2)
    if isinstance(extract, BoundingBox):
        return extract
    if isinstance(extract, BoundingBoxesOnImage):
        do_assert(len(extract.bounding_boxes) == 1)
        do_assert(extract.shape[0:2] == (643, 960))
        return extract.bounding_boxes[0]
    raise Exception(
        "Expected 'square' or tuple of four entries or BoundingBox or BoundingBoxesOnImage "
        + "for parameter 'extract', got %s." % (type(extract),)
    )
def _compute_resized_shape(from_shape, to_shape):
    """
    Computes the intended new shape of an image-like array after resizing.

    Parameters
    ----------
    from_shape : tuple or ndarray
        Old shape of the array. Usually expected to be a tuple of form ``(H, W)`` or ``(H, W, C)`` or
        alternatively an array with two or three dimensions.

    to_shape : None or tuple of ints or tuple of floats or int or float or ndarray
        New shape of the array.

            * If None, then `from_shape` will be used as the new shape.
            * If an int ``V``, then the new shape will be ``(V, V, [C])``, where ``C`` will be added if it
              is part of `from_shape`.
            * If a float ``V``, then the new shape will be ``(H*V, W*V, [C])``, where ``H`` and ``W`` are the old
              height/width.
            * If a tuple ``(H', W', [C'])`` of ints, then ``H'`` and ``W'`` will be used as the new height
              and width.
            * If a tuple ``(H', W', [C'])`` of floats (except ``C``), then ``H'`` and ``W'`` will
              be used as the new height and width.
            * If a numpy array, then the array's shape will be used.

    Returns
    -------
    to_shape_computed : tuple of int
        New shape.
    """
    # arrays may be handed in instead of shape tuples; use their shapes
    if is_np_array(from_shape):
        from_shape = from_shape.shape
    if is_np_array(to_shape):
        to_shape = to_shape.shape
    # start from the old shape; entries are overwritten below as needed
    to_shape_computed = list(from_shape)
    if to_shape is None:
        # no target given: keep the old shape
        pass
    elif isinstance(to_shape, tuple):
        do_assert(len(from_shape) in [2, 3])
        do_assert(len(to_shape) in [2, 3])
        # channel counts, when present on both sides, must agree;
        # a channel count only in to_shape is appended to the result
        if len(from_shape) == 3 and len(to_shape) == 3:
            do_assert(from_shape[2] == to_shape[2])
        elif len(to_shape) == 3:
            to_shape_computed.append(to_shape[2])
        do_assert(all([v is None or is_single_number(v) for v in to_shape[0:2]]),
        "Expected the first two entries in to_shape to be None or numbers, "
        + "got types %s." % (str([type(v) for v in to_shape[0:2]]),))
        # height/width: None keeps the source size, ints are absolute
        # pixel sizes, floats are relative scale factors
        for i, from_shape_i in enumerate(from_shape[0:2]):
            if to_shape[i] is None:
                to_shape_computed[i] = from_shape_i
            elif is_single_integer(to_shape[i]):
                to_shape_computed[i] = to_shape[i]
            else: # float
                to_shape_computed[i] = int(np.round(from_shape_i * to_shape[i]))
    elif is_single_integer(to_shape) or is_single_float(to_shape):
        # a single number applies to both height and width
        to_shape_computed = _compute_resized_shape(from_shape, (to_shape, to_shape))
    else:
        raise Exception("Expected to_shape to be None or ndarray or tuple of floats or tuple of ints or single int "
        + "or single float, got %s." % (type(to_shape),))
    return tuple(to_shape_computed)
def quokka(size=None, extract=None):
    """
    Return an image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        Size of the output image, passed to
        :func:`imgaug.imgaug.imresize_single_image`. Usually a tuple
        ``(H, W)`` of desired height and width. If None, the image is
        not resized.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea of the quokka image to extract:

            * If None, the whole image is used.
            * Otherwise interpreted as in
              :func:`imgaug._quokka_normalize_extract`.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    image = imageio.imread(QUOKKA_FP, pilmode="RGB")
    if extract is not None:
        rect = _quokka_normalize_extract(extract)
        image = rect.extract_from_image(image)
    if size is not None:
        target_shape = _compute_resized_shape(image.shape, size)
        image = imresize_single_image(image, target_shape[0:2])
    return image
def quokka_square(size=None):
    """
    Return a squared image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        Size of the output image, passed to
        :func:`imgaug.imgaug.imresize_single_image`. Usually a tuple
        ``(H, W)``. If None, the image is not resized.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    return quokka(size=size, extract="square")
def quokka_heatmap(size=None, extract=None):
    """
    Return a heatmap (here: depth map) for the standard example quokka image.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        See :func:`imgaug.quokka`.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        See :func:`imgaug.quokka`.

    Returns
    -------
    result : imgaug.HeatmapsOnImage
        Depth map as a heatmap object. Values close to 0.0 denote
        objects close to the camera; values close to 1.0 denote the
        objects furthest away (among all shown objects).
    """
    depth_rgb = imageio.imread(QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode="RGB")
    # stored file is half resolution; upscale to the full 643x960
    depth_rgb = imresize_single_image(depth_rgb, (643, 960), interpolation="cubic")
    if extract is not None:
        rect = _quokka_normalize_extract(extract)
        depth_rgb = rect.extract_from_image(depth_rgb)
    if size is None:
        size = depth_rgb.shape[0:2]
    target_shape = _compute_resized_shape(depth_rgb.shape, size)
    depth_rgb = imresize_single_image(depth_rgb, target_shape[0:2])
    # depth map was saved as 3-channel RGB; keep one channel, map to [0, 1]
    depth = depth_rgb[..., 0].astype(np.float32) / 255.0
    # depth map was saved with 0 being furthest away; invert
    depth = 1 - depth
    return HeatmapsOnImage(depth, shape=depth.shape[0:2] + (3,))
def quokka_segmentation_map(size=None, extract=None):
    """
    Return a segmentation map for the standard example quokka image.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        See :func:`imgaug.quokka`.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        See :func:`imgaug.quokka`.

    Returns
    -------
    result : imgaug.SegmentationMapOnImage
        Segmentation map object.
    """
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        annotations = json.load(f)

    # outline of the quokka, taken from the first polygon annotation
    points = annotations["polygons"][0]["keypoints"]
    xx = [point["x"] for point in points]
    yy = [point["y"] for point in points]

    # rasterize the polygon into a single-channel float mask
    img_seg = np.zeros((643, 960, 1), dtype=np.float32)
    rr, cc = skimage.draw.polygon(np.array(yy), np.array(xx), shape=img_seg.shape)
    img_seg[rr, cc] = 1.0

    if extract is not None:
        rect = _quokka_normalize_extract(extract)
        img_seg = rect.extract_from_image(img_seg)

    segmap = SegmentationMapOnImage(img_seg, shape=img_seg.shape[0:2] + (3,))

    if size is not None:
        shape_resized = _compute_resized_shape(img_seg.shape, size)
        segmap = segmap.resize(shape_resized[0:2])
        segmap.shape = tuple(shape_resized[0:2]) + (3,)

    return segmap
def quokka_keypoints(size=None, extract=None):
    """
    Return example keypoints on the standard example quokka image.

    The keypoints cover the eyes, ears, nose and paws.

    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the keypoints are placed. If
        None, positions on the original image are used. Floats lead to
        relative size changes, ints to absolute sizes in pixels.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.

    Returns
    -------
    kpsoi : imgaug.KeypointsOnImage
        Example keypoints on the quokka image.
    """
    offset_x, offset_y = 0, 0
    bb_extract = None
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1

    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        annotations = json.load(f)

    # keypoint coordinates are shifted into the extracted subarea
    keypoints = [Keypoint(x=kp["x"] - offset_x, y=kp["y"] - offset_y)
                 for kp in annotations["keypoints"]]

    if bb_extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)
    kpsoi = KeypointsOnImage(keypoints, shape=shape)

    if size is not None:
        kpsoi = kpsoi.on(_compute_resized_shape(shape, size))
    return kpsoi
def quokka_bounding_boxes(size=None, extract=None):
    """
    Return example bounding boxes on the standard example quokka image.

    Currently only a single bounding box is returned that covers the quokka.

    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the BBs are placed. If None,
        positions on the original image are used. Floats lead to
        relative size changes, ints to absolute sizes in pixels.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.

    Returns
    -------
    bbsoi : imgaug.BoundingBoxesOnImage
        Example BBs on the quokka image.
    """
    offset_x, offset_y = 0, 0
    bb_extract = None
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1

    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        annotations = json.load(f)

    # box coordinates are shifted into the extracted subarea
    bbs = [
        BoundingBox(
            x1=bb_dict["x1"] - offset_x,
            y1=bb_dict["y1"] - offset_y,
            x2=bb_dict["x2"] - offset_x,
            y2=bb_dict["y2"] - offset_y
        )
        for bb_dict in annotations["bounding_boxes"]
    ]

    if bb_extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)
    bbsoi = BoundingBoxesOnImage(bbs, shape=shape)

    if size is not None:
        bbsoi = bbsoi.on(_compute_resized_shape(shape, size))
    return bbsoi
def quokka_polygons(size=None, extract=None):
    """
    Return example polygons on the standard example quokka image.

    The result contains one polygon, covering the quokka's outline.

    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the polygons are placed. If
        None, positions on the original image are used. Floats lead to
        relative size changes, ints to absolute sizes in pixels.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or \
              imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.

    Returns
    -------
    psoi : imgaug.PolygonsOnImage
        Example polygons on the quokka image.
    """
    offset_x, offset_y = 0, 0
    bb_extract = None
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1

    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        annotations = json.load(f)

    # polygon point coordinates are shifted into the extracted subarea
    polygons = [
        Polygon([(kp["x"] - offset_x, kp["y"] - offset_y)
                 for kp in poly_json["keypoints"]])
        for poly_json in annotations["polygons"]
    ]

    if bb_extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)
    psoi = PolygonsOnImage(polygons, shape=shape)

    if size is not None:
        psoi = psoi.on(_compute_resized_shape(shape, size))
    return psoi
def angle_between_vectors(v1, v2):
    """
    Return the angle in radians between vectors `v1` and `v2`.

    Based on
    http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python

    Parameters
    ----------
    v1 : (N,) ndarray
        First vector.
    v2 : (N,) ndarray
        Second vector.

    Returns
    -------
    float
        Angle in radians.

    Examples
    --------
    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([0, 1, 0]))
    1.570796...

    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([1, 0, 0]))
    0.0

    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([-1, 0, 0]))
    3.141592...
    """
    norm1 = np.linalg.norm(v1)
    norm2 = np.linalg.norm(v2)
    # zero-length vectors are mapped to zero vectors instead of dividing by 0
    unit1 = (v1 / norm1) if norm1 > 0 else np.float32(v1) * 0
    unit2 = (v2 / norm2) if norm2 > 0 else np.float32(v2) * 0
    # clip to protect arccos against tiny floating point overshoots
    cos_angle = np.clip(np.dot(unit1, unit2), -1.0, 1.0)
    return np.arccos(cos_angle)
# TODO is this used anywhere?
def compute_line_intersection_point(x1, y1, x2, y2, x3, y3, x4, y4):
    """
    Compute the intersection point of two (infinite) lines.

    Adapted from https://stackoverflow.com/a/20679579 .

    Parameters
    ----------
    x1 : number
        x coordinate of the first point on line 1. (The line extends beyond this point.)
    y1 : number
        y coordinate of the first point on line 1. (The line extends beyond this point.)
    x2 : number
        x coordinate of the second point on line 1. (The line extends beyond this point.)
    y2 : number
        y coordinate of the second point on line 1. (The line extends beyond this point.)
    x3 : number
        x coordinate of the first point on line 2. (The line extends beyond this point.)
    y3 : number
        y coordinate of the first point on line 2. (The line extends beyond this point.)
    x4 : number
        x coordinate of the second point on line 2. (The line extends beyond this point.)
    y4 : number
        y coordinate of the second point on line 2. (The line extends beyond this point.)

    Returns
    -------
    tuple of number or bool
        The intersection point as a tuple ``(x, y)``, or False if the
        lines are parallel (no intersection point or infinitely many).
    """
    # homogeneous line coefficients (A, B, C) with A*x + B*y = C,
    # from the two points given for each line
    a1 = y1 - y2
    b1 = x2 - x1
    c1 = -(x1 * y2 - x2 * y1)
    a2 = y3 - y4
    b2 = x4 - x3
    c2 = -(x3 * y4 - x4 * y3)

    # Cramer's rule; zero determinant means the lines are parallel
    det = a1 * b2 - b1 * a2
    if det == 0:
        return False
    det_x = c1 * b2 - b1 * c2
    det_y = a1 * c2 - c1 * a2
    return det_x / det, det_y / det
# TODO replace by cv2.putText()?
def draw_text(img, y, x, text, color=(0, 255, 0), size=25):
    """
    Draw text on an image.

    Uses by default DejaVuSans as its font, which is included in this
    library.

    dtype support::

        * ``uint8``: yes; fully tested
        * ``uint16``: no
        * ``uint32``: no
        * ``uint64``: no
        * ``int8``: no
        * ``int16``: no
        * ``int32``: no
        * ``int64``: no
        * ``float16``: no
        * ``float32``: yes; not tested
        * ``float64``: no
        * ``float128``: no
        * ``bool``: no

        TODO check if other dtypes could be enabled

    Parameters
    ----------
    img : (H,W,3) ndarray
        The image array to draw text on.
        Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
    y : int
        y-coordinate (vertical) of the top left corner of the text.
    x : int
        x-coordinate (horizontal) of the top left corner of the text.
    text : str
        The text to draw.
    color : iterable of int, optional
        Color of the text to draw. For RGB-images this is expected to be an RGB color.
    size : int, optional
        Font size of the text to draw.

    Returns
    -------
    (H,W,3) ndarray
        Input image with text drawn on it.
    """
    do_assert(img.dtype in [np.uint8, np.float32])

    input_dtype = img.dtype
    if img.dtype == np.float32:
        img = img.astype(np.uint8)

    pil_img = PIL_Image.fromarray(img)
    font = PIL_ImageFont.truetype(DEFAULT_FONT_FP, size)
    draw = PIL_ImageDraw.Draw(pil_img)
    # PIL expects (x, y) order for the anchor point
    draw.text((x, y), text, fill=tuple(color), font=font)
    result = np.asarray(pil_img)

    # PIL/asarray returns read only array
    if not result.flags["WRITEABLE"]:
        try:
            # this seems to no longer work with np 1.16 (or was pillow updated?)
            result.setflags(write=True)
        except ValueError as ex:
            if "cannot set WRITEABLE flag to True of this array" in str(ex):
                result = np.copy(result)

    if result.dtype != input_dtype:
        result = result.astype(input_dtype)

    return result
# TODO rename sizes to size?
def imresize_many_images(images, sizes=None, interpolation=None):
    """
    Resize many images to a specified size.

    dtype support::

        * ``uint8``: yes; fully tested
        * ``uint16``: yes; tested
        * ``uint32``: no (1)
        * ``uint64``: no (2)
        * ``int8``: yes; tested (3)
        * ``int16``: yes; tested
        * ``int32``: limited; tested (4)
        * ``int64``: no (2)
        * ``float16``: yes; tested (5)
        * ``float32``: yes; tested
        * ``float64``: yes; tested
        * ``float128``: no (1)
        * ``bool``: yes; tested (6)

        - (1) rejected by ``cv2.imresize``
        - (2) results too inaccurate
        - (3) mapped internally to ``int16`` when interpolation!="nearest"
        - (4) only supported for interpolation="nearest", other interpolations lead to cv2 error
        - (5) mapped internally to ``float32``
        - (6) mapped internally to ``uint8``

    Parameters
    ----------
    images : (N,H,W,[C]) ndarray or list of (H,W,[C]) ndarray
        Array of the images to resize.
        Usually recommended to be of dtype uint8.

    sizes : float or iterable of int or iterable of float
        The new size of the images, given either as a fraction (a single float) or as
        a ``(height, width)`` tuple of two integers or as a ``(height fraction, width fraction)``
        tuple of two floats.

    interpolation : None or str or int, optional
        The interpolation to use during resize.
        If int, then expected to be one of:

            * ``cv2.INTER_NEAREST`` (nearest neighbour interpolation)
            * ``cv2.INTER_LINEAR`` (linear interpolation)
            * ``cv2.INTER_AREA`` (area interpolation)
            * ``cv2.INTER_CUBIC`` (cubic interpolation)

        If string, then expected to be one of:

            * ``nearest`` (identical to ``cv2.INTER_NEAREST``)
            * ``linear`` (identical to ``cv2.INTER_LINEAR``)
            * ``area`` (identical to ``cv2.INTER_AREA``)
            * ``cubic`` (identical to ``cv2.INTER_CUBIC``)

        If None, the interpolation will be chosen automatically. For size
        increases, area interpolation will be picked and for size decreases,
        linear interpolation will be picked.

    Returns
    -------
    result : (N,H',W',[C]) ndarray
        Array of the resized images.

    Examples
    --------
    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), 2.0)

    Converts 2 RGB images of height and width 16 to images of height and width 16*2 = 32.

    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (16, 32))

    Converts 2 RGB images of height and width 16 to images of height 16 and width 32.

    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (2.0, 4.0))

    Converts 2 RGB images of height and width 16 to images of height 32 and width 64.
    """
    # we just do nothing if the input contains zero images
    # one could also argue that an exception would be appropriate here
    if len(images) == 0:
        return images
    # verify that all input images have height/width > 0
    do_assert(
        all([image.shape[0] > 0 and image.shape[1] > 0 for image in images]),
        ("Cannot resize images, because at least one image has a height and/or width of zero. "
         + "Observed shapes were: %s.") % (str([image.shape for image in images]),)
    )
    # verify that sizes contains only values >0
    if is_single_number(sizes) and sizes <= 0:
        raise Exception(
            "Cannot resize to the target size %.8f, because the value is zero or lower than zero." % (sizes,))
    elif isinstance(sizes, tuple) and (sizes[0] <= 0 or sizes[1] <= 0):
        sizes_str = [
            "int %d" % (sizes[0],) if is_single_integer(sizes[0]) else "float %.8f" % (sizes[0],),
            "int %d" % (sizes[1],) if is_single_integer(sizes[1]) else "float %.8f" % (sizes[1],),
        ]
        sizes_str = "(%s, %s)" % (sizes_str[0], sizes_str[1])
        raise Exception(
            "Cannot resize to the target sizes %s. At least one value is zero or lower than zero." % (sizes_str,))
    # change after the validation to make the above error messages match the original input
    if is_single_number(sizes):
        sizes = (sizes, sizes)
    else:
        do_assert(len(sizes) == 2, "Expected tuple with exactly two entries, got %d entries." % (len(sizes),))
        do_assert(all([is_single_number(val) for val in sizes]),
                  "Expected tuple with two ints or floats, got types %s." % (str([type(val) for val in sizes]),))
    # if input is a list, call this function N times for N images
    # but check beforehand if all images have the same shape, then just convert to a single array and de-convert
    # afterwards
    if isinstance(images, list):
        nb_shapes = len(set([image.shape for image in images]))
        if nb_shapes == 1:
            return list(imresize_many_images(np.array(images), sizes=sizes, interpolation=interpolation))
        else:
            return [imresize_many_images(image[np.newaxis, ...], sizes=sizes, interpolation=interpolation)[0, ...]
                    for image in images]
    shape = images.shape
    do_assert(images.ndim in [3, 4], "Expected array of shape (N, H, W, [C]), got shape %s" % (str(shape),))
    nb_images = shape[0]
    im_height, im_width = shape[1], shape[2]
    nb_channels = shape[3] if images.ndim > 3 else None
    # resolve the target height/width; floats are relative scale factors
    height, width = sizes[0], sizes[1]
    height = int(np.round(im_height * height)) if is_single_float(height) else height
    width = int(np.round(im_width * width)) if is_single_float(width) else width
    # no-op resize: return a copy so callers can safely mutate the result
    if height == im_height and width == im_width:
        return np.copy(images)
    ip = interpolation
    do_assert(ip is None or ip in IMRESIZE_VALID_INTERPOLATIONS)
    if ip is None:
        # NOTE(review): AREA for enlarging and LINEAR for shrinking matches
        # the docstring above but is the opposite of the usual cv2
        # recommendation (AREA for shrinking) -- confirm this is intentional.
        if height > im_height or width > im_width:
            ip = cv2.INTER_AREA
        else:
            ip = cv2.INTER_LINEAR
    elif ip in ["nearest", cv2.INTER_NEAREST]:
        ip = cv2.INTER_NEAREST
    elif ip in ["linear", cv2.INTER_LINEAR]:
        ip = cv2.INTER_LINEAR
    elif ip in ["area", cv2.INTER_AREA]:
        ip = cv2.INTER_AREA
    else:  # if ip in ["cubic", cv2.INTER_CUBIC]:
        ip = cv2.INTER_CUBIC
    # TODO find more beautiful way to avoid circular imports
    from . import dtypes as iadt
    # dtype gates match the "dtype support" table in the docstring; the
    # nearest-neighbour path additionally allows int32
    if ip == cv2.INTER_NEAREST:
        iadt.gate_dtypes(images,
                         allowed=["bool", "uint8", "uint16", "int8", "int16", "int32", "float16", "float32", "float64"],
                         disallowed=["uint32", "uint64", "uint128", "uint256", "int64", "int128", "int256",
                                     "float96", "float128", "float256"],
                         augmenter=None)
    else:
        iadt.gate_dtypes(images,
                         allowed=["bool", "uint8", "uint16", "int8", "int16", "float16", "float32", "float64"],
                         disallowed=["uint32", "uint64", "uint128", "uint256", "int32", "int64", "int128", "int256",
                                     "float96", "float128", "float256"],
                         augmenter=None)
    result_shape = (nb_images, height, width)
    if nb_channels is not None:
        result_shape = result_shape + (nb_channels,)
    result = np.zeros(result_shape, dtype=images.dtype)
    for i, image in enumerate(images):
        input_dtype = image.dtype
        # map dtypes that cv2.resize cannot handle to supported surrogates
        # (see notes (3), (5), (6) in the docstring); undone further below
        if image.dtype.type == np.bool_:
            image = image.astype(np.uint8) * 255
        elif image.dtype.type == np.int8 and ip != cv2.INTER_NEAREST:
            image = image.astype(np.int16)
        elif image.dtype.type == np.float16:
            image = image.astype(np.float32)
        result_img = cv2.resize(image, (width, height), interpolation=ip)
        assert result_img.dtype == image.dtype
        # cv2 removes the channel axis if input was (H, W, 1)
        # we re-add it (but only if input was not (H, W))
        if len(result_img.shape) == 2 and nb_channels is not None and nb_channels == 1:
            result_img = result_img[:, :, np.newaxis]
        # undo the surrogate dtype mapping from above
        if input_dtype.type == np.bool_:
            result_img = result_img > 127
        elif input_dtype.type == np.int8 and ip != cv2.INTER_NEAREST:
            # TODO somehow better avoid circular imports here
            from . import dtypes as iadt
            result_img = iadt.restore_dtypes_(result_img, np.int8)
        elif input_dtype.type == np.float16:
            # TODO see above
            from . import dtypes as iadt
            result_img = iadt.restore_dtypes_(result_img, np.float16)
        result[i] = result_img
    return result
def imresize_single_image(image, sizes, interpolation=None):
    """
    Resize a single image.
    dtype support::
        See :func:`imgaug.imgaug.imresize_many_images`.
    Parameters
    ----------
    image : (H,W,C) ndarray or (H,W) ndarray
        Array of the image to resize.
        Usually recommended to be of dtype uint8.
    sizes : float or iterable of int or iterable of float
        See :func:`imgaug.imgaug.imresize_many_images`.
    interpolation : None or str or int, optional
        See :func:`imgaug.imgaug.imresize_many_images`.
    Returns
    -------
    out : (H',W',C) ndarray or (H',W') ndarray
        The resized image.
    """
    had_no_channels = (image.ndim == 2)
    if had_no_channels:
        # add a channel axis so that the batch-based routine can handle the image
        image = image[:, :, np.newaxis]
    do_assert(len(image.shape) == 3, image.shape)
    resized = imresize_many_images(image[np.newaxis, ...], sizes, interpolation=interpolation)
    if had_no_channels:
        # remove the temporarily added channel axis again
        return np.squeeze(resized[0, :, :, 0])
    return resized[0, ...]
# TODO add crop() function too
def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
    """
    Pad an image-like array on its top/right/bottom/left side.
    This function is a wrapper around :func:`numpy.pad`, using :func:`cv2.copyMakeBorder`
    as a faster backend where the mode and dtype allow it.
    dtype support::
        * ``uint8``: yes; fully tested (1)
        * ``uint16``: yes; fully tested (1)
        * ``uint32``: yes; fully tested (2) (3)
        * ``uint64``: yes; fully tested (2) (3)
        * ``int8``: yes; fully tested (1)
        * ``int16``: yes; fully tested (1)
        * ``int32``: yes; fully tested (1)
        * ``int64``: yes; fully tested (2) (3)
        * ``float16``: yes; fully tested (2) (3)
        * ``float32``: yes; fully tested (1)
        * ``float64``: yes; fully tested (1)
        * ``float128``: yes; fully tested (2) (3)
        * ``bool``: yes; tested (2) (3)
        - (1) Uses ``cv2`` if `mode` is one of: ``"constant"``, ``"edge"``, ``"reflect"``, ``"symmetric"``.
          Otherwise uses ``numpy``.
        - (2) Uses ``numpy``.
        - (3) Rejected by ``cv2``.
    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pad.
    top : int, optional
        Amount of pixels to add at the top side of the image. Must be 0 or greater.
    right : int, optional
        Amount of pixels to add at the right side of the image. Must be 0 or greater.
    bottom : int, optional
        Amount of pixels to add at the bottom side of the image. Must be 0 or greater.
    left : int, optional
        Amount of pixels to add at the left side of the image. Must be 0 or greater.
    mode : str, optional
        Padding mode to use. See :func:`numpy.pad` for details.
        In case of mode ``constant``, the parameter `cval` will be used as the ``constant_values``
        parameter to :func:`numpy.pad`.
        In case of mode ``linear_ramp``, the parameter `cval` will be used as the ``end_values``
        parameter to :func:`numpy.pad`.
    cval : number, optional
        Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
        The cval is expected to match the input array's dtype and value range.
    Returns
    -------
    arr_pad : (H',W') ndarray or (H',W',C) ndarray
        Padded array with height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
    """
    do_assert(arr.ndim in [2, 3])
    do_assert(top >= 0)
    do_assert(right >= 0)
    do_assert(bottom >= 0)
    do_assert(left >= 0)
    if top > 0 or right > 0 or bottom > 0 or left > 0:
        # maps numpy pad mode names to the corresponding cv2 border type;
        # None marks modes that cv2 does not support (numpy fallback is used)
        mapping_mode_np_to_cv2 = {
            "constant": cv2.BORDER_CONSTANT,
            "edge": cv2.BORDER_REPLICATE,
            "linear_ramp": None,
            "maximum": None,
            "mean": None,
            "median": None,
            "minimum": None,
            "reflect": cv2.BORDER_REFLECT_101,
            "symmetric": cv2.BORDER_REFLECT,
            "wrap": None,
            cv2.BORDER_CONSTANT: cv2.BORDER_CONSTANT,
            cv2.BORDER_REPLICATE: cv2.BORDER_REPLICATE,
            cv2.BORDER_REFLECT_101: cv2.BORDER_REFLECT_101,
            cv2.BORDER_REFLECT: cv2.BORDER_REFLECT
        }
        bad_mode_cv2 = mapping_mode_np_to_cv2.get(mode, None) is None
        # these datatypes all simply generate a "TypeError: src data type = X is not supported" error
        bad_datatype_cv2 = arr.dtype.name in ["uint32", "uint64", "int64", "float16", "float128", "bool"]
        # prefer the faster cv2 path whenever mode and dtype are supported by cv2
        if not bad_datatype_cv2 and not bad_mode_cv2:
            cval = float(cval) if arr.dtype.kind == "f" else int(cval)  # results in TypeError otherwise for np inputs
            # cv2.copyMakeBorder handles at most 4 channels per call
            if arr.ndim == 2 or arr.shape[2] <= 4:
                # without this, only the first channel is padded with the cval, all following channels with 0
                if arr.ndim == 3:
                    cval = tuple([cval] * arr.shape[2])
                arr_pad = cv2.copyMakeBorder(arr, top=top, bottom=bottom, left=left, right=right,
                                             borderType=mapping_mode_np_to_cv2[mode], value=cval)
                if arr.ndim == 3 and arr_pad.ndim == 2:
                    arr_pad = arr_pad[..., np.newaxis]
            else:
                # more than 4 channels: pad in chunks of up to 4 channels and
                # re-assemble the result along the channel axis
                result = []
                channel_start_idx = 0
                while channel_start_idx < arr.shape[2]:
                    arr_c = arr[..., channel_start_idx:channel_start_idx+4]
                    cval_c = tuple([cval] * arr_c.shape[2])
                    arr_pad_c = cv2.copyMakeBorder(arr_c, top=top, bottom=bottom, left=left, right=right,
                                                   borderType=mapping_mode_np_to_cv2[mode], value=cval_c)
                    arr_pad_c = np.atleast_3d(arr_pad_c)
                    result.append(arr_pad_c)
                    channel_start_idx += 4
                arr_pad = np.concatenate(result, axis=2)
        else:
            # numpy fallback for modes/dtypes that cv2 rejects
            paddings_np = [(top, bottom), (left, right)]  # paddings for 2d case
            if arr.ndim == 3:
                paddings_np.append((0, 0))  # add paddings for 3d case
            if mode == "constant":
                arr_pad = np.pad(arr, paddings_np, mode=mode, constant_values=cval)
            elif mode == "linear_ramp":
                arr_pad = np.pad(arr, paddings_np, mode=mode, end_values=cval)
            else:
                arr_pad = np.pad(arr, paddings_np, mode=mode)
        return arr_pad
    # nothing to pad -- still return a copy to keep the "always new array" contract
    return np.copy(arr)
# TODO allow shape as input instead of array
def compute_paddings_for_aspect_ratio(arr, aspect_ratio):
    """
    Compute the amount of pixels by which an array has to be padded to fulfill an aspect ratio.
    The aspect ratio is given as width/height.
    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.
    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array for which to compute pad amounts.
    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.
    Returns
    -------
    result : tuple of int
        Required padding amounts to reach the target aspect ratio, given as a tuple
        of the form ``(top, right, bottom, left)``.
    """
    do_assert(arr.ndim in [2, 3])
    do_assert(aspect_ratio > 0)
    height, width = arr.shape[0:2]
    do_assert(height > 0)
    aspect_ratio_current = width / height
    pad_top = pad_right = pad_bottom = pad_left = 0
    if aspect_ratio_current < aspect_ratio:
        # image is too narrow for the target ratio -> pad left/right
        missing_width = (aspect_ratio * height) - width
        pad_left = int(np.floor(missing_width / 2))
        pad_right = int(np.ceil(missing_width / 2))
    elif aspect_ratio_current > aspect_ratio:
        # image is too wide for the target ratio -> pad top/bottom
        missing_height = ((1/aspect_ratio) * width) - height
        pad_top = int(np.floor(missing_height / 2))
        pad_bottom = int(np.ceil(missing_height / 2))
    return pad_top, pad_right, pad_bottom, pad_left
def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0, return_pad_amounts=False):
    """
    Pad an image-like array on its sides so that it matches a target aspect ratio.
    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.
    dtype support::
        See :func:`imgaug.imgaug.pad`.
    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pad.
    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.
    mode : str, optional
        Padding mode to use. See :func:`numpy.pad` for details.
    cval : number, optional
        Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
    return_pad_amounts : bool, optional
        If False, then only the padded image will be returned. If True, a tuple with two
        entries will be returned, where the first entry is the padded image and the second
        entry are the amounts by which each image side was padded. These amounts are again a
        tuple of the form (top, right, bottom, left), with each value being an integer.
    Returns
    -------
    arr_padded : (H',W') ndarray or (H',W',C) ndarray
        Padded image as (H',W') or (H',W',C) ndarray, fulfilling the given aspect_ratio.
    tuple of int
        Amounts by which the image was padded on each side, given as a tuple ``(top, right, bottom, left)``.
        This tuple is only returned if `return_pad_amounts` was set to True.
        Otherwise only ``arr_padded`` is returned.
    """
    # derive the per-side pad amounts, then delegate the actual padding
    paddings = compute_paddings_for_aspect_ratio(arr, aspect_ratio)
    pad_top, pad_right, pad_bottom, pad_left = paddings
    arr_padded = pad(
        arr,
        top=pad_top,
        right=pad_right,
        bottom=pad_bottom,
        left=pad_left,
        mode=mode,
        cval=cval
    )
    if return_pad_amounts:
        return arr_padded, paddings
    return arr_padded
def pool(arr, block_size, func, cval=0, preserve_dtype=True):
    """
    Resize an array by pooling values within blocks.
    dtype support::
        * ``uint8``: yes; fully tested
        * ``uint16``: yes; tested
        * ``uint32``: yes; tested (2)
        * ``uint64``: no (1)
        * ``int8``: yes; tested
        * ``int16``: yes; tested
        * ``int32``: yes; tested (2)
        * ``int64``: no (1)
        * ``float16``: yes; tested
        * ``float32``: yes; tested
        * ``float64``: yes; tested
        * ``float128``: yes; tested (2)
        * ``bool``: yes; tested
        - (1) results too inaccurate (at least when using np.average as func)
        - (2) Note that scikit-image documentation says that the wrapped pooling function converts
          inputs to float64. Actual tests showed no indication of that happening (at least when
          using preserve_dtype=True).
    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. Ideally of datatype ``numpy.float64``.
    block_size : int or tuple of int
        Spatial size of each group of values to pool, aka kernel size.
        If a single integer, then a symmetric block of that size along height and width will be used.
        If a tuple of two values, it is assumed to be the block size along height and width of the image-like,
        with pooling happening per channel.
        If a tuple of three values, it is assumed to be the block size along height, width and channels.
    func : callable
        Function to apply to a given block in order to convert it to a single number,
        e.g. :func:`numpy.average`, :func:`numpy.min`, :func:`numpy.max`.
    cval : number, optional
        Value to use in order to pad the array along its border if the array cannot be divided
        by `block_size` without remainder.
    preserve_dtype : bool, optional
        Whether to convert the array back to the input datatype if it is changed away from
        that in the pooling process.
    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after pooling.
    """
    # TODO find better way to avoid circular import
    from . import dtypes as iadt
    iadt.gate_dtypes(arr,
                     allowed=["bool", "uint8", "uint16", "uint32", "int8", "int16", "int32",
                              "float16", "float32", "float64", "float128"],
                     disallowed=["uint64", "uint128", "uint256", "int64", "int128", "int256",
                                 "float256"],
                     augmenter=None)
    do_assert(arr.ndim in [2, 3])
    is_valid_int = is_single_integer(block_size) and block_size >= 1
    # BUGFIX: previously the raw list comprehension was used here; a non-empty
    # list is always truthy, so invalid entries such as 0, negative values or
    # non-integers slipped through the validation. all(...) performs the
    # intended per-entry check.
    is_valid_tuple = is_iterable(block_size) and len(block_size) in [2, 3] \
        and all([is_single_integer(val) and val >= 1 for val in block_size])
    do_assert(is_valid_int or is_valid_tuple)
    if is_single_integer(block_size):
        block_size = [block_size, block_size]
    if len(block_size) < arr.ndim:
        # only (height, width) given for a (H,W,C) array: pool per channel by
        # appending a block size of 1 for the channel axis
        block_size = list(block_size) + [1]
    input_dtype = arr.dtype
    arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func, cval=cval)
    if preserve_dtype and arr_reduced.dtype.type != input_dtype:
        # pooling may upcast (e.g. via np.average); restore the input dtype
        arr_reduced = arr_reduced.astype(input_dtype)
    return arr_reduced
def avg_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Resize an array using average pooling.
    dtype support::
        See :func:`imgaug.imgaug.pool`.
    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. See :func:`imgaug.pool` for details.
    block_size : int or tuple of int or tuple of int
        Size of each block of values to pool. See :func:`imgaug.pool` for details.
    cval : number, optional
        Padding value. See :func:`imgaug.pool` for details.
    preserve_dtype : bool, optional
        Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.
    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after average pooling.
    """
    # thin convenience wrapper: generic pooling with np.average as the reducer
    return pool(arr, block_size, np.average, cval=cval, preserve_dtype=preserve_dtype)
def max_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Resize an array using max-pooling.
    dtype support::
        See :func:`imgaug.imgaug.pool`.
    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. See :func:`imgaug.pool` for details.
    block_size : int or tuple of int or tuple of int
        Size of each block of values to pool. See :func:`imgaug.pool` for details.
    cval : number, optional
        Padding value. See :func:`imgaug.pool` for details.
    preserve_dtype : bool, optional
        Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.
    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after max-pooling.
    """
    # thin convenience wrapper: generic pooling with np.max as the reducer
    return pool(arr, block_size, np.max, cval=cval, preserve_dtype=preserve_dtype)
def draw_grid(images, rows=None, cols=None):
    """
    Converts multiple input images into a single image showing them in a grid.
    dtype support::
        * ``uint8``: yes; fully tested
        * ``uint16``: yes; fully tested
        * ``uint32``: yes; fully tested
        * ``uint64``: yes; fully tested
        * ``int8``: yes; fully tested
        * ``int16``: yes; fully tested
        * ``int32``: yes; fully tested
        * ``int64``: yes; fully tested
        * ``float16``: yes; fully tested
        * ``float32``: yes; fully tested
        * ``float64``: yes; fully tested
        * ``float128``: yes; fully tested
        * ``bool``: yes; fully tested
    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        The input images to convert to a grid.
    rows : None or int, optional
        The number of rows to show in the grid.
        If None, it will be automatically derived.
    cols : None or int, optional
        The number of cols to show in the grid.
        If None, it will be automatically derived.
    Returns
    -------
    grid : (H',W',3) ndarray
        Image of the generated grid.
    """
    nb_images = len(images)
    do_assert(nb_images > 0)
    if is_np_array(images):
        do_assert(images.ndim == 4)
    else:
        do_assert(is_iterable(images) and is_np_array(images[0]) and images[0].ndim == 3)
    # all images must share a single dtype, otherwise the grid array is ambiguous
    dts = [image.dtype.name for image in images]
    nb_dtypes = len(set(dts))
    do_assert(nb_dtypes == 1,
              "All images provided to draw_grid() must have the same dtype, "
              "found %d dtypes (%s)" % (nb_dtypes, ", ".join(dts)))
    # each grid cell is sized to fit the largest image
    cell_height = max([image.shape[0] for image in images])
    cell_width = max([image.shape[1] for image in images])
    channels = set([image.shape[2] for image in images])
    do_assert(len(channels) == 1,
              "All images are expected to have the same number of channels, "
              "but got channel set %s with length %d instead." % (str(channels), len(channels)))
    nb_channels = list(channels)[0]
    # derive missing grid dimensions
    if rows is None and cols is None:
        rows = cols = int(math.ceil(math.sqrt(nb_images)))
    elif rows is not None:
        cols = int(math.ceil(nb_images / rows))
    else:
        rows = int(math.ceil(nb_images / cols))
    do_assert(rows * cols >= nb_images)
    dt = images.dtype if is_np_array(images) else images[0].dtype
    grid = np.zeros((cell_height * rows, cell_width * cols, nb_channels), dtype=dt)
    # paste images row-major; smaller images are top-left aligned in their cell
    for cell_idx, image in enumerate(images):
        row_idx = cell_idx // cols
        col_idx = cell_idx % cols
        cell_y1 = cell_height * row_idx
        cell_x1 = cell_width * col_idx
        grid[cell_y1:cell_y1 + image.shape[0], cell_x1:cell_x1 + image.shape[1], :] = image
    return grid
def show_grid(images, rows=None, cols=None):
    """
    Converts the input images to a grid image and shows it in a new window.
    dtype support::
        minimum of (
            :func:`imgaug.imgaug.draw_grid`,
            :func:`imgaug.imgaug.imshow`
        )
    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        See :func:`imgaug.draw_grid`.
    rows : None or int, optional
        See :func:`imgaug.draw_grid`.
    cols : None or int, optional
        See :func:`imgaug.draw_grid`.
    """
    # render the grid, then delegate the display to imshow()
    imshow(draw_grid(images, rows=rows, cols=cols))
def imshow(image, backend=IMSHOW_BACKEND_DEFAULT):
    """
    Shows an image in a window.
    dtype support::
        * ``uint8``: yes; not tested
        * ``uint16``: ?
        * ``uint32``: ?
        * ``uint64``: ?
        * ``int8``: ?
        * ``int16``: ?
        * ``int32``: ?
        * ``int64``: ?
        * ``float16``: ?
        * ``float32``: ?
        * ``float64``: ?
        * ``float128``: ?
        * ``bool``: ?
    Parameters
    ----------
    image : (H,W,3) ndarray
        Image to show.
    backend : {'matplotlib', 'cv2'}, optional
        Library to use to show the image. May be either matplotlib or OpenCV ('cv2').
        OpenCV tends to be faster, but apparently causes more technical issues.
    """
    do_assert(backend in ["matplotlib", "cv2"], "Expected backend 'matplotlib' or 'cv2', got %s." % (backend,))
    if backend == "cv2":
        to_show = image
        if image.ndim == 3 and image.shape[2] in [3, 4]:
            # drop a potential alpha channel and flip RGB to BGR for OpenCV
            to_show = image[..., 0:3][..., ::-1]
        win_name = "imgaug-default-window"
        cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
        cv2.imshow(win_name, to_show)
        cv2.waitKey(0)
        cv2.destroyWindow(win_name)
    else:
        # import only when necessary (faster startup; optional dependency; less fragile -- see issue #225)
        import matplotlib.pyplot as plt
        dpi = 96
        height_inches = image.shape[0] / dpi
        # if the figure is too narrow, the footer may appear and make the fig suddenly wider (ugly)
        width_inches = max(image.shape[1] / dpi, 6)
        fig, ax = plt.subplots(figsize=(width_inches, height_inches), dpi=dpi)
        fig.canvas.set_window_title("imgaug.imshow(%s)" % (image.shape,))
        ax.imshow(image, cmap="gray")  # cmap is only activate for grayscale images
        plt.show()
def do_assert(condition, message="Assertion failed."):
    """
    Function that behaves equally to an `assert` statement, but raises an
    Exception.
    This is added because `assert` statements are removed in optimized code.
    It replaces `assert` statements throughout the library that should be
    kept even in optimized code.
    Parameters
    ----------
    condition : bool
        If False, an exception is raised.
    message : str, optional
        Error message.
    """
    if condition:
        return
    # stringify so that non-str messages (e.g. shape tuples) are accepted
    raise AssertionError(str(message))
class HooksImages(object):
    """
    Class to intervene with image augmentation runs.
    This is e.g. useful to dynamically deactivate some augmenters.
    Parameters
    ----------
    activator : None or callable, optional
        A function that gives permission to execute an augmenter.
        The expected interface is ``f(images, augmenter, parents, default)``,
        where ``images`` are the input images to augment, ``augmenter`` is the
        instance of the augmenter to execute, ``parents`` are previously
        executed augmenters and ``default`` is an expected default value to be
        returned if the activator function does not plan to make a decision
        for the given inputs.
    propagator : None or callable, optional
        A function that gives permission to propagate the augmentation further
        to the children of an augmenter. This happens after the activator.
        In theory, an augmenter may augment images itself (if allowed by the
        activator) and then execute child augmenters afterwards (if allowed by
        the propagator). If the activator returned False, the propagation step
        will never be executed.
        The expected interface is ``f(images, augmenter, parents, default)``,
        with all arguments having identical meaning to the activator.
    preprocessor : None or callable, optional
        A function to call before an augmenter performed any augmentations.
        The interface is ``f(images, augmenter, parents)``,
        with all arguments having identical meaning to the activator.
        It is expected to return the input images, optionally modified.
    postprocessor : None or callable, optional
        A function to call after an augmenter performed augmentations.
        The interface is the same as for the preprocessor.
    Examples
    --------
    >>> seq = iaa.Sequential([
    >>>     iaa.GaussianBlur(3.0, name="blur"),
    >>>     iaa.Dropout(0.05, name="dropout"),
    >>>     iaa.Affine(translate_px=-5, name="affine")
    >>> ])
    >>> images = [np.zeros((10, 10), dtype=np.uint8)]
    >>>
    >>> def activator(images, augmenter, parents, default):
    >>>     return False if augmenter.name in ["blur", "dropout"] else default
    >>>
    >>> seq_det = seq.to_deterministic()
    >>> images_aug = seq_det.augment_images(images)
    >>> heatmaps = [np.random.rand(*(3, 10, 10))]
    >>> heatmaps_aug = seq_det.augment_images(
    >>>     heatmaps,
    >>>     hooks=ia.HooksImages(activator=activator)
    >>> )
    This augments images and their respective heatmaps in the same way.
    The heatmaps however are only modified by Affine, not by GaussianBlur or
    Dropout.
    """
    def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None):
        # all hooks are optional; None means "no intervention"
        self.activator = activator
        self.propagator = propagator
        self.preprocessor = preprocessor
        self.postprocessor = postprocessor
    def is_activated(self, images, augmenter, parents, default):
        """
        Returns whether an augmenter may be executed.
        Returns
        -------
        bool
            If True, the augmenter may be executed. If False, it may not be executed.
        """
        hook = self.activator
        return default if hook is None else hook(images, augmenter, parents, default)
    def is_propagating(self, images, augmenter, parents, default):
        """
        Returns whether an augmenter may call its children to augment an
        image. This is independent of the augmenter itself possible changing
        the image, without calling its children. (Most (all?) augmenters with
        children currently dont perform any changes themselves.)
        Returns
        -------
        bool
            If True, the augmenter may be propagate to its children. If False, it may not.
        """
        hook = self.propagator
        return default if hook is None else hook(images, augmenter, parents, default)
    def preprocess(self, images, augmenter, parents):
        """
        A function to be called before the augmentation of images starts (per augmenter).
        Returns
        -------
        (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.
        """
        hook = self.preprocessor
        return images if hook is None else hook(images, augmenter, parents)
    def postprocess(self, images, augmenter, parents):
        """
        A function to be called after the augmentation of images was
        performed.
        Returns
        -------
        (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.
        """
        hook = self.postprocessor
        return images if hook is None else hook(images, augmenter, parents)
class HooksHeatmaps(HooksImages):
    """
    Class to intervene with heatmap augmentation runs.
    This is e.g. useful to dynamically deactivate some augmenters.
    This class is currently the same as the one for images. This may or may
    not change in the future.
    """
class HooksKeypoints(HooksImages):
    """
    Class to intervene with keypoint augmentation runs.
    This is e.g. useful to dynamically deactivate some augmenters.
    This class is currently the same as the one for images. This may or may
    not change in the future.
    """
def compute_geometric_median(X, eps=1e-5):
    """
    Estimate the geometric median of points in 2D.
    Code from https://stackoverflow.com/a/30305181
    Parameters
    ----------
    X : (N,2) ndarray
        Points in 2D. Second axis must be given in xy-form.
    eps : float, optional
        Distance threshold when to return the median.
    Returns
    -------
    (2,) ndarray
        Geometric median as xy-coordinate.
    """
    # Weiszfeld's algorithm: iteratively refine an estimate, starting at the mean
    estimate = np.mean(X, 0)
    while True:
        dists = scipy.spatial.distance.cdist(X, [estimate])
        mask_nonzero = (dists != 0)[:, 0]
        inv_dists = 1 / dists[mask_nonzero]
        inv_dists_sum = np.sum(inv_dists)
        weights = inv_dists / inv_dists_sum
        weighted_mean = np.sum(weights * X[mask_nonzero], 0)
        nb_zeros = len(X) - np.sum(mask_nonzero)
        if nb_zeros == 0:
            candidate = weighted_mean
        elif nb_zeros == len(X):
            # estimate coincides with every point -> it is the median
            return estimate
        else:
            # some points coincide with the estimate; dampen the update
            R = (weighted_mean - estimate) * inv_dists_sum
            r = np.linalg.norm(R)
            rinv = 0 if r == 0 else nb_zeros/r
            candidate = max(0, 1-rinv)*weighted_mean + min(1, rinv)*estimate
        if scipy.spatial.distance.euclidean(estimate, candidate) < eps:
            return candidate
        estimate = candidate
class Keypoint(object):
    """
    A single keypoint (aka landmark) on an image.
    Parameters
    ----------
    x : number
        Coordinate of the keypoint on the x axis.
    y : number
        Coordinate of the keypoint on the y axis.
    """
    def __init__(self, x, y):
        self.x = x
        self.y = y
    @property
    def x_int(self):
        """
        Return the keypoint's x-coordinate, rounded to the closest integer.
        Returns
        -------
        result : int
            Keypoint's x-coordinate, rounded to the closest integer.
        """
        return int(np.round(self.x))
    @property
    def y_int(self):
        """
        Return the keypoint's y-coordinate, rounded to the closest integer.
        Returns
        -------
        result : int
            Keypoint's y-coordinate, rounded to the closest integer.
        """
        return int(np.round(self.y))
    def project(self, from_shape, to_shape):
        """
        Project the keypoint onto a new position on a new image.
        E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
        and y=(20 of 100 pixels) and is projected onto a new image with
        size (width=200, height=200), its new position will be (20, 40).
        This is intended for cases where the original image is resized.
        It cannot be used for more complex changes (e.g. padding, cropping).
        Parameters
        ----------
        from_shape : tuple of int
            Shape of the original image. (Before resize.)
        to_shape : tuple of int
            Shape of the new image. (After resize.)
        Returns
        -------
        imgaug.Keypoint
            Keypoint object with new coordinates.
        """
        if from_shape[0:2] == to_shape[0:2]:
            # same height/width -> nothing to rescale
            return self.deepcopy(x=self.x, y=self.y)
        # avoid division by zeros
        # TODO add this to other project() functions too
        assert all([v > 0 for v in from_shape[0:2]]), \
            "Got invalid from_shape %s in Keypoint.project()" % (
                str(from_shape),)
        if any([v <= 0 for v in to_shape[0:2]]):
            import warnings
            warnings.warn("Got invalid to_shape %s in Keypoint.project()" % (
                str(to_shape),))
        from_height, from_width = from_shape[0:2]
        to_height, to_width = to_shape[0:2]
        x_new = (self.x / from_width) * to_width
        y_new = (self.y / from_height) * to_height
        return self.deepcopy(x=x_new, y=y_new)
    def shift(self, x=0, y=0):
        """
        Move the keypoint around on an image.
        Parameters
        ----------
        x : number, optional
            Move by this value on the x axis.
        y : number, optional
            Move by this value on the y axis.
        Returns
        -------
        imgaug.Keypoint
            Keypoint object with new coordinates.
        """
        return self.deepcopy(x=self.x + x, y=self.y + y)
    def generate_similar_points_manhattan(self, nb_steps, step_size, return_array=False):
        """
        Generate nearby points to this keypoint based on manhattan distance.
        To generate the first neighbouring points, a distance of S (step size) is moved from the
        center point (this keypoint) to the top, right, bottom and left, resulting in four new
        points. From these new points, the pattern is repeated. Overlapping points are ignored.
        The resulting points have a shape similar to a square rotated by 45 degrees.
        Parameters
        ----------
        nb_steps : int
            The number of steps to move from the center point. nb_steps=1 results in a total of
            5 output points (1 center point + 4 neighbours).
        step_size : number
            The step size to move from every point to its neighbours.
        return_array : bool, optional
            Whether to return the generated points as a list of keypoints or an array
            of shape ``(N,2)``, where ``N`` is the number of generated points and the second axis contains
            the x- (first value) and y- (second value) coordinates.
        Returns
        -------
        points : list of imgaug.Keypoint or (N,2) ndarray
            If return_array was False, then a list of Keypoint.
            Otherwise a numpy array of shape ``(N,2)``, where ``N`` is the number of generated points and
            the second axis contains the x- (first value) and y- (second value) coordinates.
            The center keypoint (the one on which this function was called) is always included.
        """
        # TODO add test
        # With S steps the generated points form a square rotated by 45deg.
        # The center row holds 1+2*S points; the rows above/below shrink by
        # 2 points per row, adding 2*(S^2) points in total, so the overall
        # count is (S+1+S) + 2*S^2.
        nb_rows = nb_steps + 1 + nb_steps
        points = np.zeros((nb_rows + 2*(nb_steps**2), 2), dtype=np.float32)
        # y-coordinates of all rows, from the bottom-most to the top-most row
        ys = np.linspace(self.y - nb_steps * step_size, self.y + nb_steps * step_size, nb_rows)
        row_width = 1  # the bottom-most row contains exactly one point
        idx = 0
        for row_i, y_coord in enumerate(ys):
            if row_width == 1:
                xs = [self.x]
            else:
                xs = np.linspace(self.x - (row_width-1)//2 * step_size, self.x + (row_width-1)//2 * step_size,
                                 row_width)
            for x_coord in xs:
                points[idx] = [x_coord, y_coord]
                idx += 1
            # rows grow towards the center line, then shrink again
            if row_i < nb_steps:
                row_width += 2
            else:
                row_width -= 2
        if return_array:
            return points
        return [self.deepcopy(x=points[i, 0], y=points[i, 1]) for i in sm.xrange(points.shape[0])]
    def copy(self, x=None, y=None):
        """
        Create a shallow copy of the Keypoint object.
        Parameters
        ----------
        x : None or number, optional
            Coordinate of the keypoint on the x axis.
            If ``None``, the instance's value will be copied.
        y : None or number, optional
            Coordinate of the keypoint on the y axis.
            If ``None``, the instance's value will be copied.
        Returns
        -------
        imgaug.Keypoint
            Shallow copy.
        """
        # coordinates are plain numbers, hence a deep copy is equivalent
        return self.deepcopy(x=x, y=y)
    def deepcopy(self, x=None, y=None):
        """
        Create a deep copy of the Keypoint object.
        Parameters
        ----------
        x : None or number, optional
            Coordinate of the keypoint on the x axis.
            If ``None``, the instance's value will be copied.
        y : None or number, optional
            Coordinate of the keypoint on the y axis.
            If ``None``, the instance's value will be copied.
        Returns
        -------
        imgaug.Keypoint
            Deep copy.
        """
        return Keypoint(
            x=self.x if x is None else x,
            y=self.y if y is None else y)
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return "Keypoint(x=%.8f, y=%.8f)" % (self.x, self.y)
class KeypointsOnImage(object):
"""
Object that represents all keypoints on a single image.
Parameters
----------
keypoints : list of imgaug.Keypoint
List of keypoints on the image.
shape : tuple of int
The shape of the image on which the keypoints are placed.
Examples
--------
>>> image = np.zeros((70, 70))
>>> kps = [Keypoint(x=10, y=20), Keypoint(x=34, y=60)]
>>> kps_oi = KeypointsOnImage(kps, shape=image.shape)
"""
    def __init__(self, keypoints, shape):
        # list of imgaug.Keypoint instances placed on the image
        self.keypoints = keypoints
        if is_np_array(shape):
            # an image array was passed instead of a shape -- use its shape
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            # normalize to a tuple so that the stored shape is immutable
            self.shape = tuple(shape)
    @property
    def height(self):
        # height of the underlying image, i.e. first entry of `shape`
        return self.shape[0]
    @property
    def width(self):
        # width of the underlying image, i.e. second entry of `shape`
        return self.shape[1]
    @property
    def empty(self):
        """
        Returns whether this object contains zero keypoints.
        Returns
        -------
        result : bool
            True if this object contains zero keypoints.
        """
        # empty <=> no keypoints stored on this instance
        return len(self.keypoints) == 0
def on(self, image):
"""
Project keypoints from one image to a new one.
Parameters
----------
image : ndarray or tuple of int
New image onto which the keypoints are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
keypoints : imgaug.KeypointsOnImage
Object containing all projected keypoints.
"""
if is_np_array(image):
shape = image.shape
else:
shape = image
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
else:
keypoints = [kp.project(self.shape, shape) for kp in self.keypoints]
return self.deepcopy(keypoints, shape)
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=3, copy=True, raise_if_out_of_image=False):
"""
Draw all keypoints onto a given image. Each keypoint is marked by a square of a chosen color and size.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the keypoints.
This image should usually have the same shape as
set in KeypointsOnImage.shape.
color : int or list of int or tuple of int or (3,) ndarray, optional
The RGB color of all keypoints. If a single int ``C``, then that is
equivalent to ``(C,C,C)``.
alpha : float, optional
The opacity of the drawn keypoint, where ``1.0`` denotes a fully
visible keypoint and ``0.0`` an invisible one.
size : int, optional
The size of each point. If set to ``C``, each square will have
size ``C x C``.
copy : bool, optional
Whether to copy the image before drawing the points.
raise_if_out_of_image : bool, optional
Whether to raise an exception if any keypoint is outside of the image.
Returns
-------
image : (H,W,3) ndarray
Image with drawn keypoints.
"""
if copy:
image = np.copy(image)
input_dtype = image.dtype
alpha_color = color
if alpha < 0.01:
# keypoints all invisible, nothing to do
return image
elif alpha > 0.99:
alpha = 1
else:
image = image.astype(np.float32, copy=False)
alpha_color = alpha * np.array(color)
height, width = image.shape[0:2]
for keypoint in self.keypoints:
y, x = keypoint.y_int, keypoint.x_int
if 0 <= y < height and 0 <= x < width:
x1 = max(x - size//2, 0)
x2 = min(x + 1 + size//2, width)
y1 = max(y - size//2, 0)
y2 = min(y + 1 + size//2, height)
if alpha == 1:
image[y1:y2, x1:x2] = color
else:
image[y1:y2, x1:x2] = (
(1 - alpha) * image[y1:y2, x1:x2]
+ alpha_color
)
else:
if raise_if_out_of_image:
raise Exception("Cannot draw keypoint x=%.8f, y=%.8f on image with shape %s." % (y, x, image.shape))
if image.dtype.name != input_dtype.name:
if input_dtype.name == "uint8":
image = np.clip(image, 0, 255, out=image)
image = image.astype(input_dtype, copy=False)
return image
def shift(self, x=0, y=0):
"""
Move the keypoints around on an image.
Parameters
----------
x : number, optional
Move each keypoint by this value on the x axis.
y : number, optional
Move each keypoint by this value on the y axis.
Returns
-------
out : KeypointsOnImage
Keypoints after moving them.
"""
keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]
return self.deepcopy(keypoints)
def get_coords_array(self):
"""
Convert the coordinates of all keypoints in this object to an array of shape (N,2).
Returns
-------
result : (N, 2) ndarray
Where N is the number of keypoints. Each first value is the
x coordinate, each second value is the y coordinate.
"""
result = np.zeros((len(self.keypoints), 2), np.float32)
for i, keypoint in enumerate(self.keypoints):
result[i, 0] = keypoint.x
result[i, 1] = keypoint.y
return result
@staticmethod
def from_coords_array(coords, shape):
"""
Convert an array (N,2) with a given image shape to a KeypointsOnImage object.
Parameters
----------
coords : (N, 2) ndarray
Coordinates of ``N`` keypoints on the original image.
Each first entry ``coords[i, 0]`` is expected to be the x coordinate.
Each second entry ``coords[i, 1]`` is expected to be the y coordinate.
shape : tuple
Shape tuple of the image on which the keypoints are placed.
Returns
-------
out : KeypointsOnImage
KeypointsOnImage object that contains all keypoints from the array.
"""
keypoints = [Keypoint(x=coords[i, 0], y=coords[i, 1]) for i in sm.xrange(coords.shape[0])]
return KeypointsOnImage(keypoints, shape)
# TODO add to_gaussian_heatmaps(), from_gaussian_heatmaps()
def to_keypoint_image(self, size=1):
"""
Draws a new black image of shape ``(H,W,N)`` in which all keypoint coordinates are set to 255.
(H=shape height, W=shape width, N=number of keypoints)
This function can be used as a helper when augmenting keypoints with a method that only supports the
augmentation of images.
Parameters
-------
size : int
Size of each (squared) point.
Returns
-------
image : (H,W,N) ndarray
Image in which the keypoints are marked. H is the height,
defined in KeypointsOnImage.shape[0] (analogous W). N is the
number of keypoints.
"""
do_assert(len(self.keypoints) > 0)
height, width = self.shape[0:2]
image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
do_assert(size % 2 != 0)
sizeh = max(0, (size-1)//2)
for i, keypoint in enumerate(self.keypoints):
# TODO for float values spread activation over several cells
# here and do voting at the end
y = keypoint.y_int
x = keypoint.x_int
x1 = np.clip(x - sizeh, 0, width-1)
x2 = np.clip(x + sizeh + 1, 0, width)
y1 = np.clip(y - sizeh, 0, height-1)
y2 = np.clip(y + sizeh + 1, 0, height)
if x1 < x2 and y1 < y2:
image[y1:y2, x1:x2, i] = 128
if 0 <= y < height and 0 <= x < width:
image[y, x, i] = 255
return image
@staticmethod
def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1, nb_channels=None): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
"""
Converts an image generated by ``to_keypoint_image()`` back to a KeypointsOnImage object.
Parameters
----------
image : (H,W,N) ndarray
The keypoints image. N is the number of keypoints.
if_not_found_coords : tuple or list or dict or None, optional
Coordinates to use for keypoints that cannot be found in `image`.
If this is a list/tuple, it must have two integer values.
If it is a dictionary, it must have the keys ``x`` and ``y`` with
each containing one integer value.
If this is None, then the keypoint will not be added to the final
KeypointsOnImage object.
threshold : int, optional
The search for keypoints works by searching for the argmax in
each channel. This parameters contains the minimum value that
the max must have in order to be viewed as a keypoint.
nb_channels : None or int, optional
Number of channels of the image on which the keypoints are placed.
Some keypoint augmenters require that information.
If set to None, the keypoint's shape will be set
to ``(height, width)``, otherwise ``(height, width, nb_channels)``.
Returns
-------
out : KeypointsOnImage
The extracted keypoints.
"""
do_assert(len(image.shape) == 3)
height, width, nb_keypoints = image.shape
drop_if_not_found = False
if if_not_found_coords is None:
drop_if_not_found = True
if_not_found_x = -1
if_not_found_y = -1
elif isinstance(if_not_found_coords, (tuple, list)):
do_assert(len(if_not_found_coords) == 2)
if_not_found_x = if_not_found_coords[0]
if_not_found_y = if_not_found_coords[1]
elif isinstance(if_not_found_coords, dict):
if_not_found_x = if_not_found_coords["x"]
if_not_found_y = if_not_found_coords["y"]
else:
raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
type(if_not_found_coords),))
keypoints = []
for i in sm.xrange(nb_keypoints):
maxidx_flat = np.argmax(image[..., i])
maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
if found:
keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
else:
if drop_if_not_found:
pass # dont add the keypoint to the result list, i.e. drop it
else:
keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))
out_shape = (height, width)
if nb_channels is not None:
out_shape += (nb_channels,)
return KeypointsOnImage(keypoints, shape=out_shape)
def to_distance_maps(self, inverted=False):
"""
Generates a ``(H,W,K)`` output containing ``K`` distance maps for ``K`` keypoints.
The k-th distance map contains at every location ``(y, x)`` the euclidean distance to the k-th keypoint.
This function can be used as a helper when augmenting keypoints with a method that only supports
the augmentation of images.
Parameters
-------
inverted : bool, optional
If True, inverted distance maps are returned where each distance value d is replaced
by ``d/(d+1)``, i.e. the distance maps have values in the range ``(0.0, 1.0]`` with 1.0
denoting exactly the position of the respective keypoint.
Returns
-------
distance_maps : (H,W,K) ndarray
A ``float32`` array containing ``K`` distance maps for ``K`` keypoints. Each location
``(y, x, k)`` in the array denotes the euclidean distance at ``(y, x)`` to the ``k``-th keypoint.
In inverted mode the distance ``d`` is replaced by ``d/(d+1)``. The height and width
of the array match the height and width in ``KeypointsOnImage.shape``.
"""
do_assert(len(self.keypoints) > 0)
height, width = self.shape[0:2]
distance_maps = np.zeros((height, width, len(self.keypoints)), dtype=np.float32)
yy = np.arange(0, height)
xx = np.arange(0, width)
grid_xx, grid_yy = np.meshgrid(xx, yy)
for i, keypoint in enumerate(self.keypoints):
y, x = keypoint.y, keypoint.x
distance_maps[:, :, i] = (grid_xx - x) ** 2 + (grid_yy - y) ** 2
distance_maps = np.sqrt(distance_maps)
if inverted:
return 1/(distance_maps+1)
return distance_maps
# TODO add option to if_not_found_coords to reuse old keypoint coords
@staticmethod
def from_distance_maps(distance_maps, inverted=False, if_not_found_coords={"x": -1, "y": -1}, threshold=None, # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
nb_channels=None):
"""
Converts maps generated by ``to_distance_maps()`` back to a KeypointsOnImage object.
Parameters
----------
distance_maps : (H,W,N) ndarray
The distance maps. N is the number of keypoints.
inverted : bool, optional
Whether the given distance maps were generated in inverted or normal mode.
if_not_found_coords : tuple or list or dict or None, optional
Coordinates to use for keypoints that cannot be found in ``distance_maps``.
If this is a list/tuple, it must have two integer values.
If it is a dictionary, it must have the keys ``x`` and ``y``, with each
containing one integer value.
If this is None, then the keypoint will not be added to the final
KeypointsOnImage object.
threshold : float, optional
The search for keypoints works by searching for the argmin (non-inverted) or
argmax (inverted) in each channel. This parameters contains the maximum (non-inverted)
or minimum (inverted) value to accept in order to view a hit as a keypoint.
Use None to use no min/max.
nb_channels : None or int, optional
Number of channels of the image on which the keypoints are placed.
Some keypoint augmenters require that information.
If set to None, the keypoint's shape will be set
to ``(height, width)``, otherwise ``(height, width, nb_channels)``.
Returns
-------
imgaug.KeypointsOnImage
The extracted keypoints.
"""
do_assert(len(distance_maps.shape) == 3)
height, width, nb_keypoints = distance_maps.shape
drop_if_not_found = False
if if_not_found_coords is None:
drop_if_not_found = True
if_not_found_x = -1
if_not_found_y = -1
elif isinstance(if_not_found_coords, (tuple, list)):
do_assert(len(if_not_found_coords) == 2)
if_not_found_x = if_not_found_coords[0]
if_not_found_y = if_not_found_coords[1]
elif isinstance(if_not_found_coords, dict):
if_not_found_x = if_not_found_coords["x"]
if_not_found_y = if_not_found_coords["y"]
else:
raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
type(if_not_found_coords),))
keypoints = []
for i in sm.xrange(nb_keypoints):
# TODO introduce voting here among all distance values that have min/max values
if inverted:
hitidx_flat = np.argmax(distance_maps[..., i])
else:
hitidx_flat = np.argmin(distance_maps[..., i])
hitidx_ndim = np.unravel_index(hitidx_flat, (height, width))
if not inverted and threshold is not None:
found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] < threshold)
elif inverted and threshold is not None:
found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] >= threshold)
else:
found = True
if found:
keypoints.append(Keypoint(x=hitidx_ndim[1], y=hitidx_ndim[0]))
else:
if drop_if_not_found:
pass # dont add the keypoint to the result list, i.e. drop it
else:
keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))
out_shape = (height, width)
if nb_channels is not None:
out_shape += (nb_channels,)
return KeypointsOnImage(keypoints, shape=out_shape)
def copy(self, keypoints=None, shape=None):
"""
Create a shallow copy of the KeypointsOnImage object.
Parameters
----------
keypoints : None or list of imgaug.Keypoint, optional
List of keypoints on the image. If ``None``, the instance's
keypoints will be copied.
shape : tuple of int, optional
The shape of the image on which the keypoints are placed.
If ``None``, the instance's shape will be copied.
Returns
-------
imgaug.KeypointsOnImage
Shallow copy.
"""
result = copy.copy(self)
if keypoints is not None:
result.keypoints = keypoints
if shape is not None:
result.shape = shape
return result
def deepcopy(self, keypoints=None, shape=None):
"""
Create a deep copy of the KeypointsOnImage object.
Parameters
----------
keypoints : None or list of imgaug.Keypoint, optional
List of keypoints on the image. If ``None``, the instance's
keypoints will be copied.
shape : tuple of int, optional
The shape of the image on which the keypoints are placed.
If ``None``, the instance's shape will be copied.
Returns
-------
imgaug.KeypointsOnImage
Deep copy.
"""
# for some reason deepcopy is way slower here than manual copy
if keypoints is None:
keypoints = [kp.deepcopy() for kp in self.keypoints]
if shape is None:
shape = tuple(self.shape)
return KeypointsOnImage(keypoints, shape)
def __repr__(self):
return self.__str__()
def __str__(self):
return "KeypointsOnImage(%s, shape=%s)" % (str(self.keypoints), self.shape)
# TODO functions: square(), to_aspect_ratio(), contains_point()
class BoundingBox(object):
"""
Class representing bounding boxes.
Each bounding box is parameterized by its top left and bottom right corners. Both are given
as x and y-coordinates. The corners are intended to lie inside the bounding box area.
As a result, a bounding box that lies completely inside the image but has maximum extensions
would have coordinates ``(0.0, 0.0)`` and ``(W - epsilon, H - epsilon)``. Note that coordinates
are saved internally as floats.
Parameters
----------
x1 : number
X-coordinate of the top left of the bounding box.
y1 : number
Y-coordinate of the top left of the bounding box.
x2 : number
X-coordinate of the bottom right of the bounding box.
y2 : number
Y-coordinate of the bottom right of the bounding box.
label : None or str, optional
Label of the bounding box, e.g. a string representing the class.
"""
def __init__(self, x1, y1, x2, y2, label=None):
"""Create a new BoundingBox instance."""
if x1 > x2:
x2, x1 = x1, x2
do_assert(x2 >= x1)
if y1 > y2:
y2, y1 = y1, y2
do_assert(y2 >= y1)
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.label = label
@property
def x1_int(self):
"""
Return the x-coordinate of the top left corner as an integer.
Returns
-------
int
X-coordinate of the top left corner, rounded to the closest integer.
"""
return int(np.round(self.x1)) # use numpy's round to have consistent behaviour between python versions
@property
def y1_int(self):
"""
Return the y-coordinate of the top left corner as an integer.
Returns
-------
int
Y-coordinate of the top left corner, rounded to the closest integer.
"""
return int(np.round(self.y1)) # use numpy's round to have consistent behaviour between python versions
@property
def x2_int(self):
"""
Return the x-coordinate of the bottom left corner as an integer.
Returns
-------
int
X-coordinate of the bottom left corner, rounded to the closest integer.
"""
return int(np.round(self.x2)) # use numpy's round to have consistent behaviour between python versions
@property
def y2_int(self):
"""
Return the y-coordinate of the bottom left corner as an integer.
Returns
-------
int
Y-coordinate of the bottom left corner, rounded to the closest integer.
"""
return int(np.round(self.y2)) # use numpy's round to have consistent behaviour between python versions
@property
def height(self):
"""
Estimate the height of the bounding box.
Returns
-------
number
Height of the bounding box.
"""
return self.y2 - self.y1
@property
def width(self):
"""
Estimate the width of the bounding box.
Returns
-------
number
Width of the bounding box.
"""
return self.x2 - self.x1
@property
def center_x(self):
"""
Estimate the x-coordinate of the center point of the bounding box.
Returns
-------
number
X-coordinate of the center point of the bounding box.
"""
return self.x1 + self.width/2
@property
def center_y(self):
"""
Estimate the y-coordinate of the center point of the bounding box.
Returns
-------
number
Y-coordinate of the center point of the bounding box.
"""
return self.y1 + self.height/2
@property
def area(self):
"""
Estimate the area of the bounding box.
Returns
-------
number
Area of the bounding box, i.e. `height * width`.
"""
return self.height * self.width
def contains(self, other):
"""
Estimate whether the bounding box contains a point.
Parameters
----------
other : imgaug.Keypoint
Point to check for.
Returns
-------
bool
True if the point is contained in the bounding box, False otherwise.
"""
x, y = other.x, other.y
return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2
def project(self, from_shape, to_shape):
"""
Project the bounding box onto a new position on a new image.
E.g. if the bounding box is on its original image at
x1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto
a new image with size (width=200, height=200), its new position will
be (x1=20, y1=40). (Analogous for x2/y2.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
out : imgaug.BoundingBox
BoundingBox object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return self.copy()
else:
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
do_assert(from_height > 0)
do_assert(from_width > 0)
do_assert(to_height > 0)
do_assert(to_width > 0)
x1 = (self.x1 / from_width) * to_width
y1 = (self.y1 / from_height) * to_height
x2 = (self.x2 / from_width) * to_width
y2 = (self.y2 / from_height) * to_height
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
"""
Extend the size of the bounding box along its sides.
Parameters
----------
all_sides : number, optional
Value by which to extend the bounding box size along all sides.
top : number, optional
Value by which to extend the bounding box size along its top side.
right : number, optional
Value by which to extend the bounding box size along its right side.
bottom : number, optional
Value by which to extend the bounding box size along its bottom side.
left : number, optional
Value by which to extend the bounding box size along its left side.
Returns
-------
imgaug.BoundingBox
Extended bounding box.
"""
return BoundingBox(
x1=self.x1 - all_sides - left,
x2=self.x2 + all_sides + right,
y1=self.y1 - all_sides - top,
y2=self.y2 + all_sides + bottom
)
def intersection(self, other, default=None):
"""
Compute the intersection bounding box of this bounding box and another one.
Note that in extreme cases, the intersection can be a single point, meaning that the intersection bounding box
will exist, but then also has a height and width of zero.
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to generate the intersection.
default : any, optional
Default value to return if there is no intersection.
Returns
-------
imgaug.BoundingBox or any
Intersection bounding box of the two bounding boxes if there is an intersection.
If there is no intersection, the default value will be returned, which can by anything.
"""
x1_i = max(self.x1, other.x1)
y1_i = max(self.y1, other.y1)
x2_i = min(self.x2, other.x2)
y2_i = min(self.y2, other.y2)
if x1_i > x2_i or y1_i > y2_i:
return default
else:
return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)
def union(self, other):
"""
Compute the union bounding box of this bounding box and another one.
This is equivalent to drawing a bounding box around all corners points of both
bounding boxes.
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to generate the union.
Returns
-------
imgaug.BoundingBox
Union bounding box of the two bounding boxes.
"""
return BoundingBox(
x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2),
)
def iou(self, other):
"""
Compute the IoU of this bounding box with another one.
IoU is the intersection over union, defined as::
``area(intersection(A, B)) / area(union(A, B))``
``= area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))``
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to compare.
Returns
-------
float
IoU between the two bounding boxes.
"""
inters = self.intersection(other)
if inters is None:
return 0.0
else:
area_union = self.area + other.area - inters.area
return inters.area / area_union if area_union > 0 else 0.0
def is_fully_within_image(self, image):
"""
Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is fully inside the image area. False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
return self.x1 >= 0 and self.x2 < width and self.y1 >= 0 and self.y2 < height
def is_partly_within_image(self, image):
"""
Estimate whether the bounding box is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is at least partially inside the image area. False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
eps = np.finfo(np.float32).eps
img_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps)
return self.intersection(img_bb) is not None
def is_out_of_image(self, image, fully=True, partly=False):
"""
Estimate whether the bounding box is partially or fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape and must contain at least two integers.
fully : bool, optional
Whether to return True if the bounding box is fully outside fo the image area.
partly : bool, optional
Whether to return True if the bounding box is at least partially outside fo the
image area.
Returns
-------
bool
True if the bounding box is partially/fully outside of the image area, depending
on defined parameters. False otherwise.
"""
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
else:
return fully
def cut_out_of_image(self, *args, **kwargs):
warnings.warn(DeprecationWarning("BoundingBox.cut_out_of_image() is deprecated. Use "
"BoundingBox.clip_out_of_image() instead. It has the "
"exactly same interface (simple renaming)."))
return self.clip_out_of_image(*args, **kwargs)
def clip_out_of_image(self, image):
"""
Clip off all parts of the bounding box that are outside of the image.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use for the clipping of the bounding box.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
result : imgaug.BoundingBox
Bounding box, clipped to fall within the image dimensions.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
do_assert(height > 0)
do_assert(width > 0)
eps = np.finfo(np.float32).eps
x1 = np.clip(self.x1, 0, width - eps)
x2 = np.clip(self.x2, 0, width - eps)
y1 = np.clip(self.y1, 0, height - eps)
y2 = np.clip(self.y2, 0, height - eps)
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
# TODO convert this to x/y params?
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift the bounding box from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the bounding box from the top.
right : None or int, optional
Amount of pixels by which to shift the bounding box from the right.
bottom : None or int, optional
Amount of pixels by which to shift the bounding box from the bottom.
left : None or int, optional
Amount of pixels by which to shift the bounding box from the left.
Returns
-------
result : imgaug.BoundingBox
Shifted bounding box.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
)
# TODO add explicit test for zero-sized BBs (worked when tested by hand)
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
"""
Draw the bounding box on an image.
Parameters
----------
image : (H,W,C) ndarray(uint8)
The image onto which to draw the bounding box.
color : iterable of int, optional
The color to use, corresponding to the channel layout of the image. Usually RGB.
alpha : float, optional
The transparency of the drawn bounding box, where 1.0 denotes no transparency and
0.0 is invisible.
thickness : int, optional
The thickness of the bounding box in pixels. If the value is larger than 1, then
additional pixels will be added around the bounding box (i.e. extension towards the
outside).
copy : bool, optional
Whether to copy the input image or change it in-place.
raise_if_out_of_image : bool, optional
Whether to raise an error if the bounding box is partially/fully outside of the
image. If set to False, no error will be raised and only the parts inside the image
will be drawn.
Returns
-------
result : (H,W,C) ndarray(uint8)
Image with bounding box drawn on it.
"""
if raise_if_out_of_image and self.is_out_of_image(image):
raise Exception("Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s." % (
self.x1, self.y1, self.x2, self.y2, image.shape))
result = np.copy(image) if copy else image
if isinstance(color, (tuple, list)):
color = np.uint8(color)
for i in range(thickness):
y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int
# When y values get into the range (H-0.5, H), the *_int functions round them to H.
# That is technically sensible, but in the case of drawing means that the border lies
# just barely outside of the image, making the border disappear, even though the BB
# is fully inside the image. Here we correct for that because of beauty reasons.
# Same is the case for x coordinates.
if self.is_fully_within_image(image):
y1 = np.clip(y1, 0, image.shape[0]-1)
y2 = np.clip(y2, 0, image.shape[0]-1)
x1 = np.clip(x1, 0, image.shape[1]-1)
x2 = np.clip(x2, 0, image.shape[1]-1)
y = [y1-i, y1-i, y2+i, y2+i]
x = [x1-i, x2+i, x2+i, x1-i]
rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
if alpha >= 0.99:
result[rr, cc, :] = color
else:
if is_float_array(result):
result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
result = np.clip(result, 0, 255)
else:
input_dtype = result.dtype
result = result.astype(np.float32)
result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
result = np.clip(result, 0, 255).astype(input_dtype)
return result
def extract_from_image(self, image, prevent_zero_size=True):
"""
Extract the image pixels within the bounding box.
This function will zero-pad the image if the bounding box is partially/fully outside of
the image.
Parameters
----------
image : (H,W) ndarray or (H,W,C) ndarray
The image from which to extract the pixels within the bounding box.
prevent_zero_size : bool, optional
Whether to prevent height or width of the extracted image from becoming zero.
If this is set to True and height or width of the bounding box is below 1, the height/width will
be increased to 1. This can be useful to prevent problems, e.g. with image saving or plotting.
If it is set to False, images will be returned as ``(H', W')`` or ``(H', W', 3)`` with ``H`` or
``W`` potentially being 0.
Returns
-------
image : (H',W') ndarray or (H',W',C) ndarray
Pixels within the bounding box. Zero-padded if the bounding box is partially/fully
outside of the image. If prevent_zero_size is activated, it is guarantueed that ``H'>0``
and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``.
"""
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
height, width = image.shape[0], image.shape[1]
x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
# When y values get into the range (H-0.5, H), the *_int functions round them to H.
# That is technically sensible, but in the case of extraction leads to a black border,
# which is both ugly and unexpected after calling cut_out_of_image(). Here we correct for
# that because of beauty reasons.
# Same is the case for x coordinates.
if self.is_fully_within_image(image):
y1 = np.clip(y1, 0, image.shape[0]-1)
y2 = np.clip(y2, 0, image.shape[0]-1)
x1 = np.clip(x1, 0, image.shape[1]-1)
x2 = np.clip(x2, 0, image.shape[1]-1)
# TODO add test
if prevent_zero_size:
if abs(x2 - x1) < 1:
x2 = x1 + 1
if abs(y2 - y1) < 1:
y2 = y1 + 1
# if the bb is outside of the image area, the following pads the image
# first with black pixels until the bb is inside the image
# and only then extracts the image area
# TODO probably more efficient to initialize an array of zeros
# and copy only the portions of the bb into that array that are
# natively inside the image area
if x1 < 0:
pad_left = abs(x1)
x2 = x2 + abs(x1)
x1 = 0
if y1 < 0:
pad_top = abs(y1)
y2 = y2 + abs(y1)
y1 = 0
if x2 >= width:
pad_right = x2 - (width - 1)
if y2 >= height:
pad_bottom = y2 - (height - 1)
if any([val > 0 for val in [pad_top, pad_right, pad_bottom, pad_left]]):
if len(image.shape) == 2:
image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right)), mode="constant")
else:
image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), mode="constant")
return image[y1:y2, x1:x2]
# TODO also add to_heatmap
# TODO add this to BoundingBoxesOnImage
def to_keypoints(self):
"""
Convert the corners of the bounding box to keypoints (clockwise, starting at top left).
Returns
-------
list of imgaug.Keypoint
Corners of the bounding box as keypoints.
"""
return [
Keypoint(x=self.x1, y=self.y1),
Keypoint(x=self.x2, y=self.y1),
Keypoint(x=self.x2, y=self.y2),
Keypoint(x=self.x1, y=self.y2)
]
def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
    """
    Create a shallow copy of the BoundingBox object.

    Parameters
    ----------
    x1 : None or number
        If not None, then the x1 coordinate of the copied object will be set to this value.

    y1 : None or number
        If not None, then the y1 coordinate of the copied object will be set to this value.

    x2 : None or number
        If not None, then the x2 coordinate of the copied object will be set to this value.

    y2 : None or number
        If not None, then the y2 coordinate of the copied object will be set to this value.

    label : None or string
        If not None, then the label of the copied object will be set to this value.

    Returns
    -------
    imgaug.BoundingBox
        Shallow copy.

    """
    # fall back to the current attribute wherever no override was given
    new_x1 = x1 if x1 is not None else self.x1
    new_y1 = y1 if y1 is not None else self.y1
    new_x2 = x2 if x2 is not None else self.x2
    new_y2 = y2 if y2 is not None else self.y2
    new_label = label if label is not None else self.label
    return BoundingBox(x1=new_x1, y1=new_y1, x2=new_x2, y2=new_y2, label=new_label)
def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
    """
    Create a deep copy of the BoundingBox object.

    Parameters
    ----------
    x1 : None or number
        If not None, then the x1 coordinate of the copied object will be set to this value.

    y1 : None or number
        If not None, then the y1 coordinate of the copied object will be set to this value.

    x2 : None or number
        If not None, then the x2 coordinate of the copied object will be set to this value.

    y2 : None or number
        If not None, then the y2 coordinate of the copied object will be set to this value.

    label : None or string
        If not None, then the label of the copied object will be set to this value.

    Returns
    -------
    imgaug.BoundingBox
        Deep copy.

    """
    # The coordinates are plain numbers (and the label presumably a string),
    # so delegating to the shallow copy is equivalent to a deep copy here.
    overrides = {"x1": x1, "y1": y1, "x2": x2, "y2": y2, "label": label}
    return self.copy(**overrides)
def __repr__(self):
    """Return the same representation as ``__str__``."""
    return self.__str__()
def __str__(self):
    """Return a readable string listing the corner coordinates and label."""
    template = "BoundingBox(x1={:.4f}, y1={:.4f}, x2={:.4f}, y2={:.4f}, label={})"
    return template.format(self.x1, self.y1, self.x2, self.y2, self.label)
class BoundingBoxesOnImage(object):
    """
    Object that represents all bounding boxes on a single image.

    Parameters
    ----------
    bounding_boxes : list of imgaug.BoundingBox
        List of bounding boxes on the image.

    shape : tuple of int
        The shape of the image on which the bounding boxes are placed.

    Examples
    --------
    >>> image = np.zeros((100, 100))
    >>> bbs = [
    >>>     BoundingBox(x1=10, y1=20, x2=20, y2=30),
    >>>     BoundingBox(x1=25, y1=50, x2=30, y2=70)
    >>> ]
    >>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)

    """

    def __init__(self, bounding_boxes, shape):
        self.bounding_boxes = bounding_boxes
        if is_np_array(shape):
            # an image was provided instead of its shape tuple
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)

    # TODO remove this? here it is image height, at BoundingBox it is bounding box height
    @property
    def height(self):
        """
        Get the height of the image on which the bounding boxes fall.

        Returns
        -------
        int
            Image height.

        """
        return self.shape[0]

    # TODO remove this? here it is image width, at BoundingBox it is bounding box width
    @property
    def width(self):
        """
        Get the width of the image on which the bounding boxes fall.

        Returns
        -------
        int
            Image width.

        """
        return self.shape[1]

    @property
    def empty(self):
        """
        Returns whether this object contains zero bounding boxes.

        Returns
        -------
        bool
            True if this object contains zero bounding boxes.

        """
        return len(self.bounding_boxes) == 0

    def on(self, image):
        """
        Project bounding boxes from one image to a new one.

        Parameters
        ----------
        image : ndarray or tuple of int
            New image onto which the bounding boxes are to be projected.
            May also simply be that new image's shape tuple.

        Returns
        -------
        bounding_boxes : imgaug.BoundingBoxesOnImage
            Object containing all projected bounding boxes.

        """
        shape = image.shape if is_np_array(image) else image
        if shape[0:2] == self.shape[0:2]:
            # same image plane, nothing to project
            return self.deepcopy()
        bounding_boxes = [bb.project(self.shape, shape) for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bounding_boxes, shape)

    @classmethod
    def from_xyxy_array(cls, xyxy, shape):
        """
        Convert an (N,4) ndarray to a BoundingBoxesOnImage object.

        This is the inverse of :func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.

        Parameters
        ----------
        xyxy : (N,4) ndarray
            Array containing the corner coordinates (top-left, bottom-right) of ``N`` bounding boxes
            in the form ``(x1, y1, x2, y2)``. Should usually be of dtype ``float32``.

        shape : tuple of int
            Shape of the image on which the bounding boxes are placed.
            Should usually be ``(H, W, C)`` or ``(H, W)``.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Object containing a list of BoundingBox objects following the provided corner coordinates.

        """
        do_assert(xyxy.shape[1] == 4, "Expected input array of shape (N, 4), got shape %s." % (xyxy.shape,))
        boxes = [BoundingBox(*row) for row in xyxy]
        return cls(boxes, shape)

    def to_xyxy_array(self, dtype=np.float32):
        """
        Convert the BoundingBoxesOnImage object to an (N,4) ndarray.

        This is the inverse of :func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`.

        Parameters
        ----------
        dtype : numpy.dtype, optional
            Desired output datatype of the ndarray.

        Returns
        -------
        ndarray
            (N,4) ndarray, where ``N`` denotes the number of bounding boxes and ``4`` denotes the
            top-left and bottom-right bounding box corner coordinates in form ``(x1, y1, x2, y2)``.

        """
        # Allocate the output directly in the requested dtype instead of
        # filling a float32 intermediate and converting afterwards; the old
        # approach truncated coordinates to float32 precision even when
        # e.g. float64 was requested.
        xyxy_array = np.zeros((len(self.bounding_boxes), 4), dtype=dtype)
        for i, box in enumerate(self.bounding_boxes):
            xyxy_array[i] = [box.x1, box.y1, box.x2, box.y2]
        return xyxy_array

    def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
        """
        Draw all bounding boxes onto a given image.

        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the bounding boxes.
            This image should usually have the same shape as
            set in BoundingBoxesOnImage.shape.

        color : int or list of int or tuple of int or (3,) ndarray, optional
            The RGB color of all bounding boxes. If a single int ``C``, then that is
            equivalent to ``(C,C,C)``.

        alpha : float, optional
            Alpha/transparency of the bounding box.

        thickness : int, optional
            Thickness in pixels.

        copy : bool, optional
            Whether to copy the image before drawing the points.

        raise_if_out_of_image : bool, optional
            Whether to raise an exception if any bounding box is outside of the image.

        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn bounding boxes.

        """
        # Copy the input image at most once (instead of once per bounding box,
        # as previously done) and then let every box draw onto that buffer
        # in-place via copy=False.
        if copy:
            image = np.copy(image)
        for bb in self.bounding_boxes:
            image = bb.draw_on_image(
                image,
                color=color,
                alpha=alpha,
                thickness=thickness,
                copy=False,
                raise_if_out_of_image=raise_if_out_of_image
            )
        return image

    def remove_out_of_image(self, fully=True, partly=False):
        """
        Remove all bounding boxes that are fully or partially outside of the image.

        Parameters
        ----------
        fully : bool, optional
            Whether to remove bounding boxes that are fully outside of the image.

        partly : bool, optional
            Whether to remove bounding boxes that are partially outside of the image.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Reduced set of bounding boxes, with those that were fully/partially outside of
            the image removed.

        """
        bbs_clean = [bb for bb in self.bounding_boxes
                     if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
        return BoundingBoxesOnImage(bbs_clean, shape=self.shape)

    def cut_out_of_image(self):
        """Deprecated alias for :func:`BoundingBoxesOnImage.clip_out_of_image`."""
        # BUGFIX: added the missing space after "deprecated." — the two string
        # literals previously concatenated to "deprecated.Use".
        warnings.warn(DeprecationWarning("BoundingBoxesOnImage.cut_out_of_image() is deprecated. "
                                         "Use BoundingBoxesOnImage.clip_out_of_image() instead. It "
                                         "has the exactly same interface (simple renaming)."))
        return self.clip_out_of_image()

    def clip_out_of_image(self):
        """
        Clip off all parts from all bounding boxes that are outside of the image.

        Bounding boxes that are fully outside of the image are dropped entirely.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Bounding boxes, clipped to fall within the image dimensions.

        """
        bbs_cut = [bb.clip_out_of_image(self.shape)
                   for bb in self.bounding_boxes if bb.is_partly_within_image(self.shape)]
        return BoundingBoxesOnImage(bbs_cut, shape=self.shape)

    def shift(self, top=None, right=None, bottom=None, left=None):
        """
        Shift all bounding boxes from one or more image sides, i.e. move them on the x/y-axis.

        Parameters
        ----------
        top : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the top.

        right : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the right.

        bottom : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the bottom.

        left : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the left.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Shifted bounding boxes.

        """
        bbs_new = [bb.shift(top=top, right=right, bottom=bottom, left=left) for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs_new, shape=self.shape)

    def copy(self):
        """
        Create a shallow copy of the BoundingBoxesOnImage object.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Shallow copy.

        """
        return copy.copy(self)

    def deepcopy(self):
        """
        Create a deep copy of the BoundingBoxesOnImage object.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Deep copy.

        """
        # Manual copy is far faster than copy.deepcopy() for this class,
        # so use manual copy here too.
        bbs = [bb.deepcopy() for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs, tuple(self.shape))

    def __repr__(self):
        """Return the same representation as ``__str__``."""
        return self.__str__()

    def __str__(self):
        """Return a readable string listing all boxes and the image shape."""
        return "BoundingBoxesOnImage(%s, shape=%s)" % (str(self.bounding_boxes), self.shape)
# TODO somehow merge with BoundingBox
# TODO add functions: simplify() (eg via shapely.ops.simplify()),
# extend(all_sides=0, top=0, right=0, bottom=0, left=0),
# intersection(other, default=None), union(other), iou(other), to_heatmap, to_mask
class Polygon(object):
"""
Class representing polygons.
Each polygon is parameterized by its corner points, given as absolute x- and y-coordinates
with sub-pixel accuracy.
Parameters
----------
exterior : list of imgaug.Keypoint or list of tuple of float or (N,2) ndarray
List of points defining the polygon. May be either a list of Keypoint objects or a list of tuples in xy-form
or a numpy array of shape (N,2) for N points in xy-form.
All coordinates are expected to be the absolute coordinates in the image, given as floats, e.g. x=10.7
and y=3.4 for a point at coordinates (10.7, 3.4). Their order is expected to be clock-wise. They are expected
to not be closed (i.e. first and last coordinate differ).
label : None or str, optional
Label of the polygon, e.g. a string representing the class.
"""
def __init__(self, exterior, label=None):
    """
    Create a new Polygon instance.

    Parameters
    ----------
    exterior : list of imgaug.Keypoint or list of tuple of float or (N,2) ndarray
        List of points defining the polygon, in absolute xy-coordinates.

    label : None or str, optional
        Label of the polygon, e.g. a string representing the class.

    """
    if isinstance(exterior, list):
        if not exterior:
            # for empty lists, make sure that the shape is (0, 2) and not (0,) as that is also expected when the
            # input is a numpy array
            self.exterior = np.zeros((0, 2), dtype=np.float32)
        elif isinstance(exterior[0], Keypoint):
            # list of Keypoint
            self.exterior = np.float32([[point.x, point.y] for point in exterior])
        else:
            # list of tuples (x, y)
            self.exterior = np.float32([[point[0], point[1]] for point in exterior])
    else:
        # BUGFIX: report the actual *type* of the input in the message;
        # previously the full value was formatted into the "got type %s" text.
        do_assert(is_np_array(exterior),
                  ("Expected exterior to be a list of tuples (x, y) or "
                   + "an (N, 2) array, got type %s") % (type(exterior),))
        do_assert(exterior.ndim == 2 and exterior.shape[1] == 2,
                  ("Expected exterior to be a list of tuples (x, y) or "
                   + "an (N, 2) array, got an array of shape %s") % (
                      exterior.shape,))
        self.exterior = np.float32(exterior)

    # Remove last point if it is essentially the same as the first point (polygons are always assumed to be
    # closed anyways). This also prevents problems with shapely, which seems to add the last point automatically.
    if len(self.exterior) >= 2 and np.allclose(self.exterior[0, :], self.exterior[-1, :]):
        self.exterior = self.exterior[:-1]

    self.label = label
@property
def xx(self):
    """
    Return the x-coordinates of all points in the exterior.

    Returns
    -------
    (N,) ndarray
        X-coordinates of all points in the exterior as a float32 ndarray.

    """
    return self.exterior[:, 0]
@property
def yy(self):
    """
    Return the y-coordinates of all points in the exterior.

    Returns
    -------
    (N,) ndarray
        Y-coordinates of all points in the exterior as a float32 ndarray.

    """
    return self.exterior[:, 1]
@property
def xx_int(self):
    """
    Return the x-coordinates of all points in the exterior, rounded to the closest integer value.

    Returns
    -------
    (N,) ndarray
        X-coordinates of all points in the exterior, rounded to the closest integer value.
        Result dtype is int32.

    """
    return np.int32(np.round(self.xx))
@property
def yy_int(self):
    """
    Return the y-coordinates of all points in the exterior, rounded to the closest integer value.

    Returns
    -------
    (N,) ndarray
        Y-coordinates of all points in the exterior, rounded to the closest integer value.
        Result dtype is int32.

    """
    return np.int32(np.round(self.yy))
@property
def is_valid(self):
    """
    Estimate whether the polygon has a valid shape.

    To be considered valid, the polygon must be made up of at least 3 points.
    Validity of the shape itself is delegated to shapely's ``is_valid``
    (which, per its documentation, e.g. rejects self-intersecting exteriors).
    Multiple consecutive points are allowed to have the same coordinates.

    Returns
    -------
    bool
        True if the polygon has at least 3 points and shapely considers it
        valid, otherwise False.

    """
    if len(self.exterior) < 3:
        return False
    return self.to_shapely_polygon().is_valid
@property
def area(self):
    """
    Compute the area of the polygon (delegated to shapely).

    Returns
    -------
    number
        Area of the polygon.

    Raises
    ------
    Exception
        If the polygon contains fewer than three points.

    """
    if len(self.exterior) < 3:
        raise Exception("Cannot compute the polygon's area because it contains less than three points.")
    return self.to_shapely_polygon().area
def project(self, from_shape, to_shape):
    """
    Project the polygon onto an image with a different shape.

    The relative coordinates of all points remain the same.
    E.g. a point at (x=20, y=20) on an image (width=100, height=200) will be
    projected on a new image (width=200, height=100) to (x=40, y=10).

    This is intended for cases where the original image is resized.
    It cannot be used for more complex changes (e.g. padding, cropping).

    Parameters
    ----------
    from_shape : tuple of int
        Shape of the original image. (Before resize.)

    to_shape : tuple of int
        Shape of the new image. (After resize.)

    Returns
    -------
    imgaug.Polygon
        Polygon object with new coordinates.

    """
    if from_shape[0:2] == to_shape[0:2]:
        # identical image planes -> nothing to project
        return self.copy()
    projected_points = [
        Keypoint(x=px, y=py).project(from_shape, to_shape)
        for px, py in self.exterior
    ]
    return self.copy(exterior=projected_points)
def find_closest_point_index(self, x, y, return_distance=False):
    """
    Find the index of the exterior point closest to the given coordinates.

    "Closeness" is here defined based on euclidean distance.
    This method will raise an AssertionError if the exterior contains no points.

    Parameters
    ----------
    x : number
        X-coordinate around which to search for close points.

    y : number
        Y-coordinate around which to search for close points.

    return_distance : bool, optional
        Whether to also return the distance of the closest point.

    Returns
    -------
    int
        Index of the closest point.

    number
        Euclidean distance to the closest point.
        This value is only returned if `return_distance` was set to True.

    """
    do_assert(len(self.exterior) > 0)
    # squared distances first, then a single sqrt over all of them
    squared_dists = [(px - x) ** 2 + (py - y) ** 2 for px, py in self.exterior]
    dists = np.sqrt(squared_dists)
    closest_idx = np.argmin(dists)
    if return_distance:
        return closest_idx, dists[closest_idx]
    return closest_idx
def _compute_inside_image_point_mask(self, image):
    # Boolean mask over the exterior points; True where a point lies inside
    # the image plane. `image` may be an ndarray or a shape tuple.
    shape = image if isinstance(image, tuple) else image.shape
    height, width = shape[0:2]
    xs = self.exterior[:, 0]
    ys = self.exterior[:, 1]
    inside_x = np.logical_and(0 <= xs, xs < width)
    inside_y = np.logical_and(0 <= ys, ys < height)
    return np.logical_and(inside_x, inside_y)
# TODO keep this method? it is almost an alias for is_out_of_image()
def is_fully_within_image(self, image):
    """
    Estimate whether the polygon is fully inside the image area.

    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image dimensions to use.
        If an ndarray, its shape will be used.
        If a tuple, it is assumed to represent the image shape and must contain at least two integers.

    Returns
    -------
    bool
        True if the polygon is fully inside the image area, False otherwise.

    """
    # fully inside <=> not even partially out of the image
    partially_or_fully_out = self.is_out_of_image(image, fully=True, partly=True)
    return not partially_or_fully_out
# TODO keep this method? it is almost an alias for is_out_of_image()
def is_partly_within_image(self, image):
    """
    Estimate whether the polygon is at least partially inside the image area.

    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image dimensions to use.
        If an ndarray, its shape will be used.
        If a tuple, it is assumed to represent the image shape and must contain at least two integers.

    Returns
    -------
    bool
        True if the polygon is at least partially inside the image area, False otherwise.

    """
    # at least partially inside <=> not fully out of the image
    fully_out = self.is_out_of_image(image, fully=True, partly=False)
    return not fully_out
def is_out_of_image(self, image, fully=True, partly=False):
    """
    Estimate whether the polygon is partially or fully outside of the image area.

    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image dimensions to use.
        If an ndarray, its shape will be used.
        If a tuple, it is assumed to represent the image shape and must contain at least two integers.

    fully : bool, optional
        Whether to return True if the polygon is fully outside of the image area.

    partly : bool, optional
        Whether to return True if the polygon is at least partially outside of the image area.

    Returns
    -------
    bool
        True if the polygon is partially/fully outside of the image area, depending
        on defined parameters. False otherwise.

    """
    if len(self.exterior) == 0:
        raise Exception("Cannot determine whether the polygon is inside the image, because it contains no points.")
    inside = self._compute_inside_image_point_mask(image)
    nb_points = len(inside)
    nb_inside = int(np.count_nonzero(inside))
    if nb_inside == nb_points:
        # every point inside -> not out of the image at all
        return False
    if nb_inside > 0:
        # mixed -> partially outside
        return partly
    # no point inside -> fully outside
    return fully
def cut_out_of_image(self, image):
    """Deprecated alias for :func:`Polygon.clip_out_of_image`."""
    message = ("Polygon.cut_out_of_image() is deprecated. Use "
               "Polygon.clip_out_of_image() instead. It has the exactly "
               "same interface (simple renaming).")
    warnings.warn(DeprecationWarning(message))
    return self.clip_out_of_image(image)
# TODO this currently can mess up the order of points - change somehow to
# keep the order
def clip_out_of_image(self, image):
    """
    Cut off all parts of the polygon that are outside of the image.

    This operation may lead to new points being created.
    As a single polygon may be split into multiple new polygons, the result is a MultiPolygon.

    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image dimensions to use for the clipping of the polygon.
        If an ndarray, its shape will be used.
        If a tuple, it is assumed to represent the image shape and must contain at least two integers.

    Returns
    -------
    imgaug.MultiPolygon
        Polygon, clipped to fall within the image dimensions.
        Returned as MultiPolygon, because the clipping can split the polygon into multiple parts.

    """
    # load shapely lazily, which makes the dependency more optional
    import shapely.geometry

    # if fully out of image, clip everything away, nothing remaining
    if self.is_out_of_image(image, fully=True, partly=False):
        return MultiPolygon([])

    h, w = image.shape[0:2] if is_np_array(image) else image[0:2]
    poly_shapely = self.to_shapely_polygon()
    # the image plane as a shapely rectangle; intersecting with it performs the clip
    poly_image = shapely.geometry.Polygon([(0, 0), (w, 0), (w, h), (0, h)])
    multipoly_inter_shapely = poly_shapely.intersection(poly_image)
    # shapely returns a plain Polygon when the clipped result is a single piece;
    # normalize to MultiPolygon so the loop below is uniform
    if not isinstance(multipoly_inter_shapely, shapely.geometry.MultiPolygon):
        do_assert(isinstance(multipoly_inter_shapely, shapely.geometry.Polygon))
        multipoly_inter_shapely = shapely.geometry.MultiPolygon([multipoly_inter_shapely])

    polygons = []
    for poly_inter_shapely in multipoly_inter_shapely.geoms:
        polygons.append(Polygon.from_shapely(poly_inter_shapely, label=self.label))

    # shapely changes the order of points, we try here to preserve it as good as possible
    polygons_reordered = []
    for polygon in polygons:
        found = False
        for x, y in self.exterior:
            # re-anchor each clipped polygon at the first original vertex that
            # survived clipping (distance ~0 to one of its points)
            closest_idx, dist = polygon.find_closest_point_index(x=x, y=y, return_distance=True)
            if dist < 1e-6:
                polygon_reordered = polygon.change_first_point_by_index(closest_idx)
                polygons_reordered.append(polygon_reordered)
                found = True
                break
        do_assert(found)  # could only not find closest points if new polys are empty

    return MultiPolygon(polygons_reordered)
def shift(self, top=None, right=None, bottom=None, left=None):
    """
    Shift the polygon from one or more image sides, i.e. move it on the x/y-axis.

    Parameters
    ----------
    top : None or int, optional
        Amount of pixels by which to shift the polygon from the top.

    right : None or int, optional
        Amount of pixels by which to shift the polygon from the right.

    bottom : None or int, optional
        Amount of pixels by which to shift the polygon from the bottom.

    left : None or int, optional
        Amount of pixels by which to shift the polygon from the left.

    Returns
    -------
    imgaug.Polygon
        Shifted polygon.

    """
    # treat unset sides as a shift of zero pixels
    shift_top, shift_right, shift_bottom, shift_left = [
        0 if amount is None else amount
        for amount in (top, right, bottom, left)
    ]
    exterior = np.copy(self.exterior)
    exterior[:, 0] += (shift_left - shift_right)
    exterior[:, 1] += (shift_top - shift_bottom)
    return self.deepcopy(exterior=exterior)
# TODO add perimeter thickness
def draw_on_image(self,
                  image,
                  color=(0, 255, 0), color_fill=None,
                  color_perimeter=None, color_points=None,
                  alpha=1.0, alpha_fill=None,
                  alpha_perimeter=None, alpha_points=None,
                  size_points=3,
                  raise_if_out_of_image=False):
    """
    Draw the polygon on an image.

    Parameters
    ----------
    image : (H,W,C) ndarray
        The image onto which to draw the polygon. Usually expected to be
        of dtype ``uint8``, though other dtypes are also handled.

    color : iterable of int, optional
        The color to use for the whole polygon.
        Must correspond to the channel layout of the image. Usually RGB.
        The values for `color_fill`, `color_perimeter` and `color_points`
        will be derived from this color if they are set to ``None``.
        This argument has no effect if `color_fill`, `color_perimeter`
        and `color_points` are all set anything other than ``None``.

    color_fill : None or iterable of int, optional
        The color to use for the inner polygon area (excluding perimeter).
        Must correspond to the channel layout of the image. Usually RGB.
        If this is ``None``, it will be derived from ``color * 1.0``.

    color_perimeter : None or iterable of int, optional
        The color to use for the perimeter (aka border) of the polygon.
        Must correspond to the channel layout of the image. Usually RGB.
        If this is ``None``, it will be derived from ``color * 0.5``.

    color_points : None or iterable of int, optional
        The color to use for the corner points of the polygon.
        Must correspond to the channel layout of the image. Usually RGB.
        If this is ``None``, it will be derived from ``color * 0.5``.

    alpha : float, optional
        The opacity of the whole polygon, where ``1.0`` denotes a completely
        visible polygon and ``0.0`` an invisible one.
        The values for `alpha_fill`, `alpha_perimeter` and `alpha_points`
        will be derived from this alpha value if they are set to ``None``.
        This argument has no effect if `alpha_fill`, `alpha_perimeter`
        and `alpha_points` are all set anything other than ``None``.

    alpha_fill : None or number, optional
        The opacity of the polygon's inner area (excluding the perimeter),
        where ``1.0`` denotes a completely visible inner area and ``0.0``
        an invisible one.
        If this is ``None``, it will be derived from ``alpha * 0.5``.

    alpha_perimeter : None or number, optional
        The opacity of the polygon's perimeter (aka border),
        where ``1.0`` denotes a completely visible perimeter and ``0.0`` an
        invisible one.
        If this is ``None``, it will be derived from ``alpha * 1.0``.

    alpha_points : None or number, optional
        The opacity of the polygon's corner points, where ``1.0`` denotes
        completely visible corners and ``0.0`` invisible ones.
        If this is ``None``, it will be derived from ``alpha * 1.0``.

    size_points : int, optional
        The size of each corner point. If set to ``C``, each corner point
        will be drawn as a square of size ``C x C``.

    raise_if_out_of_image : bool, optional
        Whether to raise an error if the polygon is partially/fully
        outside of the image. If set to False, no error will be raised and
        only the parts inside the image will be drawn.

    Returns
    -------
    result : (H,W,C) ndarray
        Image with polygon drawn on it. Result dtype is the same as the input dtype.

    """
    assert color is not None
    assert alpha is not None

    # derive per-part colors/alphas from the aggregate arguments
    color_fill = color_fill if color_fill is not None else np.array(color)
    color_perimeter = color_perimeter if color_perimeter is not None else np.array(color) * 0.5
    color_points = color_points if color_points is not None else np.array(color) * 0.5

    alpha_fill = alpha_fill if alpha_fill is not None else alpha * 0.5
    alpha_perimeter = alpha_perimeter if alpha_perimeter is not None else alpha
    alpha_points = alpha_points if alpha_points is not None else alpha

    # snap near-0/near-1 alphas to exactly 0/1 so that the cheap
    # "skip drawing" / "overwrite without blending" paths below are taken
    if alpha_fill < 0.01:
        alpha_fill = 0
    elif alpha_fill > 0.99:
        alpha_fill = 1
    if alpha_perimeter < 0.01:
        alpha_perimeter = 0
    elif alpha_perimeter > 0.99:
        alpha_perimeter = 1
    if alpha_points < 0.01:
        alpha_points = 0
    elif alpha_points > 0.99:
        alpha_points = 1

    # TODO separate this into draw_face_on_image() and draw_border_on_image()

    if raise_if_out_of_image and self.is_out_of_image(image):
        raise Exception("Cannot draw polygon %s on image with shape %s." % (
            str(self), image.shape
        ))

    xx = self.xx_int
    yy = self.yy_int

    # TODO np.clip to image plane if is_fully_within_image(), similar to how it is done for bounding boxes
    # TODO improve efficiency by only drawing in rectangle that covers poly instead of drawing in the whole image
    # TODO for a rectangular polygon, the face coordinates include the top/left boundary but not the right/bottom
    # boundary. This may be unintuitive when not drawing the boundary. Maybe somehow remove the boundary
    # coordinates from the face coordinates after generating both?
    params = []
    if alpha_fill > 0:
        rr, cc = skimage.draw.polygon(yy, xx, shape=image.shape)
        params.append(
            (rr, cc, color_fill, alpha_fill)
        )
    if alpha_perimeter > 0:
        rr, cc = skimage.draw.polygon_perimeter(yy, xx, shape=image.shape)
        params.append(
            (rr, cc, color_perimeter, alpha_perimeter)
        )

    input_dtype = image.dtype
    result = image.astype(np.float32)

    # NOTE: removed an unused loop counter ("c") that was incremented here
    # but never read.
    for rr, cc, color_this, alpha_this in params:
        color_this = np.float32(color_this)
        # don't have to check here for alpha<=0.01, as then these
        # parameters wouldn't have been added to params
        if alpha_this >= 0.99:
            result[rr, cc, :] = color_this
        else:
            # TODO replace with blend_alpha()
            result[rr, cc, :] = (
                (1 - alpha_this) * result[rr, cc, :]
                + alpha_this * color_this
            )

    if alpha_points > 0:
        kpsoi = KeypointsOnImage.from_coords_array(self.exterior,
                                                   shape=image.shape)
        result = kpsoi.draw_on_image(
            result, color=color_points, alpha=alpha_points,
            size=size_points, copy=False,
            raise_if_out_of_image=raise_if_out_of_image)

    if input_dtype.type == np.uint8:
        result = np.clip(result, 0, 255).astype(input_dtype)  # TODO make clipping more flexible
    else:
        result = result.astype(input_dtype)

    return result
def extract_from_image(self, image):
    """
    Extract the image pixels within the polygon.

    This function will zero-pad the image if the polygon is partially/fully outside of
    the image.

    Parameters
    ----------
    image : (H,W) ndarray or (H,W,C) ndarray
        The image from which to extract the pixels within the polygon.

    Returns
    -------
    result : (H',W') ndarray or (H',W',C) ndarray
        Pixels within the polygon. Zero-padded if the polygon is partially/fully
        outside of the image.

    """
    do_assert(image.ndim in [2, 3])
    if len(self.exterior) <= 2:
        raise Exception("Polygon must be made up of at least 3 points to extract its area from an image.")

    # extract the (zero-padded, if necessary) bounding box area first
    bb = self.to_bounding_box()
    bb_area = bb.extract_from_image(image)
    if self.is_out_of_image(image, fully=True, partly=False):
        return bb_area

    # shift the polygon coordinates into the bounding box's local frame
    xx = self.xx_int
    yy = self.yy_int
    xx_mask = xx - np.min(xx)
    yy_mask = yy - np.min(yy)
    height_mask = np.max(yy_mask)
    width_mask = np.max(xx_mask)

    # rasterize the polygon into a boolean mask and zero out everything
    # outside of it; the mask size (max - min of the rounded coordinates)
    # is expected to match bb_area's spatial size, as the bounding box is
    # derived from the same coordinates -- NOTE(review): degenerate
    # (zero-width/height) polygons may still hit a shape mismatch; verify.
    rr_face, cc_face = skimage.draw.polygon(yy_mask, xx_mask, shape=(height_mask, width_mask))

    # BUGFIX: use builtin `bool` instead of the deprecated alias `np.bool`
    # (an alias of builtin bool, removed in numpy>=1.24); same dtype.
    mask = np.zeros((height_mask, width_mask), dtype=bool)
    mask[rr_face, cc_face] = True

    if image.ndim == 3:
        mask = np.tile(mask[:, :, np.newaxis], (1, 1, image.shape[2]))

    return bb_area * mask
def change_first_point_by_coords(self, x, y, max_distance=1e-4):
    """
    Set the first point of the exterior to the given point based on its coordinates.

    If multiple points are found, the closest one will be picked.
    If no matching points are found, an exception is raised.

    Note: This method does *not* work in-place.

    Parameters
    ----------
    x : number
        X-coordinate of the point.

    y : number
        Y-coordinate of the point.

    max_distance : number
        Maximum distance past which possible matches are ignored.

    Returns
    -------
    imgaug.Polygon
        Copy of this polygon with the new point order.

    """
    if len(self.exterior) == 0:
        raise Exception("Cannot reorder polygon points, because it contains no points.")

    closest_idx, closest_dist = self.find_closest_point_index(x=x, y=y, return_distance=True)
    if max_distance is not None and closest_dist > max_distance:
        closest_point = self.exterior[closest_idx, :]
        # BUGFIX: the previous message was garbled ("exceeds ... exceeded")
        # and formatted the measured distance while labelling it "max_distance".
        raise Exception(
            "Closest found point (%.9f, %.9f) with distance %.9f exceeds max_distance of %.9f" % (
                closest_point[0], closest_point[1], closest_dist, max_distance)
        )
    return self.change_first_point_by_index(closest_idx)
def change_first_point_by_index(self, point_idx):
    """
    Set the first point of the exterior to the given point based on its index.

    Note: This method does *not* work in-place.

    Parameters
    ----------
    point_idx : int
        Index of the desired starting point.

    Returns
    -------
    imgaug.Polygon
        Copy of this polygon with the new point order.

    """
    do_assert(0 <= point_idx < len(self.exterior))
    if point_idx == 0:
        return self.deepcopy()
    # rotate the point sequence so that point_idx becomes index 0
    rotated_exterior = np.roll(self.exterior, -point_idx, axis=0)
    return self.deepcopy(exterior=rotated_exterior)
def to_shapely_polygon(self):
    """
    Convert this polygon to a Shapely polygon.

    Returns
    -------
    shapely.geometry.Polygon
        The Shapely polygon matching this polygon's exterior.

    """
    # load shapely lazily, which makes the dependency more optional
    import shapely.geometry
    coords = [(px, py) for px, py in self.exterior]
    return shapely.geometry.Polygon(coords)
def to_shapely_line_string(self, closed=False, interpolate=0):
    """
    Convert this polygon's exterior to a Shapely LineString object.

    Parameters
    ----------
    closed : bool, optional
        Whether to return the line string with the last point being identical to the first point.

    interpolate : int, optional
        Number of points to interpolate between any pair of two consecutive points. These points are added
        to the final line string.

    Returns
    -------
    shapely.geometry.LineString
        The Shapely LineString matching the polygon's exterior.

    """
    # delegate the conversion (and optional interpolation) to the module helper
    return _convert_points_to_shapely_line_string(
        self.exterior, closed=closed, interpolate=interpolate)
def to_bounding_box(self):
    """
    Convert this polygon to a bounding box tightly containing the whole polygon.

    Returns
    -------
    imgaug.BoundingBox
        Tight bounding box around the polygon.

    """
    xs = self.xx
    ys = self.yy
    return BoundingBox(x1=np.min(xs), y1=np.min(ys),
                       x2=np.max(xs), y2=np.max(ys),
                       label=self.label)
def to_keypoints(self):
    """
    Convert this polygon's `exterior` to ``Keypoint`` instances.

    Returns
    -------
    list of imgaug.Keypoint
        Exterior vertices as ``Keypoint`` instances.

    """
    return [Keypoint(x=px, y=py) for px, py in zip(self.xx, self.yy)]
@staticmethod
def from_shapely(polygon_shapely, label=None):
    """
    Create a polygon from a Shapely polygon.

    Note: This will remove any holes in the Shapely polygon.

    Parameters
    ----------
    polygon_shapely : shapely.geometry.Polygon
        The shapely polygon.

    label : None or str, optional
        The label of the new polygon.

    Returns
    -------
    imgaug.Polygon
        A polygon with the same exterior as the Shapely polygon.

    """
    # load shapely lazily, which makes the dependency more optional
    import shapely.geometry
    do_assert(isinstance(polygon_shapely, shapely.geometry.Polygon))

    # polygon_shapely.exterior can be None if the polygon was instantiated without points
    ext = polygon_shapely.exterior
    if ext is None or len(ext.coords) == 0:
        return Polygon([], label=label)

    coords = np.float32([(px, py) for px, py in ext.coords])
    return Polygon(coords, label=label)
def exterior_almost_equals(self, other_polygon, max_distance=1e-6, interpolate=8):
    """
    Estimate whether the geometry of the exterior of this polygon and another polygon are comparable.

    The two exteriors can have different numbers of points, but any point randomly sampled on the exterior
    of one polygon should be close to the closest point on the exterior of the other polygon.

    Note that this method works approximately. One can come up with polygons with fairly different shapes that
    will still be estimated as equal by this method. In practice however this should be unlikely to be the case.
    The probability for something like that goes down as the interpolation parameter is increased.

    Parameters
    ----------
    other_polygon : imgaug.Polygon or (N,2) ndarray or list of tuple
        The other polygon with which to compare the exterior.
        If this is an ndarray, it is assumed to represent an exterior.
        It must then have dtype float32 and shape (N,2) with the second dimension denoting xy-coordinates.
        If this is a list of tuples, it is assumed to represent an exterior.
        Each tuple then must contain exactly two numbers, denoting xy-coordinates.

    max_distance : number
        The maximum euclidean distance between a point on one polygon and the closest point on the other polygon.
        If the distance is exceeded for any such pair, the two exteriors are not viewed as equal.
        The points are either the points contained in the polygon's exterior ndarray or interpolated points
        between these.

    interpolate : int
        How many points to interpolate between the points of the polygon's exteriors.
        If this is set to zero, then only the points given by the polygon's exterior ndarrays will be used.
        Higher values make it less likely that unequal polygons are evaluated as equal.

    Returns
    -------
    bool
        Whether the two polygon's exteriors can be viewed as equal (approximate test).

    """
    # load shapely lazily, which makes the dependency more optional
    import shapely.geometry

    # absolute tolerance for all point-wise comparisons below
    atol = max_distance

    # normalize `other_polygon` to an (N,2) float32 exterior array
    ext_a = self.exterior
    if isinstance(other_polygon, list):
        ext_b = np.float32(other_polygon)
    elif is_np_array(other_polygon):
        ext_b = other_polygon
    else:
        assert isinstance(other_polygon, Polygon)
        ext_b = other_polygon.exterior
    len_a = len(ext_a)
    len_b = len(ext_b)

    # two empty exteriors are equal; an empty and a non-empty one are not
    if len_a == 0 and len_b == 0:
        return True
    elif len_a == 0 and len_b > 0:
        return False
    elif len_a > 0 and len_b == 0:
        return False

    # neither A nor B is zero-sized at this point

    # if A or B only contain points identical to the first point, merge them to one point
    if len_a > 1:
        if all([np.allclose(ext_a[0, :], ext_a[1 + i, :], rtol=0, atol=atol) for i in sm.xrange(len_a - 1)]):
            ext_a = ext_a[0:1, :]
            len_a = 1
    if len_b > 1:
        if all([np.allclose(ext_b[0, :], ext_b[1 + i, :], rtol=0, atol=atol) for i in sm.xrange(len_b - 1)]):
            ext_b = ext_b[0:1, :]
            len_b = 1

    # handle polygons that contain a single point
    if len_a == 1 and len_b == 1:
        return np.allclose(ext_a[0, :], ext_b[0, :], rtol=0, atol=atol)
    elif len_a == 1:
        # a single-point polygon equals a multi-point one only if all of the
        # latter's points collapse onto that single point (within tolerance)
        return all([np.allclose(ext_a[0, :], ext_b[i, :], rtol=0, atol=atol) for i in sm.xrange(len_b)])
    elif len_b == 1:
        return all([np.allclose(ext_b[0, :], ext_a[i, :], rtol=0, atol=atol) for i in sm.xrange(len_a)])

    # After this point, both polygons have at least 2 points, i.e. LineStrings can be used.
    # We can also safely go back to the original exteriors (before close points were merged).
    ls_a = self.to_shapely_line_string(closed=True, interpolate=interpolate)
    if isinstance(other_polygon, list) or is_np_array(other_polygon):
        ls_b = _convert_points_to_shapely_line_string(
            other_polygon, closed=True, interpolate=interpolate)
    else:
        ls_b = other_polygon.to_shapely_line_string(
            closed=True, interpolate=interpolate)

    # Measure the distance from each point in A to LineString B and vice versa.
    # Make sure that no point violates the tolerance.
    # Note that we can't just use LineString.almost_equals(LineString) -- that seems to expect the same number
    # and order of points in both LineStrings (failed with duplicated points).
    for x, y in ls_a.coords:
        point = shapely.geometry.Point(x, y)
        if not ls_b.distance(point) <= max_distance:
            return False
    for x, y in ls_b.coords:
        point = shapely.geometry.Point(x, y)
        if not ls_a.distance(point) <= max_distance:
            return False

    return True
def almost_equals(self, other, max_distance=1e-6, interpolate=8):
    """
    Compare this polygon with another one and estimate whether they can be viewed as equal.

    This is the same as :func:`imgaug.Polygon.exterior_almost_equals` but
    additionally requires the labels to match.

    Parameters
    ----------
    other
        The object to compare against. If not a Polygon, then False will be returned.

    max_distance : float
        See :func:`imgaug.Polygon.exterior_almost_equals`.

    interpolate : int
        See :func:`imgaug.Polygon.exterior_almost_equals`.

    Returns
    -------
    bool
        Whether the two polygons can be viewed as equal. In the case of the
        exteriors this is an approximate test.

    """
    if not isinstance(other, Polygon):
        return False
    # Labels must match exactly; two ``None`` labels count as matching.
    # (Equivalent to the expanded None-checks: any mismatch -- including
    # exactly one side being None -- fails the comparison.)
    if self.label != other.label:
        return False
    return self.exterior_almost_equals(
        other, max_distance=max_distance, interpolate=interpolate)
def copy(self, exterior=None, label=None):
    """
    Create a shallow copy of the Polygon object.

    Note: this simply delegates to :func:`imgaug.Polygon.deepcopy`, as the
    object's state is fully described by its exterior array and label.

    Parameters
    ----------
    exterior : list of imgaug.Keypoint or list of tuple or (N,2) ndarray, optional
        List of points defining the polygon. See :func:`imgaug.Polygon.__init__` for details.

    label : None or str, optional
        If not None, the copy's label is set to this value.

    Returns
    -------
    imgaug.Polygon
        Shallow copy.

    """
    return self.deepcopy(exterior=exterior, label=label)
def deepcopy(self, exterior=None, label=None):
    """
    Create a deep copy of the Polygon object.

    Parameters
    ----------
    exterior : list of Keypoint or list of tuple or (N,2) ndarray, optional
        List of points defining the polygon. See `imgaug.Polygon.__init__` for details.

    label : None or str
        If not None, the copy's label is set to this value.

    Returns
    -------
    imgaug.Polygon
        Deep copy.

    """
    # Copy the exterior array unless a replacement was provided.
    new_exterior = exterior if exterior is not None else np.copy(self.exterior)
    new_label = label if label is not None else self.label
    return Polygon(exterior=new_exterior, label=new_label)
def __repr__(self):
    # Same representation as __str__.
    return str(self)
def __str__(self):
    # Render each exterior vertex with three decimals.
    formatted_points = ["(x=%.3f, y=%.3f)" % (x, y) for x, y in self.exterior]
    return "Polygon([%s] (%d points), label=%s)" % (
        ", ".join(formatted_points), len(self.exterior), self.label)
class PolygonsOnImage(object):
    """
    Object that represents all polygons on a single image.

    Parameters
    ----------
    polygons : list of imgaug.Polygon
        List of polygons on the image.

    shape : tuple of int
        The shape of the image on which the polygons are placed.

    Examples
    --------
    >>> import numpy as np
    >>> import imgaug as ia
    >>> image = np.zeros((100, 100))
    >>> polys = [
    >>>     ia.Polygon([(0, 0), (100, 0), (100, 100), (0, 100)]),
    >>>     ia.Polygon([(50, 0), (100, 50), (50, 100), (0, 50)])
    >>> ]
    >>> polys_oi = ia.PolygonsOnImage(polys, shape=image.shape)

    """

    def __init__(self, polygons, shape):
        self.polygons = polygons
        if is_np_array(shape):
            # an image array was given instead of a shape tuple
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)

    @property
    def empty(self):
        """
        Whether this object contains zero polygons.

        Returns
        -------
        bool
            True if this object contains zero polygons.

        """
        return not self.polygons

    def on(self, image):
        """
        Project the polygons from one image onto a new one.

        Parameters
        ----------
        image : ndarray or tuple of int
            New image onto which the polygons are to be projected.
            May also simply be that new image's shape tuple.

        Returns
        -------
        imgaug.PolygonsOnImage
            Object containing all projected polygons.

        """
        shape = image.shape if is_np_array(image) else image
        if shape[0:2] == self.shape[0:2]:
            # identical height/width -- no reprojection necessary
            return self.deepcopy()
        projected = [poly.project(self.shape, shape) for poly in self.polygons]
        return PolygonsOnImage(projected, shape)

    def draw_on_image(self,
                      image,
                      color=(0, 255, 0), color_fill=None,
                      color_perimeter=None, color_points=None,
                      alpha=1.0, alpha_fill=None,
                      alpha_perimeter=None, alpha_points=None,
                      size_points=3,
                      raise_if_out_of_image=False):
        """
        Draw all polygons onto a given image.

        Parameters
        ----------
        image : (H,W,C) ndarray
            The image onto which to draw the polygons. This image should
            usually have the same shape as set in ``PolygonsOnImage.shape``.

        color : iterable of int, optional
            The color to use for the whole polygons (channel layout of the
            image, usually RGB). ``color_fill``, ``color_perimeter`` and
            ``color_points`` are derived from this value wherever they are
            ``None``.

        color_fill : None or iterable of int, optional
            Color of the inner polygon areas (excluding perimeters).
            If ``None``, derived from ``color * 1.0``.

        color_perimeter : None or iterable of int, optional
            Color of the polygon perimeters (aka borders).
            If ``None``, derived from ``color * 0.5``.

        color_points : None or iterable of int, optional
            Color of the polygon corner points.
            If ``None``, derived from ``color * 0.5``.

        alpha : float, optional
            Opacity of the whole polygons, where ``1.0`` denotes completely
            visible polygons and ``0.0`` invisible ones. ``alpha_fill``,
            ``alpha_perimeter`` and ``alpha_points`` are derived from this
            value wherever they are ``None``.

        alpha_fill : None or number, optional
            Opacity of the inner polygon areas.
            If ``None``, derived from ``alpha * 0.5``.

        alpha_perimeter : None or number, optional
            Opacity of the perimeters.
            If ``None``, derived from ``alpha * 1.0``.

        alpha_points : None or number, optional
            Opacity of the corner points. Currently an on/off choice, i.e.
            only ``0.0`` or ``1.0`` are allowed.
            If ``None``, derived from ``alpha * 1.0``.

        size_points : int, optional
            Size of the corner points; each is drawn as a ``C x C`` square.

        raise_if_out_of_image : bool, optional
            Whether to raise an error if any polygon is partially/fully
            outside of the image. If set to False, no error will be raised
            and only the parts inside the image will be drawn.

        Returns
        -------
        image : (H,W,C) ndarray
            Image with drawn polygons.

        """
        result = image
        for poly in self.polygons:
            result = poly.draw_on_image(
                result,
                color=color,
                color_fill=color_fill,
                color_perimeter=color_perimeter,
                color_points=color_points,
                alpha=alpha,
                alpha_fill=alpha_fill,
                alpha_perimeter=alpha_perimeter,
                alpha_points=alpha_points,
                size_points=size_points,
                raise_if_out_of_image=raise_if_out_of_image
            )
        return result

    def remove_out_of_image(self, fully=True, partly=False):
        """
        Remove all polygons that are fully or partially outside of the image.

        Parameters
        ----------
        fully : bool, optional
            Whether to remove polygons that are fully outside of the image.

        partly : bool, optional
            Whether to remove polygons that are partially outside of the image.

        Returns
        -------
        imgaug.PolygonsOnImage
            Reduced set of polygons, with those that were fully/partially
            outside of the image removed.

        """
        remaining = [
            poly for poly in self.polygons
            if not poly.is_out_of_image(self.shape, fully=fully, partly=partly)
        ]
        return PolygonsOnImage(remaining, shape=self.shape)

    def clip_out_of_image(self):
        """
        Clip off all parts from all polygons that are outside of the image.

        NOTE: The result can contain fewer polygons than the input did
        (a polygon fully outside the image plane is dropped) and can also
        contain *more* polygons (clipping may split a polygon into several
        unconnected parts that each remain within the image plane).

        Returns
        -------
        imgaug.PolygonsOnImage
            Polygons, clipped to fall within the image dimensions. Count of
            output polygons may differ from the input count.

        """
        clipped = []
        for poly in self.polygons:
            if poly.is_partly_within_image(self.shape):
                # clip_out_of_image() returns a MultiPolygon; flatten it
                clipped.extend(poly.clip_out_of_image(self.shape).geoms)
        return PolygonsOnImage(clipped, shape=self.shape)

    def shift(self, top=None, right=None, bottom=None, left=None):
        """
        Shift all polygons from one or more image sides, i.e. move them on the x/y-axis.

        Parameters
        ----------
        top : None or int, optional
            Amount of pixels by which to shift all polygons from the top.

        right : None or int, optional
            Amount of pixels by which to shift all polygons from the right.

        bottom : None or int, optional
            Amount of pixels by which to shift all polygons from the bottom.

        left : None or int, optional
            Amount of pixels by which to shift all polygons from the left.

        Returns
        -------
        imgaug.PolygonsOnImage
            Shifted polygons.

        """
        shifted = [
            poly.shift(top=top, right=right, bottom=bottom, left=left)
            for poly in self.polygons
        ]
        return PolygonsOnImage(shifted, shape=self.shape)

    def copy(self):
        """
        Create a shallow copy of the PolygonsOnImage object.

        Returns
        -------
        imgaug.PolygonsOnImage
            Shallow copy.

        """
        return copy.copy(self)

    def deepcopy(self):
        """
        Create a deep copy of the PolygonsOnImage object.

        Returns
        -------
        imgaug.PolygonsOnImage
            Deep copy.

        """
        # Manual copy is far faster than copy.deepcopy for this class.
        return PolygonsOnImage(
            [poly.deepcopy() for poly in self.polygons],
            tuple(self.shape))

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "PolygonsOnImage(%s, shape=%s)" % (str(self.polygons), self.shape)
def _convert_points_to_shapely_line_string(points, closed=False, interpolate=0):
    """
    Convert a sequence of xy-coordinates to a Shapely LineString.

    Parameters
    ----------
    points : iterable of tuple of number
        Points of the line string; each point must be index-accessible
        with ``point[0]`` (x) and ``point[1]`` (y). At least two points
        are required.

    closed : bool, optional
        Whether to append the first point at the end, closing the line.

    interpolate : int, optional
        Number of points to interpolate between each consecutive pair of
        points (0 disables interpolation).

    Returns
    -------
    shapely.geometry.LineString
        The resulting line string.

    Raises
    ------
    Exception
        If fewer than two points are provided.

    """
    # load shapely lazily, which makes the dependency more optional
    import shapely.geometry

    if len(points) <= 1:
        raise Exception(
            ("Conversion to shapely line string requires at least two points, but points input contains "
             "only %d points.") % (len(points),)
        )

    points_tuples = [(point[0], point[1]) for point in points]

    # interpolate points between each consecutive pair of points
    if interpolate > 0:
        points_tuples = _interpolate_points(points_tuples, interpolate)

    # Close if requested. (A previous `len(points) > 1` guard here was
    # always true, since fewer than two points raise above -- removed.)
    if closed:
        points_tuples.append(points_tuples[0])
    return shapely.geometry.LineString(points_tuples)
def _interpolate_point_pair(point_a, point_b, nb_steps):
if nb_steps < 1:
return []
x1, y1 = point_a
x2, y2 = point_b
vec = np.float32([x2 - x1, y2 - y1])
step_size = vec / (1 + nb_steps)
return [(x1 + (i + 1) * step_size[0], y1 + (i + 1) * step_size[1]) for i in sm.xrange(nb_steps)]
def _interpolate_points(points, nb_steps, closed=True):
    """
    Insert ``nb_steps`` interpolated points between each consecutive pair
    of points. With ``closed=True`` the segment from the last back to the
    first point is interpolated as well (without appending the first point
    itself to the output).
    """
    if len(points) <= 1:
        return points

    working = list(points)
    if closed:
        # temporarily append the first point so the closing segment is
        # also interpolated; it is never added to the output itself
        working = working + [working[0]]

    interpolated = []
    for start, end in zip(working[:-1], working[1:]):
        interpolated.append(start)
        interpolated.extend(_interpolate_point_pair(start, end, nb_steps))
    if not closed:
        interpolated.append(working[-1])
    return interpolated
def _interpolate_points_by_max_distance(points, max_distance, closed=True):
    """
    Insert interpolated points between consecutive points so that gaps are
    roughly at most ``max_distance`` wide. With ``closed=True``, the
    segment from the last back to the first point is treated the same way.
    """
    do_assert(max_distance > 0, "max_distance must have value greater than 0, got %.8f" % (max_distance,))
    if len(points) <= 1:
        return points

    working = list(points)
    if closed:
        # append first point temporarily; it is not part of the output
        working = working + [working[0]]

    result = []
    for start, end in zip(working[:-1], working[1:]):
        gap = np.sqrt((start[0] - end[0]) ** 2 + (start[1] - end[1]) ** 2)
        # number of interpolation steps needed so segments stay <= max_distance
        nb_steps = int((gap / max_distance) - 1)
        result.append(start)
        result.extend(_interpolate_point_pair(start, end, nb_steps))
    if not closed:
        result.append(working[-1])
    return result
class _ConcavePolygonRecoverer(object):
    """
    Helper that repairs invalid (e.g. self-intersecting) polygon exteriors,
    producing a valid concave polygon that closely follows the broken input.
    """

    def __init__(self, threshold_duplicate_points=1e-4, noise_strength=1e-4,
                 oversampling=0.01, max_segment_difference=1e-4):
        # points closer to each other than this distance count as duplicates
        self.threshold_duplicate_points = threshold_duplicate_points
        # magnitude of random jitter applied to duplicate/collinear points
        self.noise_strength = noise_strength
        # relative distance of extra support points sampled around intersections
        self.oversampling = oversampling
        # max allowed mismatch when matching an intersection point to a segment
        self.max_segment_difference = max_segment_difference

        # this limits the maximum amount of points after oversampling, i.e.
        # if N points are input into oversampling, then M oversampled points are
        # generated such that N+M <= this value
        self.oversample_up_to_n_points_max = 75

        # ----
        # parameters for _fit_best_valid_polygon()
        # ----
        # how many changes may be done max to the initial (convex hull) polygon
        # before simply returning the result
        self.fit_n_changes_max = 100
        # for how many iterations the optimization loop may run max
        # before simply returning the result
        self.fit_n_iters_max = 3
        # how far (wrt. to their position in the input list) two points may be
        # apart max to consider adding an edge between them (in the first loop
        # iteration and the ones after that)
        self.fit_max_dist_first_iter = 1
        self.fit_max_dist_other_iters = 2
        # The fit loop first generates candidate edges and then modifies the
        # polygon based on these candidates. This limits the maximum amount
        # of considered candidates. If the number is less than the possible
        # number of candidates, they are randomly subsampled. Values beyond
        # 100 significantly increase runtime (for polygons that reach that
        # number).
        self.fit_n_candidates_before_sort_max = 100

    def recover_from(self, new_exterior, old_polygon, random_state=0):
        # Return a valid polygon built from `new_exterior`, reusing the
        # metadata (e.g. label) of `old_polygon` via deepcopy().
        assert isinstance(new_exterior, list) or (
            is_np_array(new_exterior)
            and new_exterior.ndim == 2
            and new_exterior.shape[1] == 2)
        assert len(new_exterior) >= 3, \
            "Cannot recover a concave polygon from less than three points."

        # create Polygon instance, if it is already valid then just return
        # immediately
        polygon = old_polygon.deepcopy(exterior=new_exterior)
        if polygon.is_valid:
            return polygon

        if not isinstance(random_state, np.random.RandomState):
            random_state = np.random.RandomState(random_state)
        rss = derive_random_states(random_state, 3)

        # remove consecutive duplicate points
        new_exterior = self._remove_consecutive_duplicate_points(new_exterior)

        # check that points are not all identical or on a line
        new_exterior = self._fix_polygon_is_line(new_exterior, rss[0])

        # jitter duplicate points
        new_exterior = self._jitter_duplicate_points(new_exterior, rss[1])

        # generate intersection points
        segment_add_points = self._generate_intersection_points(new_exterior)

        # oversample points around intersections
        if self.oversampling is not None and self.oversampling > 0:
            segment_add_points = self._oversample_intersection_points(
                new_exterior, segment_add_points)

        # integrate new points into exterior
        new_exterior_inter = self._insert_intersection_points(
            new_exterior, segment_add_points)

        # find best fit polygon, starting from convex polygon
        new_exterior_concave_ids = self._fit_best_valid_polygon(new_exterior_inter, rss[2])
        new_exterior_concave = [new_exterior_inter[idx] for idx in new_exterior_concave_ids]

        # TODO return new_exterior_concave here instead of polygon, leave it to
        # caller to decide what to do with it
        return old_polygon.deepcopy(exterior=new_exterior_concave)

    def _remove_consecutive_duplicate_points(self, points):
        # Drop points that are (near-)identical to their predecessor; also
        # drops the last point if it duplicates the first one.
        result = []
        for point in points:
            if result:
                dist = np.linalg.norm(np.float32(point) - np.float32(result[-1]))
                is_same = (dist < self.threshold_duplicate_points)
                if not is_same:
                    result.append(point)
            else:
                result.append(point)
        if len(result) >= 2:
            # also compare last point against the first (closed exterior)
            dist = np.linalg.norm(np.float32(result[0]) - np.float32(result[-1]))
            is_same = (dist < self.threshold_duplicate_points)
            result = result[0:-1] if is_same else result
        return result

    # fix polygons for which all points are on a line
    def _fix_polygon_is_line(self, exterior, random_state):
        assert len(exterior) >= 3
        noise_strength = self.noise_strength
        # add increasingly strong noise until the points no longer lie on a line
        while self._is_polygon_line(exterior):
            noise = random_state.uniform(
                -noise_strength, noise_strength, size=(len(exterior), 2)
            ).astype(np.float32)
            exterior = [(point[0] + noise_i[0], point[1] + noise_i[1])
                        for point, noise_i in zip(exterior, noise)]
            noise_strength = noise_strength * 10
            assert noise_strength > 0
        return exterior

    @classmethod
    def _is_polygon_line(cls, exterior):
        # True if all points lie (almost) on one ray from the first point;
        # angles are bucketed at a resolution of 1/1000 radian.
        # NOTE(review): points on opposite sides of the first point yield
        # angles differing by pi and thus two buckets -- such degenerate
        # cases would not be detected as lines here; TODO confirm intended.
        vec_down = np.float32([0, 1])
        p1 = exterior[0]
        angles = set()
        for p2 in exterior[1:]:
            vec = np.float32(p2) - np.float32(p1)
            angle = angle_between_vectors(vec_down, vec)
            angles.add(int(angle * 1000))
        return len(angles) <= 1

    def _jitter_duplicate_points(self, exterior, random_state):
        def _find_duplicates(exterior_with_duplicates):
            # Coarse spatial hashing: each point is registered in its grid
            # cell and the 8 neighbouring cells so that near-duplicates on
            # cell borders still end up in a common bucket.
            points_map = collections.defaultdict(list)
            for i, point in enumerate(exterior_with_duplicates):
                # we use 10/x here to be a bit more lenient, the precise
                # distance test is further below
                x = int(np.round(point[0] * ((1/10) / self.threshold_duplicate_points)))
                y = int(np.round(point[1] * ((1/10) / self.threshold_duplicate_points)))
                for d0 in [-1, 0, 1]:
                    for d1 in [-1, 0, 1]:
                        points_map[(x+d0, y+d1)].append(i)

            duplicates = [False] * len(exterior_with_duplicates)
            for key in points_map:
                candidates = points_map[key]
                for i in range(len(candidates)):
                    p0_idx = candidates[i]
                    p0 = exterior_with_duplicates[p0_idx]
                    if duplicates[p0_idx]:
                        continue
                    for j in range(i+1, len(candidates)):
                        p1_idx = candidates[j]
                        p1 = exterior_with_duplicates[p1_idx]
                        if duplicates[p1_idx]:
                            continue
                        # exact distance check; only the *second* point of a
                        # duplicate pair is marked, the first is kept as-is
                        dist = np.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)
                        if dist < self.threshold_duplicate_points:
                            duplicates[p1_idx] = True

            return duplicates

        noise_strength = self.noise_strength
        assert noise_strength > 0
        exterior = exterior[:]
        converged = False
        while not converged:
            duplicates = _find_duplicates(exterior)
            if any(duplicates):
                # NOTE(review): noise is always sampled with the initial
                # self.noise_strength; the escalating local `noise_strength`
                # is multiplied below but never used for sampling --
                # possibly unintended; TODO confirm.
                noise = random_state.uniform(
                    -self.noise_strength, self.noise_strength, size=(len(exterior), 2)
                ).astype(np.float32)
                for i, is_duplicate in enumerate(duplicates):
                    if is_duplicate:
                        exterior[i] = (exterior[i][0] + noise[i][0], exterior[i][1] + noise[i][1])
                noise_strength *= 10
            else:
                converged = True
        return exterior

    # TODO remove?
    @classmethod
    def _calculate_circumference(cls, points):
        # Sum of euclidean distances between consecutive points, treating
        # the point list as closed (last connects back to first).
        assert len(points) >= 3
        points = np.array(points, dtype=np.float32)
        points_matrix = np.zeros((len(points), 4), dtype=np.float32)
        points_matrix[:, 0:2] = points
        points_matrix[0:-1, 2:4] = points_matrix[1:, 0:2]
        points_matrix[-1, 2:4] = points_matrix[0, 0:2]
        distances = np.linalg.norm(
            points_matrix[:, 0:2] - points_matrix[:, 2:4], axis=1)
        return np.sum(distances)

    def _generate_intersection_points(self, exterior, one_point_per_intersection=True):
        # Compute self-intersection points of the exterior and assign each
        # to the segment it lies on. Returns one (sorted) list of points
        # per exterior segment.
        assert isinstance(exterior, list)
        assert all([len(point) == 2 for point in exterior])
        if len(exterior) <= 0:
            return []

        # use (*[i][0], *[i][1]) formulation here instead of just *[i],
        # because this way we convert numpy arrays to tuples of floats, which
        # is required by isect_segments_include_segments
        segments = [
            (
                (exterior[i][0], exterior[i][1]),
                (exterior[(i + 1) % len(exterior)][0], exterior[(i + 1) % len(exterior)][1])
            )
            for i in range(len(exterior))
        ]

        # returns [(point, [(segment_p0, segment_p1), ..]), ...]
        from imgaug.external.poly_point_isect_py2py3 import isect_segments_include_segments
        intersections = isect_segments_include_segments(segments)

        # estimate to which segment the found intersection points belong
        segments_add_points = [[] for _ in range(len(segments))]
        for point, associated_segments in intersections:
            # the intersection point may be associated with multiple segments,
            # but we only want to add it once, so pick the first segment
            if one_point_per_intersection:
                associated_segments = [associated_segments[0]]

            for seg_inter_p0, seg_inter_p1 in associated_segments:
                diffs = []
                dists = []
                for seg_p0, seg_p1 in segments:
                    # match segments irrespective of their point order
                    dist_p0p0 = np.linalg.norm(seg_p0 - np.array(seg_inter_p0))
                    dist_p1p1 = np.linalg.norm(seg_p1 - np.array(seg_inter_p1))
                    dist_p0p1 = np.linalg.norm(seg_p0 - np.array(seg_inter_p1))
                    dist_p1p0 = np.linalg.norm(seg_p1 - np.array(seg_inter_p0))
                    diff = min(dist_p0p0 + dist_p1p1, dist_p0p1 + dist_p1p0)
                    diffs.append(diff)
                    dists.append(np.linalg.norm(
                        (seg_p0[0] - point[0], seg_p0[1] - point[1])
                    ))
                min_diff = np.min(diffs)
                if min_diff < self.max_segment_difference:
                    idx = int(np.argmin(diffs))
                    segments_add_points[idx].append((point, dists[idx]))
                else:
                    warnings.warn(
                        "Couldn't find fitting segment in "
                        "_generate_intersection_points(). Ignoring intersection "
                        "point.")

        # sort intersection points by their distance to point 0 in each segment
        # (clockwise ordering, this does something only for segments with
        # >=2 intersection points)
        segment_add_points_sorted = []
        for idx in range(len(segments_add_points)):
            points = [t[0] for t in segments_add_points[idx]]
            dists = [t[1] for t in segments_add_points[idx]]
            if len(points) < 2:
                segment_add_points_sorted.append(points)
            else:
                both = sorted(zip(points, dists), key=lambda t: t[1])
                # keep points, drop distances
                segment_add_points_sorted.append([a for a, _b in both])
        return segment_add_points_sorted

    def _oversample_intersection_points(self, exterior, segment_add_points):
        # Add extra support points shortly before/after each intersection
        # point so the later polygon fitting step has more points to use.
        # segment_add_points must be sorted
        if self.oversampling is None or self.oversampling <= 0:
            return segment_add_points

        segment_add_points_sorted_overs = [[] for _ in range(len(segment_add_points))]
        n_points = len(exterior)
        for i in range(len(exterior)):
            last = exterior[i]
            for j, p_inter in enumerate(segment_add_points[i]):
                direction = (p_inter[0] - last[0], p_inter[1] - last[1])
                if j == 0:
                    # previous point was non-intersection, place 1 new point
                    oversample = [1.0 - self.oversampling]
                else:
                    # previous point was intersection, place 2 new points
                    oversample = [self.oversampling, 1.0 - self.oversampling]

                for dist in oversample:
                    point_over = (last[0] + dist * direction[0],
                                  last[1] + dist * direction[1])
                    segment_add_points_sorted_overs[i].append(point_over)
                segment_add_points_sorted_overs[i].append(p_inter)
                last = p_inter

                is_last_in_group = (j == len(segment_add_points[i]) - 1)
                if is_last_in_group:
                    # previous point was oversampled, next point is
                    # non-intersection, place 1 new point between the two
                    exterior_point = exterior[(i + 1) % len(exterior)]
                    direction = (exterior_point[0] - last[0],
                                 exterior_point[1] - last[1])
                    segment_add_points_sorted_overs[i].append(
                        (last[0] + self.oversampling * direction[0],
                         last[1] + self.oversampling * direction[1])
                    )
                    last = segment_add_points_sorted_overs[i][-1]
            n_points += len(segment_add_points_sorted_overs[i])
            # stop once the total point budget is exhausted; the remaining
            # segments keep their (empty) lists
            if n_points > self.oversample_up_to_n_points_max:
                return segment_add_points_sorted_overs
        return segment_add_points_sorted_overs

    @classmethod
    def _insert_intersection_points(cls, exterior, segment_add_points):
        # Merge each segment's (sorted) extra points into the exterior,
        # directly after the segment's start point.
        # segment_add_points must be sorted
        assert len(exterior) == len(segment_add_points)
        exterior_interp = []
        for i in range(len(exterior)):
            p0 = exterior[i]
            exterior_interp.append(p0)
            for j, p_inter in enumerate(segment_add_points[i]):
                exterior_interp.append(p_inter)
        return exterior_interp

    def _fit_best_valid_polygon(self, points, random_state):
        # Find an ordering/subset of `points` (returned as indices) that
        # forms a valid polygon while covering as many of the input points
        # as feasible. Starts from the convex hull and greedily re-inserts
        # left-over points as long as the polygon stays valid.
        if len(points) < 2:
            return None

        def _compute_distance_point_to_line(point, line_start, line_end):
            # standard point-to-line distance; falls back to point-to-point
            # distance for a degenerate line (start == end)
            x_diff = line_end[0] - line_start[0]
            y_diff = line_end[1] - line_start[1]
            num = abs(
                y_diff*point[0] - x_diff*point[1]
                + line_end[0]*line_start[1] - line_end[1]*line_start[0]
            )
            den = np.sqrt(y_diff**2 + x_diff**2)
            if den == 0:
                return np.sqrt((point[0] - line_start[0])**2 + (point[1] - line_start[1])**2)
            return num / den

        poly = Polygon(points)
        if poly.is_valid:
            return sm.xrange(len(points))

        hull = scipy.spatial.ConvexHull(points)
        points_kept = list(hull.vertices)
        points_left = [i for i in range(len(points)) if i not in hull.vertices]
        iteration = 0
        n_changes = 0
        converged = False
        while not converged:
            candidates = []

            # estimate distance metrics for points-segment pairs:
            # (1) distance (in vertices) between point and segment-start-point
            # in original input point chain
            # (2) euclidean distance between point and segment/line
            # TODO this can be done more efficiently by caching the values and
            # only computing distances to segments that have changed in
            # the last iteration
            # TODO these distances are not really the best metrics here. Something
            # like IoU between new and old (invalid) polygon would be
            # better, but can probably only be computed for pairs of valid
            # polygons. Maybe something based on pointwise distances,
            # where the points are sampled on the edges (not edge vertices
            # themselves). Maybe something based on drawing the perimeter
            # on images or based on distance maps.
            point_kept_idx_to_pos = {point_idx: i for i, point_idx in enumerate(points_kept)}

            # generate all possible combinations from <points_kept> and <points_left>
            combos = np.transpose([np.tile(np.int32(points_left), len(np.int32(points_kept))),
                                   np.repeat(np.int32(points_kept), len(np.int32(points_left)))])
            combos = np.concatenate(
                (combos, np.zeros((combos.shape[0], 3), dtype=np.int32)),
                axis=1)
            # copy columns 0, 1 into 2, 3 so that 2 is always the lower value
            mask = combos[:, 0] < combos[:, 1]
            combos[:, 2:4] = combos[:, 0:2]
            combos[mask, 2] = combos[mask, 1]
            combos[mask, 3] = combos[mask, 0]
            # distance (in indices) between each pair of <point_kept> and <point_left>
            combos[:, 4] = np.minimum(
                combos[:, 3] - combos[:, 2],
                len(points) - combos[:, 3] + combos[:, 2]
            )

            # limit candidates
            # NOTE(review): iteration 0 uses fit_max_dist_other_iters and
            # later iterations use fit_max_dist_first_iter -- the attribute
            # names suggest the opposite mapping; TODO confirm intended.
            max_dist = self.fit_max_dist_other_iters
            if iteration > 0:
                max_dist = self.fit_max_dist_first_iter
            candidate_rows = combos[combos[:, 4] <= max_dist]
            if self.fit_n_candidates_before_sort_max is not None \
                    and len(candidate_rows) > self.fit_n_candidates_before_sort_max:
                random_state.shuffle(candidate_rows)
                candidate_rows = candidate_rows[0:self.fit_n_candidates_before_sort_max]

            for row in candidate_rows:
                point_left_idx = row[0]
                point_kept_idx = row[1]
                in_points_kept_pos = point_kept_idx_to_pos[point_kept_idx]
                segment_start_idx = point_kept_idx
                segment_end_idx = points_kept[(in_points_kept_pos+1) % len(points_kept)]
                segment_start = points[segment_start_idx]
                segment_end = points[segment_end_idx]
                if iteration == 0:
                    dist_eucl = 0
                else:
                    dist_eucl = _compute_distance_point_to_line(
                        points[point_left_idx], segment_start, segment_end)
                candidates.append((point_left_idx, point_kept_idx, row[4], dist_eucl))

            # Sort computed distances first by minimal vertex-distance (see
            # above, metric 1) (ASC), then by euclidean distance
            # (metric 2) (ASC).
            candidate_ids = np.arange(len(candidates))
            candidate_ids = sorted(candidate_ids, key=lambda idx: (candidates[idx][2], candidates[idx][3]))
            if self.fit_n_changes_max is not None:
                candidate_ids = candidate_ids[:self.fit_n_changes_max]

            # Iterate over point-segment pairs in sorted order. For each such
            # candidate: Add the point to the already collected points,
            # create a polygon from that and check if the polygon is valid.
            # If it is, add the point to the output list and recalculate
            # distance metrics. If it isn't valid, proceed with the next
            # candidate until no more candidates are left.
            #
            # small change: this now no longer breaks upon the first found point
            # that leads to a valid polygon, but checks all candidates instead
            is_valid = False
            done = set()
            for candidate_idx in candidate_ids:
                point_left_idx = candidates[candidate_idx][0]
                point_kept_idx = candidates[candidate_idx][1]
                if (point_left_idx, point_kept_idx) not in done:
                    in_points_kept_idx = [i for i, point_idx in enumerate(points_kept) if point_idx == point_kept_idx][0]
                    points_kept_hypothesis = points_kept[:]
                    points_kept_hypothesis.insert(in_points_kept_idx+1, point_left_idx)
                    poly_hypothesis = Polygon([points[idx] for idx in points_kept_hypothesis])
                    if poly_hypothesis.is_valid:
                        is_valid = True
                        points_kept = points_kept_hypothesis
                        points_left = [point_idx for point_idx in points_left if point_idx != point_left_idx]
                        n_changes += 1
                        if n_changes >= self.fit_n_changes_max:
                            return points_kept
                    done.add((point_left_idx, point_kept_idx))
                    done.add((point_kept_idx, point_left_idx))

            # none of the left points could be used to create a valid polygon?
            # (this automatically covers the case of no points being left)
            if not is_valid and iteration > 0:
                converged = True

            iteration += 1
            if self.fit_n_iters_max is not None and iteration > self.fit_n_iters_max:
                break

        return points_kept
class MultiPolygon(object):
    """
    Collection of multiple polygons.

    Parameters
    ----------
    geoms : list of imgaug.Polygon
        List of the polygons.

    """
    def __init__(self, geoms):
        """Create a new MultiPolygon instance."""
        # An empty list is explicitly allowed; otherwise every element must be a Polygon.
        do_assert(len(geoms) == 0 or all([isinstance(geom, Polygon) for geom in geoms]))
        self.geoms = geoms
    @staticmethod
    def from_shapely(geometry, label=None):
        """
        Create a MultiPolygon from a Shapely MultiPolygon, a Shapely Polygon
        or a Shapely GeometryCollection.

        This also creates all necessary Polygons contained by this MultiPolygon.

        Parameters
        ----------
        geometry : shapely.geometry.MultiPolygon or shapely.geometry.Polygon\
                   or shapely.geometry.collection.GeometryCollection
            The object to convert to a MultiPolygon.

        label : None or str, optional
            A label assigned to all Polygons within the MultiPolygon.

        Returns
        -------
        imgaug.MultiPolygon
            The derived MultiPolygon.

        """
        # load shapely lazily, which makes the dependency more optional
        import shapely.geometry
        # Note: a shapely Polygon is not a MultiPolygon subclass, so the order
        # of these checks does not matter for correctness.
        if isinstance(geometry, shapely.geometry.Polygon):
            return MultiPolygon([Polygon.from_shapely(geometry, label=label)])
        if isinstance(geometry, shapely.geometry.MultiPolygon):
            converted = [Polygon.from_shapely(geom, label=label) for geom in geometry.geoms]
            return MultiPolygon(converted)
        if isinstance(geometry, shapely.geometry.collection.GeometryCollection):
            # GeometryCollections may mix geometry types; only pure polygon
            # collections are supported here.
            do_assert(all([isinstance(geom, shapely.geometry.Polygon) for geom in geometry.geoms]))
            converted = [Polygon.from_shapely(geom, label=label) for geom in geometry.geoms]
            return MultiPolygon(converted)
        raise Exception("Unknown datatype '%s'. Expected shapely.geometry.Polygon or "
                        "shapely.geometry.MultiPolygon or "
                        "shapely.geometry.collections.GeometryCollection." % (type(geometry),))
class HeatmapsOnImage(object):
    """
    Object representing heatmaps on images.

    Heatmaps are stored internally normalized to the value range ``(0.0, 1.0)``
    (attribute ``arr_0to1``); :func:`imgaug.HeatmapsOnImage.get_arr` converts back
    to the original ``(min_value, max_value)`` range.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Array representing the heatmap(s).
        Must be of dtype float32.
        If multiple heatmaps are provided, then ``C`` is expected to denote their number.

    shape : tuple of int
        Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
        heatmap(s) array, unless it is identical to the image shape (note the likely
        difference between the arrays in the number of channels).
        If there is not a corresponding image, use the shape of the heatmaps array.

    min_value : float, optional
        Minimum value for the heatmaps that `arr` represents. This will usually be ``0.0``.

    max_value : float, optional
        Maximum value for the heatmaps that `arr` represents. This will usually be ``1.0``.

    """
    def __init__(self, arr, shape, min_value=0.0, max_value=1.0):
        """Construct a new HeatmapsOnImage object."""
        do_assert(is_np_array(arr), "Expected numpy array as heatmap input array, got type %s" % (type(arr),))
        # TODO maybe allow 0-sized heatmaps? in that case the min() and max() must be adjusted
        do_assert(arr.shape[0] > 0 and arr.shape[1] > 0,
                  "Expected numpy array as heatmap with height and width greater than 0, got shape %s." % (arr.shape,))
        do_assert(arr.dtype.type in [np.float32],
                  "Heatmap input array expected to be of dtype float32, got dtype %s." % (arr.dtype,))
        do_assert(arr.ndim in [2, 3], "Heatmap input array must be 2d or 3d, got shape %s." % (arr.shape,))
        do_assert(len(shape) in [2, 3],
                  "Argument 'shape' in HeatmapsOnImage expected to be 2d or 3d, got shape %s." % (shape,))
        do_assert(min_value < max_value)
        # Value-range validation is deliberately sampled (first 50 values only),
        # as a full min()/max() over a large heatmap would be comparatively slow.
        do_assert(np.min(arr.flat[0:50]) >= min_value - np.finfo(arr.dtype).eps,
                  ("Value range of heatmap was chosen to be (%.8f, %.8f), but found value below minimum in first "
                   + "50 heatmap array values.") % (min_value, max_value))
        do_assert(np.max(arr.flat[0:50]) <= max_value + np.finfo(arr.dtype).eps,
                  ("Value range of heatmap was chosen to be (%.8f, %.8f), but found value above maximum in first "
                   + "50 heatmap array values.") % (min_value, max_value))
        if arr.ndim == 2:
            # remember 2d inputs so that get_arr() can return the original shape
            arr = arr[..., np.newaxis]
            self.arr_was_2d = True
        else:
            self.arr_was_2d = False
        # Skip the normalization arithmetic when the input is already in (0.0, 1.0);
        # this also avoids copying/altering the caller's array in the common case.
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < min_value < 0.0 + eps
        max_is_one = 1.0 - eps < max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            self.arr_0to1 = arr
        else:
            self.arr_0to1 = (arr - min_value) / (max_value - min_value)
        self.shape = shape
        self.min_value = min_value
        self.max_value = max_value
    def get_arr(self):
        """
        Get the heatmap's array within the value range originally provided in ``__init__()``.

        The HeatmapsOnImage object saves heatmaps internally in the value range ``(min=0.0, max=1.0)``.
        This function converts the internal representation to ``(min=min_value, max=max_value)``,
        where ``min_value`` and ``max_value`` are provided upon instantiation of the object.

        Returns
        -------
        result : (H,W) ndarray or (H,W,C) ndarray
            Heatmap array. Dtype is float32.

        """
        if self.arr_was_2d and self.arr_0to1.shape[2] == 1:
            arr = self.arr_0to1[:, :, 0]
        else:
            arr = self.arr_0to1
        # If the original value range was already (0.0, 1.0), no denormalization
        # is needed -- just return a copy so callers cannot mutate internal state.
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < self.min_value < 0.0 + eps
        max_is_one = 1.0 - eps < self.max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            return np.copy(arr)
        else:
            diff = self.max_value - self.min_value
            return self.min_value + diff * arr
    # TODO
    # def find_global_maxima(self):
    #     raise NotImplementedError()
    def draw(self, size=None, cmap="jet"):
        """
        Render the heatmaps as RGB images.

        Parameters
        ----------
        size : None or float or iterable of int or iterable of float, optional
            Size of the rendered RGB image as ``(height, width)``.
            See :func:`imgaug.imgaug.imresize_single_image` for details.
            If set to None, no resizing is performed and the size of the heatmaps array is used.

        cmap : str or None, optional
            Color map of ``matplotlib`` to use in order to convert the heatmaps to RGB images.
            If set to None, no color map will be used and the heatmaps will be converted
            to simple intensity maps.

        Returns
        -------
        heatmaps_drawn : list of (H,W,3) ndarray
            Rendered heatmaps. One per heatmap array channel. Dtype is uint8.

        """
        heatmaps_uint8 = self.to_uint8()
        heatmaps_drawn = []
        for c in sm.xrange(heatmaps_uint8.shape[2]):
            # c:c+1 here, because the additional axis is needed by imresize_single_image
            heatmap_c = heatmaps_uint8[..., c:c+1]
            if size is not None:
                heatmap_c_rs = imresize_single_image(heatmap_c, size, interpolation="nearest")
            else:
                heatmap_c_rs = heatmap_c
            heatmap_c_rs = np.squeeze(heatmap_c_rs).astype(np.float32) / 255.0
            if cmap is not None:
                # import only when necessary (faster startup; optional dependency; less fragile -- see issue #225)
                import matplotlib.pyplot as plt
                cmap_func = plt.get_cmap(cmap)
                heatmap_cmapped = cmap_func(heatmap_c_rs)
                # drop the alpha channel that matplotlib colormaps append, keeping RGB
                heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
            else:
                heatmap_cmapped = np.tile(heatmap_c_rs[..., np.newaxis], (1, 1, 3))
            heatmap_cmapped = np.clip(heatmap_cmapped * 255, 0, 255).astype(np.uint8)
            heatmaps_drawn.append(heatmap_cmapped)
        return heatmaps_drawn
    def draw_on_image(self, image, alpha=0.75, cmap="jet", resize="heatmaps"):
        """
        Draw the heatmaps as overlays over an image.

        Parameters
        ----------
        image : (H,W,3) ndarray
            Image onto which to draw the heatmaps. Expected to be of dtype uint8.

        alpha : float, optional
            Alpha/opacity value to use for the mixing of image and heatmaps.
            Higher values mean that the heatmaps will be more visible and the image less visible.

        cmap : str or None, optional
            Color map to use. See :func:`imgaug.HeatmapsOnImage.draw` for details.

        resize : {'heatmaps', 'image'}, optional
            In case of size differences between the image and heatmaps, either the image or
            the heatmaps can be resized. This parameter controls which of the two will be resized
            to the other's size.

        Returns
        -------
        mix : list of (H,W,3) ndarray
            Rendered overlays. One per heatmap array channel. Dtype is uint8.

        """
        # assert RGB image
        do_assert(image.ndim == 3)
        do_assert(image.shape[2] == 3)
        do_assert(image.dtype.type == np.uint8)
        do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
        do_assert(resize in ["heatmaps", "image"])
        if resize == "image":
            image = imresize_single_image(image, self.arr_0to1.shape[0:2], interpolation="cubic")
        heatmaps_drawn = self.draw(
            size=image.shape[0:2] if resize == "heatmaps" else None,
            cmap=cmap
        )
        # simple alpha blend per heatmap channel
        mix = [
            np.clip((1-alpha) * image + alpha * heatmap_i, 0, 255).astype(np.uint8)
            for heatmap_i
            in heatmaps_drawn
        ]
        return mix
    def invert(self):
        """
        Inverts each value in the heatmap, shifting low towards high values and vice versa.

        This changes each value to::

            v' = max - (v - min)

        where ``v`` is the value at some spatial location, ``min`` is the minimum value in the heatmap
        and ``max`` is the maximum value.
        As the heatmap uses internally a 0.0 to 1.0 representation, this simply becomes ``v' = 1.0 - v``.

        Note that the attributes ``min_value`` and ``max_value`` are not switched. They both keep their values.

        This function can be useful e.g. when working with depth maps, where algorithms might have
        an easier time representing the furthest away points with zeros, requiring an inverted
        depth map.

        Returns
        -------
        arr_inv : imgaug.HeatmapsOnImage
            Inverted heatmap.

        """
        arr_inv = HeatmapsOnImage.from_0to1(1 - self.arr_0to1, shape=self.shape, min_value=self.min_value,
                                            max_value=self.max_value)
        arr_inv.arr_was_2d = self.arr_was_2d
        return arr_inv
    def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
        """
        Pad the heatmaps on their top/right/bottom/left side.

        Parameters
        ----------
        top : int, optional
            Amount of pixels to add at the top side of the heatmaps. Must be 0 or greater.

        right : int, optional
            Amount of pixels to add at the right side of the heatmaps. Must be 0 or greater.

        bottom : int, optional
            Amount of pixels to add at the bottom side of the heatmaps. Must be 0 or greater.

        left : int, optional
            Amount of pixels to add at the left side of the heatmaps. Must be 0 or greater.

        mode : string, optional
            Padding mode to use. See :func:`numpy.pad` for details.

        cval : number, optional
            Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Padded heatmaps of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.

        """
        arr_0to1_padded = pad(self.arr_0to1, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
        return HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
        """
        Pad the heatmaps on their sides so that they match a target aspect ratio.

        Depending on which dimension is smaller (height or width), only the corresponding
        sides (left/right or top/bottom) will be padded. In each case, both of the sides will
        be padded equally.

        Parameters
        ----------
        aspect_ratio : float
            Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
            as much width as height.

        mode : str, optional
            Padding mode to use. See :func:`numpy.pad` for details.

        cval : number, optional
            Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

        return_pad_amounts : bool, optional
            If False, then only the padded image will be returned. If True, a tuple with two
            entries will be returned, where the first entry is the padded image and the second
            entry are the amounts by which each image side was padded. These amounts are again a
            tuple of the form (top, right, bottom, left), with each value being an integer.

        Returns
        -------
        heatmaps : imgaug.HeatmapsOnImage
            Padded heatmaps as HeatmapsOnImage object.

        pad_amounts : tuple of int
            Amounts by which the heatmaps were padded on each side, given as a tuple ``(top, right, bottom, left)``.
            This tuple is only returned if `return_pad_amounts` was set to True.

        """
        arr_0to1_padded, pad_amounts = pad_to_aspect_ratio(self.arr_0to1, aspect_ratio=aspect_ratio, mode=mode,
                                                           cval=cval, return_pad_amounts=True)
        heatmaps = HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,
                                             max_value=self.max_value)
        if return_pad_amounts:
            return heatmaps, pad_amounts
        else:
            return heatmaps
    def avg_pool(self, block_size):
        """
        Resize the heatmap(s) array using average pooling of a given block/kernel size.

        Parameters
        ----------
        block_size : int or tuple of int
            Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps after average pooling.

        """
        arr_0to1_reduced = avg_pool(self.arr_0to1, block_size, cval=0.0)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def max_pool(self, block_size):
        """
        Resize the heatmap(s) array using max-pooling of a given block/kernel size.

        Parameters
        ----------
        block_size : int or tuple of int
            Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps after max-pooling.

        """
        arr_0to1_reduced = max_pool(self.arr_0to1, block_size)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def scale(self, *args, **kwargs):
        """Deprecated alias for :func:`imgaug.HeatmapsOnImage.resize`."""
        warnings.warn(DeprecationWarning("HeatmapsOnImage.scale() is deprecated. "
                                         "Use HeatmapsOnImage.resize() instead. "
                                         "It has the exactly same interface "
                                         "(simple renaming)."))
        return self.resize(*args, **kwargs)
    def resize(self, sizes, interpolation="cubic"):
        """
        Resize the heatmap(s) array to the provided size given the provided interpolation.

        Parameters
        ----------
        sizes : float or iterable of int or iterable of float
            New size of the array in ``(height, width)``.
            See :func:`imgaug.imgaug.imresize_single_image` for details.

        interpolation : None or str or int, optional
            The interpolation to use during resize.
            See :func:`imgaug.imgaug.imresize_single_image` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Resized heatmaps object.

        """
        arr_0to1_resized = imresize_single_image(self.arr_0to1, sizes, interpolation=interpolation)
        # cubic interpolation can lead to values outside of [0.0, 1.0],
        # see https://github.com/opencv/opencv/issues/7195
        # TODO area interpolation too?
        arr_0to1_resized = np.clip(arr_0to1_resized, 0.0, 1.0)
        return HeatmapsOnImage.from_0to1(arr_0to1_resized, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def to_uint8(self):
        """
        Convert this heatmaps object to a 0-to-255 array.

        Returns
        -------
        arr_uint8 : (H,W,C) ndarray
            Heatmap as a 0-to-255 array (dtype is uint8).

        """
        # TODO this always returns (H,W,C), even if input ndarray was originally (H,W)
        # does it make sense here to also return (H,W) if self.arr_was_2d?
        arr_0to255 = np.clip(np.round(self.arr_0to1 * 255), 0, 255)
        arr_uint8 = arr_0to255.astype(np.uint8)
        return arr_uint8
    @staticmethod
    def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from an heatmap array containing values ranging from 0 to 255.

        Parameters
        ----------
        arr_uint8 : (H,W) ndarray or (H,W,C) ndarray
            Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
            Expected dtype is uint8.

        shape : tuple of int
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.

        min_value : float, optional
            Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
            be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
            underlying ``(0, 255)`` array to value range ``(min_value, max_value)``.

        max_value : float, optional
            Maximum value for the heatmaps that 0-to-255 array represents.
            See parameter `min_value` for details.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps object.

        """
        arr_0to1 = arr_uint8.astype(np.float32) / 255.0
        return HeatmapsOnImage.from_0to1(arr_0to1, shape, min_value=min_value, max_value=max_value)
    @staticmethod
    def from_0to1(arr_0to1, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from an heatmap array containing values ranging from 0.0 to 1.0.

        Parameters
        ----------
        arr_0to1 : (H,W) or (H,W,C) ndarray
            Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
            Expected dtype is float32.

        shape : tuple of ints
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.

        min_value : float, optional
            Minimum value for the heatmaps that the 0-to-1 array represents. This will usually
            be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
            underlying ``(0.0, 1.0)`` array to value range ``(min_value, max_value)``.
            E.g. if you started with heatmaps in the range ``(-1.0, 1.0)`` and projected these
            to (0.0, 1.0), you should call this function with ``min_value=-1.0``, ``max_value=1.0``
            so that :func:`imgaug.HeatmapsOnImage.get_arr` returns heatmap arrays having value
            range (-1.0, 1.0).

        max_value : float, optional
            Maximum value for the heatmaps that the 0-to-1 array represents.
            See parameter min_value for details.

        Returns
        -------
        heatmaps : imgaug.HeatmapsOnImage
            Heatmaps object.

        """
        # Construct with (0.0, 1.0) so that __init__ does not re-normalize the
        # already-normalized input, then restore the user-provided value range
        # on the attributes afterwards.
        heatmaps = HeatmapsOnImage(arr_0to1, shape, min_value=0.0, max_value=1.0)
        heatmaps.min_value = min_value
        heatmaps.max_value = max_value
        return heatmaps
    @classmethod
    def change_normalization(cls, arr, source, target):
        """
        Change the value range of a heatmap from one min-max to another min-max.

        E.g. the value range may be changed from min=0.0, max=1.0 to min=-1.0, max=1.0.

        Parameters
        ----------
        arr : ndarray
            Heatmap array to modify.

        source : tuple of float or imgaug.HeatmapsOnImage
            Current value range of the input array, given as (min, max), where both are float values.
            A HeatmapsOnImage may be given instead, in which case its
            ``(min_value, max_value)`` is used.

        target : tuple of float or imgaug.HeatmapsOnImage
            Desired output value range of the array, given as (min, max), where both are float values.
            A HeatmapsOnImage may be given instead, in which case its
            ``(min_value, max_value)`` is used.

        Returns
        -------
        arr_target : ndarray
            Input array, with value range projected to the desired target value range.

        """
        do_assert(is_np_array(arr))
        if isinstance(source, HeatmapsOnImage):
            source = (source.min_value, source.max_value)
        else:
            do_assert(isinstance(source, tuple))
            do_assert(len(source) == 2)
            do_assert(source[0] < source[1])
        if isinstance(target, HeatmapsOnImage):
            target = (target.min_value, target.max_value)
        else:
            do_assert(isinstance(target, tuple))
            do_assert(len(target) == 2)
            do_assert(target[0] < target[1])
        # Check if source and target are the same (with a tiny bit of tolerance)
        # if so, evade computation and just copy the array instead.
        # This is reasonable, as source and target will often both be (0.0, 1.0).
        eps = np.finfo(arr.dtype).eps
        mins_same = source[0] - 10*eps < target[0] < source[0] + 10*eps
        maxs_same = source[1] - 10*eps < target[1] < source[1] + 10*eps
        if mins_same and maxs_same:
            return np.copy(arr)
        min_source, max_source = source
        min_target, max_target = target
        diff_source = max_source - min_source
        diff_target = max_target - min_target
        arr_0to1 = (arr - min_source) / diff_source
        arr_target = min_target + arr_0to1 * diff_target
        return arr_target
    def copy(self):
        """
        Create a shallow copy of the Heatmaps object.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Shallow copy.

        """
        return self.deepcopy()
    def deepcopy(self):
        """
        Create a deep copy of the Heatmaps object.

        Returns
        -------
        imgaug.HeatmapsOnImage
            Deep copy.

        """
        return HeatmapsOnImage(self.get_arr(), shape=self.shape, min_value=self.min_value, max_value=self.max_value)
class SegmentationMapOnImage(object):
    """
    Object representing a segmentation map associated with an image.

    Attributes
    ----------
    DEFAULT_SEGMENT_COLORS : list of tuple of int
        Standard RGB colors to use during drawing, ordered by class index.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,1) ndarray or (H,W,C) ndarray
        Array representing the segmentation map. May have datatypes bool, integer or float.

            * If bool: Assumed to be of shape (H,W), (H,W,1) or (H,W,C). If (H,W) or (H,W,1) it
              is assumed to be for the case of having a single class (where any False denotes
              background). Otherwise there are assumed to be C channels, one for each class,
              with each of them containing a mask for that class. The masks may overlap.
            * If integer: Assumed to be of shape (H,W) or (H,W,1). Each pixel is assumed to
              contain an integer denoting the class index. Classes are assumed to be
              non-overlapping. The number of classes cannot be guessed from this input, hence
              nb_classes must be set.
            * If float: Assumed to be of shape (H,W), (H,W,1) or (H,W,C) with meanings being
              similar to the case of `bool`. Values are expected to fall always in the range
              0.0 to 1.0 and are usually expected to be either 0.0 or 1.0 upon instantiation
              of a new segmentation map. Classes may overlap.

    shape : iterable of int
        Shape of the corresponding image (NOT the segmentation map array). This is expected
        to be ``(H, W)`` or ``(H, W, C)`` with ``C`` usually being 3. If there is no corresponding image,
        then use the segmentation map's shape instead.

    nb_classes : int or None
        Total number of unique classes that may appear in a segmentation map, i.e. the max
        class index. This may be None if the input array is of type bool or float. The number
        of classes however must be provided if the input array is of type int, as then the
        number of classes cannot be guessed.

    """
    # 22 easily distinguishable colors followed by darker variants of the
    # first 20; indexed by integer class id when drawing.
    DEFAULT_SEGMENT_COLORS = [
        (0, 0, 0),  # black
        (230, 25, 75),  # red
        (60, 180, 75),  # green
        (255, 225, 25),  # yellow
        (0, 130, 200),  # blue
        (245, 130, 48),  # orange
        (145, 30, 180),  # purple
        (70, 240, 240),  # cyan
        (240, 50, 230),  # magenta
        (210, 245, 60),  # lime
        (250, 190, 190),  # pink
        (0, 128, 128),  # teal
        (230, 190, 255),  # lavender
        (170, 110, 40),  # brown
        (255, 250, 200),  # beige
        (128, 0, 0),  # maroon
        (170, 255, 195),  # mint
        (128, 128, 0),  # olive
        (255, 215, 180),  # coral
        (0, 0, 128),  # navy
        (128, 128, 128),  # grey
        (255, 255, 255),  # white
        # --
        (115, 12, 37),  # dark red
        (30, 90, 37),  # dark green
        (127, 112, 12),  # dark yellow
        (0, 65, 100),  # dark blue
        (122, 65, 24),  # dark orange
        (72, 15, 90),  # dark purple
        (35, 120, 120),  # dark cyan
        (120, 25, 115),  # dark magenta
        (105, 122, 30),  # dark lime
        (125, 95, 95),  # dark pink
        (0, 64, 64),  # dark teal
        (115, 95, 127),  # dark lavender
        (85, 55, 20),  # dark brown
        (127, 125, 100),  # dark beige
        (64, 0, 0),  # dark maroon
        (85, 127, 97),  # dark mint
        (64, 64, 0),  # dark olive
        (127, 107, 90),  # dark coral
        (0, 0, 64),  # dark navy
        (64, 64, 64),  # dark grey
    ]
def __init__(self, arr, shape, nb_classes=None):
do_assert(is_np_array(arr), "Expected to get numpy array, got %s." % (type(arr),))
if arr.dtype.name == "bool":
do_assert(arr.ndim in [2, 3])
self.input_was = ("bool", arr.ndim)
if arr.ndim == 2:
arr = arr[..., np.newaxis]
arr = arr.astype(np.float32)
elif arr.dtype.kind in ["i", "u"]:
do_assert(arr.ndim == 2 or (arr.ndim == 3 and arr.shape[2] == 1))
do_assert(nb_classes is not None)
do_assert(nb_classes > 0)
do_assert(np.min(arr.flat[0:100]) >= 0)
do_assert(np.max(arr.flat[0:100]) <= nb_classes)
self.input_was = ("int", arr.dtype.type, arr.ndim)
if arr.ndim == 3:
arr = arr[..., 0]
# TODO improve efficiency here by building only sub-heatmaps for classes actually
# present in the image. This would also get rid of nb_classes.
arr = np.eye(nb_classes)[arr] # from class indices to one hot
arr = arr.astype(np.float32)
elif arr.dtype.kind == "f":
do_assert(arr.ndim == 3)
self.input_was = ("float", arr.dtype.type, arr.ndim)
arr = arr.astype(np.float32)
else:
raise Exception(("Input was expected to be an ndarray any bool, int, uint or float dtype. "
+ "Got dtype %s.") % (arr.dtype.name,))
do_assert(arr.ndim == 3)
do_assert(arr.dtype.name == "float32")
self.arr = arr
self.shape = shape
self.nb_classes = nb_classes if nb_classes is not None else arr.shape[2]
def get_arr_int(self, background_threshold=0.01, background_class_id=None):
"""
Get the segmentation map array as an integer array of shape (H, W).
Each pixel in that array contains an integer value representing the pixel's class.
If multiple classes overlap, the one with the highest local float value is picked.
If that highest local value is below `background_threshold`, the method instead uses
the background class id as the pixel's class value.
By default, class id 0 is the background class. This may only be changed if the original
input to the segmentation map object was an integer map.
Parameters
----------
background_threshold : float, optional
At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
class-heatmaps has a value above this threshold, the method uses the background class
id instead.
background_class_id : None or int, optional
Class id to fall back to if no class-heatmap passes the threshold at a spatial
location. May only be provided if the original input was an integer mask and in these
cases defaults to 0. If the input were float or boolean masks, the background class id
may not be set as it is assumed that the background is implicitly defined
as 'any spatial location that has zero-like values in all masks'.
Returns
-------
result : (H,W) ndarray
Segmentation map array (int32).
If the original input consisted of boolean or float masks, then the highest possible
class id is ``1+C``, where ``C`` is the number of provided float/boolean masks. The value
``0`` in the integer mask then denotes the background class.
"""
if self.input_was[0] in ["bool", "float"]:
do_assert(background_class_id is None,
"The background class id may only be changed if the original input to SegmentationMapOnImage "
+ "was an *integer* based segmentation map.")
if background_class_id is None:
background_class_id = 0
channelwise_max_idx = np.argmax(self.arr, axis=2)
# for bool and float input masks, we assume that the background is implicitly given,
# i.e. anything where all masks/channels have zero-like values
# for int, we assume that the background class is explicitly given and has the index 0
if self.input_was[0] in ["bool", "float"]:
result = 1 + channelwise_max_idx
else: # integer mask was provided
result = channelwise_max_idx
if background_threshold is not None and background_threshold > 0:
probs = np.amax(self.arr, axis=2)
result[probs < background_threshold] = background_class_id
return result.astype(np.int32)
# TODO
# def get_arr_bool(self, allow_overlapping=False, threshold=0.5, background_threshold=0.01, background_class_id=0):
# raise NotImplementedError()
def draw(self, size=None, background_threshold=0.01, background_class_id=None, colors=None,
return_foreground_mask=False):
"""
Render the segmentation map as an RGB image.
Parameters
----------
size : None or float or iterable of int or iterable of float, optional
Size of the rendered RGB image as ``(height, width)``.
See :func:`imgaug.imgaug.imresize_single_image` for details.
If set to None, no resizing is performed and the size of the segmentation map array is used.
background_threshold : float, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
background_class_id : None or int, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
colors : None or list of tuple of int, optional
Colors to use. One for each class to draw. If None, then default colors will be used.
return_foreground_mask : bool, optional
Whether to return a mask of the same size as the drawn segmentation map, containing
True at any spatial location that is not the background class and False everywhere else.
Returns
-------
segmap_drawn : (H,W,3) ndarray
Rendered segmentation map (dtype is uint8).
foreground_mask : (H,W) ndarray
Mask indicating the locations of foreground classes (dtype is bool).
This value is only returned if `return_foreground_mask` is True.
"""
arr = self.get_arr_int(background_threshold=background_threshold, background_class_id=background_class_id)
nb_classes = 1 + np.max(arr)
segmap_drawn = np.zeros((arr.shape[0], arr.shape[1], 3), dtype=np.uint8)
if colors is None:
colors = SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS
do_assert(nb_classes <= len(colors),
"Can't draw all %d classes as it would exceed the maximum number of %d available colors." % (
nb_classes, len(colors),))
ids_in_map = np.unique(arr)
for c, color in zip(sm.xrange(nb_classes), colors):
if c in ids_in_map:
class_mask = (arr == c)
segmap_drawn[class_mask] = color
if return_foreground_mask:
background_class_id = 0 if background_class_id is None else background_class_id
foreground_mask = (arr != background_class_id)
else:
foreground_mask = None
if size is not None:
segmap_drawn = imresize_single_image(segmap_drawn, size, interpolation="nearest")
if foreground_mask is not None:
foreground_mask = imresize_single_image(
foreground_mask.astype(np.uint8), size, interpolation="nearest") > 0
if foreground_mask is not None:
return segmap_drawn, foreground_mask
return segmap_drawn
def draw_on_image(self, image, alpha=0.75, resize="segmentation_map", background_threshold=0.01,
background_class_id=None, colors=None, draw_background=False):
"""
Draw the segmentation map as an overlay over an image.
Parameters
----------
image : (H,W,3) ndarray
Image onto which to draw the segmentation map. Dtype is expected to be uint8.
alpha : float, optional
Alpha/opacity value to use for the mixing of image and segmentation map.
Higher values mean that the segmentation map will be more visible and the image less visible.
resize : {'segmentation_map', 'image'}, optional
In case of size differences between the image and segmentation map, either the image or
the segmentation map can be resized. This parameter controls which of the two will be
resized to the other's size.
background_threshold : float, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
background_class_id : None or int, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
colors : None or list of tuple of int, optional
Colors to use. One for each class to draw. If None, then default colors will be used.
draw_background : bool, optional
If True, the background will be drawn like any other class.
If False, the background will not be drawn, i.e. the respective background pixels
will be identical with the image's RGB color at the corresponding spatial location
and no color overlay will be applied.
Returns
-------
mix : (H,W,3) ndarray
Rendered overlays (dtype is uint8).
"""
# assert RGB image
do_assert(image.ndim == 3)
do_assert(image.shape[2] == 3)
do_assert(image.dtype.type == np.uint8)
do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
do_assert(resize in ["segmentation_map", "image"])
if resize == "image":
image = imresize_single_image(image, self.arr.shape[0:2], interpolation="cubic")
segmap_drawn, foreground_mask = self.draw(
background_threshold=background_threshold,
background_class_id=background_class_id,
size=image.shape[0:2] if resize == "segmentation_map" else None,
colors=colors,
return_foreground_mask=True
)
if draw_background:
mix = np.clip(
(1-alpha) * image + alpha * segmap_drawn,
0,
255
).astype(np.uint8)
else:
foreground_mask = foreground_mask[..., np.newaxis]
mix = np.zeros_like(image)
mix += (~foreground_mask).astype(np.uint8) * image
mix += foreground_mask.astype(np.uint8) * np.clip(
(1-alpha) * image + alpha * segmap_drawn,
0,
255
).astype(np.uint8)
return mix
def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
"""
Pad the segmentation map on its top/right/bottom/left side.
Parameters
----------
top : int, optional
Amount of pixels to add at the top side of the segmentation map. Must be 0 or greater.
right : int, optional
Amount of pixels to add at the right side of the segmentation map. Must be 0 or greater.
bottom : int, optional
Amount of pixels to add at the bottom side of the segmentation map. Must be 0 or greater.
left : int, optional
Amount of pixels to add at the left side of the segmentation map. Must be 0 or greater.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
Returns
-------
segmap : imgaug.SegmentationMapOnImage
Padded segmentation map of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
"""
arr_padded = pad(self.arr, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)
segmap.input_was = self.input_was
return segmap
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
    """
    Pad the segmentation map so that it matches a target aspect ratio.

    Depending on which dimension is smaller (height or width), only the
    corresponding sides (left/right or top/bottom) are padded. Both of those
    sides receive the same amount of padding.

    Parameters
    ----------
    aspect_ratio : float
        Target aspect ratio as width/height, e.g. ``2.0`` denotes twice as
        much width as height.
    mode : str, optional
        Padding mode, forwarded to :func:`numpy.pad`.
    cval : number, optional
        Constant fill value if `mode` is ``constant``. See :func:`numpy.pad`.
    return_pad_amounts : bool, optional
        If False, only the padded segmentation map is returned. If True,
        a tuple ``(segmap, (top, right, bottom, left))`` is returned, where
        the second entry holds the per-side padding amounts as integers.

    Returns
    -------
    segmap : imgaug.SegmentationMapOnImage
        Padded segmentation map as SegmentationMapOnImage object.
    pad_amounts : tuple of int
        Amounts by which the map was padded on each side, as
        ``(top, right, bottom, left)``.
        Only returned if `return_pad_amounts` was set to True.
    """
    # Always request the pad amounts from the helper and only forward them
    # to the caller when asked to.
    padded_arr, amounts = pad_to_aspect_ratio(
        self.arr, aspect_ratio=aspect_ratio, mode=mode, cval=cval,
        return_pad_amounts=True)
    result = SegmentationMapOnImage(padded_arr, shape=self.shape)
    result.input_was = self.input_was
    if return_pad_amounts:
        return result, amounts
    return result
def scale(self, *args, **kwargs):
    """Deprecated alias for :func:`SegmentationMapOnImage.resize`."""
    message = ("SegmentationMapOnImage.scale() is deprecated. "
               "Use SegmentationMapOnImage.resize() instead. "
               "It has the exactly same interface (simple renaming).")
    warnings.warn(DeprecationWarning(message))
    return self.resize(*args, **kwargs)
def resize(self, sizes, interpolation="cubic"):
    """
    Resize the segmentation map array to the given size using the given
    interpolation.

    Parameters
    ----------
    sizes : float or iterable of int or iterable of float
        New size of the array in ``(height, width)``.
        See :func:`imgaug.imgaug.imresize_single_image` for details.
    interpolation : None or str or int, optional
        The interpolation to use during resize.
        See :func:`imgaug.imgaug.imresize_single_image` for details.
        Note: the map is stored internally as float-based heatmaps, so a
        smooth interpolation can be more reasonable than nearest neighbour.

    Returns
    -------
    segmap : imgaug.SegmentationMapOnImage
        Resized segmentation map object.
    """
    resized = imresize_single_image(self.arr, sizes, interpolation=interpolation)
    # Cubic interpolation can overshoot the [0.0, 1.0] value range,
    # see https://github.com/opencv/opencv/issues/7195 -- clip back.
    # TODO area interpolation too?
    resized = np.clip(resized, 0.0, 1.0)
    result = SegmentationMapOnImage(resized, shape=self.shape)
    result.input_was = self.input_was
    return result
def to_heatmaps(self, only_nonempty=False, not_none_if_no_nonempty=False):
    """
    Convert the segmentation map to a heatmaps object.

    Each class of the segmentation map is represented as one heatmap channel.

    Parameters
    ----------
    only_nonempty : bool, optional
        If True, only heatmaps for classes that actually appear in the
        segmentation map are generated, and the ids of those classes are
        returned as a second value.
    not_none_if_no_nonempty : bool, optional
        Only relevant if `only_nonempty` is True. If no class appears in the
        segmentation map, the heatmaps result would normally be None. With
        this flag set, a heatmaps object with a single channel (representing
        class 0) is returned instead as a fallback.

    Returns
    -------
    imgaug.HeatmapsOnImage or None
        Segmentation map as a heatmaps object.
        None if `only_nonempty` was True, no class appeared in the map and
        no fallback was requested.
    class_indices : list of int
        Class ids (0 to C-1) of the classes actually added to the heatmaps.
        Only returned if `only_nonempty` was set to True.
    """
    if not only_nonempty:
        return HeatmapsOnImage.from_0to1(self.arr, self.shape, min_value=0.0, max_value=1.0)

    # A channel counts as non-empty if its summed mass exceeds a small epsilon.
    nonempty_mask = np.sum(self.arr, axis=(0, 1)) > 0 + 1e-4
    if not np.any(nonempty_mask):
        if not not_none_if_no_nonempty:
            return None, []
        nonempty_mask[0] = True
    class_indices = np.arange(self.arr.shape[2])[nonempty_mask]
    channels = self.arr[..., class_indices]
    return HeatmapsOnImage(channels, self.shape, min_value=0.0, max_value=1.0), class_indices
@staticmethod
def from_heatmaps(heatmaps, class_indices=None, nb_classes=None):
    """
    Convert a heatmaps object to a segmentation map.

    Assumes that each class is represented as a single heatmap channel.

    Parameters
    ----------
    heatmaps : imgaug.HeatmapsOnImage
        Heatmaps to convert.
    class_indices : None or list of int, optional
        List of class indices represented by each heatmap channel. See also
        the secondary output of :func:`imgaug.SegmentationMapOnImage.to_heatmap`.
        If provided, it must have the same length as the number of heatmap
        channels.
    nb_classes : None or int, optional
        Number of classes. Must be provided if `class_indices` is set.

    Returns
    -------
    imgaug.SegmentationMapOnImage
        Segmentation map derived from the heatmaps.
    """
    if class_indices is None:
        return SegmentationMapOnImage(heatmaps.arr_0to1, shape=heatmaps.shape)

    do_assert(nb_classes is not None)
    do_assert(min(class_indices) >= 0)
    do_assert(max(class_indices) < nb_classes)
    do_assert(len(class_indices) == heatmaps.arr_0to1.shape[2])

    arr_0to1 = heatmaps.arr_0to1
    # Scatter each heatmap channel into its target class channel; channels
    # of classes without a heatmap stay all-zero.
    height, width = arr_0to1.shape[0], arr_0to1.shape[1]
    arr_full = np.zeros((height, width, nb_classes), dtype=np.float32)
    for source_channel, target_channel in enumerate(class_indices):
        arr_full[:, :, target_channel] = arr_0to1[:, :, source_channel]
    return SegmentationMapOnImage(arr_full, shape=heatmaps.shape)
def copy(self):
    """
    Create a shallow copy of the segmentation map object.

    Returns
    -------
    imgaug.SegmentationMapOnImage
        Shallow copy.
    """
    # No cheaper shallow representation exists; fall back to a deep copy.
    return self.deepcopy()
def deepcopy(self):
    """
    Create a deep copy of the segmentation map object.

    Returns
    -------
    imgaug.SegmentationMapOnImage
        Deep copy.
    """
    copied = SegmentationMapOnImage(self.arr, shape=self.shape, nb_classes=self.nb_classes)
    copied.input_was = self.input_was
    return copied
class Batch(object):
    """
    Class encapsulating a batch before and after augmentation.

    Parameters
    ----------
    images : None or (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
        The images to augment.
    heatmaps : None or list of imgaug.HeatmapsOnImage
        The heatmaps to augment.
    segmentation_maps : None or list of SegmentationMapOnImage
        The segmentation maps to augment.
    keypoints : None or list of KeypointOnImage
        The keypoints to augment.
    bounding_boxes : None or list of BoundingBoxesOnImage
        The bounding boxes to augment.
    polygons : None or list of PolygonsOnImage
        The polygons to augment.
    data
        Additional data that is saved in the batch and may be read out
        after augmentation. This could e.g. contain filepaths to each image
        in `images`. As this object is usually used for background
        augmentation with multiple processes, the augmented Batch objects might
        not be returned in the original order, making this information useful.
    """

    def __init__(self, images=None, heatmaps=None, segmentation_maps=None,
                 keypoints=None, bounding_boxes=None, polygons=None,
                 data=None):
        # Every augmentable is stored twice: the *_unaug attribute holds the
        # input as provided, the *_aug attribute is filled after augmentation.
        self.images_unaug = images
        self.images_aug = None
        self.heatmaps_unaug = heatmaps
        self.heatmaps_aug = None
        self.segmentation_maps_unaug = segmentation_maps
        self.segmentation_maps_aug = None
        self.keypoints_unaug = keypoints
        self.keypoints_aug = None
        self.bounding_boxes_unaug = bounding_boxes
        self.bounding_boxes_aug = None
        self.polygons_unaug = polygons
        self.polygons_aug = None
        self.data = data

    # Deprecated read-only alias for images_unaug.
    @property
    def images(self):
        warnings.warn(DeprecationWarning(
            "Accessing imgaug.Batch.images is deprecated. Access instead "
            "imgaug.Batch.images_unaug or imgaug.Batch.images_aug."))
        return self.images_unaug

    # Deprecated read-only alias for heatmaps_unaug.
    @property
    def heatmaps(self):
        warnings.warn(DeprecationWarning(
            "Accessing imgaug.Batch.heatmaps is deprecated. Access instead "
            "imgaug.Batch.heatmaps_unaug or imgaug.Batch.heatmaps_aug."))
        return self.heatmaps_unaug

    # Deprecated read-only alias for segmentation_maps_unaug.
    @property
    def segmentation_maps(self):
        warnings.warn(DeprecationWarning(
            "Accessing imgaug.Batch.segmentation_maps is deprecated. Access "
            "instead imgaug.Batch.segmentation_maps_unaug or "
            "imgaug.Batch.segmentation_maps_aug."))
        return self.segmentation_maps_unaug

    # Deprecated read-only alias for keypoints_unaug.
    @property
    def keypoints(self):
        warnings.warn(DeprecationWarning(
            "Accessing imgaug.Batch.keypoints is deprecated. Access "
            "instead imgaug.Batch.keypoints_unaug or "
            "imgaug.Batch.keypoints_aug."))
        return self.keypoints_unaug

    # Deprecated read-only alias for bounding_boxes_unaug.
    @property
    def bounding_boxes(self):
        warnings.warn(DeprecationWarning(
            "Accessing imgaug.Batch.bounding_boxes is deprecated. Access "
            "instead imgaug.Batch.bounding_boxes_unaug or "
            "imgaug.Batch.bounding_boxes_aug."))
        return self.bounding_boxes_unaug

    def deepcopy(self):
        """Create a deep copy of this batch, including the *_aug attributes."""
        def _copy_images(images):
            # Images may be given as a single ndarray or as a list of ndarrays.
            if images is None:
                images_copy = None
            elif is_np_array(images):
                images_copy = np.copy(images)
            else:
                do_assert(is_iterable(images))
                do_assert(all([is_np_array(image) for image in images]))
                images_copy = list([np.copy(image) for image in images])
            return images_copy

        def _copy_augmentable_objects(augmentables, clazz):
            # Deep-copies a list of augmentables after verifying their type.
            if augmentables is None:
                augmentables_copy = None
            else:
                do_assert(is_iterable(augmentables))
                do_assert(all([isinstance(augmentable, clazz) for augmentable in augmentables]))
                augmentables_copy = [augmentable.deepcopy() for augmentable in augmentables]
            return augmentables_copy

        batch = Batch(
            images=_copy_images(self.images_unaug),
            heatmaps=_copy_augmentable_objects(self.heatmaps_unaug, HeatmapsOnImage),
            segmentation_maps=_copy_augmentable_objects(self.segmentation_maps_unaug, SegmentationMapOnImage),
            keypoints=_copy_augmentable_objects(self.keypoints_unaug, KeypointsOnImage),
            bounding_boxes=_copy_augmentable_objects(self.bounding_boxes_unaug, BoundingBoxesOnImage),
            polygons=_copy_augmentable_objects(self.polygons_unaug, PolygonsOnImage),
            data=copy.deepcopy(self.data)
        )
        # The constructor only accepts unaugmented data, so the augmented
        # counterparts are copied over separately.
        batch.images_aug = _copy_images(self.images_aug)
        batch.heatmaps_aug = _copy_augmentable_objects(self.heatmaps_aug, HeatmapsOnImage)
        batch.segmentation_maps_aug = _copy_augmentable_objects(self.segmentation_maps_aug, SegmentationMapOnImage)
        batch.keypoints_aug = _copy_augmentable_objects(self.keypoints_aug, KeypointsOnImage)
        batch.bounding_boxes_aug = _copy_augmentable_objects(self.bounding_boxes_aug, BoundingBoxesOnImage)
        batch.polygons_aug = _copy_augmentable_objects(self.polygons_aug, PolygonsOnImage)
        return batch
def BatchLoader(*args, **kwargs):
    """
    Deprecated. Use ``imgaug.multicore.BatchLoader`` instead.

    Thin wrapper that emits a DeprecationWarning and forwards all arguments
    to the moved class.
    """
    # Fixed typo in the user-facing warning text ("depcrecated" -> "deprecated").
    warnings.warn(DeprecationWarning("Using imgaug.imgaug.BatchLoader is deprecated. "
                                     "Use imgaug.multicore.BatchLoader instead."))
    from . import multicore
    return multicore.BatchLoader(*args, **kwargs)
def BackgroundAugmenter(*args, **kwargs):
    """
    Deprecated. Use ``imgaug.multicore.BackgroundAugmenter`` instead.

    Thin wrapper that emits a DeprecationWarning and forwards all arguments
    to the moved class.
    """
    # Fixed typo in the user-facing warning text ("depcrecated" -> "deprecated").
    warnings.warn(DeprecationWarning("Using imgaug.imgaug.BackgroundAugmenter is deprecated. "
                                     "Use imgaug.multicore.BackgroundAugmenter instead."))
    from . import multicore
    return multicore.BackgroundAugmenter(*args, **kwargs)
Fix docstring
from __future__ import print_function, division, absolute_import

import collections
import collections.abc
import copy
import json
import math
import numbers
import os
import sys
import types
import warnings

import cv2
import imageio
import numpy as np
import scipy.spatial.distance
import six
import six.moves as sm
import skimage.draw
import skimage.measure
from PIL import Image as PIL_Image, ImageDraw as PIL_ImageDraw, ImageFont as PIL_ImageFont
# Sentinel used by augmenters to denote "all possible values/channels".
ALL = "ALL"

# Directory containing this module; data files below are resolved against it.
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
# filepath to the quokka image, its annotations and depth map
QUOKKA_FP = os.path.join(FILE_DIR, "quokka.jpg")
QUOKKA_ANNOTATIONS_FP = os.path.join(FILE_DIR, "quokka_annotations.json")
QUOKKA_DEPTH_MAP_HALFRES_FP = os.path.join(FILE_DIR, "quokka_depth_map_halfres.png")

# Font used for text drawing utilities, shipped alongside the module.
DEFAULT_FONT_FP = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    "DejaVuSans.ttf"
)

# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)
SEED_MIN_VALUE = 0
SEED_MAX_VALUE = 2**31-1  # use 2**31 instead of 2**32 here because 2**31 errored on some systems

# to check if a dtype instance is among these dtypes, use e.g. `dtype.type in NP_FLOAT_TYPES`
# do not just use `dtype in NP_FLOAT_TYPES` as that would fail
NP_FLOAT_TYPES = set(np.sctypes["float"])
NP_INT_TYPES = set(np.sctypes["int"])
NP_UINT_TYPES = set(np.sctypes["uint"])

# Default backend for imshow(); see IMRESIZE_VALID_INTERPOLATIONS for the
# interpolation aliases accepted by imresize_single_image().
IMSHOW_BACKEND_DEFAULT = "matplotlib"
IMRESIZE_VALID_INTERPOLATIONS = ["nearest", "linear", "area", "cubic",
                                 cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
def is_np_array(val):
    """
    Checks whether a variable is a numpy array.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a numpy array. Otherwise False.
    """
    # isinstance(val, (np.ndarray, np.generic)) would also match numpy
    # scalar values, which are not arrays -- hence only np.ndarray here.
    return isinstance(val, np.ndarray)
def is_single_integer(val):
"""
Checks whether a variable is an integer.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is an integer. Otherwise False.
"""
return isinstance(val, numbers.Integral) and not isinstance(val, bool)
def is_single_float(val):
    """
    Checks whether a variable is a float.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a float. Otherwise False.
    """
    # A float is a real number that is neither an integer nor a bool.
    is_real = isinstance(val, numbers.Real)
    return is_real and not is_single_integer(val) and not isinstance(val, bool)
def is_single_number(val):
    """
    Checks whether a variable is a number, i.e. an integer or float.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a number. Otherwise False.
    """
    if is_single_integer(val):
        return True
    return is_single_float(val)
def is_iterable(val):
    """
    Checks whether a variable is iterable.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is an iterable. Otherwise False.
    """
    # collections.Iterable was deprecated in Python 3.3 and removed in
    # Python 3.10; the ABC lives in collections.abc.
    return isinstance(val, collections.abc.Iterable)
# TODO convert to is_single_string() or rename is_single_integer/float/number()
def is_string(val):
    """
    Checks whether a variable is a string.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a string. Otherwise False.
    """
    # six.string_types covers str on Python 3 and basestring on Python 2.
    string_classes = six.string_types
    return isinstance(val, string_classes)
def is_single_bool(val):
    """
    Checks whether a variable is a boolean.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a boolean. Otherwise False.
    """
    # isinstance() is the idiomatic form of the old `type(val) == type(True)`
    # check and is behaviorally identical, as bool cannot be subclassed.
    return isinstance(val, bool)
def is_integer_array(val):
    """
    Checks whether a variable is a numpy integer array.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a numpy integer array. Otherwise False.
    """
    if not is_np_array(val):
        return False
    return issubclass(val.dtype.type, np.integer)
def is_float_array(val):
    """
    Checks whether a variable is a numpy float array.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a numpy float array. Otherwise False.
    """
    if not is_np_array(val):
        return False
    return issubclass(val.dtype.type, np.floating)
def is_callable(val):
    """
    Checks whether a variable is a callable, e.g. a function.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a callable. Otherwise False.
    """
    # Python 3.0-3.2 did not provide the callable() builtin; fall back to
    # checking for the __call__ attribute there.
    major, minor = sys.version_info[0], sys.version_info[1]
    if major == 3 and minor <= 2:
        return hasattr(val, '__call__')
    return callable(val)
def is_generator(val):
    """
    Checks whether a variable is a generator.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        True if the variable is a generator. Otherwise False.
    """
    generator_class = types.GeneratorType
    return isinstance(val, generator_class)
def caller_name():
    """
    Returns the name of the caller, e.g. a function.

    Returns
    -------
    str
        The name of the caller as a string.
    """
    # Frame 1 is the direct caller of this function.
    caller_frame = sys._getframe(1)
    return caller_frame.f_code.co_name
def seed(seedval):
    """
    Set the seed of the library's global random state.

    This affects all randomness in the library, as augmenters use the global
    random state by default. Under special circumstances (e.g. when an
    augmenter is switched to deterministic mode), the global random state is
    replaced by another -- local -- one, whose replacement is itself derived
    from the global random state.

    Parameters
    ----------
    seedval : int
        The seed to use.
    """
    CURRENT_RANDOM_STATE.seed(seedval)
def current_random_state():
    """
    Returns the current/global random state of the library.

    Returns
    -------
    numpy.random.RandomState
        The current/global random state.
    """
    return CURRENT_RANDOM_STATE
def new_random_state(seed=None, fully_random=False):
    """
    Returns a new random state.

    Parameters
    ----------
    seed : None or int, optional
        Optional seed value to use.
        The same datatypes are allowed as for ``numpy.random.RandomState(seed)``.
    fully_random : bool, optional
        Whether to use numpy's own random initialization for the RandomState
        (used if set to True). If False, a seed is sampled from the global
        random state instead, which is a bit faster and hence the default.

    Returns
    -------
    numpy.random.RandomState
        The new random state.
    """
    if seed is None and not fully_random:
        # Sampling a seed and constructing RandomState(seed) is faster than
        # letting RandomState() initialize itself from OS entropy.
        seed = CURRENT_RANDOM_STATE.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, 1)[0]
    return np.random.RandomState(seed)
def dummy_random_state():
    """
    Returns a dummy random state, always seeded with 1.

    Returns
    -------
    numpy.random.RandomState
        The new random state.
    """
    return np.random.RandomState(1)
def copy_random_state(random_state, force_copy=False):
    """
    Creates a copy of a random state.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        The random state to copy.
    force_copy : bool, optional
        If True, this function will always create a copy of every random
        state. If False, it will not copy numpy's default random state,
        but all other random states.

    Returns
    -------
    rs_copy : numpy.random.RandomState
        The copied random state.
    """
    # np.random (the module) cannot be deep-copied; return it as-is unless
    # the caller insists on a copy.
    if random_state == np.random and not force_copy:
        return random_state
    rs_copy = dummy_random_state()
    rs_copy.set_state(random_state.get_state())
    return rs_copy
def derive_random_state(random_state):
    """
    Create a single new random state based on an existing random state.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state or seed from which to derive the new random state.

    Returns
    -------
    numpy.random.RandomState
        Derived random state.
    """
    # Convenience wrapper around the plural form.
    derived = derive_random_states(random_state, n=1)
    return derived[0]
# TODO use this everywhere instead of manual seed + create
def derive_random_states(random_state, n=1):
    """
    Create N new random states based on an existing random state.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state or seed from which to derive new random states.
    n : int, optional
        Number of random states to derive.

    Returns
    -------
    list of numpy.random.RandomState
        Derived random states.
    """
    # Sample one base seed and derive the states from consecutive offsets.
    base_seed = random_state.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, 1)[0]
    return [new_random_state(base_seed + offset) for offset in sm.xrange(n)]
def forward_random_state(random_state):
    """
    Advance the internal state of a random state.

    This makes sure that future calls to the random_state will produce new
    random values.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state to forward.
    """
    # Drawing a single sample advances the generator's internal state.
    random_state.uniform()
def _quokka_normalize_extract(extract):
    """
    Generate a normalized rectangle for extraction from the standard quokka image.

    Parameters
    ----------
    extract : 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Unnormalized representation of the image subarea to be extracted.

            * If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)``
              will be extracted from the image.
            * If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
              and ``y2``.
            * If a BoundingBox, then that bounding box's area will be extracted from the image.
            * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
              and a shape matching the full image dimensions (i.e. (643, 960, *)). Then the
              one bounding box will be used similar to BoundingBox.

    Returns
    -------
    bb : imgaug.BoundingBox
        Normalized representation of the area to extract from the standard quokka image.
    """
    if extract == "square":
        return BoundingBox(x1=0, y1=0, x2=643, y2=643)
    if isinstance(extract, tuple) and len(extract) == 4:
        x1, y1, x2, y2 = extract
        return BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2)
    if isinstance(extract, BoundingBox):
        return extract
    if isinstance(extract, BoundingBoxesOnImage):
        do_assert(len(extract.bounding_boxes) == 1)
        do_assert(extract.shape[0:2] == (643, 960))
        return extract.bounding_boxes[0]
    raise Exception(
        "Expected 'square' or tuple of four entries or BoundingBox or BoundingBoxesOnImage "
        + "for parameter 'extract', got %s." % (type(extract),)
    )
def _compute_resized_shape(from_shape, to_shape):
    """
    Computes the intended new shape of an image-like array after resizing.

    Parameters
    ----------
    from_shape : tuple or ndarray
        Old shape of the array. Usually expected to be a tuple of form ``(H, W)`` or ``(H, W, C)`` or
        alternatively an array with two or three dimensions.
    to_shape : None or tuple of ints or tuple of floats or int or float or ndarray
        New shape of the array.

            * If None, then `from_shape` will be used as the new shape.
            * If an int ``V``, then the new shape will be ``(V, V, [C])``, where ``C`` will be added if it
              is part of `from_shape`.
            * If a float ``V``, then the new shape will be ``(H*V, W*V, [C])``, where ``H`` and ``W`` are the old
              height/width.
            * If a tuple ``(H', W', [C'])`` of ints, then ``H'`` and ``W'`` will be used as the new height
              and width.
            * If a tuple ``(H', W', [C'])`` of floats (except ``C``), then ``H'`` and ``W'`` will
              be used as the new height and width.
            * If a numpy array, then the array's shape will be used.

    Returns
    -------
    to_shape_computed : tuple of int
        New shape.
    """
    # Arrays stand in for their own shape on both sides.
    if is_np_array(from_shape):
        from_shape = from_shape.shape
    if is_np_array(to_shape):
        to_shape = to_shape.shape
    # Start from the old shape; entries are overwritten below as needed.
    to_shape_computed = list(from_shape)
    if to_shape is None:
        pass
    elif isinstance(to_shape, tuple):
        do_assert(len(from_shape) in [2, 3])
        do_assert(len(to_shape) in [2, 3])
        if len(from_shape) == 3 and len(to_shape) == 3:
            # Channel counts must agree when both sides specify one.
            do_assert(from_shape[2] == to_shape[2])
        elif len(to_shape) == 3:
            # from_shape had no channel axis; adopt the requested one.
            to_shape_computed.append(to_shape[2])
        do_assert(all([v is None or is_single_number(v) for v in to_shape[0:2]]),
                  "Expected the first two entries in to_shape to be None or numbers, "
                  + "got types %s." % (str([type(v) for v in to_shape[0:2]]),))
        # Per-axis resolution: None keeps old size, int is absolute,
        # float is a relative factor of the old size.
        for i, from_shape_i in enumerate(from_shape[0:2]):
            if to_shape[i] is None:
                to_shape_computed[i] = from_shape_i
            elif is_single_integer(to_shape[i]):
                to_shape_computed[i] = to_shape[i]
            else:  # float
                to_shape_computed[i] = int(np.round(from_shape_i * to_shape[i]))
    elif is_single_integer(to_shape) or is_single_float(to_shape):
        # A scalar applies to both height and width; recurse with a tuple.
        to_shape_computed = _compute_resized_shape(from_shape, (to_shape, to_shape))
    else:
        raise Exception("Expected to_shape to be None or ndarray or tuple of floats or tuple of ints or single int "
                        + "or single float, got %s." % (type(to_shape),))
    return tuple(to_shape_computed)
def quokka(size=None, extract=None):
    """
    Returns an image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        Size of the output image. Input into :func:`imgaug.imgaug.imresize_single_image`.
        Usually expected to be a tuple ``(H, W)``, where ``H`` is the desired height
        and ``W`` is the width. If None, then the image will not be resized.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea of the quokka image to extract:

            * If None, then the whole image will be used.
            * If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)`` will
              be extracted from the image.
            * If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
              and ``y2``.
            * If a BoundingBox, then that bounding box's area will be extracted from the image.
            * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
              and a shape matching the full image dimensions (i.e. ``(643, 960, *)``). Then the
              one bounding box will be used similar to BoundingBox.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    image = imageio.imread(QUOKKA_FP, pilmode="RGB")
    # First crop to the requested sub-area, then resize the crop.
    if extract is not None:
        extract_bb = _quokka_normalize_extract(extract)
        image = extract_bb.extract_from_image(image)
    if size is not None:
        target_shape = _compute_resized_shape(image.shape, size)
        image = imresize_single_image(image, target_shape[0:2])
    return image
def quokka_square(size=None):
    """
    Returns a square image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        Size of the output image. Input into :func:`imgaug.imgaug.imresize_single_image`.
        Usually expected to be a tuple ``(H, W)``, where ``H`` is the desired height
        and ``W`` is the width. If None, then the image will not be resized.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    # Simply forwards to quokka() with the squared sub-area selected.
    return quokka(size=size, extract="square")
def quokka_heatmap(size=None, extract=None):
    """
    Returns a heatmap (here: depth map) for the standard example quokka image.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        See :func:`imgaug.quokka`.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        See :func:`imgaug.quokka`.

    Returns
    -------
    result : imgaug.HeatmapsOnImage
        Depth map as an heatmap object. Values close to 0.0 denote objects that are close to
        the camera. Values close to 1.0 denote objects that are furthest away (among all shown
        objects).
    """
    img = imageio.imread(QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode="RGB")
    # upscale the half-resolution depth map to the full 643x960 image size
    img = imresize_single_image(img, (643, 960), interpolation="cubic")
    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img = bb.extract_from_image(img)
    if size is None:
        size = img.shape[0:2]
    # NOTE(review): the resize below runs even when size was None (it is then
    # the current shape, i.e. a no-op resize) -- presumably intentional; confirm.
    shape_resized = _compute_resized_shape(img.shape, size)
    img = imresize_single_image(img, shape_resized[0:2])
    img_0to1 = img[..., 0]  # depth map was saved as 3-channel RGB
    img_0to1 = img_0to1.astype(np.float32) / 255.0
    img_0to1 = 1 - img_0to1  # depth map was saved as 0 being furthest away
    # shape is reported with 3 channels, matching the RGB quokka image that
    # this heatmap is associated with
    return HeatmapsOnImage(img_0to1, shape=img_0to1.shape[0:2] + (3,))
def quokka_segmentation_map(size=None, extract=None):
    """
    Returns a segmentation map for the standard example quokka image.

    Parameters
    ----------
    size : None or float or tuple of int, optional
        See :func:`imgaug.quokka`.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        See :func:`imgaug.quokka`.

    Returns
    -------
    result : imgaug.SegmentationMapOnImage
        Segmentation map object.
    """
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    # collect the polygon outline of the quokka from the annotations
    xx = []
    yy = []
    for kp_dict in json_dict["polygons"][0]["keypoints"]:
        x = kp_dict["x"]
        y = kp_dict["y"]
        xx.append(x)
        yy.append(y)
    # rasterize the polygon into a single-channel float mask
    img_seg = np.zeros((643, 960, 1), dtype=np.float32)
    rr, cc = skimage.draw.polygon(np.array(yy), np.array(xx), shape=img_seg.shape)
    img_seg[rr, cc] = 1.0
    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img_seg = bb.extract_from_image(img_seg)
    # shape is reported with 3 channels, matching the RGB quokka image that
    # this segmentation map is associated with
    segmap = SegmentationMapOnImage(img_seg, shape=img_seg.shape[0:2] + (3,))
    if size is not None:
        shape_resized = _compute_resized_shape(img_seg.shape, size)
        segmap = segmap.resize(shape_resized[0:2])
        segmap.shape = tuple(shape_resized[0:2]) + (3,)
    return segmap
def quokka_keypoints(size=None, extract=None):
    """
    Returns example keypoints on the standard example quokka image.

    The keypoints cover the eyes, ears, nose and paws.

    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the keypoints are placed. If None, then the keypoints
        are not projected to any new size (positions on the original image are used).
        Floats lead to relative size changes, ints to absolute sizes in pixels.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.

    Returns
    -------
    kpsoi : imgaug.KeypointsOnImage
        Example keypoints on the quokka image.
    """
    # offset of the extracted sub-area; keypoint coordinates are shifted by it
    left, top = 0, 0
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        left = bb_extract.x1
        top = bb_extract.y1
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    keypoints = []
    for kp_dict in json_dict["keypoints"]:
        keypoints.append(Keypoint(x=kp_dict["x"] - left, y=kp_dict["y"] - top))
    if extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)
    kpsoi = KeypointsOnImage(keypoints, shape=shape)
    if size is not None:
        shape_resized = _compute_resized_shape(shape, size)
        kpsoi = kpsoi.on(shape_resized)
    return kpsoi
def quokka_bounding_boxes(size=None, extract=None):
    """
    Returns example bounding boxes on the standard example quokka image.

    Currently only a single bounding box is returned that covers the quokka.

    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the BBs are placed. If None, then the BBs
        are not projected to any new size (positions on the original image are used).
        Floats lead to relative size changes, ints to absolute sizes in pixels.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.

    Returns
    -------
    bbsoi : imgaug.BoundingBoxesOnImage
        Example BBs on the quokka image.
    """
    # offset of the extracted sub-area; BB coordinates are shifted by it
    left, top = 0, 0
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        left = bb_extract.x1
        top = bb_extract.y1
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    bbs = []
    for bb_dict in json_dict["bounding_boxes"]:
        bbs.append(
            BoundingBox(
                x1=bb_dict["x1"] - left,
                y1=bb_dict["y1"] - top,
                x2=bb_dict["x2"] - left,
                y2=bb_dict["y2"] - top
            )
        )
    if extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)
    bbsoi = BoundingBoxesOnImage(bbs, shape=shape)
    if size is not None:
        shape_resized = _compute_resized_shape(shape, size)
        bbsoi = bbsoi.on(shape_resized)
    return bbsoi
def quokka_polygons(size=None, extract=None):
    """
    Returns example polygons on the standard example quokka image.

    The result contains one polygon, covering the quokka's outline.

    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the polygons are placed. If None,
        then the polygons are not projected to any new size (positions on the
        original image are used). Floats lead to relative size changes, ints
        to absolute sizes in pixels.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or \
              imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.

    Returns
    -------
    psoi : imgaug.PolygonsOnImage
        Example polygons on the quokka image.
    """
    # offset of the extracted sub-area; polygon coordinates are shifted by it
    left, top = 0, 0
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        left = bb_extract.x1
        top = bb_extract.y1
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    polygons = []
    for poly_json in json_dict["polygons"]:
        polygons.append(
            Polygon([(point["x"] - left, point["y"] - top)
                     for point in poly_json["keypoints"]])
        )
    if extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)
    psoi = PolygonsOnImage(polygons, shape=shape)
    if size is not None:
        shape_resized = _compute_resized_shape(shape, size)
        psoi = psoi.on(shape_resized)
    return psoi
def angle_between_vectors(v1, v2):
    """
    Return the angle in radians between vectors `v1` and `v2`.

    From http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python

    Parameters
    ----------
    v1 : (N,) ndarray
        First vector.

    v2 : (N,) ndarray
        Second vector.

    Returns
    -------
    out : float
        Angle in radians.

    Examples
    --------
    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([0, 1, 0]))
    1.570796...

    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([1, 0, 0]))
    0.0

    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([-1, 0, 0]))
    3.141592...

    """
    def _to_unit(vec):
        # A zero vector cannot be normalized; map it to an all-zero vector,
        # which makes the dot product 0 and the resulting angle pi/2.
        norm = np.linalg.norm(vec)
        if norm > 0:
            return vec / norm
        return np.float32(vec) * 0

    cos_angle = np.dot(_to_unit(v1), _to_unit(v2))
    # Clip guards against values marginally outside [-1, 1] caused by
    # floating point error, which would make arccos return NaN.
    return np.arccos(np.clip(cos_angle, -1.0, 1.0))
# TODO is this used anywhere?
def compute_line_intersection_point(x1, y1, x2, y2, x3, y3, x4, y4):
    """
    Compute the intersection point of two lines.

    Taken from https://stackoverflow.com/a/20679579 .

    Parameters
    ----------
    x1 : number
        x coordinate of the first point on line 1. (The lines extends beyond this point.)

    y1 : number:
        y coordinate of the first point on line 1. (The lines extends beyond this point.)

    x2 : number
        x coordinate of the second point on line 1. (The lines extends beyond this point.)

    y2 : number:
        y coordinate of the second point on line 1. (The lines extends beyond this point.)

    x3 : number
        x coordinate of the first point on line 2. (The lines extends beyond this point.)

    y3 : number:
        y coordinate of the first point on line 2. (The lines extends beyond this point.)

    x4 : number
        x coordinate of the second point on line 2. (The lines extends beyond this point.)

    y4 : number:
        y coordinate of the second point on line 2. (The lines extends beyond this point.)

    Returns
    -------
    tuple of number or bool
        The coordinate of the intersection point as a tuple ``(x, y)``.
        If the lines are parallel (no intersection point or an infinite number of them), the result is False.

    """
    # Represent each infinite line in the homogeneous form A*x + B*y = C
    # so the intersection can be solved via Cramer's rule.
    a1, b1 = y1 - y2, x2 - x1
    c1 = -(x1 * y2 - x2 * y1)
    a2, b2 = y3 - y4, x4 - x3
    c2 = -(x3 * y4 - x4 * y3)

    det = a1 * b2 - b1 * a2
    if det == 0:
        # Determinant of zero means the lines are parallel (or identical),
        # so there is no unique intersection point.
        return False

    det_x = c1 * b2 - b1 * c2
    det_y = a1 * c2 - c1 * a2
    return det_x / det, det_y / det
# TODO replace by cv2.putText()?
def draw_text(img, y, x, text, color=(0, 255, 0), size=25):
    """
    Draw text on an image.

    This uses by default DejaVuSans as its font, which is included in this library.

    dtype support::

        * ``uint8``: yes; fully tested
        * ``uint16``: no
        * ``uint32``: no
        * ``uint64``: no
        * ``int8``: no
        * ``int16``: no
        * ``int32``: no
        * ``int64``: no
        * ``float16``: no
        * ``float32``: yes; not tested
        * ``float64``: no
        * ``float128``: no
        * ``bool``: no

        TODO check if other dtypes could be enabled

    Parameters
    ----------
    img : (H,W,3) ndarray
        The image array to draw text on.
        Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).

    y : int
        y-coordinate of the top left corner of the text.

    x : int
        x-coordinate of the top left corner of the text.

    text : str
        The text to draw.

    color : iterable of int, optional
        Color of the text to draw. For RGB-images this is expected to be an RGB color.

    size : int, optional
        Font size of the text to draw.

    Returns
    -------
    img_np : (H,W,3) ndarray
        Input image with text drawn on it.

    """
    do_assert(img.dtype in [np.uint8, np.float32])

    input_dtype = img.dtype
    if img.dtype == np.float32:
        # PIL can only draw on uint8 arrays; the original dtype is restored
        # at the end. NOTE(review): float values are truncated here (not
        # rounded/clipped) -- assumes inputs are already within [0.0, 255.0].
        img = img.astype(np.uint8)

    img = PIL_Image.fromarray(img)
    font = PIL_ImageFont.truetype(DEFAULT_FONT_FP, size)
    context = PIL_ImageDraw.Draw(img)
    context.text((x, y), text, fill=tuple(color), font=font)
    img_np = np.asarray(img)

    # PIL/asarray returns read only array
    if not img_np.flags["WRITEABLE"]:
        try:
            # this seems to no longer work with np 1.16 (or was pillow updated?)
            img_np.setflags(write=True)
        except ValueError as ex:
            if "cannot set WRITEABLE flag to True of this array" in str(ex):
                # buffer owned by PIL cannot be unlocked; fall back to a copy
                img_np = np.copy(img_np)

    if img_np.dtype != input_dtype:
        # restore the caller's dtype for float32 inputs
        img_np = img_np.astype(input_dtype)

    return img_np
# TODO rename sizes to size?
def imresize_many_images(images, sizes=None, interpolation=None):
    """
    Resize many images to a specified size.

    dtype support::

        * ``uint8``: yes; fully tested
        * ``uint16``: yes; tested
        * ``uint32``: no (1)
        * ``uint64``: no (2)
        * ``int8``: yes; tested (3)
        * ``int16``: yes; tested
        * ``int32``: limited; tested (4)
        * ``int64``: no (2)
        * ``float16``: yes; tested (5)
        * ``float32``: yes; tested
        * ``float64``: yes; tested
        * ``float128``: no (1)
        * ``bool``: yes; tested (6)

        - (1) rejected by ``cv2.imresize``
        - (2) results too inaccurate
        - (3) mapped internally to ``int16`` when interpolation!="nearest"
        - (4) only supported for interpolation="nearest", other interpolations lead to cv2 error
        - (5) mapped internally to ``float32``
        - (6) mapped internally to ``uint8``

    Parameters
    ----------
    images : (N,H,W,[C]) ndarray or list of (H,W,[C]) ndarray
        Array of the images to resize.
        Usually recommended to be of dtype uint8.

    sizes : float or iterable of int or iterable of float
        The new size of the images, given either as a fraction (a single float) or as
        a ``(height, width)`` tuple of two integers or as a ``(height fraction, width fraction)``
        tuple of two floats.

    interpolation : None or str or int, optional
        The interpolation to use during resize.
        If int, then expected to be one of:

            * ``cv2.INTER_NEAREST`` (nearest neighbour interpolation)
            * ``cv2.INTER_LINEAR`` (linear interpolation)
            * ``cv2.INTER_AREA`` (area interpolation)
            * ``cv2.INTER_CUBIC`` (cubic interpolation)

        If string, then expected to be one of:

            * ``nearest`` (identical to ``cv2.INTER_NEAREST``)
            * ``linear`` (identical to ``cv2.INTER_LINEAR``)
            * ``area`` (identical to ``cv2.INTER_AREA``)
            * ``cubic`` (identical to ``cv2.INTER_CUBIC``)

        If None, the interpolation will be chosen automatically. For size
        increases, area interpolation will be picked and for size decreases,
        linear interpolation will be picked.

    Returns
    -------
    result : (N,H',W',[C]) ndarray
        Array of the resized images.

    Examples
    --------
    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), 2.0)

    Converts 2 RGB images of height and width 16 to images of height and width 16*2 = 32.

    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (16, 32))

    Converts 2 RGB images of height and width 16 to images of height 16 and width 32.

    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (2.0, 4.0))

    Converts 2 RGB images of height and width 16 to images of height 32 and width 64.

    """
    # we just do nothing if the input contains zero images
    # one could also argue that an exception would be appropriate here
    if len(images) == 0:
        return images

    # verify that all input images have height/width > 0
    do_assert(
        all([image.shape[0] > 0 and image.shape[1] > 0 for image in images]),
        ("Cannot resize images, because at least one image has a height and/or width of zero. "
         + "Observed shapes were: %s.") % (str([image.shape for image in images]),)
    )

    # verify that sizes contains only values >0
    if is_single_number(sizes) and sizes <= 0:
        raise Exception(
            "Cannot resize to the target size %.8f, because the value is zero or lower than zero." % (sizes,))
    elif isinstance(sizes, tuple) and (sizes[0] <= 0 or sizes[1] <= 0):
        sizes_str = [
            "int %d" % (sizes[0],) if is_single_integer(sizes[0]) else "float %.8f" % (sizes[0],),
            "int %d" % (sizes[1],) if is_single_integer(sizes[1]) else "float %.8f" % (sizes[1],),
        ]
        sizes_str = "(%s, %s)" % (sizes_str[0], sizes_str[1])
        raise Exception(
            "Cannot resize to the target sizes %s. At least one value is zero or lower than zero." % (sizes_str,))

    # change after the validation to make the above error messages match the original input
    if is_single_number(sizes):
        sizes = (sizes, sizes)
    else:
        do_assert(len(sizes) == 2, "Expected tuple with exactly two entries, got %d entries." % (len(sizes),))
        do_assert(all([is_single_number(val) for val in sizes]),
                  "Expected tuple with two ints or floats, got types %s." % (str([type(val) for val in sizes]),))

    # if input is a list, call this function N times for N images
    # but check beforehand if all images have the same shape, then just convert to a single array and de-convert
    # afterwards
    if isinstance(images, list):
        nb_shapes = len(set([image.shape for image in images]))
        if nb_shapes == 1:
            return list(imresize_many_images(np.array(images), sizes=sizes, interpolation=interpolation))
        else:
            return [imresize_many_images(image[np.newaxis, ...], sizes=sizes, interpolation=interpolation)[0, ...]
                    for image in images]

    shape = images.shape
    do_assert(images.ndim in [3, 4], "Expected array of shape (N, H, W, [C]), got shape %s" % (str(shape),))
    nb_images = shape[0]
    im_height, im_width = shape[1], shape[2]
    nb_channels = shape[3] if images.ndim > 3 else None

    # resolve fractional sizes to absolute pixel sizes relative to the input
    height, width = sizes[0], sizes[1]
    height = int(np.round(im_height * height)) if is_single_float(height) else height
    width = int(np.round(im_width * width)) if is_single_float(width) else width

    # no-op fast path: target size equals input size
    if height == im_height and width == im_width:
        return np.copy(images)

    ip = interpolation
    do_assert(ip is None or ip in IMRESIZE_VALID_INTERPOLATIONS)
    if ip is None:
        # NOTE(review): cv2's docs recommend INTER_AREA for *shrinking* and
        # linear/cubic for enlarging; this picks the opposite, but it matches
        # the documented behavior in the docstring above -- confirm intended.
        if height > im_height or width > im_width:
            ip = cv2.INTER_AREA
        else:
            ip = cv2.INTER_LINEAR
    elif ip in ["nearest", cv2.INTER_NEAREST]:
        ip = cv2.INTER_NEAREST
    elif ip in ["linear", cv2.INTER_LINEAR]:
        ip = cv2.INTER_LINEAR
    elif ip in ["area", cv2.INTER_AREA]:
        ip = cv2.INTER_AREA
    else:  # if ip in ["cubic", cv2.INTER_CUBIC]:
        ip = cv2.INTER_CUBIC

    # TODO find more beautiful way to avoid circular imports
    from . import dtypes as iadt
    # int32 is only allowed for nearest-neighbour interpolation (cv2 errors
    # out for the other interpolations), hence the two allow-lists.
    if ip == cv2.INTER_NEAREST:
        iadt.gate_dtypes(images,
                         allowed=["bool", "uint8", "uint16", "int8", "int16", "int32", "float16", "float32", "float64"],
                         disallowed=["uint32", "uint64", "uint128", "uint256", "int64", "int128", "int256",
                                     "float96", "float128", "float256"],
                         augmenter=None)
    else:
        iadt.gate_dtypes(images,
                         allowed=["bool", "uint8", "uint16", "int8", "int16", "float16", "float32", "float64"],
                         disallowed=["uint32", "uint64", "uint128", "uint256", "int32", "int64", "int128", "int256",
                                     "float96", "float128", "float256"],
                         augmenter=None)

    result_shape = (nb_images, height, width)
    if nb_channels is not None:
        result_shape = result_shape + (nb_channels,)
    result = np.zeros(result_shape, dtype=images.dtype)
    for i, image in enumerate(images):
        input_dtype = image.dtype
        # map dtypes that cv2.resize cannot handle (or handles badly) to
        # supported substitutes; mapped back after the resize below
        if image.dtype.type == np.bool_:
            image = image.astype(np.uint8) * 255
        elif image.dtype.type == np.int8 and ip != cv2.INTER_NEAREST:
            image = image.astype(np.int16)
        elif image.dtype.type == np.float16:
            image = image.astype(np.float32)

        # cv2.resize expects the target size as (width, height)
        result_img = cv2.resize(image, (width, height), interpolation=ip)
        assert result_img.dtype == image.dtype

        # cv2 removes the channel axis if input was (H, W, 1)
        # we re-add it (but only if input was not (H, W))
        if len(result_img.shape) == 2 and nb_channels is not None and nb_channels == 1:
            result_img = result_img[:, :, np.newaxis]

        # undo the dtype substitutions from above
        if input_dtype.type == np.bool_:
            result_img = result_img > 127
        elif input_dtype.type == np.int8 and ip != cv2.INTER_NEAREST:
            # TODO somehow better avoid circular imports here
            from . import dtypes as iadt
            result_img = iadt.restore_dtypes_(result_img, np.int8)
        elif input_dtype.type == np.float16:
            # TODO see above
            from . import dtypes as iadt
            result_img = iadt.restore_dtypes_(result_img, np.float16)
        result[i] = result_img
    return result
def imresize_single_image(image, sizes, interpolation=None):
    """
    Resize a single image.

    dtype support::

        See :func:`imgaug.imgaug.imresize_many_images`.

    Parameters
    ----------
    image : (H,W,C) ndarray or (H,W) ndarray
        Array of the image to resize.
        Usually recommended to be of dtype uint8.

    sizes : float or iterable of int or iterable of float
        See :func:`imgaug.imgaug.imresize_many_images`.

    interpolation : None or str or int, optional
        See :func:`imgaug.imgaug.imresize_many_images`.

    Returns
    -------
    out : (H',W',C) ndarray or (H',W') ndarray
        The resized image.

    """
    # The batch resize function expects (N,H,W,C); temporarily add a channel
    # axis for (H,W) inputs and strip it again from the output.
    had_no_channels = (image.ndim == 2)
    if had_no_channels:
        image = image[:, :, np.newaxis]
    do_assert(len(image.shape) == 3, image.shape)

    resized = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
    if had_no_channels:
        return np.squeeze(resized[0, :, :, 0])
    return resized[0, ...]
# TODO add crop() function too
def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
    """
    Pad an image-like array on its top/right/bottom/left side.

    This function is a wrapper around :func:`numpy.pad`.

    dtype support::

        * ``uint8``: yes; fully tested (1)
        * ``uint16``: yes; fully tested (1)
        * ``uint32``: yes; fully tested (2) (3)
        * ``uint64``: yes; fully tested (2) (3)
        * ``int8``: yes; fully tested (1)
        * ``int16``: yes; fully tested (1)
        * ``int32``: yes; fully tested (1)
        * ``int64``: yes; fully tested (2) (3)
        * ``float16``: yes; fully tested (2) (3)
        * ``float32``: yes; fully tested (1)
        * ``float64``: yes; fully tested (1)
        * ``float128``: yes; fully tested (2) (3)
        * ``bool``: yes; tested (2) (3)

        - (1) Uses ``cv2`` if `mode` is one of: ``"constant"``, ``"edge"``, ``"reflect"``, ``"symmetric"``.
              Otherwise uses ``numpy``.
        - (2) Uses ``numpy``.
        - (3) Rejected by ``cv2``.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pad.

    top : int, optional
        Amount of pixels to add at the top side of the image. Must be 0 or greater.

    right : int, optional
        Amount of pixels to add at the right side of the image. Must be 0 or greater.

    bottom : int, optional
        Amount of pixels to add at the bottom side of the image. Must be 0 or greater.

    left : int, optional
        Amount of pixels to add at the left side of the image. Must be 0 or greater.

    mode : str, optional
        Padding mode to use. See :func:`numpy.pad` for details.
        In case of mode ``constant``, the parameter `cval` will be used as the ``constant_values``
        parameter to :func:`numpy.pad`.
        In case of mode ``linear_ramp``, the parameter `cval` will be used as the ``end_values``
        parameter to :func:`numpy.pad`.

    cval : number, optional
        Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
        The cval is expected to match the input array's dtype and value range.

    Returns
    -------
    arr_pad : (H',W') ndarray or (H',W',C) ndarray
        Padded array with height ``H'=H+top+bottom`` and width ``W'=W+left+right``.

    """
    do_assert(arr.ndim in [2, 3])
    do_assert(top >= 0)
    do_assert(right >= 0)
    do_assert(bottom >= 0)
    do_assert(left >= 0)
    if top > 0 or right > 0 or bottom > 0 or left > 0:
        # modes mapped to None below are only supported by the numpy backend
        mapping_mode_np_to_cv2 = {
            "constant": cv2.BORDER_CONSTANT,
            "edge": cv2.BORDER_REPLICATE,
            "linear_ramp": None,
            "maximum": None,
            "mean": None,
            "median": None,
            "minimum": None,
            "reflect": cv2.BORDER_REFLECT_101,
            "symmetric": cv2.BORDER_REFLECT,
            "wrap": None,
            cv2.BORDER_CONSTANT: cv2.BORDER_CONSTANT,
            cv2.BORDER_REPLICATE: cv2.BORDER_REPLICATE,
            cv2.BORDER_REFLECT_101: cv2.BORDER_REFLECT_101,
            cv2.BORDER_REFLECT: cv2.BORDER_REFLECT
        }
        bad_mode_cv2 = mapping_mode_np_to_cv2.get(mode, None) is None

        # these datatypes all simply generate a "TypeError: src data type = X is not supported" error
        bad_datatype_cv2 = arr.dtype.name in ["uint32", "uint64", "int64", "float16", "float128", "bool"]

        if not bad_datatype_cv2 and not bad_mode_cv2:
            # fast path: use cv2.copyMakeBorder
            cval = float(cval) if arr.dtype.kind == "f" else int(cval)  # results in TypeError otherwise for np inputs

            if arr.ndim == 2 or arr.shape[2] <= 4:
                # without this, only the first channel is padded with the cval, all following channels with 0
                if arr.ndim == 3:
                    cval = tuple([cval] * arr.shape[2])

                arr_pad = cv2.copyMakeBorder(arr, top=top, bottom=bottom, left=left, right=right,
                                             borderType=mapping_mode_np_to_cv2[mode], value=cval)
                if arr.ndim == 3 and arr_pad.ndim == 2:
                    arr_pad = arr_pad[..., np.newaxis]
            else:
                # cv2 can only handle up to 4 channels per call; pad the array
                # in chunks of 4 channels and re-concatenate afterwards
                result = []
                channel_start_idx = 0
                while channel_start_idx < arr.shape[2]:
                    arr_c = arr[..., channel_start_idx:channel_start_idx+4]
                    cval_c = tuple([cval] * arr_c.shape[2])
                    arr_pad_c = cv2.copyMakeBorder(arr_c, top=top, bottom=bottom, left=left, right=right,
                                                   borderType=mapping_mode_np_to_cv2[mode], value=cval_c)
                    arr_pad_c = np.atleast_3d(arr_pad_c)
                    result.append(arr_pad_c)
                    channel_start_idx += 4
                arr_pad = np.concatenate(result, axis=2)
        else:
            # fallback: numpy backend for dtypes/modes cv2 rejects
            paddings_np = [(top, bottom), (left, right)]  # paddings for 2d case
            if arr.ndim == 3:
                paddings_np.append((0, 0))  # add paddings for 3d case

            if mode == "constant":
                arr_pad = np.pad(arr, paddings_np, mode=mode, constant_values=cval)
            elif mode == "linear_ramp":
                arr_pad = np.pad(arr, paddings_np, mode=mode, end_values=cval)
            else:
                arr_pad = np.pad(arr, paddings_np, mode=mode)

        return arr_pad
    # nothing to pad: return an unmodified copy
    return np.copy(arr)
# TODO allow shape as input instead of array
def compute_paddings_for_aspect_ratio(arr, aspect_ratio):
    """
    Compute the amount of pixels by which an array has to be padded to fulfill an aspect ratio.

    The aspect ratio is given as width/height.
    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array for which to compute pad amounts.

    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.

    Returns
    -------
    result : tuple of int
        Required padding amounts to reach the target aspect ratio, given as a tuple
        of the form ``(top, right, bottom, left)``.

    """
    do_assert(arr.ndim in [2, 3])
    do_assert(aspect_ratio > 0)
    height, width = arr.shape[0:2]
    do_assert(height > 0)

    pads = [0, 0, 0, 0]  # (top, right, bottom, left)
    current_ratio = width / height
    if current_ratio < aspect_ratio:
        # image is too narrow: pad left/right (right gets the extra pixel
        # when the required amount is odd)
        missing = (aspect_ratio * height) - width
        pads[1] = int(np.ceil(missing / 2))
        pads[3] = int(np.floor(missing / 2))
    elif current_ratio > aspect_ratio:
        # image is too wide: pad top/bottom (bottom gets the extra pixel)
        missing = ((1/aspect_ratio) * width) - height
        pads[0] = int(np.floor(missing / 2))
        pads[2] = int(np.ceil(missing / 2))
    return tuple(pads)
def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0, return_pad_amounts=False):
    """
    Pad an image-like array on its sides so that it matches a target aspect ratio.

    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.

    dtype support::

        See :func:`imgaug.imgaug.pad`.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pad.

    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.

    mode : str, optional
        Padding mode to use. See :func:`numpy.pad` for details.

    cval : number, optional
        Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.

    return_pad_amounts : bool, optional
        If False, then only the padded image will be returned. If True, a tuple with two
        entries will be returned, where the first entry is the padded image and the second
        entry are the amounts by which each image side was padded. These amounts are again a
        tuple of the form (top, right, bottom, left), with each value being an integer.

    Returns
    -------
    arr_padded : (H',W') ndarray or (H',W',C) ndarray
        Padded image as (H',W') or (H',W',C) ndarray, fulfilling the given aspect_ratio.

    tuple of int
        Amounts by which the image was padded on each side, given as a tuple ``(top, right, bottom, left)``.
        This tuple is only returned if `return_pad_amounts` was set to True.
        Otherwise only ``arr_padded`` is returned.

    """
    # delegate the math to compute_paddings_for_aspect_ratio() and the
    # actual padding to pad()
    paddings = compute_paddings_for_aspect_ratio(arr, aspect_ratio)
    pad_top, pad_right, pad_bottom, pad_left = paddings
    arr_padded = pad(
        arr,
        top=pad_top,
        right=pad_right,
        bottom=pad_bottom,
        left=pad_left,
        mode=mode,
        cval=cval
    )

    if return_pad_amounts:
        return arr_padded, (pad_top, pad_right, pad_bottom, pad_left)
    return arr_padded
def pool(arr, block_size, func, cval=0, preserve_dtype=True):
    """
    Resize an array by pooling values within blocks.

    dtype support::

        * ``uint8``: yes; fully tested
        * ``uint16``: yes; tested
        * ``uint32``: yes; tested (2)
        * ``uint64``: no (1)
        * ``int8``: yes; tested
        * ``int16``: yes; tested
        * ``int32``: yes; tested (2)
        * ``int64``: no (1)
        * ``float16``: yes; tested
        * ``float32``: yes; tested
        * ``float64``: yes; tested
        * ``float128``: yes; tested (2)
        * ``bool``: yes; tested

        - (1) results too inaccurate (at least when using np.average as func)
        - (2) Note that scikit-image documentation says that the wrapped pooling function converts
              inputs to float64. Actual tests showed no indication of that happening (at least when
              using preserve_dtype=True).

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. Ideally of datatype ``numpy.float64``.

    block_size : int or tuple of int
        Spatial size of each group of values to pool, aka kernel size.
        If a single integer, then a symmetric block of that size along height and width will be used.
        If a tuple of two values, it is assumed to be the block size along height and width of the image-like,
        with pooling happening per channel.
        If a tuple of three values, it is assumed to be the block size along height, width and channels.

    func : callable
        Function to apply to a given block in order to convert it to a single number,
        e.g. :func:`numpy.average`, :func:`numpy.min`, :func:`numpy.max`.

    cval : number, optional
        Value to use in order to pad the array along its border if the array cannot be divided
        by `block_size` without remainder.

    preserve_dtype : bool, optional
        Whether to convert the array back to the input datatype if it is changed away from
        that in the pooling process.

    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after pooling.

    """
    # TODO find better way to avoid circular import
    from . import dtypes as iadt
    iadt.gate_dtypes(arr,
                     allowed=["bool", "uint8", "uint16", "uint32", "int8", "int16", "int32",
                              "float16", "float32", "float64", "float128"],
                     disallowed=["uint64", "uint128", "uint256", "int64", "int128", "int256",
                                 "float256"],
                     augmenter=None)

    do_assert(arr.ndim in [2, 3])
    is_valid_int = is_single_integer(block_size) and block_size >= 1
    # FIX: this check previously ended in a bare list comprehension, which is
    # always truthy for non-empty tuples -- per-element validation (integer,
    # >= 1) was silently skipped. all() performs the intended check.
    is_valid_tuple = is_iterable(block_size) and len(block_size) in [2, 3] \
        and all([is_single_integer(val) and val >= 1 for val in block_size])
    do_assert(is_valid_int or is_valid_tuple)

    if is_single_integer(block_size):
        block_size = [block_size, block_size]
    if len(block_size) < arr.ndim:
        # a 2-tuple on a 3d array means "pool per channel": use a block
        # size of 1 along the channel axis
        block_size = list(block_size) + [1]

    input_dtype = arr.dtype
    arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func, cval=cval)
    if preserve_dtype and arr_reduced.dtype.type != input_dtype:
        arr_reduced = arr_reduced.astype(input_dtype)
    return arr_reduced
def avg_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Resize an array using average pooling.

    dtype support::

        See :func:`imgaug.imgaug.pool`.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. See :func:`imgaug.pool` for details.

    block_size : int or tuple of int or tuple of int
        Size of each block of values to pool. See :func:`imgaug.pool` for details.

    cval : number, optional
        Padding value. See :func:`imgaug.pool` for details.

    preserve_dtype : bool, optional
        Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.

    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after average pooling.

    """
    # Average pooling is generic pooling with np.average as the reducer.
    reducer = np.average
    return pool(arr, block_size, reducer, cval=cval, preserve_dtype=preserve_dtype)
def max_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Resize an array using max-pooling.

    dtype support::

        See :func:`imgaug.imgaug.pool`.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pool. See :func:`imgaug.pool` for details.

    block_size : int or tuple of int or tuple of int
        Size of each block of values to pool. See `imgaug.pool` for details.

    cval : number, optional
        Padding value. See :func:`imgaug.pool` for details.

    preserve_dtype : bool, optional
        Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.

    Returns
    -------
    arr_reduced : (H',W') ndarray or (H',W',C') ndarray
        Array after max-pooling.

    """
    # Max-pooling is generic pooling with np.max as the reducer.
    reducer = np.max
    return pool(arr, block_size, reducer, cval=cval, preserve_dtype=preserve_dtype)
def draw_grid(images, rows=None, cols=None):
    """
    Combine multiple input images into a single image showing them in a grid.

    dtype support::

        * ``uint8``: yes; fully tested
        * ``uint16``: yes; fully tested
        * ``uint32``: yes; fully tested
        * ``uint64``: yes; fully tested
        * ``int8``: yes; fully tested
        * ``int16``: yes; fully tested
        * ``int32``: yes; fully tested
        * ``int64``: yes; fully tested
        * ``float16``: yes; fully tested
        * ``float32``: yes; fully tested
        * ``float64``: yes; fully tested
        * ``float128``: yes; fully tested
        * ``bool``: yes; fully tested

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        The input images to convert to a grid.

    rows : None or int, optional
        The number of rows to show in the grid.
        If None, it will be automatically derived.

    cols : None or int, optional
        The number of cols to show in the grid.
        If None, it will be automatically derived.

    Returns
    -------
    grid : (H',W',3) ndarray
        Image of the generated grid.

    """
    nb_images = len(images)
    do_assert(nb_images > 0)

    if is_np_array(images):
        do_assert(images.ndim == 4)
    else:
        # list input: all entries must be (H,W,C) arrays of one shared dtype
        do_assert(is_iterable(images) and is_np_array(images[0]) and images[0].ndim == 3)
        dts = [image.dtype.name for image in images]
        nb_dtypes = len(set(dts))
        do_assert(nb_dtypes == 1, ("All images provided to draw_grid() must have the same dtype, "
                                   + "found %d dtypes (%s)") % (nb_dtypes, ", ".join(dts)))

    # every grid cell is sized to hold the largest input image
    cell_height = max([image.shape[0] for image in images])
    cell_width = max([image.shape[1] for image in images])
    channels = set([image.shape[2] for image in images])
    do_assert(
        len(channels) == 1,
        "All images are expected to have the same number of channels, "
        + "but got channel set %s with length %d instead." % (str(channels), len(channels))
    )
    nb_channels = list(channels)[0]

    # derive missing grid dimensions; note: when both rows and cols are
    # provided, cols is recomputed from rows (matches historic behaviour)
    if rows is None and cols is None:
        rows = cols = int(math.ceil(math.sqrt(nb_images)))
    elif rows is not None:
        cols = int(math.ceil(nb_images / rows))
    elif cols is not None:
        rows = int(math.ceil(nb_images / cols))
    do_assert(rows * cols >= nb_images)

    grid_dtype = images.dtype if is_np_array(images) else images[0].dtype
    grid = np.zeros((cell_height * rows, cell_width * cols, nb_channels), dtype=grid_dtype)
    # fill cells row by row; smaller images are placed at the cell's top-left
    for cell_idx, image in enumerate(images):
        row_idx, col_idx = divmod(cell_idx, cols)
        y1 = cell_height * row_idx
        x1 = cell_width * col_idx
        grid[y1:y1 + image.shape[0], x1:x1 + image.shape[1], :] = image
    return grid
def show_grid(images, rows=None, cols=None):
    """
    Combine the input images into a grid image and display it in a new window.

    dtype support::

        minimum of (
            :func:`imgaug.imgaug.draw_grid`,
            :func:`imgaug.imgaug.imshow`
        )

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        See :func:`imgaug.draw_grid`.

    rows : None or int, optional
        See :func:`imgaug.draw_grid`.

    cols : None or int, optional
        See :func:`imgaug.draw_grid`.

    """
    # render the grid, then hand it to the display backend
    imshow(draw_grid(images, rows=rows, cols=cols))
def imshow(image, backend=IMSHOW_BACKEND_DEFAULT):
    """
    Shows an image in a window.

    dtype support::

        * ``uint8``: yes; not tested
        * ``uint16``: ?
        * ``uint32``: ?
        * ``uint64``: ?
        * ``int8``: ?
        * ``int16``: ?
        * ``int32``: ?
        * ``int64``: ?
        * ``float16``: ?
        * ``float32``: ?
        * ``float64``: ?
        * ``float128``: ?
        * ``bool``: ?

    Parameters
    ----------
    image : (H,W,3) ndarray
        Image to show.

    backend : {'matplotlib', 'cv2'}, optional
        Library to use to show the image. May be either matplotlib or OpenCV ('cv2').
        OpenCV tends to be faster, but apparently causes more technical issues.

    """
    do_assert(backend in ["matplotlib", "cv2"], "Expected backend 'matplotlib' or 'cv2', got %s." % (backend,))

    if backend == "cv2":
        image_bgr = image
        if image.ndim == 3 and image.shape[2] in [3, 4]:
            # cv2 expects BGR channel order: drop a potential alpha channel
            # and reverse the channel axis (RGB(A) -> BGR)
            image_bgr = image[..., 0:3][..., ::-1]

        win_name = "imgaug-default-window"
        cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
        cv2.imshow(win_name, image_bgr)
        # block until a key is pressed, then close the window again
        cv2.waitKey(0)
        cv2.destroyWindow(win_name)
    else:
        # import only when necessary (faster startup; optional dependency; less fragile -- see issue #225)
        import matplotlib.pyplot as plt

        # size the figure so the image is shown roughly at its pixel size
        dpi = 96
        h, w = image.shape[0] / dpi, image.shape[1] / dpi
        w = max(w, 6)  # if the figure is too narrow, the footer may appear and make the fig suddenly wider (ugly)
        fig, ax = plt.subplots(figsize=(w, h), dpi=dpi)
        fig.canvas.set_window_title("imgaug.imshow(%s)" % (image.shape,))
        # cmap is only activate for grayscale images
        ax.imshow(image, cmap="gray")
        plt.show()
def do_assert(condition, message="Assertion failed."):
    """
    Raise an ``AssertionError`` if a condition is not met.

    This behaves like an `assert` statement, but is kept even in optimized
    code (where `assert` statements are removed), which is why the library
    uses it instead of plain `assert`.

    Parameters
    ----------
    condition : bool
        If False, an exception is raised.

    message : str, optional
        Error message.

    """
    if condition:
        return
    raise AssertionError(str(message))
class HooksImages(object):
    """
    Class to intervene with image augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.

    Parameters
    ----------
    activator : None or callable, optional
        A function that gives permission to execute an augmenter.
        The expected interface is ``f(images, augmenter, parents, default)``,
        where ``images`` are the input images to augment, ``augmenter`` is the
        instance of the augmenter to execute, ``parents`` are previously
        executed augmenters and ``default`` is an expected default value to be
        returned if the activator function does not plan to make a decision
        for the given inputs.

    propagator : None or callable, optional
        A function that gives permission to propagate the augmentation further
        to the children of an augmenter. This happens after the activator.
        In theory, an augmenter may augment images itself (if allowed by the
        activator) and then execute child augmenters afterwards (if allowed by
        the propagator). If the activator returned False, the propagation step
        will never be executed.
        The expected interface is ``f(images, augmenter, parents, default)``,
        with all arguments having identical meaning to the activator.

    preprocessor : None or callable, optional
        A function to call before an augmenter performed any augmentations.
        The interface is ``f(images, augmenter, parents)``,
        with all arguments having identical meaning to the activator.
        It is expected to return the input images, optionally modified.

    postprocessor : None or callable, optional
        A function to call after an augmenter performed augmentations.
        The interface is the same as for the preprocessor.

    Examples
    --------
    >>> seq = iaa.Sequential([
    >>>     iaa.GaussianBlur(3.0, name="blur"),
    >>>     iaa.Dropout(0.05, name="dropout"),
    >>>     iaa.Affine(translate_px=-5, name="affine")
    >>> ])
    >>> images = [np.zeros((10, 10), dtype=np.uint8)]
    >>>
    >>> def activator(images, augmenter, parents, default):
    >>>     return False if augmenter.name in ["blur", "dropout"] else default
    >>>
    >>> seq_det = seq.to_deterministic()
    >>> images_aug = seq_det.augment_images(images)
    >>> heatmaps = [np.random.rand(*(3, 10, 10))]
    >>> heatmaps_aug = seq_det.augment_images(
    >>>     heatmaps,
    >>>     hooks=ia.HooksImages(activator=activator)
    >>> )

    This augments images and their respective heatmaps in the same way.
    The heatmaps however are only modified by Affine, not by GaussianBlur or
    Dropout.

    """

    def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None):
        self.activator = activator
        self.propagator = propagator
        self.preprocessor = preprocessor
        self.postprocessor = postprocessor

    def is_activated(self, images, augmenter, parents, default):
        """
        Returns whether an augmenter may be executed.

        Falls back to `default` when no activator callback was provided.

        Returns
        -------
        bool
            If True, the augmenter may be executed. If False, it may not be executed.

        """
        if self.activator is None:
            return default
        return self.activator(images, augmenter, parents, default)

    def is_propagating(self, images, augmenter, parents, default):
        """
        Returns whether an augmenter may call its children to augment an
        image. This is independent of the augmenter itself possible changing
        the image, without calling its children. (Most (all?) augmenters with
        children currently dont perform any changes themselves.)

        Falls back to `default` when no propagator callback was provided.

        Returns
        -------
        bool
            If True, the augmenter may be propagate to its children. If False, it may not.

        """
        if self.propagator is None:
            return default
        return self.propagator(images, augmenter, parents, default)

    def preprocess(self, images, augmenter, parents):
        """
        A function to be called before the augmentation of images starts (per augmenter).

        Without a preprocessor callback, the images pass through unchanged.

        Returns
        -------
        (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.

        """
        if self.preprocessor is None:
            return images
        return self.preprocessor(images, augmenter, parents)

    def postprocess(self, images, augmenter, parents):
        """
        A function to be called after the augmentation of images was
        performed.

        Without a postprocessor callback, the images pass through unchanged.

        Returns
        -------
        (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.

        """
        if self.postprocessor is None:
            return images
        return self.postprocessor(images, augmenter, parents)
class HooksHeatmaps(HooksImages):
    """
    Class to intervene with heatmap augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.

    Currently identical in behaviour to the hooks class for images; that
    may or may not change in the future.
    """
    pass
class HooksKeypoints(HooksImages):
    """
    Class to intervene with keypoint augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.

    Currently identical in behaviour to the hooks class for images; that
    may or may not change in the future.
    """
    pass
def compute_geometric_median(X, eps=1e-5):
    """
    Estimate the geometric median of points in 2D.

    Code from https://stackoverflow.com/a/30305181

    Parameters
    ----------
    X : (N,2) ndarray
        Points in 2D. Second axis must be given in xy-form.

    eps : float, optional
        Distance threshold when to return the median.

    Returns
    -------
    (2,) ndarray
        Geometric median as xy-coordinate.
    """
    # Weiszfeld-style fixed point iteration, starting at the centroid.
    estimate = np.mean(X, 0)
    while True:
        dists = scipy.spatial.distance.cdist(X, [estimate])
        nonzero_mask = (dists != 0)[:, 0]
        inv_dists = 1 / dists[nonzero_mask]
        inv_sum = np.sum(inv_dists)
        weights = inv_dists / inv_sum
        weighted_mean = np.sum(weights * X[nonzero_mask], 0)
        nb_coincident = len(X) - np.sum(nonzero_mask)

        if nb_coincident == 0:
            candidate = weighted_mean
        elif nb_coincident == len(X):
            # all points coincide with the current estimate
            return estimate
        else:
            # some points coincide with the estimate; blend per Vardi & Zhang
            residual = (weighted_mean - estimate) * inv_sum
            rnorm = np.linalg.norm(residual)
            step = 0 if rnorm == 0 else nb_coincident / rnorm
            candidate = max(0, 1 - step) * weighted_mean + min(1, step) * estimate

        if scipy.spatial.distance.euclidean(estimate, candidate) < eps:
            return candidate
        estimate = candidate
class Keypoint(object):
"""
A single keypoint (aka landmark) on an image.
Parameters
----------
x : number
Coordinate of the keypoint on the x axis.
y : number
Coordinate of the keypoint on the y axis.
"""
def __init__(self, x, y):
self.x = x
self.y = y
@property
def x_int(self):
"""
Return the keypoint's x-coordinate, rounded to the closest integer.
Returns
-------
result : int
Keypoint's x-coordinate, rounded to the closest integer.
"""
return int(np.round(self.x))
@property
def y_int(self):
"""
Return the keypoint's y-coordinate, rounded to the closest integer.
Returns
-------
result : int
Keypoint's y-coordinate, rounded to the closest integer.
"""
return int(np.round(self.y))
def project(self, from_shape, to_shape):
"""
Project the keypoint onto a new position on a new image.
E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
and y=(20 of 100 pixels) and is projected onto a new image with
size (width=200, height=200), its new position will be (20, 40).
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
imgaug.Keypoint
Keypoint object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return self.deepcopy(x=self.x, y=self.y)
# avoid division by zeros
# TODO add this to other project() functions too
assert all([v > 0 for v in from_shape[0:2]]), \
"Got invalid from_shape %s in Keypoint.project()" % (
str(from_shape),)
if any([v <= 0 for v in to_shape[0:2]]):
import warnings
warnings.warn("Got invalid to_shape %s in Keypoint.project()" % (
str(to_shape),))
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
x = (self.x / from_width) * to_width
y = (self.y / from_height) * to_height
return self.deepcopy(x=x, y=y)
def shift(self, x=0, y=0):
"""
Move the keypoint around on an image.
Parameters
----------
x : number, optional
Move by this value on the x axis.
y : number, optional
Move by this value on the y axis.
Returns
-------
imgaug.Keypoint
Keypoint object with new coordinates.
"""
return self.deepcopy(self.x + x, self.y + y)
def generate_similar_points_manhattan(self, nb_steps, step_size, return_array=False):
"""
Generate nearby points to this keypoint based on manhattan distance.
To generate the first neighbouring points, a distance of S (step size) is moved from the
center point (this keypoint) to the top, right, bottom and left, resulting in four new
points. From these new points, the pattern is repeated. Overlapping points are ignored.
The resulting points have a shape similar to a square rotated by 45 degrees.
Parameters
----------
nb_steps : int
The number of steps to move from the center point. nb_steps=1 results in a total of
5 output points (1 center point + 4 neighbours).
step_size : number
The step size to move from every point to its neighbours.
return_array : bool, optional
Whether to return the generated points as a list of keypoints or an array
of shape ``(N,2)``, where ``N`` is the number of generated points and the second axis contains
the x- (first value) and y- (second value) coordinates.
Returns
-------
points : list of imgaug.Keypoint or (N,2) ndarray
If return_array was False, then a list of Keypoint.
Otherwise a numpy array of shape ``(N,2)``, where ``N`` is the number of generated points and
the second axis contains the x- (first value) and y- (second value) coordinates.
The center keypoint (the one on which this function was called) is always included.
"""
# TODO add test
# Points generates in manhattan style with S steps have a shape similar to a 45deg rotated
# square. The center line with the origin point has S+1+S = 1+2*S points (S to the left,
# S to the right). The lines above contain (S+1+S)-2 + (S+1+S)-2-2 + ... + 1 points. E.g.
# for S=2 it would be 3+1=4 and for S=3 it would be 5+3+1=9. Same for the lines below the
# center. Hence the total number of points is S+1+S + 2*(S^2).
points = np.zeros((nb_steps + 1 + nb_steps + 2*(nb_steps**2), 2), dtype=np.float32)
# we start at the bottom-most line and move towards the top-most line
yy = np.linspace(self.y - nb_steps * step_size, self.y + nb_steps * step_size, nb_steps + 1 + nb_steps)
# bottom-most line contains only one point
width = 1
nth_point = 0
for i_y, y in enumerate(yy):
if width == 1:
xx = [self.x]
else:
xx = np.linspace(self.x - (width-1)//2 * step_size, self.x + (width-1)//2 * step_size, width)
for x in xx:
points[nth_point] = [x, y]
nth_point += 1
if i_y < nb_steps:
width += 2
else:
width -= 2
if return_array:
return points
return [self.deepcopy(x=points[i, 0], y=points[i, 1]) for i in sm.xrange(points.shape[0])]
def copy(self, x=None, y=None):
"""
Create a shallow copy of the Keypoint object.
Parameters
----------
x : None or number, optional
Coordinate of the keypoint on the x axis.
If ``None``, the instance's value will be copied.
y : None or number, optional
Coordinate of the keypoint on the y axis.
If ``None``, the instance's value will be copied.
Returns
-------
imgaug.Keypoint
Shallow copy.
"""
return self.deepcopy(x=x, y=y)
def deepcopy(self, x=None, y=None):
"""
Create a deep copy of the Keypoint object.
Parameters
----------
x : None or number, optional
Coordinate of the keypoint on the x axis.
If ``None``, the instance's value will be copied.
y : None or number, optional
Coordinate of the keypoint on the y axis.
If ``None``, the instance's value will be copied.
Returns
-------
imgaug.Keypoint
Deep copy.
"""
x = self.x if x is None else x
y = self.y if y is None else y
return Keypoint(x=x, y=y)
def __repr__(self):
return self.__str__()
def __str__(self):
return "Keypoint(x=%.8f, y=%.8f)" % (self.x, self.y)
class KeypointsOnImage(object):
    """
    Object that represents all keypoints on a single image.

    Parameters
    ----------
    keypoints : list of imgaug.Keypoint
        List of keypoints on the image.

    shape : tuple of int
        The shape of the image on which the keypoints are placed.

    Examples
    --------
    >>> image = np.zeros((70, 70))
    >>> kps = [Keypoint(x=10, y=20), Keypoint(x=34, y=60)]
    >>> kps_oi = KeypointsOnImage(kps, shape=image.shape)
    """

    def __init__(self, keypoints, shape):
        self.keypoints = keypoints
        if is_np_array(shape):
            # an image array was passed instead of its shape tuple
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)

    @property
    def height(self):
        """Height of the image on which the keypoints are placed."""
        return self.shape[0]

    @property
    def width(self):
        """Width of the image on which the keypoints are placed."""
        return self.shape[1]

    @property
    def empty(self):
        """
        Returns whether this object contains zero keypoints.

        Returns
        -------
        result : bool
            True if this object contains zero keypoints.
        """
        return len(self.keypoints) == 0

    def on(self, image):
        """
        Project keypoints from one image to a new one.

        Parameters
        ----------
        image : ndarray or tuple of int
            New image onto which the keypoints are to be projected.
            May also simply be that new image's shape tuple.

        Returns
        -------
        keypoints : imgaug.KeypointsOnImage
            Object containing all projected keypoints.
        """
        if is_np_array(image):
            shape = image.shape
        else:
            shape = image

        if shape[0:2] == self.shape[0:2]:
            return self.deepcopy()
        else:
            keypoints = [kp.project(self.shape, shape) for kp in self.keypoints]
            return self.deepcopy(keypoints, shape)

    def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=3, copy=True, raise_if_out_of_image=False):
        """
        Draw all keypoints onto a given image. Each keypoint is marked by a square of a chosen color and size.

        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the keypoints.
            This image should usually have the same shape as
            set in KeypointsOnImage.shape.

        color : int or list of int or tuple of int or (3,) ndarray, optional
            The RGB color of all keypoints. If a single int ``C``, then that is
            equivalent to ``(C,C,C)``.

        alpha : float, optional
            The opacity of the drawn keypoint, where ``1.0`` denotes a fully
            visible keypoint and ``0.0`` an invisible one.

        size : int, optional
            The size of each point. If set to ``C``, each square will have
            size ``C x C``.

        copy : bool, optional
            Whether to copy the image before drawing the points.

        raise_if_out_of_image : bool, optional
            Whether to raise an exception if any keypoint is outside of the image.

        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn keypoints.
        """
        if copy:
            image = np.copy(image)

        input_dtype = image.dtype
        alpha_color = color
        if alpha < 0.01:
            # keypoints all invisible, nothing to do
            return image
        elif alpha > 0.99:
            alpha = 1
        else:
            # blend in float space, converted back to input_dtype at the end
            image = image.astype(np.float32, copy=False)
            alpha_color = alpha * np.array(color)

        height, width = image.shape[0:2]

        for keypoint in self.keypoints:
            y, x = keypoint.y_int, keypoint.x_int
            if 0 <= y < height and 0 <= x < width:
                x1 = max(x - size//2, 0)
                x2 = min(x + 1 + size//2, width)
                y1 = max(y - size//2, 0)
                y2 = min(y + 1 + size//2, height)
                if alpha == 1:
                    image[y1:y2, x1:x2] = color
                else:
                    image[y1:y2, x1:x2] = (
                        (1 - alpha) * image[y1:y2, x1:x2]
                        + alpha_color
                    )
            else:
                if raise_if_out_of_image:
                    # BUGFIX: x and y were previously swapped in the
                    # formatted message (it printed y as "x=" and x as "y=")
                    raise Exception("Cannot draw keypoint x=%.8f, y=%.8f on image with shape %s." % (x, y, image.shape))

        if image.dtype.name != input_dtype.name:
            if input_dtype.name == "uint8":
                image = np.clip(image, 0, 255, out=image)
            image = image.astype(input_dtype, copy=False)
        return image

    def shift(self, x=0, y=0):
        """
        Move the keypoints around on an image.

        Parameters
        ----------
        x : number, optional
            Move each keypoint by this value on the x axis.

        y : number, optional
            Move each keypoint by this value on the y axis.

        Returns
        -------
        out : KeypointsOnImage
            Keypoints after moving them.
        """
        keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]
        return self.deepcopy(keypoints)

    def get_coords_array(self):
        """
        Convert the coordinates of all keypoints in this object to an array of shape (N,2).

        Returns
        -------
        result : (N, 2) ndarray
            Where N is the number of keypoints. Each first value is the
            x coordinate, each second value is the y coordinate.
        """
        result = np.zeros((len(self.keypoints), 2), np.float32)
        for i, keypoint in enumerate(self.keypoints):
            result[i, 0] = keypoint.x
            result[i, 1] = keypoint.y
        return result

    @staticmethod
    def from_coords_array(coords, shape):
        """
        Convert an array (N,2) with a given image shape to a KeypointsOnImage object.

        Parameters
        ----------
        coords : (N, 2) ndarray
            Coordinates of ``N`` keypoints on the original image.
            Each first entry ``coords[i, 0]`` is expected to be the x coordinate.
            Each second entry ``coords[i, 1]`` is expected to be the y coordinate.

        shape : tuple
            Shape tuple of the image on which the keypoints are placed.

        Returns
        -------
        out : KeypointsOnImage
            KeypointsOnImage object that contains all keypoints from the array.
        """
        keypoints = [Keypoint(x=coords[i, 0], y=coords[i, 1]) for i in sm.xrange(coords.shape[0])]
        return KeypointsOnImage(keypoints, shape)

    # TODO add to_gaussian_heatmaps(), from_gaussian_heatmaps()
    def to_keypoint_image(self, size=1):
        """
        Draws a new black image of shape ``(H,W,N)`` in which all keypoint coordinates are set to 255.
        (H=shape height, W=shape width, N=number of keypoints)

        This function can be used as a helper when augmenting keypoints with a method that only supports the
        augmentation of images.

        Parameters
        ----------
        size : int
            Size of each (squared) point.

        Returns
        -------
        image : (H,W,N) ndarray
            Image in which the keypoints are marked. H is the height,
            defined in KeypointsOnImage.shape[0] (analogous W). N is the
            number of keypoints.
        """
        do_assert(len(self.keypoints) > 0)
        height, width = self.shape[0:2]
        image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
        do_assert(size % 2 != 0)
        sizeh = max(0, (size-1)//2)
        for i, keypoint in enumerate(self.keypoints):
            # TODO for float values spread activation over several cells
            # here and do voting at the end
            y = keypoint.y_int
            x = keypoint.x_int

            x1 = np.clip(x - sizeh, 0, width-1)
            x2 = np.clip(x + sizeh + 1, 0, width)
            y1 = np.clip(y - sizeh, 0, height-1)
            y2 = np.clip(y + sizeh + 1, 0, height)

            if x1 < x2 and y1 < y2:
                image[y1:y2, x1:x2, i] = 128
            if 0 <= y < height and 0 <= x < width:
                image[y, x, i] = 255
        return image

    @staticmethod
    def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1, nb_channels=None):  # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Converts an image generated by ``to_keypoint_image()`` back to a KeypointsOnImage object.

        Parameters
        ----------
        image : (H,W,N) ndarray
            The keypoints image. N is the number of keypoints.

        if_not_found_coords : tuple or list or dict or None, optional
            Coordinates to use for keypoints that cannot be found in `image`.
            If this is a list/tuple, it must have two integer values.
            If it is a dictionary, it must have the keys ``x`` and ``y`` with
            each containing one integer value.
            If this is None, then the keypoint will not be added to the final
            KeypointsOnImage object.

        threshold : int, optional
            The search for keypoints works by searching for the argmax in
            each channel. This parameters contains the minimum value that
            the max must have in order to be viewed as a keypoint.

        nb_channels : None or int, optional
            Number of channels of the image on which the keypoints are placed.
            Some keypoint augmenters require that information.
            If set to None, the keypoint's shape will be set
            to ``(height, width)``, otherwise ``(height, width, nb_channels)``.

        Returns
        -------
        out : KeypointsOnImage
            The extracted keypoints.
        """
        do_assert(len(image.shape) == 3)
        height, width, nb_keypoints = image.shape

        drop_if_not_found = False
        if if_not_found_coords is None:
            drop_if_not_found = True
            if_not_found_x = -1
            if_not_found_y = -1
        elif isinstance(if_not_found_coords, (tuple, list)):
            do_assert(len(if_not_found_coords) == 2)
            if_not_found_x = if_not_found_coords[0]
            if_not_found_y = if_not_found_coords[1]
        elif isinstance(if_not_found_coords, dict):
            if_not_found_x = if_not_found_coords["x"]
            if_not_found_y = if_not_found_coords["y"]
        else:
            raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
                type(if_not_found_coords),))

        keypoints = []
        for i in sm.xrange(nb_keypoints):
            maxidx_flat = np.argmax(image[..., i])
            maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
            found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
            if found:
                keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
            else:
                if drop_if_not_found:
                    pass  # don't add the keypoint to the result list, i.e. drop it
                else:
                    keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))

        out_shape = (height, width)
        if nb_channels is not None:
            out_shape += (nb_channels,)
        return KeypointsOnImage(keypoints, shape=out_shape)

    def to_distance_maps(self, inverted=False):
        """
        Generates a ``(H,W,K)`` output containing ``K`` distance maps for ``K`` keypoints.

        The k-th distance map contains at every location ``(y, x)`` the euclidean distance to the k-th keypoint.

        This function can be used as a helper when augmenting keypoints with a method that only supports
        the augmentation of images.

        Parameters
        ----------
        inverted : bool, optional
            If True, inverted distance maps are returned where each distance value d is replaced
            by ``d/(d+1)``, i.e. the distance maps have values in the range ``(0.0, 1.0]`` with 1.0
            denoting exactly the position of the respective keypoint.

        Returns
        -------
        distance_maps : (H,W,K) ndarray
            A ``float32`` array containing ``K`` distance maps for ``K`` keypoints. Each location
            ``(y, x, k)`` in the array denotes the euclidean distance at ``(y, x)`` to the ``k``-th keypoint.
            In inverted mode the distance ``d`` is replaced by ``d/(d+1)``. The height and width
            of the array match the height and width in ``KeypointsOnImage.shape``.
        """
        do_assert(len(self.keypoints) > 0)
        height, width = self.shape[0:2]
        distance_maps = np.zeros((height, width, len(self.keypoints)), dtype=np.float32)

        yy = np.arange(0, height)
        xx = np.arange(0, width)
        grid_xx, grid_yy = np.meshgrid(xx, yy)

        for i, keypoint in enumerate(self.keypoints):
            y, x = keypoint.y, keypoint.x
            distance_maps[:, :, i] = (grid_xx - x) ** 2 + (grid_yy - y) ** 2
        distance_maps = np.sqrt(distance_maps)
        if inverted:
            return 1/(distance_maps+1)
        return distance_maps

    # TODO add option to if_not_found_coords to reuse old keypoint coords
    @staticmethod
    def from_distance_maps(distance_maps, inverted=False, if_not_found_coords={"x": -1, "y": -1}, threshold=None,  # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
                           nb_channels=None):
        """
        Converts maps generated by ``to_distance_maps()`` back to a KeypointsOnImage object.

        Parameters
        ----------
        distance_maps : (H,W,N) ndarray
            The distance maps. N is the number of keypoints.

        inverted : bool, optional
            Whether the given distance maps were generated in inverted or normal mode.

        if_not_found_coords : tuple or list or dict or None, optional
            Coordinates to use for keypoints that cannot be found in ``distance_maps``.
            If this is a list/tuple, it must have two integer values.
            If it is a dictionary, it must have the keys ``x`` and ``y``, with each
            containing one integer value.
            If this is None, then the keypoint will not be added to the final
            KeypointsOnImage object.

        threshold : float, optional
            The search for keypoints works by searching for the argmin (non-inverted) or
            argmax (inverted) in each channel. This parameters contains the maximum (non-inverted)
            or minimum (inverted) value to accept in order to view a hit as a keypoint.
            Use None to use no min/max.

        nb_channels : None or int, optional
            Number of channels of the image on which the keypoints are placed.
            Some keypoint augmenters require that information.
            If set to None, the keypoint's shape will be set
            to ``(height, width)``, otherwise ``(height, width, nb_channels)``.

        Returns
        -------
        imgaug.KeypointsOnImage
            The extracted keypoints.
        """
        do_assert(len(distance_maps.shape) == 3)
        height, width, nb_keypoints = distance_maps.shape

        drop_if_not_found = False
        if if_not_found_coords is None:
            drop_if_not_found = True
            if_not_found_x = -1
            if_not_found_y = -1
        elif isinstance(if_not_found_coords, (tuple, list)):
            do_assert(len(if_not_found_coords) == 2)
            if_not_found_x = if_not_found_coords[0]
            if_not_found_y = if_not_found_coords[1]
        elif isinstance(if_not_found_coords, dict):
            if_not_found_x = if_not_found_coords["x"]
            if_not_found_y = if_not_found_coords["y"]
        else:
            raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
                type(if_not_found_coords),))

        keypoints = []
        for i in sm.xrange(nb_keypoints):
            # TODO introduce voting here among all distance values that have min/max values
            if inverted:
                hitidx_flat = np.argmax(distance_maps[..., i])
            else:
                hitidx_flat = np.argmin(distance_maps[..., i])
            hitidx_ndim = np.unravel_index(hitidx_flat, (height, width))
            if not inverted and threshold is not None:
                found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] < threshold)
            elif inverted and threshold is not None:
                found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] >= threshold)
            else:
                found = True
            if found:
                keypoints.append(Keypoint(x=hitidx_ndim[1], y=hitidx_ndim[0]))
            else:
                if drop_if_not_found:
                    pass  # don't add the keypoint to the result list, i.e. drop it
                else:
                    keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))

        out_shape = (height, width)
        if nb_channels is not None:
            out_shape += (nb_channels,)
        return KeypointsOnImage(keypoints, shape=out_shape)

    def copy(self, keypoints=None, shape=None):
        """
        Create a shallow copy of the KeypointsOnImage object.

        Parameters
        ----------
        keypoints : None or list of imgaug.Keypoint, optional
            List of keypoints on the image. If ``None``, the instance's
            keypoints will be copied.

        shape : tuple of int, optional
            The shape of the image on which the keypoints are placed.
            If ``None``, the instance's shape will be copied.

        Returns
        -------
        imgaug.KeypointsOnImage
            Shallow copy.
        """
        result = copy.copy(self)
        if keypoints is not None:
            result.keypoints = keypoints
        if shape is not None:
            result.shape = shape
        return result

    def deepcopy(self, keypoints=None, shape=None):
        """
        Create a deep copy of the KeypointsOnImage object.

        Parameters
        ----------
        keypoints : None or list of imgaug.Keypoint, optional
            List of keypoints on the image. If ``None``, the instance's
            keypoints will be copied.

        shape : tuple of int, optional
            The shape of the image on which the keypoints are placed.
            If ``None``, the instance's shape will be copied.

        Returns
        -------
        imgaug.KeypointsOnImage
            Deep copy.
        """
        # for some reason deepcopy is way slower here than manual copy
        if keypoints is None:
            keypoints = [kp.deepcopy() for kp in self.keypoints]
        if shape is None:
            shape = tuple(self.shape)
        return KeypointsOnImage(keypoints, shape)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "KeypointsOnImage(%s, shape=%s)" % (str(self.keypoints), self.shape)
# TODO functions: square(), to_aspect_ratio(), contains_point()
class BoundingBox(object):
"""
Class representing bounding boxes.
Each bounding box is parameterized by its top left and bottom right corners. Both are given
as x and y-coordinates. The corners are intended to lie inside the bounding box area.
As a result, a bounding box that lies completely inside the image but has maximum extensions
would have coordinates ``(0.0, 0.0)`` and ``(W - epsilon, H - epsilon)``. Note that coordinates
are saved internally as floats.
Parameters
----------
x1 : number
X-coordinate of the top left of the bounding box.
y1 : number
Y-coordinate of the top left of the bounding box.
x2 : number
X-coordinate of the bottom right of the bounding box.
y2 : number
Y-coordinate of the bottom right of the bounding box.
label : None or str, optional
Label of the bounding box, e.g. a string representing the class.
"""
def __init__(self, x1, y1, x2, y2, label=None):
"""Create a new BoundingBox instance."""
if x1 > x2:
x2, x1 = x1, x2
do_assert(x2 >= x1)
if y1 > y2:
y2, y1 = y1, y2
do_assert(y2 >= y1)
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.label = label
@property
def x1_int(self):
"""
Return the x-coordinate of the top left corner as an integer.
Returns
-------
int
X-coordinate of the top left corner, rounded to the closest integer.
"""
return int(np.round(self.x1)) # use numpy's round to have consistent behaviour between python versions
@property
def y1_int(self):
"""
Return the y-coordinate of the top left corner as an integer.
Returns
-------
int
Y-coordinate of the top left corner, rounded to the closest integer.
"""
return int(np.round(self.y1)) # use numpy's round to have consistent behaviour between python versions
@property
def x2_int(self):
"""
Return the x-coordinate of the bottom left corner as an integer.
Returns
-------
int
X-coordinate of the bottom left corner, rounded to the closest integer.
"""
return int(np.round(self.x2)) # use numpy's round to have consistent behaviour between python versions
@property
def y2_int(self):
"""
Return the y-coordinate of the bottom left corner as an integer.
Returns
-------
int
Y-coordinate of the bottom left corner, rounded to the closest integer.
"""
return int(np.round(self.y2)) # use numpy's round to have consistent behaviour between python versions
@property
def height(self):
"""
Estimate the height of the bounding box.
Returns
-------
number
Height of the bounding box.
"""
return self.y2 - self.y1
@property
def width(self):
"""
Estimate the width of the bounding box.
Returns
-------
number
Width of the bounding box.
"""
return self.x2 - self.x1
@property
def center_x(self):
"""
Estimate the x-coordinate of the center point of the bounding box.
Returns
-------
number
X-coordinate of the center point of the bounding box.
"""
return self.x1 + self.width/2
@property
def center_y(self):
"""
Estimate the y-coordinate of the center point of the bounding box.
Returns
-------
number
Y-coordinate of the center point of the bounding box.
"""
return self.y1 + self.height/2
@property
def area(self):
"""
Estimate the area of the bounding box.
Returns
-------
number
Area of the bounding box, i.e. `height * width`.
"""
return self.height * self.width
def contains(self, other):
"""
Estimate whether the bounding box contains a point.
Parameters
----------
other : imgaug.Keypoint
Point to check for.
Returns
-------
bool
True if the point is contained in the bounding box, False otherwise.
"""
x, y = other.x, other.y
return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2
def project(self, from_shape, to_shape):
"""
Project the bounding box onto a new position on a new image.
E.g. if the bounding box is on its original image at
x1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto
a new image with size (width=200, height=200), its new position will
be (x1=20, y1=40). (Analogous for x2/y2.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
out : imgaug.BoundingBox
BoundingBox object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return self.copy()
else:
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
do_assert(from_height > 0)
do_assert(from_width > 0)
do_assert(to_height > 0)
do_assert(to_width > 0)
x1 = (self.x1 / from_width) * to_width
y1 = (self.y1 / from_height) * to_height
x2 = (self.x2 / from_width) * to_width
y2 = (self.y2 / from_height) * to_height
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
"""
Extend the size of the bounding box along its sides.
Parameters
----------
all_sides : number, optional
Value by which to extend the bounding box size along all sides.
top : number, optional
Value by which to extend the bounding box size along its top side.
right : number, optional
Value by which to extend the bounding box size along its right side.
bottom : number, optional
Value by which to extend the bounding box size along its bottom side.
left : number, optional
Value by which to extend the bounding box size along its left side.
Returns
-------
imgaug.BoundingBox
Extended bounding box.
"""
return BoundingBox(
x1=self.x1 - all_sides - left,
x2=self.x2 + all_sides + right,
y1=self.y1 - all_sides - top,
y2=self.y2 + all_sides + bottom
)
def intersection(self, other, default=None):
"""
Compute the intersection bounding box of this bounding box and another one.
Note that in extreme cases, the intersection can be a single point, meaning that the intersection bounding box
will exist, but then also has a height and width of zero.
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to generate the intersection.
default : any, optional
Default value to return if there is no intersection.
Returns
-------
imgaug.BoundingBox or any
Intersection bounding box of the two bounding boxes if there is an intersection.
If there is no intersection, the default value will be returned, which can by anything.
"""
x1_i = max(self.x1, other.x1)
y1_i = max(self.y1, other.y1)
x2_i = min(self.x2, other.x2)
y2_i = min(self.y2, other.y2)
if x1_i > x2_i or y1_i > y2_i:
return default
else:
return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)
def union(self, other):
"""
Compute the union bounding box of this bounding box and another one.
This is equivalent to drawing a bounding box around all corners points of both
bounding boxes.
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to generate the union.
Returns
-------
imgaug.BoundingBox
Union bounding box of the two bounding boxes.
"""
return BoundingBox(
x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2),
)
def iou(self, other):
"""
Compute the IoU of this bounding box with another one.
IoU is the intersection over union, defined as::
``area(intersection(A, B)) / area(union(A, B))``
``= area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))``
Parameters
----------
other : imgaug.BoundingBox
Other bounding box with which to compare.
Returns
-------
float
IoU between the two bounding boxes.
"""
inters = self.intersection(other)
if inters is None:
return 0.0
else:
area_union = self.area + other.area - inters.area
return inters.area / area_union if area_union > 0 else 0.0
def is_fully_within_image(self, image):
"""
Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is fully inside the image area. False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
return self.x1 >= 0 and self.x2 < width and self.y1 >= 0 and self.y2 < height
def is_partly_within_image(self, image):
"""
Estimate whether the bounding box is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is at least partially inside the image area. False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
eps = np.finfo(np.float32).eps
img_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps)
return self.intersection(img_bb) is not None
def is_out_of_image(self, image, fully=True, partly=False):
"""
Estimate whether the bounding box is partially or fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape and must contain at least two integers.
fully : bool, optional
Whether to return True if the bounding box is fully outside fo the image area.
partly : bool, optional
Whether to return True if the bounding box is at least partially outside fo the
image area.
Returns
-------
bool
True if the bounding box is partially/fully outside of the image area, depending
on defined parameters. False otherwise.
"""
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
else:
return fully
def cut_out_of_image(self, *args, **kwargs):
warnings.warn(DeprecationWarning("BoundingBox.cut_out_of_image() is deprecated. Use "
"BoundingBox.clip_out_of_image() instead. It has the "
"exactly same interface (simple renaming)."))
return self.clip_out_of_image(*args, **kwargs)
def clip_out_of_image(self, image):
"""
Clip off all parts of the bounding box that are outside of the image.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use for the clipping of the bounding box.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
result : imgaug.BoundingBox
Bounding box, clipped to fall within the image dimensions.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
do_assert(height > 0)
do_assert(width > 0)
eps = np.finfo(np.float32).eps
x1 = np.clip(self.x1, 0, width - eps)
x2 = np.clip(self.x2, 0, width - eps)
y1 = np.clip(self.y1, 0, height - eps)
y2 = np.clip(self.y2, 0, height - eps)
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
# TODO convert this to x/y params?
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift the bounding box from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the bounding box from the top.
right : None or int, optional
Amount of pixels by which to shift the bounding box from the right.
bottom : None or int, optional
Amount of pixels by which to shift the bounding box from the bottom.
left : None or int, optional
Amount of pixels by which to shift the bounding box from the left.
Returns
-------
result : imgaug.BoundingBox
Shifted bounding box.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
)
# TODO add explicit test for zero-sized BBs (worked when tested by hand)
    def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
        """
        Draw the bounding box on an image.

        Parameters
        ----------
        image : (H,W,C) ndarray(uint8)
            The image onto which to draw the bounding box.
        color : iterable of int, optional
            The color to use, corresponding to the channel layout of the image. Usually RGB.
        alpha : float, optional
            The transparency of the drawn bounding box, where 1.0 denotes no transparency and
            0.0 is invisible.
        thickness : int, optional
            The thickness of the bounding box in pixels. If the value is larger than 1, then
            additional pixels will be added around the bounding box (i.e. extension towards the
            outside).
        copy : bool, optional
            Whether to copy the input image or change it in-place.
        raise_if_out_of_image : bool, optional
            Whether to raise an error if the bounding box is partially/fully outside of the
            image. If set to False, no error will be raised and only the parts inside the image
            will be drawn.

        Returns
        -------
        result : (H,W,C) ndarray(uint8)
            Image with bounding box drawn on it.

        Raises
        ------
        Exception
            If `raise_if_out_of_image` is True and the box is (partially) outside the image.
        """
        if raise_if_out_of_image and self.is_out_of_image(image):
            raise Exception("Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s." % (
                self.x1, self.y1, self.x2, self.y2, image.shape))
        result = np.copy(image) if copy else image
        if isinstance(color, (tuple, list)):
            color = np.uint8(color)
        # each iteration draws one 1px-thick rectangle, offset outward by i pixels
        for i in range(thickness):
            y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int
            # When y values get into the range (H-0.5, H), the *_int functions round them to H.
            # That is technically sensible, but in the case of drawing means that the border lies
            # just barely outside of the image, making the border disappear, even though the BB
            # is fully inside the image. Here we correct for that because of beauty reasons.
            # Same is the case for x coordinates.
            if self.is_fully_within_image(image):
                y1 = np.clip(y1, 0, image.shape[0]-1)
                y2 = np.clip(y2, 0, image.shape[0]-1)
                x1 = np.clip(x1, 0, image.shape[1]-1)
                x2 = np.clip(x2, 0, image.shape[1]-1)
            # rectangle corners, clockwise starting at the top-left
            y = [y1-i, y1-i, y2+i, y2+i]
            x = [x1-i, x2+i, x2+i, x1-i]
            rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
            if alpha >= 0.99:
                # effectively opaque -> overwrite pixels directly
                result[rr, cc, :] = color
            else:
                if is_float_array(result):
                    # float image: blend in-place, then clip to uint8 value range
                    result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                    result = np.clip(result, 0, 255)
                else:
                    # integer image: blend in float32, then cast back to the input dtype
                    input_dtype = result.dtype
                    result = result.astype(np.float32)
                    result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                    result = np.clip(result, 0, 255).astype(input_dtype)
        return result
    def extract_from_image(self, image, prevent_zero_size=True):
        """
        Extract the image pixels within the bounding box.

        This function will zero-pad the image if the bounding box is partially/fully outside of
        the image.

        Parameters
        ----------
        image : (H,W) ndarray or (H,W,C) ndarray
            The image from which to extract the pixels within the bounding box.
        prevent_zero_size : bool, optional
            Whether to prevent height or width of the extracted image from becoming zero.
            If this is set to True and height or width of the bounding box is below 1, the height/width will
            be increased to 1. This can be useful to prevent problems, e.g. with image saving or plotting.
            If it is set to False, images will be returned as ``(H', W')`` or ``(H', W', 3)`` with ``H`` or
            ``W`` potentially being 0.

        Returns
        -------
        image : (H',W') ndarray or (H',W',C) ndarray
            Pixels within the bounding box. Zero-padded if the bounding box is partially/fully
            outside of the image. If prevent_zero_size is activated, it is guaranteed that ``H'>0``
            and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``.
        """
        pad_top = 0
        pad_right = 0
        pad_bottom = 0
        pad_left = 0
        height, width = image.shape[0], image.shape[1]
        x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
        # When y values get into the range (H-0.5, H), the *_int functions round them to H.
        # That is technically sensible, but in the case of extraction leads to a black border,
        # which is both ugly and unexpected after calling cut_out_of_image(). Here we correct for
        # that because of beauty reasons.
        # Same is the case for x coordinates.
        if self.is_fully_within_image(image):
            y1 = np.clip(y1, 0, image.shape[0]-1)
            y2 = np.clip(y2, 0, image.shape[0]-1)
            x1 = np.clip(x1, 0, image.shape[1]-1)
            x2 = np.clip(x2, 0, image.shape[1]-1)
        # TODO add test
        # enforce a minimum extent of 1px per axis (see prevent_zero_size above)
        if prevent_zero_size:
            if abs(x2 - x1) < 1:
                x2 = x1 + 1
            if abs(y2 - y1) < 1:
                y2 = y1 + 1
        # if the bb is outside of the image area, the following pads the image
        # first with black pixels until the bb is inside the image
        # and only then extracts the image area
        # TODO probably more efficient to initialize an array of zeros
        # and copy only the portions of the bb into that array that are
        # natively inside the image area
        if x1 < 0:
            # shift the box right by the overhang; remember how much padding is needed
            pad_left = abs(x1)
            x2 = x2 + abs(x1)
            x1 = 0
        if y1 < 0:
            # same for the vertical axis
            pad_top = abs(y1)
            y2 = y2 + abs(y1)
            y1 = 0
        if x2 >= width:
            pad_right = x2 - (width - 1)
        if y2 >= height:
            pad_bottom = y2 - (height - 1)
        if any([val > 0 for val in [pad_top, pad_right, pad_bottom, pad_left]]):
            # pad 2D (grayscale) and 3D (channel) images with zeros as needed
            if len(image.shape) == 2:
                image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right)), mode="constant")
            else:
                image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), mode="constant")
        return image[y1:y2, x1:x2]
# TODO also add to_heatmap
# TODO add this to BoundingBoxesOnImage
def to_keypoints(self):
"""
Convert the corners of the bounding box to keypoints (clockwise, starting at top left).
Returns
-------
list of imgaug.Keypoint
Corners of the bounding box as keypoints.
"""
return [
Keypoint(x=self.x1, y=self.y1),
Keypoint(x=self.x2, y=self.y1),
Keypoint(x=self.x2, y=self.y2),
Keypoint(x=self.x1, y=self.y2)
]
def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a shallow copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not None, then the x1 coordinate of the copied object will be set to this value.
y1 : None or number
If not None, then the y1 coordinate of the copied object will be set to this value.
x2 : None or number
If not None, then the x2 coordinate of the copied object will be set to this value.
y2 : None or number
If not None, then the y2 coordinate of the copied object will be set to this value.
label : None or string
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.BoundingBox
Shallow copy.
"""
return BoundingBox(
x1=self.x1 if x1 is None else x1,
x2=self.x2 if x2 is None else x2,
y1=self.y1 if y1 is None else y1,
y2=self.y2 if y2 is None else y2,
label=self.label if label is None else label
)
def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a deep copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not None, then the x1 coordinate of the copied object will be set to this value.
y1 : None or number
If not None, then the y1 coordinate of the copied object will be set to this value.
x2 : None or number
If not None, then the x2 coordinate of the copied object will be set to this value.
y2 : None or number
If not None, then the y2 coordinate of the copied object will be set to this value.
label : None or string
If not None, then the label of the copied object will be set to this value.
Returns
-------
imgaug.BoundingBox
Deep copy.
"""
return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)
def __repr__(self):
return self.__str__()
def __str__(self):
return "BoundingBox(x1=%.4f, y1=%.4f, x2=%.4f, y2=%.4f, label=%s)" % (
self.x1, self.y1, self.x2, self.y2, self.label)
class BoundingBoxesOnImage(object):
    """
    Object that represents all bounding boxes on a single image.

    Parameters
    ----------
    bounding_boxes : list of imgaug.BoundingBox
        List of bounding boxes on the image.
    shape : tuple of int or ndarray
        The shape of the image on which the bounding boxes are placed.
        May also be the image itself, in which case its shape is used.

    Examples
    --------
    >>> image = np.zeros((100, 100))
    >>> bbs = [
    >>>     BoundingBox(x1=10, y1=20, x2=20, y2=30),
    >>>     BoundingBox(x1=25, y1=50, x2=30, y2=70)
    >>> ]
    >>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
    """
    def __init__(self, bounding_boxes, shape):
        self.bounding_boxes = bounding_boxes
        if is_np_array(shape):
            # an image was provided instead of its shape
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)

    # TODO remove this? here it is image height, at BoundingBox it is bounding box height
    @property
    def height(self):
        """
        Get the height of the image on which the bounding boxes fall.

        Returns
        -------
        int
            Image height.
        """
        return self.shape[0]

    # TODO remove this? here it is image width, at BoundingBox it is bounding box width
    @property
    def width(self):
        """
        Get the width of the image on which the bounding boxes fall.

        Returns
        -------
        int
            Image width.
        """
        return self.shape[1]

    @property
    def empty(self):
        """
        Returns whether this object contains zero bounding boxes.

        Returns
        -------
        bool
            True if this object contains zero bounding boxes.
        """
        return len(self.bounding_boxes) == 0

    def on(self, image):
        """
        Project bounding boxes from one image to a new one.

        Parameters
        ----------
        image : ndarray or tuple of int
            New image onto which the bounding boxes are to be projected.
            May also simply be that new image's shape tuple.

        Returns
        -------
        bounding_boxes : imgaug.BoundingBoxesOnImage
            Object containing all projected bounding boxes.
        """
        shape = image.shape if is_np_array(image) else image
        if shape[0:2] == self.shape[0:2]:
            # identical height/width -> projection is a no-op
            return self.deepcopy()
        bounding_boxes = [bb.project(self.shape, shape) for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bounding_boxes, shape)

    @classmethod
    def from_xyxy_array(cls, xyxy, shape):
        """
        Convert an (N,4) ndarray to a BoundingBoxesOnImage object.

        This is the inverse of :func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.

        Parameters
        ----------
        xyxy : (N,4) ndarray
            Array containing the corner coordinates (top-left, bottom-right) of ``N`` bounding boxes
            in the form ``(x1, y1, x2, y2)``. Should usually be of dtype ``float32``.
        shape : tuple of int
            Shape of the image on which the bounding boxes are placed.
            Should usually be ``(H, W, C)`` or ``(H, W)``.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Object containing a list of BoundingBox objects following the provided corner coordinates.
        """
        do_assert(xyxy.shape[1] == 4, "Expected input array of shape (N, 4), got shape %s." % (xyxy.shape,))
        boxes = [BoundingBox(*row) for row in xyxy]
        return cls(boxes, shape)

    def to_xyxy_array(self, dtype=np.float32):
        """
        Convert the BoundingBoxesOnImage object to an (N,4) ndarray.

        This is the inverse of :func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`.

        Parameters
        ----------
        dtype : numpy.dtype, optional
            Desired output datatype of the ndarray.

        Returns
        -------
        ndarray
            (N,4) ndarray array, where ``N`` denotes the number of bounding boxes and ``4`` denotes the
            top-left and bottom-right bounding box corner coordinates in form ``(x1, y1, x2, y2)``.
        """
        xyxy_array = np.zeros((len(self.bounding_boxes), 4), dtype=np.float32)
        for i, box in enumerate(self.bounding_boxes):
            xyxy_array[i] = [box.x1, box.y1, box.x2, box.y2]
        return xyxy_array.astype(dtype)

    def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
        """
        Draw all bounding boxes onto a given image.

        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the bounding boxes.
            This image should usually have the same shape as
            set in BoundingBoxesOnImage.shape.
        color : int or list of int or tuple of int or (3,) ndarray, optional
            The RGB color of all bounding boxes. If a single int ``C``, then that is
            equivalent to ``(C,C,C)``.
        alpha : float, optional
            Alpha/transparency of the bounding box.
        thickness : int, optional
            Thickness in pixels.
        copy : bool, optional
            Whether to copy the image before drawing the points.
        raise_if_out_of_image : bool, optional
            Whether to raise an exception if any bounding box is outside of the image.

        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn bounding boxes.
        """
        # Copy the image at most once here instead of once per bounding box.
        # (With zero boxes, the input is returned unchanged, as before.)
        if copy and self.bounding_boxes:
            image = np.copy(image)
        for bb in self.bounding_boxes:
            image = bb.draw_on_image(
                image,
                color=color,
                alpha=alpha,
                thickness=thickness,
                copy=False,
                raise_if_out_of_image=raise_if_out_of_image
            )
        return image

    def remove_out_of_image(self, fully=True, partly=False):
        """
        Remove all bounding boxes that are fully or partially outside of the image.

        Parameters
        ----------
        fully : bool, optional
            Whether to remove bounding boxes that are fully outside of the image.
        partly : bool, optional
            Whether to remove bounding boxes that are partially outside of the image.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Reduced set of bounding boxes, with those that were fully/partially outside of
            the image removed.
        """
        bbs_clean = [bb for bb in self.bounding_boxes
                     if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
        return BoundingBoxesOnImage(bbs_clean, shape=self.shape)

    def cut_out_of_image(self):
        """Deprecated alias; see :func:`BoundingBoxesOnImage.clip_out_of_image`."""
        # NOTE: a space was missing between the two sentences of this warning
        warnings.warn(DeprecationWarning("BoundingBoxesOnImage.cut_out_of_image() is deprecated. "
                                         "Use BoundingBoxesOnImage.clip_out_of_image() instead. It "
                                         "has the exactly same interface (simple renaming)."))
        return self.clip_out_of_image()

    def clip_out_of_image(self):
        """
        Clip off all parts from all bounding boxes that are outside of the image.

        Bounding boxes that are fully outside of the image are removed.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Bounding boxes, clipped to fall within the image dimensions.
        """
        bbs_cut = [bb.clip_out_of_image(self.shape)
                   for bb in self.bounding_boxes if bb.is_partly_within_image(self.shape)]
        return BoundingBoxesOnImage(bbs_cut, shape=self.shape)

    def shift(self, top=None, right=None, bottom=None, left=None):
        """
        Shift all bounding boxes from one or more image sides, i.e. move them on the x/y-axis.

        Parameters
        ----------
        top : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the top.
        right : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the right.
        bottom : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the bottom.
        left : None or int, optional
            Amount of pixels by which to shift all bounding boxes from the left.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Shifted bounding boxes.
        """
        bbs_new = [bb.shift(top=top, right=right, bottom=bottom, left=left) for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs_new, shape=self.shape)

    def copy(self):
        """
        Create a shallow copy of the BoundingBoxesOnImage object.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Shallow copy.
        """
        # `copy` in the body resolves to the module-level name, not this
        # method (class scope is not searched from within method bodies)
        return copy.copy(self)

    def deepcopy(self):
        """
        Create a deep copy of the BoundingBoxesOnImage object.

        Returns
        -------
        imgaug.BoundingBoxesOnImage
            Deep copy.
        """
        # Manual copy is far faster than deepcopy for BoundingBoxesOnImage,
        # so use manual copy here too
        bbs = [bb.deepcopy() for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs, tuple(self.shape))

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "BoundingBoxesOnImage(%s, shape=%s)" % (str(self.bounding_boxes), self.shape)
# TODO somehow merge with BoundingBox
# TODO add functions: simplify() (eg via shapely.ops.simplify()),
# extend(all_sides=0, top=0, right=0, bottom=0, left=0),
# intersection(other, default=None), union(other), iou(other), to_heatmap, to_mask
class Polygon(object):
"""
Class representing polygons.
Each polygon is parameterized by its corner points, given as absolute x- and y-coordinates
with sub-pixel accuracy.
Parameters
----------
exterior : list of imgaug.Keypoint or list of tuple of float or (N,2) ndarray
List of points defining the polygon. May be either a list of Keypoint objects or a list of tuples in xy-form
or a numpy array of shape (N,2) for N points in xy-form.
All coordinates are expected to be the absolute coordinates in the image, given as floats, e.g. x=10.7
and y=3.4 for a point at coordinates (10.7, 3.4). Their order is expected to be clock-wise. They are expected
to not be closed (i.e. first and last coordinate differ).
label : None or str, optional
Label of the polygon, e.g. a string representing the class.
"""
def __init__(self, exterior, label=None):
"""Create a new Polygon instance."""
if isinstance(exterior, list):
if not exterior:
# for empty lists, make sure that the shape is (0, 2) and not (0,) as that is also expected when the
# input is a numpy array
self.exterior = np.zeros((0, 2), dtype=np.float32)
elif isinstance(exterior[0], Keypoint):
# list of Keypoint
self.exterior = np.float32([[point.x, point.y] for point in exterior])
else:
# list of tuples (x, y)
self.exterior = np.float32([[point[0], point[1]] for point in exterior])
else:
do_assert(is_np_array(exterior),
("Expected exterior to be a list of tuples (x, y) or "
+ "an (N, 2) array, got type %s") % (exterior,))
do_assert(exterior.ndim == 2 and exterior.shape[1] == 2,
("Expected exterior to be a list of tuples (x, y) or "
+ "an (N, 2) array, got an array of shape %s") % (
exterior.shape,))
self.exterior = np.float32(exterior)
# Remove last point if it is essentially the same as the first point (polygons are always assumed to be
# closed anyways). This also prevents problems with shapely, which seems to add the last point automatically.
if len(self.exterior) >= 2 and np.allclose(self.exterior[0, :], self.exterior[-1, :]):
self.exterior = self.exterior[:-1]
self.label = label
    @property
    def xx(self):
        """
        Return the x-coordinates of all points in the exterior.

        Returns
        -------
        (N,) ndarray
            X-coordinates of all points in the exterior as a float32 ndarray.
        """
        return self.exterior[:, 0]
    @property
    def yy(self):
        """
        Return the y-coordinates of all points in the exterior.

        Returns
        -------
        (N,) ndarray
            Y-coordinates of all points in the exterior as a float32 ndarray.
        """
        return self.exterior[:, 1]
    @property
    def xx_int(self):
        """
        Return the x-coordinates of all points in the exterior, rounded to the closest integer value.

        Returns
        -------
        (N,) ndarray
            X-coordinates of all points in the exterior, rounded to the closest integer value.
            Result dtype is int32.
        """
        return np.int32(np.round(self.xx))
    @property
    def yy_int(self):
        """
        Return the y-coordinates of all points in the exterior, rounded to the closest integer value.

        Returns
        -------
        (N,) ndarray
            Y-coordinates of all points in the exterior, rounded to the closest integer value.
            Result dtype is int32.
        """
        return np.int32(np.round(self.yy))
@property
def is_valid(self):
"""
Estimate whether the polygon has a valid shape.
To to be considered valid, the polygons must be made up of at least 3 points and have concave shape.
Multiple consecutive points are allowed to have the same coordinates.
Returns
-------
bool
True if polygon has at least 3 points and is concave, otherwise False.
"""
if len(self.exterior) < 3:
return False
return self.to_shapely_polygon().is_valid
@property
def area(self):
"""
Estimate the area of the polygon.
Returns
-------
number
Area of the polygon.
"""
if len(self.exterior) < 3:
raise Exception("Cannot compute the polygon's area because it contains less than three points.")
poly = self.to_shapely_polygon()
return poly.area
def project(self, from_shape, to_shape):
"""
Project the polygon onto an image with different shape.
The relative coordinates of all points remain the same.
E.g. a point at (x=20, y=20) on an image (width=100, height=200) will be
projected on a new image (width=200, height=100) to (x=40, y=10).
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
imgaug.Polygon
Polygon object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return self.copy()
exterior = [Keypoint(x=x, y=y).project(from_shape, to_shape) for x, y in self.exterior]
return self.copy(exterior=exterior)
def find_closest_point_index(self, x, y, return_distance=False):
"""
Find the index of the point within the exterior that is closest to the given coordinates.
"Closeness" is here defined based on euclidean distance.
This method will raise an AssertionError if the exterior contains no points.
Parameters
----------
x : number
X-coordinate around which to search for close points.
y : number
Y-coordinate around which to search for close points.
return_distance : bool, optional
Whether to also return the distance of the closest point.
Returns
-------
int
Index of the closest point.
number
Euclidean distance to the closest point.
This value is only returned if `return_distance` was set to True.
"""
do_assert(len(self.exterior) > 0)
distances = []
for x2, y2 in self.exterior:
d = (x2 - x) ** 2 + (y2 - y) ** 2
distances.append(d)
distances = np.sqrt(distances)
closest_idx = np.argmin(distances)
if return_distance:
return closest_idx, distances[closest_idx]
return closest_idx
def _compute_inside_image_point_mask(self, image):
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
h, w = shape[0:2]
return np.logical_and(
np.logical_and(0 <= self.exterior[:, 0], self.exterior[:, 0] < w),
np.logical_and(0 <= self.exterior[:, 1], self.exterior[:, 1] < h)
)
# TODO keep this method? it is almost an alias for is_out_of_image()
def is_fully_within_image(self, image):
"""
Estimate whether the polygon is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
bool
True if the polygon is fully inside the image area.
False otherwise.
"""
return not self.is_out_of_image(image, fully=True, partly=True)
# TODO keep this method? it is almost an alias for is_out_of_image()
def is_partly_within_image(self, image):
"""
Estimate whether the polygon is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
Returns
-------
bool
True if the polygon is at least partially inside the image area.
False otherwise.
"""
return not self.is_out_of_image(image, fully=True, partly=False)
def is_out_of_image(self, image, fully=True, partly=False):
"""
Estimate whether the polygon is partially or fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape and must contain at least two integers.
fully : bool, optional
Whether to return True if the polygon is fully outside fo the image area.
partly : bool, optional
Whether to return True if the polygon is at least partially outside fo the image area.
Returns
-------
bool
True if the polygon is partially/fully outside of the image area, depending
on defined parameters. False otherwise.
"""
if len(self.exterior) == 0:
raise Exception("Cannot determine whether the polygon is inside the image, because it contains no points.")
inside = self._compute_inside_image_point_mask(image)
nb_inside = sum(inside)
if nb_inside == len(inside):
return False
elif nb_inside > 0:
return partly
else:
return fully
def cut_out_of_image(self, image):
warnings.warn(DeprecationWarning("Polygon.cut_out_of_image() is deprecated. Use "
"Polygon.clip_out_of_image() instead. It has the exactly "
"same interface (simple renaming)."))
return self.clip_out_of_image(image)
# TODO this currently can mess up the order of points - change somehow to
# keep the order
    def clip_out_of_image(self, image):
        """
        Cut off all parts of the polygon that are outside of the image.

        This operation may lead to new points being created.
        As a single polygon may be split into multiple new polygons, the result is a MultiPolygon.

        Parameters
        ----------
        image : (H,W,...) ndarray or tuple of int
            Image dimensions to use for the clipping of the polygon.
            If an ndarray, its shape will be used.
            If a tuple, it is assumed to represent the image shape and must contain at least two integers.

        Returns
        -------
        imgaug.MultiPolygon
            Polygon, clipped to fall within the image dimensions.
            Returned as MultiPolygon, because the clipping can split the polygon into multiple parts.
        """
        # load shapely lazily, which makes the dependency more optional
        import shapely.geometry
        # if fully out of image, clip everything away, nothing remaining
        if self.is_out_of_image(image, fully=True, partly=False):
            return MultiPolygon([])
        h, w = image.shape[0:2] if is_np_array(image) else image[0:2]
        poly_shapely = self.to_shapely_polygon()
        # rectangle covering the full image area, used as the clipping region
        poly_image = shapely.geometry.Polygon([(0, 0), (w, 0), (w, h), (0, h)])
        multipoly_inter_shapely = poly_shapely.intersection(poly_image)
        # normalize: shapely returns a Polygon when the result is a single piece
        if not isinstance(multipoly_inter_shapely, shapely.geometry.MultiPolygon):
            do_assert(isinstance(multipoly_inter_shapely, shapely.geometry.Polygon))
            multipoly_inter_shapely = shapely.geometry.MultiPolygon([multipoly_inter_shapely])
        polygons = []
        for poly_inter_shapely in multipoly_inter_shapely.geoms:
            polygons.append(Polygon.from_shapely(poly_inter_shapely, label=self.label))
        # shapely changes the order of points, we try here to preserve it as good as possible:
        # rotate each resulting polygon so that it starts at (a point very close to) one of the
        # original exterior points
        polygons_reordered = []
        for polygon in polygons:
            found = False
            for x, y in self.exterior:
                closest_idx, dist = polygon.find_closest_point_index(x=x, y=y, return_distance=True)
                if dist < 1e-6:
                    polygon_reordered = polygon.change_first_point_by_index(closest_idx)
                    polygons_reordered.append(polygon_reordered)
                    found = True
                    break
            do_assert(found)  # could only not find closest points if new polys are empty
        return MultiPolygon(polygons_reordered)
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift the polygon from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the polygon from the top.
right : None or int, optional
Amount of pixels by which to shift the polygon from the right.
bottom : None or int, optional
Amount of pixels by which to shift the polygon from the bottom.
left : None or int, optional
Amount of pixels by which to shift the polygon from the left.
Returns
-------
imgaug.Polygon
Shifted polygon.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
exterior = np.copy(self.exterior)
exterior[:, 0] += (left - right)
exterior[:, 1] += (top - bottom)
return self.deepcopy(exterior=exterior)
# TODO add perimeter thickness
def draw_on_image(self,
                  image,
                  color=(0, 255, 0), color_fill=None,
                  color_perimeter=None, color_points=None,
                  alpha=1.0, alpha_fill=None,
                  alpha_perimeter=None, alpha_points=None,
                  size_points=3,
                  raise_if_out_of_image=False):
    """
    Draw the polygon on an image.

    Parameters
    ----------
    image : (H,W,C) ndarray
        The image onto which to draw the polygon. Usually expected to be
        of dtype ``uint8``, though other dtypes are also handled.

    color : iterable of int, optional
        The color to use for the whole polygon.
        Must correspond to the channel layout of the image. Usually RGB.
        The values for `color_fill`, `color_perimeter` and `color_points`
        will be derived from this color if they are set to ``None``.
        This argument has no effect if `color_fill`, `color_perimeter`
        and `color_points` are all set to anything other than ``None``.

    color_fill : None or iterable of int, optional
        The color to use for the inner polygon area (excluding perimeter).
        Must correspond to the channel layout of the image. Usually RGB.
        If this is ``None``, it will be derived from ``color * 1.0``.

    color_perimeter : None or iterable of int, optional
        The color to use for the perimeter (aka border) of the polygon.
        Must correspond to the channel layout of the image. Usually RGB.
        If this is ``None``, it will be derived from ``color * 0.5``.

    color_points : None or iterable of int, optional
        The color to use for the corner points of the polygon.
        Must correspond to the channel layout of the image. Usually RGB.
        If this is ``None``, it will be derived from ``color * 0.5``.

    alpha : float, optional
        The opacity of the whole polygon, where ``1.0`` denotes a completely
        visible polygon and ``0.0`` an invisible one.
        The values for `alpha_fill`, `alpha_perimeter` and `alpha_points`
        will be derived from this alpha value if they are set to ``None``.
        This argument has no effect if `alpha_fill`, `alpha_perimeter`
        and `alpha_points` are all set to anything other than ``None``.

    alpha_fill : None or number, optional
        The opacity of the polygon's inner area (excluding the perimeter),
        where ``1.0`` denotes a completely visible inner area and ``0.0``
        an invisible one.
        If this is ``None``, it will be derived from ``alpha * 0.5``.

    alpha_perimeter : None or number, optional
        The opacity of the polygon's perimeter (aka border),
        where ``1.0`` denotes a completely visible perimeter and ``0.0`` an
        invisible one.
        If this is ``None``, it will be derived from ``alpha * 1.0``.

    alpha_points : None or number, optional
        The opacity of the polygon's corner points, where ``1.0`` denotes
        completely visible corners and ``0.0`` invisible ones.
        If this is ``None``, it will be derived from ``alpha * 1.0``.

    size_points : int, optional
        The size of each corner point. If set to ``C``, each corner point
        will be drawn as a square of size ``C x C``.

    raise_if_out_of_image : bool, optional
        Whether to raise an error if the polygon is partially/fully
        outside of the image. If set to False, no error will be raised and
        only the parts inside the image will be drawn.

    Returns
    -------
    result : (H,W,C) ndarray
        Image with polygon drawn on it. Result dtype is the same as the input dtype.

    """
    assert color is not None
    assert alpha is not None

    # derive component colors/alphas from the base values where not given
    color_fill = color_fill if color_fill is not None else np.array(color)
    color_perimeter = color_perimeter if color_perimeter is not None else np.array(color) * 0.5
    color_points = color_points if color_points is not None else np.array(color) * 0.5

    alpha_fill = alpha_fill if alpha_fill is not None else alpha * 0.5
    alpha_perimeter = alpha_perimeter if alpha_perimeter is not None else alpha
    alpha_points = alpha_points if alpha_points is not None else alpha

    # snap near-0/near-1 alphas to exactly 0/1 so the cheap code paths
    # below (skip drawing / overwrite without blending) can be taken
    if alpha_fill < 0.01:
        alpha_fill = 0
    elif alpha_fill > 0.99:
        alpha_fill = 1
    if alpha_perimeter < 0.01:
        alpha_perimeter = 0
    elif alpha_perimeter > 0.99:
        alpha_perimeter = 1
    if alpha_points < 0.01:
        alpha_points = 0
    elif alpha_points > 0.99:
        alpha_points = 1

    # TODO separate this into draw_face_on_image() and draw_border_on_image()

    if raise_if_out_of_image and self.is_out_of_image(image):
        raise Exception("Cannot draw polygon %s on image with shape %s." % (
            str(self), image.shape
        ))

    xx = self.xx_int
    yy = self.yy_int

    # TODO np.clip to image plane if is_fully_within_image(), similar to how it is done for bounding boxes
    # TODO improve efficiency by only drawing in rectangle that covers poly instead of drawing in the whole image
    # TODO for a rectangular polygon, the face coordinates include the top/left boundary but not the right/bottom
    # boundary. This may be unintuitive when not drawing the boundary. Maybe somehow remove the boundary
    # coordinates from the face coordinates after generating both?
    params = []
    if alpha_fill > 0:
        rr, cc = skimage.draw.polygon(yy, xx, shape=image.shape)
        params.append(
            (rr, cc, color_fill, alpha_fill)
        )
    if alpha_perimeter > 0:
        rr, cc = skimage.draw.polygon_perimeter(yy, xx, shape=image.shape)
        params.append(
            (rr, cc, color_perimeter, alpha_perimeter)
        )

    input_dtype = image.dtype
    # blend in float32 to avoid overflow/rounding issues, convert back at the end
    result = image.astype(np.float32)

    for rr, cc, color_this, alpha_this in params:
        color_this = np.float32(color_this)
        # don't have to check here for alpha<=0.01, as then these
        # parameters wouldn't have been added to params
        if alpha_this >= 0.99:
            result[rr, cc, :] = color_this
        else:
            # TODO replace with blend_alpha()
            result[rr, cc, :] = (
                (1 - alpha_this) * result[rr, cc, :]
                + alpha_this * color_this
            )

    if alpha_points > 0:
        kpsoi = KeypointsOnImage.from_coords_array(self.exterior,
                                                   shape=image.shape)
        result = kpsoi.draw_on_image(
            result, color=color_points, alpha=alpha_points,
            size=size_points, copy=False,
            raise_if_out_of_image=raise_if_out_of_image)

    if input_dtype.type == np.uint8:
        result = np.clip(result, 0, 255).astype(input_dtype)  # TODO make clipping more flexible
    else:
        result = result.astype(input_dtype)

    return result
def extract_from_image(self, image):
    """
    Extract the image pixels within the polygon.

    This function will zero-pad the image if the polygon is partially/fully outside of
    the image.

    Parameters
    ----------
    image : (H,W) ndarray or (H,W,C) ndarray
        The image from which to extract the pixels within the polygon.

    Returns
    -------
    result : (H',W') ndarray or (H',W',C) ndarray
        Pixels within the polygon. Zero-padded if the polygon is partially/fully
        outside of the image.

    """
    do_assert(image.ndim in [2, 3])
    if len(self.exterior) <= 2:
        raise Exception("Polygon must be made up of at least 3 points to extract its area from an image.")

    bb = self.to_bounding_box()
    bb_area = bb.extract_from_image(image)
    if self.is_out_of_image(image, fully=True, partly=False):
        # polygon is fully outside of the image -- the (zero-padded)
        # bounding box area is all that can be returned
        return bb_area

    xx = self.xx_int
    yy = self.yy_int

    # make coordinates relative to the extracted bounding box area
    xx_mask = xx - np.min(xx)
    yy_mask = yy - np.min(yy)
    height_mask = np.max(yy_mask)
    width_mask = np.max(xx_mask)

    # NOTE(review): the mask shape uses the max coordinate instead of max+1,
    # which looks like it excludes the bottom/right boundary row/column --
    # TODO confirm intended behavior
    rr_face, cc_face = skimage.draw.polygon(yy_mask, xx_mask, shape=(height_mask, width_mask))

    # use builtin `bool` -- the `np.bool` alias is deprecated and was removed
    # in numpy >= 1.24
    mask = np.zeros((height_mask, width_mask), dtype=bool)
    mask[rr_face, cc_face] = True

    if image.ndim == 3:
        mask = np.tile(mask[:, :, np.newaxis], (1, 1, image.shape[2]))

    return bb_area * mask
def change_first_point_by_coords(self, x, y, max_distance=1e-4):
    """
    Set the first point of the exterior to the given point based on its coordinates.

    If multiple points are found, the closest one will be picked.
    If no matching points are found, an exception is raised.

    Note: This method does *not* work in-place.

    Parameters
    ----------
    x : number
        X-coordinate of the point.

    y : number
        Y-coordinate of the point.

    max_distance : None or number
        Maximum distance past which possible matches are ignored.
        If ``None`` the distance limit is deactivated.

    Returns
    -------
    imgaug.Polygon
        Copy of this polygon with the new point order.

    """
    if len(self.exterior) == 0:
        raise Exception("Cannot reorder polygon points, because it contains no points.")

    closest_idx, closest_dist = self.find_closest_point_index(x=x, y=y, return_distance=True)
    if max_distance is not None and closest_dist > max_distance:
        closest_point = self.exterior[closest_idx, :]
        # report both the measured distance and the allowed maximum
        # (the previous message was garbled and printed the distance
        # while calling it max_distance)
        raise Exception(
            "Closest found point (%.9f, %.9f) is at distance %.9f, which exceeds max_distance of %.9f" % (
                closest_point[0], closest_point[1], closest_dist, max_distance)
        )
    return self.change_first_point_by_index(closest_idx)
def change_first_point_by_index(self, point_idx):
    """
    Set the first point of the exterior to the given point based on its index.

    Note: This method does *not* work in-place.

    Parameters
    ----------
    point_idx : int
        Index of the desired starting point.

    Returns
    -------
    imgaug.Polygon
        Copy of this polygon with the new point order.

    """
    do_assert(0 <= point_idx < len(self.exterior))
    if point_idx == 0:
        # already the first point, nothing to reorder
        return self.deepcopy()
    # rotate the exterior so that the point at point_idx comes first
    rotated = np.roll(self.exterior, -point_idx, axis=0)
    return self.deepcopy(exterior=rotated)
def to_shapely_polygon(self):
    """
    Convert this polygon to a Shapely polygon.

    Returns
    -------
    shapely.geometry.Polygon
        The Shapely polygon matching this polygon's exterior.

    """
    # shapely is imported locally to keep it an optional dependency
    import shapely.geometry
    coords = [(x, y) for x, y in self.exterior]
    return shapely.geometry.Polygon(coords)
def to_shapely_line_string(self, closed=False, interpolate=0):
    """
    Convert this polygon's exterior to a Shapely LineString object.

    Parameters
    ----------
    closed : bool, optional
        Whether to return the line string with the last point being identical to the first point.

    interpolate : int, optional
        Number of points to interpolate between any pair of two consecutive points. These points are added
        to the final line string.

    Returns
    -------
    shapely.geometry.LineString
        The Shapely LineString matching the polygon's exterior.

    """
    # delegate to the module-level helper that implements the conversion
    points = self.exterior
    return _convert_points_to_shapely_line_string(
        points, closed=closed, interpolate=interpolate)
def to_bounding_box(self):
    """
    Convert this polygon to a bounding box tightly containing the whole polygon.

    Returns
    -------
    imgaug.BoundingBox
        Tight bounding box around the polygon.

    """
    xs, ys = self.xx, self.yy
    return BoundingBox(x1=min(xs), y1=min(ys),
                       x2=max(xs), y2=max(ys),
                       label=self.label)
def to_keypoints(self):
    """
    Convert this polygon's exterior vertices to ``Keypoint`` instances.

    Returns
    -------
    list of imgaug.Keypoint
        Exterior vertices as ``Keypoint`` instances.

    """
    return [Keypoint(x=x, y=y) for x, y in self.exterior]
@staticmethod
def from_shapely(polygon_shapely, label=None):
    """
    Create a polygon from a Shapely polygon.

    Note: This will remove any holes in the Shapely polygon.

    Parameters
    ----------
    polygon_shapely : shapely.geometry.Polygon
        The shapely polygon.

    label : None or str, optional
        The label of the new polygon.

    Returns
    -------
    imgaug.Polygon
        A polygon with the same exterior as the Shapely polygon.

    """
    # shapely is imported locally to keep it an optional dependency
    import shapely.geometry
    do_assert(isinstance(polygon_shapely, shapely.geometry.Polygon))

    # a shapely polygon instantiated without points has exterior None
    exterior_shapely = polygon_shapely.exterior
    if exterior_shapely is None or len(exterior_shapely.coords) == 0:
        return Polygon([], label=label)

    exterior = np.float32(list(exterior_shapely.coords))
    return Polygon(exterior, label=label)
def exterior_almost_equals(self, other_polygon, max_distance=1e-6, interpolate=8):
    """
    Estimate whether the geometry of the exterior of this polygon and another polygon are comparable.

    The two exteriors can have different numbers of points, but any point randomly sampled on the exterior
    of one polygon should be close to the closest point on the exterior of the other polygon.

    Note that this method works approximately. One can come up with polygons with fairly different shapes that
    will still be estimated as equal by this method. In practice however this should be unlikely to be the case.
    The probability for something like that goes down as the interpolation parameter is increased.

    Parameters
    ----------
    other_polygon : imgaug.Polygon or (N,2) ndarray or list of tuple
        The other polygon with which to compare the exterior.
        If this is an ndarray, it is assumed to represent an exterior.
        It must then have dtype float32 and shape (N,2) with the second dimension denoting xy-coordinates.
        If this is a list of tuples, it is assumed to represent an exterior.
        Each tuple then must contain exactly two numbers, denoting xy-coordinates.

    max_distance : number
        The maximum euclidean distance between a point on one polygon and the closest point on the other polygon.
        If the distance is exceeded for any such pair, the two exteriors are not viewed as equal.
        The points are either the points contained in the polygon's exterior ndarray or interpolated points
        between these.

    interpolate : int
        How many points to interpolate between the points of the polygon's exteriors.
        If this is set to zero, then only the points given by the polygon's exterior ndarrays will be used.
        Higher values make it less likely that unequal polygons are evaluated as equal.

    Returns
    -------
    bool
        Whether the two polygon's exteriors can be viewed as equal (approximate test).

    """
    # load shapely lazily, which makes the dependency more optional
    import shapely.geometry

    atol = max_distance

    # normalize `other_polygon` to an (N,2) coordinate array
    ext_a = self.exterior
    if isinstance(other_polygon, list):
        ext_b = np.float32(other_polygon)
    elif is_np_array(other_polygon):
        ext_b = other_polygon
    else:
        assert isinstance(other_polygon, Polygon)
        ext_b = other_polygon.exterior
    len_a = len(ext_a)
    len_b = len(ext_b)

    # trivial cases: empty exteriors are only equal to other empty exteriors
    if len_a == 0 and len_b == 0:
        return True
    elif len_a == 0 and len_b > 0:
        return False
    elif len_a > 0 and len_b == 0:
        return False

    # neither A nor B is zero-sized at this point

    # if A or B only contain points identical to the first point, merge them to one point
    if len_a > 1:
        if all([np.allclose(ext_a[0, :], ext_a[1 + i, :], rtol=0, atol=atol) for i in sm.xrange(len_a - 1)]):
            ext_a = ext_a[0:1, :]
            len_a = 1
    if len_b > 1:
        if all([np.allclose(ext_b[0, :], ext_b[1 + i, :], rtol=0, atol=atol) for i in sm.xrange(len_b - 1)]):
            ext_b = ext_b[0:1, :]
            len_b = 1

    # handle polygons that contain a single point
    if len_a == 1 and len_b == 1:
        return np.allclose(ext_a[0, :], ext_b[0, :], rtol=0, atol=atol)
    elif len_a == 1:
        # single-point A equals B only if every point of B is close to it
        return all([np.allclose(ext_a[0, :], ext_b[i, :], rtol=0, atol=atol) for i in sm.xrange(len_b)])
    elif len_b == 1:
        return all([np.allclose(ext_b[0, :], ext_a[i, :], rtol=0, atol=atol) for i in sm.xrange(len_a)])

    # After this point, both polygons have at least 2 points, i.e. LineStrings can be used.
    # We can also safely go back to the original exteriors (before close points were merged).
    ls_a = self.to_shapely_line_string(closed=True, interpolate=interpolate)
    if isinstance(other_polygon, list) or is_np_array(other_polygon):
        ls_b = _convert_points_to_shapely_line_string(
            other_polygon, closed=True, interpolate=interpolate)
    else:
        ls_b = other_polygon.to_shapely_line_string(
            closed=True, interpolate=interpolate)

    # Measure the distance from each point in A to LineString B and vice versa.
    # Make sure that no point violates the tolerance.
    # Note that we can't just use LineString.almost_equals(LineString) -- that seems to expect the same number
    # and order of points in both LineStrings (failed with duplicated points).
    for x, y in ls_a.coords:
        point = shapely.geometry.Point(x, y)
        if not ls_b.distance(point) <= max_distance:
            return False

    for x, y in ls_b.coords:
        point = shapely.geometry.Point(x, y)
        if not ls_a.distance(point) <= max_distance:
            return False

    return True
def almost_equals(self, other, max_distance=1e-6, interpolate=8):
    """
    Compare this polygon with another one and estimate whether they can be viewed as equal.

    This is the same as :func:`imgaug.Polygon.exterior_almost_equals` but additionally compares the labels.

    Parameters
    ----------
    other
        The object to compare against. If not a Polygon, then False will be returned.

    max_distance : float
        See :func:`imgaug.Polygon.exterior_almost_equals`.

    interpolate : int
        See :func:`imgaug.Polygon.exterior_almost_equals`.

    Returns
    -------
    bool
        Whether the two polygons can be viewed as equal. In the case of the exteriors this is an approximate test.

    """
    if not isinstance(other, Polygon):
        return False
    # labels must match exactly; None only matches None
    if self.label != other.label:
        return False
    return self.exterior_almost_equals(
        other, max_distance=max_distance, interpolate=interpolate)
def copy(self, exterior=None, label=None):
    """
    Create a shallow copy of the Polygon object.

    Parameters
    ----------
    exterior : list of imgaug.Keypoint or list of tuple or (N,2) ndarray, optional
        List of points defining the polygon. See :func:`imgaug.Polygon.__init__` for details.

    label : None or str, optional
        If not None, then the label of the copied object will be set to this value.

    Returns
    -------
    imgaug.Polygon
        Shallow copy.

    """
    # a Polygon holds no nested mutable state beyond the exterior array,
    # so the shallow copy simply delegates to deepcopy
    return self.deepcopy(exterior, label)
def deepcopy(self, exterior=None, label=None):
    """
    Create a deep copy of the Polygon object.

    Parameters
    ----------
    exterior : list of Keypoint or list of tuple or (N,2) ndarray, optional
        List of points defining the polygon. See `imgaug.Polygon.__init__` for details.

    label : None or str
        If not None, then the label of the copied object will be set to this value.

    Returns
    -------
    imgaug.Polygon
        Deep copy.

    """
    if exterior is None:
        # copy the coordinate array so the new polygon is independent
        exterior = np.copy(self.exterior)
    if label is None:
        label = self.label
    return Polygon(exterior=exterior, label=label)
def __repr__(self):
    # the repr is identical to the human-readable string form
    return str(self)
def __str__(self):
    # e.g. "Polygon([(x=0.000, y=0.000), ...] (4 points), label=None)"
    points_str = ", ".join(
        "(x=%.3f, y=%.3f)" % (x, y) for x, y in self.exterior)
    return "Polygon([%s] (%d points), label=%s)" % (
        points_str, len(self.exterior), self.label)
class PolygonsOnImage(object):
    """
    Object that represents all polygons on a single image.

    Parameters
    ----------
    polygons : list of imgaug.Polygon
        List of polygons on the image.

    shape : tuple of int
        The shape of the image on which the polygons are placed.

    Examples
    --------
    >>> import numpy as np
    >>> import imgaug as ia
    >>> image = np.zeros((100, 100))
    >>> polys = [
    >>>     ia.Polygon([(0, 0), (100, 0), (100, 100), (0, 100)]),
    >>>     ia.Polygon([(50, 0), (100, 50), (50, 100), (0, 50)])
    >>> ]
    >>> polys_oi = ia.PolygonsOnImage(polys, shape=image.shape)

    """

    def __init__(self, polygons, shape):
        self.polygons = polygons
        # `shape` may also be given as an image; use that image's shape then
        if is_np_array(shape):
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)

    @property
    def empty(self):
        """
        Returns whether this object contains zero polygons.

        Returns
        -------
        bool
            True if this object contains zero polygons.

        """
        return len(self.polygons) == 0

    def on(self, image):
        """
        Project polygons from one image to a new one.

        Parameters
        ----------
        image : ndarray or tuple of int
            New image onto which the polygons are to be projected.
            May also simply be that new image's shape tuple.

        Returns
        -------
        imgaug.PolygonsOnImage
            Object containing all projected polygons.

        """
        if is_np_array(image):
            shape = image.shape
        else:
            shape = image

        # only height/width matter for projection; equal sizes need no change
        if shape[0:2] == self.shape[0:2]:
            return self.deepcopy()
        else:
            polygons = [poly.project(self.shape, shape) for poly in self.polygons]
            return PolygonsOnImage(polygons, shape)

    def draw_on_image(self,
                      image,
                      color=(0, 255, 0), color_fill=None,
                      color_perimeter=None, color_points=None,
                      alpha=1.0, alpha_fill=None,
                      alpha_perimeter=None, alpha_points=None,
                      size_points=3,
                      raise_if_out_of_image=False):
        """
        Draw all polygons onto a given image.

        Parameters
        ----------
        image : (H,W,C) ndarray
            The image onto which to draw the bounding boxes.
            This image should usually have the same shape as set in
            ``PolygonsOnImage.shape``.

        color : iterable of int, optional
            The color to use for the whole polygons.
            Must correspond to the channel layout of the image. Usually RGB.
            The values for `color_fill`, `color_perimeter` and `color_points`
            will be derived from this color if they are set to ``None``.
            This argument has no effect if `color_fill`, `color_perimeter`
            and `color_points` are all set to anything other than ``None``.

        color_fill : None or iterable of int, optional
            The color to use for the inner polygon areas (excluding perimeters).
            Must correspond to the channel layout of the image. Usually RGB.
            If this is ``None``, it will be derived from ``color * 1.0``.

        color_perimeter : None or iterable of int, optional
            The color to use for the perimeters (aka borders) of the polygons.
            Must correspond to the channel layout of the image. Usually RGB.
            If this is ``None``, it will be derived from ``color * 0.5``.

        color_points : None or iterable of int, optional
            The color to use for the corner points of the polygons.
            Must correspond to the channel layout of the image. Usually RGB.
            If this is ``None``, it will be derived from ``color * 0.5``.

        alpha : float, optional
            The opacity of the whole polygons, where ``1.0`` denotes
            completely visible polygons and ``0.0`` invisible ones.
            The values for `alpha_fill`, `alpha_perimeter` and `alpha_points`
            will be derived from this alpha value if they are set to ``None``.
            This argument has no effect if `alpha_fill`, `alpha_perimeter`
            and `alpha_points` are all set to anything other than ``None``.

        alpha_fill : None or number, optional
            The opacity of the polygon's inner areas (excluding the perimeters),
            where ``1.0`` denotes completely visible inner areas and ``0.0``
            invisible ones.
            If this is ``None``, it will be derived from ``alpha * 0.5``.

        alpha_perimeter : None or number, optional
            The opacity of the polygon's perimeters (aka borders),
            where ``1.0`` denotes completely visible perimeters and ``0.0``
            invisible ones.
            If this is ``None``, it will be derived from ``alpha * 1.0``.

        alpha_points : None or number, optional
            The opacity of the polygon's corner points, where ``1.0`` denotes
            completely visible corners and ``0.0`` invisible ones.
            Currently this is an on/off choice, i.e. only ``0.0`` or ``1.0``
            are allowed.
            If this is ``None``, it will be derived from ``alpha * 1.0``.

        size_points : int, optional
            The size of all corner points. If set to ``C``, each corner point
            will be drawn as a square of size ``C x C``.

        raise_if_out_of_image : bool, optional
            Whether to raise an error if any polygon is partially/fully
            outside of the image. If set to False, no error will be raised and
            only the parts inside the image will be drawn.

        Returns
        -------
        image : (H,W,C) ndarray
            Image with drawn polygons.

        """
        # delegate to Polygon.draw_on_image(), polygon by polygon
        for poly in self.polygons:
            image = poly.draw_on_image(
                image,
                color=color,
                color_fill=color_fill,
                color_perimeter=color_perimeter,
                color_points=color_points,
                alpha=alpha,
                alpha_fill=alpha_fill,
                alpha_perimeter=alpha_perimeter,
                alpha_points=alpha_points,
                size_points=size_points,
                raise_if_out_of_image=raise_if_out_of_image
            )

        return image

    def remove_out_of_image(self, fully=True, partly=False):
        """
        Remove all polygons that are fully or partially outside of the image.

        Parameters
        ----------
        fully : bool, optional
            Whether to remove polygons that are fully outside of the image.

        partly : bool, optional
            Whether to remove polygons that are partially outside of the image.

        Returns
        -------
        imgaug.PolygonsOnImage
            Reduced set of polygons, with those that were fully/partially
            outside of the image removed.

        """
        polys_clean = [
            poly for poly in self.polygons
            if not poly.is_out_of_image(self.shape, fully=fully, partly=partly)
        ]
        return PolygonsOnImage(polys_clean, shape=self.shape)

    def clip_out_of_image(self):
        """
        Clip off all parts from all polygons that are outside of the image.

        NOTE: The result can contain less polygons than the input did. That
        happens when a polygon is fully outside of the image plane.

        NOTE: The result can also contain *more* polygons than the input
        did. That happens when distinct parts of a polygon are only
        connected by areas that are outside of the image plane and hence will
        be clipped off, resulting in two or more unconnected polygon parts that
        are left in the image plane.

        Returns
        -------
        imgaug.PolygonsOnImage
            Polygons, clipped to fall within the image dimensions. Count of
            output polygons may differ from the input count.

        """
        # each clip may yield 0..n sub-polygons per input polygon
        polys_cut = [
            poly.clip_out_of_image(self.shape).geoms
            for poly
            in self.polygons
            if poly.is_partly_within_image(self.shape)
        ]
        # flatten the list of per-polygon result lists
        polys_cut_flat = [poly for poly_lst in polys_cut for poly in poly_lst]
        return PolygonsOnImage(polys_cut_flat, shape=self.shape)

    def shift(self, top=None, right=None, bottom=None, left=None):
        """
        Shift all polygons from one or more image sides, i.e. move them on the x/y-axis.

        Parameters
        ----------
        top : None or int, optional
            Amount of pixels by which to shift all polygons from the top.

        right : None or int, optional
            Amount of pixels by which to shift all polygons from the right.

        bottom : None or int, optional
            Amount of pixels by which to shift all polygons from the bottom.

        left : None or int, optional
            Amount of pixels by which to shift all polygons from the left.

        Returns
        -------
        imgaug.PolygonsOnImage
            Shifted polygons.

        """
        polys_new = [
            poly.shift(top=top, right=right, bottom=bottom, left=left)
            for poly
            in self.polygons
        ]
        return PolygonsOnImage(polys_new, shape=self.shape)

    def copy(self):
        """
        Create a shallow copy of the PolygonsOnImage object.

        Returns
        -------
        imgaug.PolygonsOnImage
            Shallow copy.

        """
        return copy.copy(self)

    def deepcopy(self):
        """
        Create a deep copy of the PolygonsOnImage object.

        Returns
        -------
        imgaug.PolygonsOnImage
            Deep copy.

        """
        # Manual copy is far faster than deepcopy for PolygonsOnImage,
        # so use manual copy here too
        polys = [poly.deepcopy() for poly in self.polygons]
        return PolygonsOnImage(polys, tuple(self.shape))

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "PolygonsOnImage(%s, shape=%s)" % (str(self.polygons), self.shape)
def _convert_points_to_shapely_line_string(points, closed=False, interpolate=0):
    """Build a shapely LineString from an iterable of xy-points."""
    # shapely is imported locally to keep it an optional dependency
    import shapely.geometry

    if len(points) <= 1:
        raise Exception(
            ("Conversion to shapely line string requires at least two points, but points input contains "
             "only %d points.") % (len(points),)
        )

    points_tuples = [(p[0], p[1]) for p in points]

    # densify the line with additional points between consecutive pairs
    if interpolate > 0:
        points_tuples = _interpolate_points(points_tuples, interpolate)

    # close if requested and not yet closed
    if closed and len(points) > 1:  # here intentionally used points instead of points_tuples
        points_tuples.append(points_tuples[0])

    return shapely.geometry.LineString(points_tuples)
def _interpolate_point_pair(point_a, point_b, nb_steps):
    """Return `nb_steps` evenly spaced points strictly between two points."""
    if nb_steps < 1:
        # nothing to insert between the two points
        return []
    x1, y1 = point_a
    x2, y2 = point_b
    # step vector so that nb_steps points split the segment into nb_steps+1 parts
    step = np.float32([x2 - x1, y2 - y1]) / (1 + nb_steps)
    return [
        (x1 + (i + 1) * step[0], y1 + (i + 1) * step[1])
        for i in sm.xrange(nb_steps)
    ]
def _interpolate_points(points, nb_steps, closed=True):
    """Insert `nb_steps` interpolated points between consecutive points."""
    if len(points) <= 1:
        return points
    if closed:
        # duplicate the first point at the end, so the wrap-around segment
        # between the last and the first point is interpolated too
        points = list(points) + [points[0]]
    points_interp = []
    for point_a, point_b in zip(points[:-1], points[1:]):
        points_interp.append(point_a)
        points_interp.extend(_interpolate_point_pair(point_a, point_b, nb_steps))
    if not closed:
        points_interp.append(points[-1])
    # the closing duplicate does not have to be removed again, as the loop
    # above never appends the final point of the (extended) list
    return points_interp
def _interpolate_points_by_max_distance(points, max_distance, closed=True):
    """Interpolate points so that consecutive points are at most `max_distance` apart."""
    do_assert(max_distance > 0, "max_distance must have value greater than 0, got %.8f" % (max_distance,))
    if len(points) <= 1:
        return points
    if closed:
        # include the wrap-around segment between last and first point
        points = list(points) + [points[0]]
    points_interp = []
    for point_a, point_b in zip(points[:-1], points[1:]):
        dx = point_a[0] - point_b[0]
        dy = point_a[1] - point_b[1]
        dist = np.sqrt(dx ** 2 + dy ** 2)
        # number of inserted points needed so no gap exceeds max_distance
        nb_steps = int((dist / max_distance) - 1)
        points_interp.append(point_a)
        points_interp.extend(_interpolate_point_pair(point_a, point_b, nb_steps))
    if not closed:
        points_interp.append(points[-1])
    return points_interp
class _ConcavePolygonRecoverer(object):
    """
    Helper to derive a valid polygon from a new, possibly invalid exterior
    for an existing polygon (see ``recover_from()``).
    """

    def __init__(self, threshold_duplicate_points=1e-4, noise_strength=1e-4,
                 oversampling=0.01, max_segment_difference=1e-4):
        # points closer to each other than this distance count as duplicates
        self.threshold_duplicate_points = threshold_duplicate_points
        # base magnitude of the random jitter applied to degenerate points
        self.noise_strength = noise_strength
        self.oversampling = oversampling
        self.max_segment_difference = max_segment_difference

        # this limits the maximum amount of points after oversampling, i.e.
        # if N points are input into oversampling, then M oversampled points are
        # generated such that N+M <= this value
        self.oversample_up_to_n_points_max = 75

        # ----
        # parameters for _fit_best_valid_polygon()
        # ----
        # how many changes may be done max to the initial (convex hull) polygon
        # before simply returning the result
        self.fit_n_changes_max = 100
        # for how many iterations the optimization loop may run max
        # before simply returning the result
        self.fit_n_iters_max = 3
        # how far (wrt. to their position in the input list) two points may be
        # apart max to consider adding an edge between them (in the first loop
        # iteration and the ones after that)
        self.fit_max_dist_first_iter = 1
        self.fit_max_dist_other_iters = 2
        # The fit loop first generates candidate edges and then modifies the
        # polygon based on these candidates. This limits the maximum amount
        # of considered candidates. If the number is less than the possible
        # number of candidates, they are randomly subsampled. Values beyond
        # 100 significantly increase runtime (for polygons that reach that
        # number).
        self.fit_n_candidates_before_sort_max = 100
def recover_from(self, new_exterior, old_polygon, random_state=0):
    """
    Recover a valid polygon from a new, possibly invalid exterior.

    Parameters
    ----------
    new_exterior : (N,2) ndarray or list of tuple of number
        The new exterior coordinates; must contain at least 3 points.

    old_polygon : imgaug.Polygon
        Polygon whose other attributes (e.g. label) are taken over via
        ``deepcopy(exterior=...)``.

    random_state : int or numpy.random.RandomState, optional
        Seed or random state used for the jitter/noise steps.

    Returns
    -------
    imgaug.Polygon
        Polygon based on `new_exterior`, repaired if it was invalid.

    """
    assert isinstance(new_exterior, list) or (
        is_np_array(new_exterior)
        and new_exterior.ndim == 2
        and new_exterior.shape[1] == 2)
    assert len(new_exterior) >= 3, \
        "Cannot recover a concave polygon from less than three points."

    # create Polygon instance, if it is already valid then just return
    # immediately
    polygon = old_polygon.deepcopy(exterior=new_exterior)
    if polygon.is_valid:
        return polygon

    if not isinstance(random_state, np.random.RandomState):
        random_state = np.random.RandomState(random_state)
    rss = derive_random_states(random_state, 3)

    # remove consecutive duplicate points
    new_exterior = self._remove_consecutive_duplicate_points(new_exterior)

    # check that points are not all identical or on a line
    new_exterior = self._fix_polygon_is_line(new_exterior, rss[0])

    # jitter duplicate points
    new_exterior = self._jitter_duplicate_points(new_exterior, rss[1])

    # generate intersection points
    segment_add_points = self._generate_intersection_points(new_exterior)

    # oversample points around intersections
    if self.oversampling is not None and self.oversampling > 0:
        segment_add_points = self._oversample_intersection_points(
            new_exterior, segment_add_points)

    # integrate new points into exterior
    new_exterior_inter = self._insert_intersection_points(
        new_exterior, segment_add_points)

    # find best fit polygon, starting from convex polygon
    new_exterior_concave_ids = self._fit_best_valid_polygon(new_exterior_inter, rss[2])
    new_exterior_concave = [new_exterior_inter[idx] for idx in new_exterior_concave_ids]

    # TODO return new_exterior_concave here instead of polygon, leave it to
    # caller to decide what to do with it
    return old_polygon.deepcopy(exterior=new_exterior_concave)
def _remove_consecutive_duplicate_points(self, points):
    """Drop points that are (nearly) identical to their predecessor."""
    threshold = self.threshold_duplicate_points

    def _is_close(p, q):
        return np.linalg.norm(np.float32(p) - np.float32(q)) < threshold

    kept = []
    for point in points:
        if not kept or not _is_close(point, kept[-1]):
            kept.append(point)

    # the exterior is implicitly closed, so the last point may also be a
    # duplicate of the first one
    if len(kept) >= 2 and _is_close(kept[0], kept[-1]):
        kept = kept[:-1]
    return kept
# fix polygons for which all points are on a line
def _fix_polygon_is_line(self, exterior, random_state):
    """Jitter a degenerate (collinear) exterior until it spans an area."""
    assert len(exterior) >= 3
    strength = self.noise_strength
    while self._is_polygon_line(exterior):
        # add random offsets; strength grows per attempt so the loop terminates
        offsets = random_state.uniform(
            -strength, strength, size=(len(exterior), 2)
        ).astype(np.float32)
        exterior = [(point[0] + off[0], point[1] + off[1])
                    for point, off in zip(exterior, offsets)]
        strength = strength * 10
        assert strength > 0
    return exterior
@classmethod
def _is_polygon_line(cls, exterior):
    """Return whether all exterior points lie (almost) on a single line."""
    vec_down = np.float32([0, 1])
    first = exterior[0]
    angles = set()
    for other in exterior[1:]:
        vec = np.float32(other) - np.float32(first)
        # bucket the angle (~0.001 rad resolution) to tolerate fp noise
        angle = angle_between_vectors(vec_down, vec)
        angles.add(int(angle * 1000))
    # collinear points produce a single (bucketed) angle
    return len(angles) <= 1
def _jitter_duplicate_points(self, exterior, random_state):
def _find_duplicates(exterior_with_duplicates):
points_map = collections.defaultdict(list)
for i, point in enumerate(exterior_with_duplicates):
# we use 10/x here to be a bit more lenient, the precise
# distance test is further below
x = int(np.round(point[0] * ((1/10) / self.threshold_duplicate_points)))
y = int(np.round(point[1] * ((1/10) / self.threshold_duplicate_points)))
for d0 in [-1, 0, 1]:
for d1 in [-1, 0, 1]:
points_map[(x+d0, y+d1)].append(i)
duplicates = [False] * len(exterior_with_duplicates)
for key in points_map:
candidates = points_map[key]
for i in range(len(candidates)):
p0_idx = candidates[i]
p0 = exterior_with_duplicates[p0_idx]
if duplicates[p0_idx]:
continue
for j in range(i+1, len(candidates)):
p1_idx = candidates[j]
p1 = exterior_with_duplicates[p1_idx]
if duplicates[p1_idx]:
continue
dist = np.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)
if dist < self.threshold_duplicate_points:
duplicates[p1_idx] = True
return duplicates
noise_strength = self.noise_strength
assert noise_strength > 0
exterior = exterior[:]
converged = False
while not converged:
duplicates = _find_duplicates(exterior)
if any(duplicates):
noise = random_state.uniform(
-self.noise_strength, self.noise_strength, size=(len(exterior), 2)
).astype(np.float32)
for i, is_duplicate in enumerate(duplicates):
if is_duplicate:
exterior[i] = (exterior[i][0] + noise[i][0], exterior[i][1] + noise[i][1])
noise_strength *= 10
else:
converged = True
return exterior
# TODO remove?
@classmethod
def _calculate_circumference(cls, points):
assert len(points) >= 3
points = np.array(points, dtype=np.float32)
points_matrix = np.zeros((len(points), 4), dtype=np.float32)
points_matrix[:, 0:2] = points
points_matrix[0:-1, 2:4] = points_matrix[1:, 0:2]
points_matrix[-1, 2:4] = points_matrix[0, 0:2]
distances = np.linalg.norm(
points_matrix[:, 0:2] - points_matrix[:, 2:4], axis=1)
return np.sum(distances)
    def _generate_intersection_points(self, exterior, one_point_per_intersection=True):
        """Compute the self-intersection points of a polygon exterior.

        Parameters
        ----------
        exterior : list of tuple of number
            Polygon points; each consecutive pair (wrapping around at the
            end) forms one segment.
        one_point_per_intersection : bool, optional
            If True, each intersection point is assigned to only one of
            its associated segments (the first one), even if several
            segments run through it.

        Returns
        -------
        list of list of tuple of number
            One list per segment, containing the intersection points found
            on that segment, sorted by distance to the segment's start
            point.
        """
        assert isinstance(exterior, list)
        assert all([len(point) == 2 for point in exterior])
        if len(exterior) <= 0:
            return []
        # use (*[i][0], *[i][1]) formulation here instead of just *[i],
        # because this way we convert numpy arrays to tuples of floats, which
        # is required by isect_segments_include_segments
        segments = [
            (
                (exterior[i][0], exterior[i][1]),
                (exterior[(i + 1) % len(exterior)][0], exterior[(i + 1) % len(exterior)][1])
            )
            for i in range(len(exterior))
        ]
        # returns [(point, [(segment_p0, segment_p1), ..]), ...]
        from imgaug.external.poly_point_isect_py2py3 import isect_segments_include_segments
        intersections = isect_segments_include_segments(segments)
        # estimate to which segment the found intersection points belong
        segments_add_points = [[] for _ in range(len(segments))]
        for point, associated_segments in intersections:
            # the intersection point may be associated with multiple segments,
            # but we only want to add it once, so pick the first segment
            if one_point_per_intersection:
                associated_segments = [associated_segments[0]]
            for seg_inter_p0, seg_inter_p1 in associated_segments:
                diffs = []
                dists = []
                for seg_p0, seg_p1 in segments:
                    # compare the segment with the associated segment in both
                    # orientations, as the library may return them reversed
                    dist_p0p0 = np.linalg.norm(seg_p0 - np.array(seg_inter_p0))
                    dist_p1p1 = np.linalg.norm(seg_p1 - np.array(seg_inter_p1))
                    dist_p0p1 = np.linalg.norm(seg_p0 - np.array(seg_inter_p1))
                    dist_p1p0 = np.linalg.norm(seg_p1 - np.array(seg_inter_p0))
                    diff = min(dist_p0p0 + dist_p1p1, dist_p0p1 + dist_p1p0)
                    diffs.append(diff)
                    # distance between segment start and intersection point,
                    # used for the per-segment sorting further below
                    dists.append(np.linalg.norm(
                        (seg_p0[0] - point[0], seg_p0[1] - point[1])
                    ))
                min_diff = np.min(diffs)
                if min_diff < self.max_segment_difference:
                    idx = int(np.argmin(diffs))
                    segments_add_points[idx].append((point, dists[idx]))
                else:
                    warnings.warn(
                        "Couldn't find fitting segment in "
                        "_generate_intersection_points(). Ignoring intersection "
                        "point.")
        # sort intersection points by their distance to point 0 in each segment
        # (clockwise ordering, this does something only for segments with
        # >=2 intersection points)
        segment_add_points_sorted = []
        for idx in range(len(segments_add_points)):
            points = [t[0] for t in segments_add_points[idx]]
            dists = [t[1] for t in segments_add_points[idx]]
            if len(points) < 2:
                segment_add_points_sorted.append(points)
            else:
                both = sorted(zip(points, dists), key=lambda t: t[1])
                # keep points, drop distances
                segment_add_points_sorted.append([a for a, _b in both])
        return segment_add_points_sorted
    def _oversample_intersection_points(self, exterior, segment_add_points):
        """Place additional support points around intersection points.

        For each intersection point, extra points are interpolated shortly
        before and after it along the segment (at relative distance
        ``self.oversampling``), giving the later polygon fitting more
        points to work with close to the intersections.

        `segment_add_points` must be sorted by distance to each segment's
        start point (as produced by ``_generate_intersection_points``).
        Returns the enriched per-segment point lists, which include the
        original intersection points.
        """
        # segment_add_points must be sorted
        if self.oversampling is None or self.oversampling <= 0:
            return segment_add_points
        segment_add_points_sorted_overs = [[] for _ in range(len(segment_add_points))]
        n_points = len(exterior)
        for i in range(len(exterior)):
            last = exterior[i]
            for j, p_inter in enumerate(segment_add_points[i]):
                direction = (p_inter[0] - last[0], p_inter[1] - last[1])
                if j == 0:
                    # previous point was non-intersection, place 1 new point
                    oversample = [1.0 - self.oversampling]
                else:
                    # previous point was intersection, place 2 new points
                    oversample = [self.oversampling, 1.0 - self.oversampling]
                for dist in oversample:
                    # interpolate between `last` and the intersection point
                    point_over = (last[0] + dist * direction[0],
                                  last[1] + dist * direction[1])
                    segment_add_points_sorted_overs[i].append(point_over)
                segment_add_points_sorted_overs[i].append(p_inter)
                last = p_inter
                is_last_in_group = (j == len(segment_add_points[i]) - 1)
                if is_last_in_group:
                    # previous point was oversampled, next point is
                    # non-intersection, place 1 new point between the two
                    exterior_point = exterior[(i + 1) % len(exterior)]
                    direction = (exterior_point[0] - last[0],
                                 exterior_point[1] - last[1])
                    segment_add_points_sorted_overs[i].append(
                        (last[0] + self.oversampling * direction[0],
                         last[1] + self.oversampling * direction[1])
                    )
                    last = segment_add_points_sorted_overs[i][-1]
            n_points += len(segment_add_points_sorted_overs[i])
            # cap the total point count: stop oversampling the remaining
            # segments once the limit is exceeded (points generated so far
            # are kept)
            if n_points > self.oversample_up_to_n_points_max:
                return segment_add_points_sorted_overs
        return segment_add_points_sorted_overs
@classmethod
def _insert_intersection_points(cls, exterior, segment_add_points):
# segment_add_points must be sorted
assert len(exterior) == len(segment_add_points)
exterior_interp = []
for i in range(len(exterior)):
p0 = exterior[i]
exterior_interp.append(p0)
for j, p_inter in enumerate(segment_add_points[i]):
exterior_interp.append(p_inter)
return exterior_interp
def _fit_best_valid_polygon(self, points, random_state):
if len(points) < 2:
return None
def _compute_distance_point_to_line(point, line_start, line_end):
x_diff = line_end[0] - line_start[0]
y_diff = line_end[1] - line_start[1]
num = abs(
y_diff*point[0] - x_diff*point[1]
+ line_end[0]*line_start[1] - line_end[1]*line_start[0]
)
den = np.sqrt(y_diff**2 + x_diff**2)
if den == 0:
return np.sqrt((point[0] - line_start[0])**2 + (point[1] - line_start[1])**2)
return num / den
poly = Polygon(points)
if poly.is_valid:
return sm.xrange(len(points))
hull = scipy.spatial.ConvexHull(points)
points_kept = list(hull.vertices)
points_left = [i for i in range(len(points)) if i not in hull.vertices]
iteration = 0
n_changes = 0
converged = False
while not converged:
candidates = []
# estimate distance metrics for points-segment pairs:
# (1) distance (in vertices) between point and segment-start-point
# in original input point chain
# (2) euclidean distance between point and segment/line
# TODO this can be done more efficiently by caching the values and
# only computing distances to segments that have changed in
# the last iteration
# TODO these distances are not really the best metrics here. Something
# like IoU between new and old (invalid) polygon would be
# better, but can probably only be computed for pairs of valid
# polygons. Maybe something based on pointwise distances,
# where the points are sampled on the edges (not edge vertices
# themselves). Maybe something based on drawing the perimeter
# on images or based on distance maps.
point_kept_idx_to_pos = {point_idx: i for i, point_idx in enumerate(points_kept)}
# generate all possible combinations from <points_kept> and <points_left>
combos = np.transpose([np.tile(np.int32(points_left), len(np.int32(points_kept))),
np.repeat(np.int32(points_kept), len(np.int32(points_left)))])
combos = np.concatenate(
(combos, np.zeros((combos.shape[0], 3), dtype=np.int32)),
axis=1)
# copy columns 0, 1 into 2, 3 so that 2 is always the lower value
mask = combos[:, 0] < combos[:, 1]
combos[:, 2:4] = combos[:, 0:2]
combos[mask, 2] = combos[mask, 1]
combos[mask, 3] = combos[mask, 0]
# distance (in indices) between each pair of <point_kept> and <point_left>
combos[:, 4] = np.minimum(
combos[:, 3] - combos[:, 2],
len(points) - combos[:, 3] + combos[:, 2]
)
# limit candidates
max_dist = self.fit_max_dist_other_iters
if iteration > 0:
max_dist = self.fit_max_dist_first_iter
candidate_rows = combos[combos[:, 4] <= max_dist]
if self.fit_n_candidates_before_sort_max is not None \
and len(candidate_rows) > self.fit_n_candidates_before_sort_max:
random_state.shuffle(candidate_rows)
candidate_rows = candidate_rows[0:self.fit_n_candidates_before_sort_max]
for row in candidate_rows:
point_left_idx = row[0]
point_kept_idx = row[1]
in_points_kept_pos = point_kept_idx_to_pos[point_kept_idx]
segment_start_idx = point_kept_idx
segment_end_idx = points_kept[(in_points_kept_pos+1) % len(points_kept)]
segment_start = points[segment_start_idx]
segment_end = points[segment_end_idx]
if iteration == 0:
dist_eucl = 0
else:
dist_eucl = _compute_distance_point_to_line(
points[point_left_idx], segment_start, segment_end)
candidates.append((point_left_idx, point_kept_idx, row[4], dist_eucl))
# Sort computed distances first by minimal vertex-distance (see
# above, metric 1) (ASC), then by euclidean distance
# (metric 2) (ASC).
candidate_ids = np.arange(len(candidates))
candidate_ids = sorted(candidate_ids, key=lambda idx: (candidates[idx][2], candidates[idx][3]))
if self.fit_n_changes_max is not None:
candidate_ids = candidate_ids[:self.fit_n_changes_max]
# Iterate over point-segment pairs in sorted order. For each such
# candidate: Add the point to the already collected points,
# create a polygon from that and check if the polygon is valid.
# If it is, add the point to the output list and recalculate
# distance metrics. If it isn't valid, proceed with the next
# candidate until no more candidates are left.
#
# small change: this now no longer breaks upon the first found point
# that leads to a valid polygon, but checks all candidates instead
is_valid = False
done = set()
for candidate_idx in candidate_ids:
point_left_idx = candidates[candidate_idx][0]
point_kept_idx = candidates[candidate_idx][1]
if (point_left_idx, point_kept_idx) not in done:
in_points_kept_idx = [i for i, point_idx in enumerate(points_kept) if point_idx == point_kept_idx][0]
points_kept_hypothesis = points_kept[:]
points_kept_hypothesis.insert(in_points_kept_idx+1, point_left_idx)
poly_hypothesis = Polygon([points[idx] for idx in points_kept_hypothesis])
if poly_hypothesis.is_valid:
is_valid = True
points_kept = points_kept_hypothesis
points_left = [point_idx for point_idx in points_left if point_idx != point_left_idx]
n_changes += 1
if n_changes >= self.fit_n_changes_max:
return points_kept
done.add((point_left_idx, point_kept_idx))
done.add((point_kept_idx, point_left_idx))
# none of the left points could be used to create a valid polygon?
# (this automatically covers the case of no points being left)
if not is_valid and iteration > 0:
converged = True
iteration += 1
if self.fit_n_iters_max is not None and iteration > self.fit_n_iters_max:
break
return points_kept
class MultiPolygon(object):
    """
    Collection wrapper around several polygons.
    Parameters
    ----------
    geoms : list of imgaug.Polygon
        The polygons contained in this collection. May be empty.
    """
    def __init__(self, geoms):
        """Create a new MultiPolygon instance."""
        do_assert(not geoms or all(isinstance(geom, Polygon) for geom in geoms))
        self.geoms = geoms
    @staticmethod
    def from_shapely(geometry, label=None):
        """
        Create a MultiPolygon from a Shapely geometry.
        Accepts a Shapely MultiPolygon, a Shapely Polygon or a Shapely
        GeometryCollection (containing only Polygons) and converts all
        contained polygons to ``imgaug.Polygon`` instances.
        Parameters
        ----------
        geometry : shapely.geometry.MultiPolygon or shapely.geometry.Polygon\
                   or shapely.geometry.collection.GeometryCollection
            The object to convert to a MultiPolygon.
        label : None or str, optional
            A label assigned to all Polygons within the MultiPolygon.
        Returns
        -------
        imgaug.MultiPolygon
            The derived MultiPolygon.
        """
        # load shapely lazily, which makes the dependency more optional
        import shapely.geometry
        if isinstance(geometry, shapely.geometry.Polygon):
            return MultiPolygon([Polygon.from_shapely(geometry, label=label)])
        if isinstance(geometry, shapely.geometry.MultiPolygon):
            return MultiPolygon([
                Polygon.from_shapely(geom, label=label)
                for geom in geometry.geoms])
        if isinstance(geometry, shapely.geometry.collection.GeometryCollection):
            # collections may only contain polygons here
            do_assert(all(isinstance(geom, shapely.geometry.Polygon)
                          for geom in geometry.geoms))
            return MultiPolygon([
                Polygon.from_shapely(geom, label=label)
                for geom in geometry.geoms])
        raise Exception("Unknown datatype '%s'. Expected shapely.geometry.Polygon or "
                        "shapely.geometry.MultiPolygon or "
                        "shapely.geometry.collections.GeometryCollection." % (type(geometry),))
class HeatmapsOnImage(object):
    """
    Object representing heatmaps on images.
    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Array representing the heatmap(s).
        Must be of dtype float32.
        If multiple heatmaps are provided, then ``C`` is expected to denote their number.
    shape : tuple of int
        Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
        heatmap(s) array, unless it is identical to the image shape (note the likely
        difference between the arrays in the number of channels).
        If there is not a corresponding image, use the shape of the heatmaps array.
    min_value : float, optional
        Minimum value for the heatmaps that `arr` represents. This will usually be ``0.0``.
    max_value : float, optional
        Maximum value for the heatmaps that `arr` represents. This will usually be ``1.0``.
    """
    def __init__(self, arr, shape, min_value=0.0, max_value=1.0):
        """Construct a new HeatmapsOnImage object."""
        do_assert(is_np_array(arr), "Expected numpy array as heatmap input array, got type %s" % (type(arr),))
        # TODO maybe allow 0-sized heatmaps? in that case the min() and max() must be adjusted
        do_assert(arr.shape[0] > 0 and arr.shape[1] > 0,
                  "Expected numpy array as heatmap with height and width greater than 0, got shape %s." % (arr.shape,))
        do_assert(arr.dtype.type in [np.float32],
                  "Heatmap input array expected to be of dtype float32, got dtype %s." % (arr.dtype,))
        do_assert(arr.ndim in [2, 3], "Heatmap input array must be 2d or 3d, got shape %s." % (arr.shape,))
        do_assert(len(shape) in [2, 3],
                  "Argument 'shape' in HeatmapsOnImage expected to be 2d or 3d, got shape %s." % (shape,))
        do_assert(min_value < max_value)
        # only spot-check the first 50 array values to keep validation cheap
        do_assert(np.min(arr.flat[0:50]) >= min_value - np.finfo(arr.dtype).eps,
                  ("Value range of heatmap was chosen to be (%.8f, %.8f), but found value below minimum in first "
                   + "50 heatmap array values.") % (min_value, max_value))
        do_assert(np.max(arr.flat[0:50]) <= max_value + np.finfo(arr.dtype).eps,
                  ("Value range of heatmap was chosen to be (%.8f, %.8f), but found value above maximum in first "
                   + "50 heatmap array values.") % (min_value, max_value))
        # internal representation is always (H,W,C); remember whether the
        # input was 2d so that get_arr() can return the original shape
        if arr.ndim == 2:
            arr = arr[..., np.newaxis]
            self.arr_was_2d = True
        else:
            self.arr_was_2d = False
        # project to the internal 0.0-1.0 value range; skip the projection
        # when the input range already is (0.0, 1.0)
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < min_value < 0.0 + eps
        max_is_one = 1.0 - eps < max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            self.arr_0to1 = arr
        else:
            self.arr_0to1 = (arr - min_value) / (max_value - min_value)
        self.shape = shape
        self.min_value = min_value
        self.max_value = max_value
    def get_arr(self):
        """
        Get the heatmap's array within the value range originally provided in ``__init__()``.
        The HeatmapsOnImage object saves heatmaps internally in the value range ``(min=0.0, max=1.0)``.
        This function converts the internal representation to ``(min=min_value, max=max_value)``,
        where ``min_value`` and ``max_value`` are provided upon instantiation of the object.
        Returns
        -------
        result : (H,W) ndarray or (H,W,C) ndarray
            Heatmap array. Dtype is float32.
        """
        if self.arr_was_2d and self.arr_0to1.shape[2] == 1:
            arr = self.arr_0to1[:, :, 0]
        else:
            arr = self.arr_0to1
        # skip the back-projection when the target range already is (0.0, 1.0)
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < self.min_value < 0.0 + eps
        max_is_one = 1.0 - eps < self.max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            return np.copy(arr)
        else:
            diff = self.max_value - self.min_value
            return self.min_value + diff * arr
    # TODO
    # def find_global_maxima(self):
    #     raise NotImplementedError()
    def draw(self, size=None, cmap="jet"):
        """
        Render the heatmaps as RGB images.
        Parameters
        ----------
        size : None or float or iterable of int or iterable of float, optional
            Size of the rendered RGB image as ``(height, width)``.
            See :func:`imgaug.imgaug.imresize_single_image` for details.
            If set to None, no resizing is performed and the size of the heatmaps array is used.
        cmap : str or None, optional
            Color map of ``matplotlib`` to use in order to convert the heatmaps to RGB images.
            If set to None, no color map will be used and the heatmaps will be converted
            to simple intensity maps.
        Returns
        -------
        heatmaps_drawn : list of (H,W,3) ndarray
            Rendered heatmaps. One per heatmap array channel. Dtype is uint8.
        """
        heatmaps_uint8 = self.to_uint8()
        heatmaps_drawn = []
        for c in sm.xrange(heatmaps_uint8.shape[2]):
            # c:c+1 here, because the additional axis is needed by imresize_single_image
            heatmap_c = heatmaps_uint8[..., c:c+1]
            if size is not None:
                heatmap_c_rs = imresize_single_image(heatmap_c, size, interpolation="nearest")
            else:
                heatmap_c_rs = heatmap_c
            heatmap_c_rs = np.squeeze(heatmap_c_rs).astype(np.float32) / 255.0
            if cmap is not None:
                # import only when necessary (faster startup; optional dependency; less fragile -- see issue #225)
                import matplotlib.pyplot as plt
                cmap_func = plt.get_cmap(cmap)
                heatmap_cmapped = cmap_func(heatmap_c_rs)
                # drop the alpha channel returned by the colormap (RGBA -> RGB)
                heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
            else:
                heatmap_cmapped = np.tile(heatmap_c_rs[..., np.newaxis], (1, 1, 3))
            heatmap_cmapped = np.clip(heatmap_cmapped * 255, 0, 255).astype(np.uint8)
            heatmaps_drawn.append(heatmap_cmapped)
        return heatmaps_drawn
    def draw_on_image(self, image, alpha=0.75, cmap="jet", resize="heatmaps"):
        """
        Draw the heatmaps as overlays over an image.
        Parameters
        ----------
        image : (H,W,3) ndarray
            Image onto which to draw the heatmaps. Expected to be of dtype uint8.
        alpha : float, optional
            Alpha/opacity value to use for the mixing of image and heatmaps.
            Higher values mean that the heatmaps will be more visible and the image less visible.
        cmap : str or None, optional
            Color map to use. See :func:`imgaug.HeatmapsOnImage.draw` for details.
        resize : {'heatmaps', 'image'}, optional
            In case of size differences between the image and heatmaps, either the image or
            the heatmaps can be resized. This parameter controls which of the two will be resized
            to the other's size.
        Returns
        -------
        mix : list of (H,W,3) ndarray
            Rendered overlays. One per heatmap array channel. Dtype is uint8.
        """
        # assert RGB image
        do_assert(image.ndim == 3)
        do_assert(image.shape[2] == 3)
        do_assert(image.dtype.type == np.uint8)
        do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
        do_assert(resize in ["heatmaps", "image"])
        if resize == "image":
            image = imresize_single_image(image, self.arr_0to1.shape[0:2], interpolation="cubic")
        heatmaps_drawn = self.draw(
            size=image.shape[0:2] if resize == "heatmaps" else None,
            cmap=cmap
        )
        # alpha-blend every rendered heatmap channel with the image
        mix = [
            np.clip((1-alpha) * image + alpha * heatmap_i, 0, 255).astype(np.uint8)
            for heatmap_i
            in heatmaps_drawn
        ]
        return mix
    def invert(self):
        """
        Inverts each value in the heatmap, shifting low towards high values and vice versa.
        This changes each value to::
            v' = max - (v - min)
        where ``v`` is the value at some spatial location, ``min`` is the minimum value in the heatmap
        and ``max`` is the maximum value.
        As the heatmap uses internally a 0.0 to 1.0 representation, this simply becomes ``v' = 1.0 - v``.
        Note that the attributes ``min_value`` and ``max_value`` are not switched. They both keep their values.
        This function can be useful e.g. when working with depth maps, where algorithms might have
        an easier time representing the furthest away points with zeros, requiring an inverted
        depth map.
        Returns
        -------
        arr_inv : imgaug.HeatmapsOnImage
            Inverted heatmap.
        """
        arr_inv = HeatmapsOnImage.from_0to1(1 - self.arr_0to1, shape=self.shape, min_value=self.min_value,
                                            max_value=self.max_value)
        arr_inv.arr_was_2d = self.arr_was_2d
        return arr_inv
    def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
        """
        Pad the heatmaps on their top/right/bottom/left side.
        Parameters
        ----------
        top : int, optional
            Amount of pixels to add at the top side of the heatmaps. Must be 0 or greater.
        right : int, optional
            Amount of pixels to add at the right side of the heatmaps. Must be 0 or greater.
        bottom : int, optional
            Amount of pixels to add at the bottom side of the heatmaps. Must be 0 or greater.
        left : int, optional
            Amount of pixels to add at the left side of the heatmaps. Must be 0 or greater.
        mode : string, optional
            Padding mode to use. See :func:`numpy.pad` for details.
        cval : number, optional
            Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
        Returns
        -------
        imgaug.HeatmapsOnImage
            Padded heatmaps of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
        """
        arr_0to1_padded = pad(self.arr_0to1, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
        return HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
        """
        Pad the heatmaps on their sides so that they match a target aspect ratio.
        Depending on which dimension is smaller (height or width), only the corresponding
        sides (left/right or top/bottom) will be padded. In each case, both of the sides will
        be padded equally.
        Parameters
        ----------
        aspect_ratio : float
            Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
            as much width as height.
        mode : str, optional
            Padding mode to use. See :func:`numpy.pad` for details.
        cval : number, optional
            Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
        return_pad_amounts : bool, optional
            If False, then only the padded image will be returned. If True, a tuple with two
            entries will be returned, where the first entry is the padded image and the second
            entry are the amounts by which each image side was padded. These amounts are again a
            tuple of the form (top, right, bottom, left), with each value being an integer.
        Returns
        -------
        heatmaps : imgaug.HeatmapsOnImage
            Padded heatmaps as HeatmapsOnImage object.
        pad_amounts : tuple of int
            Amounts by which the heatmaps were padded on each side, given as a tuple ``(top, right, bottom, left)``.
            This tuple is only returned if `return_pad_amounts` was set to True.
        """
        arr_0to1_padded, pad_amounts = pad_to_aspect_ratio(self.arr_0to1, aspect_ratio=aspect_ratio, mode=mode,
                                                           cval=cval, return_pad_amounts=True)
        heatmaps = HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,
                                             max_value=self.max_value)
        if return_pad_amounts:
            return heatmaps, pad_amounts
        else:
            return heatmaps
    def avg_pool(self, block_size):
        """
        Resize the heatmap(s) array using average pooling of a given block/kernel size.
        Parameters
        ----------
        block_size : int or tuple of int
            Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.
        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps after average pooling.
        """
        arr_0to1_reduced = avg_pool(self.arr_0to1, block_size, cval=0.0)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def max_pool(self, block_size):
        """
        Resize the heatmap(s) array using max-pooling of a given block/kernel size.
        Parameters
        ----------
        block_size : int or tuple of int
            Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.
        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps after max-pooling.
        """
        arr_0to1_reduced = max_pool(self.arr_0to1, block_size)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def scale(self, *args, **kwargs):
        """Deprecated alias for :func:`imgaug.HeatmapsOnImage.resize`."""
        warnings.warn(DeprecationWarning("HeatmapsOnImage.scale() is deprecated. "
                                         "Use HeatmapsOnImage.resize() instead. "
                                         "It has the exactly same interface "
                                         "(simple renaming)."))
        return self.resize(*args, **kwargs)
    def resize(self, sizes, interpolation="cubic"):
        """
        Resize the heatmap(s) array to the provided size given the provided interpolation.
        Parameters
        ----------
        sizes : float or iterable of int or iterable of float
            New size of the array in ``(height, width)``.
            See :func:`imgaug.imgaug.imresize_single_image` for details.
        interpolation : None or str or int, optional
            The interpolation to use during resize.
            See :func:`imgaug.imgaug.imresize_single_image` for details.
        Returns
        -------
        imgaug.HeatmapsOnImage
            Resized heatmaps object.
        """
        arr_0to1_resized = imresize_single_image(self.arr_0to1, sizes, interpolation=interpolation)
        # cubic interpolation can lead to values outside of [0.0, 1.0],
        # see https://github.com/opencv/opencv/issues/7195
        # TODO area interpolation too?
        arr_0to1_resized = np.clip(arr_0to1_resized, 0.0, 1.0)
        return HeatmapsOnImage.from_0to1(arr_0to1_resized, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def to_uint8(self):
        """
        Convert this heatmaps object to a 0-to-255 array.
        Returns
        -------
        arr_uint8 : (H,W,C) ndarray
            Heatmap as a 0-to-255 array (dtype is uint8).
        """
        # TODO this always returns (H,W,C), even if input ndarray was originally (H,W)
        # does it make sense here to also return (H,W) if self.arr_was_2d?
        arr_0to255 = np.clip(np.round(self.arr_0to1 * 255), 0, 255)
        arr_uint8 = arr_0to255.astype(np.uint8)
        return arr_uint8
    @staticmethod
    def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from a heatmap array containing values ranging from 0 to 255.
        Parameters
        ----------
        arr_uint8 : (H,W) ndarray or (H,W,C) ndarray
            Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
            Expected dtype is uint8.
        shape : tuple of int
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.
        min_value : float, optional
            Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
            be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
            underlying ``(0, 255)`` array to value range ``(min_value, max_value)``.
        max_value : float, optional
            Maximum value for the heatmaps that 0-to-255 array represents.
            See parameter `min_value` for details.
        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps object.
        """
        arr_0to1 = arr_uint8.astype(np.float32) / 255.0
        return HeatmapsOnImage.from_0to1(arr_0to1, shape, min_value=min_value, max_value=max_value)
    @staticmethod
    def from_0to1(arr_0to1, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from a heatmap array containing values ranging from 0.0 to 1.0.
        Parameters
        ----------
        arr_0to1 : (H,W) or (H,W,C) ndarray
            Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
            Expected dtype is float32.
        shape : tuple of ints
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.
        min_value : float, optional
            Minimum value for the heatmaps that the 0-to-1 array represents. This will usually
            be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
            underlying ``(0.0, 1.0)`` array to value range ``(min_value, max_value)``.
            E.g. if you started with heatmaps in the range ``(-1.0, 1.0)`` and projected these
            to (0.0, 1.0), you should call this function with ``min_value=-1.0``, ``max_value=1.0``
            so that :func:`imgaug.HeatmapsOnImage.get_arr` returns heatmap arrays having value
            range (-1.0, 1.0).
        max_value : float, optional
            Maximum value for the heatmaps that the 0-to-1 array represents.
            See parameter min_value for details.
        Returns
        -------
        heatmaps : imgaug.HeatmapsOnImage
            Heatmaps object.
        """
        # construct with range (0.0, 1.0) so that __init__() skips the
        # re-projection of the (already 0-to-1) array, then overwrite the
        # stored value range with the caller's original one
        heatmaps = HeatmapsOnImage(arr_0to1, shape, min_value=0.0, max_value=1.0)
        heatmaps.min_value = min_value
        heatmaps.max_value = max_value
        return heatmaps
    @classmethod
    def change_normalization(cls, arr, source, target):
        """
        Change the value range of a heatmap from one min-max to another min-max.
        E.g. the value range may be changed from min=0.0, max=1.0 to min=-1.0, max=1.0.
        Parameters
        ----------
        arr : ndarray
            Heatmap array to modify.
        source : tuple of float or imgaug.HeatmapsOnImage
            Current value range of the input array, given as (min, max), where both are float values.
            A HeatmapsOnImage may be given instead, in which case its value range is used.
        target : tuple of float or imgaug.HeatmapsOnImage
            Desired output value range of the array, given as (min, max), where both are float values.
            A HeatmapsOnImage may be given instead, in which case its value range is used.
        Returns
        -------
        arr_target : ndarray
            Input array, with value range projected to the desired target value range.
        """
        do_assert(is_np_array(arr))
        if isinstance(source, HeatmapsOnImage):
            source = (source.min_value, source.max_value)
        else:
            do_assert(isinstance(source, tuple))
            do_assert(len(source) == 2)
            do_assert(source[0] < source[1])
        if isinstance(target, HeatmapsOnImage):
            target = (target.min_value, target.max_value)
        else:
            do_assert(isinstance(target, tuple))
            do_assert(len(target) == 2)
            do_assert(target[0] < target[1])
        # Check if source and target are the same (with a tiny bit of tolerance)
        # if so, evade computation and just copy the array instead.
        # This is reasonable, as source and target will often both be (0.0, 1.0).
        eps = np.finfo(arr.dtype).eps
        mins_same = source[0] - 10*eps < target[0] < source[0] + 10*eps
        maxs_same = source[1] - 10*eps < target[1] < source[1] + 10*eps
        if mins_same and maxs_same:
            return np.copy(arr)
        min_source, max_source = source
        min_target, max_target = target
        diff_source = max_source - min_source
        diff_target = max_target - min_target
        # project source range to (0, 1), then (0, 1) to the target range
        arr_0to1 = (arr - min_source) / diff_source
        arr_target = min_target + arr_0to1 * diff_target
        return arr_target
    def copy(self):
        """
        Create a shallow copy of the Heatmaps object.
        Returns
        -------
        imgaug.HeatmapsOnImage
            Shallow copy.
        """
        return self.deepcopy()
    def deepcopy(self):
        """
        Create a deep copy of the Heatmaps object.
        Returns
        -------
        imgaug.HeatmapsOnImage
            Deep copy.
        """
        return HeatmapsOnImage(self.get_arr(), shape=self.shape, min_value=self.min_value, max_value=self.max_value)
class SegmentationMapOnImage(object):
    """
    Object representing a segmentation map associated with an image.

    Attributes
    ----------
    DEFAULT_SEGMENT_COLORS : list of tuple of int
        Standard RGB colors to use during drawing, ordered by class index.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,1) ndarray or (H,W,C) ndarray
        Array representing the segmentation map. May have datatypes bool, integer or float.

            * If bool: Assumed to be of shape (H,W), (H,W,1) or (H,W,C). If (H,W) or (H,W,1) it
              is assumed to be for the case of having a single class (where any False denotes
              background). Otherwise there are assumed to be C channels, one for each class,
              with each of them containing a mask for that class. The masks may overlap.
            * If integer: Assumed to be of shape (H,W) or (H,W,1). Each pixel is assumed to
              contain an integer denoting the class index. Classes are assumed to be
              non-overlapping. The number of classes cannot be guessed from this input, hence
              nb_classes must be set.
            * If float: Assumed to be of shape (H,W), (H,W,1) or (H,W,C) with meanings being
              similar to the case of `bool`. Values are expected to fall always in the range
              0.0 to 1.0 and are usually expected to be either 0.0 or 1.0 upon instantiation
              of a new segmentation map. Classes may overlap.

    shape : iterable of int
        Shape of the corresponding image (NOT the segmentation map array). This is expected
        to be ``(H, W)`` or ``(H, W, C)`` with ``C`` usually being 3. If there is no
        corresponding image, then use the segmentation map's shape instead.

    nb_classes : int or None
        Total number of unique classes that may appear in a segmentation map, i.e. the max
        class index. This may be None if the input array is of type bool or float. The number
        of classes however must be provided if the input array is of type int, as then the
        number of classes cannot be guessed.
    """

    # 22 base colors followed by their darkened variants; index == class id.
    DEFAULT_SEGMENT_COLORS = [
        (0, 0, 0),  # black
        (230, 25, 75),  # red
        (60, 180, 75),  # green
        (255, 225, 25),  # yellow
        (0, 130, 200),  # blue
        (245, 130, 48),  # orange
        (145, 30, 180),  # purple
        (70, 240, 240),  # cyan
        (240, 50, 230),  # magenta
        (210, 245, 60),  # lime
        (250, 190, 190),  # pink
        (0, 128, 128),  # teal
        (230, 190, 255),  # lavender
        (170, 110, 40),  # brown
        (255, 250, 200),  # beige
        (128, 0, 0),  # maroon
        (170, 255, 195),  # mint
        (128, 128, 0),  # olive
        (255, 215, 180),  # coral
        (0, 0, 128),  # navy
        (128, 128, 128),  # grey
        (255, 255, 255),  # white
        # --
        (115, 12, 37),  # dark red
        (30, 90, 37),  # dark green
        (127, 112, 12),  # dark yellow
        (0, 65, 100),  # dark blue
        (122, 65, 24),  # dark orange
        (72, 15, 90),  # dark purple
        (35, 120, 120),  # dark cyan
        (120, 25, 115),  # dark magenta
        (105, 122, 30),  # dark lime
        (125, 95, 95),  # dark pink
        (0, 64, 64),  # dark teal
        (115, 95, 127),  # dark lavender
        (85, 55, 20),  # dark brown
        (127, 125, 100),  # dark beige
        (64, 0, 0),  # dark maroon
        (85, 127, 97),  # dark mint
        (64, 64, 0),  # dark olive
        (127, 107, 90),  # dark coral
        (0, 0, 64),  # dark navy
        (64, 64, 64),  # dark grey
    ]

    def __init__(self, arr, shape, nb_classes=None):
        # Normalize any accepted input dtype/shape into a float32 (H,W,C)
        # one-channel-per-class representation; remember the original form in
        # self.input_was so it can be restored/validated later.
        do_assert(is_np_array(arr), "Expected to get numpy array, got %s." % (type(arr),))
        if arr.dtype.name == "bool":
            do_assert(arr.ndim in [2, 3])
            self.input_was = ("bool", arr.ndim)
            if arr.ndim == 2:
                arr = arr[..., np.newaxis]
            arr = arr.astype(np.float32)
        elif arr.dtype.kind in ["i", "u"]:
            do_assert(arr.ndim == 2 or (arr.ndim == 3 and arr.shape[2] == 1))
            do_assert(nb_classes is not None)
            do_assert(nb_classes > 0)
            # NOTE(review): only the first 100 values are range-checked,
            # presumably for speed — out-of-range values later in the array
            # are not caught here.
            do_assert(np.min(arr.flat[0:100]) >= 0)
            do_assert(np.max(arr.flat[0:100]) <= nb_classes)
            self.input_was = ("int", arr.dtype.type, arr.ndim)
            if arr.ndim == 3:
                arr = arr[..., 0]
            # TODO improve efficiency here by building only sub-heatmaps for classes actually
            # present in the image. This would also get rid of nb_classes.
            arr = np.eye(nb_classes)[arr]  # from class indices to one hot
            arr = arr.astype(np.float32)
        elif arr.dtype.kind == "f":
            do_assert(arr.ndim == 3)
            self.input_was = ("float", arr.dtype.type, arr.ndim)
            arr = arr.astype(np.float32)
        else:
            raise Exception(("Input was expected to be an ndarray any bool, int, uint or float dtype. "
                             + "Got dtype %s.") % (arr.dtype.name,))
        do_assert(arr.ndim == 3)
        do_assert(arr.dtype.name == "float32")
        self.arr = arr
        self.shape = shape
        self.nb_classes = nb_classes if nb_classes is not None else arr.shape[2]

    def get_arr_int(self, background_threshold=0.01, background_class_id=None):
        """
        Get the segmentation map array as an integer array of shape (H, W).

        Each pixel in that array contains an integer value representing the pixel's class.
        If multiple classes overlap, the one with the highest local float value is picked.
        If that highest local value is below `background_threshold`, the method instead uses
        the background class id as the pixel's class value.

        By default, class id 0 is the background class. This may only be changed if the
        original input to the segmentation map object was an integer map.

        Parameters
        ----------
        background_threshold : float, optional
            At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
            class-heatmaps has a value above this threshold, the method uses the background
            class id instead.

        background_class_id : None or int, optional
            Class id to fall back to if no class-heatmap passes the threshold at a spatial
            location. May only be provided if the original input was an integer mask and in
            these cases defaults to 0. If the input were float or boolean masks, the
            background class id may not be set as it is assumed that the background is
            implicitly defined as 'any spatial location that has zero-like values in all
            masks'.

        Returns
        -------
        result : (H,W) ndarray
            Segmentation map array (int32).
            If the original input consisted of boolean or float masks, then the highest
            possible class id is ``1+C``, where ``C`` is the number of provided float/boolean
            masks. The value ``0`` in the integer mask then denotes the background class.
        """
        if self.input_was[0] in ["bool", "float"]:
            do_assert(background_class_id is None,
                      "The background class id may only be changed if the original input to SegmentationMapOnImage "
                      + "was an *integer* based segmentation map.")
        if background_class_id is None:
            background_class_id = 0
        channelwise_max_idx = np.argmax(self.arr, axis=2)
        # for bool and float input masks, we assume that the background is implicitly given,
        # i.e. anything where all masks/channels have zero-like values
        # for int, we assume that the background class is explicitly given and has the index 0
        if self.input_was[0] in ["bool", "float"]:
            result = 1 + channelwise_max_idx
        else:  # integer mask was provided
            result = channelwise_max_idx
        if background_threshold is not None and background_threshold > 0:
            probs = np.amax(self.arr, axis=2)
            result[probs < background_threshold] = background_class_id
        return result.astype(np.int32)

    # TODO
    # def get_arr_bool(self, allow_overlapping=False, threshold=0.5, background_threshold=0.01,
    #                  background_class_id=0):
    #     raise NotImplementedError()

    def draw(self, size=None, background_threshold=0.01, background_class_id=None, colors=None,
             return_foreground_mask=False):
        """
        Render the segmentation map as an RGB image.

        Parameters
        ----------
        size : None or float or iterable of int or iterable of float, optional
            Size of the rendered RGB image as ``(height, width)``.
            See :func:`imgaug.imgaug.imresize_single_image` for details.
            If set to None, no resizing is performed and the size of the segmentation map
            array is used.

        background_threshold : float, optional
            See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.

        background_class_id : None or int, optional
            See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.

        colors : None or list of tuple of int, optional
            Colors to use. One for each class to draw. If None, then default colors will be
            used.

        return_foreground_mask : bool, optional
            Whether to return a mask of the same size as the drawn segmentation map,
            containing True at any spatial location that is not the background class and
            False everywhere else.

        Returns
        -------
        segmap_drawn : (H,W,3) ndarray
            Rendered segmentation map (dtype is uint8).

        foreground_mask : (H,W) ndarray
            Mask indicating the locations of foreground classes (dtype is bool).
            This value is only returned if `return_foreground_mask` is True.
        """
        arr = self.get_arr_int(background_threshold=background_threshold, background_class_id=background_class_id)
        nb_classes = 1 + np.max(arr)
        segmap_drawn = np.zeros((arr.shape[0], arr.shape[1], 3), dtype=np.uint8)
        if colors is None:
            colors = SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS
        do_assert(nb_classes <= len(colors),
                  "Can't draw all %d classes as it would exceed the maximum number of %d available colors." % (
                      nb_classes, len(colors),))
        ids_in_map = np.unique(arr)
        # paint only classes actually present; zip() caps at len(colors)
        for c, color in zip(sm.xrange(nb_classes), colors):
            if c in ids_in_map:
                class_mask = (arr == c)
                segmap_drawn[class_mask] = color
        if return_foreground_mask:
            background_class_id = 0 if background_class_id is None else background_class_id
            foreground_mask = (arr != background_class_id)
        else:
            foreground_mask = None
        if size is not None:
            # nearest-neighbour keeps class colors crisp when resizing
            segmap_drawn = imresize_single_image(segmap_drawn, size, interpolation="nearest")
            if foreground_mask is not None:
                foreground_mask = imresize_single_image(
                    foreground_mask.astype(np.uint8), size, interpolation="nearest") > 0
        if foreground_mask is not None:
            return segmap_drawn, foreground_mask
        return segmap_drawn

    def draw_on_image(self, image, alpha=0.75, resize="segmentation_map", background_threshold=0.01,
                      background_class_id=None, colors=None, draw_background=False):
        """
        Draw the segmentation map as an overlay over an image.

        Parameters
        ----------
        image : (H,W,3) ndarray
            Image onto which to draw the segmentation map. Dtype is expected to be uint8.

        alpha : float, optional
            Alpha/opacity value to use for the mixing of image and segmentation map.
            Higher values mean that the segmentation map will be more visible and the image
            less visible.

        resize : {'segmentation_map', 'image'}, optional
            In case of size differences between the image and segmentation map, either the
            image or the segmentation map can be resized. This parameter controls which of
            the two will be resized to the other's size.

        background_threshold : float, optional
            See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.

        background_class_id : None or int, optional
            See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.

        colors : None or list of tuple of int, optional
            Colors to use. One for each class to draw. If None, then default colors will be
            used.

        draw_background : bool, optional
            If True, the background will be drawn like any other class.
            If False, the background will not be drawn, i.e. the respective background pixels
            will be identical with the image's RGB color at the corresponding spatial
            location and no color overlay will be applied.

        Returns
        -------
        mix : (H,W,3) ndarray
            Rendered overlays (dtype is uint8).
        """
        # assert RGB image
        do_assert(image.ndim == 3)
        do_assert(image.shape[2] == 3)
        do_assert(image.dtype.type == np.uint8)
        do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
        do_assert(resize in ["segmentation_map", "image"])
        if resize == "image":
            image = imresize_single_image(image, self.arr.shape[0:2], interpolation="cubic")
        segmap_drawn, foreground_mask = self.draw(
            background_threshold=background_threshold,
            background_class_id=background_class_id,
            size=image.shape[0:2] if resize == "segmentation_map" else None,
            colors=colors,
            return_foreground_mask=True
        )
        if draw_background:
            mix = np.clip(
                (1-alpha) * image + alpha * segmap_drawn,
                0,
                255
            ).astype(np.uint8)
        else:
            # blend only where a foreground class was drawn; keep the raw
            # image pixels everywhere else
            foreground_mask = foreground_mask[..., np.newaxis]
            mix = np.zeros_like(image)
            mix += (~foreground_mask).astype(np.uint8) * image
            mix += foreground_mask.astype(np.uint8) * np.clip(
                (1-alpha) * image + alpha * segmap_drawn,
                0,
                255
            ).astype(np.uint8)
        return mix

    def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
        """
        Pad the segmentation map on its top/right/bottom/left side.

        Parameters
        ----------
        top : int, optional
            Amount of pixels to add at the top side of the segmentation map. Must be 0 or
            greater.

        right : int, optional
            Amount of pixels to add at the right side of the segmentation map. Must be 0 or
            greater.

        bottom : int, optional
            Amount of pixels to add at the bottom side of the segmentation map. Must be 0 or
            greater.

        left : int, optional
            Amount of pixels to add at the left side of the segmentation map. Must be 0 or
            greater.

        mode : str, optional
            Padding mode to use. See :func:`numpy.pad` for details.

        cval : number, optional
            Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for
            details.

        Returns
        -------
        segmap : imgaug.SegmentationMapOnImage
            Padded segmentation map of height ``H'=H+top+bottom`` and width
            ``W'=W+left+right``.
        """
        arr_padded = pad(self.arr, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
        segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)
        # preserve the record of the original input form
        segmap.input_was = self.input_was
        return segmap

    def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
        """
        Pad the segmentation map on its sides so that it matches a target aspect ratio.

        Depending on which dimension is smaller (height or width), only the corresponding
        sides (left/right or top/bottom) will be padded. In each case, both of the sides will
        be padded equally.

        Parameters
        ----------
        aspect_ratio : float
            Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having
            twice as much width as height.

        mode : str, optional
            Padding mode to use. See :func:`numpy.pad` for details.

        cval : number, optional
            Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for
            details.

        return_pad_amounts : bool, optional
            If False, then only the padded image will be returned. If True, a tuple with two
            entries will be returned, where the first entry is the padded image and the
            second entry are the amounts by which each image side was padded. These amounts
            are again a tuple of the form (top, right, bottom, left), with each value being
            an integer.

        Returns
        -------
        segmap : imgaug.SegmentationMapOnImage
            Padded segmentation map as SegmentationMapOnImage object.

        pad_amounts : tuple of int
            Amounts by which the segmentation map was padded on each side, given as a
            tuple ``(top, right, bottom, left)``.
            This tuple is only returned if `return_pad_amounts` was set to True.
        """
        arr_padded, pad_amounts = pad_to_aspect_ratio(self.arr, aspect_ratio=aspect_ratio, mode=mode, cval=cval,
                                                      return_pad_amounts=True)
        segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)
        segmap.input_was = self.input_was
        if return_pad_amounts:
            return segmap, pad_amounts
        else:
            return segmap

    def scale(self, *args, **kwargs):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(DeprecationWarning("SegmentationMapOnImage.scale() is deprecated. "
                                         "Use SegmentationMapOnImage.resize() instead. "
                                         "It has the exactly same interface (simple renaming)."))
        return self.resize(*args, **kwargs)

    def resize(self, sizes, interpolation="cubic"):
        """
        Resize the segmentation map array to the provided size given the provided
        interpolation.

        Parameters
        ----------
        sizes : float or iterable of int or iterable of float
            New size of the array in ``(height, width)``.
            See :func:`imgaug.imgaug.imresize_single_image` for details.

        interpolation : None or str or int, optional
            The interpolation to use during resize.
            See :func:`imgaug.imgaug.imresize_single_image` for details.
            Note: The segmentation map is internally stored as multiple float-based heatmaps,
            making smooth interpolations potentially more reasonable than nearest neighbour
            interpolation.

        Returns
        -------
        segmap : imgaug.SegmentationMapOnImage
            Resized segmentation map object.
        """
        arr_resized = imresize_single_image(self.arr, sizes, interpolation=interpolation)
        # cubic interpolation can lead to values outside of [0.0, 1.0],
        # see https://github.com/opencv/opencv/issues/7195
        # TODO area interpolation too?
        arr_resized = np.clip(arr_resized, 0.0, 1.0)
        segmap = SegmentationMapOnImage(arr_resized, shape=self.shape)
        segmap.input_was = self.input_was
        return segmap

    def to_heatmaps(self, only_nonempty=False, not_none_if_no_nonempty=False):
        """
        Convert segmentation map to heatmaps object.

        Each segmentation map class will be represented as a single heatmap channel.

        Parameters
        ----------
        only_nonempty : bool, optional
            If True, then only heatmaps for classes that appear in the segmentation map will
            be generated. Additionally, a list of these class ids will be returned.

        not_none_if_no_nonempty : bool, optional
            If `only_nonempty` is True and for a segmentation map no channel was non-empty,
            this function usually returns None as the heatmaps object. If however this
            parameter is set to True, a heatmaps object with one channel (representing
            class 0) will be returned as a fallback in these cases.

        Returns
        -------
        imgaug.HeatmapsOnImage or None
            Segmentation map as a heatmaps object.
            If `only_nonempty` was set to True and no class appeared in the segmentation map,
            then this is None.

        class_indices : list of int
            Class ids (0 to C-1) of the classes that were actually added to the heatmaps.
            Only returned if `only_nonempty` was set to True.
        """
        if not only_nonempty:
            return HeatmapsOnImage.from_0to1(self.arr, self.shape, min_value=0.0, max_value=1.0)
        else:
            # a channel counts as non-empty if its summed mass exceeds a tiny epsilon
            nonempty_mask = np.sum(self.arr, axis=(0, 1)) > 0 + 1e-4
            if np.sum(nonempty_mask) == 0:
                if not_none_if_no_nonempty:
                    nonempty_mask[0] = True
                else:
                    return None, []
            class_indices = np.arange(self.arr.shape[2])[nonempty_mask]
            channels = self.arr[..., class_indices]
            return HeatmapsOnImage(channels, self.shape, min_value=0.0, max_value=1.0), class_indices

    @staticmethod
    def from_heatmaps(heatmaps, class_indices=None, nb_classes=None):
        """
        Convert heatmaps to segmentation map.

        Assumes that each class is represented as a single heatmap channel.

        Parameters
        ----------
        heatmaps : imgaug.HeatmapsOnImage
            Heatmaps to convert.

        class_indices : None or list of int, optional
            List of class indices represented by each heatmap channel. See also the
            secondary output of :func:`imgaug.SegmentationMapOnImage.to_heatmaps`.
            If this is provided, it must have the same length as the number of heatmap
            channels.

        nb_classes : None or int, optional
            Number of classes. Must be provided if class_indices is set.

        Returns
        -------
        imgaug.SegmentationMapOnImage
            Segmentation map derived from heatmaps.
        """
        if class_indices is None:
            return SegmentationMapOnImage(heatmaps.arr_0to1, shape=heatmaps.shape)
        else:
            do_assert(nb_classes is not None)
            do_assert(min(class_indices) >= 0)
            do_assert(max(class_indices) < nb_classes)
            do_assert(len(class_indices) == heatmaps.arr_0to1.shape[2])
            arr_0to1 = heatmaps.arr_0to1
            # scatter each provided channel back into its original class slot;
            # classes without a channel stay all-zero
            arr_0to1_full = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1], nb_classes), dtype=np.float32)
            for heatmap_channel, mapped_channel in enumerate(class_indices):
                arr_0to1_full[:, :, mapped_channel] = arr_0to1[:, :, heatmap_channel]
            return SegmentationMapOnImage(arr_0to1_full, shape=heatmaps.shape)

    def copy(self):
        """
        Create a shallow copy of the segmentation map object.

        Returns
        -------
        imgaug.SegmentationMapOnImage
            Shallow copy.
        """
        return self.deepcopy()

    def deepcopy(self):
        """
        Create a deep copy of the segmentation map object.

        Returns
        -------
        imgaug.SegmentationMapOnImage
            Deep copy.
        """
        segmap = SegmentationMapOnImage(self.arr, shape=self.shape, nb_classes=self.nb_classes)
        segmap.input_was = self.input_was
        return segmap
class Batch(object):
    """
    Class encapsulating a batch before and after augmentation.

    Parameters
    ----------
    images : None or (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
        The images to augment.

    heatmaps : None or list of imgaug.HeatmapsOnImage
        The heatmaps to augment.

    segmentation_maps : None or list of SegmentationMapOnImage
        The segmentation maps to augment.

    keypoints : None or list of KeypointOnImage
        The keypoints to augment.

    bounding_boxes : None or list of BoundingBoxesOnImage
        The bounding boxes to augment.

    polygons : None or list of PolygonsOnImage
        The polygons to augment.

    data
        Additional data that is saved in the batch and may be read out
        after augmentation. This could e.g. contain filepaths to each image
        in `images`. As this object is usually used for background
        augmentation with multiple processes, the augmented Batch objects might
        not be returned in the original order, making this information useful.
    """

    def __init__(self, images=None, heatmaps=None, segmentation_maps=None,
                 keypoints=None, bounding_boxes=None, polygons=None,
                 data=None):
        # un-augmented inputs as provided by the caller
        self.images_unaug = images
        self.heatmaps_unaug = heatmaps
        self.segmentation_maps_unaug = segmentation_maps
        self.keypoints_unaug = keypoints
        self.bounding_boxes_unaug = bounding_boxes
        self.polygons_unaug = polygons
        # augmented results, filled in by the augmentation pipeline
        self.images_aug = None
        self.heatmaps_aug = None
        self.segmentation_maps_aug = None
        self.keypoints_aug = None
        self.bounding_boxes_aug = None
        self.polygons_aug = None
        self.data = data

    @property
    def images(self):
        # Deprecated accessor kept for backward compatibility.
        msg = ("Accessing imgaug.Batch.images is deprecated. Access instead "
               "imgaug.Batch.images_unaug or imgaug.Batch.images_aug.")
        warnings.warn(DeprecationWarning(msg))
        return self.images_unaug

    @property
    def heatmaps(self):
        # Deprecated accessor kept for backward compatibility.
        msg = ("Accessing imgaug.Batch.heatmaps is deprecated. Access instead "
               "imgaug.Batch.heatmaps_unaug or imgaug.Batch.heatmaps_aug.")
        warnings.warn(DeprecationWarning(msg))
        return self.heatmaps_unaug

    @property
    def segmentation_maps(self):
        # Deprecated accessor kept for backward compatibility.
        msg = ("Accessing imgaug.Batch.segmentation_maps is deprecated. Access "
               "instead imgaug.Batch.segmentation_maps_unaug or "
               "imgaug.Batch.segmentation_maps_aug.")
        warnings.warn(DeprecationWarning(msg))
        return self.segmentation_maps_unaug

    @property
    def keypoints(self):
        # Deprecated accessor kept for backward compatibility.
        msg = ("Accessing imgaug.Batch.keypoints is deprecated. Access "
               "instead imgaug.Batch.keypoints_unaug or "
               "imgaug.Batch.keypoints_aug.")
        warnings.warn(DeprecationWarning(msg))
        return self.keypoints_unaug

    @property
    def bounding_boxes(self):
        # Deprecated accessor kept for backward compatibility.
        msg = ("Accessing imgaug.Batch.bounding_boxes is deprecated. Access "
               "instead imgaug.Batch.bounding_boxes_unaug or "
               "imgaug.Batch.bounding_boxes_aug.")
        warnings.warn(DeprecationWarning(msg))
        return self.bounding_boxes_unaug

    def deepcopy(self):
        """Return a deep copy of this batch, covering both the un-augmented
        and the augmented attributes as well as `data`."""
        def _imgs_copy(imgs):
            # images may be a single ndarray or a list of ndarrays
            if imgs is None:
                return None
            if is_np_array(imgs):
                return np.copy(imgs)
            do_assert(is_iterable(imgs))
            do_assert(all([is_np_array(img) for img in imgs]))
            return [np.copy(img) for img in imgs]

        def _objs_copy(objs, cls_expected):
            # augmentables are lists of *OnImage objects with a deepcopy()
            if objs is None:
                return None
            do_assert(is_iterable(objs))
            do_assert(all([isinstance(obj, cls_expected) for obj in objs]))
            return [obj.deepcopy() for obj in objs]

        batch = Batch(
            images=_imgs_copy(self.images_unaug),
            heatmaps=_objs_copy(self.heatmaps_unaug, HeatmapsOnImage),
            segmentation_maps=_objs_copy(self.segmentation_maps_unaug, SegmentationMapOnImage),
            keypoints=_objs_copy(self.keypoints_unaug, KeypointsOnImage),
            bounding_boxes=_objs_copy(self.bounding_boxes_unaug, BoundingBoxesOnImage),
            polygons=_objs_copy(self.polygons_unaug, PolygonsOnImage),
            data=copy.deepcopy(self.data)
        )
        batch.images_aug = _imgs_copy(self.images_aug)
        batch.heatmaps_aug = _objs_copy(self.heatmaps_aug, HeatmapsOnImage)
        batch.segmentation_maps_aug = _objs_copy(self.segmentation_maps_aug, SegmentationMapOnImage)
        batch.keypoints_aug = _objs_copy(self.keypoints_aug, KeypointsOnImage)
        batch.bounding_boxes_aug = _objs_copy(self.bounding_boxes_aug, BoundingBoxesOnImage)
        batch.polygons_aug = _objs_copy(self.polygons_aug, PolygonsOnImage)
        return batch
def BatchLoader(*args, **kwargs):
    """Deprecated shim forwarding to :class:`imgaug.multicore.BatchLoader`.

    Emits a DeprecationWarning, then delegates all arguments unchanged.
    """
    # Fixed typo in the user-facing message: "depcrecated" -> "deprecated".
    warnings.warn(DeprecationWarning("Using imgaug.imgaug.BatchLoader is deprecated. "
                                     "Use imgaug.multicore.BatchLoader instead."))
    from . import multicore
    return multicore.BatchLoader(*args, **kwargs)
def BackgroundAugmenter(*args, **kwargs):
    """Deprecated shim forwarding to :class:`imgaug.multicore.BackgroundAugmenter`.

    Emits a DeprecationWarning, then delegates all arguments unchanged.
    """
    # Fixed typo in the user-facing message: "depcrecated" -> "deprecated".
    warnings.warn(DeprecationWarning("Using imgaug.imgaug.BackgroundAugmenter is deprecated. "
                                     "Use imgaug.multicore.BackgroundAugmenter instead."))
    from . import multicore
    return multicore.BackgroundAugmenter(*args, **kwargs)
|
from __future__ import unicode_literals, division, print_function
import io
import csv
import random
from recordclass import recordclass
from numpy import array
class Station(recordclass('Station', b'station_id name operator voltages frequencies lines')):
    """Mutable record for a grid station; hashable by its station id."""

    def __hash__(self):
        # Identity of a station is its id alone, so sets/dicts work even
        # though the record fields are mutable.
        sid = self.station_id
        return hash(sid)
class Line(recordclass('Line', b'line_id operator left right frequency voltage resistance reactance capacitance')):
    """Mutable record for a transmission line between two stations."""

    def __hash__(self):
        # Identity of a line is its id alone.
        lid = self.line_id
        return hash(lid)

    def __repr__(self):
        text = "{0}: {1} -> {2}".format(self.line_id, self.left.name, self.right.name)
        return text.encode('utf-8')

    @property
    def susceptance(self):
        # B = C * f; undefined unless both capacitance and frequency are known.
        if self.capacitance is not None and self.frequency is not None:
            return self.capacitance * self.frequency
        return None
class Network(object):
    """In-memory model of an electrical grid: stations joined by lines.

    `stations` and `lines` are dicts keyed by their ids; `_areas` maps
    operator names to small sequential integers for power-flow export.
    (Python 2 code: uses dict.itervalues().)
    """

    def __init__(self):
        self.stations = dict()
        self.lines = dict()
        self._areas = dict()

    def connected_sets(self):
        """Return the connected components of the network, each as a list of
        stations.

        Traversal from an arbitrary root per component (stack-based pop, so
        effectively depth-first).
        """
        unseen = set(self.stations.values())
        connected = []
        while unseen:
            root = unseen.pop()
            # BUG FIX: the root was removed from `unseen` before the scan and
            # therefore never added to its own component; seed `current` with it.
            current = [root]
            queue = [root]
            while queue:
                node = queue.pop()
                if node in unseen:
                    unseen.remove(node)
                    current.append(node)
                for line in node.lines:
                    if line.left in unseen:
                        queue.append(line.left)
                    if line.right in unseen:
                        queue.append(line.right)
            connected.append(current)
        return connected

    def patch(self):
        """Iteratively fill in missing voltages/frequencies from neighbours.

        Flood passes repeat until a fixed point is reached; returns the list
        of change counts per pass.
        """
        totals = list()
        while True:
            changes = 0
            for station in self.stations.itervalues():
                line_voltages = [line.voltage for line in station.lines if line.voltage is not None]
                line_frequencies = [line.frequency for line in station.lines if line.frequency is not None]
                if station.voltages is None:
                    if line_voltages:
                        station.voltages = list(set(line_voltages))
                        changes += 1
                # what about conflicting voltages?
                elif any(lv not in station.voltages for lv in line_voltages):
                    station.voltages = list(set(station.voltages + line_voltages))
                    changes += 1
                if station.frequencies is None:
                    if line_frequencies:
                        station.frequencies = list(set(line_frequencies))
                        changes += 1
                elif any(lf not in station.frequencies for lf in line_frequencies):
                    station.frequencies = list(set(station.frequencies + line_frequencies))
                    changes += 1
            for line in self.lines.itervalues():
                if line.frequency is None:
                    if line.left.frequencies is not None:
                        if line.right.frequencies is not None:
                            shared_frequency = set(line.left.frequencies) & set(line.right.frequencies)
                            if shared_frequency:
                                line.frequency = max(shared_frequency)
                                changes += 1
                        elif len(line.left.frequencies) == 1:
                            line.frequency = line.left.frequencies[0]
                            changes += 1
                    elif line.right.frequencies is not None and len(line.right.frequencies) == 1:
                        line.frequency = line.right.frequencies[0]
                        changes += 1
                if line.voltage is None:
                    # this never seems to happen though!
                    if line.left.voltages is not None:
                        if line.right.voltages is not None:
                            shared_voltage = set(line.left.voltages) & set(line.right.voltages)
                            if shared_voltage:
                                line.voltage = max(shared_voltage)
                                changes += 1
                        elif len(line.left.voltages) == 1:
                            line.voltage = line.left.voltages[0]
                            changes += 1
                    # BUG FIX: this branch tested len(frequencies == 1) (a
                    # TypeError) and assigned line.frequency; it now mirrors
                    # the frequency branch above for voltages.
                    elif line.right.voltages is not None and len(line.right.voltages) == 1:
                        line.voltage = line.right.voltages[0]
                        changes += 1
            if changes == 0:
                break
            totals.append(changes)
        return totals

    def report(self):
        """Count stations/lines with missing data and station/line mismatches.

        Returns a tuple (broken_stations, broken_lines, mismatches).
        """
        broken_stations = 0
        broken_lines = 0
        mismatches = 0
        for station in self.stations.itervalues():
            if station.voltages is None or station.frequencies is None:
                broken_stations += 1
            for line in station.lines:
                if station.frequencies is not None:
                    if line.frequency not in station.frequencies:
                        mismatches += 1
                        continue
                elif line.frequency is not None:
                    mismatches += 1
                    continue
                if station.voltages is not None:
                    if line.voltage not in station.voltages:
                        mismatches += 1
                        continue
                elif line.voltage is not None:
                    mismatches += 1
                    continue
        for line in self.lines.itervalues():
            if line.voltage is None or line.frequency is None:
                broken_lines += 1
        return broken_stations, broken_lines, mismatches

    def _area_number(self, area_name):
        """Return a stable small integer (1-based) for an operator/area name."""
        if area_name not in self._areas:
            # assign next area number
            self._areas[area_name] = len(self._areas) + 1
        return self._areas[area_name]

    def powercase(self, loads=None):
        """Build a PYPOWER-style case dict (still a stub).

        `loads` is a map of station id -> load in MW, positive for
        consumption, negative (a generator) for production. If no loads map
        is passed, an 'electrified pair' of two random nodes is generated,
        one of which delivers power while the other consumes it.
        """
        if loads is None:
            loads = self._electrified_pair()
        ppc = {
            "version": 2,
            "baseMVA": 100.0
        }
        nodes = list()
        transformers = list()
        generators = list()
        edges = list()
        station_to_bus = dict()
        next_bus_id = 1
        # BUG FIX: was `self.station` — the attribute is named `stations`.
        for station in self.stations.itervalues():
            # because we do a DC PF, we ignore frequencies completely
            # TODO: populate the bus/branch/gen matrices; not implemented yet.
            pass

    def _electrified_pair(self):
        """Pick two random station ids: one 100 MW source, one 50 MW sink."""
        # BUG FIX: random.sample() rejects a dict population; sample the ids.
        src, dst = random.sample(list(self.stations), 2)
        return {
            src: 100,  # MW
            dst: 50,  # MW
        }

    def _make_bus(self, station, voltage, load, bus_id):
        """Build one PYPOWER bus row for `station` at the given voltage level."""
        # see pypower.caseformat for documentation on how this works
        area_nr = self._area_number(station.operator)
        # BUG FIX: Station has no `voltage` attribute (only `voltages`);
        # use the explicit voltage argument (volts -> kV).
        base_kv = voltage // 1000
        return [
            bus_id,
            3,  # slack bus
            load,  # real load in MW
            0,  # reactive load MVAr, zero because DC
            0,  # shunt conductance
            0,  # shunt susceptance
            area_nr,  # area number
            1.0,  # voltage magnitude per unit
            0,  # voltage angle
            base_kv,  # base voltage (per unit base)
            area_nr,  # loss zone nr
            1.1,  # max voltage per unit
            0.9,  # min voltage per unit
        ]

    def _make_transformer(self, from_bus, to_bus, from_voltage, to_voltage):
        # TODO: stub — only the bus endpoints are emitted so far.
        return [
            from_bus,
            to_bus,
        ]

    def dot(self):
        """Render the network as a Graphviz 'dot' graph string."""
        buf = io.StringIO()
        buf.write("graph {\n")
        buf.write("rankdir LR\n")
        for station in self.stations.itervalues():
            buf.write('s_{0} [label="{1}"]\n'.format(station.station_id, station.name.replace('"', "'")))
        for line in self.lines.itervalues():
            buf.write('s_{0} -- s_{1}\n'.format(line.left.station_id, line.right.station_id))
        buf.write("}\n")
        return buf.getvalue()

    def __repr__(self):
        return "Network of {0} stations, {1} lines".format(len(self.stations), len(self.lines)).encode('utf-8')
class ScigridNetwork(Network):
    """Network populated from SciGRID CSV exports.

    NOTE(review): files are opened in binary mode and individual fields are
    decoded ad hoc — this relies on Python 2 `csv`/bytes semantics and
    assumes UTF-8 input; confirm before porting to Python 3.
    """

    class _csv_dialect(csv.excel):
        # SciGRID exports quote fields with single quotes, not double quotes.
        quotechar = b"'"

    def read(self, vertices_csv, links_csv):
        """Load stations from `vertices_csv` and lines from `links_csv`.

        Voltages/frequencies and line parameters stay None when the
        corresponding CSV field is empty; per-km line constants are scaled
        by the line length (length_m / 1000 -> km).
        """
        with io.open(vertices_csv, 'rb') as handle:
            for row in csv.DictReader(handle, dialect=self._csv_dialect):
                station_id = row['v_id']
                name = row['name'].decode('utf-8')
                operator = row['operator'].decode('utf-8')
                # ';'-separated multi-value fields; empty string means unknown
                voltages = map(int, row['voltage'].decode('utf-8').split(';')) if row['voltage'] else None
                frequencies = map(float, row['frequency'].decode('utf-8').split(';')) if row['frequency'] else None
                self.stations[row['v_id']] = Station(station_id=station_id, name=name, operator=operator,
                                                     voltages=voltages, frequencies=frequencies, lines=list())
        with io.open(links_csv, 'rb') as handle:
            for i, row in enumerate(csv.DictReader(handle, dialect=self._csv_dialect)):
                line_id = row['l_id']
                operator = row['operator'].decode('utf-8')
                left = self.stations[row['v_id_1']]
                right = self.stations[row['v_id_2']]
                # per-km electrical constants scaled to the full line length
                resistance = float(row['r_ohmkm']) * int(row['length_m']) / 1000 if row['r_ohmkm'] else None
                reactance = float(row['x_ohmkm']) * int(row['length_m']) / 1000 if row['x_ohmkm'] else None
                capacitance = float(row['c_nfkm']) * int(row['length_m']) / 1000 if row['c_nfkm'] else None
                frequency = float(row['frequency']) if row['frequency'] else None
                voltage = int(row['voltage']) if row['voltage'] else None
                line = Line(line_id=line_id, operator=operator, left=left, right=right,
                            voltage=voltage, frequency=frequency,
                            resistance=resistance, reactance=reactance, capacitance=capacitance)
                self.lines[row['l_id']] = line
                # register the line on both endpoints for traversal/patching
                left.lines.append(line)
                right.lines.append(line)
Create SciGRID powercase; does not work well yet
from __future__ import unicode_literals, division, print_function
import io
import csv
import random
import itertools
from recordclass import recordclass
from numpy import array
class Station(recordclass('Station', b'station_id name operator voltages frequencies lines')):
    # Hash by id so stations can live in sets/dicts even though the record
    # fields are mutable.
    def __hash__(self):
        return hash(self.station_id)
class Line(recordclass('Line', b'line_id operator left right frequency voltage resistance reactance capacitance')):
    # Hash by id so lines can live in sets/dicts even though the record
    # fields are mutable.
    def __hash__(self):
        return hash(self.line_id)

    def __repr__(self):
        # Python 2: returns a UTF-8 encoded byte string.
        return "{0}: {1} -> {2}".format(self.line_id, self.left.name, self.right.name).encode('utf-8')

    @property
    def susceptance(self):
        # B = C * f; undefined unless both capacitance and frequency are known.
        if self.capacitance is None or self.frequency is None:
            return None
        return self.capacitance * self.frequency
class Network(object):
    """An electrical grid model: Station nodes joined by Line edges.

    Provides graph analysis (connected components), heuristic patching of
    missing voltage/frequency data from neighbours, and export either to a
    PYPOWER case dict ("powercase") or to Graphviz dot text.
    """

    def __init__(self):
        self.stations = dict()  # station_id -> Station
        self.lines = dict()     # line_id -> Line
        self._areas = dict()    # operator/area name -> small integer number

    def connected_sets(self):
        """Return the connected components as lists of stations (BFS).

        Bug fix: the component root was never added to its component -- it
        is popped from ``unseen`` before the membership test -- so every
        component came back one station short.
        """
        unseen = set(self.stations.values())
        connected = []
        while unseen:
            root = unseen.pop()
            current = [root]
            queue = [root]
            while queue:
                node = queue.pop()
                if node in unseen:
                    unseen.remove(node)
                    current.append(node)
                for line in node.lines:
                    if line.left in unseen:
                        queue.append(line.left)
                    if line.right in unseen:
                        queue.append(line.right)
            connected.append(current)
        return connected

    def patch(self):
        """Flood-fill missing station/line voltages and frequencies from
        neighbours until a fixed point is reached.

        Returns the list of per-iteration change counts.
        """
        totals = list()
        while True:
            changes = 0
            for station in self.stations.values():
                line_voltages = [line.voltage for line in station.lines if line.voltage is not None]
                line_frequencies = [line.frequency for line in station.lines if line.frequency is not None]
                if station.voltages is None:
                    if line_voltages:
                        station.voltages = list(set(line_voltages))
                        changes += 1
                # what about conflicting voltages?  For now we just merge.
                elif any(lv not in station.voltages for lv in line_voltages):
                    station.voltages = list(set(station.voltages + line_voltages))
                    changes += 1
                if station.frequencies is None:
                    if line_frequencies:
                        station.frequencies = list(set(line_frequencies))
                        changes += 1
                elif any(lf not in station.frequencies for lf in line_frequencies):
                    station.frequencies = list(set(station.frequencies + line_frequencies))
                    changes += 1
            for line in self.lines.values():
                if line.frequency is None:
                    if line.left.frequencies is not None:
                        if line.right.frequencies is not None:
                            shared_frequency = set(line.left.frequencies) & set(line.right.frequencies)
                            if shared_frequency:
                                line.frequency = max(shared_frequency)
                                changes += 1
                        elif len(line.left.frequencies) == 1:
                            line.frequency = line.left.frequencies[0]
                            changes += 1
                    elif line.right.frequencies is not None and len(line.right.frequencies) == 1:
                        line.frequency = line.right.frequencies[0]
                        changes += 1
                if line.voltage is None:
                    # this never seems to happen though!
                    if line.left.voltages is not None:
                        if line.right.voltages is not None:
                            shared_voltage = set(line.left.voltages) & set(line.right.voltages)
                            if shared_voltage:
                                line.voltage = max(shared_voltage)
                                changes += 1
                        elif len(line.left.voltages) == 1:
                            line.voltage = line.left.voltages[0]
                            changes += 1
                    # Bug fix: this branch was a copy-paste of the frequency
                    # case -- it tested len(frequencies == 1) (a TypeError),
                    # assigned frequency instead of voltage and forgot to
                    # count the change.
                    elif line.right.voltages is not None and len(line.right.voltages) == 1:
                        line.voltage = line.right.voltages[0]
                        changes += 1
            if changes == 0:
                break
            totals.append(changes)
        return totals

    def report(self):
        """Count remaining data problems.

        Returns (broken_stations, broken_lines, mismatches): stations/lines
        still missing values, and lines whose voltage or frequency is not
        among the attached station's known values.
        """
        broken_stations = 0
        broken_lines = 0
        mismatches = 0
        for station in self.stations.values():
            if station.voltages is None or station.frequencies is None:
                broken_stations += 1
            for line in station.lines:
                if station.frequencies is not None:
                    if line.frequency not in station.frequencies:
                        mismatches += 1
                        continue
                elif line.frequency is not None:
                    mismatches += 1
                    continue
                if station.voltages is not None:
                    if line.voltage not in station.voltages:
                        mismatches += 1
                        continue
                elif line.voltage is not None:
                    mismatches += 1
                    continue
        for line in self.lines.values():
            if line.voltage is None or line.frequency is None:
                broken_lines += 1
        return broken_stations, broken_lines, mismatches

    def _area_number(self, area_name):
        """Map an operator/area name to a stable 1-based integer id."""
        if area_name not in self._areas:
            # assign next area number
            self._areas[area_name] = len(self._areas) + 1
        return self._areas[area_name]

    def powercase(self, loads=None):
        """Build a PYPOWER case dict for a DC power flow.

        ``loads`` maps station id -> load in MW; a negative load is
        represented by a generator.  If no loads map is passed, an
        'electrified pair' of two random stations is generated: one
        delivers power, the other consumes it.

        NOTE(review): assumes ``patch()`` has filled every station's
        ``voltages``; every bus is emitted as type 3 (slack).
        """
        if loads is None:
            loads = self._electrified_pair()
        ppc = {
            "version": 2,
            "baseMVA": 100.0
        }
        nodes = list()
        edges = list()
        generators = list()
        station_to_bus = dict()
        bus_id_gen = itertools.count()
        for station in self.stations.values():
            # because we do a DC PF, we ignore frequencies completely
            minv, maxv = min(station.voltages), max(station.voltages)
            for voltage in station.voltages:
                # attach the station's load to its lowest-voltage bus
                if station.station_id in loads and voltage == minv:
                    bus_load = loads[station.station_id]
                else:
                    bus_load = 0
                bus_id = next(bus_id_gen)
                station_to_bus[station.station_id, voltage] = bus_id
                if bus_load < 0:
                    # it is a generator instead of a load, insert it
                    generators.append(self._make_generator(bus_id, -bus_load))
                    bus_load = 0
                nodes.append(self._make_bus(station, voltage, bus_load, bus_id))
            for voltage in station.voltages:
                if voltage != maxv:
                    # create a transformer branch from max voltage to this voltage
                    from_bus = station_to_bus[station.station_id, maxv]
                    to_bus = station_to_bus[station.station_id, voltage]
                    edges.append(self._make_transformer(from_bus, to_bus))
        for line in self.lines.values():
            # create branches between stations
            from_bus = station_to_bus[line.left.station_id, line.voltage]
            to_bus = station_to_bus[line.right.station_id, line.voltage]
            edges.append(self._make_line(line, from_bus, to_bus))
        ppc['bus'] = array(nodes)
        ppc['gen'] = array(generators)
        ppc['branch'] = array(edges)
        return ppc

    def _electrified_pair(self):
        """Pick two distinct random stations: a 100 MW source, 50 MW sink.

        Bug fix: random.sample() cannot sample a dict (it indexes the
        population with integers, raising KeyError/TypeError for string
        station ids) -- sample the list of keys instead.
        """
        src, dst = random.sample(list(self.stations), 2)
        return {
            src: -100,  # MW
            dst: 50,    # MW
        }

    def _make_bus(self, station, voltage, load, bus_id):
        # see pypower.caseformat for documentation on how this works
        area_nr = self._area_number(station.operator)
        base_kv = voltage // 1000
        return [
            bus_id,
            3,        # slack bus
            load,     # real load in MW
            0,        # reactive load MVAr, zero because DC
            0,        # shunt conductance
            0,        # shunt susceptance
            area_nr,  # area number
            1.0,      # voltage magnitude per unit
            0,        # voltage angle
            base_kv,  # base voltage (per unit base)
            area_nr,  # loss zone nr
            1.1,      # max voltage per unit
            0.9,      # min voltage per unit
        ]

    def _make_transformer(self, from_bus, to_bus):
        # Synthetic intra-station transformer branch with nominal values.
        return [
            from_bus,
            to_bus,
            0.01,  # resistance
            0.01,  # reactance
            0.01,  # line charging susceptance
            200,   # long term rating (MW)
            200,   # short term rating (MW)
            200,   # emergency rating (MW)
            1,     # off-nominal (correction) taps ratio, 1 for no correction
            0,     # transformer phase shift angle
            1,     # status (1 = on)
            -360,  # minimum angle
            360,   # maximum angle
        ]

    def _make_line(self, line, from_bus, to_bus):
        # Branch row for a real transmission line; missing electrical
        # parameters fall back to small nominal defaults.
        return [
            from_bus,
            to_bus,
            line.resistance or 0.01,  # default value if None
            line.reactance or 0.01,
            line.susceptance or 0.01,
            200,
            200,
            200,
            0,     # not a transformer
            0,     # not a transformer
            1,     # status
            -360,
            360
        ]

    def _make_generator(self, bus_id, power_output):
        # Generator row; reactive limits are zero because we solve DC only.
        return [
            bus_id,
            power_output,
            0,    # reactive power output
            0,    # maximum reactive power output
            0,    # minimum reactive power output
            1.0,  # per-unit voltage magnitude setpoint
            100,  # base MVA
            1,    # status (on)
            power_output,  # maximum real power output
            0,    # minimum real power output
            0,    # Pc1, irrelevant
            0,    # Pc2
            0,    # Qc1min
            0,    # Qc1max
            0,    # Qc2min
            0,    # Qc2max
            5,    # ramp rate load-following (MW/min)
            5,    # ramp rate 10-min reserve (MW/min)
            5,    # ramp rate 30-min reserve (MW/min)
            0,    # ramp rate reactive power
            0,    # area participation factor
        ]

    def dot(self):
        """Render the network as Graphviz dot text (undirected graph)."""
        buf = io.StringIO()
        buf.write("graph {\n")
        buf.write("rankdir LR\n")
        for station in self.stations.values():
            buf.write('s_{0} [label="{1}"]\n'.format(station.station_id, station.name.replace('"', "'")))
        for line in self.lines.values():
            buf.write('s_{0} -- s_{1}\n'.format(line.left.station_id, line.right.station_id))
        buf.write("}\n")
        return buf.getvalue()

    def __repr__(self):
        # Python 2: repr() must return a byte string, hence the encode.
        return "Network of {0} stations, {1} lines".format(len(self.stations), len(self.lines)).encode('utf-8')
class ScigridNetwork(Network):
    """Network populated from SciGRID CSV exports (vertices + links).

    Files are opened in binary mode and fields decoded by hand because this
    targets Python 2's bytes-based csv module.
    """
    class _csv_dialect(csv.excel):
        # SciGRID quotes fields with single quotes instead of double quotes.
        quotechar = b"'"
    def read(self, vertices_csv, links_csv):
        """Populate ``self.stations`` and ``self.lines`` from the two CSV
        files; lines are cross-linked onto their endpoint stations."""
        with io.open(vertices_csv, 'rb') as handle:
            for row in csv.DictReader(handle, dialect=self._csv_dialect):
                station_id = row['v_id']
                name = row['name'].decode('utf-8')
                operator = row['operator'].decode('utf-8')
                # Multi-valued fields are ';'-separated; None when empty.
                voltages = map(int, row['voltage'].decode('utf-8').split(';')) if row['voltage'] else None
                frequencies = map(float, row['frequency'].decode('utf-8').split(';')) if row['frequency'] else None
                self.stations[row['v_id']] = Station(station_id=station_id, name=name, operator=operator,
                                                     voltages=voltages, frequencies=frequencies, lines=list())
        with io.open(links_csv, 'rb') as handle:
            for i, row in enumerate(csv.DictReader(handle, dialect=self._csv_dialect)):
                line_id = row['l_id']
                operator = row['operator'].decode('utf-8')
                left = self.stations[row['v_id_1']]
                right = self.stations[row['v_id_2']]
                # Per-km values scaled by line length; /1000 converts m -> km.
                resistance = float(row['r_ohmkm']) * int(row['length_m']) / 1000 if row['r_ohmkm'] else None
                reactance = float(row['x_ohmkm']) * int(row['length_m']) / 1000 if row['x_ohmkm'] else None
                capacitance = float(row['c_nfkm']) * int(row['length_m']) / 1000 if row['c_nfkm'] else None
                frequency = float(row['frequency']) if row['frequency'] else None
                voltage = int(row['voltage']) if row['voltage'] else None
                line = Line(line_id=line_id, operator=operator, left=left, right=right,
                            voltage=voltage, frequency=frequency,
                            resistance=resistance, reactance=reactance, capacitance=capacitance)
                self.lines[row['l_id']] = line
                left.lines.append(line)
                right.lines.append(line)
|
#! /usr/bin/env python
import sys
sys.path.append('/usr/share/inkscape/extensions')
import inkex
import os
import subprocess
import tempfile
import shutil
import copy
class PNGExport(inkex.Effect):
    """Inkscape effect: export every "[export]" layer to its own PNG (and
    optionally JPEG), keeping all "[fixed]" layers visible in each image."""

    def __init__(self):
        """Init the effect library and get options from gui."""
        inkex.Effect.__init__(self)
        self.OptionParser.add_option("--path", action="store", type="string", dest="path", default="~/", help="")
        self.OptionParser.add_option('-f', '--filetype', action='store', type='string', dest='filetype', default='jpeg', help='Exported file type')

    def effect(self):
        """Entry point: write one numbered image per exportable layer."""
        output_path = os.path.expanduser(self.options.path)
        curfile = self.args[-1]
        layers = self.get_layers(curfile)
        counter = 1
        for (layer_id, layer_label, layer_type) in layers:
            if layer_type == "fixed":
                continue
            # show this layer plus every fixed (background) layer
            show_layer_ids = [layer[0] for layer in layers if layer[2] == "fixed" or layer[0] == layer_id]
            if not os.path.exists(os.path.join(output_path)):
                os.makedirs(os.path.join(output_path))
            layer_dest_svg_path = os.path.join(output_path, "%s.svg" % layer_label)
            layer_dest_png_path = os.path.join(output_path, "%s_%s.png" % (str(counter).zfill(3), layer_label))
            self.export_layers(layer_dest_svg_path, show_layer_ids)
            self.exportToPng(layer_dest_svg_path, layer_dest_png_path)
            if self.options.filetype == "jpeg":
                layer_dest_jpg_path = os.path.join(output_path, "%s_%s.jpg" % (str(counter).zfill(3), layer_label))
                self.convertPngToJpg(layer_dest_png_path, layer_dest_jpg_path)
                os.unlink(layer_dest_png_path)
            os.unlink(layer_dest_svg_path)
            counter += 1

    def export_layers(self, dest, show):
        """
        Export selected layers of SVG to the file `dest`.
        :arg str dest: path to export SVG file.
        :arg list show: layer ids to show. each element is a string.
        """
        doc = copy.deepcopy(self.document)
        for layer in doc.xpath('//svg:g[@inkscape:groupmode="layer"]', namespaces=inkex.NSS):
            layer.attrib['style'] = 'display:none'
            id = layer.attrib["id"]
            if id in show:
                layer.attrib['style'] = 'display:inline'
        doc.write(dest)

    def get_layers(self, src):
        """Return [layer_id, label, type] triples for layers labelled
        "[fixed] ..." or "[export] ..."; other layers are skipped."""
        svg_layers = self.document.xpath('//svg:g[@inkscape:groupmode="layer"]', namespaces=inkex.NSS)
        layers = []
        for layer in svg_layers:
            label_attrib_name = "{%s}label" % layer.nsmap['inkscape']
            if label_attrib_name not in layer.attrib:
                continue
            layer_id = layer.attrib["id"]
            layer_label = layer.attrib[label_attrib_name]
            if layer_label.lower().startswith("[fixed] "):
                layer_type = "fixed"
                layer_label = layer_label[8:]
            elif layer_label.lower().startswith("[export] "):
                layer_type = "export"
                layer_label = layer_label[9:]
            else:
                continue
            layers.append([layer_id, layer_label, layer_type])
        return layers

    def exportToPng(self, svg_path, output_path):
        """Render the SVG to PNG via the inkscape CLI.

        Arguments are passed as a list with shell=False: paths containing
        spaces or quotes need no escaping and cannot break the command.
        """
        command = ["inkscape", "-C", "-e", output_path, svg_path]
        p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.wait()

    def convertPngToJpg(self, png_path, output_path):
        """Convert the PNG to JPEG via ImageMagick's ``convert``."""
        command = ["convert", png_path, output_path]
        p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.wait()
def _main():
    """Script entry point: run the export effect, then terminate."""
    effect = PNGExport()
    effect.affect()
    exit()
if __name__ == "__main__":
    # Invoked by Inkscape as an extension script.
    _main()
Add encoding to Subprocess commands
#! /usr/bin/env python
import sys
sys.path.append('/usr/share/inkscape/extensions')
import inkex
import os
import subprocess
import tempfile
import shutil
import copy
class PNGExport(inkex.Effect):
    """Inkscape effect: export every "[export]" layer to its own PNG (and
    optionally JPEG), keeping all "[fixed]" layers visible in each image."""

    def __init__(self):
        """Init the effect library and get options from gui."""
        inkex.Effect.__init__(self)
        self.OptionParser.add_option("--path", action="store", type="string", dest="path", default="~/", help="")
        self.OptionParser.add_option('-f', '--filetype', action='store', type='string', dest='filetype', default='jpeg', help='Exported file type')

    def effect(self):
        """Entry point: write one numbered image per exportable layer."""
        output_path = os.path.expanduser(self.options.path)
        curfile = self.args[-1]
        layers = self.get_layers(curfile)
        counter = 1
        for (layer_id, layer_label, layer_type) in layers:
            if layer_type == "fixed":
                continue
            # show this layer plus every fixed (background) layer
            show_layer_ids = [layer[0] for layer in layers if layer[2] == "fixed" or layer[0] == layer_id]
            if not os.path.exists(os.path.join(output_path)):
                os.makedirs(os.path.join(output_path))
            layer_dest_svg_path = os.path.join(output_path, "%s.svg" % layer_label)
            layer_dest_png_path = os.path.join(output_path, "%s_%s.png" % (str(counter).zfill(3), layer_label))
            self.export_layers(layer_dest_svg_path, show_layer_ids)
            self.exportToPng(layer_dest_svg_path, layer_dest_png_path)
            if self.options.filetype == "jpeg":
                layer_dest_jpg_path = os.path.join(output_path, "%s_%s.jpg" % (str(counter).zfill(3), layer_label))
                self.convertPngToJpg(layer_dest_png_path, layer_dest_jpg_path)
                os.unlink(layer_dest_png_path)
            os.unlink(layer_dest_svg_path)
            counter += 1

    def export_layers(self, dest, show):
        """
        Export selected layers of SVG to the file `dest`.
        :arg str dest: path to export SVG file.
        :arg list show: layer ids to show. each element is a string.
        """
        doc = copy.deepcopy(self.document)
        for layer in doc.xpath('//svg:g[@inkscape:groupmode="layer"]', namespaces=inkex.NSS):
            layer.attrib['style'] = 'display:none'
            id = layer.attrib["id"]
            if id in show:
                layer.attrib['style'] = 'display:inline'
        doc.write(dest)

    def get_layers(self, src):
        """Return [layer_id, label, type] triples for layers labelled
        "[fixed] ..." or "[export] ..."; other layers are skipped."""
        svg_layers = self.document.xpath('//svg:g[@inkscape:groupmode="layer"]', namespaces=inkex.NSS)
        layers = []
        for layer in svg_layers:
            label_attrib_name = "{%s}label" % layer.nsmap['inkscape']
            if label_attrib_name not in layer.attrib:
                continue
            layer_id = layer.attrib["id"]
            layer_label = layer.attrib[label_attrib_name]
            if layer_label.lower().startswith("[fixed] "):
                layer_type = "fixed"
                layer_label = layer_label[8:]
            elif layer_label.lower().startswith("[export] "):
                layer_type = "export"
                layer_label = layer_label[9:]
            else:
                continue
            layers.append([layer_id, layer_label, layer_type])
        return layers

    def exportToPng(self, svg_path, output_path):
        """Render the SVG to PNG via the inkscape CLI.

        Arguments are passed as a list with shell=False: paths containing
        spaces or quotes need no escaping, and the unicode .encode()
        workaround for the shell string becomes unnecessary.
        """
        command = ["inkscape", "-C", "-e", output_path, svg_path]
        p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.wait()

    def convertPngToJpg(self, png_path, output_path):
        """Convert the PNG to JPEG via ImageMagick's ``convert``."""
        command = ["convert", png_path, output_path]
        p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.wait()
def _main():
    """Script entry point: run the export effect, then terminate."""
    effect = PNGExport()
    effect.affect()
    exit()
if __name__ == "__main__":
    # Invoked by Inkscape as an extension script.
    _main()
|
from functools import wraps
import os
import shutil
import sys
from _pysh.conda import delete_conda_env, reset_conda_env, reset_conda_env_offline, download_conda_deps
from _pysh.config import load_config
from _pysh.pip import install_pip_deps, install_pip_deps_offline, download_pip_deps
from _pysh.shell import shell, shell_local, shell_local_exec
from _pysh.styles import apply_styles
from _pysh.tasks import TaskError, mark_task
from _pysh.utils import rimraf, mkdirp
def prevent_unknown(func):
    """Decorator: reject invocations carrying unparsed extra arguments.

    The wrapped command receives only ``opts``; a TaskError is raised when
    any unknown arguments are present.
    """
    @wraps(func)
    def do_prevent_unknown(opts, unknown_args):
        if unknown_args:
            message = " ".join(unknown_args)
            raise TaskError("Unknown arguments: {}".format(message))
        return func(opts)
    return do_prevent_unknown
@prevent_unknown
def install(opts):
    """Install the conda environment and pip dependencies, then run any
    project-defined install scripts.

    Uses the offline (pre-downloaded) variants when --offline is set.
    """
    config = load_config(opts)
    if opts.offline:
        reset_conda_env_offline(opts, config)
        install_pip_deps_offline(opts, config)
    else:
        reset_conda_env(opts, config)
        install_pip_deps(opts, config)
    # Run install scripts.  Default the "pysh" section to an empty dict so a
    # config without it no longer crashes with AttributeError on None.
    install_scripts = config.get("pysh", {}).get("install", [])
    if install_scripts:
        with mark_task(opts, "Running install scripts"):
            for install_script in install_scripts:
                shell_local(opts, install_script)
@prevent_unknown
def download_deps(opts):
    """Download conda and pip dependencies into the local cache so a later
    ``install --offline`` can run without network access."""
    config = load_config(opts)
    download_conda_deps(opts)
    download_pip_deps(opts, config)
@prevent_unknown
def dist(opts):
    """Build a standalone zip distribution of the project.

    Copies the git HEAD tree and downloaded dependencies into a clean
    build dir, zips the result into the dist dir, and always cleans up
    the build dir and the temporary conda env afterwards.
    """
    config = load_config(opts)
    reset_conda_env(opts, config)
    try:
        # Create a build environment.
        build_path = os.path.join(opts.work_path, "build")
        rimraf(build_path)
        mkdirp(build_path)
        try:
            # Copy source.
            with mark_task(opts, "Copying source"):
                shell(
                    opts,
                    "cd {root_path} && git archive HEAD --format=tar | tar -x -C {build_path}",
                    root_path=opts.root_path,
                    build_path=build_path,
                )
            # Download deps.
            download_conda_deps(opts)
            download_pip_deps(opts, config)
            # Copy libs.
            with mark_task(opts, "Copying libs"):
                shutil.copytree(
                    opts.lib_path,
                    os.path.join(build_path, os.path.relpath(opts.lib_path, opts.root_path)),
                )
            # Compress the build.
            dist_path = os.path.join(opts.root_path, opts.dist_dir)
            mkdirp(dist_path)
            # Plain format() -- the old single-argument os.path.join()
            # wrapper around this string was a no-op.
            dist_file = "{name}-{version}-{os_name}-amd64.zip".format(
                name=config.get("name", os.path.basename(opts.root_path)),
                version=config.get("version", "1.0.0"),
                os_name=opts.os_name,
            )
            with mark_task(opts, "Creating archive {}".format(dist_file)):
                dist_file_path = os.path.join(dist_path, dist_file)
                rimraf(dist_file_path)
                shell(
                    opts,
                    "cd {build_path} && zip -9 -qq -r {dist_file_path} './'",
                    build_path=build_path,
                    dist_file_path=dist_file_path,
                )
        finally:
            rimraf(build_path)
    finally:
        delete_conda_env(opts)
@prevent_unknown
def activate(opts):
    """Spawn an interactive bash shell inside the project environment,
    with a prompt naming the package and a hint on how to leave."""
    config = load_config(opts)
    package_name = config.get("name", os.path.basename(opts.root_path))
    with mark_task(opts, "Activating {} environment".format(opts.conda_env)):
        # The printf banner and PS1 export run inside the spawned shell.
        shell_local_exec(
            opts,
            apply_styles(opts, """printf "{success}done!{plain}
Deactivate environment with {code}exit{plain} or {code}[Ctl+D]{plain}.
" && export PS1="({code}{{package_name}}{plain}) \\h:\\W \\u\\$ " && bash"""),
            package_name=package_name,
        )
def run(opts, unknown_args):
    """Execute an arbitrary command inside the managed environment.

    Unlike the other commands this deliberately accepts (and forwards)
    unknown arguments: they form the command line to run.
    """
    shell_local_exec(
        opts,
        "{unknown_args}",
        unknown_args=unknown_args,
    )
@prevent_unknown
def welcome(opts):
    """Print the post-install welcome banner and usage hints."""
    sys.stdout.write(apply_styles(opts, r'''{success} _
{success} | |
{success} _ __ _ _ ___| |__
{success}| '_ \| | | | / __| '_ \
{success}| |_) | |_| |_\__ \ | | |
{success}| .__/ \__, (_)___/_| |_|
{success}| | __/ |
{success}|_| |___/
{success}py.sh{plain} is now installed!
A standalone Python interpreter has been installed into {code}{{work_dir}}{plain}.
{success}Recommended:{plain} Add {code}{{work_dir}}{plain} to your {code}.gitignore{plain} file.
Use {code}./{{script_name}}{plain} to manage your environment.
{success}Hint:{plain} You can learn a lot from {code}./{{script_name}} --help{plain}
and {code}./{{script_name}} <command name> --help{plain}.
''').format(
        work_dir=os.path.relpath(opts.work_path, opts.root_path),
        script_name=opts.script_name,
    ))
Fixing quoting of styles in active shell, which was causing line wrap issues. Closes #27
from functools import wraps
import os
import shutil
import sys
from _pysh.conda import delete_conda_env, reset_conda_env, reset_conda_env_offline, download_conda_deps
from _pysh.config import load_config
from _pysh.pip import install_pip_deps, install_pip_deps_offline, download_pip_deps
from _pysh.shell import shell, shell_local, shell_local_exec
from _pysh.styles import apply_styles
from _pysh.tasks import TaskError, mark_task
from _pysh.utils import rimraf, mkdirp
def prevent_unknown(func):
    """Decorator: reject invocations carrying unparsed extra arguments.

    The wrapped command receives only ``opts``; a TaskError is raised when
    any unknown arguments are present.
    """
    @wraps(func)
    def do_prevent_unknown(opts, unknown_args):
        if unknown_args:
            message = " ".join(unknown_args)
            raise TaskError("Unknown arguments: {}".format(message))
        return func(opts)
    return do_prevent_unknown
@prevent_unknown
def install(opts):
    """Install the conda environment and pip dependencies, then run any
    project-defined install scripts.

    Uses the offline (pre-downloaded) variants when --offline is set.
    """
    config = load_config(opts)
    if opts.offline:
        reset_conda_env_offline(opts, config)
        install_pip_deps_offline(opts, config)
    else:
        reset_conda_env(opts, config)
        install_pip_deps(opts, config)
    # Run install scripts.  Default the "pysh" section to an empty dict so a
    # config without it no longer crashes with AttributeError on None.
    install_scripts = config.get("pysh", {}).get("install", [])
    if install_scripts:
        with mark_task(opts, "Running install scripts"):
            for install_script in install_scripts:
                shell_local(opts, install_script)
@prevent_unknown
def download_deps(opts):
    """Download conda and pip dependencies into the local cache so a later
    ``install --offline`` can run without network access."""
    config = load_config(opts)
    download_conda_deps(opts)
    download_pip_deps(opts, config)
@prevent_unknown
def dist(opts):
    """Build a standalone zip distribution of the project.

    Copies the git HEAD tree and downloaded dependencies into a clean
    build dir, zips the result into the dist dir, and always cleans up
    the build dir and the temporary conda env afterwards.
    """
    config = load_config(opts)
    reset_conda_env(opts, config)
    try:
        # Create a build environment.
        build_path = os.path.join(opts.work_path, "build")
        rimraf(build_path)
        mkdirp(build_path)
        try:
            # Copy source.
            with mark_task(opts, "Copying source"):
                shell(
                    opts,
                    "cd {root_path} && git archive HEAD --format=tar | tar -x -C {build_path}",
                    root_path=opts.root_path,
                    build_path=build_path,
                )
            # Download deps.
            download_conda_deps(opts)
            download_pip_deps(opts, config)
            # Copy libs.
            with mark_task(opts, "Copying libs"):
                shutil.copytree(
                    opts.lib_path,
                    os.path.join(build_path, os.path.relpath(opts.lib_path, opts.root_path)),
                )
            # Compress the build.
            dist_path = os.path.join(opts.root_path, opts.dist_dir)
            mkdirp(dist_path)
            # Plain format() -- the old single-argument os.path.join()
            # wrapper around this string was a no-op.
            dist_file = "{name}-{version}-{os_name}-amd64.zip".format(
                name=config.get("name", os.path.basename(opts.root_path)),
                version=config.get("version", "1.0.0"),
                os_name=opts.os_name,
            )
            with mark_task(opts, "Creating archive {}".format(dist_file)):
                dist_file_path = os.path.join(dist_path, dist_file)
                rimraf(dist_file_path)
                shell(
                    opts,
                    "cd {build_path} && zip -9 -qq -r {dist_file_path} './'",
                    build_path=build_path,
                    dist_file_path=dist_file_path,
                )
        finally:
            rimraf(build_path)
    finally:
        delete_conda_env(opts)
@prevent_unknown
def activate(opts):
    """Spawn an interactive bash shell inside the project environment,
    with a prompt naming the package and a hint on how to leave."""
    config = load_config(opts)
    package_name = config.get("name", os.path.basename(opts.root_path))
    with mark_task(opts, "Activating {} environment".format(opts.conda_env)):
        # \[...\] brackets mark the style escape sequences as non-printing
        # so bash computes the prompt width correctly (avoids wrap issues).
        shell_local_exec(
            opts,
            apply_styles(opts, """printf "{success}done!{plain}
Deactivate environment with {code}exit{plain} or {code}[Ctl+D]{plain}.
" && export PS1="(\[{code}\]{{package_name}}\[{plain}\]) \\h:\\W \\u\\$ " && bash"""),
            package_name=package_name,
        )
def run(opts, unknown_args):
    """Execute an arbitrary command inside the managed environment.

    Unlike the other commands this deliberately accepts (and forwards)
    unknown arguments: they form the command line to run.
    """
    shell_local_exec(
        opts,
        "{unknown_args}",
        unknown_args=unknown_args,
    )
@prevent_unknown
def welcome(opts):
    """Print the post-install welcome banner and usage hints."""
    sys.stdout.write(apply_styles(opts, r'''{success} _
{success} | |
{success} _ __ _ _ ___| |__
{success}| '_ \| | | | / __| '_ \
{success}| |_) | |_| |_\__ \ | | |
{success}| .__/ \__, (_)___/_| |_|
{success}| | __/ |
{success}|_| |___/
{success}py.sh{plain} is now installed!
A standalone Python interpreter has been installed into {code}{{work_dir}}{plain}.
{success}Recommended:{plain} Add {code}{{work_dir}}{plain} to your {code}.gitignore{plain} file.
Use {code}./{{script_name}}{plain} to manage your environment.
{success}Hint:{plain} You can learn a lot from {code}./{{script_name}} --help{plain}
and {code}./{{script_name}} <command name> --help{plain}.
''').format(
        work_dir=os.path.relpath(opts.work_path, opts.root_path),
        script_name=opts.script_name,
    ))
|
#
# Copyright (c) 2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import re
import socket
from gofer import singleton
from iniparse import INIConfig
from iniparse.config import Undefined
from gofer.agent.logutil import getLogger
log = getLogger(__name__)
def ndef(x):
    """
    Section/property not defined.
    iniparse returns an Undefined placeholder for missing entries rather
    than raising, so this is the canonical existence test.
    @param x: A section/property
    @type x: A section or property object.
    @return: True if not defined.
    """
    return isinstance(x, Undefined)
def nvl(x, d=None):
    """
    Null-value coalescing for iniparse objects.
    @param x: An object to check.
    @type x: A section/property
    @param d: The default returned for undefined values.
    @return: d if not defined, else x.
    """
    return d if ndef(x) else x
class Base(INIConfig):
    """
    Base configuration.
    Uses L{Reader} which provides import.
    """
    def __init__(self, path):
        """
        @param path: The path to an INI file.
        @type path: str
        """
        # Reader pre-processes @import directives and $(var) substitution
        # before iniparse parses the resulting lines.
        fp = Reader(path)
        try:
            INIConfig.__init__(self, fp)
        finally:
            fp.close()
@singleton
class Config(Base):
    """
    The gofer agent configuration.
    @cvar ROOT: The root configuration directory.
    @type ROOT: str
    @cvar PATH: The absolute path to the config directory.
    @type PATH: str
    @cvar USER: The path to an alternate configuration file
        within the user's home.
    @type USER: str
    @cvar ALT: The environment variable with a path to an alternate
        configuration file.
    @type ALT: str
    """

    ROOT = '/etc/gofer'
    FILE = 'agent.conf'
    PATH = os.path.join(ROOT, FILE)
    USER = os.path.join('~/.gofer', FILE)
    CNFD = os.path.join(ROOT, 'conf.d')
    ALT = 'GOFER_OVERRIDE'

    def __init__(self):
        """
        Open the configuration.
        Merge (in) alternate configuration file when specified
        by environment variable.
        """
        try:
            Base.__init__(self, self.PATH)
            self.__addconfd()
            altpath = self.__altpath()
            if altpath:
                alt = Base(altpath)
                self.__mergeIn(alt)
                log.info('merged[in]:%s\n%s', altpath, self)
        except Exception:
            # Narrowed from a bare except: still logs and re-raises, but no
            # longer intercepts SystemExit/KeyboardInterrupt.
            log.error(self.PATH, exc_info=1)
            raise

    def __update(self, other):
        """
        Update with the specified I{other} configuration.
        @param other: The conf to update with.
        @type other: Base
        @return: self
        @rtype: L{Config}
        """
        for section in other:
            for key in other[section]:
                self[section][key] = other[section][key]
        return self

    def __mergeIn(self, other):
        """
        Merge (in) the specified I{other} configuration.
        Only sections already present in self are merged.
        @param other: The conf to merge in.
        @type other: Base
        @return: self
        @rtype: L{Config}
        """
        for section in other:
            if section not in self:
                continue
            for key in other[section]:
                self[section][key] = other[section][key]
        return self

    def __mergeOut(self, other):
        """
        Merge (out) to the specified I{other} configuration.
        Only sections already present in self are merged.
        @param other: The conf to merge out.
        @type other: Base
        @return: self
        @rtype: L{Config}
        """
        for section in other:
            if section not in self:
                continue
            for key in other[section]:
                other[section][key] = self[section][key]
        return self

    def write(self):
        """
        Write the configuration: to the alternate file when one is in
        effect, to the system path otherwise.
        """
        altpath = self.__altpath()
        if altpath:
            # Bug fix: the original called self.__read(), which is not
            # defined anywhere; re-read the alternate file via Base instead.
            alt = Base(altpath)
            self.__mergeOut(alt)
            log.info('merge[out]:%s\n%s', altpath, alt)
            path = altpath
            s = str(alt)
        else:
            path = self.PATH
            s = str(self)
        fp = open(path, 'w')
        try:
            fp.write(s)
        finally:
            fp.close()

    def __altpath(self):
        """
        Get the I{alternate} configuration path.
        Resolution order: ALT, USER
        @return: The path to the alternate configuration file,
            or None when no alternate exists.
        @rtype: str
        """
        path = os.environ.get(self.ALT)
        if path:
            return path
        path = os.path.expanduser(self.USER)
        if os.path.exists(path):
            return path
        # Bug fix: was a bare 'None' expression statement; return explicitly.
        return None

    def __addconfd(self):
        """
        Read and merge the conf.d files.
        """
        for fn in os.listdir(self.CNFD):
            path = os.path.join(self.CNFD, fn)
            cfg = Base(path)
            self.__update(cfg)
            log.info('updated with: %s\n%s', path, self)
class Properties:
    """
    Import property specification.
    @ivar pattern: The regex for property specification.
    @type pattern: I{regex.pattern}
    @ivar vdict: The variable dictionary.
    @type vdict: dict
    @ivar plain: The list of I{plain} properties to import.
    @type plain: [str,..]
    """

    # Raw string avoids fragile backslash escapes; matches <name>(<variable>).
    pattern = re.compile(r'([^(]+)(\()([^)]+)(\))')

    def __init__(self, properties=()):
        """
        @param properties: A list of property specifications.
        @type properties: [str,..]
        """
        self.vdict = {}
        self.plain = []
        for p in properties:
            if not p:
                continue
            m = self.pattern.match(p)
            if m:
                # <name>(<variable>) form: remember the variable mapping.
                key = m.group(1).strip()
                value = m.group(3).strip()
                self.vdict[key] = value
            else:
                self.plain.append(p)

    def isplain(self, property):
        """
        Get whether a property is I{plain} and is to be imported.
        @param property: A property name.
        @type property: str
        @return: True when property is to be imported.
        @rtype: bool
        """
        return ( property in self.plain )

    def var(self, property):
        """
        Get the property's declared variable name.
        @param property: A property name.
        @type property: str
        @return: The variable name declared for the property
            or None when not declared.
        @rtype: str
        """
        return self.vdict.get(property)

    def empty(self):
        """
        Get whether the object is empty.
        @return: True no properties defined.
        @rtype: bool
        """
        return ( len(self) == 0 )

    def __iter__(self):
        # list() keeps this working on Python 3, where dict.keys() is a
        # view that cannot be concatenated with +=.
        keys = list(self.vdict.keys()) + self.plain
        return iter(keys)

    def __len__(self):
        return ( len(self.vdict)+len(self.plain) )
class Import:
    """
    Represents an import directive.
    @import:<path>:<section>:<property>,
    where <property> is: <name>|<name>(<variable>).
    When the <variable> form is used, a variable is assigned the value
    to be used as $(var) in the conf rather than imported.
    @cvar allproperties: An (empty) object representing all properties
        are to be imported.
    @type allproperties: L{Properties}
    @ivar path: The path to the imported ini file.
    @type path: str
    @ivar section: The name of the section to be imported.
    @type section: str
    @ivar properties: The property specification.
    @type properties: L{Properties}
    """

    allproperties = Properties()

    def __init__(self, imp):
        """
        @param imp: An import directive.
        @type imp: str
        """
        part = imp.split(':')
        self.path = part[1]
        self.section = None
        self.properties = self.allproperties
        if len(part) > 2:
            self.section = part[2].strip()
        if len(part) > 3:
            plist = [s.strip() for s in part[3].split(',')]
            self.properties = Properties(plist)

    def __call__(self):
        """
        Execute the import directive.
        @return: The (imported) lines & declared (vdict) variables.
        @rtype: tuple(<imported>,<vdict>)
        """
        vdict = {}
        # 'source' (was 'input') avoids shadowing the builtin.
        source = Base(self.path)
        if not self.section:
            return (source, vdict)
        imported = INIConfig()
        S = source[self.section]
        if ndef(S):
            # Python 3 compatible raise (was: raise Exception, '...').
            raise Exception('[%s] not found in %s' % (self.section, self.path))
        for k in S:
            v = source[self.section][k]
            if self.properties.empty() or self.properties.isplain(k):
                imported[self.section][k] = v
            else:
                var = self.properties.var(k)
                if var:
                    vdict[var] = v
        return (imported, vdict)
class Reader:
    """
    File reader.
    post-process directives: expands @import lines and substitutes
    $(var) references before the content reaches the INI parser.
    @ivar idx: The line index.
    @type idx: int
    @ivar vdict: The variable dictionary.
    @type vdict: dict
    @ivar path: The path to a file to read.
    @type path: str
    """

    # Built-in variables usable as $(name) in any configuration file.
    BUILTIN = {
        'hostname':socket.gethostname(),
    }

    def __init__(self, path):
        self.idx = 0
        self.vdict = {}
        self.path = path
        log.info('reading: %s', path)
        f = open(path)
        try:
            bfr = f.read()
            self.vdict.update(self.BUILTIN)
            # Whole file is read and post-processed eagerly; readline()
            # then serves from the in-memory list.
            self.lines = self.__post(bfr.split('\n'))
        finally:
            f.close()

    def readline(self):
        """
        read the next line.
        @return: The line, or None (implicitly) on EOF.
        @rtype: str
        """
        if self.idx < len(self.lines):
            ln = self.lines[self.idx]
            self.idx += 1
            return ln+'\n'

    def close(self):
        # The underlying file was already closed in __init__.
        pass

    def __post(self, input):
        """
        Post-process file content for directives.
        @param input: The file content (lines).
        @type input: list
        @return: The post processed content.
        @rtype: list
        """
        output = []
        for ln in input:
            if ln.startswith('@import'):
                # Expand the directive into the imported file's lines.
                for ln in self.__import(ln):
                    output.append(ln)
            else:
                ln = self.__repl(ln)
                output.append(ln)
        return output

    def __import(self, ln):
        """
        Process an I{import} directive and return the result.
        Declared variables are merged into self.vdict for later $(var)
        substitution.
        @param ln: A line containing the directive.
        @type ln: str
        @return: The import result (lines).
        @rtype: [str,..]
        """
        log.info('processing: %s', ln)
        imp = Import(ln)
        imported, vdict = imp()
        self.vdict.update(vdict)
        return str(imported).split('\n')

    def __repl(self, ln):
        """
        Replace variables contained in the line.
        @param ln: A file line.
        @type ln: str
        @return: The line w/ vars replaced.
        @rtype: str
        """
        for k,v in self.vdict.items():
            var = '$(%s)' % k
            if var in ln:
                ln = ln.replace(var, v)
                log.info('line "%s" s/%s/%s/', ln, var, v)
        return ln

    def __str__(self):
        return self.path
if __name__ == '__main__':
    # Manual smoke test: load and dump the merged agent configuration.
    cfg = Config()
    print cfg
Replace builtin variables with macros (format=%{macro}).
#
# Copyright (c) 2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import re
import socket
from gofer import singleton
from iniparse import INIConfig
from iniparse.config import Undefined
from gofer.agent.logutil import getLogger
log = getLogger(__name__)
def ndef(x):
    """
    Section/property not defined.
    @param x: A section/property
    @type x: A section or property object.
    @return: True if not defined (an iniparse L{Undefined} placeholder).
    @rtype: bool
    """
    return isinstance(x, Undefined)
def nvl(x, d=None):
    """
    Null-value coalescing for iniparse objects.
    @param x: An object to check.
    @type x: A section/property
    @param d: The default returned when x is not defined.
    @return: d if not defined, else x.
    """
    return d if ndef(x) else x
class Base(INIConfig):
    """
    Base configuration.
    Uses L{Reader} which provides import (directive processing and
    variable substitution) before the INI content is parsed.
    """
    def __init__(self, path):
        """
        @param path: The path to an INI file.
        @type path: str
        """
        fp = Reader(path)
        try:
            INIConfig.__init__(self, fp)
        finally:
            # Reader.close() is a no-op, but keep the file-like protocol.
            fp.close()
@singleton
class Config(Base):
    """
    The gofer agent configuration.
    @cvar ROOT: The root configuration directory.
    @type ROOT: str
    @cvar FILE: The base configuration file name.
    @type FILE: str
    @cvar PATH: The absolute path to the config directory.
    @type PATH: str
    @cvar USER: The path to an alternate configuration file
        within the user's home.
    @type USER: str
    @cvar CNFD: The drop-in (conf.d) configuration directory.
    @type CNFD: str
    @cvar ALT: The environment variable with a path to an alternate
        configuration file.
    @type ALT: str
    """
    ROOT = '/etc/gofer'
    FILE = 'agent.conf'
    PATH = os.path.join(ROOT, FILE)
    USER = os.path.join('~/.gofer', FILE)
    CNFD = os.path.join(ROOT, 'conf.d')
    ALT = 'GOFER_OVERRIDE'
    def __init__(self):
        """
        Open the configuration.
        Merge (in) alternate configuration file when specified
        by environment variable.
        """
        try:
            Base.__init__(self, self.PATH)
            self.__addconfd()
            altpath = self.__altpath()
            if altpath:
                alt = Base(altpath)
                self.__mergeIn(alt)
                log.info('merged[in]:%s\n%s', altpath, self)
        except:
            # Log with the config path for context, then re-raise so the
            # caller still sees the original failure.
            log.error(self.PATH, exc_info=1)
            raise
    def __update(self, other):
        """
        Update with the specified I{other} configuration.
        All sections/keys in I{other} overwrite (or add to) this one.
        @param other: The conf to update with.
        @type other: Base
        @return: self
        @rtype: L{Config}
        """
        for section in other:
            for key in other[section]:
                self[section][key] = other[section][key]
        return self
    def __mergeIn(self, other):
        """
        Merge (in) the specified I{other} configuration.
        Unlike __update, sections unknown to this conf are ignored.
        @param other: The conf to merge in.
        @type other: Base
        @return: self
        @rtype: L{Config}
        """
        for section in other:
            if section not in self:
                continue
            for key in other[section]:
                self[section][key] = other[section][key]
        return self
    def __mergeOut(self, other):
        """
        Merge (out) to the specified I{other} configuration.
        Only sections already present in I{other} are written.
        @param other: The conf to merge out.
        @type other: Base
        @return: self
        @rtype: L{Config}
        """
        for section in other:
            if section not in self:
                continue
            for key in other[section]:
                other[section][key] = self[section][key]
        return self
    def write(self):
        """
        Write the configuration.
        When an alternate path is active, changes are merged out to the
        alternate file; otherwise PATH is rewritten.
        """
        altpath = self.__altpath()
        if altpath:
            alt = self.__read(altpath)
            self.__mergeOut(alt)
            log.info('merge[out]:%s\n%s', altpath, alt)
            path = altpath
            s = str(alt)
        else:
            path = self.PATH
            s = str(self)
        fp = open(path, 'w')
        try:
            fp.write(s)
        finally:
            fp.close()
    def __altpath(self):
        """
        Get the I{alternate} configuration path.
        Resolution order: ALT, USER
        @return: The path to the alternate configuration file,
            or None when no alternate is configured.
        @rtype: str
        """
        path = os.environ.get(self.ALT)
        if path:
            return path
        path = os.path.expanduser(self.USER)
        if os.path.exists(path):
            return path
        else:
            # Fixed: was a bare 'None' expression statement (no effect);
            # make the implicit fall-through an explicit return.
            return None
    def __addconfd(self):
        """
        Read and merge the conf.d files.
        """
        for fn in os.listdir(self.CNFD):
            path = os.path.join(self.CNFD, fn)
            cfg = Base(path)
            self.__update(cfg)
            log.info('updated with: %s\n%s', path, self)
class Properties:
    """
    Import property specification.
    A specification is either a plain name ("prop") or a variable
    declaration ("prop(var)").
    @cvar pattern: The regex matching the "name(variable)" form.
    @type pattern: I{regex.pattern}
    @ivar vdict: Maps property name -> declared variable name.
    @type vdict: dict
    @ivar plain: The list of I{plain} properties to import.
    @type plain: [str,..]
    """
    pattern = re.compile('([^(]+)(\()([^)]+)(\))')
    def __init__(self, properties=()):
        """
        @param properties: A list of property specifications.
        @type properties: [str,..]
        """
        self.vdict = {}
        self.plain = []
        for spec in properties:
            if not spec:
                continue
            matched = self.pattern.match(spec)
            if matched is None:
                self.plain.append(spec)
            else:
                name = matched.group(1).strip()
                variable = matched.group(3).strip()
                self.vdict[name] = variable
    def isplain(self, property):
        """
        Get whether a property is I{plain} and is to be imported.
        @param property: A property name.
        @type property: str
        @return: True when property is to be imported.
        @rtype: bool
        """
        return property in self.plain
    def var(self, property):
        """
        Get the property's declared variable name.
        @param property: A property name.
        @type property: str
        @return: The variable name declared for the property
            or None when not declared.
        @rtype: str
        """
        return self.vdict.get(property)
    def empty(self):
        """
        Get whether the object is empty.
        @return: True when no properties are defined.
        @rtype: bool
        """
        return len(self) == 0
    def __iter__(self):
        return iter(list(self.vdict.keys()) + self.plain)
    def __len__(self):
        return len(self.vdict) + len(self.plain)
class Import:
    """
    Represents an import directive.
    @import:<path>:<section>:<property>,
    where <property> is: <name>|<name>(<variable>).
    When the <variable> form is used, a variable is assigned the value
    to be used as $(var) in the conf rather than imported.
    @cvar allproperties: An (empty) object representing all properties
        are to be imported.
    @type allproperties: L{Properties}
    @ivar path: The path to the imported ini file.
    @type path: str
    @ivar section: The name of the section to be imported.
    @type section: str
    @ivar properties: The property specification.
    @type properties: L{Properties}
    """
    allproperties = Properties()
    def __init__(self, imp):
        """
        @param imp: An import directive.
        @type imp: str
        """
        # Directive format: @import:<path>[:<section>[:<p1>,<p2>,..]]
        part = imp.split(':')
        self.path = part[1]
        self.section = None
        self.properties = self.allproperties
        if len(part) > 2:
            self.section = part[2].strip()
        if len(part) > 3:
            plist = [s.strip() for s in part[3].split(',')]
            self.properties = Properties(plist)
    def __call__(self):
        """
        Execute the import directive.
        @return: The (imported) lines & declared (vdict) variables.
        @rtype: tuple(<imported>,<vdict>)
        """
        vdict = {}
        input = Base(self.path)
        if not self.section:
            # No section restriction: import the whole file, no variables.
            return (input, vdict)
        imported = INIConfig()
        S = input[self.section]
        if ndef(S):
            raise Exception, '[%s] not found in %s' % (self.section, self.path)
        for k in S:
            v = input[self.section][k]
            if self.properties.empty() or self.properties.isplain(k):
                # Plain (or unrestricted) property: copied into the result.
                imported[self.section][k] = v
            else:
                var = self.properties.var(k)
                if var:
                    # Declared as a variable: exposed as $(var), not imported.
                    var = '$(%s)' % var.strip()
                    vdict[var] = v
        return (imported, vdict)
class Reader:
    """
    File reader.
    Buffers the whole file up front and post-processes directives
    (@import) plus macro/variable substitution.
    @cvar MACROS: Builtin macro substitutions (format: %{macro}).
    @type MACROS: dict
    @ivar idx: The line index.
    @type idx: int
    @ivar vdict: The variable dictionary.
    @type vdict: dict
    @ivar path: The path to a file to read.
    @type path: str
    """
    MACROS = {
        '%{hostname}':socket.gethostname(),
    }
    def __init__(self, path):
        self.idx = 0
        self.vdict = {}
        self.path = path
        log.info('reading: %s', path)
        f = open(path)
        try:
            bfr = f.read()
            self.lines = self.__post(bfr.split('\n'))
        finally:
            f.close()
    def readline(self):
        """
        Read the next line.
        @return: The line, or None on EOF.
        @rtype: str
        """
        if self.idx < len(self.lines):
            ln = self.lines[self.idx]
            self.idx += 1
            return ln+'\n'
    def close(self):
        # No-op: the file was fully consumed in __init__.
        pass
    def __post(self, input):
        """
        Post-process file content for directives.
        @param input: The file content (lines).
        @type input: list
        @return: The post processed content.
        @rtype: list
        """
        output = []
        for ln in input:
            if ln.startswith('@import'):
                # Replace the directive line with the imported lines.
                for ln in self.__import(ln):
                    output.append(ln)
            else:
                ln = self.__repl(ln)
                output.append(ln)
        return output
    def __import(self, ln):
        """
        Process an I{import} directive and return the result.
        @param ln: A line containing the directive.
        @type ln: str
        @return: The import result (lines).
        @rtype: [str,..]
        """
        log.info('processing: %s', ln)
        imp = Import(ln)
        imported, vdict = imp()
        # Variables declared by the import apply to subsequent lines.
        self.vdict.update(vdict)
        return str(imported).split('\n')
    def __repl(self, ln):
        """
        Replace macros and variables contained in the line.
        @param ln: A file line.
        @type ln: str
        @return: The line w/ vars replaced.
        @rtype: str
        """
        # vdict keys are stored already wrapped as $(name); MACROS as %{name}.
        for k,v in self.MACROS.items()+self.vdict.items():
            if k in ln:
                log.info('line "%s" s/%s/%s/', ln, k, v)
                ln = ln.replace(k, v)
        return ln
    def __str__(self):
        return self.path
if __name__ == '__main__':
    # Smoke test: load the merged agent configuration and dump it.
    cfg = Config()
    print cfg
"""
Generate the climodat reports, please! Run from run.sh
"""
import pg
mydb = pg.connect('coop', 'iemdb', user='nobody')
import traceback
import psycopg2.extras
pgconn = psycopg2.connect(database='coop', host='iemdb', user='nobody')
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
import genPrecipEvents
import gen30rains
import genGDD
import genDailyRecords
import genDailyRecordsRain
import genDailyRange
import genDailyMeans
import genCountLows32
import genSpringFall
import genMonthly
import genHDD
import genCDD
import genHeatStress
import genCountRain
import genFrostProbabilities
import genSpringProbabilities
import genCycles
import genCountSnow
import genTempThresholds
import genRecordPeriods
import gen_precip_cats
import constants
import sys
import datetime
# Simplified from 'True if <cond> else False' to the boolean expression
# itself.  update_all: full-rebuild flag — presumably consumed by the gen*
# modules; it is not read directly in this file's visible code.
update_all = datetime.datetime.today().day == 1
runstations = constants.nt.sts.keys()
if len(sys.argv) == 2:
    # A single command line argument restricts the run to that station id
    # and forces the full rebuild.
    runstations = [sys.argv[1], ]
    update_all = True
def caller(func, *args):
    """Invoke *func* with *args*, passing its result straight through."""
    return func(*args)
def run_station(dbid):
    """Generate the full suite of climodat report products for one station.

    dbid -- station identifier keyed into constants.nt.sts.

    Side effects only: each numbered product ("01".."28") is written via
    constants.make_output() and closed immediately after its generator runs.
    """
    table = constants.get_table(dbid)
    # print "processing [%s] %s" % (dbid, constants.nt.sts[dbid]["name"])
    # Daily observations joined with the climoweek lookup; rows lacking
    # precip are excluded.
    sql = """
    SELECT d.*, c.climoweek from %s d, climoweek c
    WHERE station = '%s' and day >= '%s-01-01' and d.sday = c.sday
    and precip is not null
    ORDER by day ASC
    """ % (table, dbid, constants.startyear(dbid))
    rs = caller(mydb.query, sql).dictresult()
    # Compute monthly
    cursor.execute("""
    SELECT year, month, sum(precip) as sum_precip,
    avg(high) as avg_high,
    avg(low) as avg_low,
    sum(cdd(high,low,60)) as cdd60,
    sum(cdd(high,low,65)) as cdd65,
    sum(hdd(high,low,60)) as hdd60,
    sum(hdd(high,low,65)) as hdd65,
    sum(case when precip >= 0.01 then 1 else 0 end) as rain_days,
    sum(case when snow >= 0.1 then 1 else 0 end) as snow_days,
    sum(gddxx(40,86,high,low)) as gdd40,
    sum(gddxx(48,86,high,low)) as gdd48,
    sum(gddxx(50,86,high,low)) as gdd50,
    sum(gddxx(52,86,high,low)) as gdd52
    from """+table+""" WHERE station = %s GROUP by year, month
    """, (dbid, ))
    monthly_rows = cursor.fetchall()
    # One output file per report product; the two-digit suffix is the
    # product id used by constants.make_output().
    out = constants.make_output(constants.nt, dbid, "01")
    caller(genPrecipEvents.write, cursor, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "02")
    caller(gen30rains.write, mydb, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "03")
    caller(genGDD.write, monthly_rows, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "04")
    caller(genDailyRecords.write, mydb, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "05")
    caller(genDailyRecordsRain.write, mydb, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "06")
    caller(genDailyRange.write, mydb, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "07")
    caller(genDailyMeans.write, mydb, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "08")
    caller(genCountLows32.write, cursor, out, dbid)
    out.close()
    # Products 09-13: spring/fall freeze dates at thresholds 32..24 F.
    out = constants.make_output(constants.nt, dbid, "09")
    caller(genSpringFall.write, out, rs, dbid, 32)
    out.close()
    out = constants.make_output(constants.nt, dbid, "10")
    caller(genSpringFall.write, out, rs, dbid, 30)
    out.close()
    out = constants.make_output(constants.nt, dbid, "11")
    caller(genSpringFall.write, out, rs, dbid, 28)
    out.close()
    out = constants.make_output(constants.nt, dbid, "12")
    caller(genSpringFall.write, out, rs, dbid, 26)
    out.close()
    out = constants.make_output(constants.nt, dbid, "13")
    caller(genSpringFall.write, out, rs, dbid, 24)
    out.close()
    # genMonthly emits four products (14-17) in a single pass.
    out = constants.make_output(constants.nt, dbid, "14")
    out2 = constants.make_output(constants.nt, dbid, "15")
    out3 = constants.make_output(constants.nt, dbid, "16")
    out4 = constants.make_output(constants.nt, dbid, "17")
    caller(genMonthly.write, monthly_rows, out, out2, out3, out4, dbid)
    out.close()
    out2.close()
    out3.close()
    out4.close()
    out = constants.make_output(constants.nt, dbid, "18")
    caller(genHDD.write, monthly_rows, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "19")
    caller(genCDD.write, monthly_rows, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "20")
    caller(genHeatStress.write, cursor, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "21")
    caller(genCountRain.write, monthly_rows, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "25")
    caller(genCountSnow.write, monthly_rows, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "22")
    caller(genFrostProbabilities.write, mydb, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "23")
    caller(genSpringProbabilities.write, cursor, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "24")
    caller(genCycles.write, out, rs, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "26")
    caller(genTempThresholds.write, cursor, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "27")
    caller(genRecordPeriods.write, mydb, out, rs, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "28")
    caller(gen_precip_cats.write, mydb, out, rs, dbid)
    out.close()
def main():
for dbid in runstations:
try:
run_station(dbid)
except:
print 'climodat/drive.py failure for %s' % (dbid, )
print traceback.print_exc()
sys.exit()
if __name__ == '__main__':
    # Entry point when run from run.sh.
    main()
ignore missing high/low data for climodat reports
This can now happen with the hybrid sites that report high/lows at 7 AM
and precip at midnight
"""
Generate the climodat reports, please! Run from run.sh
"""
import pg
import traceback
import psycopg2.extras
import genPrecipEvents
import gen30rains
import genGDD
import genDailyRecords
import genDailyRecordsRain
import genDailyRange
import genDailyMeans
import genCountLows32
import genSpringFall
import genMonthly
import genHDD
import genCDD
import genHeatStress
import genCountRain
import genFrostProbabilities
import genSpringProbabilities
import genCycles
import genCountSnow
import genTempThresholds
import genRecordPeriods
import gen_precip_cats
import constants
import sys
import datetime
mydb = pg.connect('coop', 'iemdb', user='nobody')
pgconn = psycopg2.connect(database='coop', host='iemdb', user='nobody')
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
# Simplified from 'True if <cond> else False' to the boolean expression
# itself.  update_all: full-rebuild flag — presumably consumed by the gen*
# modules; it is not read directly in this file's visible code.
update_all = datetime.datetime.today().day == 1
runstations = constants.nt.sts.keys()
if len(sys.argv) == 2:
    # A single command line argument restricts the run to that station id
    # and forces the full rebuild.
    runstations = [sys.argv[1], ]
    update_all = True
def caller(func, *args):
    """Invoke *func* with *args*, passing its result straight through."""
    return func(*args)
def run_station(dbid):
    """Generate the full suite of climodat report products for one station.

    dbid -- station identifier keyed into constants.nt.sts.

    Side effects only: each numbered product ("01".."28") is written via
    constants.make_output() and closed immediately after its generator runs.
    """
    table = constants.get_table(dbid)
    # print "processing [%s] %s" % (dbid, constants.nt.sts[dbid]["name"])
    # Daily observations joined with the climoweek lookup; rows lacking
    # precip or high/low are excluded (hybrid sites may report high/low at
    # 7 AM and precip at midnight).
    sql = """
    SELECT d.*, c.climoweek from %s d, climoweek c
    WHERE station = '%s' and day >= '%s-01-01' and d.sday = c.sday
    and precip is not null and high is not null and low is not null
    ORDER by day ASC
    """ % (table, dbid, constants.startyear(dbid))
    rs = caller(mydb.query, sql).dictresult()
    # Compute monthly
    cursor.execute("""
    SELECT year, month, sum(precip) as sum_precip,
    avg(high) as avg_high,
    avg(low) as avg_low,
    sum(cdd(high,low,60)) as cdd60,
    sum(cdd(high,low,65)) as cdd65,
    sum(hdd(high,low,60)) as hdd60,
    sum(hdd(high,low,65)) as hdd65,
    sum(case when precip >= 0.01 then 1 else 0 end) as rain_days,
    sum(case when snow >= 0.1 then 1 else 0 end) as snow_days,
    sum(gddxx(40,86,high,low)) as gdd40,
    sum(gddxx(48,86,high,low)) as gdd48,
    sum(gddxx(50,86,high,low)) as gdd50,
    sum(gddxx(52,86,high,low)) as gdd52
    from """+table+""" WHERE station = %s GROUP by year, month
    """, (dbid, ))
    monthly_rows = cursor.fetchall()
    # One output file per report product; the two-digit suffix is the
    # product id used by constants.make_output().
    out = constants.make_output(constants.nt, dbid, "01")
    caller(genPrecipEvents.write, cursor, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "02")
    caller(gen30rains.write, mydb, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "03")
    caller(genGDD.write, monthly_rows, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "04")
    caller(genDailyRecords.write, mydb, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "05")
    caller(genDailyRecordsRain.write, mydb, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "06")
    caller(genDailyRange.write, mydb, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "07")
    caller(genDailyMeans.write, mydb, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "08")
    caller(genCountLows32.write, cursor, out, dbid)
    out.close()
    # Products 09-13: spring/fall freeze dates at thresholds 32..24 F.
    out = constants.make_output(constants.nt, dbid, "09")
    caller(genSpringFall.write, out, rs, dbid, 32)
    out.close()
    out = constants.make_output(constants.nt, dbid, "10")
    caller(genSpringFall.write, out, rs, dbid, 30)
    out.close()
    out = constants.make_output(constants.nt, dbid, "11")
    caller(genSpringFall.write, out, rs, dbid, 28)
    out.close()
    out = constants.make_output(constants.nt, dbid, "12")
    caller(genSpringFall.write, out, rs, dbid, 26)
    out.close()
    out = constants.make_output(constants.nt, dbid, "13")
    caller(genSpringFall.write, out, rs, dbid, 24)
    out.close()
    # genMonthly emits four products (14-17) in a single pass.
    out = constants.make_output(constants.nt, dbid, "14")
    out2 = constants.make_output(constants.nt, dbid, "15")
    out3 = constants.make_output(constants.nt, dbid, "16")
    out4 = constants.make_output(constants.nt, dbid, "17")
    caller(genMonthly.write, monthly_rows, out, out2, out3, out4, dbid)
    out.close()
    out2.close()
    out3.close()
    out4.close()
    out = constants.make_output(constants.nt, dbid, "18")
    caller(genHDD.write, monthly_rows, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "19")
    caller(genCDD.write, monthly_rows, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "20")
    caller(genHeatStress.write, cursor, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "21")
    caller(genCountRain.write, monthly_rows, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "25")
    caller(genCountSnow.write, monthly_rows, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "22")
    caller(genFrostProbabilities.write, mydb, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "23")
    caller(genSpringProbabilities.write, cursor, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "24")
    caller(genCycles.write, out, rs, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "26")
    caller(genTempThresholds.write, cursor, out, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "27")
    caller(genRecordPeriods.write, mydb, out, rs, dbid)
    out.close()
    out = constants.make_output(constants.nt, dbid, "28")
    caller(gen_precip_cats.write, mydb, out, rs, dbid)
    out.close()
def main():
for dbid in runstations:
try:
run_station(dbid)
except:
print 'climodat/drive.py failure for %s' % (dbid, )
print traceback.print_exc()
sys.exit()
if __name__ == '__main__':
    # Entry point when run from run.sh.
    main()
|
debug
|
"""dberrors: database exception classes for SQLObject.
These classes are dictated by the DB API v2.0:
http://www.python.org/topics/database/DatabaseAPI-2.0.html
"""
class Error(StandardError):
    """Base class of all other error exceptions (DB API 2.0)."""
    pass
class Warning(StandardError):
    """Raised for important warnings, e.g. data truncation (DB API 2.0)."""
    pass
class InterfaceError(Error):
    """Errors related to the database interface rather than the database."""
    pass
class DatabaseError(Error):
    """Errors related to the database itself."""
    pass
class InternalError(DatabaseError):
    """The database encountered an internal error."""
    pass
class OperationalError(DatabaseError):
    """Errors related to the database's operation."""
    pass
class ProgrammingError(DatabaseError):
    """Programming errors, e.g. bad table name or SQL syntax error."""
    pass
class IntegrityError(DatabaseError):
    """The relational integrity of the database is affected."""
    pass
class DataError(DatabaseError):
    """Errors due to problems with the processed data."""
    pass
class NotSupportedError(DatabaseError):
    """An unsupported method or database API was used."""
    pass
class DuplicateEntryError(IntegrityError):
    """A unique constraint was violated (SQLObject extension, not DB API)."""
    pass
Add python3 alias
"""dberrors: database exception classes for SQLObject.
These classes are dictated by the DB API v2.0:
http://www.python.org/topics/database/DatabaseAPI-2.0.html
"""
import sys
if sys.version_info[0] >= 3:
StandardError = Exception
class Error(StandardError):
pass
class Warning(StandardError):
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class InternalError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class DataError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class DuplicateEntryError(IntegrityError):
pass
|
import cv2
class StatModel(object):
    """Thin wrapper around an OpenCV statistical model held in self.model."""
    def load(self, fn):
        # Restore trained model state from the file at fn.
        self.model.load(fn)
    def save(self, fn):
        # Persist trained model state to the file at fn.
        self.model.save(fn)
class SVM(StatModel):
    """Support vector machine classifier backed by cv2.SVM."""
    def __init__(self):
        self.model = cv2.SVM()
    def train(self, samples, labels):
        # RBF kernel, C-SVC classification; train_auto presumably
        # cross-validates the remaining parameters — see cv2 docs.
        params = dict(kernel_type=cv2.SVM_RBF, svm_type=cv2.SVM_C_SVC)
        self.model.train_auto(samples, labels, None, None, params)
    def predict(self, X):
        return self.model.predict(X)
SVM returns float
import cv2
class StatModel(object):
    """Thin wrapper around an OpenCV statistical model held in self.model."""
    def load(self, fn):
        # Restore trained model state from the file at fn.
        self.model.load(fn)
    def save(self, fn):
        # Persist trained model state to the file at fn.
        self.model.save(fn)
class SVM(StatModel):
    """Support vector machine classifier backed by cv2.SVM."""
    def __init__(self):
        self.model = cv2.SVM()
    def train(self, samples, labels):
        # RBF kernel, C-SVC classification; train_auto presumably
        # cross-validates the remaining parameters — see cv2 docs.
        params = dict(kernel_type=cv2.SVM_RBF, svm_type=cv2.SVM_C_SVC)
        self.model.train_auto(samples, labels, None, None, params)
    def predict(self, X):
        # Second argument (returnDFVal=True) makes predict return the raw
        # decision value as a float — TODO confirm against cv2.SVM.predict.
        return self.model.predict(X, True)
|
#! /usr/bin/env python
#
#-------------------------------------------------------------------------------
#
# Export Layers - GIMP plug-in that exports layers as separate images
#
# Copyright (C) 2013-2015 khalim19 <khalim19@gmail.com>
#
# Export Layers is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Export Layers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Export Layers. If not, see <http://www.gnu.org/licenses/>.
#
#-------------------------------------------------------------------------------
#===============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
# Python 2: make bare 'str' mean unicode throughout this module.
str = unicode
#===============================================================================
from export_layers import constants
import gettext
# Install _() before any translatable string below is evaluated.
gettext.install(constants.DOMAIN_NAME, constants.LOCALE_PATH, unicode=True)
from export_layers import log_output
log_output.log_output(constants.DEBUG)
import os
# Disable overlay scrolling (notably used in Ubuntu) to be consistent with the
# Export menu.
os.environ['LIBOVERLAY_SCROLLBAR'] = '0'
import gimp
import gimpplugin
import gimpenums
from export_layers.pygimplib import pgsettinggroup
from export_layers.pygimplib import pgsettingpersistor
from export_layers.pygimplib import overwrite
from export_layers.pygimplib import pggui
from export_layers import settings_plugin
from export_layers import gui_plugin
from export_layers import exportlayers
#===============================================================================
class ExportLayersPlugin(gimpplugin.plugin):
    """
    GIMP plug-in registering and running the "Export Layers" procedures.
    """
    def __init__(self):
        # Plug-in settings plus the two persistence backends:
        # per-GIMP-session storage and on-disk persistent storage.
        self.settings = settings_plugin.create_settings()
        self.session_source = pgsettingpersistor.SessionPersistentSettingSource(constants.SESSION_SOURCE_NAME)
        self.persistent_source = pgsettingpersistor.PersistentSettingSource(constants.PERSISTENT_SOURCE_NAME)
    def query(self):
        """Register both PDB procedures and their menu entries with GIMP."""
        gimp.domain_register(constants.DOMAIN_NAME, constants.LOCALE_PATH)
        gimp.install_procedure(
            "plug_in_export_layers",
            _("Export layers as separate images"),
            "",
            "khalim19 <khalim19@gmail.com>",
            "khalim19",
            "2013",
            _("E_xport Layers..."),
            "*",
            gimpenums.PLUGIN,
            pgsettinggroup.PdbParamCreator.create_params(
                self.settings['special']['run_mode'], self.settings['special']['image'], self.settings['main']),
            []
        )
        gimp.install_procedure(
            "plug_in_export_layers_repeat",
            _("Run \"{0}\" with the last values specified").format(constants.PLUGIN_TITLE),
            _("If the plug-in is run for the first time (i.e. no last values exist), "
              "default values will be used."),
            "khalim19 <khalim19@gmail.com>",
            "khalim19",
            "2013",
            _("E_xport Layers (repeat)"),
            "*",
            gimpenums.PLUGIN,
            pgsettinggroup.PdbParamCreator.create_params(
                self.settings['special']['run_mode'], self.settings['special']['image']),
            []
        )
        gimp.menu_register("plug_in_export_layers", "<Image>/File/Export")
        gimp.menu_register("plug_in_export_layers_repeat", "<Image>/File/Export")
    def plug_in_export_layers(self, *args):
        """Main procedure: dispatch on run mode."""
        run_mode = args[0]
        image = args[1]
        self.settings['special']['run_mode'].set_value(run_mode)
        self.settings['special']['image'].set_value(image)
        if run_mode == gimpenums.RUN_INTERACTIVE:
            self._run_export_layers_interactive(image)
        elif run_mode == gimpenums.RUN_WITH_LAST_VALS:
            self._run_with_last_vals(image)
        else:
            self._run_noninteractive(image, args)
    def plug_in_export_layers_repeat(self, run_mode, image):
        """Repeat procedure: first interactive run shows the full dialog."""
        if run_mode == gimpenums.RUN_INTERACTIVE:
            pgsettingpersistor.SettingPersistor.load(
                [self.settings['special']['first_plugin_run']], [self.session_source])
            if self.settings['special']['first_plugin_run'].value:
                self._run_export_layers_interactive(image)
            else:
                self._run_export_layers_repeat_interactive(image)
        else:
            self._run_with_last_vals(image)
    def _run_noninteractive(self, image, args):
        # Start with the third parameter - run_mode and image are already set.
        for setting, arg in zip(self.settings['main'], args[2:]):
            if isinstance(arg, bytes):
                # PDB may pass strings as bytes; settings expect unicode.
                arg = arg.decode()
            setting.set_value(arg)
        self._run_plugin_noninteractive(gimpenums.RUN_NONINTERACTIVE, image)
    def _run_with_last_vals(self, image):
        # Session source is consulted before the persistent one.
        status, status_message = pgsettingpersistor.SettingPersistor.load(
            [self.settings['main']], [self.session_source, self.persistent_source])
        if status == pgsettingpersistor.SettingPersistor.READ_FAIL:
            print(status_message)
        self._run_plugin_noninteractive(gimpenums.RUN_WITH_LAST_VALS, image)
    @pggui.set_gui_excepthook(_(constants.PLUGIN_TITLE), report_uri_list=constants.BUG_REPORT_URI_LIST)
    def _run_export_layers_interactive(self, image):
        gui_plugin.export_layers_gui(image, self.settings, self.session_source, self.persistent_source)
    @pggui.set_gui_excepthook(_(constants.PLUGIN_TITLE), report_uri_list=constants.BUG_REPORT_URI_LIST)
    def _run_export_layers_repeat_interactive(self, image):
        gui_plugin.export_layers_repeat_gui(image, self.settings, self.session_source, self.persistent_source)
    def _run_plugin_noninteractive(self, run_mode, image):
        """Run the export itself with a noninteractive overwrite chooser."""
        layer_exporter = exportlayers.LayerExporter(
            run_mode, image, self.settings['main'],
            overwrite_chooser=overwrite.NoninteractiveOverwriteChooser(self.settings['main']['overwrite_mode'].value),
            progress_updater=None
        )
        try:
            layer_exporter.export_layers()
        except exportlayers.ExportLayersCancelError as e:
            # Cancellation is not an error; report and continue to save state.
            print(e.message)
        except exportlayers.ExportLayersError as e:
            print(e.message)
            raise
        self.settings['special']['first_plugin_run'].set_value(False)
        pgsettingpersistor.SettingPersistor.save(
            [self.settings['main'], self.settings['special']['first_plugin_run']], [self.session_source])
#===============================================================================
if __name__ == "__main__":
ExportLayersPlugin().start()
Add sanity check for disabling overlay scrolling
#! /usr/bin/env python
#
#-------------------------------------------------------------------------------
#
# Export Layers - GIMP plug-in that exports layers as separate images
#
# Copyright (C) 2013-2015 khalim19 <khalim19@gmail.com>
#
# Export Layers is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Export Layers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Export Layers. If not, see <http://www.gnu.org/licenses/>.
#
#-------------------------------------------------------------------------------
#===============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
# Python 2: make bare 'str' mean unicode throughout this module.
str = unicode
#===============================================================================
from export_layers import constants
import gettext
# Install _() before any translatable string below is evaluated.
gettext.install(constants.DOMAIN_NAME, constants.LOCALE_PATH, unicode=True)
from export_layers import log_output
log_output.log_output(constants.DEBUG)
import os
try:
    # Disable overlay scrolling (notably used in Ubuntu) to be consistent with the
    # Export menu.
    os.environ['LIBOVERLAY_SCROLLBAR'] = '0'
except TypeError:
    # NOTE(review): re-raising TypeError unchanged is the default behavior;
    # this clause only distinguishes programming errors from the broad
    # environment-failure fallback below.
    raise
except Exception:
    # OS does not support setting environment variables
    pass
import gimp
import gimpplugin
import gimpenums
from export_layers.pygimplib import pgsettinggroup
from export_layers.pygimplib import pgsettingpersistor
from export_layers.pygimplib import overwrite
from export_layers.pygimplib import pggui
from export_layers import settings_plugin
from export_layers import gui_plugin
from export_layers import exportlayers
#===============================================================================
class ExportLayersPlugin(gimpplugin.plugin):
def __init__(self):
self.settings = settings_plugin.create_settings()
self.session_source = pgsettingpersistor.SessionPersistentSettingSource(constants.SESSION_SOURCE_NAME)
self.persistent_source = pgsettingpersistor.PersistentSettingSource(constants.PERSISTENT_SOURCE_NAME)
def query(self):
gimp.domain_register(constants.DOMAIN_NAME, constants.LOCALE_PATH)
gimp.install_procedure(
"plug_in_export_layers",
_("Export layers as separate images"),
"",
"khalim19 <khalim19@gmail.com>",
"khalim19",
"2013",
_("E_xport Layers..."),
"*",
gimpenums.PLUGIN,
pgsettinggroup.PdbParamCreator.create_params(
self.settings['special']['run_mode'], self.settings['special']['image'], self.settings['main']),
[]
)
gimp.install_procedure(
"plug_in_export_layers_repeat",
_("Run \"{0}\" with the last values specified").format(constants.PLUGIN_TITLE),
_("If the plug-in is run for the first time (i.e. no last values exist), "
"default values will be used."),
"khalim19 <khalim19@gmail.com>",
"khalim19",
"2013",
_("E_xport Layers (repeat)"),
"*",
gimpenums.PLUGIN,
pgsettinggroup.PdbParamCreator.create_params(
self.settings['special']['run_mode'], self.settings['special']['image']),
[]
)
gimp.menu_register("plug_in_export_layers", "<Image>/File/Export")
gimp.menu_register("plug_in_export_layers_repeat", "<Image>/File/Export")
def plug_in_export_layers(self, *args):
run_mode = args[0]
image = args[1]
self.settings['special']['run_mode'].set_value(run_mode)
self.settings['special']['image'].set_value(image)
if run_mode == gimpenums.RUN_INTERACTIVE:
self._run_export_layers_interactive(image)
elif run_mode == gimpenums.RUN_WITH_LAST_VALS:
self._run_with_last_vals(image)
else:
self._run_noninteractive(image, args)
def plug_in_export_layers_repeat(self, run_mode, image):
if run_mode == gimpenums.RUN_INTERACTIVE:
pgsettingpersistor.SettingPersistor.load(
[self.settings['special']['first_plugin_run']], [self.session_source])
if self.settings['special']['first_plugin_run'].value:
self._run_export_layers_interactive(image)
else:
self._run_export_layers_repeat_interactive(image)
else:
self._run_with_last_vals(image)
def _run_noninteractive(self, image, args):
# Start with the third parameter - run_mode and image are already set.
for setting, arg in zip(self.settings['main'], args[2:]):
if isinstance(arg, bytes):
arg = arg.decode()
setting.set_value(arg)
self._run_plugin_noninteractive(gimpenums.RUN_NONINTERACTIVE, image)
def _run_with_last_vals(self, image):
    # Load the last-used main settings (session first, then persistent)
    # and export with them.  A failed read is only reported, not fatal -
    # the settings keep their defaults in that case.
    status, status_message = pgsettingpersistor.SettingPersistor.load(
        [self.settings['main']], [self.session_source, self.persistent_source])
    if status == pgsettingpersistor.SettingPersistor.READ_FAIL:
        print(status_message)
    self._run_plugin_noninteractive(gimpenums.RUN_WITH_LAST_VALS, image)
@pggui.set_gui_excepthook(_(constants.PLUGIN_TITLE), report_uri_list=constants.BUG_REPORT_URI_LIST)
def _run_export_layers_interactive(self, image):
    # Show the full export dialog (exceptions go to the GUI excepthook).
    gui_plugin.export_layers_gui(image, self.settings, self.session_source, self.persistent_source)
@pggui.set_gui_excepthook(_(constants.PLUGIN_TITLE), report_uri_list=constants.BUG_REPORT_URI_LIST)
def _run_export_layers_repeat_interactive(self, image):
    # Show the small "repeat last export" dialog.
    gui_plugin.export_layers_repeat_gui(image, self.settings, self.session_source, self.persistent_source)
def _run_plugin_noninteractive(self, run_mode, image):
    # Run the export with the current settings and no GUI.
    layer_exporter = exportlayers.LayerExporter(
        run_mode, image, self.settings['main'],
        overwrite_chooser=overwrite.NoninteractiveOverwriteChooser(self.settings['main']['overwrite_mode'].value),
        progress_updater=None
    )
    try:
        layer_exporter.export_layers()
    except exportlayers.ExportLayersCancelError as e:
        # User cancellation is not an error: report it and fall through
        # so the settings below are still saved.
        print(e.message)
    except exportlayers.ExportLayersError as e:
        # Real export failure: report and propagate.  Note the raise
        # skips the settings save at the end of this method.
        print(e.message)
        raise
    self.settings['special']['first_plugin_run'].set_value(False)
    pgsettingpersistor.SettingPersistor.save(
        [self.settings['main'], self.settings['special']['first_plugin_run']], [self.session_source])
#===============================================================================
if __name__ == "__main__":
    # Entry point when GIMP executes this file as a plug-in script.
    ExportLayersPlugin().start()
|
""" basic collect and runtest protocol implementations """
import py, sys
from time import time
from py._code.code import TerminalRepr
def pytest_namespace():
    """Expose the outcome helpers under the ``pytest`` namespace."""
    helpers = (fail, skip, importorskip, exit)
    return dict((helper.__name__, helper) for helper in helpers)
#
# pytest plugin hooks
def pytest_addoption(parser):
    """Add the ``--durations=N`` reporting option (N=0 reports all)."""
    group = parser.getgroup("terminal reporting", "reporting", after="general")
    # Fixed: a stray trailing comma previously turned this call statement
    # into a discarded one-element tuple expression.
    group.addoption('--durations',
        action="store", type="int", default=None, metavar="N",
        help="show N slowest setup/test durations (N=0 for all).")
def pytest_terminal_summary(terminalreporter):
    # Report the N slowest phase durations when --durations=N was given
    # (N=0 means report every duration).
    durations = terminalreporter.config.option.durations
    if durations is None:
        return
    tr = terminalreporter
    dlist = []
    # Collect every report that carries a duration, across all outcomes.
    for replist in tr.stats.values():
        for rep in replist:
            if hasattr(rep, 'duration'):
                dlist.append(rep)
    if not dlist:
        return
    # Slowest first.
    dlist.sort(key=lambda x: x.duration)
    dlist.reverse()
    if not durations:
        tr.write_sep("=", "slowest test durations")
    else:
        tr.write_sep("=", "slowest %s test durations" % durations)
        dlist = dlist[:durations]
    for rep in dlist:
        nodeid = rep.nodeid.replace("::()::", "::")
        tr.write_line("%02.2fs %-8s %s" %
            (rep.duration, rep.when, nodeid))
def pytest_sessionstart(session):
    # Create the per-session SetupState that tracks setup/teardown.
    session._setupstate = SetupState()
def pytest_sessionfinish(session):
    # Tear down everything still set up at the end of the session.
    session._setupstate.teardown_all()
class NodeInfo:
    """Holds a node's location tuple (see TestReport.location)."""
    def __init__(self, location):
        self.location = location
def pytest_runtest_protocol(item, nextitem):
    # Announce the test, run the full setup/call/teardown protocol, and
    # return True to claim this hook.
    item.ihook.pytest_runtest_logstart(
        nodeid=item.nodeid, location=item.location,
    )
    runtestprotocol(item, nextitem=nextitem)
    return True
def runtestprotocol(item, log=True, nextitem=None):
    """Run setup/call/teardown for *item*, returning the list of reports.

    The "call" phase only runs when setup passed; teardown always runs
    and may tear down collectors up to what *nextitem* still needs.
    """
    setup_report = call_and_report(item, "setup", log)
    reports = [setup_report]
    if setup_report.passed:
        reports.append(call_and_report(item, "call", log))
    reports.append(
        call_and_report(item, "teardown", log, nextitem=nextitem))
    return reports
def pytest_runtest_setup(item):
    # Set up all collectors the item needs that are not yet set up.
    item.session._setupstate.prepare(item)
def pytest_runtest_call(item):
    # Execute the actual test body.
    item.runtest()
def pytest_runtest_teardown(item, nextitem):
    # Tear down only what the next item does not need again.
    item.session._setupstate.teardown_exact(item, nextitem)
def pytest_report_teststatus(report):
    """Map setup/teardown reports to (category, shortletter, verbose-word).

    Reports for other phases are left to other plugins (implicit None).
    """
    if report.when not in ("setup", "teardown"):
        return None
    if report.failed:
        # category, shortletter, verbose-word
        return "error", "E", "ERROR"
    if report.skipped:
        return "skipped", "s", "SKIPPED"
    return "", "", ""
#
# Implementation
def call_and_report(item, when, log=True, **kwds):
    # Run one protocol phase, turn its CallInfo into a report, and
    # optionally log the report through the hook.
    call = call_runtest_hook(item, when, **kwds)
    hook = item.ihook
    report = hook.pytest_runtest_makereport(item=item, call=call)
    if log:
        hook.pytest_runtest_logreport(report=report)
    return report
def call_runtest_hook(item, when, **kwds):
    # Look up the pytest_runtest_<when> hook and run it wrapped in a
    # CallInfo that captures result/exception and timing.
    hookname = "pytest_runtest_" + when
    ihook = getattr(item.ihook, hookname)
    return CallInfo(lambda: ihook(item=item, **kwds), when=when)
class CallInfo:
    """ Result/Exception info a function invocation. """
    #: None or ExceptionInfo object.
    excinfo = None
    def __init__(self, func, when):
        #: context of invocation: one of "setup", "call",
        #: "teardown", "memocollect"
        self.when = when
        self.start = time()
        try:
            try:
                self.result = func()
            except KeyboardInterrupt:
                # Ctrl-C must abort the whole run; never capture it.
                raise
            except:
                # Intentionally broad: any other exception is captured
                # into excinfo instead of propagating.
                self.excinfo = py.code.ExceptionInfo()
        finally:
            # stop is recorded whether func() returned or raised.
            self.stop = time()
    def __repr__(self):
        if self.excinfo:
            status = "exception: %s" % str(self.excinfo.value)
        else:
            status = "result: %r" % (self.result,)
        return "<CallInfo when=%r %s>" % (self.when, status)
def getslaveinfoline(node):
    # Build (and cache on the node) a one-line description of a
    # distributed-testing slave: id, platform, python version, executable.
    try:
        return node._slaveinfocache
    except AttributeError:
        d = node.slaveinfo
        ver = "%s.%s.%s" % d['version_info'][:3]
        node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
            d['id'], d['sysplatform'], ver, d['executable'])
        return s
class BaseReport(object):
    """Common behaviour shared by test and collect reports."""
    def __init__(self, **kw):
        self.__dict__.update(kw)
    def toterminal(self, out):
        # Write the failure representation to the terminal writer,
        # prefixed with slave info when running distributed.
        longrepr = self.longrepr
        if hasattr(self, 'node'):
            out.line(getslaveinfoline(self.node))
        if hasattr(longrepr, 'toterminal'):
            longrepr.toterminal(out)
        else:
            try:
                out.line(longrepr)
            except UnicodeEncodeError:
                out.line("<unprintable longrepr>")
    # Convenience outcome predicates.
    passed = property(lambda x: x.outcome == "passed")
    failed = property(lambda x: x.outcome == "failed")
    skipped = property(lambda x: x.outcome == "skipped")
    @property
    def fspath(self):
        # The nodeid starts with the file system path of the test module.
        return self.nodeid.split("::")[0]
def pytest_runtest_makereport(item, call):
    """Build a TestReport from the CallInfo of one protocol phase."""
    when = call.when
    duration = call.stop-call.start
    keywords = dict([(x,1) for x in item.keywords])
    excinfo = call.excinfo
    if not call.excinfo:
        outcome = "passed"
        longrepr = None
    else:
        excinfo = call.excinfo
        if not isinstance(excinfo, py.code.ExceptionInfo):
            # Not a wrapped ExceptionInfo: pass the object through as the
            # failure representation unchanged.
            outcome = "failed"
            longrepr = excinfo
        elif excinfo.errisinstance(py.test.skip.Exception):
            # A skip is reported as (path, lineno, message).
            outcome = "skipped"
            r = excinfo._getreprcrash()
            longrepr = (str(r.path), r.lineno, r.message)
        else:
            outcome = "failed"
            if call.when == "call":
                longrepr = item.repr_failure(excinfo)
            else: # exception in setup or teardown
                longrepr = item._repr_failure_py(excinfo)
    return TestReport(item.nodeid, item.location,
        keywords, outcome, longrepr, when,
        duration=duration)
class TestReport(BaseReport):
    """ Basic test report object (also used for setup and teardown calls if
    they fail).
    """
    def __init__(self, nodeid, location,
            keywords, outcome, longrepr, when, sections=(), duration=0, **extra):
        #: normalized collection node id
        self.nodeid = nodeid
        #: a (filesystempath, lineno, domaininfo) tuple indicating the
        #: actual location of a test item - it might be different from the
        #: collected one e.g. if a method is inherited from a different module.
        self.location = location
        #: a name -> value dictionary containing all keywords and
        #: markers associated with a test invocation.
        self.keywords = keywords
        #: test outcome, always one of "passed", "failed", "skipped".
        self.outcome = outcome
        #: None or a failure representation.
        self.longrepr = longrepr
        #: one of 'setup', 'call', 'teardown' to indicate runtest phase.
        self.when = when
        #: list of (secname, data) extra information which needs to
        #: be marshallable
        self.sections = list(sections)
        #: time it took to run just the test
        self.duration = duration
        self.__dict__.update(extra)
    def __repr__(self):
        return "<TestReport %r when=%r outcome=%r>" % (
            self.nodeid, self.when, self.outcome)
class TeardownErrorReport(BaseReport):
    """Report for a failure during final teardown (no associated item)."""
    outcome = "failed"
    when = "teardown"
    def __init__(self, longrepr, **extra):
        self.longrepr = longrepr
        self.sections = []
        self.__dict__.update(extra)
def pytest_make_collect_report(collector):
    """Run a collector's collection step and wrap it in a CollectReport."""
    call = CallInfo(collector._memocollect, "memocollect")
    longrepr = None
    if not call.excinfo:
        outcome = "passed"
    else:
        if call.excinfo.errisinstance(py.test.skip.Exception):
            outcome = "skipped"
            r = collector._repr_failure_py(call.excinfo, "line").reprcrash
            longrepr = (str(r.path), r.lineno, r.message)
        else:
            outcome = "failed"
            errorinfo = collector.repr_failure(call.excinfo)
            if not hasattr(errorinfo, "toterminal"):
                # Plain string error info -> wrap so it can be written
                # to the terminal like other representations.
                errorinfo = CollectErrorRepr(errorinfo)
            longrepr = errorinfo
    return CollectReport(collector.nodeid, outcome, longrepr,
        getattr(call, 'result', None))
class CollectReport(BaseReport):
    """Report of one collection step; collected children are in .result."""
    def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra):
        self.nodeid = nodeid
        self.outcome = outcome
        self.longrepr = longrepr
        self.result = result or []
        self.sections = list(sections)
        self.__dict__.update(extra)
    @property
    def location(self):
        # Collectors have no line/domain info, only their path.
        return (self.fspath, None, self.fspath)
    def __repr__(self):
        return "<CollectReport %r lenresult=%s outcome=%r>" % (
            self.nodeid, len(self.result), self.outcome)
class CollectErrorRepr(TerminalRepr):
    """Terminal representation for a plain-string collection error."""
    def __init__(self, msg):
        self.longrepr = msg
    def toterminal(self, out):
        out.line(self.longrepr, red=True)
class SetupState(object):
    """ shared state for setting up/tearing down test items or collectors. """
    def __init__(self):
        # Stack of currently set-up collectors, outermost first.
        self.stack = []
        # Maps colitem (or None, or a tuple key) -> list of finalizers.
        self._finalizers = {}
    def addfinalizer(self, finalizer, colitem):
        """ attach a finalizer to the given colitem.
        if colitem is None, this will add a finalizer that
        is called at the end of teardown_all().
        if colitem is a tuple, it will be used as a key
        and needs an explicit call to _callfinalizers(key) later on.
        """
        assert hasattr(finalizer, '__call__')
        #assert colitem in self.stack
        self._finalizers.setdefault(colitem, []).append(finalizer)
    def _pop_and_teardown(self):
        # Tear down the innermost collector on the stack.
        colitem = self.stack.pop()
        self._teardown_with_finalization(colitem)
    def _callfinalizers(self, colitem):
        # Run the colitem's finalizers in LIFO order; the dict entry is
        # removed up front.
        finalizers = self._finalizers.pop(colitem, None)
        while finalizers:
            fin = finalizers.pop()
            fin()
    def _teardown_with_finalization(self, colitem):
        # Finalizers first, then the colitem's own teardown().
        self._callfinalizers(colitem)
        if hasattr(colitem, "teardown"):
            colitem.teardown()
        # Sanity check: remaining keys must refer to live stack entries,
        # the None bucket, or explicit tuple keys.
        for colitem in self._finalizers:
            assert colitem is None or colitem in self.stack \
                or isinstance(colitem, tuple)
    def teardown_all(self):
        # Unwind the whole stack, then run all remaining finalizers.
        while self.stack:
            self._pop_and_teardown()
        for key in list(self._finalizers):
            self._teardown_with_finalization(key)
        assert not self._finalizers
    def teardown_exact(self, item, nextitem):
        # Tear down only up to the collectors the next item still needs.
        needed_collectors = nextitem and nextitem.listchain() or []
        self._teardown_towards(needed_collectors)
    def _teardown_towards(self, needed_collectors):
        # Pop until the stack is a prefix of needed_collectors.
        while self.stack:
            if self.stack == needed_collectors[:len(self.stack)]:
                break
            self._pop_and_teardown()
    def prepare(self, colitem):
        """ setup objects along the collector chain to the test-method
        and teardown previously setup objects."""
        needed_collectors = colitem.listchain()
        self._teardown_towards(needed_collectors)
        # check if the last collection node has raised an error
        for col in self.stack:
            if hasattr(col, '_prepare_exc'):
                py.builtin._reraise(*col._prepare_exc)
        for col in needed_collectors[len(self.stack):]:
            self.stack.append(col)
            try:
                col.setup()
            except Exception:
                # Remember the failure so later prepare() calls re-raise
                # instead of retrying a broken setup.
                col._prepare_exc = sys.exc_info()
                raise
# =============================================================
# Test OutcomeExceptions and helpers for creating them.
class OutcomeException(Exception):
    """Base class of exceptions that convey a test/collection outcome.

    ``pytrace=False`` means *msg* is the complete failure information and
    no python traceback should be shown.
    """
    def __init__(self, msg=None, pytrace=True):
        super(OutcomeException, self).__init__(msg)
        self.msg = msg
        self.pytrace = pytrace
    def __repr__(self):
        return str(self.msg) if self.msg else (
            "<%s instance>" % (self.__class__.__name__,))
    __str__ = __repr__
class Skipped(OutcomeException):
    """Raised (via skip()) to mark the current test as skipped."""
    # XXX hackish: on 3k we fake to live in the builtins
    # in order to have Skipped exception printing shorter/nicer
    __module__ = 'builtins'
class Failed(OutcomeException):
    """ raised from an explicit call to py.test.fail() """
    # see the remark on Skipped.__module__ above
    __module__ = 'builtins'
class Exit(KeyboardInterrupt):
    """Raised for immediate program exits (no tracebacks/summaries)."""
    def __init__(self, msg="unknown reason"):
        super(Exit, self).__init__(msg)
        self.msg = msg
# exposed helper methods
def exit(msg):
    """ exit testing process as if KeyboardInterrupt was triggered. """
    __tracebackhide__ = True
    raise Exit(msg)
# expose the exception class for except-clauses: py.test.exit.Exception
exit.Exception = Exit
def skip(msg=""):
    """ skip an executing test with the given message. Note: it's usually
    better to use the py.test.mark.skipif marker to declare a test to be
    skipped under certain conditions like mismatching platforms or
    dependencies. See the pytest_skipping plugin for details.
    """
    __tracebackhide__ = True
    raise Skipped(msg=msg)
# expose the exception class for except-clauses: py.test.skip.Exception
skip.Exception = Skipped
def fail(msg="", pytrace=True):
    """ explicitly fail the currently-executing test with the given message.
    :arg pytrace: if false the msg represents the full failure information
    and no python traceback will be reported.
    """
    __tracebackhide__ = True
    raise Failed(msg=msg, pytrace=pytrace)
# expose the exception class for except-clauses: py.test.fail.Exception
fail.Exception = Failed
def importorskip(modname, minversion=None):
    """ return imported module if it has at least the optionally specified
    'minversion' (a "X.Y.Z" string or a sequence of components) - otherwise
    call py.test.skip() with a message detailing the mismatch.
    """
    __tracebackhide__ = True
    compile(modname, '', 'eval') # to catch syntaxerrors
    try:
        __import__(modname)
    except ImportError:
        py.test.skip("could not import %r" %(modname,))
    mod = sys.modules[modname]
    if minversion is None:
        return mod
    verattr = getattr(mod, '__version__', None)
    if isinstance(minversion, str):
        minver = minversion.split(".")
    else:
        minver = list(minversion)
    if verattr is None or _version_components_lt(verattr.split("."), minver):
        py.test.skip("module %r has __version__ %r, required is: %r" %(
            modname, verattr, minversion))
    return mod
def _version_components_lt(have, want):
    # Compare version component lists numerically when possible so that
    # e.g. "1.10" is correctly considered newer than "1.9" (the previous
    # plain list-of-strings comparison got this wrong: "10" < "9").
    # Falls back to the old string comparison for non-numeric components.
    try:
        return [int(x) for x in have] < [int(x) for x in want]
    except (TypeError, ValueError):
        return have < want
# python 2.4 compatibility variant of the module above
""" basic collect and runtest protocol implementations """
import py, sys
from time import time
from py._code.code import TerminalRepr
def pytest_namespace():
return {
'fail' : fail,
'skip' : skip,
'importorskip' : importorskip,
'exit' : exit,
}
#
# pytest plugin hooks
def pytest_addoption(parser):
    """Add the ``--durations=N`` reporting option (N=0 reports all)."""
    group = parser.getgroup("terminal reporting", "reporting", after="general")
    # Fixed: a stray trailing comma previously turned this call statement
    # into a discarded one-element tuple expression.
    group.addoption('--durations',
        action="store", type="int", default=None, metavar="N",
        help="show N slowest setup/test durations (N=0 for all).")
def pytest_terminal_summary(terminalreporter):
durations = terminalreporter.config.option.durations
if durations is None:
return
tr = terminalreporter
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, 'duration'):
dlist.append(rep)
if not dlist:
return
dlist.sort(key=lambda x: x.duration)
dlist.reverse()
if not durations:
tr.write_sep("=", "slowest test durations")
else:
tr.write_sep("=", "slowest %s test durations" % durations)
dlist = dlist[:durations]
for rep in dlist:
nodeid = rep.nodeid.replace("::()::", "::")
tr.write_line("%02.2fs %-8s %s" %
(rep.duration, rep.when, nodeid))
def pytest_sessionstart(session):
session._setupstate = SetupState()
def pytest_sessionfinish(session):
session._setupstate.teardown_all()
class NodeInfo:
def __init__(self, location):
self.location = location
def pytest_runtest_protocol(item, nextitem):
item.ihook.pytest_runtest_logstart(
nodeid=item.nodeid, location=item.location,
)
runtestprotocol(item, nextitem=nextitem)
return True
def runtestprotocol(item, log=True, nextitem=None):
rep = call_and_report(item, "setup", log)
reports = [rep]
if rep.passed:
reports.append(call_and_report(item, "call", log))
reports.append(call_and_report(item, "teardown", log,
nextitem=nextitem))
return reports
def pytest_runtest_setup(item):
item.session._setupstate.prepare(item)
def pytest_runtest_call(item):
item.runtest()
def pytest_runtest_teardown(item, nextitem):
item.session._setupstate.teardown_exact(item, nextitem)
def pytest_report_teststatus(report):
if report.when in ("setup", "teardown"):
if report.failed:
# category, shortletter, verbose-word
return "error", "E", "ERROR"
elif report.skipped:
return "skipped", "s", "SKIPPED"
else:
return "", "", ""
#
# Implementation
def call_and_report(item, when, log=True, **kwds):
call = call_runtest_hook(item, when, **kwds)
hook = item.ihook
report = hook.pytest_runtest_makereport(item=item, call=call)
if log:
hook.pytest_runtest_logreport(report=report)
return report
def call_runtest_hook(item, when, **kwds):
hookname = "pytest_runtest_" + when
ihook = getattr(item.ihook, hookname)
return CallInfo(lambda: ihook(item=item, **kwds), when=when)
class CallInfo:
""" Result/Exception info a function invocation. """
#: None or ExceptionInfo object.
excinfo = None
def __init__(self, func, when):
#: context of invocation: one of "setup", "call",
#: "teardown", "memocollect"
self.when = when
self.start = time()
try:
try:
self.result = func()
except KeyboardInterrupt:
raise
except:
self.excinfo = py.code.ExceptionInfo()
finally:
self.stop = time()
def __repr__(self):
if self.excinfo:
status = "exception: %s" % str(self.excinfo.value)
else:
status = "result: %r" % (self.result,)
return "<CallInfo when=%r %s>" % (self.when, status)
def getslaveinfoline(node):
try:
return node._slaveinfocache
except AttributeError:
d = node.slaveinfo
ver = "%s.%s.%s" % d['version_info'][:3]
node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
d['id'], d['sysplatform'], ver, d['executable'])
return s
class BaseReport(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def toterminal(self, out):
longrepr = self.longrepr
if hasattr(self, 'node'):
out.line(getslaveinfoline(self.node))
if hasattr(longrepr, 'toterminal'):
longrepr.toterminal(out)
else:
try:
out.line(longrepr)
except UnicodeEncodeError:
out.line("<unprintable longrepr>")
passed = property(lambda x: x.outcome == "passed")
failed = property(lambda x: x.outcome == "failed")
skipped = property(lambda x: x.outcome == "skipped")
@property
def fspath(self):
return self.nodeid.split("::")[0]
def pytest_runtest_makereport(item, call):
when = call.when
duration = call.stop-call.start
keywords = dict([(x,1) for x in item.keywords])
excinfo = call.excinfo
if not call.excinfo:
outcome = "passed"
longrepr = None
else:
excinfo = call.excinfo
if not isinstance(excinfo, py.code.ExceptionInfo):
outcome = "failed"
longrepr = excinfo
elif excinfo.errisinstance(py.test.skip.Exception):
outcome = "skipped"
r = excinfo._getreprcrash()
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
if call.when == "call":
longrepr = item.repr_failure(excinfo)
else: # exception in setup or teardown
longrepr = item._repr_failure_py(excinfo)
return TestReport(item.nodeid, item.location,
keywords, outcome, longrepr, when,
duration=duration)
class TestReport(BaseReport):
""" Basic test report object (also used for setup and teardown calls if
they fail).
"""
def __init__(self, nodeid, location,
keywords, outcome, longrepr, when, sections=(), duration=0, **extra):
#: normalized collection node id
self.nodeid = nodeid
#: a (filesystempath, lineno, domaininfo) tuple indicating the
#: actual location of a test item - it might be different from the
#: collected one e.g. if a method is inherited from a different module.
self.location = location
#: a name -> value dictionary containing all keywords and
#: markers associated with a test invocation.
self.keywords = keywords
#: test outcome, always one of "passed", "failed", "skipped".
self.outcome = outcome
#: None or a failure representation.
self.longrepr = longrepr
#: one of 'setup', 'call', 'teardown' to indicate runtest phase.
self.when = when
#: list of (secname, data) extra information which needs to
#: marshallable
self.sections = list(sections)
#: time it took to run just the test
self.duration = duration
self.__dict__.update(extra)
def __repr__(self):
return "<TestReport %r when=%r outcome=%r>" % (
self.nodeid, self.when, self.outcome)
class TeardownErrorReport(BaseReport):
outcome = "failed"
when = "teardown"
def __init__(self, longrepr, **extra):
self.longrepr = longrepr
self.sections = []
self.__dict__.update(extra)
def pytest_make_collect_report(collector):
call = CallInfo(collector._memocollect, "memocollect")
longrepr = None
if not call.excinfo:
outcome = "passed"
else:
if call.excinfo.errisinstance(py.test.skip.Exception):
outcome = "skipped"
r = collector._repr_failure_py(call.excinfo, "line").reprcrash
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
errorinfo = collector.repr_failure(call.excinfo)
if not hasattr(errorinfo, "toterminal"):
errorinfo = CollectErrorRepr(errorinfo)
longrepr = errorinfo
return CollectReport(collector.nodeid, outcome, longrepr,
getattr(call, 'result', None))
class CollectReport(BaseReport):
def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra):
self.nodeid = nodeid
self.outcome = outcome
self.longrepr = longrepr
self.result = result or []
self.sections = list(sections)
self.__dict__.update(extra)
@property
def location(self):
return (self.fspath, None, self.fspath)
def __repr__(self):
return "<CollectReport %r lenresult=%s outcome=%r>" % (
self.nodeid, len(self.result), self.outcome)
class CollectErrorRepr(TerminalRepr):
def __init__(self, msg):
self.longrepr = msg
def toterminal(self, out):
out.line(self.longrepr, red=True)
class SetupState(object):
""" shared state for setting up/tearing down test items or collectors. """
def __init__(self):
self.stack = []
self._finalizers = {}
def addfinalizer(self, finalizer, colitem):
""" attach a finalizer to the given colitem.
if colitem is None, this will add a finalizer that
is called at the end of teardown_all().
if colitem is a tuple, it will be used as a key
and needs an explicit call to _callfinalizers(key) later on.
"""
assert hasattr(finalizer, '__call__')
#assert colitem in self.stack
self._finalizers.setdefault(colitem, []).append(finalizer)
def _pop_and_teardown(self):
colitem = self.stack.pop()
self._teardown_with_finalization(colitem)
def _callfinalizers(self, colitem):
finalizers = self._finalizers.pop(colitem, None)
while finalizers:
fin = finalizers.pop()
fin()
def _teardown_with_finalization(self, colitem):
self._callfinalizers(colitem)
if hasattr(colitem, "teardown"):
colitem.teardown()
for colitem in self._finalizers:
assert colitem is None or colitem in self.stack \
or isinstance(colitem, tuple)
def teardown_all(self):
while self.stack:
self._pop_and_teardown()
for key in list(self._finalizers):
self._teardown_with_finalization(key)
assert not self._finalizers
def teardown_exact(self, item, nextitem):
needed_collectors = nextitem and nextitem.listchain() or []
self._teardown_towards(needed_collectors)
def _teardown_towards(self, needed_collectors):
while self.stack:
if self.stack == needed_collectors[:len(self.stack)]:
break
self._pop_and_teardown()
def prepare(self, colitem):
""" setup objects along the collector chain to the test-method
and teardown previously setup objects."""
needed_collectors = colitem.listchain()
self._teardown_towards(needed_collectors)
# check if the last collection node has raised an error
for col in self.stack:
if hasattr(col, '_prepare_exc'):
py.builtin._reraise(*col._prepare_exc)
for col in needed_collectors[len(self.stack):]:
self.stack.append(col)
try:
col.setup()
except Exception:
col._prepare_exc = sys.exc_info()
raise
# =============================================================
# Test OutcomeExceptions and helpers for creating them.
class OutcomeException(Exception):
    """ OutcomeException and its subclass instances indicate and
    contain info about test and collection outcomes.
    """
    def __init__(self, msg=None, pytrace=True):
        # Direct base-class call instead of super() - this copy of the
        # module targets old pythons (see the python 2.4 note above it).
        Exception.__init__(self, msg)
        self.msg = msg
        # pytrace=False: msg is the full failure info, no traceback shown.
        self.pytrace = pytrace
    def __repr__(self):
        if self.msg:
            return str(self.msg)
        return "<%s instance>" %(self.__class__.__name__,)
    __str__ = __repr__
class Skipped(OutcomeException):
# XXX hackish: on 3k we fake to live in the builtins
# in order to have Skipped exception printing shorter/nicer
__module__ = 'builtins'
class Failed(OutcomeException):
""" raised from an explicit call to py.test.fail() """
__module__ = 'builtins'
class Exit(KeyboardInterrupt):
""" raised for immediate program exits (no tracebacks/summaries)"""
def __init__(self, msg="unknown reason"):
self.msg = msg
KeyboardInterrupt.__init__(self, msg)
# exposed helper methods
def exit(msg):
""" exit testing process as if KeyboardInterrupt was triggered. """
__tracebackhide__ = True
raise Exit(msg)
exit.Exception = Exit
def skip(msg=""):
""" skip an executing test with the given message. Note: it's usually
better to use the py.test.mark.skipif marker to declare a test to be
skipped under certain conditions like mismatching platforms or
dependencies. See the pytest_skipping plugin for details.
"""
__tracebackhide__ = True
raise Skipped(msg=msg)
skip.Exception = Skipped
def fail(msg="", pytrace=True):
    """ explicitly fail the currently-executing test with the given message.
    :arg pytrace: if false the msg represents the full failure information
    and no python traceback will be reported.
    """
    __tracebackhide__ = True
    raise Failed(msg=msg, pytrace=pytrace)
# expose the exception class for except-clauses: py.test.fail.Exception
fail.Exception = Failed
def importorskip(modname, minversion=None):
    """ return imported module if it has at least the optionally specified
    'minversion' (a "X.Y.Z" string or a sequence of components) - otherwise
    call py.test.skip() with a message detailing the mismatch.
    """
    __tracebackhide__ = True
    compile(modname, '', 'eval') # to catch syntaxerrors
    try:
        __import__(modname)
    except ImportError:
        py.test.skip("could not import %r" %(modname,))
    mod = sys.modules[modname]
    if minversion is None:
        return mod
    verattr = getattr(mod, '__version__', None)
    if isinstance(minversion, str):
        minver = minversion.split(".")
    else:
        minver = list(minversion)
    if verattr is None or _version_components_lt(verattr.split("."), minver):
        py.test.skip("module %r has __version__ %r, required is: %r" %(
            modname, verattr, minversion))
    return mod
def _version_components_lt(have, want):
    # Compare version component lists numerically when possible so that
    # e.g. "1.10" is correctly considered newer than "1.9" (the previous
    # plain list-of-strings comparison got this wrong: "10" < "9").
    # Falls back to the old string comparison for non-numeric components.
    try:
        return [int(x) for x in have] < [int(x) for x in want]
    except (TypeError, ValueError):
        return have < want
|
from rpython.rlib import jit
from rpython.rtyper.lltypesystem import rffi, lltype
from capi import CConfig
from rpython.rlib.rarithmetic import intmask
import sys
import os
import capi
import translated
import math
# Path to the SQLite database used by the tests (relative to this file).
testdb = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test/test.db")
# testdb = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test/big-test.db")
def get_printable_location(pc, rc, self):
    # JIT debugging hook: render the green variables (pc, rc, and the
    # opcode about to execute) as one line.  Opcodes that may invalidate
    # the Mem caches are marked UNSAFE.
    op = self._hlops[pc]
    opcode = op.get_opcode()
    name = self.get_opcode_str(opcode)
    unsafe = ''
    if not _cache_safe_opcodes[opcode]:
        unsafe = ' UNSAFE'
    return "%s %s %s%s" % (pc, rc, name, unsafe)
# RPython JIT driver for the opcode dispatch loop: pc, rc and the query
# object are the green (trace-identifying) variables.
jitdriver = jit.JitDriver(
    greens=['pc', 'rc', 'self_'],
    reds=[],
    should_unroll_one_iteration=lambda *args: True,
    get_printable_location=get_printable_location)
class SQPyteException(Exception):
def __init__(self, msg):
print msg
class Sqlite3DB(object):
    """Thin RPython wrapper around an open sqlite3 database handle."""
    _immutable_fields_ = ['db']
    def __init__(self, db_name):
        self.opendb(db_name)
    def opendb(self, db_name):
        # Open the database and keep the raw sqlite3* handle in self.db.
        with rffi.scoped_str2charp(db_name) as db_name, lltype.scoped_alloc(capi.SQLITE3PP.TO, 1) as result:
            errorcode = capi.sqlite3_open(db_name, result)
            assert(errorcode == 0)
            self.db = rffi.cast(capi.SQLITE3P, result[0])
# opcode number -> True if executing it cannot invalidate the Mem caches.
_cache_safe_opcodes = [False] * 256
def cache_safe(opcodes=None):
    # Decorator marking python_OP_* implementations as cache-safe.
    # Without explicit *opcodes*, the opcode is derived from the function
    # name: python_<OPNAME> -> CConfig.<OPNAME>.
    def decorate(func):
        ops = opcodes
        if ops is None:
            name = func.func_name
            assert name.startswith("python_")
            opcodename = name[len("python_"):]
            ops = [getattr(CConfig, opcodename)]
        for opcode in ops:
            _cache_safe_opcodes[opcode] = True
        return func
    return decorate
class Sqlite3Query(object):
_immutable_fields_ = ['internalPc', 'db', 'p', '_mem_as_python_list[*]', '_llmem_as_python_list[*]', 'intp',
'_hlops[*]', '_mem_caches']
def __init__(self, db, query):
    # db: a Sqlite3DB wrapper; query: SQL text, prepared immediately.
    self.db = db
    # Raw out-parameter buffers reused by the OP_* wrappers; freed in
    # __del__.
    self.internalPc = lltype.malloc(rffi.LONGP.TO, 1, flavor='raw')
    self.intp = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
    self.longp = lltype.malloc(rffi.LONGP.TO, 1, flavor='raw')
    self.prepare(query)
def __del__(self):
    # Release the raw out-parameter buffers allocated in __init__.
    lltype.free(self.internalPc, flavor='raw')
    lltype.free(self.intp, flavor='raw')
    lltype.free(self.longp, flavor='raw')
def prepare(self, query):
    # Compile the SQL into a VDBE program (self.p) and build the
    # Python-level mirrors of its memory cells and opcodes.
    length = len(query)
    with rffi.scoped_str2charp(query) as query, lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as result, lltype.scoped_alloc(rffi.CCHARPP.TO, 1) as unused_buffer:
        errorcode = capi.sqlite3_prepare(self.db, query, length, result, unused_buffer)
        assert errorcode == 0
        self.p = rffi.cast(capi.VDBEP, result[0])
        self._init_python_data()
def _init_python_data(self):
    from sqpyte.mem import Mem
    # Python-level mirrors of the VDBE's memory cells and opcodes; they
    # are listed in _immutable_fields_ so the JIT may treat them as
    # constants.
    self._llmem_as_python_list = [self.p.aMem[i] for i in range(self.p.nMem)]
    self._mem_as_python_list = [Mem(self, self.p.aMem[i], i)
                                for i in range(self.p.nMem)]
    self._mem_caches = [None] * len(self._mem_as_python_list)
    self._hlops = [Op(self, self.p.aOp[i]) for i in range(self.p.nOp)]
@jit.unroll_safe
def invalidate_caches(self):
    # Drop every Mem cell's cached value; called after opcodes that may
    # have mutated the underlying C memory (see _cache_safe_opcodes).
    for mem in self._mem_as_python_list:
        mem.invalidate_cache()
def is_op_cache_safe(self, opcode):
    # True if the opcode cannot invalidate the Mem caches.
    return _cache_safe_opcodes[opcode]
def reset_query(self):
    # Rewind the prepared statement so it can be executed again.
    capi.sqlite3_reset(self.p)
def python_OP_Init(self, pc, op):
    # Delegated to the RPython translation (see the translated module).
    return translated.python_OP_Init_translated(self, pc, op)
def python_OP_Rewind(self, pc, op):
    # The C implementation communicates the jump target through the
    # internalPc out-parameter; returns (new pc, rc).
    self.internalPc[0] = rffi.cast(rffi.LONG, pc)
    rc = capi.impl_OP_Rewind(self.p, self.db, self.internalPc, op.pOp)
    retPc = self.internalPc[0]
    return retPc, rc
def python_OP_Transaction(self, pc, op):
return capi.impl_OP_Transaction(self.p, self.db, pc, op.pOp)
def python_OP_TableLock(self, rc, op):
return capi.impl_OP_TableLock(self.p, self.db, rc, op.pOp)
@cache_safe()
def python_OP_Goto(self, pc, rc, op):
    # C fallback kept for reference:
    # self.internalPc[0] = rffi.cast(rffi.LONG, pc)
    # retRc = capi.impl_OP_Goto(self.p, self.db, self.internalPc, rc, op.pOp)
    # retPc = self.internalPc[0]
    # return retPc, retRc
    return translated.python_OP_Goto_translated(self, pc, rc, op)
def python_OP_OpenRead_OpenWrite(self, pc, op):
return capi.impl_OP_OpenRead_OpenWrite(self.p, self.db, pc, op.pOp)
# translated.python_OP_OpenRead_OpenWrite_translated(self, self.db, pc, op)
def python_OP_Column(self, pc, op):
return capi.impl_OP_Column(self.p, self.db, pc, op.pOp)
# return translated.python_OP_Column_translated(self, self.db, pc, op)
def python_OP_ResultRow(self, pc, op):
return capi.impl_OP_ResultRow(self.p, self.db, pc, op.pOp)
@cache_safe()
def python_OP_Next(self, pc, op):
    # C fallback kept for reference:
    # self.internalPc[0] = rffi.cast(rffi.LONG, pc)
    # rc = capi.impl_OP_Next(self.p, self.db, self.internalPc, op.pOp)
    # retPc = self.internalPc[0]
    # return retPc, rc
    return translated.python_OP_Next_translated(self, pc, op)
def python_OP_Close(self, op):
capi.impl_OP_Close(self.p, op.pOp)
def python_OP_Halt(self, pc, op):
    # Same out-parameter protocol as OP_Rewind: returns (new pc, rc).
    self.internalPc[0] = rffi.cast(rffi.LONG, pc)
    rc = capi.impl_OP_Halt(self.p, self.db, self.internalPc, op.pOp)
    retPc = self.internalPc[0]
    return retPc, rc
@cache_safe(
    opcodes=[CConfig.OP_Eq, CConfig.OP_Ne, CConfig.OP_Lt, CConfig.OP_Le,
             CConfig.OP_Gt, CConfig.OP_Ge])
def python_OP_Ne_Eq_Gt_Le_Lt_Ge(self, pc, rc, op):
    # C fallback kept for reference:
    # self.internalPc[0] = rffi.cast(rffi.LONG, pc)
    # rc = capi.impl_OP_Ne_Eq_Gt_Le_Lt_Ge(self.p, self.db, self.internalPc, rc, op.pOp)
    # retPc = self.internalPc[0]
    # return retPc, rc
    return translated.python_OP_Ne_Eq_Gt_Le_Lt_Ge_translated(self, pc, rc, op)
def python_OP_Integer(self, op):
translated.python_OP_Integer(self, op)
#capi.impl_OP_Integer(self.p, op.pOp)
def python_OP_Null(self, op):
capi.impl_OP_Null(self.p, op.pOp)
def python_OP_AggStep(self, rc, pc, op):
#return capi.impl_OP_AggStep(self.p, self.db, rc, op.pOp)
return translated.python_OP_AggStep(self, rc, pc, op)
def python_OP_AggFinal(self, pc, rc, op):
return capi.impl_OP_AggFinal(self.p, self.db, pc, rc, op.pOp)
def python_OP_Copy(self, pc, rc, op):
return capi.impl_OP_Copy(self.p, self.db, pc, rc, op.pOp)
def python_OP_MustBeInt(self, pc, rc, op):
return translated.python_OP_MustBeInt(self, pc, rc, op)
def python_OP_NotExists(self, pc, op):
return translated.python_OP_NotExists(self, pc, op)
#self.internalPc[0] = rffi.cast(rffi.LONG, pc)
#rc = capi.impl_OP_NotExists(self.p, self.db, self.internalPc, op.pOp)
#retPc = self.internalPc[0]
#return retPc, rc
def python_OP_String(self, op):
capi.impl_OP_String(self.p, self.db, op.pOp)
def python_OP_String8(self, pc, rc, op):
return capi.impl_OP_String8(self.p, self.db, pc, rc, op.pOp)
def python_OP_Function(self, pc, rc, op):
return capi.impl_OP_Function(self.p, self.db, pc, rc, op.pOp)
def python_OP_Real(self, op):
# aMem = self.p.aMem
# pOut = aMem[pOp.p2]
# pOut.flags = rffi.cast(rffi.USHORT, CConfig.MEM_Real)
# assert not math.isnan(pOp.p4.pReal)
# pOut.r = pOp.p4.pReal
capi.impl_OP_Real(self.p, op.pOp)
def python_OP_RealAffinity(self, op):
# capi.impl_OP_RealAffinity(self.p, op.pOp)
translated.python_OP_RealAffinity(self, op)
def python_OP_Add_Subtract_Multiply_Divide_Remainder(self, op):
# capi.impl_OP_Add_Subtract_Multiply_Divide_Remainder(self.p, op.pOp)
translated.python_OP_Add_Subtract_Multiply_Divide_Remainder(self, op)
def python_OP_If_IfNot(self, pc, op):
# return capi.impl_OP_If_IfNot(self.p, pc, op.pOp)
return translated.python_OP_If_IfNot(self, pc, op)
def python_OP_Rowid(self, pc, rc, op):
return capi.impl_OP_Rowid(self.p, self.db, pc, rc, op.pOp)
def python_OP_IsNull(self, pc, op):
# return capi.impl_OP_IsNull(self.p, pc, op.pOp)
return translated.python_OP_IsNull(self, pc, op)
def python_OP_SeekLT_SeekLE_SeekGE_SeekGT(self, pc, rc, op):
self.internalPc[0] = rffi.cast(rffi.LONG, pc)
rc = capi.impl_OP_SeekLT_SeekLE_SeekGE_SeekGT(self.p, self.db, self.internalPc, rc, op.pOp)
retPc = self.internalPc[0]
return retPc, rc
def python_OP_Move(self, op):
capi.impl_OP_Move(self.p, op.pOp)
def python_OP_IfZero(self, pc, op):
return capi.impl_OP_IfZero(self.p, pc, op.pOp)
def python_OP_IdxRowid(self, pc, rc, op):
return translated.python_OP_IdxRowid(self, pc, rc, op)
#return capi.impl_OP_IdxRowid(self.p, self.db, pc, rc, op.pOp)
def python_OP_IdxLE_IdxGT_IdxLT_IdxGE(self, pc, op):
self.internalPc[0] = rffi.cast(rffi.LONG, pc)
rc = capi.impl_OP_IdxLE_IdxGT_IdxLT_IdxGE(self.p, self.internalPc, op.pOp)
retPc = self.internalPc[0]
return retPc, rc
def python_OP_Seek(self, op):
#capi.impl_OP_Seek(self.p, op.pOp)
translated.python_OP_Seek(self, op)
def python_OP_Once(self, pc, op):
# return capi.impl_OP_Once(self.p, pc, op.pOp)
return translated.python_OP_Once(self, pc, op)
def python_OP_SCopy(self, op):
capi.impl_OP_SCopy(self.p, op.pOp)
def python_OP_Affinity(self, op):
# capi.impl_OP_Affinity(self.p, self.db, op.pOp)
translated.python_OP_Affinity(self, op)
def python_OP_OpenAutoindex_OpenEphemeral(self, pc, op):
return capi.impl_OP_OpenAutoindex_OpenEphemeral(self.p, self.db, pc, op.pOp)
def python_OP_MakeRecord(self, pc, rc, op):
return capi.impl_OP_MakeRecord(self.p, self.db, pc, rc, op.pOp)
def python_OP_SorterInsert_IdxInsert(self, op):
return capi.impl_OP_SorterInsert_IdxInsert(self.p, self.db, op.pOp)
def python_OP_NoConflict_NotFound_Found(self, pc, rc, op):
self.internalPc[0] = rffi.cast(rffi.LONG, pc)
rc = capi.impl_OP_NoConflict_NotFound_Found(self.p, self.db, self.internalPc, rc, op.pOp)
retPc = self.internalPc[0]
return retPc, rc
def python_OP_RowSetTest(self, pc, rc, op):
self.internalPc[0] = rffi.cast(rffi.LONG, pc)
rc = capi.impl_OP_RowSetTest(self.p, self.db, self.internalPc, rc, op.pOp)
retPc = self.internalPc[0]
return retPc, rc
def python_OP_Gosub(self, pc, op):
return capi.impl_OP_Gosub(self.p, pc, op.pOp)
def python_OP_Return(self, pc, op):
return capi.impl_OP_Return(self.p, pc, op.pOp)
def python_OP_SorterOpen(self, pc, op):
return capi.impl_OP_SorterOpen(self.p, self.db, pc, op.pOp)
def python_OP_NextIfOpen(self, pc, rc, op):
# self.internalPc[0] = rffi.cast(rffi.LONG, pc)
# rc = capi.impl_OP_NextIfOpen(self.p, self.db, self.internalPc, rc, op.pOp)
# retPc = self.internalPc[0]
# return retPc, rc
return translated.python_OP_NextIfOpen_translated(self, pc, rc, op)
def python_OP_Sequence(self, op):
capi.impl_OP_Sequence(self.p, op.pOp)
def python_OP_OpenPseudo(self, pc, rc, op):
return capi.impl_OP_OpenPseudo(self.p, self.db, pc, rc, op.pOp)
def python_OP_SorterSort_Sort(self, pc, op):
self.internalPc[0] = rffi.cast(rffi.LONG, pc)
rc = capi.impl_OP_SorterSort_Sort(self.p, self.db, self.internalPc, op.pOp)
retPc = self.internalPc[0]
return retPc, rc
def python_OP_SorterData(self, op):
return capi.impl_OP_SorterData(self.p, op.pOp)
def python_OP_SorterNext(self, pc, op):
self.internalPc[0] = rffi.cast(rffi.LONG, pc)
rc = capi.impl_OP_SorterNext(self.p, self.db, self.internalPc, op.pOp)
retPc = self.internalPc[0]
return retPc, rc
def python_OP_Noop_Explain(self, op):
translated.python_OP_Noop_Explain_translated(op)
def python_OP_Compare(self, op):
capi.impl_OP_Compare(self.p, op.pOp)
def python_OP_Jump(self, op):
return capi.impl_OP_Jump(op.pOp)
def python_OP_IfPos(self, pc, op):
return translated.python_OP_IfPos(self, pc, op)
def python_OP_CollSeq(self, op):
capi.impl_OP_CollSeq(self.p, op.pOp)
def python_OP_NotNull(self, pc, op):
# return capi.impl_OP_NotNull(self.p, pc, op.pOp)
return translated.python_OP_NotNull(self, pc, op)
def python_OP_InitCoroutine(self, pc, op):
return capi.impl_OP_InitCoroutine(self.p, pc, op.pOp)
def python_OP_Yield(self, pc, op):
return capi.impl_OP_Yield(self.p, pc, op.pOp)
def python_OP_NullRow(self, op):
capi.impl_OP_NullRow(self.p, op.pOp)
def python_OP_EndCoroutine(self, op):
return capi.impl_OP_EndCoroutine(self.p, op.pOp)
def python_OP_ReadCookie(self, op):
capi.impl_OP_ReadCookie(self.p, self.db, op.pOp)
def python_OP_NewRowid(self, pc, rc, op):
return capi.impl_OP_NewRowid(self.p, self.db, pc, rc, op.pOp)
def python_OP_Insert_InsertInt(self, op):
return capi.impl_OP_Insert_InsertInt(self.p, self.db, op.pOp)
def python_OP_SetCookie(self, op):
return capi.impl_OP_SetCookie(self.p, self.db, op.pOp)
def python_OP_ParseSchema(self, pc, rc, op):
return capi.impl_OP_ParseSchema(self.p, self.db, pc, rc, op.pOp)
def python_OP_RowSetAdd(self, pc, rc, op):
return capi.impl_OP_RowSetAdd(self.p, self.db, pc, rc, op.pOp)
def python_OP_RowSetRead(self, pc, rc, op):
self.internalPc[0] = rffi.cast(rffi.LONG, pc)
retRc = capi.impl_OP_RowSetRead(self.p, self.db, self.internalPc, rc, op.pOp)
retPc = self.internalPc[0]
return retPc, retRc
def python_OP_Delete(self, pc, op):
return capi.impl_OP_Delete(self.p, self.db, pc, op.pOp)
def python_OP_DropTable(self, op):
return capi.impl_OP_DropTable(self.db, op.pOp)
def python_sqlite3_column_text(self, iCol):
return capi.sqlite3_column_text(self.p, iCol)
def python_sqlite3_column_bytes(self, iCol):
return capi.sqlite3_column_bytes(self.p, iCol)
def debug_print(self, s):
return
if not jit.we_are_jitted():
print s
@jit.elidable
def get_opcode_str(self, opcode):
return capi.opnames_dict.get(opcode, '')
@jit.elidable
def get_aOp(self):
return self.p.aOp
@jit.elidable
def enc(self):
return self.db.aDb[0].pSchema.enc
def mem_with_index(self, i):
return self._mem_as_python_list[i]
def mainloop(self):
rc = CConfig.SQLITE_OK
pc = jit.promote(rffi.cast(lltype.Signed, self.p.pc))
if pc < 0:
pc = 0 # XXX maybe more to do, see vdbeapi.c:418
i = 0
while True:
jitdriver.jit_merge_point(pc=pc, self_=self, rc=rc)
if rc != CConfig.SQLITE_OK:
break
op = self._hlops[pc]
opcode = op.get_opcode()
oldpc = pc
self.debug_print('>>> %s <<<' % self.get_opcode_str(opcode))
opflags = op.opflags()
if opflags & CConfig.OPFLG_OUT2_PRERELEASE:
pOut = op.mem_of_p(2)
pOut.VdbeMemRelease()
pOut.set_flags(CConfig.MEM_Int)
if not self.is_op_cache_safe(opcode):
self.invalidate_caches()
if opcode == CConfig.OP_Init:
pc = self.python_OP_Init(pc, op)
elif (opcode == CConfig.OP_OpenRead or
opcode == CConfig.OP_OpenWrite):
rc = self.python_OP_OpenRead_OpenWrite(pc, op)
elif opcode == CConfig.OP_Rewind:
pc, rc = self.python_OP_Rewind(pc, op)
elif opcode == CConfig.OP_Transaction:
rc = self.python_OP_Transaction(pc, op)
if rc == CConfig.SQLITE_BUSY:
print 'ERROR: in OP_Transaction SQLITE_BUSY'
return rc
elif opcode == CConfig.OP_TableLock:
rc = self.python_OP_TableLock(rc, op)
elif opcode == CConfig.OP_Goto:
pc, rc = self.python_OP_Goto(pc, rc, op)
elif opcode == CConfig.OP_Column:
rc = self.python_OP_Column(pc, op)
elif opcode == CConfig.OP_ResultRow:
rc = self.python_OP_ResultRow(pc, op)
if rc == CConfig.SQLITE_ROW:
return rc
elif opcode == CConfig.OP_Next:
pc, rc = self.python_OP_Next(pc, op)
elif opcode == CConfig.OP_Close:
self.python_OP_Close(op)
elif opcode == CConfig.OP_Halt:
pc, rc = self.python_OP_Halt(pc, op)
return rc
elif (opcode == CConfig.OP_Eq or
opcode == CConfig.OP_Ne or
opcode == CConfig.OP_Lt or
opcode == CConfig.OP_Le or
opcode == CConfig.OP_Gt or
opcode == CConfig.OP_Ge):
pc, rc = self.python_OP_Ne_Eq_Gt_Le_Lt_Ge(pc, rc, op)
elif opcode == CConfig.OP_Integer:
self.python_OP_Integer(op)
elif opcode == CConfig.OP_Null:
self.python_OP_Null(op)
elif opcode == CConfig.OP_AggStep:
rc = self.python_OP_AggStep(rc, pc, op)
elif opcode == CConfig.OP_AggFinal:
rc = self.python_OP_AggFinal(pc, rc, op)
elif opcode == CConfig.OP_Copy:
rc = self.python_OP_Copy(pc, rc, op)
elif opcode == CConfig.OP_MustBeInt:
pc, rc = self.python_OP_MustBeInt(pc, rc, op)
elif opcode == CConfig.OP_NotExists:
pc, rc = self.python_OP_NotExists(pc, op)
elif opcode == CConfig.OP_String:
self.python_OP_String(op)
elif opcode == CConfig.OP_String8:
rc = self.python_OP_String8(pc, rc, op)
elif opcode == CConfig.OP_Function:
rc = self.python_OP_Function(pc, rc, op)
elif opcode == CConfig.OP_Real:
self.python_OP_Real(op)
elif opcode == CConfig.OP_RealAffinity:
self.python_OP_RealAffinity(op)
elif (opcode == CConfig.OP_Add or
opcode == CConfig.OP_Subtract or
opcode == CConfig.OP_Multiply or
opcode == CConfig.OP_Divide or
opcode == CConfig.OP_Remainder):
self.python_OP_Add_Subtract_Multiply_Divide_Remainder(op)
elif (opcode == CConfig.OP_If or
opcode == CConfig.OP_IfNot):
pc = self.python_OP_If_IfNot(pc, op)
elif opcode == CConfig.OP_Rowid:
rc = self.python_OP_Rowid(pc, rc, op)
elif opcode == CConfig.OP_IsNull:
pc = self.python_OP_IsNull(pc, op)
elif (opcode == CConfig.OP_SeekLT or
opcode == CConfig.OP_SeekLE or
opcode == CConfig.OP_SeekGE or
opcode == CConfig.OP_SeekGT):
pc, rc = self.python_OP_SeekLT_SeekLE_SeekGE_SeekGT(pc, rc, op)
elif opcode == CConfig.OP_Move:
self.python_OP_Move(op)
elif opcode == CConfig.OP_IfZero:
pc = self.python_OP_IfZero(pc, op)
elif opcode == CConfig.OP_IdxRowid:
rc = self.python_OP_IdxRowid(pc, rc, op)
elif (opcode == CConfig.OP_IdxLE or
opcode == CConfig.OP_IdxGT or
opcode == CConfig.OP_IdxLT or
opcode == CConfig.OP_IdxGE):
pc, rc = self.python_OP_IdxLE_IdxGT_IdxLT_IdxGE(pc, op)
elif opcode == CConfig.OP_Seek:
self.python_OP_Seek(op)
elif opcode == CConfig.OP_Once:
pc = self.python_OP_Once(pc, op)
elif opcode == CConfig.OP_SCopy:
self.python_OP_SCopy(op)
elif opcode == CConfig.OP_Affinity:
self.python_OP_Affinity(op)
elif (opcode == CConfig.OP_OpenAutoindex or
opcode == CConfig.OP_OpenEphemeral):
rc = self.python_OP_OpenAutoindex_OpenEphemeral(pc, op)
elif opcode == CConfig.OP_MakeRecord:
rc = self.python_OP_MakeRecord(pc, rc, op)
elif (opcode == CConfig.OP_SorterInsert or
opcode == CConfig.OP_IdxInsert):
rc = self.python_OP_SorterInsert_IdxInsert(op)
elif (opcode == CConfig.OP_NoConflict or
opcode == CConfig.OP_NotFound or
opcode == CConfig.OP_Found):
pc, rc = self.python_OP_NoConflict_NotFound_Found(pc, rc, op)
elif opcode == CConfig.OP_RowSetTest:
pc, rc = self.python_OP_RowSetTest(pc, rc, op)
elif opcode == CConfig.OP_Gosub:
pc = self.python_OP_Gosub(pc, op)
elif opcode == CConfig.OP_Return:
pc = self.python_OP_Return(pc, op)
elif opcode == CConfig.OP_SorterOpen:
rc = self.python_OP_SorterOpen(pc, op)
elif opcode == CConfig.OP_NextIfOpen:
pc, rc = self.python_OP_NextIfOpen(pc, rc, op)
elif opcode == CConfig.OP_Sequence:
self.python_OP_Sequence(op)
elif opcode == CConfig.OP_OpenPseudo:
rc = self.python_OP_OpenPseudo(pc, rc, op)
elif (opcode == CConfig.OP_SorterSort or
opcode == CConfig.OP_Sort):
pc, rc = self.python_OP_SorterSort_Sort(pc, op)
elif opcode == CConfig.OP_SorterData:
rc = self.python_OP_SorterData(op)
elif opcode == CConfig.OP_SorterNext:
pc, rc = self.python_OP_SorterNext(pc, op)
elif (opcode == CConfig.OP_Noop or
opcode == CConfig.OP_Explain):
self.python_OP_Noop_Explain(op)
elif opcode == CConfig.OP_Compare:
self.python_OP_Compare(op)
elif opcode == CConfig.OP_Jump:
pc = self.python_OP_Jump(op)
elif opcode == CConfig.OP_IfPos:
pc = self.python_OP_IfPos(pc, op)
elif opcode == CConfig.OP_CollSeq:
self.python_OP_CollSeq(op)
elif opcode == CConfig.OP_NotNull:
pc = self.python_OP_NotNull(pc, op)
elif opcode == CConfig.OP_InitCoroutine:
pc = self.python_OP_InitCoroutine(pc, op)
elif opcode == CConfig.OP_Yield:
pc = self.python_OP_Yield(pc, op)
elif opcode == CConfig.OP_NullRow:
self.python_OP_NullRow(op)
elif opcode == CConfig.OP_EndCoroutine:
pc = self.python_OP_EndCoroutine(op)
elif opcode == CConfig.OP_ReadCookie:
self.python_OP_ReadCookie(op)
elif opcode == CConfig.OP_NewRowid:
rc = self.python_OP_NewRowid(pc, rc, op)
elif (opcode == CConfig.OP_Insert or
opcode == CConfig.OP_InsertInt):
rc = self.python_OP_Insert_InsertInt(op)
elif opcode == CConfig.OP_SetCookie:
rc = self.python_OP_SetCookie(op)
elif opcode == CConfig.OP_ParseSchema:
rc = self.python_OP_ParseSchema(pc, rc, op)
elif opcode == CConfig.OP_RowSetAdd:
rc = self.python_OP_RowSetAdd(pc, rc, op)
elif opcode == CConfig.OP_RowSetRead:
pc, rc = self.python_OP_RowSetRead(pc, rc, op)
elif opcode == CConfig.OP_Delete:
rc = self.python_OP_Delete(pc, op)
elif opcode == CConfig.OP_DropTable:
self.python_OP_DropTable(op)
else:
raise SQPyteException("SQPyteException: Unimplemented bytecode %s." % opcode)
pc = jit.promote(rffi.cast(lltype.Signed, pc))
pc += 1
if pc <= oldpc:
jitdriver.can_enter_jit(pc=pc, self_=self, rc=rc)
return rc
class Op(object):
_immutable_fields_ = ['hlquery', 'pOp']
def __init__(self, hlquery, pOp):
self.hlquery = hlquery
self.pOp = pOp
@jit.elidable
def get_opcode(self):
return rffi.cast(lltype.Unsigned, self.pOp.opcode)
@jit.elidable
def p_Signed(self, i):
if i == 1:
return rffi.cast(lltype.Signed, self.pOp.p1)
if i == 2:
return rffi.cast(lltype.Signed, self.pOp.p2)
if i == 3:
return rffi.cast(lltype.Signed, self.pOp.p3)
if i == 5:
return rffi.cast(lltype.Signed, self.pOp.p5)
assert 0
@jit.elidable
def p_Unsigned(self, i):
if i == 1:
return rffi.cast(lltype.Unsigned, self.pOp.p1)
if i == 2:
return rffi.cast(lltype.Unsigned, self.pOp.p2)
if i == 3:
return rffi.cast(lltype.Unsigned, self.pOp.p3)
if i == 5:
return rffi.cast(lltype.Unsigned, self.pOp.p5)
assert 0
@jit.elidable
def p4type(self):
return self.pOp.p4type
@jit.elidable
def p4_z(self):
return rffi.charp2str(self.pOp.p4.z)
@jit.elidable
def p4_pFunc(self):
return self.pOp.p4.pFunc
@jit.elidable
def p4_pColl(self):
return self.pOp.p4.pColl
def p2as_pc(self):
return self.p_Signed(2) - 1
def mem_of_p(self, i):
return self.hlquery.mem_with_index(self.p_Signed(i))
def mem_and_flags_of_p(self, i, promote=False):
mem = self.mem_of_p(i)
flags = mem.get_flags(promote=promote)
return mem, flags
@jit.elidable
def opflags(self):
return rffi.cast(lltype.Unsigned, self.pOp.opflags)
def main_work(query):
db = Sqlite3DB(testdb).db
query = Sqlite3Query(db, query)
rc = query.mainloop()
count = 0
while rc == CConfig.SQLITE_ROW:
rc = query.mainloop()
count += 1
print count
def entry_point(argv):
try:
query = argv[1]
except IndexError:
print "You must supply a query to be run: e.g., 'select first_name from people where age > 1;'."
return 1
main_work(query)
return 0
def target(*args):
return entry_point
if __name__ == "__main__":
entry_point(sys.argv)
arithmetic is safe too
Former-commit-id: 602afd07de6575beef64df110f4d97548ef24b1f
from rpython.rlib import jit
from rpython.rtyper.lltypesystem import rffi, lltype
from capi import CConfig
from rpython.rlib.rarithmetic import intmask
import sys
import os
import capi
import translated
import math
# Path of the SQLite database used by the stand-alone entry point, resolved
# relative to this module so it works regardless of the current directory.
testdb = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test/test.db")
# testdb = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test/big-test.db")
def get_printable_location(pc, rc, self):
    """Render one trace-location label for the JIT log.

    Shows the pc, the current result code and the opcode name, tagging
    opcodes whose handlers may invalidate the Mem caches with ' UNSAFE'.
    """
    current_op = self._hlops[pc]
    code = current_op.get_opcode()
    suffix = '' if _cache_safe_opcodes[code] else ' UNSAFE'
    return "%s %s %s%s" % (pc, rc, self.get_opcode_str(code), suffix)
# JIT driver for the opcode dispatch loop in Sqlite3Query.mainloop.
# NOTE(review): 'rc' and 'self_' are declared green (trace constants), so
# traces are specialised per query object and per result code.
jitdriver = jit.JitDriver(
    greens=['pc', 'rc', 'self_'],
    reds=[],
    should_unroll_one_iteration=lambda *args: True,
    get_printable_location=get_printable_location)
class SQPyteException(Exception):
    """Raised when the interpreter meets a bytecode it cannot handle.

    Bug fix: the original __init__ only printed the message and never
    forwarded it to Exception.__init__, so str(exc) and tracebacks showed
    an empty message.  The message is now stored on the exception as well;
    the console echo is kept for parity with the original behaviour.
    """
    def __init__(self, msg):
        Exception.__init__(self, msg)
        print(msg)
class Sqlite3DB(object):
    """Thin RPython wrapper around an opened sqlite3 database handle.

    After construction, ``self.db`` is the raw SQLITE3P handle used by all
    the capi.impl_OP_* calls.
    """
    _immutable_fields_ = ['db']
    def __init__(self, db_name):
        self.opendb(db_name)
    def opendb(self, db_name):
        # sqlite3_open writes the new handle into an out-pointer; both the
        # C string and the out-pointer are scoped (auto-freed) allocations.
        with rffi.scoped_str2charp(db_name) as db_name, lltype.scoped_alloc(capi.SQLITE3PP.TO, 1) as result:
            errorcode = capi.sqlite3_open(db_name, result)
            # errorcode 0 is SQLITE_OK; anything else aborts here.
            assert(errorcode == 0)
            self.db = rffi.cast(capi.SQLITE3P, result[0])
# Table of opcodes whose handlers never invalidate the Mem caches, indexed
# by opcode number (sqlite opcodes fit in a single byte).
_cache_safe_opcodes = [False] * 256

def cache_safe(opcodes=None):
    """Decorator marking an opcode handler as cache-safe.

    When *opcodes* is omitted, the opcode is derived from the handler's
    name (``python_<OpName>`` -> ``CConfig.<OpName>``); otherwise every
    opcode in the list is flagged.  The handler is returned unmodified.
    """
    def decorate(handler):
        if opcodes is None:
            handler_name = handler.func_name
            assert handler_name.startswith("python_")
            marked = [getattr(CConfig, handler_name[len("python_"):])]
        else:
            marked = opcodes
        for code in marked:
            _cache_safe_opcodes[code] = True
        return handler
    return decorate
class Sqlite3Query(object):
_immutable_fields_ = ['internalPc', 'db', 'p', '_mem_as_python_list[*]', '_llmem_as_python_list[*]', 'intp',
'_hlops[*]', '_mem_caches']
    def __init__(self, db, query):
        """Compile *query* against raw db handle *db* and set up state.

        internalPc/intp/longp are raw out-parameter buffers that the C
        opcode implementations write through; they are freed in __del__.
        """
        self.db = db
        self.internalPc = lltype.malloc(rffi.LONGP.TO, 1, flavor='raw')
        self.intp = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
        self.longp = lltype.malloc(rffi.LONGP.TO, 1, flavor='raw')
        self.prepare(query)
    def __del__(self):
        # Release the raw buffers allocated in __init__; flavor='raw'
        # allocations are not garbage collected.
        lltype.free(self.internalPc, flavor='raw')
        lltype.free(self.intp, flavor='raw')
        lltype.free(self.longp, flavor='raw')
    def prepare(self, query):
        """Compile the SQL text into a VDBE program via sqlite3_prepare.

        Stores the resulting statement handle in self.p and then mirrors
        its register/opcode arrays into Python lists (_init_python_data).
        """
        length = len(query)
        with rffi.scoped_str2charp(query) as query, lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as result, lltype.scoped_alloc(rffi.CCHARPP.TO, 1) as unused_buffer:
            errorcode = capi.sqlite3_prepare(self.db, query, length, result, unused_buffer)
            # 0 is SQLITE_OK; a syntax error or missing table aborts here.
            assert errorcode == 0
            self.p = rffi.cast(capi.VDBEP, result[0])
        self._init_python_data()
    def _init_python_data(self):
        # Mirror the C-level aMem/aOp arrays as fixed-size Python lists so
        # the JIT can treat them as immutable (the [*] suffixes in
        # _immutable_fields_).  _mem_caches starts empty and is filled
        # lazily; it is flushed by invalidate_caches().
        from sqpyte.mem import Mem
        self._llmem_as_python_list = [self.p.aMem[i] for i in range(self.p.nMem)]
        self._mem_as_python_list = [Mem(self, self.p.aMem[i], i)
                                    for i in range(self.p.nMem)]
        self._mem_caches = [None] * len(self._mem_as_python_list)
        self._hlops = [Op(self, self.p.aOp[i]) for i in range(self.p.nOp)]
    @jit.unroll_safe
    def invalidate_caches(self):
        # Called before any opcode whose C implementation may mutate the
        # registers behind our back; drops every cached flag/value on the
        # Python-level Mem mirrors.
        for mem in self._mem_as_python_list:
            mem.invalidate_cache()
    def is_op_cache_safe(self, opcode):
        # True when the handler for *opcode* is known not to invalidate the
        # Mem caches (registered via the cache_safe decorator).
        return _cache_safe_opcodes[opcode]
def reset_query(self):
capi.sqlite3_reset(self.p)
def python_OP_Init(self, pc, op):
return translated.python_OP_Init_translated(self, pc, op)
def python_OP_Rewind(self, pc, op):
self.internalPc[0] = rffi.cast(rffi.LONG, pc)
rc = capi.impl_OP_Rewind(self.p, self.db, self.internalPc, op.pOp)
retPc = self.internalPc[0]
return retPc, rc
def python_OP_Transaction(self, pc, op):
return capi.impl_OP_Transaction(self.p, self.db, pc, op.pOp)
def python_OP_TableLock(self, rc, op):
return capi.impl_OP_TableLock(self.p, self.db, rc, op.pOp)
@cache_safe()
def python_OP_Goto(self, pc, rc, op):
# self.internalPc[0] = rffi.cast(rffi.LONG, pc)
# retRc = capi.impl_OP_Goto(self.p, self.db, self.internalPc, rc, op.pOp)
# retPc = self.internalPc[0]
# return retPc, retRc
return translated.python_OP_Goto_translated(self, pc, rc, op)
def python_OP_OpenRead_OpenWrite(self, pc, op):
return capi.impl_OP_OpenRead_OpenWrite(self.p, self.db, pc, op.pOp)
# translated.python_OP_OpenRead_OpenWrite_translated(self, self.db, pc, op)
def python_OP_Column(self, pc, op):
return capi.impl_OP_Column(self.p, self.db, pc, op.pOp)
# return translated.python_OP_Column_translated(self, self.db, pc, op)
def python_OP_ResultRow(self, pc, op):
return capi.impl_OP_ResultRow(self.p, self.db, pc, op.pOp)
@cache_safe()
def python_OP_Next(self, pc, op):
# self.internalPc[0] = rffi.cast(rffi.LONG, pc)
# rc = capi.impl_OP_Next(self.p, self.db, self.internalPc, op.pOp)
# retPc = self.internalPc[0]
# return retPc, rc
return translated.python_OP_Next_translated(self, pc, op)
def python_OP_Close(self, op):
capi.impl_OP_Close(self.p, op.pOp)
def python_OP_Halt(self, pc, op):
self.internalPc[0] = rffi.cast(rffi.LONG, pc)
rc = capi.impl_OP_Halt(self.p, self.db, self.internalPc, op.pOp)
retPc = self.internalPc[0]
return retPc, rc
@cache_safe(
opcodes=[CConfig.OP_Eq, CConfig.OP_Ne, CConfig.OP_Lt, CConfig.OP_Le,
CConfig.OP_Gt, CConfig.OP_Ge])
def python_OP_Ne_Eq_Gt_Le_Lt_Ge(self, pc, rc, op):
# self.internalPc[0] = rffi.cast(rffi.LONG, pc)
# rc = capi.impl_OP_Ne_Eq_Gt_Le_Lt_Ge(self.p, self.db, self.internalPc, rc, op.pOp)
# retPc = self.internalPc[0]
# return retPc, rc
return translated.python_OP_Ne_Eq_Gt_Le_Lt_Ge_translated(self, pc, rc, op)
def python_OP_Integer(self, op):
translated.python_OP_Integer(self, op)
#capi.impl_OP_Integer(self.p, op.pOp)
def python_OP_Null(self, op):
capi.impl_OP_Null(self.p, op.pOp)
def python_OP_AggStep(self, rc, pc, op):
#return capi.impl_OP_AggStep(self.p, self.db, rc, op.pOp)
return translated.python_OP_AggStep(self, rc, pc, op)
def python_OP_AggFinal(self, pc, rc, op):
return capi.impl_OP_AggFinal(self.p, self.db, pc, rc, op.pOp)
def python_OP_Copy(self, pc, rc, op):
return capi.impl_OP_Copy(self.p, self.db, pc, rc, op.pOp)
def python_OP_MustBeInt(self, pc, rc, op):
return translated.python_OP_MustBeInt(self, pc, rc, op)
def python_OP_NotExists(self, pc, op):
return translated.python_OP_NotExists(self, pc, op)
#self.internalPc[0] = rffi.cast(rffi.LONG, pc)
#rc = capi.impl_OP_NotExists(self.p, self.db, self.internalPc, op.pOp)
#retPc = self.internalPc[0]
#return retPc, rc
def python_OP_String(self, op):
capi.impl_OP_String(self.p, self.db, op.pOp)
def python_OP_String8(self, pc, rc, op):
return capi.impl_OP_String8(self.p, self.db, pc, rc, op.pOp)
def python_OP_Function(self, pc, rc, op):
return capi.impl_OP_Function(self.p, self.db, pc, rc, op.pOp)
def python_OP_Real(self, op):
# aMem = self.p.aMem
# pOut = aMem[pOp.p2]
# pOut.flags = rffi.cast(rffi.USHORT, CConfig.MEM_Real)
# assert not math.isnan(pOp.p4.pReal)
# pOut.r = pOp.p4.pReal
capi.impl_OP_Real(self.p, op.pOp)
def python_OP_RealAffinity(self, op):
# capi.impl_OP_RealAffinity(self.p, op.pOp)
translated.python_OP_RealAffinity(self, op)
@cache_safe(
opcodes=[CConfig.OP_Add, CConfig.OP_Subtract, CConfig.OP_Multiply,
CConfig.OP_Divide, CConfig.OP_Remainder])
def python_OP_Add_Subtract_Multiply_Divide_Remainder(self, op):
# capi.impl_OP_Add_Subtract_Multiply_Divide_Remainder(self.p, op.pOp)
translated.python_OP_Add_Subtract_Multiply_Divide_Remainder(self, op)
def python_OP_If_IfNot(self, pc, op):
# return capi.impl_OP_If_IfNot(self.p, pc, op.pOp)
return translated.python_OP_If_IfNot(self, pc, op)
def python_OP_Rowid(self, pc, rc, op):
return capi.impl_OP_Rowid(self.p, self.db, pc, rc, op.pOp)
def python_OP_IsNull(self, pc, op):
# return capi.impl_OP_IsNull(self.p, pc, op.pOp)
return translated.python_OP_IsNull(self, pc, op)
def python_OP_SeekLT_SeekLE_SeekGE_SeekGT(self, pc, rc, op):
self.internalPc[0] = rffi.cast(rffi.LONG, pc)
rc = capi.impl_OP_SeekLT_SeekLE_SeekGE_SeekGT(self.p, self.db, self.internalPc, rc, op.pOp)
retPc = self.internalPc[0]
return retPc, rc
def python_OP_Move(self, op):
capi.impl_OP_Move(self.p, op.pOp)
def python_OP_IfZero(self, pc, op):
return capi.impl_OP_IfZero(self.p, pc, op.pOp)
def python_OP_IdxRowid(self, pc, rc, op):
return translated.python_OP_IdxRowid(self, pc, rc, op)
#return capi.impl_OP_IdxRowid(self.p, self.db, pc, rc, op.pOp)
def python_OP_IdxLE_IdxGT_IdxLT_IdxGE(self, pc, op):
self.internalPc[0] = rffi.cast(rffi.LONG, pc)
rc = capi.impl_OP_IdxLE_IdxGT_IdxLT_IdxGE(self.p, self.internalPc, op.pOp)
retPc = self.internalPc[0]
return retPc, rc
def python_OP_Seek(self, op):
#capi.impl_OP_Seek(self.p, op.pOp)
translated.python_OP_Seek(self, op)
def python_OP_Once(self, pc, op):
# return capi.impl_OP_Once(self.p, pc, op.pOp)
return translated.python_OP_Once(self, pc, op)
def python_OP_SCopy(self, op):
capi.impl_OP_SCopy(self.p, op.pOp)
def python_OP_Affinity(self, op):
# capi.impl_OP_Affinity(self.p, self.db, op.pOp)
translated.python_OP_Affinity(self, op)
def python_OP_OpenAutoindex_OpenEphemeral(self, pc, op):
return capi.impl_OP_OpenAutoindex_OpenEphemeral(self.p, self.db, pc, op.pOp)
def python_OP_MakeRecord(self, pc, rc, op):
return capi.impl_OP_MakeRecord(self.p, self.db, pc, rc, op.pOp)
def python_OP_SorterInsert_IdxInsert(self, op):
return capi.impl_OP_SorterInsert_IdxInsert(self.p, self.db, op.pOp)
def python_OP_NoConflict_NotFound_Found(self, pc, rc, op):
self.internalPc[0] = rffi.cast(rffi.LONG, pc)
rc = capi.impl_OP_NoConflict_NotFound_Found(self.p, self.db, self.internalPc, rc, op.pOp)
retPc = self.internalPc[0]
return retPc, rc
def python_OP_RowSetTest(self, pc, rc, op):
self.internalPc[0] = rffi.cast(rffi.LONG, pc)
rc = capi.impl_OP_RowSetTest(self.p, self.db, self.internalPc, rc, op.pOp)
retPc = self.internalPc[0]
return retPc, rc
def python_OP_Gosub(self, pc, op):
return capi.impl_OP_Gosub(self.p, pc, op.pOp)
def python_OP_Return(self, pc, op):
return capi.impl_OP_Return(self.p, pc, op.pOp)
def python_OP_SorterOpen(self, pc, op):
return capi.impl_OP_SorterOpen(self.p, self.db, pc, op.pOp)
def python_OP_NextIfOpen(self, pc, rc, op):
# self.internalPc[0] = rffi.cast(rffi.LONG, pc)
# rc = capi.impl_OP_NextIfOpen(self.p, self.db, self.internalPc, rc, op.pOp)
# retPc = self.internalPc[0]
# return retPc, rc
return translated.python_OP_NextIfOpen_translated(self, pc, rc, op)
def python_OP_Sequence(self, op):
capi.impl_OP_Sequence(self.p, op.pOp)
def python_OP_OpenPseudo(self, pc, rc, op):
return capi.impl_OP_OpenPseudo(self.p, self.db, pc, rc, op.pOp)
def python_OP_SorterSort_Sort(self, pc, op):
self.internalPc[0] = rffi.cast(rffi.LONG, pc)
rc = capi.impl_OP_SorterSort_Sort(self.p, self.db, self.internalPc, op.pOp)
retPc = self.internalPc[0]
return retPc, rc
def python_OP_SorterData(self, op):
return capi.impl_OP_SorterData(self.p, op.pOp)
def python_OP_SorterNext(self, pc, op):
self.internalPc[0] = rffi.cast(rffi.LONG, pc)
rc = capi.impl_OP_SorterNext(self.p, self.db, self.internalPc, op.pOp)
retPc = self.internalPc[0]
return retPc, rc
def python_OP_Noop_Explain(self, op):
translated.python_OP_Noop_Explain_translated(op)
def python_OP_Compare(self, op):
capi.impl_OP_Compare(self.p, op.pOp)
def python_OP_Jump(self, op):
return capi.impl_OP_Jump(op.pOp)
def python_OP_IfPos(self, pc, op):
return translated.python_OP_IfPos(self, pc, op)
def python_OP_CollSeq(self, op):
capi.impl_OP_CollSeq(self.p, op.pOp)
def python_OP_NotNull(self, pc, op):
# return capi.impl_OP_NotNull(self.p, pc, op.pOp)
return translated.python_OP_NotNull(self, pc, op)
def python_OP_InitCoroutine(self, pc, op):
return capi.impl_OP_InitCoroutine(self.p, pc, op.pOp)
def python_OP_Yield(self, pc, op):
return capi.impl_OP_Yield(self.p, pc, op.pOp)
def python_OP_NullRow(self, op):
capi.impl_OP_NullRow(self.p, op.pOp)
def python_OP_EndCoroutine(self, op):
return capi.impl_OP_EndCoroutine(self.p, op.pOp)
def python_OP_ReadCookie(self, op):
capi.impl_OP_ReadCookie(self.p, self.db, op.pOp)
def python_OP_NewRowid(self, pc, rc, op):
return capi.impl_OP_NewRowid(self.p, self.db, pc, rc, op.pOp)
def python_OP_Insert_InsertInt(self, op):
return capi.impl_OP_Insert_InsertInt(self.p, self.db, op.pOp)
def python_OP_SetCookie(self, op):
return capi.impl_OP_SetCookie(self.p, self.db, op.pOp)
def python_OP_ParseSchema(self, pc, rc, op):
return capi.impl_OP_ParseSchema(self.p, self.db, pc, rc, op.pOp)
def python_OP_RowSetAdd(self, pc, rc, op):
return capi.impl_OP_RowSetAdd(self.p, self.db, pc, rc, op.pOp)
def python_OP_RowSetRead(self, pc, rc, op):
self.internalPc[0] = rffi.cast(rffi.LONG, pc)
retRc = capi.impl_OP_RowSetRead(self.p, self.db, self.internalPc, rc, op.pOp)
retPc = self.internalPc[0]
return retPc, retRc
def python_OP_Delete(self, pc, op):
return capi.impl_OP_Delete(self.p, self.db, pc, op.pOp)
def python_OP_DropTable(self, op):
return capi.impl_OP_DropTable(self.db, op.pOp)
def python_sqlite3_column_text(self, iCol):
return capi.sqlite3_column_text(self.p, iCol)
def python_sqlite3_column_bytes(self, iCol):
return capi.sqlite3_column_bytes(self.p, iCol)
    def debug_print(self, s):
        # Debug tracing is currently disabled by the early return; remove
        # it to get per-opcode logging when running untraced.
        return
        if not jit.we_are_jitted():
            print s
@jit.elidable
def get_opcode_str(self, opcode):
return capi.opnames_dict.get(opcode, '')
@jit.elidable
def get_aOp(self):
return self.p.aOp
@jit.elidable
def enc(self):
return self.db.aDb[0].pSchema.enc
def mem_with_index(self, i):
return self._mem_as_python_list[i]
def mainloop(self):
rc = CConfig.SQLITE_OK
pc = jit.promote(rffi.cast(lltype.Signed, self.p.pc))
if pc < 0:
pc = 0 # XXX maybe more to do, see vdbeapi.c:418
i = 0
while True:
jitdriver.jit_merge_point(pc=pc, self_=self, rc=rc)
if rc != CConfig.SQLITE_OK:
break
op = self._hlops[pc]
opcode = op.get_opcode()
oldpc = pc
self.debug_print('>>> %s <<<' % self.get_opcode_str(opcode))
opflags = op.opflags()
if opflags & CConfig.OPFLG_OUT2_PRERELEASE:
pOut = op.mem_of_p(2)
pOut.VdbeMemRelease()
pOut.set_flags(CConfig.MEM_Int)
if not self.is_op_cache_safe(opcode):
self.invalidate_caches()
if opcode == CConfig.OP_Init:
pc = self.python_OP_Init(pc, op)
elif (opcode == CConfig.OP_OpenRead or
opcode == CConfig.OP_OpenWrite):
rc = self.python_OP_OpenRead_OpenWrite(pc, op)
elif opcode == CConfig.OP_Rewind:
pc, rc = self.python_OP_Rewind(pc, op)
elif opcode == CConfig.OP_Transaction:
rc = self.python_OP_Transaction(pc, op)
if rc == CConfig.SQLITE_BUSY:
print 'ERROR: in OP_Transaction SQLITE_BUSY'
return rc
elif opcode == CConfig.OP_TableLock:
rc = self.python_OP_TableLock(rc, op)
elif opcode == CConfig.OP_Goto:
pc, rc = self.python_OP_Goto(pc, rc, op)
elif opcode == CConfig.OP_Column:
rc = self.python_OP_Column(pc, op)
elif opcode == CConfig.OP_ResultRow:
rc = self.python_OP_ResultRow(pc, op)
if rc == CConfig.SQLITE_ROW:
return rc
elif opcode == CConfig.OP_Next:
pc, rc = self.python_OP_Next(pc, op)
elif opcode == CConfig.OP_Close:
self.python_OP_Close(op)
elif opcode == CConfig.OP_Halt:
pc, rc = self.python_OP_Halt(pc, op)
return rc
elif (opcode == CConfig.OP_Eq or
opcode == CConfig.OP_Ne or
opcode == CConfig.OP_Lt or
opcode == CConfig.OP_Le or
opcode == CConfig.OP_Gt or
opcode == CConfig.OP_Ge):
pc, rc = self.python_OP_Ne_Eq_Gt_Le_Lt_Ge(pc, rc, op)
elif opcode == CConfig.OP_Integer:
self.python_OP_Integer(op)
elif opcode == CConfig.OP_Null:
self.python_OP_Null(op)
elif opcode == CConfig.OP_AggStep:
rc = self.python_OP_AggStep(rc, pc, op)
elif opcode == CConfig.OP_AggFinal:
rc = self.python_OP_AggFinal(pc, rc, op)
elif opcode == CConfig.OP_Copy:
rc = self.python_OP_Copy(pc, rc, op)
elif opcode == CConfig.OP_MustBeInt:
pc, rc = self.python_OP_MustBeInt(pc, rc, op)
elif opcode == CConfig.OP_NotExists:
pc, rc = self.python_OP_NotExists(pc, op)
elif opcode == CConfig.OP_String:
self.python_OP_String(op)
elif opcode == CConfig.OP_String8:
rc = self.python_OP_String8(pc, rc, op)
elif opcode == CConfig.OP_Function:
rc = self.python_OP_Function(pc, rc, op)
elif opcode == CConfig.OP_Real:
self.python_OP_Real(op)
elif opcode == CConfig.OP_RealAffinity:
self.python_OP_RealAffinity(op)
elif (opcode == CConfig.OP_Add or
opcode == CConfig.OP_Subtract or
opcode == CConfig.OP_Multiply or
opcode == CConfig.OP_Divide or
opcode == CConfig.OP_Remainder):
self.python_OP_Add_Subtract_Multiply_Divide_Remainder(op)
elif (opcode == CConfig.OP_If or
opcode == CConfig.OP_IfNot):
pc = self.python_OP_If_IfNot(pc, op)
elif opcode == CConfig.OP_Rowid:
rc = self.python_OP_Rowid(pc, rc, op)
elif opcode == CConfig.OP_IsNull:
pc = self.python_OP_IsNull(pc, op)
elif (opcode == CConfig.OP_SeekLT or
opcode == CConfig.OP_SeekLE or
opcode == CConfig.OP_SeekGE or
opcode == CConfig.OP_SeekGT):
pc, rc = self.python_OP_SeekLT_SeekLE_SeekGE_SeekGT(pc, rc, op)
elif opcode == CConfig.OP_Move:
self.python_OP_Move(op)
elif opcode == CConfig.OP_IfZero:
pc = self.python_OP_IfZero(pc, op)
elif opcode == CConfig.OP_IdxRowid:
rc = self.python_OP_IdxRowid(pc, rc, op)
elif (opcode == CConfig.OP_IdxLE or
opcode == CConfig.OP_IdxGT or
opcode == CConfig.OP_IdxLT or
opcode == CConfig.OP_IdxGE):
pc, rc = self.python_OP_IdxLE_IdxGT_IdxLT_IdxGE(pc, op)
elif opcode == CConfig.OP_Seek:
self.python_OP_Seek(op)
elif opcode == CConfig.OP_Once:
pc = self.python_OP_Once(pc, op)
elif opcode == CConfig.OP_SCopy:
self.python_OP_SCopy(op)
elif opcode == CConfig.OP_Affinity:
self.python_OP_Affinity(op)
elif (opcode == CConfig.OP_OpenAutoindex or
opcode == CConfig.OP_OpenEphemeral):
rc = self.python_OP_OpenAutoindex_OpenEphemeral(pc, op)
elif opcode == CConfig.OP_MakeRecord:
rc = self.python_OP_MakeRecord(pc, rc, op)
elif (opcode == CConfig.OP_SorterInsert or
opcode == CConfig.OP_IdxInsert):
rc = self.python_OP_SorterInsert_IdxInsert(op)
elif (opcode == CConfig.OP_NoConflict or
opcode == CConfig.OP_NotFound or
opcode == CConfig.OP_Found):
pc, rc = self.python_OP_NoConflict_NotFound_Found(pc, rc, op)
elif opcode == CConfig.OP_RowSetTest:
pc, rc = self.python_OP_RowSetTest(pc, rc, op)
elif opcode == CConfig.OP_Gosub:
pc = self.python_OP_Gosub(pc, op)
elif opcode == CConfig.OP_Return:
pc = self.python_OP_Return(pc, op)
elif opcode == CConfig.OP_SorterOpen:
rc = self.python_OP_SorterOpen(pc, op)
elif opcode == CConfig.OP_NextIfOpen:
pc, rc = self.python_OP_NextIfOpen(pc, rc, op)
elif opcode == CConfig.OP_Sequence:
self.python_OP_Sequence(op)
elif opcode == CConfig.OP_OpenPseudo:
rc = self.python_OP_OpenPseudo(pc, rc, op)
elif (opcode == CConfig.OP_SorterSort or
opcode == CConfig.OP_Sort):
pc, rc = self.python_OP_SorterSort_Sort(pc, op)
elif opcode == CConfig.OP_SorterData:
rc = self.python_OP_SorterData(op)
elif opcode == CConfig.OP_SorterNext:
pc, rc = self.python_OP_SorterNext(pc, op)
elif (opcode == CConfig.OP_Noop or
opcode == CConfig.OP_Explain):
self.python_OP_Noop_Explain(op)
elif opcode == CConfig.OP_Compare:
self.python_OP_Compare(op)
elif opcode == CConfig.OP_Jump:
pc = self.python_OP_Jump(op)
elif opcode == CConfig.OP_IfPos:
pc = self.python_OP_IfPos(pc, op)
elif opcode == CConfig.OP_CollSeq:
self.python_OP_CollSeq(op)
elif opcode == CConfig.OP_NotNull:
pc = self.python_OP_NotNull(pc, op)
elif opcode == CConfig.OP_InitCoroutine:
pc = self.python_OP_InitCoroutine(pc, op)
elif opcode == CConfig.OP_Yield:
pc = self.python_OP_Yield(pc, op)
elif opcode == CConfig.OP_NullRow:
self.python_OP_NullRow(op)
elif opcode == CConfig.OP_EndCoroutine:
pc = self.python_OP_EndCoroutine(op)
elif opcode == CConfig.OP_ReadCookie:
self.python_OP_ReadCookie(op)
elif opcode == CConfig.OP_NewRowid:
rc = self.python_OP_NewRowid(pc, rc, op)
elif (opcode == CConfig.OP_Insert or
opcode == CConfig.OP_InsertInt):
rc = self.python_OP_Insert_InsertInt(op)
elif opcode == CConfig.OP_SetCookie:
rc = self.python_OP_SetCookie(op)
elif opcode == CConfig.OP_ParseSchema:
rc = self.python_OP_ParseSchema(pc, rc, op)
elif opcode == CConfig.OP_RowSetAdd:
rc = self.python_OP_RowSetAdd(pc, rc, op)
elif opcode == CConfig.OP_RowSetRead:
pc, rc = self.python_OP_RowSetRead(pc, rc, op)
elif opcode == CConfig.OP_Delete:
rc = self.python_OP_Delete(pc, op)
elif opcode == CConfig.OP_DropTable:
self.python_OP_DropTable(op)
else:
raise SQPyteException("SQPyteException: Unimplemented bytecode %s." % opcode)
pc = jit.promote(rffi.cast(lltype.Signed, pc))
pc += 1
if pc <= oldpc:
jitdriver.can_enter_jit(pc=pc, self_=self, rc=rc)
return rc
class Op(object):
    """Thin RPython wrapper around one VDBE instruction (a C ``pOp`` struct).

    Accessors are marked ``@jit.elidable`` so the tracing JIT may
    constant-fold them: the underlying ``pOp`` fields are immutable for the
    lifetime of the query.
    """
    _immutable_fields_ = ['hlquery', 'pOp']

    def __init__(self, hlquery, pOp):
        # hlquery: high-level query object that owns this instruction.
        # pOp: raw pointer to the C operation struct.
        self.hlquery = hlquery
        self.pOp = pOp

    @jit.elidable
    def get_opcode(self):
        # Opcode as an unsigned machine word (comparable to CConfig.OP_*).
        return rffi.cast(lltype.Unsigned, self.pOp.opcode)

    @jit.elidable
    def p_Signed(self, i):
        """Return operand p<i> (i in 1, 2, 3, 5) as a signed integer."""
        if i == 1:
            return rffi.cast(lltype.Signed, self.pOp.p1)
        if i == 2:
            return rffi.cast(lltype.Signed, self.pOp.p2)
        if i == 3:
            return rffi.cast(lltype.Signed, self.pOp.p3)
        if i == 5:
            return rffi.cast(lltype.Signed, self.pOp.p5)
        assert 0  # p4 is a union; use the dedicated p4_* accessors below

    @jit.elidable
    def p_Unsigned(self, i):
        """Return operand p<i> (i in 1, 2, 3, 5) as an unsigned integer."""
        if i == 1:
            return rffi.cast(lltype.Unsigned, self.pOp.p1)
        if i == 2:
            return rffi.cast(lltype.Unsigned, self.pOp.p2)
        if i == 3:
            return rffi.cast(lltype.Unsigned, self.pOp.p3)
        if i == 5:
            return rffi.cast(lltype.Unsigned, self.pOp.p5)
        assert 0  # no such operand index

    @jit.elidable
    def p4type(self):
        # Tag describing which member of the p4 union is valid.
        return self.pOp.p4type

    @jit.elidable
    def p4_z(self):
        # p4 as a Python-level string (copies the C string).
        return rffi.charp2str(self.pOp.p4.z)

    @jit.elidable
    def p4_pFunc(self):
        # p4 interpreted as a function-definition pointer.
        return self.pOp.p4.pFunc

    @jit.elidable
    def p4_pColl(self):
        # p4 interpreted as a collating-sequence pointer.
        return self.pOp.p4.pColl

    def p2as_pc(self):
        # Jump target: p2 is 1-based in the bytecode while pc is 0-based
        # (the main loop increments pc after every instruction).
        return self.p_Signed(2) - 1

    def mem_of_p(self, i):
        # The register (Mem cell) named by operand p<i>.
        return self.hlquery.mem_with_index(self.p_Signed(i))

    def mem_and_flags_of_p(self, i, promote=False):
        # Convenience: register plus its flag word, optionally JIT-promoted.
        mem = self.mem_of_p(i)
        flags = mem.get_flags(promote=promote)
        return mem, flags

    @jit.elidable
    def opflags(self):
        # Per-opcode property flags from the C opcode table.
        return rffi.cast(lltype.Unsigned, self.pOp.opflags)
def main_work(query):
    # Open the test database (module-level `testdb` path), compile `query`,
    # and step it to completion, counting how many times it yields a row.
    db = Sqlite3DB(testdb).db
    query = Sqlite3Query(db, query)
    rc = query.mainloop()
    count = 0
    # SQLITE_ROW means a result row is ready; keep stepping until done/error.
    while rc == CConfig.SQLITE_ROW:
        rc = query.mainloop()
        count += 1
    print count  # RPython/py2 print statement: emit the row count
def entry_point(argv):
try:
query = argv[1]
except IndexError:
print "You must supply a query to be run: e.g., 'select first_name from people where age > 1;'."
return 1
main_work(query)
return 0
def target(*args):
    # RPython translation hook: the toolchain calls target() to obtain the
    # program's entry-point function.
    return entry_point

if __name__ == "__main__":
    # Untranslated (plain interpreter) execution path.
    entry_point(sys.argv)
|
"""
Imgur CLI
"""
import argparse
import logging
import os
import sys
from collections import namedtuple
import imgurpython
from imgur_cli import __version__
from imgur_cli import cli_api
from imgur_cli.exceptions import CommandError
from imgur_cli.utils import cli_arg
try:
from imgur_cli.config import config
except ImportError:
config = None
logger = logging.getLogger(__name__)
def imgur_credentials():
    """Collect Imgur API credentials and return them as a namedtuple.

    The optional config module takes precedence; otherwise the values are
    read from environment variables.  Raises ImgurClientError when the
    mandatory client id/secret are missing.
    """
    ImgurCredentials = namedtuple('ImgurCredentials',
                                  ['client_id', 'client_secret', 'access_token',
                                   'refresh_token', 'mashape_key'])
    # Both the config module and os.environ expose a mapping .get() API.
    source = config if config else os.environ
    client_id = source.get('IMGUR_CLIENT_ID')
    client_secret = source.get('IMGUR_CLIENT_SECRET')
    access_token = source.get('IMGUR_ACCESS_TOKEN')
    refresh_token = source.get('IMGUR_REFRESH_TOKEN')
    mashape_key = source.get('IMGUR_MASHAPE_KEY')
    if not (client_id and client_secret):
        raise imgurpython.client.ImgurClientError('Client credentials not found. '
                                                  'Ensure you have both client id '
                                                  'and client secret')
    return ImgurCredentials(client_id, client_secret, access_token,
                            refresh_token, mashape_key)
class ImgurCli():
    """Command-line front end for the Imgur API.

    Subcommands are meant to be discovered dynamically (see _find_actions,
    currently a stub in this version) and wired into an argparse subparser
    tree keyed by name in ``self.subcommands``.
    """

    @property
    def base_parser(self):
        """Build the top-level argparse parser with the global options."""
        parser = argparse.ArgumentParser(prog='imgur', description=__doc__.strip(),
                                         epilog='See "imgur help COMMAND" for '
                                                'help on a specific command')
        # Global arguments
        parser.add_argument('-v', '--version', action='version',
                            version='%(prog)s {0}'.format(__version__))
        parser.add_argument('--debug', default=False, action='store_true',
                            help='Print debugging output')
        # NOTE(review): --debug is parsed but setup_debugging() is never
        # invoked from main() in this version.
        return parser

    def setup_debugging(self):
        """Configure the root logger for DEBUG-level console output."""
        streamformat = "%(levelname)s (%(module)s:%(lineno)d) %(message)s"
        # Set up the root logger to debug so that the submodules can print
        # debug messages
        logging.basicConfig(level=logging.DEBUG, format=streamformat)

    @property
    def subcommand_parser(self):
        """Return the full parser: base parser plus all subcommand parsers."""
        parser = self.base_parser
        self.subcommands = {}  # subcommand name -> its argparse sub-parser
        subparsers = parser.add_subparsers(metavar='<subcommand>')
        actions_module = cli_api
        self._find_actions(subparsers, actions_module)
        self._find_actions(subparsers, self)
        self._add_base_completion_subparser(subparsers)
        return parser

    def _find_actions(self, subparsers, actions_module):
        # Stub: dynamic subcommand discovery is not implemented yet.
        pass

    def _add_base_completion_subparser(self, subparsers):
        # Hidden helper subcommand used by the bash completion script.
        subparser = subparsers.add_parser('bash_completion', add_help=False)
        self.subcommands['bash_completion'] = subparser
        subparser.set_defaults(func=self.do_bash_completion)

    def do_bash_completion(self):
        """Prints arguments for bash-completion"""
        commands = set()
        options = set()
        for key, value in self.subcommands.items():
            commands.add(key)
            options.update(option for option in
                           value._optionals._option_string_actions.keys())
        # Hide the completion helper itself from the completion list.
        commands.remove('bash_completion')
        print(' '.join(commands | options))

    @cli_arg('command', metavar='<subcommand', nargs='?',
             help='Display help for <subcommand>')
    def do_help(self, args):
        """Display help about this program or one of its subcommands"""
        print('UEG -> do_help', args.command)  # NOTE(review): leftover debug output
        if args.command:
            if args.command in self.subcommands:
                self.subcommands[args.command].print_help()
            else:
                raise CommandError('{0} is not valid subcommand'
                                   .format(args.command))
        else:
            self.parser.print_help()

    def main(self, argv):
        """Parse *argv* and dispatch to the selected subcommand handler."""
        credentials = imgur_credentials()
        self.parser = self.subcommand_parser
        if not argv:
            # No arguments at all: show usage and exit successfully.
            self.parser.print_help()
            return 0
        args = self.parser.parse_args(argv)
        # Short-circuit and deal with help right away
        if args.func == self.do_help:
            self.do_help(args)
            return 0
        if args.func == self.do_bash_completion:
            self.do_bash_completion()
            return 0
        # Real API subcommand: build the client and hand it to the handler.
        self.client = imgurpython.ImgurClient(*credentials)
        args.func(self.client, args)
def main():
    """Module entry point: run the CLI, reporting any failure on stderr."""
    try:
        ImgurCli().main(sys.argv[1:])
    except Exception as exc:
        # Full details only at debug level; the user sees the message alone.
        logger.debug(exc, exc_info=1)
        print(exc, file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
Define dynamic loading of subcommand logic
"""
Imgur CLI
"""
import argparse
import logging
import os
import sys
from collections import namedtuple
import imgurpython
from imgur_cli import __version__
from imgur_cli import cli_api
from imgur_cli.exceptions import CommandError
from imgur_cli.utils import cli_arg
try:
from imgur_cli.config import config
except ImportError:
config = None
logger = logging.getLogger(__name__)
def imgur_credentials():
    """Collect Imgur API credentials and return them as a namedtuple.

    The optional config module takes precedence; otherwise the values are
    read from environment variables.  Raises ImgurClientError when the
    mandatory client id/secret are missing.
    """
    ImgurCredentials = namedtuple('ImgurCredentials',
                                  ['client_id', 'client_secret', 'access_token',
                                   'refresh_token', 'mashape_key'])
    # Both the config module and os.environ expose a mapping .get() API.
    source = config if config else os.environ
    client_id = source.get('IMGUR_CLIENT_ID')
    client_secret = source.get('IMGUR_CLIENT_SECRET')
    access_token = source.get('IMGUR_ACCESS_TOKEN')
    refresh_token = source.get('IMGUR_REFRESH_TOKEN')
    mashape_key = source.get('IMGUR_MASHAPE_KEY')
    if not (client_id and client_secret):
        raise imgurpython.client.ImgurClientError('Client credentials not found. '
                                                  'Ensure you have both client id '
                                                  'and client secret')
    return ImgurCredentials(client_id, client_secret, access_token,
                            refresh_token, mashape_key)
class ImgurCli():
    """Command-line front end for the Imgur API.

    Subcommands are discovered dynamically: any ``cmd_*`` callable found in
    ``cli_api`` or on this class is registered as an argparse subcommand,
    keyed by name in ``self.subcommands``.
    """

    @property
    def base_parser(self):
        """Build the top-level argparse parser with the global options."""
        parser = argparse.ArgumentParser(prog='imgur', description=__doc__.strip(),
                                         epilog='See "imgur help COMMAND" for '
                                                'help on a specific command')
        # Global arguments
        parser.add_argument('-v', '--version', action='version',
                            version='%(prog)s {0}'.format(__version__))
        parser.add_argument('--debug', default=False, action='store_true',
                            help='Print debugging output')
        return parser

    def setup_debugging(self):
        """Configure the root logger for DEBUG-level console output."""
        streamformat = "%(levelname)s (%(module)s:%(lineno)d) %(message)s"
        # Set up the root logger to debug so that the submodules can print
        # debug messages
        logging.basicConfig(level=logging.DEBUG, format=streamformat)

    @property
    def subcommand_parser(self):
        """Return the full parser: base parser plus all subcommand parsers."""
        parser = self.base_parser
        self.subcommands = {}  # subcommand name -> its argparse sub-parser
        subparsers = parser.add_subparsers(metavar='<subcommand>')
        actions_module = cli_api
        self._find_actions(subparsers, actions_module)
        self._find_actions(subparsers, self)
        self._add_base_completion_subparser(subparsers)
        return parser

    def _find_actions(self, subparsers, actions_module):
        """Register every ``cmd_*`` attribute of *actions_module* as a subcommand.

        The command name is the attribute name minus the ``cmd_`` prefix with
        underscores turned into dashes; ``callback.arguments`` (attached by
        the @cli_arg decorator) supplies the per-command argparse arguments.
        """
        for attr in (action for action in dir(actions_module)
                     if action.startswith('cmd_')):
            command = attr[4:].replace('_', '-')
            callback = getattr(actions_module, attr)
            description = callback.__doc__ or ''
            action_help = description.strip()
            arguments = getattr(callback, 'arguments', [])
            subparser = subparsers.add_parser(command, help=action_help,
                                              description=description,
                                              add_help=False)
            subparser.add_argument('-h', '--help', action='help',
                                   help=argparse.SUPPRESS)
            self.subcommands[command] = subparser
            for args, kwargs in arguments:
                subparser.add_argument(*args, **kwargs)
            subparser.set_defaults(func=callback)

    def _add_base_completion_subparser(self, subparsers):
        # Hidden helper subcommand used by the bash completion script.
        subparser = subparsers.add_parser('bash_completion', add_help=False)
        self.subcommands['bash_completion'] = subparser
        subparser.set_defaults(func=self.cmd_bash_completion)

    def cmd_bash_completion(self):
        """Prints arguments for bash-completion"""
        commands = set()
        options = set()
        for key, value in self.subcommands.items():
            commands.add(key)
            options.update(option for option in
                           value._optionals._option_string_actions.keys())
        # Hide the completion helper under both of its registered spellings
        # (dashed via _find_actions, underscored via the base subparser).
        # BUG FIX: use discard() so a missing spelling cannot raise KeyError.
        commands.discard('bash-completion')
        commands.discard('bash_completion')
        print(' '.join(commands | options))

    @cli_arg('command', metavar='<subcommand>', nargs='?',
             help='Display help for <subcommand>')
    def cmd_help(self, args):
        """Display help about this program or one of its subcommands"""
        # BUG FIX: removed leftover "UEG -> do_help" debug print.
        if args.command:
            if args.command in self.subcommands:
                self.subcommands[args.command].print_help()
            else:
                raise CommandError('{0} is not valid subcommand'
                                   .format(args.command))
        else:
            self.parser.print_help()

    def main(self, argv):
        """Parse *argv* and dispatch to the selected subcommand handler."""
        credentials = imgur_credentials()
        self.parser = self.subcommand_parser
        if not argv:
            self.parser.print_help()
            return 0
        args = self.parser.parse_args(argv)
        # Short-circuit and deal with help right away.
        # BUG FIX: these previously called the removed do_help()/
        # do_bash_completion() names, raising AttributeError after the
        # cmd_* rename; also removed the leftover print(args.func) debug.
        if args.func == self.cmd_help:
            self.cmd_help(args)
            return 0
        if args.func == self.cmd_bash_completion:
            self.cmd_bash_completion()
            return 0
        self.client = imgurpython.ImgurClient(*credentials)
        args.func(self.client, args)
def main():
    """Module entry point: run the CLI, reporting any failure on stderr."""
    try:
        ImgurCli().main(sys.argv[1:])
    except Exception as exc:
        # Full details only at debug level; the user sees the message alone.
        logger.debug(exc, exc_info=1)
        print(exc, file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
import asyncio
import sys
from random import SystemRandom
random = SystemRandom()
import pickle
import discord
sys.path.append("..")
import josecommon as jcommon
import joseerror as je
class JoseMemes(jcommon.Extension):
    # Meme storage/retrieval extension for the José Discord bot.
    # NOTE: the command docstrings below double as user-facing help text
    # (sent to chat via __doc__), so they are intentionally kept in Portuguese.
    def __init__(self, cl):
        self.memes = {}  # trigger -> {'owner': <author id>, 'data': <payload>}
        # Translate table: printable ASCII -> fullwidth forms (for c_fullwidth);
        # plain space maps to the ideographic space U+3000.
        self.WIDE_MAP = dict((i, i + 0xFEE0) for i in range(0x21, 0x7F))
        self.WIDE_MAP[0x20] = 0x3000
        jcommon.Extension.__init__(self, cl)

    async def ext_load(self):
        # Extension start-up hook: populate self.memes from disk.
        await self.load_memes()

    async def ext_unload(self):
        # supress every kind of debug to self.say
        old_cur = self.current
        self.current = None
        await self.save_memes()
        # NOTE(review): typo — assigns 'currrent', so self.current is never
        # actually restored here.
        self.currrent = old_cur

    async def load_memes(self):
        # Load the meme dict from the pickle database; returns True on success.
        try:
            self.memes = pickle.load(open('ext/josememes.db', 'rb'))
            return True
        except Exception as e:
            if self.current is not None:
                await self.debug("load_memes: erro carregando josememes.db(%s)" % e)
                return False
            else:
                print('load_memes: erro: %s' % e)
                return False
        # NOTE(review): unreachable — every path above returns first.
        self.memes = {}

    async def save_memes(self):
        # Persist the meme dict to the pickle database; returns True on success.
        try:
            pickle.dump(self.memes, open("ext/josememes.db", 'wb'))
            return True
        except Exception as e:
            if self.current is not None:
                await self.debug("save_memes: pyerr: %s" % e)
            else:
                # NOTE(review): 'traceback' is not imported in this module's
                # visible imports — this line would raise NameError if reached.
                print(traceback.print_exc())
            return False

    async def c_aprovado(self, message, args):
        '''`!aprovado` - O Melhor Sean Anthony®'''
        await self.say('http://gaveta.com.br/images/Aprovacao-Sean-Anthony.png')

    async def c_meme(self, message, args):
        '''!meme: Adicione e mostre memes com o josé!
        *alias*: !m
        Subcomandos:
        `!meme add <trigger>;<meme>` - toda vez que alguém mandar um `!meme get <trigger>`, josé falará `<meme>`
        `!meme get <trigger>` - josé falará o que estiver programado para falar de acordo com `<trigger>`
        `!meme list` - mostra todos os memes que estão escritos no josé
        `!meme search <termo>` - procura o banco de dados de memes por um meme específico
        `!meme rm <meme>` - remove um meme
        `!meme rename <nome antigo>;<nome novo>` - altera o `<trigger>` de um meme
        `!meme owner <meme>` - mostra quem "criou" o `<meme>`
        Tenha cuidado ao adicionar coisas NSFW.
        '''
        # No subcommand: reply with the (user-facing) help text above.
        if len(args) < 2:
            await self.say(self.c_meme.__doc__)
            return
        elif args[1] == 'add':
            # "!meme add <trigger>;<payload>"
            args_s = ' '.join(args[2:])
            args_sp = args_s.split(';')
            meme = args_sp[0]
            # NOTE(review): IndexError if the input contains no ';'.
            url = args_sp[1]
            if meme in self.memes:
                await self.say("%s: meme já existe" % meme)
                return
            else:
                self.memes[meme] = {
                    'owner': message.author.id,
                    'data': url,
                }
                await self.save_memes()
                await self.say("%s: meme adicionado!" % meme)
                return
        elif args[1] == 'rm':
            meme = ' '.join(args[2:])
            if meme in self.memes:
                meme_data = self.memes[meme]
                # Only the meme's owner or a master-role admin may delete it.
                is_admin = await self.brolecheck(jcommon.MASTER_ROLE)
                if (message.author.id == meme_data['owner']) or is_admin:
                    del self.memes[meme]
                    # NOTE(review): the deletion is not persisted here
                    # (no save_memes call).
                    await self.say("%s: meme removido" % meme)
                    return
                else:
                    raise je.PermissionError()
                    return
            else:
                await self.say("%s: meme não encontrado" % meme)
                return
        elif args[1] == 'save':
            done = await self.save_memes()
            if done:
                await self.say("jmemes: banco de dados salvo")
            else:
                raise IOError("banco de dados não salvo corretamente")
            return
        elif args[1] == 'load':
            done = await self.load_memes()
            if done:
                await self.say("jmemes: banco de dados carregado")
            else:
                raise IOError("banco de dados não carregado corretamente")
            return
        elif args[1] == 'saveload':
            # Save then immediately reload (round-trip sanity check).
            print('saveload')
            done = await self.save_memes()
            if done:
                await self.say("jmemes: banco de dados salvo")
            else:
                raise IOError("banco de dados não salvo corretamente")
            done = await self.load_memes()
            if done:
                await self.say("jmemes: banco de dados carregado")
            else:
                raise IOError("banco de dados não carregado corretamente")
            return
        elif args[1] == 'list':
            await self.say("memes: %s" % ', '.join(self.memes.keys()))
        elif args[1] == 'get':
            meme = ' '.join(args[2:])
            if meme in self.memes:
                await self.say(self.memes[meme]['data'])
            else:
                await self.say("%s: meme não encontrado" % meme)
                return
        elif args[1] == 'all':
            # Dump the raw dict in a code block (debugging aid).
            await self.say(self.codeblock('python', self.memes))
        elif args[1] == 'search':
            term = ' '.join(args[2:])
            # Substring match over the trigger names only.
            probables = [key for key in self.memes if term in key]
            if len(probables) > 0:
                await self.say("Resultados: %s" % ', '.join(probables))
            else:
                await self.say("%r: Nenhum resultado encontrado" % term)
        elif args[1] == 'rename':
            args_s = ' '.join(args[2:])
            args_sp = args_s.split(';')
            oldname = args_sp[0]
            # NOTE(review): IndexError if the input contains no ';'.
            newname = args_sp[1]
            if not oldname in self.memes:
                await self.say("%s: meme não encontrado" % oldname)
                return
            # swapping
            old_meme = self.memes[oldname]
            # Only the owner may rename their meme.
            if old_meme['owner'] != message.author.id:
                raise je.PermissionError()
            self.memes[newname] = {
                'owner': message.author.id,
                'data': old_meme['data'],
            }
            del self.memes[oldname]
            # NOTE(review): the rename is not persisted here (no save_memes call).
            await self.say("%s foi renomeado para %s!" % (oldname, newname))
            return
        elif args[1] == 'owner':
            meme = ' '.join(args[2:])
            if meme in self.memes:
                # Resolve the stored owner id to a member of this server.
                u = discord.utils.get(message.server.members, id=self.memes[meme]['owner'])
                await self.say("%s foi criado por %s" % (meme, u))
            else:
                await self.say("%s: meme não encontrado" % meme)
                return
        else:
            await self.say("comando inválido: %s" % args[1])
            return

    async def c_m(self, message, args):
        '''`!m` - alias para `!meme`'''
        await self.c_meme(message, args)

    async def c_fullwidth(self, message, args):
        '''`!fullwidth texto` - converte texto para fullwidth'''
        # looks like discord made fullwidth suppoert available again :D
        text = ' '.join(args[1:])
        await self.say(text.translate(self.WIDE_MAP))

    async def c_fw(self, message, args):
        '''`!fw` - alias para `!fullwidth`'''
        await self.c_fullwidth(message, args)

    async def c_emoji(self, message, args):
        '''`!emoji` - gera de 1 a 5 emojis aleatórios'''
        res = await jcommon.random_emoji(random.randint(1,5))
        await self.say(res)
Save the meme database after subcommands that modify it
#!/usr/bin/env python3
import asyncio
import sys
from random import SystemRandom
random = SystemRandom()
import pickle
import discord
sys.path.append("..")
import josecommon as jcommon
import joseerror as je
class JoseMemes(jcommon.Extension):
    # Meme storage/retrieval extension for the José Discord bot.
    # Memes live in self.memes (trigger -> {'owner', 'data'}) and are
    # persisted to ext/josememes.db with pickle.
    # NOTE: the command docstrings below double as user-facing help text
    # (sent to chat via __doc__), so they are intentionally kept in Portuguese.
    def __init__(self, cl):
        self.memes = {}  # trigger -> {'owner': <author id>, 'data': <payload>}
        # Translate table: printable ASCII -> fullwidth forms (for c_fullwidth);
        # plain space maps to the ideographic space U+3000.
        self.WIDE_MAP = dict((i, i + 0xFEE0) for i in range(0x21, 0x7F))
        self.WIDE_MAP[0x20] = 0x3000
        jcommon.Extension.__init__(self, cl)

    async def ext_load(self):
        # Extension start-up hook: populate self.memes from disk.
        await self.load_memes()

    async def ext_unload(self):
        # supress every kind of debug to self.say
        old_cur = self.current
        self.current = None
        await self.save_memes()
        # BUG FIX: was 'self.currrent', which silently created a new attribute
        # and left self.current stuck at None after unload.
        self.current = old_cur

    async def load_memes(self):
        # Load the meme dict from the pickle database; returns True on success.
        # (An unreachable trailing 'self.memes = {}' was removed.)
        try:
            self.memes = pickle.load(open('ext/josememes.db', 'rb'))
            return True
        except Exception as e:
            if self.current is not None:
                await self.debug("load_memes: erro carregando josememes.db(%s)" % e)
            else:
                print('load_memes: erro: %s' % e)
            return False

    async def save_memes(self):
        # Persist the meme dict to the pickle database; returns True on success.
        try:
            pickle.dump(self.memes, open("ext/josememes.db", 'wb'))
            return True
        except Exception as e:
            if self.current is not None:
                await self.debug("save_memes: pyerr: %s" % e)
            else:
                # BUG FIX: 'traceback' was used without being imported
                # (NameError), and print() was applied to its None return.
                import traceback
                traceback.print_exc()
            return False

    async def c_aprovado(self, message, args):
        '''`!aprovado` - O Melhor Sean Anthony®'''
        await self.say('http://gaveta.com.br/images/Aprovacao-Sean-Anthony.png')

    async def c_meme(self, message, args):
        '''!meme: Adicione e mostre memes com o josé!
        *alias*: !m
        Subcomandos:
        `!meme add <trigger>;<meme>` - toda vez que alguém mandar um `!meme get <trigger>`, josé falará `<meme>`
        `!meme get <trigger>` - josé falará o que estiver programado para falar de acordo com `<trigger>`
        `!meme list` - mostra todos os memes que estão escritos no josé
        `!meme search <termo>` - procura o banco de dados de memes por um meme específico
        `!meme rm <meme>` - remove um meme
        `!meme rename <nome antigo>;<nome novo>` - altera o `<trigger>` de um meme
        `!meme owner <meme>` - mostra quem "criou" o `<meme>`
        Tenha cuidado ao adicionar coisas NSFW.
        '''
        # No subcommand: reply with the (user-facing) help text above.
        if len(args) < 2:
            await self.say(self.c_meme.__doc__)
            return
        elif args[1] == 'add':
            args_s = ' '.join(args[2:])
            args_sp = args_s.split(';')
            # ROBUSTNESS FIX: the original indexed args_sp[1] unconditionally
            # and crashed with IndexError when no ';' separator was given.
            if len(args_sp) < 2:
                await self.say("uso: `!meme add <trigger>;<meme>`")
                return
            meme = args_sp[0]
            url = args_sp[1]
            if meme in self.memes:
                await self.say("%s: meme já existe" % meme)
                return
            else:
                self.memes[meme] = {
                    'owner': message.author.id,
                    'data': url,
                }
                await self.save_memes()
                await self.say("%s: meme adicionado!" % meme)
                return
        elif args[1] == 'rm':
            meme = ' '.join(args[2:])
            if meme in self.memes:
                meme_data = self.memes[meme]
                # Only the meme's owner or a master-role admin may delete it.
                is_admin = await self.brolecheck(jcommon.MASTER_ROLE)
                if (message.author.id == meme_data['owner']) or is_admin:
                    del self.memes[meme]
                    await self.save_memes()  # persist the deletion immediately
                    await self.say("%s: meme removido" % meme)
                    return
                else:
                    raise je.PermissionError()
            else:
                await self.say("%s: meme não encontrado" % meme)
                return
        elif args[1] == 'save':
            done = await self.save_memes()
            if done:
                await self.say("jmemes: banco de dados salvo")
            else:
                raise IOError("banco de dados não salvo corretamente")
            return
        elif args[1] == 'load':
            done = await self.load_memes()
            if done:
                await self.say("jmemes: banco de dados carregado")
            else:
                raise IOError("banco de dados não carregado corretamente")
            return
        elif args[1] == 'saveload':
            # Save then immediately reload (round-trip sanity check).
            print('saveload')
            done = await self.save_memes()
            if done:
                await self.say("jmemes: banco de dados salvo")
            else:
                raise IOError("banco de dados não salvo corretamente")
            done = await self.load_memes()
            if done:
                await self.say("jmemes: banco de dados carregado")
            else:
                raise IOError("banco de dados não carregado corretamente")
            return
        elif args[1] == 'list':
            await self.say("memes: %s" % ', '.join(self.memes.keys()))
        elif args[1] == 'get':
            meme = ' '.join(args[2:])
            if meme in self.memes:
                await self.say(self.memes[meme]['data'])
            else:
                await self.say("%s: meme não encontrado" % meme)
                return
        elif args[1] == 'all':
            # Dump the raw dict in a code block (debugging aid).
            await self.say(self.codeblock('python', self.memes))
        elif args[1] == 'search':
            term = ' '.join(args[2:])
            # Substring match over the trigger names only.
            probables = [key for key in self.memes if term in key]
            if len(probables) > 0:
                await self.say("Resultados: %s" % ', '.join(probables))
            else:
                await self.say("%r: Nenhum resultado encontrado" % term)
        elif args[1] == 'rename':
            args_s = ' '.join(args[2:])
            args_sp = args_s.split(';')
            # ROBUSTNESS FIX: guard against a missing ';' (see 'add' above).
            if len(args_sp) < 2:
                await self.say("uso: `!meme rename <nome antigo>;<nome novo>`")
                return
            oldname = args_sp[0]
            newname = args_sp[1]
            if not oldname in self.memes:
                await self.say("%s: meme não encontrado" % oldname)
                return
            # swapping
            old_meme = self.memes[oldname]
            # Only the owner may rename their meme.
            if old_meme['owner'] != message.author.id:
                raise je.PermissionError()
            self.memes[newname] = {
                'owner': message.author.id,
                'data': old_meme['data'],
            }
            del self.memes[oldname]
            await self.say("%s foi renomeado para %s!" % (oldname, newname))
            await self.save_memes()  # persist the rename immediately
            return
        elif args[1] == 'owner':
            meme = ' '.join(args[2:])
            if meme in self.memes:
                # Resolve the stored owner id to a member of this server.
                u = discord.utils.get(message.server.members, id=self.memes[meme]['owner'])
                await self.say("%s foi criado por %s" % (meme, u))
            else:
                await self.say("%s: meme não encontrado" % meme)
                return
        else:
            await self.say("comando inválido: %s" % args[1])
            return

    async def c_m(self, message, args):
        '''`!m` - alias para `!meme`'''
        await self.c_meme(message, args)

    async def c_fullwidth(self, message, args):
        '''`!fullwidth texto` - converte texto para fullwidth'''
        # looks like discord made fullwidth suppoert available again :D
        text = ' '.join(args[1:])
        await self.say(text.translate(self.WIDE_MAP))

    async def c_fw(self, message, args):
        '''`!fw` - alias para `!fullwidth`'''
        await self.c_fullwidth(message, args)

    async def c_emoji(self, message, args):
        '''`!emoji` - gera de 1 a 5 emojis aleatórios'''
        res = await jcommon.random_emoji(random.randint(1,5))
        await self.say(res)
|
#! /usr/bin/env python3
assert __name__ == '__main__'
'''
To update ANGLE in Gecko, use Windows with git-bash, and setup depot_tools, python2, and
python3. Because depot_tools expects `python` to be `python2` (shame!), python2 must come
before python3 in your path.
Upstream: https://chromium.googlesource.com/angle/angle
Our repo: https://github.com/mozilla/angle
It has branches like 'firefox-60' which is the branch we use for pulling into
Gecko with this script.
This script leaves a record of the merge-base and cherry-picks that we pull into
Gecko. (gfx/angle/cherries.log)
ANGLE<->Chrome version mappings are here: https://omahaproxy.appspot.com/
An easy choice is to grab Chrome's Beta's ANGLE branch.
## Usage
Prepare your env:
~~~
export PATH="$PATH:/path/to/depot_tools"
~~~
If this is a new repo, don't forget:
~~~
# In the angle repo:
./scripts/bootstrap.py
gclient sync
~~~
Update: (in the angle repo)
~~~
# In the angle repo:
/path/to/gecko/gfx/angle/update-angle.py origin/chromium/XXXX
git push moz # Push the firefox-XX branch to github.com/mozilla/angle
~~~
'''
import json
import os
import pathlib
import re
import shutil
import subprocess
import sys
from typing import * # mypy annotations
REPO_DIR = pathlib.Path.cwd()

GN_ENV = dict(os.environ)
# We need to set DEPOT_TOOLS_WIN_TOOLCHAIN to 0 for non-Googlers, but otherwise
# leave it unset since vs_toolchain.py assumes that the user is a Googler with
# the Visual Studio files in depot_tools if DEPOT_TOOLS_WIN_TOOLCHAIN is not
# explicitly set to 0.
vs_found = False
for directory in os.environ['PATH'].split(os.pathsep):
    vs_dir = os.path.join(directory, 'win_toolchain', 'vs_files')
    if os.path.exists(vs_dir):
        vs_found = True
        break

if not vs_found:
    GN_ENV['DEPOT_TOOLS_WIN_TOOLCHAIN'] = '0'

# NOTE(review): with no CLI arguments this unpacking raises a bare
# ValueError before the assert's usage message can fire; consider an
# explicit len(sys.argv) check.
(OUT_DIR, *ROOTS) = sys.argv[1:]
assert len(ROOTS), 'Usage: export_targets.py OUT_DIR ROOTS...'

for x in ROOTS:
    assert x.startswith('//:')  # roots must be absolute gn labels in '//'
# ------------------------------------------------------------------------------
def run_checked(*args, **kwargs):
    """Echo the command line to stderr, run it, and raise on nonzero exit."""
    sys.stderr.write('  {}\n'.format(args))
    sys.stderr.flush()
    return subprocess.run(args, check=True, **kwargs)
def sortedi(x):
    """Return *x* sorted case-insensitively (by lowercased value)."""
    return sorted(x, key=lambda s: s.lower())
def dag_traverse(root_keys: Sequence[str], pre_recurse_func: Callable[[str], list]):
    """Depth-first walk over a DAG, visiting each key at most once.

    ``pre_recurse_func(key)`` is called before a node's children and must
    return a 1- or 2-tuple: ``(child_keys,)`` or
    ``(child_keys, post_recurse_func)``.  The optional second element is
    called with the key after all of its children have been visited.
    """
    seen: Set[str] = set()

    def visit(node):
        if node in seen:
            return
        seen.add(node)
        result = pre_recurse_func(node)
        if len(result) == 2:
            (children, after) = result
        else:
            (children,) = result
            after = None
        for child in children:
            visit(child)
        if after:
            after(node)

    for root in root_keys:
        visit(root)
# ------------------------------------------------------------------------------
print('Importing graph', file=sys.stderr)

try:
    # Ask gn for a JSON description of every target in OUT_DIR.
    p = run_checked('gn', 'desc', '--format=json', str(OUT_DIR), '*', stdout=subprocess.PIPE,
                    env=GN_ENV, shell=(True if sys.platform == 'win32' else False))
except subprocess.CalledProcessError:
    sys.stderr.buffer.write(b'`gn` failed. Is depot_tools in your PATH?\n')
    exit(1)

# -

print('\nProcessing graph', file=sys.stderr)
descs = json.loads(p.stdout.decode())

# Ready to traverse
# ------------------------------------------------------------------------------
LIBRARY_TYPES = ('shared_library', 'static_library')

def flattened_target(target_name: str, descs: dict, stop_at_lib: bool =True) -> dict:
    """Merge a target's gn description with those of its transitive deps.

    List-valued fields (sources, outputs, ...) are unioned and sorted
    case-insensitively; scalar fields keep the root target's value.  With
    ``stop_at_lib`` (the default), recursion stops at shared/static library
    deps so each library only flattens its own non-library subtree.
    """
    flattened = dict(descs[target_name])

    EXPECTED_TYPES = LIBRARY_TYPES + ('source_set', 'group', 'action')

    def pre(k):
        dep = descs[k]
        dep_type = dep['type']
        deps = dep['deps']
        if stop_at_lib and dep_type in LIBRARY_TYPES:
            return ((),)  # do not descend into other libraries

        if dep_type == 'copy':
            assert not deps, (target_name, dep['deps'])
        else:
            assert dep_type in EXPECTED_TYPES, (k, dep_type)

        # Merge this dep's fields into the accumulator.
        # (note: this loop rebinds k to the field name)
        for (k,v) in dep.items():
            if type(v) in (list, tuple, set):
                flattened[k] = sortedi(set(flattened.get(k, []) + v))
            else:
                #flattened.setdefault(k, v)
                pass
        return (deps,)

    dag_traverse(descs[target_name]['deps'], pre)
    return flattened
# ------------------------------------------------------------------------------
# Check that includes are valid. (gn's version of this check doesn't seem to work!)

INCLUDE_REGEX = re.compile(b'(?:^|\\n) *# *include +([<"])([^>"]+)[>"]')
# Sanity checks: the pattern must match at start-of-file and mid-file.
assert INCLUDE_REGEX.match(b'#include "foo"')
assert INCLUDE_REGEX.match(b'\n#include "foo"')

# Most of these are ignored because this script does not currently handle
# #includes in #ifdefs properly, so they will erroneously be marked as being
# included, but not part of the source list.
IGNORED_INCLUDES = {
    b'compiler/translator/TranslatorESSL.h',
    b'compiler/translator/TranslatorGLSL.h',
    b'compiler/translator/TranslatorHLSL.h',
    b'compiler/translator/TranslatorMetal.h',
    b'compiler/translator/TranslatorVulkan.h',
    b'libANGLE/renderer/d3d/DeviceD3D.h',
    b'libANGLE/renderer/d3d/DisplayD3D.h',
    b'libANGLE/renderer/d3d/RenderTargetD3D.h',
    b'libANGLE/renderer/d3d/d3d11/winrt/NativeWindow11WinRT.h',
    b'libANGLE/renderer/gl/glx/DisplayGLX.h',
    b'libANGLE/renderer/gl/cgl/DisplayCGL.h',
    b'libANGLE/renderer/gl/egl/ozone/DisplayOzone.h',
    b'libANGLE/renderer/gl/egl/android/DisplayAndroid.h',
    b'libANGLE/renderer/gl/wgl/DisplayWGL.h',
    b'libANGLE/renderer/metal/DisplayMtl_api.h',
    b'libANGLE/renderer/null/DisplayNULL.h',
    b'libANGLE/renderer/vulkan/android/DisplayVkAndroid.h',
    b'libANGLE/renderer/vulkan/fuchsia/DisplayVkFuchsia.h',
    b'libANGLE/renderer/vulkan/ggp/DisplayVkGGP.h',
    b'libANGLE/renderer/vulkan/win32/DisplayVkWin32.h',
    b'libANGLE/renderer/vulkan/xcb/DisplayVkXcb.h',
    b'kernel/image.h',
}

# Includes whose first path component is one of these are treated as
# system/platform headers and never expected in the source list.
IGNORED_INCLUDE_PREFIXES = {
    b'android',
    b'Carbon',
    b'CoreFoundation',
    b'CoreServices',
    b'IOSurface',
    b'mach',
    b'mach-o',
    b'OpenGL',
    b'pci',
    b'sys',
    b'wrl',
    b'X11',
}

# Third-party subtrees whose include hygiene we don't police.
IGNORED_DIRECTORIES = {
    '//third_party/glslang',
    '//third_party/spirv-tools',
    '//third_party/SwiftShader',
    '//third_party/vulkan-headers',
    '//third_party/vulkan-loader',
    '//third_party/vulkan-tools',
    '//third_party/vulkan-validation-layers',
}
def has_all_includes(target_name: str, descs: dict) -> bool:
    """Verify that every quoted #include in the target's sources resolves to
    a file in the target's flattened sources/outputs.

    Prints a warning per unmatched include and returns False when any is
    found; system (<...>) includes and the various ignore lists are skipped.
    """
    for ignored_directory in IGNORED_DIRECTORIES:
        if target_name.startswith(ignored_directory):
            return True

    flat = flattened_target(target_name, descs, stop_at_lib=False)
    acceptable_sources = flat.get('sources', []) + flat.get('outputs', [])
    # Compare by basename only, as bytes (to match raw file contents).
    acceptable_sources = {x.rsplit('/', 1)[-1].encode() for x in acceptable_sources}

    ret = True
    desc = descs[target_name]
    for cur_file in desc.get('sources', []):
        assert cur_file.startswith('/'), cur_file
        if not cur_file.startswith('//'):
            continue  # not a source-tree-relative path
        cur_file = pathlib.Path(cur_file[2:])
        text = cur_file.read_bytes()
        for m in INCLUDE_REGEX.finditer(text):
            if m.group(1) == b'<':
                continue  # system include, not checked
            include = m.group(2)
            if include in IGNORED_INCLUDES:
                continue
            try:
                (prefix, _) = include.split(b'/', 1)
                if prefix in IGNORED_INCLUDE_PREFIXES:
                    continue
            except ValueError:
                pass  # no '/' in the include path
            include_file = include.rsplit(b'/', 1)[-1]
            if include_file not in acceptable_sources:
                #print('  acceptable_sources:')
                #for x in sorted(acceptable_sources):
                #    print('   ', x)
                print('Warning in {}: {}: Invalid include: {}'.format(target_name, cur_file, include), file=sys.stderr)
                ret = False
            #print('Looks valid:', m.group())
            continue

    return ret
# -
# Gather real targets:
def gather_libraries(roots: Sequence[str], descs: dict) -> Set[str]:
    """Walk the dep graph from *roots*, validating includes on every target,
    and collect the names of all shared/static library targets."""
    libraries = set()

    def fn(target_name):
        cur = descs[target_name]
        print(' ' + cur['type'], target_name, file=sys.stderr)
        assert has_all_includes(target_name, descs), target_name

        if cur['type'] in ('shared_library', 'static_library'):
            libraries.add(target_name)
        return (cur['deps'], )

    dag_traverse(roots, fn)
    return libraries
# -
libraries = gather_libraries(ROOTS, descs)
print(f'\n{len(libraries)} libraries:', file=sys.stderr)
for k in libraries:
    print(f' {k}', file=sys.stderr)
print('\nstdout begins:', file=sys.stderr)
sys.stderr.flush()

# ------------------------------------------------------------------------------
# Output

# Flatten each library, then reduce its dep list to just the other libraries
# it links against, stripping the leading '//:' from their labels.
out = {k: flattened_target(k, descs) for k in libraries}

for (k,desc) in out.items():
    dep_libs: Set[str] = set()
    for dep_name in set(desc['deps']):
        dep = descs[dep_name]
        if dep['type'] in LIBRARY_TYPES:
            dep_libs.add(dep_name[3:])
    desc['deps'] = sortedi(dep_libs)

json.dump(out, sys.stdout, indent=' ')
exit(0)
Remove some ignores from export_targets.py.
These were fixed upstream.
Bug: angleproject:4077
Change-Id: I656616a111e1703f8a910c27e6be3ec3e918f6ba
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/1899726
Reviewed-by: Jonah Ryan-Davis <682bdb9e7d8ba359ef169b8db374e7a99d394a46@google.com>
Commit-Queue: Jamie Madill <7e492b4f1c8458024932de3ba475cbf015424c30@chromium.org>
#! /usr/bin/env python3
assert __name__ == '__main__'
'''
To update ANGLE in Gecko, use Windows with git-bash, and setup depot_tools, python2, and
python3. Because depot_tools expects `python` to be `python2` (shame!), python2 must come
before python3 in your path.
Upstream: https://chromium.googlesource.com/angle/angle
Our repo: https://github.com/mozilla/angle
It has branches like 'firefox-60' which is the branch we use for pulling into
Gecko with this script.
This script leaves a record of the merge-base and cherry-picks that we pull into
Gecko. (gfx/angle/cherries.log)
ANGLE<->Chrome version mappings are here: https://omahaproxy.appspot.com/
An easy choice is to grab Chrome's Beta's ANGLE branch.
## Usage
Prepare your env:
~~~
export PATH="$PATH:/path/to/depot_tools"
~~~
If this is a new repo, don't forget:
~~~
# In the angle repo:
./scripts/bootstrap.py
gclient sync
~~~
Update: (in the angle repo)
~~~
# In the angle repo:
/path/to/gecko/gfx/angle/update-angle.py origin/chromium/XXXX
git push moz # Push the firefox-XX branch to github.com/mozilla/angle
~~~
'''
import json
import os
import pathlib
import re
import shutil
import subprocess
import sys
from typing import * # mypy annotations
REPO_DIR = pathlib.Path.cwd()

GN_ENV = dict(os.environ)
# We need to set DEPOT_TOOLS_WIN_TOOLCHAIN to 0 for non-Googlers, but otherwise
# leave it unset since vs_toolchain.py assumes that the user is a Googler with
# the Visual Studio files in depot_tools if DEPOT_TOOLS_WIN_TOOLCHAIN is not
# explicitly set to 0.
vs_found = False
for directory in os.environ['PATH'].split(os.pathsep):
    vs_dir = os.path.join(directory, 'win_toolchain', 'vs_files')
    if os.path.exists(vs_dir):
        vs_found = True
        break

if not vs_found:
    GN_ENV['DEPOT_TOOLS_WIN_TOOLCHAIN'] = '0'

# Fail early with a usage message instead of an opaque unpacking ValueError.
if len(sys.argv) < 3:
    sys.exit('Usage: export_targets.py OUT_DIR ROOTS...')

(OUT_DIR, *ROOTS) = sys.argv[1:]

for x in ROOTS:
    assert x.startswith('//:')  # roots must be absolute gn labels in '//'
# ------------------------------------------------------------------------------
def run_checked(*args, **kwargs):
    """Echo the command to stderr, then run it, raising on a non-zero exit."""
    sys.stderr.write('  %s\n' % (args,))
    sys.stderr.flush()
    return subprocess.run(args, check=True, **kwargs)
def sortedi(x):
    """Return the items of *x* as a new list, sorted case-insensitively."""
    items = list(x)
    items.sort(key=str.lower)
    return items
def dag_traverse(root_keys: Sequence[str], pre_recurse_func: Callable[[str], list]):
    """Depth-first walk of a DAG, visiting each key at most once.

    *pre_recurse_func* is called on each unvisited key and returns either
    ``(children,)`` or ``(children, post_func)``; when present, *post_func*
    is invoked with the key after all of its children have been visited.
    """
    seen: Set[str] = set()

    def visit(node):
        if node in seen:
            return
        seen.add(node)
        result = pre_recurse_func(node)
        if len(result) == 2:
            (children, post_func) = result
        else:
            # Single-element tuple: no post-visit callback.
            (children,) = result
            post_func = None
        for child in children:
            visit(child)
        if post_func:
            post_func(node)

    for root in root_keys:
        visit(root)
# ------------------------------------------------------------------------------
print('Importing graph', file=sys.stderr)
try:
    # Ask gn for a JSON description of every target in the out dir.
    # shell=True on Windows because `gn` is a batch-file wrapper there.
    p = run_checked('gn', 'desc', '--format=json', str(OUT_DIR), '*', stdout=subprocess.PIPE,
                    env=GN_ENV, shell=(True if sys.platform == 'win32' else False))
except subprocess.CalledProcessError:
    sys.stderr.buffer.write(b'"gn desc" failed. Is depot_tools in your PATH?\n')
    exit(1)
# -
print('\nProcessing graph', file=sys.stderr)
# descs: target name -> gn target description dict, straight from `gn desc`.
descs = json.loads(p.stdout.decode())
# Ready to traverse
# ------------------------------------------------------------------------------
# Target types that terminate the flattening walk: dependents link against
# these rather than absorbing their sources.
LIBRARY_TYPES = ('shared_library', 'static_library')

def flattened_target(target_name: str, descs: dict, stop_at_lib: bool =True) -> dict:
    """Merge a target's gn description with those of its transitive deps.

    List/tuple/set-valued fields are unioned and sorted case-insensitively;
    scalar fields keep the root target's value. When *stop_at_lib* is true,
    the walk does not descend into shared/static library deps.
    """
    flattened = dict(descs[target_name])
    EXPECTED_TYPES = LIBRARY_TYPES + ('source_set', 'group', 'action')
    def pre(k):
        dep = descs[k]
        dep_type = dep['type']
        deps = dep['deps']
        if stop_at_lib and dep_type in LIBRARY_TYPES:
            # Stop here: a library's contents are linked, not merged.
            return ((),)
        if dep_type == 'copy':
            assert not deps, (target_name, dep['deps'])
        else:
            assert dep_type in EXPECTED_TYPES, (k, dep_type)
            # NOTE: the loop variable shadows the outer `k` (the dep name);
            # harmless because `k` is not used again after this loop.
            for (k,v) in dep.items():
                if type(v) in (list, tuple, set):
                    flattened[k] = sortedi(set(flattened.get(k, []) + v))
                else:
                    #flattened.setdefault(k, v)
                    pass
        return (deps,)
    dag_traverse(descs[target_name]['deps'], pre)
    return flattened
# ------------------------------------------------------------------------------
# Check that includes are valid. (gn's version of this check doesn't seem to work!)
# Matches `#include "..."` / `#include <...>` at the start of a line; group(1)
# is the opening delimiter, group(2) the include path. A bytes pattern because
# sources are read as bytes in has_all_includes.
INCLUDE_REGEX = re.compile(b'(?:^|\\n) *# *include +([<"])([^>"]+)[>"]')
# Import-time self-checks on the pattern.
assert INCLUDE_REGEX.match(b'#include "foo"')
assert INCLUDE_REGEX.match(b'\n#include "foo"')
# Most of these are ignored because this script does not currently handle
# #includes in #ifdefs properly, so they will erroneously be marked as being
# included, but not part of the source list.
IGNORED_INCLUDES = {
    b'compiler/translator/TranslatorESSL.h',
    b'compiler/translator/TranslatorGLSL.h',
    b'compiler/translator/TranslatorHLSL.h',
    b'compiler/translator/TranslatorMetal.h',
    b'compiler/translator/TranslatorVulkan.h',
    b'libANGLE/renderer/d3d/DeviceD3D.h',
    b'libANGLE/renderer/d3d/DisplayD3D.h',
    b'libANGLE/renderer/d3d/RenderTargetD3D.h',
    b'libANGLE/renderer/d3d/d3d11/winrt/NativeWindow11WinRT.h',
    b'libANGLE/renderer/gl/glx/DisplayGLX.h',
    b'libANGLE/renderer/gl/cgl/DisplayCGL.h',
    b'libANGLE/renderer/gl/egl/ozone/DisplayOzone.h',
    b'libANGLE/renderer/gl/egl/android/DisplayAndroid.h',
    b'libANGLE/renderer/gl/wgl/DisplayWGL.h',
    b'libANGLE/renderer/metal/DisplayMtl_api.h',
    b'libANGLE/renderer/null/DisplayNULL.h',
    b'libANGLE/renderer/vulkan/android/DisplayVkAndroid.h',
    b'libANGLE/renderer/vulkan/fuchsia/DisplayVkFuchsia.h',
    b'libANGLE/renderer/vulkan/ggp/DisplayVkGGP.h',
    b'libANGLE/renderer/vulkan/win32/DisplayVkWin32.h',
    b'libANGLE/renderer/vulkan/xcb/DisplayVkXcb.h',
    b'kernel/image.h',
}
# Quoted includes whose first path component matches one of these prefixes
# (platform/system headers) are skipped by the check.
IGNORED_INCLUDE_PREFIXES = {
    b'android',
    b'Carbon',
    b'CoreFoundation',
    b'CoreServices',
    b'IOSurface',
    b'mach',
    b'mach-o',
    b'OpenGL',
    b'pci',
    b'sys',
    b'wrl',
    b'X11',
}
# Targets whose names start with these directories are exempt from the
# include check entirely.
IGNORED_DIRECTORIES = {
    '//third_party/SwiftShader',
    '//third_party/vulkan-headers',
    '//third_party/vulkan-loader',
    '//third_party/vulkan-tools',
    '//third_party/vulkan-validation-layers',
}
def has_all_includes(target_name: str, descs: dict) -> bool:
    """Return True if every quoted #include in the target's sources is
    accounted for by the flattened sources/outputs of the target.

    Prints a warning to stderr for each unexplained include and returns
    False when any are found. Targets under IGNORED_DIRECTORIES pass
    unconditionally.
    """
    for ignored_directory in IGNORED_DIRECTORIES:
        if target_name.startswith(ignored_directory):
            return True
    flat = flattened_target(target_name, descs, stop_at_lib=False)
    # Compare on basenames only (encoded to bytes to match the file contents).
    acceptable_sources = flat.get('sources', []) + flat.get('outputs', [])
    acceptable_sources = {x.rsplit('/', 1)[-1].encode() for x in acceptable_sources}
    ret = True
    desc = descs[target_name]
    for cur_file in desc.get('sources', []):
        assert cur_file.startswith('/'), cur_file
        if not cur_file.startswith('//'):
            # Generated file outside the source root; nothing to read.
            continue
        cur_file = pathlib.Path(cur_file[2:])
        text = cur_file.read_bytes()
        for m in INCLUDE_REGEX.finditer(text):
            if m.group(1) == b'<':
                # Angle-bracket (system) includes are not checked.
                continue
            include = m.group(2)
            if include in IGNORED_INCLUDES:
                continue
            try:
                (prefix, _) = include.split(b'/', 1)
                if prefix in IGNORED_INCLUDE_PREFIXES:
                    continue
            except ValueError:
                # No '/' in the include path: no prefix to test.
                pass
            include_file = include.rsplit(b'/', 1)[-1]
            if include_file not in acceptable_sources:
                #print('  acceptable_sources:')
                #for x in sorted(acceptable_sources):
                #    print('    ', x)
                print('Warning in {}: {}: Invalid include: {}'.format(target_name, cur_file, include), file=sys.stderr)
                ret = False
                #print('Looks valid:', m.group())
                continue
    return ret
# -
# Gather real targets:
def gather_libraries(roots: Sequence[str], descs: dict) -> Set[str]:
    """Walk the dep graph from *roots*, logging each target, and collect
    every shared/static library target encountered."""
    found: Set[str] = set()

    def visit(target_name):
        desc = descs[target_name]
        print(' ' + desc['type'], target_name, file=sys.stderr)
        # Every target we pull in must pass the include check.
        assert has_all_includes(target_name, descs), target_name
        if desc['type'] in ('shared_library', 'static_library'):
            found.add(target_name)
        return (desc['deps'],)

    dag_traverse(roots, visit)
    return found
# -
# Collect every library reachable from the requested roots.
libraries = gather_libraries(ROOTS, descs)
print(f'\n{len(libraries)} libraries:', file=sys.stderr)
for k in libraries:
    print(f'  {k}', file=sys.stderr)
print('\nstdout begins:', file=sys.stderr)
sys.stderr.flush()
# ------------------------------------------------------------------------------
# Output
# One flattened description per library; each 'deps' list is rewritten to
# hold only library deps, with the first three characters stripped.
# NOTE(review): dep_name[3:] assumes every library dep name starts with
# '//:' (as asserted for ROOTS above) -- confirm for nested-path targets.
out = {k: flattened_target(k, descs) for k in libraries}
for (k,desc) in out.items():
    dep_libs: Set[str] = set()
    for dep_name in set(desc['deps']):
        dep = descs[dep_name]
        if dep['type'] in LIBRARY_TYPES:
            dep_libs.add(dep_name[3:])
    desc['deps'] = sortedi(dep_libs)
json.dump(out, sys.stdout, indent='  ')
exit(0)
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
#
# coverage.py documentation build configuration file, created by
# sphinx-quickstart on Wed May 13 22:18:33 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# Copied from django docs:
sys.path.append(os.path.join(os.path.dirname(__file__), "_ext"))
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinxcontrib.spelling',
'px_xlator',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Coverage.py'
copyright = u'2009\N{EN DASH}2015, Ned Batchelder'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.0'
# The full version, including alpha/beta/rc tags.
release = '4.0b2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
#html_translator_class = "px_xlator.PxTranslator"
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'default'
if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
#html_style = "neds.css"
#html_add_permalinks = ""
# Add any paths that contain custom themes here, relative to this directory.
# NOTE(review): this unconditionally overwrites the html_theme_path set in
# the on_rtd branch above, discarding sphinx_rtd_theme's path -- confirm
# this is intended.
html_theme_path = ['_templates']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = '.px'
# Output file base name for HTML help builder.
htmlhelp_basename = 'coveragepydoc'
# -- Spelling ---
spelling_word_list_filename = 'dict.txt'
spelling_show_suggestions = False
# When auto-doc'ing a class, write the class' docstring and the __init__ docstring
# into the class docs.
autoclass_content = "class"
# True when the release string contains any alphabetic character (e.g.
# '4.0b2' -> True): max() picks the lexicographically largest character.
prerelease = bool(max(release).isalpha())

def setup(app):
    """Sphinx extension hook: register the custom 'prerelease' config value."""
    app.add_config_value('prerelease', False, 'env')

# Parenthesized so this line is valid under both Python 2 and Python 3
# (the original bare `print` statement is a SyntaxError on Python 3);
# the printed text is unchanged.
print("** Prerelease = %r" % prerelease)
Try the alabaster theme
# -*- coding: utf-8 -*-
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
#
# coverage.py documentation build configuration file, created by
# sphinx-quickstart on Wed May 13 22:18:33 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# Copied from django docs:
sys.path.append(os.path.join(os.path.dirname(__file__), "_ext"))
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinxcontrib.spelling',
'px_xlator',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Coverage.py'
copyright = u'2009\N{EN DASH}2015, Ned Batchelder'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.0'
# The full version, including alpha/beta/rc tags.
release = '4.0b2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
#html_translator_class = "px_xlator.PxTranslator"
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'default'
# Disabled rtd-theme branch, kept for easy toggling back from alabaster.
if 0:
    if not on_rtd:  # only import and set the theme if we're building docs locally
        import sphinx_rtd_theme
        html_theme = 'sphinx_rtd_theme'
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    # otherwise, readthedocs.org uses their theme by default, so no need to specify it
import alabaster
html_theme = 'alabaster'
html_theme_path = ['.', alabaster.get_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'description':
        'A quick guide to Python for experienced programmers',
    'show_related': True,
    'github_button': False,
    'show_powered_by': False,
    # Adjacent string literals concatenate into a single CSS font stack.
    'font_family':
        'Georgia Pro, Georgia, '
        'serif',
    'head_font_family':
        'Franklin Gothic Medium, Franklin Gothic, ITC Franklin Gothic, '
        'Helvetica, Arial, '
        'sans-serif',
    'code_font_family':
        'Consolas, '
        'Menlo, '
        'monospace',
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
#html_style = "neds.css"
#html_add_permalinks = ""
# Add any paths that contain custom themes here, relative to this directory.
# NOTE(review): this unconditionally overwrites the alabaster html_theme_path
# set a few lines above -- confirm this is intended.
html_theme_path = ['_templates']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = '.px'
# Output file base name for HTML help builder.
htmlhelp_basename = 'coveragepydoc'
# -- Spelling ---
spelling_word_list_filename = 'dict.txt'
spelling_show_suggestions = False
# When auto-doc'ing a class, write the class' docstring and the __init__ docstring
# into the class docs.
autoclass_content = "class"
# True when the release string contains any alphabetic character (e.g.
# '4.0b2' -> True): max() picks the lexicographically largest character.
prerelease = bool(max(release).isalpha())

def setup(app):
    """Sphinx extension hook: register the custom 'prerelease' config value."""
    app.add_config_value('prerelease', False, 'env')

# Parenthesized so this line is valid under both Python 2 and Python 3
# (the original bare `print` statement is a SyntaxError on Python 3);
# the printed text is unchanged.
print("** Prerelease = %r" % prerelease)
|
"""Constants from SRA XML schema."""
EXISTING_STUDY_TYPES_ACTIVE = [
"Cancer Genomics",
"Epigenetics",
"Exome Sequencing",
"Metagenomics",
"Other",
"Pooled Clone Sequencing",
"Population Genomics",
"Synthetic Genomics",
"Transcriptome Analysis",
"Whole Genome Sequencing",
]
# Deprecated study-type names mapped to their preferred replacements.
# (The name misspells "DEPRECATED" but is kept as-is: it is part of this
# module's public interface.)
EXISTING_STUDY_TYPES_DEPRICATED = {
    "Resequencing": "Whole Genome Sequencing",
    "Forensic or Paleo-genomics": "Other",
    "Gene Regulation Study": "Transcriptome Analysis",
    # NOTE(review): "Transcriptome Sequencing" does not appear in
    # EXISTING_STUDY_TYPES_ACTIVE (which has "Transcriptome Analysis");
    # confirm this mapping against the SRA schema.
    "RNASeq": "Transcriptome Sequencing",
}
LIBRARY_STRATEGY = [
"AMPLICON",
"ATAC-seq",
"Bisulfite-Seq",
"ChIP-Seq",
"CLONE",
"CLONEEND",
"CTS",
"DNase-Hypersensitivity",
"EST",
"FAIRE-seq",
"FINISHING",
"FL-cDNA",
"HI-C",
"MBD-Seq",
"MeDIP-Seq",
"miRNA-Seq",
"MNase-Seq",
"MRE-Seq",
"ncRNA-Seq",
"OTHER",
"POOLCLONE",
"RIP-Seq",
"RNA-Seq",
"Synthetic-Long-Read",
"SELEX",
"Tn-Seq",
"WCS",
"WGA",
"WGS",
"WXS",
]
LIBRARY_SOURCE = [
"GENOMIC",
"METAGENOMIC",
"METATRANSCRIPTOMIC",
"NON GENOMIC",
"OTHER",
"SYNTHETIC",
"TRANSCRIPTOMIC",
"VIRAL RNA",
]
LIBRARY_SELECTION = [
"5-methylcytidine antibody",
"CAGE",
"cDNA",
"CF-H",
"CF-M",
"CF-S",
"CF-T",
"ChIP",
"DNAse",
"HMPR",
"Hybrid Selection",
"MBD2 protein methyl-CpG binding domain",
"MDA",
"MF",
"MNase",
"MSLL",
"Oligo-dT",
"other",
"padlock probes capture method",
"PCR",
"PolyA",
"RACE",
"RANDOM",
"RANDOM PCR",
"Reduced Representation",
"Restriction Digest",
"RT-PCR",
"size fractionation",
"unspecified",
]
LIBRARY_LAYOUT = [
"PAIRED",
"SINGLE",
]
PLATFORMS = [
"ABI_SOLID",
"CAPILLARY",
"COMPLETE_GENOMICS",
"HELICOS",
"ILLUMINA",
"ION_TORRENT",
"LS454",
"OXFORD_NANOPORE",
"PACBIO_SMRT",
]
INSTRUMENT_MODEL_ACTIVE = [
"454 GS",
"454 GS 20",
"454 GS FLX",
"454 GS FLX+",
"454 GS FLX Titanium",
"454 GS Junior",
"AB 310 Genetic Analyzer",
"AB 3130 Genetic Analyzer",
"AB 3130xL Genetic Analyzer",
"AB 3500 Genetic Analyzer",
"AB 3500xL Genetic Analyzer",
"AB 3730 Genetic Analyzer",
"AB 3730xL Genetic Analyzer",
"AB 5500 Genetic Analyzer",
"AB 5500xl Genetic Analyzer",
"AB SOLiD 3 Plus System",
"AB SOLiD 4hq System",
"AB SOLiD 4 System",
"AB SOLiD PI System",
"AB SOLiD System",
"AB SOLiD System 2.0",
"AB SOLiD System 3.0",
"Complete Genomics",
"Helicos HeliScope",
"Illumina Genome Analyzer",
"Illumina Genome Analyzer II",
"Illumina Genome Analyzer IIx",
"Illumina HiScanSQ",
"Illumina HiSeq 1000",
"Illumina HiSeq 1500",
"Illumina HiSeq 2000",
"Illumina HiSeq 2500",
"Illumina HiSeq 3000",
"Illumina HiSeq 3500",
"Illumina HiSeq 4000",
"Illumina HiSeq X Five",
"Illumina HiSeq X Ten",
"Illumina MiSeq",
"Illumina MiniSeq",
"Illumina NextSeq 500",
"Ion Torrent PGM",
"Ion Torrent Proton",
"NextSeq 500",
"NextSeq 550",
"MinION",
"PacBio RS",
"unspecified",
]
# Deprecated instrument-model names mapped to their current equivalents in
# INSTRUMENT_MODEL_ACTIVE. ("DEPRICATED" misspelling retained: public name.)
INSTRUMENT_MODEL_DEPRICATED = {
    "AB SOLiD 5500": "AB 5500 Genetic Analyzer",
    "AB SOLiD 5500xl": "AB 5500xl Genetic Analyzer",
    "HiSeq X Five": "Illumina HiSeq X Five",
    "HiSeq X Ten": "Illumina HiSeq X Ten",
}
Add PacBio RS II to instrument_models.
"""Constants from SRA XML schema."""
EXISTING_STUDY_TYPES_ACTIVE = [
"Cancer Genomics",
"Epigenetics",
"Exome Sequencing",
"Metagenomics",
"Other",
"Pooled Clone Sequencing",
"Population Genomics",
"Synthetic Genomics",
"Transcriptome Analysis",
"Whole Genome Sequencing",
]
# Deprecated study-type names mapped to their preferred replacements.
# (The name misspells "DEPRECATED" but is kept as-is: it is part of this
# module's public interface.)
EXISTING_STUDY_TYPES_DEPRICATED = {
    "Resequencing": "Whole Genome Sequencing",
    "Forensic or Paleo-genomics": "Other",
    "Gene Regulation Study": "Transcriptome Analysis",
    # NOTE(review): "Transcriptome Sequencing" does not appear in
    # EXISTING_STUDY_TYPES_ACTIVE (which has "Transcriptome Analysis");
    # confirm this mapping against the SRA schema.
    "RNASeq": "Transcriptome Sequencing",
}
LIBRARY_STRATEGY = [
"AMPLICON",
"ATAC-seq",
"Bisulfite-Seq",
"ChIP-Seq",
"CLONE",
"CLONEEND",
"CTS",
"DNase-Hypersensitivity",
"EST",
"FAIRE-seq",
"FINISHING",
"FL-cDNA",
"HI-C",
"MBD-Seq",
"MeDIP-Seq",
"miRNA-Seq",
"MNase-Seq",
"MRE-Seq",
"ncRNA-Seq",
"OTHER",
"POOLCLONE",
"RIP-Seq",
"RNA-Seq",
"Synthetic-Long-Read",
"SELEX",
"Tn-Seq",
"WCS",
"WGA",
"WGS",
"WXS",
]
LIBRARY_SOURCE = [
"GENOMIC",
"METAGENOMIC",
"METATRANSCRIPTOMIC",
"NON GENOMIC",
"OTHER",
"SYNTHETIC",
"TRANSCRIPTOMIC",
"VIRAL RNA",
]
LIBRARY_SELECTION = [
"5-methylcytidine antibody",
"CAGE",
"cDNA",
"CF-H",
"CF-M",
"CF-S",
"CF-T",
"ChIP",
"DNAse",
"HMPR",
"Hybrid Selection",
"MBD2 protein methyl-CpG binding domain",
"MDA",
"MF",
"MNase",
"MSLL",
"Oligo-dT",
"other",
"padlock probes capture method",
"PCR",
"PolyA",
"RACE",
"RANDOM",
"RANDOM PCR",
"Reduced Representation",
"Restriction Digest",
"RT-PCR",
"size fractionation",
"unspecified",
]
LIBRARY_LAYOUT = [
"PAIRED",
"SINGLE",
]
PLATFORMS = [
"ABI_SOLID",
"CAPILLARY",
"COMPLETE_GENOMICS",
"HELICOS",
"ILLUMINA",
"ION_TORRENT",
"LS454",
"OXFORD_NANOPORE",
"PACBIO_SMRT",
]
INSTRUMENT_MODEL_ACTIVE = [
"454 GS",
"454 GS 20",
"454 GS FLX",
"454 GS FLX+",
"454 GS FLX Titanium",
"454 GS Junior",
"AB 310 Genetic Analyzer",
"AB 3130 Genetic Analyzer",
"AB 3130xL Genetic Analyzer",
"AB 3500 Genetic Analyzer",
"AB 3500xL Genetic Analyzer",
"AB 3730 Genetic Analyzer",
"AB 3730xL Genetic Analyzer",
"AB 5500 Genetic Analyzer",
"AB 5500xl Genetic Analyzer",
"AB SOLiD 3 Plus System",
"AB SOLiD 4hq System",
"AB SOLiD 4 System",
"AB SOLiD PI System",
"AB SOLiD System",
"AB SOLiD System 2.0",
"AB SOLiD System 3.0",
"Complete Genomics",
"Helicos HeliScope",
"Illumina Genome Analyzer",
"Illumina Genome Analyzer II",
"Illumina Genome Analyzer IIx",
"Illumina HiScanSQ",
"Illumina HiSeq 1000",
"Illumina HiSeq 1500",
"Illumina HiSeq 2000",
"Illumina HiSeq 2500",
"Illumina HiSeq 3000",
"Illumina HiSeq 3500",
"Illumina HiSeq 4000",
"Illumina HiSeq X Five",
"Illumina HiSeq X Ten",
"Illumina MiSeq",
"Illumina MiniSeq",
"Illumina NextSeq 500",
"Ion Torrent PGM",
"Ion Torrent Proton",
"NextSeq 500",
"NextSeq 550",
"MinION",
"PacBio RS",
"PacBio RS II",
"unspecified",
]
# Deprecated instrument-model names mapped to their current equivalents in
# INSTRUMENT_MODEL_ACTIVE. ("DEPRICATED" misspelling retained: public name.)
INSTRUMENT_MODEL_DEPRICATED = {
    "AB SOLiD 5500": "AB 5500 Genetic Analyzer",
    "AB SOLiD 5500xl": "AB 5500xl Genetic Analyzer",
    "HiSeq X Five": "Illumina HiSeq X Five",
    "HiSeq X Ten": "Illumina HiSeq X Ten",
}
|
import re
import signal
import logging
import uuid
from subprocess import check_output
from ipykernel.kernelbase import Kernel
from pexpect import replwrap, EOF
from tornado.log import LogFormatter as ColoredFormatter
__version__ = '0.1'
version_pat = re.compile(r'version: (\d+(\.\d+)+)')
def make_logger(name, fname=None) -> logging.Logger:
    """Create (or fetch) a logger that emits DEBUG output to both a colored
    stream handler and a plain-text file handler.

    :param name: logger name, also the default log-file stem.
    :param fname: log file path; defaults to ``<name>.log``.
    :return: the configured :class:`logging.Logger`.
    """
    if fname is None:
        fname = name + '.log'
    logger = logging.getLogger(name)
    # getLogger returns a process-wide singleton: if this logger was already
    # configured, adding handlers again would duplicate every message.
    if logger.handlers:
        return logger
    logger.setLevel(logging.DEBUG)
    file_formatter = logging.Formatter('%(asctime)s | %(name)s | %(levelname)s: %(message)s')
    FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s'
    stream_formatter = ColoredFormatter(fmt=FORMAT, datefmt='%H:%M:%S')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(stream_formatter)
    stream_handler.setLevel(logging.DEBUG)
    logger.addHandler(stream_handler)
    # 'a' mode: append across kernel restarts rather than truncating.
    file_handler = logging.FileHandler(filename=fname, mode='a')
    file_handler.setFormatter(file_formatter)
    file_handler.setLevel(logging.DEBUG)
    logger.addHandler(file_handler)
    return logger
logger = make_logger('IMongo', fname='imongo_kernel.log')
class MyREPLWrapper(replwrap.REPLWrapper):
    """
    A subclass of REPLWrapper specific for the MongoDB shell.
    run_command is the only method overridden; the private helpers deal
    with the shell's ANSI-escape prompt repainting.
    """
    def __init__(self, *args, **kwargs):
        replwrap.REPLWrapper.__init__(self, *args, **kwargs)
        logger.info('Making MyREPLWrapper')
        # Keep the spawn arguments around for callers/debugging.
        self.args = args
        self.kwargs = kwargs

    def _filter_response(self, res):
        """Strip ANSI cursor/erase fragments from *res*, collapse the
        repeated fragments the shell repaints, and return the final text."""
        # Raw strings: '\[' and '\d' are regex escapes, not string escapes
        # (non-raw versions trigger invalid-escape warnings on Python 3.6+).
        msg = re.sub(r'\[\d+[A-Z]', '', res)
        msg = re.sub(r'\[J', '', msg)
        msg = [l.strip() for l in msg.split('\x1b') if l]
        output = []
        for l in msg[::-1]:
            if not output:
                output.append(l)
                continue
            if l not in output[-1]:
                output.append(l)
        return output[0]

    def _isbufferempty(self):
        # The mongo shell repaints its prompt with cursor-movement escapes;
        # treat that repaint sequence, or nothing at all, as an empty buffer.
        condition1 = self.child.buffer.strip() == '\x1b[47G\x1b[J\x1b[47G'
        condition2 = self.child.buffer.strip() == ''
        return condition1 or condition2

    def _send_line(self, cmd):
        """Send one line to the child, wrapping any failure in RuntimeError
        so the kernel can restart the shell."""
        try:
            self.child.sendline(cmd)
            logger.debug('Command sent. Waiting for prompt')
        except Exception as e:
            # Typo fixed in the message: 'exeception' -> 'exception'.
            exception_msg = 'Unexpected exception occurred.'
            logger.error('{}: {}: {}'.format(exception_msg, e.__class__.__name__, e.args))
            raise RuntimeError(exception_msg) from e

    def run_command(self, command, timeout=-1):
        """Send a command to the REPL, wait for and return output.
        :param str command: The command to send. Trailing newlines are not needed.
          This should be a complete block of input that will trigger execution;
          if a continuation prompt is found after sending input, :exc:`ValueError`
          will be raised.
        :param int timeout: How long to wait for the next prompt. -1 means the
          default from the :class:`pexpect.spawn` object (default 30 seconds).
          None means to wait indefinitely.
        """
        # Clean input command by removing indentation.
        # There seems to be a limitation with pexpect/mongo when entering
        # lines longer than 1000 characters. If that is the case, a ValueError
        # exception is raised.
        cmd = re.sub(r'\s{2,}', ' ', ' '.join([l for l in command.splitlines() if l]))
        logger.debug('Command length: {} chars'.format(len(cmd)))
        logger.debug('Command: {}'.format(cmd))
        if len(cmd) > 1000:
            # Typo fixes in the message: 'Please commands'/'chracters'.
            error = ('Code too long. Please use commands with fewer than 1000 effective characters.\n'
                     'Indentation spaces/tabs don\'t count towards "effective" characters.')
            logger.error(error)
            raise ValueError(error.replace('\n', ' '))
        self._send_line(cmd)
        match = self._expect_prompt(timeout=timeout)
        logger.debug('Prompt type: {}'.format(match))
        logger.debug('Iterating over message')
        response = []
        while not self._isbufferempty():
            response.append(self.child.before)
            logger.debug('Buffer not empty, sending blank line')
            match = self._expect_prompt(timeout=timeout)
            if match == 1:
                # If continuation prompt is detected, restart child (by raising ValueError)
                error = ('Code incomplete. Please enter valid and complete code.\n'
                         'Continuation prompt functionality not implemented yet.')
                logger.error(error.replace('\n', ' '))
                raise ValueError(error)
            self._send_line('')
        response.append(self.child.before)
        response = self._filter_response(''.join(response))
        logger.debug('Response: {}'.format(response))
        return response
# noinspection PyAbstractClass
class MongoKernel(Kernel):
implementation = 'IMongo'
implementation_version = __version__
_banner = None
language_info = {'name': 'javascript',
'codemirror_mode': 'shell',
'mimetype': 'text/x-mongodb',
'file_extension': '.js'}
@property
def language_version(self):
m = version_pat.search(self.banner)
return m.group(1)
    @property
    def banner(self):
        # Lazily shell out to `mongo --version` once; the stripped output is
        # cached in _banner so later accesses don't spawn a subprocess.
        if self._banner is None:
            self._banner = check_output(['mongo', '--version']).decode('utf-8').strip()
        return self._banner
    def __init__(self, **kwargs):
        """Initialize the Jupyter kernel and spawn the backing mongo shell."""
        Kernel.__init__(self, **kwargs)
        # Log environment facts up front to ease debugging of startup issues.
        logger.debug(self.language_info)
        logger.debug(self.language_version)
        logger.debug(self.banner)
        self._start_mongo()
    def _start_mongo(self):
        """Spawn a fresh `mongo --shell` child wrapped in MyREPLWrapper."""
        # Signal handlers are inherited by forked processes, and we can't easily
        # reset it from the subprocess. Since kernelapp ignores SIGINT except in
        # message handlers, we need to temporarily reset the SIGINT handler here
        # so that bash and its children are interruptible.
        # sig = signal.signal(signal.SIGINT, signal.SIG_DFL)
        # dir_func is an assistant Javascript function to be used by do_complete.
        # May be a slightly hackish approach.
        # http://stackoverflow.com/questions/5523747/equivalent-of-pythons-dir-in-javascript
        sig = signal.signal(signal.SIGINT, signal.SIG_DFL)
        try:
            # Unique prompt so output parsing can't collide with shell output.
            prompt = 'mongo{}mongo'.format(uuid.uuid4())
            cont_prompt = '... ' # Using expect_exact, so regex (i.e. '\.\.\. $') don't work
            prompt_cmd = "prompt = '{}'".format(prompt)
            dir_func = """function dir(object) {
attributes = [];
for (attr in object) {attributes.push(attr);}
attributes.sort();
return attributes;}"""
            spawn_cmd = """mongo --eval "{}" --shell""".format(';'.join([prompt_cmd, dir_func]))
            self.mongowrapper = MyREPLWrapper(spawn_cmd, orig_prompt=prompt,
                                              prompt_change=None, continuation_prompt=cont_prompt)
        finally:
            # Always restore the previous SIGINT handler for the kernel app.
            signal.signal(signal.SIGINT, sig)
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
if not code.strip():
return {'status': 'ok',
'execution_count': self.execution_count,
'payload': [],
'user_expressions': {}}
interrupted = False
error = None
try:
output = self.mongowrapper.run_command(code.rstrip())
except KeyboardInterrupt:
self.mongowrapper.child.sendeof()
interrupted = True
output = None
error = 'KeyboardInterrupt.'
self._start_mongo()
except (EOF, ValueError, RuntimeError) as e:
output = None
error = e.args[0]
self._start_mongo()
finally:
if error:
error_msg = {'name': 'stderr', 'text': error + '\nRestarting mongo shell...'}
self.send_response(self.iopub_socket, 'stream', error_msg)
if interrupted:
return {'status': 'abort', 'execution_count': self.execution_count}
if not silent and not error:
result = {'data': { "text/plain": output},
'execution_count': self.execution_count}
self.send_response(self.iopub_socket, 'execute_result', result)
# TODO: Error catching messages such as the one below:
#2016-11-14T12:47:11.718+0900 E QUERY [thread1] ReferenceError: aaa is not defined :
#@(shell):1:1
return_msg = {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
logger.debug('Return message: {}'.format(return_msg))
return return_msg
def do_complete(self, code, cursor_pos):
# TODO: Implement. Currently not working.
code = code[:cursor_pos]
default = {'matches': [], 'cursor_start': 0,
'cursor_end': cursor_pos, 'metadata': dict(),
'status': 'ok'}
if not code or code[-1] == ' ':
return default
tokens = code.replace(';', ' ').split()
if not tokens:
return default
matches = []
token = tokens[-1]
start = cursor_pos - len(token)
logger.debug('Tokens: {}'.format(tokens))
logger.debug('Comp code: {}'.format(code))
# matches = self.mongowrapper.run_command("dir(")
# [i.strip().replace(',', '').replace('"', '') for i in s.splitlines()[2:-1]]
# if token[0] == '$':
# # complete variables
# cmd = 'compgen -A arrayvar -A export -A variable %s' % token[1:] # strip leading $
# output = self.mongowrapper.run_command(cmd).rstrip()
# completions = set(output.split())
# # append matches including leading $
# matches.extend(['$'+c for c in completions])
# else:
# # complete functions and builtins
# cmd = 'compgen -cdfa %s' % token
# output = self.mongowrapper.run_command(cmd).rstrip()
# matches.extend(output.split())
#
# if not matches:
# return default
# matches = [m for m in matches if m.startswith(token)]
return {'matches': sorted(matches), 'cursor_start': start,
'cursor_end': cursor_pos, 'metadata': dict(),
'status': 'ok'}
Fix version pattern
import re
import signal
import logging
import uuid
from subprocess import check_output
from ipykernel.kernelbase import Kernel
from pexpect import replwrap, EOF
from tornado.log import LogFormatter as ColoredFormatter
# Kernel package version (reported to Jupyter as implementation_version).
__version__ = '0.1'
# Extracts a dotted version number (e.g. "3.2.0") from `mongo --version` output.
version_pat = re.compile(r'version\D*(\d+(\.\d+)+)')
def make_logger(name, fname=None) -> logging.Logger:
    """Create (or fetch) a DEBUG-level logger with a colored stream handler
    and a plain-text file handler.

    Args:
        name: logger name; also the default basename for the log file.
        fname: log file path; defaults to ``<name>.log``.

    Returns:
        The configured :class:`logging.Logger`.
    """
    if fname is None:
        fname = name + '.log'
    logger = logging.getLogger(name)
    # getLogger returns the same object for the same name, so without this
    # guard repeated calls would attach duplicate handlers and every record
    # would be emitted (and written to the file) multiple times.
    if logger.handlers:
        return logger
    logger.setLevel(logging.DEBUG)
    file_formatter = logging.Formatter('%(asctime)s | %(name)s | %(levelname)s: %(message)s')
    FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s'
    stream_formatter = ColoredFormatter(fmt=FORMAT, datefmt='%H:%M:%S')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(stream_formatter)
    stream_handler.setLevel(logging.DEBUG)
    logger.addHandler(stream_handler)
    file_handler = logging.FileHandler(filename=fname, mode='a')
    file_handler.setFormatter(file_formatter)
    file_handler.setLevel(logging.DEBUG)
    logger.addHandler(file_handler)
    return logger
logger = make_logger('IMongo', fname='imongo_kernel.log')
class MyREPLWrapper(replwrap.REPLWrapper):
    """
    A subclass of REPLWrapper specific for the MongoDB shell.
    run_command is the only method overridden.
    """
    def __init__(self, *args, **kwargs):
        replwrap.REPLWrapper.__init__(self, *args, **kwargs)
        logger.info('Making MyREPLWrapper')
        # Keep the construction arguments so an identical wrapper can be
        # recreated after the shell is restarted.
        self.args = args
        self.kwargs = kwargs
    def _filter_response(self, res):
        """Strip terminal-control noise from raw pexpect output and collapse
        the shell's re-echoed fragments into a single response string."""
        # Drop cursor-movement sequences such as "[24C" and erase-display
        # sequences "[J" (the ESC bytes themselves are split off below).
        msg = re.sub('\[\d+[A-Z]', '', res)
        msg = re.sub('\[J', '', msg)
        # Split on the escape character and discard empty fragments.
        msg = [l.strip() for l in msg.split('\x1b') if l]
        output = []
        # Walk fragments from the end; keep one only when it is not already
        # contained in the previously kept fragment (the shell re-echoes
        # growing partial lines while redrawing).
        for l in msg[::-1]:
            if not output:
                output.append(l)
                continue
            if l not in output[-1]:
                output.append(l)
        # The fragment kept first (last emitted) is the complete response.
        return output[0]
    def _isbufferempty(self):
        """Return True when the child's pending buffer holds nothing but
        redraw noise or is actually empty."""
        # '\x1b[47G\x1b[J\x1b[47G' is the reposition/erase sequence the mongo
        # shell emits when redrawing an otherwise empty line.
        condition1 = self.child.buffer.strip() == '\x1b[47G\x1b[J\x1b[47G'
        condition2 = self.child.buffer.strip() == ''
        return condition1 or condition2
    def _send_line(self, cmd):
        """Send one line to the child, wrapping unexpected pexpect failures
        in RuntimeError so do_execute can restart the shell."""
        try:
            self.child.sendline(cmd)
            logger.debug('Command sent. Waiting for prompt')
        except Exception as e:
            exeception_msg = 'Unexpected exeception occurred.'
            logger.error('{}: {}: {}'.format(exeception_msg, e.__class__.__name__, e.args))
            raise RuntimeError(exeception_msg)
    def run_command(self, command, timeout=-1):
        """Send a command to the REPL, wait for and return output.

        :param str command: The command to send. Trailing newlines are not
            needed. This should be a complete block of input that will trigger
            execution; if a continuation prompt is found after sending input,
            :exc:`ValueError` will be raised.
        :param int timeout: How long to wait for the next prompt. -1 means the
            default from the :class:`pexpect.spawn` object (default 30
            seconds). None means to wait indefinitely.
        """
        # Flatten the cell into one line: drop blank lines, join, and collapse
        # runs of whitespace (indentation) into single spaces.
        # There seems to be a limitation with pexpect/mongo when entering
        # lines longer than 1000 characters; in that case ValueError is raised.
        cmd = re.sub('\s{2,}', ' ', ' '.join([l for l in command.splitlines() if l]))
        logger.debug('Command length: {} chars'.format(len(cmd)))
        logger.debug('Command: {}'.format(cmd))
        if len(cmd) > 1000:
            error = ('Code too long. Please commands with less than 1000 effective chracters.\n'
                     'Indentation spaces/tabs don\'t count towards "effective" characters.')
            logger.error(error)
            raise ValueError(error.replace('\n', ' '))
        self._send_line(cmd)
        match = self._expect_prompt(timeout=timeout)
        logger.debug('Prompt type: {}'.format(match))
        logger.debug('Iterating over message')
        response = []
        # Drain the child's output: while the buffer still holds data, collect
        # each `before` chunk and re-arm the prompt expectation.
        while not self._isbufferempty():
            response.append(self.child.before)
            logger.debug('Buffer not empty, sending blank line')
            match = self._expect_prompt(timeout=timeout)
            if match == 1:
                # Continuation prompt detected: the input was incomplete, so
                # force a restart by raising ValueError.
                error = ('Code incomplete. Please enter valid and complete code.\n'
                         'Continuation prompt functionality not implemented yet.')
                logger.error(error.replace('\n', ' '))
                raise ValueError(error)
            self._send_line('')
        response.append(self.child.before)
        response = self._filter_response(''.join(response))
        logger.debug('Response: {}'.format(response))
        return response
# noinspection PyAbstractClass
class MongoKernel(Kernel):
    """Jupyter kernel that forwards code cells to a `mongo` shell subprocess
    (wrapped in MyREPLWrapper) and relays the shell's output back."""
    implementation = 'IMongo'
    implementation_version = __version__
    # Cached `mongo --version` output; populated lazily by the banner property.
    _banner = None
    language_info = {'name': 'javascript',
                     'codemirror_mode': 'shell',
                     'mimetype': 'text/x-mongodb',
                     'file_extension': '.js'}
    @property
    def language_version(self):
        """Version number (e.g. '3.2.0') parsed out of the shell banner."""
        m = version_pat.search(self.banner)
        return m.group(1)
    @property
    def banner(self):
        """`mongo --version` output, fetched once via subprocess and cached."""
        if self._banner is None:
            self._banner = check_output(['mongo', '--version']).decode('utf-8').strip()
        return self._banner
    def __init__(self, **kwargs):
        Kernel.__init__(self, **kwargs)
        logger.debug(self.language_info)
        logger.debug(self.language_version)
        logger.debug(self.banner)
        self._start_mongo()
    def _start_mongo(self):
        """Spawn the mongo shell with a unique prompt and wrap it in
        MyREPLWrapper; also injects a JS dir() helper meant for completion."""
        # Signal handlers are inherited by forked processes, and we can't easily
        # reset it from the subprocess. Since kernelapp ignores SIGINT except in
        # message handlers, we need to temporarily reset the SIGINT handler here
        # so that the shell and its children are interruptible.
        # dir_func is an assistant Javascript function to be used by do_complete.
        # May be a slightly hackish approach.
        # http://stackoverflow.com/questions/5523747/equivalent-of-pythons-dir-in-javascript
        sig = signal.signal(signal.SIGINT, signal.SIG_DFL)
        try:
            # A UUID-based prompt is effectively collision-free, so pexpect can
            # match it unambiguously against arbitrary shell output.
            prompt = 'mongo{}mongo'.format(uuid.uuid4())
            cont_prompt = '... '  # Using expect_exact, so regex (i.e. '\.\.\. $') don't work
            prompt_cmd = "prompt = '{}'".format(prompt)
            dir_func = """function dir(object) {
                attributes = [];
                for (attr in object) {attributes.push(attr);}
                attributes.sort();
                return attributes;}"""
            spawn_cmd = """mongo --eval "{}" --shell""".format(';'.join([prompt_cmd, dir_func]))
            self.mongowrapper = MyREPLWrapper(spawn_cmd, orig_prompt=prompt,
                                              prompt_change=None, continuation_prompt=cont_prompt)
        finally:
            signal.signal(signal.SIGINT, sig)
    def do_execute(self, code, silent, store_history=True,
                   user_expressions=None, allow_stdin=False):
        """Run `code` in the mongo shell and publish the result on iopub.
        On KeyboardInterrupt/EOF/ValueError/RuntimeError the shell is
        restarted and an error message is streamed to stderr."""
        if not code.strip():
            # Empty cell: nothing to run, report success immediately.
            return {'status': 'ok',
                    'execution_count': self.execution_count,
                    'payload': [],
                    'user_expressions': {}}
        interrupted = False
        error = None
        try:
            output = self.mongowrapper.run_command(code.rstrip())
        except KeyboardInterrupt:
            self.mongowrapper.child.sendeof()
            interrupted = True
            output = None
            error = 'KeyboardInterrupt.'
            self._start_mongo()
        except (EOF, ValueError, RuntimeError) as e:
            output = None
            error = e.args[0]
            self._start_mongo()
        finally:
            if error:
                error_msg = {'name': 'stderr', 'text': error + '\nRestarting mongo shell...'}
                self.send_response(self.iopub_socket, 'stream', error_msg)
        if interrupted:
            return {'status': 'abort', 'execution_count': self.execution_count}
        if not silent and not error:
            result = {'data': { "text/plain": output},
                      'execution_count': self.execution_count}
            self.send_response(self.iopub_socket, 'execute_result', result)
        # TODO: Error catching messages such as the one below:
        #2016-11-14T12:47:11.718+0900 E QUERY    [thread1] ReferenceError: aaa is not defined :
        #@(shell):1:1
        return_msg = {'status': 'ok', 'execution_count': self.execution_count,
                      'payload': [], 'user_expressions': {}}
        logger.debug('Return message: {}'.format(return_msg))
        return return_msg
    def do_complete(self, code, cursor_pos):
        """Tab-completion stub: tokenizes the text before the cursor but
        currently always returns an empty match list."""
        # TODO: Implement. Currently not working.
        code = code[:cursor_pos]
        default = {'matches': [], 'cursor_start': 0,
                   'cursor_end': cursor_pos, 'metadata': dict(),
                   'status': 'ok'}
        if not code or code[-1] == ' ':
            return default
        tokens = code.replace(';', ' ').split()
        if not tokens:
            return default
        matches = []
        token = tokens[-1]
        start = cursor_pos - len(token)
        logger.debug('Tokens: {}'.format(tokens))
        logger.debug('Comp code: {}'.format(code))
        # (kept for reference) bash-kernel style completion via compgen; the
        # mongo equivalent would use the JS dir() helper defined in _start_mongo.
        # matches = self.mongowrapper.run_command("dir(")
        # [i.strip().replace(',', '').replace('"', '') for i in s.splitlines()[2:-1]]
        # if token[0] == '$':
        #     cmd = 'compgen -A arrayvar -A export -A variable %s' % token[1:]
        #     output = self.mongowrapper.run_command(cmd).rstrip()
        #     completions = set(output.split())
        #     matches.extend(['$'+c for c in completions])
        # else:
        #     cmd = 'compgen -cdfa %s' % token
        #     output = self.mongowrapper.run_command(cmd).rstrip()
        #     matches.extend(output.split())
        # if not matches:
        #     return default
        # matches = [m for m in matches if m.startswith(token)]
        return {'matches': sorted(matches), 'cursor_start': start,
                'cursor_end': cursor_pos, 'metadata': dict(),
                'status': 'ok'}
|
# ----------------------------Compatibility Imports----------------------------
from __future__ import print_function
from six.moves import range
import sys
import hashlib
if sys.version_info < (3, 6):
import sha3
# -----------------------------------------------------------------------------
import os
import hmac # Python 2.7 and 3.3+
# TODO: Implement hash method choices
class HashCheck(object):
    """Class for comparing, processing, and generating hash digests."""

    @staticmethod
    def process_digest(digest):
        """Determines if source of digest is stored in a text file, or if it's
        a string provided by user.

        Args:
            digest (str): filename or string containing digest to be processed

        Returns:
            str: hash digest stripped of leading and trailing whitespace
        """
        if os.path.isfile(digest):
            with open(digest, 'r') as f:
                # Checksum files are "<digest> <filename>"; keep the digest.
                return f.read().split(' ')[0]
        else:
            return digest.strip()

    @staticmethod
    def generate_digest(filename):
        """Returns hexadecimal SHA-256 digest generated from filename.

        The file is read in fixed-size chunks so arbitrarily large files can
        be hashed without loading them fully into memory.

        Args:
            filename (str): filename of binary file

        Returns:
            str: hash digest generated from binary file
        """
        buffer_size = 65536  # 64 KiB chunks keep memory use bounded
        hash_digest = hashlib.sha256()
        with open(filename, 'rb') as f:
            # iter() with a b'' sentinel stops exactly at EOF; this avoids the
            # original os.path.getsize() pre-count, which raced with concurrent
            # file growth and always issued one extra empty read.
            for data in iter(lambda: f.read(buffer_size), b''):
                hash_digest.update(data)
        return hash_digest.hexdigest()

    @staticmethod
    def compare_digests(digest_1, digest_2):
        """Returns result of equality comparison between digest_1 and digest_2
        (constant-time, via hmac.compare_digest).

        Args:
            digest_1 (str): digest to be compared against digest_2
            digest_2 (str): digest to be compared against digest_1

        Returns:
            bool: result of comparison of digest_1 and digest_2
        """
        return hmac.compare_digest(digest_1, digest_2)
determine_sha_method() function added to hashchk.py
determine_sha_method() determines the SHA method by dispatching a user-provided digest against a dictionary of available SHA-2 and SHA-3 methods. The function defaults to SHA-2; however, SHA-3 is also supported.
# ----------------------------Compatibility Imports----------------------------
from __future__ import print_function
from six.moves import range
import sys
import hashlib
if sys.version_info < (3, 6):
import sha3
# -----------------------------------------------------------------------------
import os
import hmac # Python 2.7 and 3.3+
# TODO: Implement hash method choices
class HashCheck(object):
    """Utilities for reading, creating, and comparing hash digests."""

    @staticmethod
    def process_digest(digest):
        """Return the digest contained in *digest*.

        If *digest* names an existing file, the first whitespace-separated
        token of its contents is returned; otherwise *digest* itself is
        returned with surrounding whitespace removed.

        Args:
            digest (str): filename or literal digest string

        Returns:
            str: the extracted hash digest
        """
        if not os.path.isfile(digest):
            return digest.strip()
        with open(digest, 'r') as f:
            return f.read().split(' ')[0]

    @staticmethod
    def generate_digest(filename):
        """Return the hexadecimal SHA-256 digest of the file *filename*.

        Args:
            filename (str): path of the binary file to hash

        Returns:
            str: hex digest of the file contents
        """
        chunk_size = 65536  # hash in 64 KiB chunks to keep memory bounded
        n_chunks = os.path.getsize(filename) // chunk_size + 1
        digest = hashlib.sha256()
        with open(filename, 'rb') as f:
            for _ in range(n_chunks):
                digest.update(f.read(chunk_size))
        return digest.hexdigest()

    @staticmethod
    def compare_digests(digest_1, digest_2):
        """Return True when the two digest strings are equal.

        Args:
            digest_1 (str): first digest
            digest_2 (str): second digest

        Returns:
            bool: whether the digests match (timing-safe comparison)
        """
        return hmac.compare_digest(digest_1, digest_2)
def determine_sha_method(digest, family='sha2'):
    """Return the hashlib constructor matching *digest*'s length.

    Args:
        digest (str): user-provided hexdigest whose length selects the variant
        family (str, optional): 'sha2' (default) or 'sha3'

    Returns:
        object: the corresponding built-in hashlib constructor

    Raises:
        KeyError: if *family* is unknown or the digest length does not match
            any SHA variant.
    """
    prefixes = {'sha2': 'sha', 'sha3': 'sha3_'}
    sizes = {56: '224', 64: '256', 96: '384', 128: '512'}
    # Look up family first, then length, mirroring the original dispatch order.
    method_name = prefixes[family] + sizes[len(digest)]
    return getattr(hashlib, method_name)
if __name__ == '__main__':
    # Ad-hoc smoke test: a 64-character hexdigest should map to hashlib.sha256.
    s = determine_sha_method(
        'cda7a4ef4ff52524f06ebb8a3aea815f7df0dbcf27a7d501141f6f0fdf726ccd')
    print(s)
|
#!/usr/bin/env python3
import discord
import subprocess
import sys
sys.path.append("..")
import jauxiliar as jaux
import josecommon as jcommon
import joseerror as je
def pip_freeze():
    """Return the output of ``pip freeze`` as bytes.

    Blocking; intended to be run in an executor thread by the caller.
    """
    # An argv list with the default shell=False avoids spawning an
    # intermediate shell just to run a fixed two-word command.
    return subprocess.check_output(['pip', 'freeze'])
class JoseWatch(jaux.Auxiliar):
    """Auxiliary module that watches PyPI for updates to pinned requirements."""
    def __init__(self, cl):
        jaux.Auxiliar.__init__(self, cl)
        self.watch = {}
        # Map of package name -> pinned version, parsed from requirements.txt.
        self.requirements = {}
        with open('requirements.txt', 'r') as reqfile:
            reqlist = reqfile.read().split('\n')
        for pkg in reqlist:
            r = pkg.split('==')
            if len(r) != 2:
                # Skip blank lines and entries without an exact "==" pin.
                continue
            pkgname, pkgversion = r[0], r[1]
            self.requirements[pkgname] = pkgversion
        # Re-check PyPI for updates every hour.
        self.cbk_new('jwatch.updates', self.checkupdates, 3600)
    async def ext_load(self):
        return True, ''
    async def ext_unload(self):
        return True, ''
    async def checkupdates(self):
        """Compare installed package versions (pip freeze) against PyPI and
        return a list of human-readable update notices."""
        future_pip = self.loop.run_in_executor(None, pip_freeze)
        out = await future_pip
        out = out.decode('utf-8')
        packages = out.split('\n')
        res = []
        for pkgline in packages:
            r = pkgline.split('==')
            if len(r) != 2:
                continue
            pkgname, pkgversion = r[0], r[1]
            if pkgname in self.requirements:
                cur_version = self.requirements[pkgname]
                # PyPI only knows the extras-less name. The original assigned
                # to a typo'd variable ("pḱgname"), so the rename never took
                # effect and the URL below was built with "[voice]" included.
                if pkgname == 'discord.py[voice]':
                    pkgname = 'discord.py'
                pkgdata = await self.json_from_url(
                    'http://pypi.python.org/pypi/{}/json'.format(pkgname))
                new_version = pkgdata['info']['version']
                if new_version != cur_version:
                    res.append(" * `%r` needs update from %s to %s" % \
                        (pkgname, cur_version, new_version))
        await self.say_results(res)
        return res
    async def say_results(self, res):
        """Post the update notices (if any) as an embed in the dev server."""
        if len(res) <= 0:
            return
        em = discord.Embed(title='NEW UPDATES')
        for string in res:
            em.add_field(name='', value='{}'.format(string))
        em.set_footer(text="Total of {} updates".format(len(res)))
        jose_dev_server = [server for server in self.client.servers \
            if server.id == jcommon.JOSE_DEV_SERVER_ID][0]
        channel = discord.utils.get(jose_dev_server.channels, name='chat')
        await self.client.send_message(channel, embed=em)
    async def c_checkpkgs(self, message, args, cxt):
        """Admin command: run an update check and report when none are found."""
        await self.is_admin(cxt.message.author.id)
        res = await self.checkupdates()
        # Originally `len(res) < 0`, which is never true; test emptiness.
        if not res:
            await cxt.say("`No updates found.`")
            return
jwatch: don't use embeds
#!/usr/bin/env python3
import discord
import subprocess
import sys
sys.path.append("..")
import jauxiliar as jaux
import josecommon as jcommon
import joseerror as je
def pip_freeze():
    """Return the output of ``pip freeze`` as bytes.

    Blocking; intended to be run in an executor thread by the caller.
    """
    # An argv list with the default shell=False avoids spawning an
    # intermediate shell just to run a fixed two-word command.
    return subprocess.check_output(['pip', 'freeze'])
class JoseWatch(jaux.Auxiliar):
    """Auxiliary module that watches PyPI for updates to pinned requirements."""
    def __init__(self, cl):
        jaux.Auxiliar.__init__(self, cl)
        self.watch = {}
        # Map of package name -> pinned version, parsed from requirements.txt.
        self.requirements = {}
        with open('requirements.txt', 'r') as reqfile:
            reqlist = reqfile.read().split('\n')
        for pkg in reqlist:
            r = pkg.split('==')
            if len(r) != 2:
                # Skip blank lines and entries without an exact "==" pin.
                continue
            pkgname, pkgversion = r[0], r[1]
            self.requirements[pkgname] = pkgversion
        # Re-check PyPI for updates every hour.
        self.cbk_new('jwatch.updates', self.checkupdates, 3600)
    async def ext_load(self):
        return True, ''
    async def ext_unload(self):
        return True, ''
    async def checkupdates(self):
        """Compare installed package versions (pip freeze) against PyPI and
        return a list of human-readable update notices."""
        future_pip = self.loop.run_in_executor(None, pip_freeze)
        out = await future_pip
        out = out.decode('utf-8')
        packages = out.split('\n')
        res = []
        for pkgline in packages:
            r = pkgline.split('==')
            if len(r) != 2:
                continue
            pkgname, pkgversion = r[0], r[1]
            if pkgname in self.requirements:
                cur_version = self.requirements[pkgname]
                # PyPI only knows the extras-less name. The original assigned
                # to a typo'd variable ("pḱgname"), so the rename never took
                # effect and the URL below was built with "[voice]" included.
                if pkgname == 'discord.py[voice]':
                    pkgname = 'discord.py'
                pkgdata = await self.json_from_url(
                    'http://pypi.python.org/pypi/{}/json'.format(pkgname))
                new_version = pkgdata['info']['version']
                if new_version != cur_version:
                    res.append(" * `%r` needs update from %s to %s" % \
                        (pkgname, cur_version, new_version))
        await self.say_results(res)
        return res
    async def say_results(self, res):
        """Post the update notices (if any) as plain text in the dev server."""
        if len(res) <= 0:
            return
        jose_dev_server = [server for server in self.client.servers \
            if server.id == jcommon.JOSE_DEV_SERVER_ID][0]
        channel = discord.utils.get(jose_dev_server.channels, name='chat')
        await self.client.send_message(channel, '\n'.join(res))
    async def c_checkpkgs(self, message, args, cxt):
        """Admin command: run an update check and report when none are found."""
        await self.is_admin(cxt.message.author.id)
        res = await self.checkupdates()
        await cxt.send_typing()
        # Originally `len(res) < 0`, which is never true; test emptiness.
        if not res:
            await cxt.say("`No updates found.`")
            return
|
# -*- coding: utf-8 -*-
#
# Wyrm documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 28 11:37:55 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../wyrm'))
import wyrm
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autosummary',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.coverage',
'numpydoc',
]
autosummary_generate = True
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Wyrm'
copyright = u'2013, Bastian Venthur'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = wyrm.__version__
# The full version, including alpha/beta/rc tags.
release = version + '.beta'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = {'collapsiblesidebar' : True,
'externalrefs' : True}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Wyrmdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Wyrm.tex', u'Wyrm Documentation',
u'Bastian Venthur', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wyrm', u'Wyrm Documentation',
[u'Bastian Venthur'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Wyrm', u'Wyrm Documentation',
u'Bastian Venthur', 'Wyrm', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
Updated copyright years
# -*- coding: utf-8 -*-
#
# Wyrm documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 28 11:37:55 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../wyrm'))
import wyrm
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autosummary',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.coverage',
'numpydoc',
]
autosummary_generate = True
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Wyrm'
copyright = u'2012 - 2014, Bastian Venthur'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = wyrm.__version__
# The full version, including alpha/beta/rc tags.
release = version + '.beta'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = {'collapsiblesidebar' : True,
'externalrefs' : True}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Wyrmdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Wyrm.tex', u'Wyrm Documentation',
u'Bastian Venthur', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wyrm', u'Wyrm Documentation',
[u'Bastian Venthur'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Wyrm', u'Wyrm Documentation',
u'Bastian Venthur', 'Wyrm', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
#!/usr/bin/env python
import sys, signal
if len( sys.argv ) < 3:
print "usage " + sys.argv[0] + " <instance list file> <directory>"
sys.exit(0)
import glob, os
def handler( signal, frame ):
print "Script interrupted"
sys.exit(0)
instance_list = sys.argv[1]
target_directory = sys.argv[2]
getfeatures_exec = './getfeatures '
signal.signal( signal.SIGINT, handler )
for line in open( instance_list ):
if line[0] == '#':
continue
name, path, number = line.split(' ')
print "Obtaining features from " + name,
files = glob.glob( path )
print "with " + str( len( files ) ) + " instances"
for file in files:
os.popen2( getfeatures_exec + file + ' > ' + target_directory + '/' + name + '.' + os.path.basename(file) + '.features' )
Fix bug: split instance-list lines on arbitrary whitespace (split() instead of split(' ')) and replace the deprecated os.popen2 call
#!/usr/bin/env python
import sys, signal
if len( sys.argv ) < 3:
print "usage " + sys.argv[0] + " <instance list file> <directory>"
sys.exit(0)
import glob, os
def handler( signal, frame ):
print "Script interrupted"
sys.exit(0)
instance_list = sys.argv[1]
target_directory = sys.argv[2]
getfeatures_exec = './getfeatures '
signal.signal( signal.SIGINT, handler )
for line in open( instance_list ):
if line[0] == '#':
continue
name, path, number = line.split()
print "Obtaining features from " + name,
files = glob.glob( path )
print "with " + str( len( files ) ) + " instances"
for file in files:
os.popen( getfeatures_exec + file + ' > ' + target_directory + '/' + name + '.' + os.path.basename(file) + '.features' )
|
#
# OpenSlide Python documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 16 15:06:45 2014.
#
# This file is execfile()d by Sphinx with its containing directory as cwd.
# Only values that differ from the Sphinx defaults are set; consult the
# Sphinx configuration reference for everything that is omitted here.
import os
import sys

# Make the package under ../openslide and the local extension importable.
sys.path.insert(0, os.path.abspath('../openslide'))
sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------
extensions = [
    'sphinx.ext.coverage',
    'sphinx.ext.intersphinx',
    'jekyll_fix',
]
source_suffix = '.rst'
master_doc = 'index'

# Project metadata.
project = 'OpenSlide Python'
copyright = '2010-2016 Carnegie Mellon University and others'

# Version info, substituted for |version| and |release| in the docs.
import _version  # noqa: E402 module-level-import-not-at-top-of-file
version = _version.__version__
release = _version.__version__

pygments_style = 'sphinx'

# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
html_theme_options = {
    'stickysidebar': True,
}
html_sidebars = {
    '**': ['localtoc.html', 'sourcelink.html'],
}
html_domain_indices = False
html_use_index = False
htmlhelp_basename = 'OpenSlidePythondoc'

# -- Options for LaTeX output --------------------------------------------------
# All LaTeX knobs (papersize, pointsize, preamble) left at their defaults.
latex_elements = {
}
latex_documents = [
    (
        'index',
        'OpenSlidePython.tex',
        'OpenSlide Python Documentation',
        'OpenSlide project',
        'manual',
    ),
]

# -- Options for manual page output --------------------------------------------
man_pages = [
    (
        'index',
        'openslidepython',
        'OpenSlide Python Documentation',
        ['OpenSlide project'],
        1,
    )
]

# -- Options for Texinfo output ------------------------------------------------
texinfo_documents = [
    (
        'index',
        'OpenSlidePython',
        'OpenSlide Python Documentation',
        'OpenSlide project',
        'OpenSlidePython',
        'One line description of project.',
        'Miscellaneous',
    ),
]

# Cross-project links into the Python and Pillow documentation.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3/', None),
    'pillow': ('https://pillow.readthedocs.io/en/latest/', None),
}
doc: update copyright date
#
# OpenSlide Python documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 16 15:06:45 2014.
#
# This file is execfile()d by Sphinx with its containing directory as cwd.
# Only values that differ from the Sphinx defaults are set; consult the
# Sphinx configuration reference for everything that is omitted here.
import os
import sys

# Make the package under ../openslide and the local extension importable.
sys.path.insert(0, os.path.abspath('../openslide'))
sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------
extensions = [
    'sphinx.ext.coverage',
    'sphinx.ext.intersphinx',
    'jekyll_fix',
]
source_suffix = '.rst'
master_doc = 'index'

# Project metadata.
project = 'OpenSlide Python'
copyright = '2010-2022 Carnegie Mellon University and others'

# Version info, substituted for |version| and |release| in the docs.
import _version  # noqa: E402 module-level-import-not-at-top-of-file
version = _version.__version__
release = _version.__version__

pygments_style = 'sphinx'

# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
html_theme_options = {
    'stickysidebar': True,
}
html_sidebars = {
    '**': ['localtoc.html', 'sourcelink.html'],
}
html_domain_indices = False
html_use_index = False
htmlhelp_basename = 'OpenSlidePythondoc'

# -- Options for LaTeX output --------------------------------------------------
# All LaTeX knobs (papersize, pointsize, preamble) left at their defaults.
latex_elements = {
}
latex_documents = [
    (
        'index',
        'OpenSlidePython.tex',
        'OpenSlide Python Documentation',
        'OpenSlide project',
        'manual',
    ),
]

# -- Options for manual page output --------------------------------------------
man_pages = [
    (
        'index',
        'openslidepython',
        'OpenSlide Python Documentation',
        ['OpenSlide project'],
        1,
    )
]

# -- Options for Texinfo output ------------------------------------------------
texinfo_documents = [
    (
        'index',
        'OpenSlidePython',
        'OpenSlide Python Documentation',
        'OpenSlide project',
        'OpenSlidePython',
        'One line description of project.',
        'Miscellaneous',
    ),
]

# Cross-project links into the Python and Pillow documentation.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3/', None),
    'pillow': ('https://pillow.readthedocs.io/en/latest/', None),
}
|
from gpiozero import Button
import git
from time import sleep
from subprocess import Popen, PIPE
from keyboard_simulator import keyboard
import uinput
# Path of the git checkout that the update button pulls.
GIT_PATH = '/home/pi/projects/labrador/'
# Shell form of the supervisor reload command.
# NOTE(review): unused -- GPIO_HANDLER.restart_supervisor builds its own
# argument list; confirm this constant can be dropped.
restart_supervisor = "supervisorctl reload"
class GPIO_HANDLER:
    """Callbacks for the hardware deployment/refresh buttons.

    All state is class-level: one git handle on the project checkout and
    one uinput device used to synthesize an F5 key press.
    """

    GIT = git.cmd.Git(GIT_PATH)
    DEVICE = uinput.Device([uinput.KEY_F5])

    @classmethod
    def refresh(cls):
        """Emit a synthetic F5 key press so the browser reloads the page."""
        print("f5")
        cls.DEVICE.emit_click(uinput.KEY_F5)

    @classmethod
    def restart_supervisor(cls):
        """Reload supervisor, then wait for the services to come back up."""
        proc = Popen(['supervisorctl', 'reload'], stdout=PIPE)
        out, err = proc.communicate()
        sleep(20)

    @classmethod
    def rebuild_css(cls):
        """Recompile the site theme from its SCSS source."""
        proc = Popen([
            'scss',
            '/home/pi/projects/labrador/labrador/static/scss/theme.scss',
            'theme.css'],
            stdout=PIPE)
        out, err = proc.communicate()

    @classmethod
    def update(cls):
        """Pull the latest code, restart the services, then reload the page."""
        print("Pulling latest update")
        cls.GIT.pull()
        # print("Rebuilding CSS")
        # cls.rebuild_css()
        print("restarting Services")
        cls.restart_supervisor()
        print("refreshing")
        cls.refresh()
if __name__ == '__main__':
    # Wire each physical button to its handler.
    update_btn = Button(17)
    update_btn.when_released = GPIO_HANDLER.update
    # BUG FIX: the original rebound `update_btn` for the second button,
    # dropping the only reference to Button(17); gpiozero buttons that are
    # garbage-collected stop firing their callbacks.  Keep distinct names.
    refresh_btn = Button(22)
    refresh_btn.when_released = GPIO_HANDLER.refresh
    # BUG FIX: `while True: pass` busy-waited at 100% CPU; sleeping keeps the
    # process alive (callbacks run on gpiozero's own threads) without load.
    while True:
        sleep(3600)
    # channel_list = [17, 22, 23, 27]
Fix boot script: drop the unused keyboard_simulator import
from gpiozero import Button
import git
from time import sleep
from subprocess import Popen, PIPE
import uinput
# Path of the git checkout that the update button pulls.
GIT_PATH = '/home/pi/projects/labrador/'
# Shell form of the supervisor reload command.
# NOTE(review): unused -- GPIO_HANDLER.restart_supervisor builds its own
# argument list; confirm this constant can be dropped.
restart_supervisor = "supervisorctl reload"
class GPIO_HANDLER:
    """Callbacks for the hardware deployment/refresh buttons.

    All state is class-level: one git handle on the project checkout and
    one uinput device used to synthesize an F5 key press.
    """

    GIT = git.cmd.Git(GIT_PATH)
    DEVICE = uinput.Device([uinput.KEY_F5])

    @classmethod
    def refresh(cls):
        """Emit a synthetic F5 key press so the browser reloads the page."""
        print("f5")
        cls.DEVICE.emit_click(uinput.KEY_F5)

    @classmethod
    def restart_supervisor(cls):
        """Reload supervisor, then wait for the services to come back up."""
        proc = Popen(['supervisorctl', 'reload'], stdout=PIPE)
        out, err = proc.communicate()
        sleep(20)

    @classmethod
    def rebuild_css(cls):
        """Recompile the site theme from its SCSS source."""
        proc = Popen([
            'scss',
            '/home/pi/projects/labrador/labrador/static/scss/theme.scss',
            'theme.css'],
            stdout=PIPE)
        out, err = proc.communicate()

    @classmethod
    def update(cls):
        """Pull the latest code, restart the services, then reload the page."""
        print("Pulling latest update")
        cls.GIT.pull()
        # print("Rebuilding CSS")
        # cls.rebuild_css()
        print("restarting Services")
        cls.restart_supervisor()
        print("refreshing")
        cls.refresh()
if __name__ == '__main__':
    # Wire each physical button to its handler.
    update_btn = Button(17)
    update_btn.when_released = GPIO_HANDLER.update
    # BUG FIX: the original rebound `update_btn` for the second button,
    # dropping the only reference to Button(17); gpiozero buttons that are
    # garbage-collected stop firing their callbacks.  Keep distinct names.
    refresh_btn = Button(22)
    refresh_btn.when_released = GPIO_HANDLER.refresh
    # BUG FIX: `while True: pass` busy-waited at 100% CPU; sleeping keeps the
    # process alive (callbacks run on gpiozero's own threads) without load.
    while True:
        sleep(3600)
    # channel_list = [17, 22, 23, 27]
|
# coding: utf-8
""" Gaia Challenge 2 -- Pal 5 Challenge """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import logging
import os
import sys
# Third-party
from astropy import log as logger
import matplotlib.pyplot as plt
import numpy as np
import streamteam.dynamics as sd
import streamteam.integrate as si
import streamteam.io as io
import streamteam.potential as sp
from streamteam.units import galactic
from streamteam.util import get_pool
import triangle
from astropy.constants import G
# Gravitational constant converted into the `galactic` unit system.
Gee = G.decompose(galactic).value
# streams
from streams.util import streamspath
from streams.rewinder import Rewinder, RewinderSampler
import streams.coordinates as stc
# Module-level worker-pool handle, intended for cleanup in the __main__
# block.  NOTE(review): `global` at module scope is a no-op, and main()
# binds its own local `pool`, so this stays None -- confirm the intent.
global pool
pool = None
def plot_traces(chain, p0=None, truths=None):
    """Make one trace figure per parameter of an MCMC chain.

    Parameters
    ----------
    chain : array-like
        Walker chains; the last axis indexes parameters.
    p0 : array-like, optional
        Initial walker positions, drawn as faint red horizontal lines.
    truths : sequence, optional
        True parameter values, drawn as green horizontal lines.

    Returns
    -------
    list of matplotlib figures, one per parameter.
    """
    figs = []
    nparams = chain.shape[-1]
    for param_ix in range(nparams):
        fig, ax = plt.subplots(1, 1, figsize=(10, 6))
        # One faint black step-line per walker.
        for walker in chain[..., param_ix]:
            ax.plot(walker, marker=None, drawstyle='steps', alpha=0.2, color='k')
        if p0 is not None:
            for start in p0[:, param_ix]:
                ax.axhline(start, alpha=0.2, color='r')
        if truths is not None:
            ax.axhline(truths[param_ix], alpha=0.7, color='g')
        figs.append(fig)
    return figs
def main(ix, mpi=False, overwrite=False):
    """Run the Rewinder MCMC for challenge *ix* and plot the results.

    Parameters
    ----------
    ix : int or str
        Challenge index; selects config/hans_challenge{ix}.yml.
    mpi : bool
        If True, distribute likelihood evaluations over an MPI pool.
    overwrite : bool
        If True, delete any cached chain file and re-run sampling.

    Side effects: writes chain.npy, per-parameter trace plots and a corner
    plot under output/<name>/, closes the pool, and calls sys.exit(0).
    """
    # NOTE(review): this binds a *local* `pool`; the module-level `pool`
    # that the __main__ block tries to close is never set -- confirm intent.
    pool = get_pool(mpi=mpi)
    cfg_path = os.path.join(streamspath, "config/hans_challenge{}.yml".format(ix))
    model = Rewinder.from_config(cfg_path)
    out_path = os.path.join(streamspath, "output/{}".format(model.config['name']))
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    chain_file = os.path.join(out_path, "chain.npy")
    sampler = RewinderSampler(model, nwalkers=64, pool=pool)
    # Parameters used to generate the mock data; walkers start in a tiny
    # Gaussian ball around this point.
    true_parameter_values = dict(potential=dict(v_h=1., r_h=12),
                                 progenitor=dict(m0=2E9),
                                 hyper=dict(alpha=1.125, theta=0.))
    truth = model.vectorize(true_parameter_values)
    if overwrite and os.path.exists(chain_file):
        os.remove(chain_file)
    # Sample only if no cached chain exists; otherwise just re-plot.
    if not os.path.exists(chain_file):
        # p0_sigma = model.vectorize(parameter_sigmas)
        p0_sigma = np.abs(truth*1E-6)
        p0 = np.random.normal(truth, p0_sigma, size=(sampler.nwalkers, sampler.dim))
        # burn in
        sampler.run_inference(p0, 100)
        best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()]
        figs = plot_traces(sampler.chain, p0=None, truths=truth)
        for i,fig in enumerate(figs):
            fig.savefig(os.path.join(out_path, "burn_{}.png".format(i)))
        sampler.reset()
        logger.info("Done burning in")
        # restart walkers from best position, burn again
        new_pos = np.random.normal(best_pos, p0_sigma,
                                   size=(sampler.nwalkers, p0.shape[1]))
        sampler.run_inference(new_pos, 500)
        pos = sampler.chain[:,-1].copy()
        sampler.reset()
        logger.info("Done re-burn")
        # run for inference steps
        sampler.run_inference(pos, 500)
        logger.debug("Acceptance fraction: {}".format(sampler.acceptance_fraction))
        chain = sampler.chain
        np.save(chain_file, chain)
    else:
        # Reuse the chain cached by an earlier run.
        chain = np.load(chain_file)
    # Per-parameter trace plots of the final chain.
    figs = plot_traces(chain, p0=None, truths=truth)
    for i,fig in enumerate(figs):
        fig.savefig(os.path.join(out_path, "{}.png".format(i)))
    # Corner plot over the flattened (walker x step) samples.
    flatchain = np.vstack(chain)
    extents = [(0.8,1.2), (5,30)]
    fig = triangle.corner(flatchain, truths=truth,
                          extents=extents)
    # labels=[r"$M$ [$M_\odot$]", r"$R_h$ [kpc]", "$q_z$"])
    fig.savefig(os.path.join(out_path, "corner.png"))
    pool.close()
    sys.exit(0)
if __name__ == "__main__":
    from argparse import ArgumentParser

    # Command-line interface; -i selects which challenge config to run.
    parser = ArgumentParser(description="")
    parser.add_argument("-v", "--verbose", action="store_true",
                        dest="verbose", default=False,
                        help="Be chatty! (default = False)")
    parser.add_argument("-q", "--quiet", action="store_true", dest="quiet",
                        default=False, help="Be quiet! (default = False)")
    parser.add_argument("-o", "--overwrite", dest="overwrite", default=False,
                        action="store_true", help="Nukem.")
    # threading
    parser.add_argument("--mpi", dest="mpi", default=False, action="store_true",
                        help="Run with MPI.")
    parser.add_argument("-i", dest="ix", required=True)
    args = parser.parse_args()

    # Logging verbosity: -v wins over -q; default is INFO.
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    elif args.quiet:
        logging.basicConfig(level=logging.ERROR)
    else:
        logging.basicConfig(level=logging.INFO)

    try:
        main(args.ix, mpi=args.mpi, overwrite=args.overwrite)
    except BaseException:
        # BUG FIX: the bare `except:` is now an explicit BaseException (same
        # coverage, but intentional), and the unreachable sys.exit(1) that
        # followed `raise` is removed.
        # NOTE(review): main() binds a *local* `pool`, so the module-level
        # `pool` is still None here and this cleanup is a no-op -- confirm.
        if hasattr(pool, 'close'):
            pool.close()
        raise
    if hasattr(pool, 'close'):
        pool.close()
    sys.exit(0)
Drop the unsupported `extents` keyword from the triangle.corner call
# coding: utf-8
""" Gaia Challenge 2 -- Pal 5 Challenge """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import logging
import os
import sys
# Third-party
from astropy import log as logger
import matplotlib.pyplot as plt
import numpy as np
import streamteam.dynamics as sd
import streamteam.integrate as si
import streamteam.io as io
import streamteam.potential as sp
from streamteam.units import galactic
from streamteam.util import get_pool
import triangle
from astropy.constants import G
# Gravitational constant converted into the `galactic` unit system.
Gee = G.decompose(galactic).value
# streams
from streams.util import streamspath
from streams.rewinder import Rewinder, RewinderSampler
import streams.coordinates as stc
# Module-level worker-pool handle, intended for cleanup in the __main__
# block.  NOTE(review): `global` at module scope is a no-op, and main()
# binds its own local `pool`, so this stays None -- confirm the intent.
global pool
pool = None
def plot_traces(chain, p0=None, truths=None):
    """Make one trace figure per parameter of an MCMC chain.

    Parameters
    ----------
    chain : array-like
        Walker chains; the last axis indexes parameters.
    p0 : array-like, optional
        Initial walker positions, drawn as faint red horizontal lines.
    truths : sequence, optional
        True parameter values, drawn as green horizontal lines.

    Returns
    -------
    list of matplotlib figures, one per parameter.
    """
    figs = []
    nparams = chain.shape[-1]
    for param_ix in range(nparams):
        fig, ax = plt.subplots(1, 1, figsize=(10, 6))
        # One faint black step-line per walker.
        for walker in chain[..., param_ix]:
            ax.plot(walker, marker=None, drawstyle='steps', alpha=0.2, color='k')
        if p0 is not None:
            for start in p0[:, param_ix]:
                ax.axhline(start, alpha=0.2, color='r')
        if truths is not None:
            ax.axhline(truths[param_ix], alpha=0.7, color='g')
        figs.append(fig)
    return figs
def main(ix, mpi=False, overwrite=False):
    """Run the Rewinder MCMC for challenge *ix* and plot the results.

    Parameters
    ----------
    ix : int or str
        Challenge index; selects config/hans_challenge{ix}.yml.
    mpi : bool
        If True, distribute likelihood evaluations over an MPI pool.
    overwrite : bool
        If True, delete any cached chain file and re-run sampling.

    Side effects: writes chain.npy, per-parameter trace plots and a corner
    plot under output/<name>/, closes the pool, and calls sys.exit(0).
    """
    # NOTE(review): this binds a *local* `pool`; the module-level `pool`
    # that the __main__ block tries to close is never set -- confirm intent.
    pool = get_pool(mpi=mpi)
    cfg_path = os.path.join(streamspath, "config/hans_challenge{}.yml".format(ix))
    model = Rewinder.from_config(cfg_path)
    out_path = os.path.join(streamspath, "output/{}".format(model.config['name']))
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    chain_file = os.path.join(out_path, "chain.npy")
    sampler = RewinderSampler(model, nwalkers=64, pool=pool)
    # Parameters used to generate the mock data; walkers start in a tiny
    # Gaussian ball around this point.
    true_parameter_values = dict(potential=dict(v_h=1., r_h=12),
                                 progenitor=dict(m0=2E9),
                                 hyper=dict(alpha=1.125, theta=0.))
    truth = model.vectorize(true_parameter_values)
    if overwrite and os.path.exists(chain_file):
        os.remove(chain_file)
    # Sample only if no cached chain exists; otherwise just re-plot.
    if not os.path.exists(chain_file):
        # p0_sigma = model.vectorize(parameter_sigmas)
        p0_sigma = np.abs(truth*1E-6)
        p0 = np.random.normal(truth, p0_sigma, size=(sampler.nwalkers, sampler.dim))
        # burn in
        sampler.run_inference(p0, 100)
        best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()]
        figs = plot_traces(sampler.chain, p0=None, truths=truth)
        for i,fig in enumerate(figs):
            fig.savefig(os.path.join(out_path, "burn_{}.png".format(i)))
        sampler.reset()
        logger.info("Done burning in")
        # restart walkers from best position, burn again
        new_pos = np.random.normal(best_pos, p0_sigma,
                                   size=(sampler.nwalkers, p0.shape[1]))
        sampler.run_inference(new_pos, 500)
        pos = sampler.chain[:,-1].copy()
        sampler.reset()
        logger.info("Done re-burn")
        # run for inference steps
        sampler.run_inference(pos, 500)
        logger.debug("Acceptance fraction: {}".format(sampler.acceptance_fraction))
        chain = sampler.chain
        np.save(chain_file, chain)
    else:
        # Reuse the chain cached by an earlier run.
        chain = np.load(chain_file)
    # Per-parameter trace plots of the final chain.
    figs = plot_traces(chain, p0=None, truths=truth)
    for i,fig in enumerate(figs):
        fig.savefig(os.path.join(out_path, "{}.png".format(i)))
    # Corner plot over the flattened (walker x step) samples.
    # BUG FIX: removed the `extents = [(0.8,1.2), (5,30)]` local and its
    # stale commented-out leftovers -- it was unused after the `extents=`
    # keyword was dropped from triangle.corner().
    flatchain = np.vstack(chain)
    fig = triangle.corner(flatchain, truths=truth)
    fig.savefig(os.path.join(out_path, "corner.png"))
    pool.close()
    sys.exit(0)
if __name__ == "__main__":
    from argparse import ArgumentParser

    # Command-line interface; -i selects which challenge config to run.
    parser = ArgumentParser(description="")
    parser.add_argument("-v", "--verbose", action="store_true",
                        dest="verbose", default=False,
                        help="Be chatty! (default = False)")
    parser.add_argument("-q", "--quiet", action="store_true", dest="quiet",
                        default=False, help="Be quiet! (default = False)")
    parser.add_argument("-o", "--overwrite", dest="overwrite", default=False,
                        action="store_true", help="Nukem.")
    # threading
    parser.add_argument("--mpi", dest="mpi", default=False, action="store_true",
                        help="Run with MPI.")
    parser.add_argument("-i", dest="ix", required=True)
    args = parser.parse_args()

    # Logging verbosity: -v wins over -q; default is INFO.
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    elif args.quiet:
        logging.basicConfig(level=logging.ERROR)
    else:
        logging.basicConfig(level=logging.INFO)

    try:
        main(args.ix, mpi=args.mpi, overwrite=args.overwrite)
    except BaseException:
        # BUG FIX: the bare `except:` is now an explicit BaseException (same
        # coverage, but intentional), and the unreachable sys.exit(1) that
        # followed `raise` is removed.
        # NOTE(review): main() binds a *local* `pool`, so the module-level
        # `pool` is still None here and this cleanup is a no-op -- confirm.
        if hasattr(pool, 'close'):
            pool.close()
        raise
    if hasattr(pool, 'close'):
        pool.close()
    sys.exit(0)
|
# -*- coding: utf-8 -*-
#
# xarray documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 6 18:57:54 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import sys
import warnings
import os
import datetime
# Echo the interpreter and module search path used for this docs build.
print("python exec:", sys.executable)
print("sys.path:", sys.path)

# Report the version and install location of each optional dependency;
# a missing dependency is noted but does not abort the build.
try:
    import numpy
    print("numpy: %s, %s" % (numpy.__version__, numpy.__file__))
except ImportError:
    print("no numpy")
try:
    import scipy
    print("scipy: %s, %s" % (scipy.__version__, scipy.__file__))
except ImportError:
    print("no scipy")
try:
    import pandas
    print("pandas: %s, %s" % (pandas.__version__, pandas.__file__))
except ImportError:
    print("no pandas")
try:
    import matplotlib
    # Non-interactive backend: docs builds run headless.
    matplotlib.use('Agg')
    print("matplotlib: %s, %s" % (matplotlib.__version__, matplotlib.__file__))
except ImportError:
    print("no matplotlib")
try:
    import dask
    print("dask: %s, %s" % (dask.__version__, dask.__file__))
except ImportError:
    print("no dask")
try:
    import IPython
    print("ipython: %s, %s" % (IPython.__version__, IPython.__file__))
except ImportError:
    print("no ipython")
try:
    # Suppress seaborn's import-time warnings.
    with warnings.catch_warnings():
        # https://github.com/mwaskom/seaborn/issues/892
        warnings.simplefilter("ignore")
        import seaborn
        print("seaborn: %s, %s" % (seaborn.__version__, seaborn.__file__))
except ImportError:
    print("no seaborn")
try:
    import cartopy
    print("cartopy: %s, %s" % (cartopy.__version__, cartopy.__file__))
except ImportError:
    print("no cartopy")
try:
    import netCDF4
    print("netCDF4: %s, %s" % (netCDF4.__version__, netCDF4.__file__))
except ImportError:
    print("no netCDF4")
try:
    import rasterio
    print("rasterio: %s, %s" % (rasterio.__version__, rasterio.__file__))
except ImportError:
    print("no rasterio")

# xarray itself is required: an ImportError here should abort the build.
import xarray
print("xarray: %s, %s" % (xarray.__version__, xarray.__file__))
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    'sphinx.ext.extlinks',
    'sphinx.ext.mathjax',
    'numpydoc',
    'IPython.sphinxext.ipython_directive',
    'IPython.sphinxext.ipython_console_highlighting',
    'sphinx_gallery.gen_gallery',
]

# Shorthand roles for GitHub links, e.g. :issue:`1234` -> GH1234.
extlinks = {'issue': ('https://github.com/pydata/xarray/issues/%s', 'GH'),
            'pull': ('https://github.com/pydata/xarray/pull/%s', 'PR'),
            }

# sphinx-gallery: build the examples in gallery/ into auto_gallery/.
sphinx_gallery_conf = {'examples_dirs': 'gallery',
                       'gallery_dirs': 'auto_gallery',
                       'backreferences_dir': False
                       }

autosummary_generate = True
numpydoc_class_members_toctree = True
numpydoc_show_class_members = False

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'xarray'
# NOTE: 'copyright' shadows the builtin; this is the name Sphinx reads.
copyright = '2014-%s, xarray Developers' % datetime.datetime.now().year

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = xarray.version.short_version
# The full version, including alpha/beta/rc tags.
release = xarray.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from
# docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# otherwise, readthedocs.org uses their theme by default, so no need to specify it

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Sometimes the savefig directory doesn't exist and needs to be created
# https://github.com/ipython/ipython/issues/8733
# becomes obsolete when we can pin ipython>=5.2; see doc/environment.yml
ipython_savefig_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   '_build','html','_static')
if not os.path.exists(ipython_savefig_dir):
    os.makedirs(ipython_savefig_dir)

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = today_fmt

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'xarraydoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'xarray.tex', 'xarray Documentation',
     'xarray Developers', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'xarray', 'xarray Documentation',
     ['xarray Developers'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'xarray', 'xarray Documentation',
     'xarray Developers', 'xarray', 'N-D labeled arrays and datasets in Python.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3/', None),
    'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
    'iris': ('http://scitools.org.uk/iris/docs/latest/', None),
    'numpy': ('https://docs.scipy.org/doc/numpy/', None),
    'numba': ('https://numba.pydata.org/numba-doc/latest/', None),
}
Simplify imports for docs build: replace the repeated per-package try/except import blocks with a single importlib loop over the optional dependencies.
# -*- coding: utf-8 -*-
#
# xarray documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 6 18:57:54 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import sys
import os
import datetime
import importlib
# Echo the interpreter and module search path used for this docs build.
print("python exec:", sys.executable)
print("sys.path:", sys.path)

# Report the version and install location of each optional dependency;
# a missing dependency is noted but does not abort the build.
for name in ('numpy scipy pandas matplotlib dask IPython seaborn '
             'cartopy netCDF4 rasterio').split():
    try:
        module = importlib.import_module(name)
        if name == 'matplotlib':
            # Non-interactive backend: docs builds run headless.
            module.use('Agg')
        # Show the package directory rather than its __init__ file.
        # BUG FIX: the previous code used fname.rstrip('__init__.py'),
        # but str.rstrip strips a *character set*, not a suffix -- e.g.
        # a __file__ ending in '__init__.pyc' was not trimmed at all.
        # Remove the suffix explicitly instead.
        fname = module.__file__
        if fname.endswith('__init__.py'):
            fname = fname[:-len('__init__.py')]
        print("%s: %s, %s" % (name, module.__version__, fname))
    except ImportError:
        print("no %s" % name)

# xarray itself is required: an ImportError here should abort the build.
import xarray
print("xarray: %s, %s" % (xarray.__version__, xarray.__file__))
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    'sphinx.ext.extlinks',
    'sphinx.ext.mathjax',
    'numpydoc',
    'IPython.sphinxext.ipython_directive',
    'IPython.sphinxext.ipython_console_highlighting',
    'sphinx_gallery.gen_gallery',
]

# Shorthand roles for GitHub links, e.g. :issue:`1234` -> GH1234.
extlinks = {'issue': ('https://github.com/pydata/xarray/issues/%s', 'GH'),
            'pull': ('https://github.com/pydata/xarray/pull/%s', 'PR'),
            }

# sphinx-gallery: build the examples in gallery/ into auto_gallery/.
sphinx_gallery_conf = {'examples_dirs': 'gallery',
                       'gallery_dirs': 'auto_gallery',
                       'backreferences_dir': False
                       }

autosummary_generate = True
numpydoc_class_members_toctree = True
numpydoc_show_class_members = False

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'xarray'
# NOTE: 'copyright' shadows the builtin; this is the name Sphinx reads.
copyright = '2014-%s, xarray Developers' % datetime.datetime.now().year

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = xarray.version.short_version
# The full version, including alpha/beta/rc tags.
release = xarray.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from
# docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# otherwise, readthedocs.org uses their theme by default, so no need to specify it

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Sometimes the savefig directory doesn't exist and needs to be created
# https://github.com/ipython/ipython/issues/8733
# becomes obsolete when we can pin ipython>=5.2; see doc/environment.yml
ipython_savefig_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   '_build','html','_static')
if not os.path.exists(ipython_savefig_dir):
    os.makedirs(ipython_savefig_dir)

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = today_fmt

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'xarraydoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'xarray.tex', 'xarray Documentation',
     'xarray Developers', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'xarray', 'xarray Documentation',
     ['xarray Developers'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'xarray', 'xarray Documentation',
     'xarray Developers', 'xarray', 'N-D labeled arrays and datasets in Python.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3/', None),
    'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
    'iris': ('http://scitools.org.uk/iris/docs/latest/', None),
    'numpy': ('https://docs.scipy.org/doc/numpy/', None),
    'numba': ('https://numba.pydata.org/numba-doc/latest/', None),
}
|
#!/usr/bin/python
###################################################################################################################
### This code is developed by HighEnergyDataScientests Team.
### Do not copy or modify without written approval from one of the team members.
###################################################################################################################
import pandas as pd
import numpy as np
import xgboost as xgb
import operator
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import matplotlib
matplotlib.use("Agg") #Needed to save figures
import matplotlib.pyplot as plt
#seed = 260681
def ceate_feature_map(features):
    """Write an XGBoost feature-map file ('xgb.fmap') for *features*.

    Each line is '<index>\\t<name>\\tq', the format expected by
    Booster.get_fscore(fmap=...).  (Function name -- a typo for
    'create' -- is kept because callers use it.)
    """
    # 'with' guarantees the handle is closed even if a write raises,
    # unlike the previous explicit open()/close() pair.
    with open('xgb.fmap', 'w') as outfile:
        for i, feat in enumerate(features):
            outfile.write('{0}\t{1}\tq\n'.format(i, feat))
# Main pipeline: load Homesite quote data, engineer date features,
# label-encode categoricals, train an XGBoost classifier with early
# stopping, dump feature importances, and write test predictions.
print "## Loading Data"
train = pd.read_csv('../inputs/train.csv')
test = pd.read_csv('../inputs/test.csv')

print "## Data Processing"
# NOTE(review): `y` is assigned but never used below -- the target column
# is re-read from the split frames instead.
y = train.QuoteConversion_Flag.values
train = train.drop('QuoteNumber', axis=1)
#test = test.drop('QuoteNumber', axis=1)

# Lets play with some dates
train['Date'] = pd.to_datetime(pd.Series(train['Original_Quote_Date']))
train = train.drop('Original_Quote_Date', axis=1)
test['Date'] = pd.to_datetime(pd.Series(test['Original_Quote_Date']))
test = test.drop('Original_Quote_Date', axis=1)

# Derive year/month/weekday features, then drop the raw date column.
train['Year'] = train['Date'].apply(lambda x: int(str(x)[:4]))
train['Month'] = train['Date'].apply(lambda x: int(str(x)[5:7]))
train['weekday'] = train['Date'].dt.dayofweek
test['Year'] = test['Date'].apply(lambda x: int(str(x)[:4]))
test['Month'] = test['Date'].apply(lambda x: int(str(x)[5:7]))
test['weekday'] = test['Date'].dt.dayofweek
train = train.drop('Date', axis=1)
test = test.drop('Date', axis=1)

# Sentinel-fill missing values.
train = train.fillna(-1)
test = test.fillna(-1)

print "## Data Encoding"
# Label-encode every object (string) column; the encoder is fit on
# train+test values jointly so both frames share one mapping.
for f in train.columns:
    if train[f].dtype=='object':
        print(f)
        lbl = preprocessing.LabelEncoder()
        lbl.fit(list(train[f].values) + list(test[f].values))
        train[f] = lbl.transform(list(train[f].values))
        test[f] = lbl.transform(list(test[f].values))

# Every column except the target is a model feature.
features = [s for s in train.columns.ravel().tolist() if s != 'QuoteConversion_Flag']
print "Features: ", features
#for f in sorted(set(features)):
#    print f
#exit()

print "## Training"
params = {"objective": "binary:logistic",
          "eta": 0.3,
          "nthread":-1,
          "max_depth": 10,
          "subsample": 0.8,
          "colsample_bytree": 0.8,
          "eval_metric": "auc",
          "silent": 1,
          "seed": 1301
          }
num_boost_round = 500

print("Train a XGBoost model")
# Hold out 1% of the training rows as the early-stopping eval set.
X_train, X_valid = train_test_split(train, test_size=0.01)
y_train = X_train['QuoteConversion_Flag']
y_valid = X_valid['QuoteConversion_Flag']
dtrain = xgb.DMatrix(X_train[features], y_train)
dvalid = xgb.DMatrix(X_valid[features], y_valid)
watchlist = [(dtrain, 'train'),(dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, maximize=True, early_stopping_rounds=100, verbose_eval=True)

print "## Creating Feature Importance Map"
ceate_feature_map(features)
importance = gbm.get_fscore(fmap='xgb.fmap')
importance = sorted(importance.items(), key=operator.itemgetter(1))

# Normalized importances, plotted as a horizontal bar chart and saved.
df = pd.DataFrame(importance, columns=['feature', 'fscore'])
df['fscore'] = df['fscore'] / df['fscore'].sum()
featp = df.plot(kind='barh', x='feature', y='fscore', legend=False, figsize=(6, 10))
plt.title('XGBoost Feature Importance')
plt.xlabel('relative importance')
fig_featp = featp.get_figure()
fig_featp.savefig('feature_importance_xgb.png',bbox_inches='tight',pad_inches=1)
df.to_csv("feature_importance.csv")

print "## Predicting test data"
preds = gbm.predict(xgb.DMatrix(test[features]))
test["QuoteConversion_Flag"] = preds
test[['QuoteNumber',"QuoteConversion_Flag"]].to_csv('xgb_benchmark.csv', index=False)
Update homesite_model.py: convert Python 2 print statements to Python 3 print() function calls.
#!/usr/bin/python
###################################################################################################################
### This code is developed by HighEnergyDataScientests Team.
### Do not copy or modify without written approval from one of the team members.
###################################################################################################################
import pandas as pd
import numpy as np
import xgboost as xgb
import operator
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import matplotlib
matplotlib.use("Agg") #Needed to save figures
import matplotlib.pyplot as plt
#seed = 260681
def ceate_feature_map(features):
    """Write an XGBoost feature-map file ('xgb.fmap') for *features*.

    Each line is '<index>\\t<name>\\tq', the format expected by
    Booster.get_fscore(fmap=...).  (Function name -- a typo for
    'create' -- is kept because callers use it.)
    """
    # 'with' guarantees the handle is closed even if a write raises,
    # unlike the previous explicit open()/close() pair.
    with open('xgb.fmap', 'w') as outfile:
        for i, feat in enumerate(features):
            outfile.write('{0}\t{1}\tq\n'.format(i, feat))
# Main pipeline: load Homesite quote data, engineer date features,
# label-encode categoricals, train an XGBoost classifier with early
# stopping, dump feature importances, and write test predictions.
print("## Loading Data")
train = pd.read_csv('../inputs/train.csv')
test = pd.read_csv('../inputs/test.csv')

print("## Data Processing")
# NOTE(review): `y` is assigned but never used below -- the target column
# is re-read from the split frames instead.
y = train.QuoteConversion_Flag.values
train = train.drop('QuoteNumber', axis=1)
#test = test.drop('QuoteNumber', axis=1)

# Lets play with some dates
train['Date'] = pd.to_datetime(pd.Series(train['Original_Quote_Date']))
train = train.drop('Original_Quote_Date', axis=1)
test['Date'] = pd.to_datetime(pd.Series(test['Original_Quote_Date']))
test = test.drop('Original_Quote_Date', axis=1)

# Derive year/month/weekday features, then drop the raw date column.
train['Year'] = train['Date'].apply(lambda x: int(str(x)[:4]))
train['Month'] = train['Date'].apply(lambda x: int(str(x)[5:7]))
train['weekday'] = train['Date'].dt.dayofweek
test['Year'] = test['Date'].apply(lambda x: int(str(x)[:4]))
test['Month'] = test['Date'].apply(lambda x: int(str(x)[5:7]))
test['weekday'] = test['Date'].dt.dayofweek
train = train.drop('Date', axis=1)
test = test.drop('Date', axis=1)

# Sentinel-fill missing values.
train = train.fillna(-1)
test = test.fillna(-1)

print("## Data Encoding")
# Label-encode every object (string) column; the encoder is fit on
# train+test values jointly so both frames share one mapping.
for f in train.columns:
    if train[f].dtype=='object':
        print(f)
        lbl = preprocessing.LabelEncoder()
        lbl.fit(list(train[f].values) + list(test[f].values))
        train[f] = lbl.transform(list(train[f].values))
        test[f] = lbl.transform(list(test[f].values))

# Every column except the target is a model feature.
features = [s for s in train.columns.ravel().tolist() if s != 'QuoteConversion_Flag']
print("Features: ", features)
#for f in sorted(set(features)):
#    print f
#exit()

print("## Training")
params = {"objective": "binary:logistic",
          "eta": 0.3,
          "nthread":-1,
          "max_depth": 10,
          "subsample": 0.8,
          "colsample_bytree": 0.8,
          "eval_metric": "auc",
          "silent": 1,
          "seed": 1301
          }
num_boost_round = 500

print("Train a XGBoost model")
# Hold out 1% of the training rows as the early-stopping eval set.
X_train, X_valid = train_test_split(train, test_size=0.01)
y_train = X_train['QuoteConversion_Flag']
y_valid = X_valid['QuoteConversion_Flag']
dtrain = xgb.DMatrix(X_train[features], y_train)
dvalid = xgb.DMatrix(X_valid[features], y_valid)
watchlist = [(dtrain, 'train'),(dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, maximize=True, early_stopping_rounds=100, verbose_eval=True)

print("## Creating Feature Importance Map")
ceate_feature_map(features)
importance = gbm.get_fscore(fmap='xgb.fmap')
importance = sorted(importance.items(), key=operator.itemgetter(1))

# Normalized importances, plotted as a horizontal bar chart and saved.
df = pd.DataFrame(importance, columns=['feature', 'fscore'])
df['fscore'] = df['fscore'] / df['fscore'].sum()
featp = df.plot(kind='barh', x='feature', y='fscore', legend=False, figsize=(6, 10))
plt.title('XGBoost Feature Importance')
plt.xlabel('relative importance')
fig_featp = featp.get_figure()
fig_featp.savefig('feature_importance_xgb.png',bbox_inches='tight',pad_inches=1)
df.to_csv("feature_importance.csv")

print("## Predicting test data")
preds = gbm.predict(xgb.DMatrix(test[features]))
test["QuoteConversion_Flag"] = preds
test[['QuoteNumber',"QuoteConversion_Flag"]].to_csv('xgb_benchmark.csv', index=False)
|
# -*- coding: utf-8 -*-
# pylint: disable=C0103,W0622
'''
Sphinx documentation for Salt
'''
import functools
import sys
import os
import types
from sphinx.directives import TocTree
# pylint: disable=R0903
class Mock(object):
    '''
    Mock out specified imports

    This allows autodoc to do its thing without having oodles of req'd
    installed libs. This doesn't work with ``import *`` imports.

    http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
    '''
    def __init__(self, *args, **kwargs):
        pass

    # Mocked modules export nothing via ``from x import *``.
    __all__ = []

    def __call__(self, *args, **kwargs):
        # Calling a mock yields another mock, so arbitrarily deep
        # attribute/call chains keep working while autodoc imports.
        ret = Mock()
        # If mocked function is used as a decorator, expose decorated function.
        # if args and callable(args[-1]):
        #     functools.update_wrapper(ret, args[0])
        return ret

    @classmethod
    def __getattr__(cls, name):
        # Any attribute lookup yields another Mock, except the module
        # introspection attributes, which point at a harmless path.
        # NOTE(review): declaring __getattr__ as a classmethod is unusual;
        # it appears intended to serve both class- and instance-level
        # lookups when a Mock stands in for a module -- confirm before
        # restructuring.
        if name in ('__file__', '__path__'):
            return '/dev/null'
        else:
            return Mock()
# pylint: enable=R0903
# Module names replaced with Mock() below so autodoc can import salt code
# without these third-party packages being installed.
MOCK_MODULES = [
    # salt core
    'Crypto',
    'Crypto.Signature',
    'Crypto.Cipher',
    'Crypto.Hash',
    'Crypto.PublicKey',
    'Crypto.Random',
    'M2Crypto',
    'msgpack',
    'yaml',
    'yaml.constructor',
    'yaml.nodes',
    'yaml.scanner',
    'zmq',
    'zmq.eventloop',

    # third-party libs for cloud modules
    'libcloud',
    'libcloud.compute',
    'libcloud.compute.base',
    'libcloud.compute.deployment',
    'libcloud.compute.providers',
    'libcloud.compute.types',
    'libcloud.loadbalancer',
    'libcloud.loadbalancer.types',
    'libcloud.loadbalancer.providers',
    'libcloud.common',
    'libcloud.common.google',

    # third-party libs for netapi modules
    'cherrypy',
    'cherrypy.lib',
    'cherrypy.process',
    'cherrypy.wsgiserver',
    'cherrypy.wsgiserver.ssl_builtin',
    'tornado',
    'tornado.concurrent',
    'tornado.gen',
    'tornado.httpserver',
    'tornado.ioloop',
    'tornado.web',
    'tornado.websocket',
    'ws4py',
    'ws4py.server',
    'ws4py.server.cherrypyserver',
    'ws4py.websocket',

    # modules, renderers, states, returners, et al
    'django',
    'libvirt',
    'MySQLdb',
    'MySQLdb.cursors',
    'psutil',
    'psutil.version_info',
    'pycassa',
    'pymongo',
    'rabbitmq_server',
    'redis',
    'requests',
    'requests.exceptions',
    'rpm',
    'rpmUtils',
    'rpmUtils.arch',
    'yum',
    'OpenSSL',
    'zfs'
]

# Pre-seed sys.modules so any ``import <name>`` gets the Mock instead.
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = Mock()

# Define a fake version attribute for libcloud so docs build as supposed
sys.modules['libcloud'].__version__ = '0.0.0'

# -- Add paths to PYTHONPATH ---------------------------------------------------
try:
    docs_basepath = os.path.abspath(os.path.dirname(__file__))
except NameError:
    # sphinx-intl and six execute some code which will raise this NameError
    # assume we're in the doc/ directory
    docs_basepath = os.path.abspath(os.path.dirname('.'))

addtl_paths = (
    os.pardir,  # salt itself (for autodoc)
    '_ext',  # custom Sphinx extensions
)

for path in addtl_paths:
    sys.path.insert(0, os.path.abspath(os.path.join(docs_basepath, path)))

# We're now able to import salt
import salt.version

# NOTE(review): because ``docs_basepath`` is absolute, os.path.join discards
# the leading ``os.pardir`` and this resolves to ``<docs_basepath>/formulas``.
# Confirm whether ``os.path.join(docs_basepath, os.pardir, 'formulas')`` was
# intended before changing it.
formulas_dir = os.path.join(os.pardir, docs_basepath, 'formulas')
# ----- Intersphinx Settings ------------------------------------------------>
intersphinx_mapping = {
'python2': ('http://docs.python.org/2', None),
'python3': ('http://docs.python.org/3', None)
}
# <---- Intersphinx Settings -------------------------------------------------
# -- General Configuration -----------------------------------------------------
project = 'Salt'
copyright = '2015 SaltStack, Inc.'
version = salt.version.__version__
latest_release = '2015.8.0' # latest release
previous_release = '2014.7.6' # latest release from previous branch
previous_release_dir = '2014.7' # path on web server for previous branch
build_type = 'inactive' # latest, previous, develop, inactive
# set release to 'version' for develop so sha is used
# - otherwise -
# set release to 'latest_release' or 'previous_release'
release = previous_release # version, latest_release, previous_release
# Set google custom search engine
if release == latest_release:
search_cx = '004624818632696854117:yfmprrbw3pk' # latest
elif release.startswith('2014.7'):
search_cx = '004624818632696854117:thhslradbru' # 2014.7
elif release.startswith('2015.5'):
search_cx = '004624818632696854117:ovogwef29do' # 2015.5
else:
search_cx = '004624818632696854117:haj7bjntf4s' # develop
needs_sphinx = '1.3'
spelling_lang = 'en_US'
language = 'en'
locale_dirs = [
'_locale',
]
master_doc = 'contents'
templates_path = ['_templates']
exclude_patterns = ['_build', '_incl/*', 'ref/cli/_includes/*.rst']
extensions = [
'saltdomain', # Must come early
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'httpdomain',
'youtube',
'saltautodoc', # Must be AFTER autodoc
'shorturls',
]
try:
import sphinxcontrib.spelling
except ImportError:
pass
else:
extensions += ['sphinxcontrib.spelling']
modindex_common_prefix = ['salt.']
autosummary_generate = True
# Define a substitution for linking to the latest release tarball
rst_prolog = """\
.. |current_release_doc| replace:: :doc:`/topics/releases/{release}`
.. |saltrepo| replace:: https://github.com/saltstack/salt
.. _`salt-users`: https://groups.google.com/forum/#!forum/salt-users
.. _`salt-announce`: https://groups.google.com/forum/#!forum/salt-announce
.. _`salt-packagers`: https://groups.google.com/forum/#!forum/salt-packagers
.. |windownload| raw:: html
<p>x86: <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-x86-Setup.exe"><strong>Salt-Minion-{release}-x86-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-x86-Setup.exe.md5"><strong>md5</strong></a></p>
<p>AMD64: <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-AMD64-Setup.exe"><strong>Salt-Minion-{release}-AMD64-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
""".format(release=release)
# A shortcut for linking to tickets on the GitHub issue tracker
extlinks = {
'blob': ('https://github.com/saltstack/salt/blob/%s/%%s' % 'develop', None),
'download': ('https://cloud.github.com/downloads/saltstack/salt/%s', None),
'issue': ('https://github.com/saltstack/salt/issues/%s', 'issue '),
'formula_url': ('https://github.com/saltstack-formulas/%s', ''),
}
# ----- Localization -------------------------------------------------------->
locale_dirs = ['locale/']
gettext_compact = False
# <---- Localization ---------------------------------------------------------
### HTML options
html_theme = 'saltstack2' #change to 'saltstack' to use previous theme
html_theme_path = ['_themes']
html_title = u''
html_short_title = 'Salt'
html_static_path = ['_static']
html_logo = None # specified in the theme layout.html
html_favicon = 'favicon.ico'
html_use_smartypants = False
# Set a var if we're building docs for the live site or not
on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
# Use Google customized search or use Sphinx built-in JavaScript search
if on_saltstack:
html_search_template = 'googlesearch.html'
else:
html_search_template = 'searchbox.html'
html_additional_pages = {
'404': '404.html',
}
html_default_sidebars = [
html_search_template,
'version.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
]
html_sidebars = {
'ref/**/all/salt.*': [
html_search_template,
'version.html',
'modules-sidebar.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
],
'ref/formula/all/*': [
],
}
html_context = {
'on_saltstack': on_saltstack,
'html_default_sidebars': html_default_sidebars,
'github_base': 'https://github.com/saltstack/salt',
'github_issues': 'https://github.com/saltstack/salt/issues',
'github_downloads': 'https://github.com/saltstack/salt/downloads',
'latest_release': latest_release,
'previous_release': previous_release,
'previous_release_dir': previous_release_dir,
'search_cx': search_cx,
'build_type': build_type,
}
html_use_index = True
html_last_updated_fmt = '%b %d, %Y'
html_show_sourcelink = False
html_show_sphinx = True
html_show_copyright = True
### Latex options
latex_documents = [
('contents', 'Salt.tex', 'Salt Documentation', 'SaltStack, Inc.', 'manual'),
]
latex_logo = '_static/salt-logo.png'
# XeTeX font configuration. ``inputenc``/``utf8extra`` are blanked because
# XeTeX handles UTF-8 natively. The preamble is a *raw* string: without the
# ``r`` prefix, ``\usepackage``/``\setsansfont`` contain the string escapes
# ``\u``/``\s`` — ``\u`` is a hard SyntaxError under Python 3.
latex_elements = {
    'inputenc': '',  # use XeTeX instead of the inputenc LaTeX package.
    'utf8extra': '',
    'preamble': r'''
    \usepackage{fontspec}
    \setsansfont{Linux Biolinum O}
    \setromanfont{Linux Libertine O}
    \setmonofont{Source Code Pro}
    ''',
}
### Linux Biolinum, Linux Libertine: http://www.linuxlibertine.org/
### Source Code Pro: https://github.com/adobe-fonts/source-code-pro/releases
### Linkcheck options
linkcheck_ignore = [r'http://127.0.0.1',
r'http://salt:\d+',
r'http://local:\d+',
r'https://console.aws.amazon.com',
r'http://192.168.33.10',
r'http://domain:\d+',
r'http://123.456.789.012:\d+',
r'http://localhost',
r'https://groups.google.com/forum/#!forum/salt-users',
r'http://logstash.net/docs/latest/inputs/udp',
r'http://logstash.net/docs/latest/inputs/zeromq',
r'http://www.youtube.com/saltstack',
r'http://raven.readthedocs.org',
r'https://getsentry.com',
r'http://salt-cloud.readthedocs.org',
r'http://salt.readthedocs.org',
r'http://www.pip-installer.org/',
r'http://www.windowsazure.com/',
r'https://github.com/watching',
r'dash-feed://',
r'https://github.com/saltstack/salt/',
r'http://bootstrap.saltstack.org',
r'https://bootstrap.saltstack.com',
r'https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh',
r'media.readthedocs.org/dash/salt/latest/salt.xml',
r'https://portal.aws.amazon.com/gp/aws/securityCredentials',
r'https://help.github.com/articles/fork-a-repo',
r'dash-feed://https%3A//media.readthedocs.org/dash/salt/latest/salt.xml'
]
linkcheck_anchors = False
### Manpage options
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
authors = [
'Thomas S. Hatch <thatch45@gmail.com> and many others, please see the Authors file',
]
man_pages = [
('contents', 'salt', 'Salt Documentation', authors, 7),
('ref/cli/salt', 'salt', 'salt', authors, 1),
('ref/cli/salt-master', 'salt-master', 'salt-master Documentation', authors, 1),
('ref/cli/salt-minion', 'salt-minion', 'salt-minion Documentation', authors, 1),
('ref/cli/salt-key', 'salt-key', 'salt-key Documentation', authors, 1),
('ref/cli/salt-cp', 'salt-cp', 'salt-cp Documentation', authors, 1),
('ref/cli/salt-call', 'salt-call', 'salt-call Documentation', authors, 1),
('ref/cli/salt-syndic', 'salt-syndic', 'salt-syndic Documentation', authors, 1),
('ref/cli/salt-run', 'salt-run', 'salt-run Documentation', authors, 1),
('ref/cli/salt-ssh', 'salt-ssh', 'salt-ssh Documentation', authors, 1),
('ref/cli/salt-cloud', 'salt-cloud', 'Salt Cloud Command', authors, 1),
('ref/cli/salt-api', 'salt-api', 'salt-api Command', authors, 1),
('ref/cli/salt-unity', 'salt-unity', 'salt-unity Command', authors, 1),
]
### epub options
epub_title = 'Salt Documentation'
epub_author = 'SaltStack, Inc.'
epub_publisher = epub_author
epub_copyright = copyright
epub_scheme = 'URL'
epub_identifier = 'http://saltstack.org/'
#epub_tocdepth = 3
def skip_mod_init_member(app, what, name, obj, skip, options):
    '''``autodoc-skip-member`` hook: return True to hide a member.'''
    # Private/dunder names are not part of the documented API.
    if name.startswith('_'):
        return True
    # ``mod_init`` is a Salt loader hook, not user-facing documentation.
    if isinstance(obj, types.FunctionType) and obj.__name__ == 'mod_init':
        return True
    return False
def _normalize_version(args):
    '''Sort key for toctree entries.

    ``args`` is a ``(title, path)`` tuple; the final path segment is a
    dotted version. Zero-padding each component (``2015.8.0`` ->
    ``2015.0008.0000``) makes lexicographic order match numeric order.
    '''
    _, path = args
    return '.'.join([x.zfill(4) for x in (path.split('/')[-1].split('.'))])
class ReleasesTree(TocTree):
    '''A ``toctree`` directive variant that lists releases newest-first.'''
    option_spec = dict(TocTree.option_spec)
    def run(self):
        rst = super(ReleasesTree, self).run()
        # Re-sort the generated toctree entries by zero-padded version,
        # descending, then write them back in place.
        entries = rst[0][0]['entries'][:]
        entries.sort(key=_normalize_version, reverse=True)
        rst[0][0]['entries'][:] = entries
        return rst
def setup(app):
    '''Sphinx extension entry point: register Salt doc customisations.'''
    app.add_directive('releasestree', ReleasesTree)
    app.connect('autodoc-skip-member', skip_mod_init_member)
updated version number to not reference a specific build from the latest branch
# -*- coding: utf-8 -*-
# pylint: disable=C0103,W0622
'''
Sphinx documentation for Salt
'''
import functools
import sys
import os
import types
from sphinx.directives import TocTree
# pylint: disable=R0903
class Mock(object):
    '''
    Stand-in for libraries that are not installed at doc-build time.

    Autodoc only needs imports of those libraries to *succeed*; every
    attribute access or call on a Mock simply yields another Mock, so
    module-level code in the documented sources keeps running. This does
    not work with ``import *`` imports.

    http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
    '''
    __all__ = []

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        # Calling a mocked object (function, class, decorator) just hands
        # back another mock.
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        # Sphinx inspects ``__file__``/``__path__`` on modules; give it a
        # real-looking path string so those probes do not recurse.
        if name in ('__file__', '__path__'):
            return '/dev/null'
        return Mock()
# pylint: enable=R0903
MOCK_MODULES = [
# salt core
'Crypto',
'Crypto.Signature',
'Crypto.Cipher',
'Crypto.Hash',
'Crypto.PublicKey',
'Crypto.Random',
'M2Crypto',
'msgpack',
'yaml',
'yaml.constructor',
'yaml.nodes',
'yaml.scanner',
'zmq',
'zmq.eventloop',
# third-party libs for cloud modules
'libcloud',
'libcloud.compute',
'libcloud.compute.base',
'libcloud.compute.deployment',
'libcloud.compute.providers',
'libcloud.compute.types',
'libcloud.loadbalancer',
'libcloud.loadbalancer.types',
'libcloud.loadbalancer.providers',
'libcloud.common',
'libcloud.common.google',
# third-party libs for netapi modules
'cherrypy',
'cherrypy.lib',
'cherrypy.process',
'cherrypy.wsgiserver',
'cherrypy.wsgiserver.ssl_builtin',
'tornado',
'tornado.concurrent',
'tornado.gen',
'tornado.httpserver',
'tornado.ioloop',
'tornado.web',
'tornado.websocket',
'ws4py',
'ws4py.server',
'ws4py.server.cherrypyserver',
'ws4py.websocket',
# modules, renderers, states, returners, et al
'django',
'libvirt',
'MySQLdb',
'MySQLdb.cursors',
'psutil',
'psutil.version_info',
'pycassa',
'pymongo',
'rabbitmq_server',
'redis',
'requests',
'requests.exceptions',
'rpm',
'rpmUtils',
'rpmUtils.arch',
'yum',
'OpenSSL',
'zfs'
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# Define a fake version attribute for libcloud so docs build as supposed
sys.modules['libcloud'].__version__ = '0.0.0'
# -- Add paths to PYTHONPATH ---------------------------------------------------
try:
docs_basepath = os.path.abspath(os.path.dirname(__file__))
except NameError:
# sphinx-intl and six execute some code which will raise this NameError
# assume we're in the doc/ directory
docs_basepath = os.path.abspath(os.path.dirname('.'))
addtl_paths = (
os.pardir, # salt itself (for autodoc)
'_ext', # custom Sphinx extensions
)
for path in addtl_paths:
sys.path.insert(0, os.path.abspath(os.path.join(docs_basepath, path)))
# We're now able to import salt
import salt.version
formulas_dir = os.path.join(os.pardir, docs_basepath, 'formulas')
# ----- Intersphinx Settings ------------------------------------------------>
intersphinx_mapping = {
'python2': ('http://docs.python.org/2', None),
'python3': ('http://docs.python.org/3', None)
}
# <---- Intersphinx Settings -------------------------------------------------
# -- General Configuration -----------------------------------------------------
project = 'Salt'
copyright = '2015 SaltStack, Inc.'
version = salt.version.__version__
latest_release = '2015.8.x' # latest release
previous_release = '2014.7.6' # latest release from previous branch
previous_release_dir = '2014.7' # path on web server for previous branch
build_type = 'inactive' # latest, previous, develop, inactive
# set release to 'version' for develop so sha is used
# - otherwise -
# set release to 'latest_release' or 'previous_release'
release = previous_release # version, latest_release, previous_release
# Set google custom search engine
if release == latest_release:
search_cx = '004624818632696854117:yfmprrbw3pk' # latest
elif release.startswith('2014.7'):
search_cx = '004624818632696854117:thhslradbru' # 2014.7
elif release.startswith('2015.5'):
search_cx = '004624818632696854117:ovogwef29do' # 2015.5
else:
search_cx = '004624818632696854117:haj7bjntf4s' # develop
needs_sphinx = '1.3'
spelling_lang = 'en_US'
language = 'en'
locale_dirs = [
'_locale',
]
master_doc = 'contents'
templates_path = ['_templates']
exclude_patterns = ['_build', '_incl/*', 'ref/cli/_includes/*.rst']
extensions = [
'saltdomain', # Must come early
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'httpdomain',
'youtube',
'saltautodoc', # Must be AFTER autodoc
'shorturls',
]
try:
import sphinxcontrib.spelling
except ImportError:
pass
else:
extensions += ['sphinxcontrib.spelling']
modindex_common_prefix = ['salt.']
autosummary_generate = True
# Define a substitution for linking to the latest release tarball
rst_prolog = """\
.. |current_release_doc| replace:: :doc:`/topics/releases/{release}`
.. |saltrepo| replace:: https://github.com/saltstack/salt
.. _`salt-users`: https://groups.google.com/forum/#!forum/salt-users
.. _`salt-announce`: https://groups.google.com/forum/#!forum/salt-announce
.. _`salt-packagers`: https://groups.google.com/forum/#!forum/salt-packagers
.. |windownload| raw:: html
<p>x86: <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-x86-Setup.exe"><strong>Salt-Minion-{release}-x86-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-x86-Setup.exe.md5"><strong>md5</strong></a></p>
<p>AMD64: <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-AMD64-Setup.exe"><strong>Salt-Minion-{release}-AMD64-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
""".format(release=release)
# A shortcut for linking to tickets on the GitHub issue tracker
extlinks = {
'blob': ('https://github.com/saltstack/salt/blob/%s/%%s' % 'develop', None),
'download': ('https://cloud.github.com/downloads/saltstack/salt/%s', None),
'issue': ('https://github.com/saltstack/salt/issues/%s', 'issue '),
'formula_url': ('https://github.com/saltstack-formulas/%s', ''),
}
# ----- Localization -------------------------------------------------------->
locale_dirs = ['locale/']
gettext_compact = False
# <---- Localization ---------------------------------------------------------
### HTML options
html_theme = 'saltstack2' #change to 'saltstack' to use previous theme
html_theme_path = ['_themes']
html_title = u''
html_short_title = 'Salt'
html_static_path = ['_static']
html_logo = None # specified in the theme layout.html
html_favicon = 'favicon.ico'
html_use_smartypants = False
# Set a var if we're building docs for the live site or not
on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
# Use Google customized search or use Sphinx built-in JavaScript search
if on_saltstack:
html_search_template = 'googlesearch.html'
else:
html_search_template = 'searchbox.html'
html_additional_pages = {
'404': '404.html',
}
html_default_sidebars = [
html_search_template,
'version.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
]
html_sidebars = {
'ref/**/all/salt.*': [
html_search_template,
'version.html',
'modules-sidebar.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
],
'ref/formula/all/*': [
],
}
html_context = {
'on_saltstack': on_saltstack,
'html_default_sidebars': html_default_sidebars,
'github_base': 'https://github.com/saltstack/salt',
'github_issues': 'https://github.com/saltstack/salt/issues',
'github_downloads': 'https://github.com/saltstack/salt/downloads',
'latest_release': latest_release,
'previous_release': previous_release,
'previous_release_dir': previous_release_dir,
'search_cx': search_cx,
'build_type': build_type,
}
html_use_index = True
html_last_updated_fmt = '%b %d, %Y'
html_show_sourcelink = False
html_show_sphinx = True
html_show_copyright = True
### Latex options
latex_documents = [
('contents', 'Salt.tex', 'Salt Documentation', 'SaltStack, Inc.', 'manual'),
]
latex_logo = '_static/salt-logo.png'
# XeTeX font configuration. ``inputenc``/``utf8extra`` are blanked because
# XeTeX handles UTF-8 natively. The preamble is a *raw* string: without the
# ``r`` prefix, ``\usepackage``/``\setsansfont`` contain the string escapes
# ``\u``/``\s`` — ``\u`` is a hard SyntaxError under Python 3.
latex_elements = {
    'inputenc': '',  # use XeTeX instead of the inputenc LaTeX package.
    'utf8extra': '',
    'preamble': r'''
    \usepackage{fontspec}
    \setsansfont{Linux Biolinum O}
    \setromanfont{Linux Libertine O}
    \setmonofont{Source Code Pro}
    ''',
}
### Linux Biolinum, Linux Libertine: http://www.linuxlibertine.org/
### Source Code Pro: https://github.com/adobe-fonts/source-code-pro/releases
### Linkcheck options
linkcheck_ignore = [r'http://127.0.0.1',
r'http://salt:\d+',
r'http://local:\d+',
r'https://console.aws.amazon.com',
r'http://192.168.33.10',
r'http://domain:\d+',
r'http://123.456.789.012:\d+',
r'http://localhost',
r'https://groups.google.com/forum/#!forum/salt-users',
r'http://logstash.net/docs/latest/inputs/udp',
r'http://logstash.net/docs/latest/inputs/zeromq',
r'http://www.youtube.com/saltstack',
r'http://raven.readthedocs.org',
r'https://getsentry.com',
r'http://salt-cloud.readthedocs.org',
r'http://salt.readthedocs.org',
r'http://www.pip-installer.org/',
r'http://www.windowsazure.com/',
r'https://github.com/watching',
r'dash-feed://',
r'https://github.com/saltstack/salt/',
r'http://bootstrap.saltstack.org',
r'https://bootstrap.saltstack.com',
r'https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh',
r'media.readthedocs.org/dash/salt/latest/salt.xml',
r'https://portal.aws.amazon.com/gp/aws/securityCredentials',
r'https://help.github.com/articles/fork-a-repo',
r'dash-feed://https%3A//media.readthedocs.org/dash/salt/latest/salt.xml'
]
linkcheck_anchors = False
### Manpage options
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
authors = [
'Thomas S. Hatch <thatch45@gmail.com> and many others, please see the Authors file',
]
man_pages = [
('contents', 'salt', 'Salt Documentation', authors, 7),
('ref/cli/salt', 'salt', 'salt', authors, 1),
('ref/cli/salt-master', 'salt-master', 'salt-master Documentation', authors, 1),
('ref/cli/salt-minion', 'salt-minion', 'salt-minion Documentation', authors, 1),
('ref/cli/salt-key', 'salt-key', 'salt-key Documentation', authors, 1),
('ref/cli/salt-cp', 'salt-cp', 'salt-cp Documentation', authors, 1),
('ref/cli/salt-call', 'salt-call', 'salt-call Documentation', authors, 1),
('ref/cli/salt-syndic', 'salt-syndic', 'salt-syndic Documentation', authors, 1),
('ref/cli/salt-run', 'salt-run', 'salt-run Documentation', authors, 1),
('ref/cli/salt-ssh', 'salt-ssh', 'salt-ssh Documentation', authors, 1),
('ref/cli/salt-cloud', 'salt-cloud', 'Salt Cloud Command', authors, 1),
('ref/cli/salt-api', 'salt-api', 'salt-api Command', authors, 1),
('ref/cli/salt-unity', 'salt-unity', 'salt-unity Command', authors, 1),
]
### epub options
epub_title = 'Salt Documentation'
epub_author = 'SaltStack, Inc.'
epub_publisher = epub_author
epub_copyright = copyright
epub_scheme = 'URL'
epub_identifier = 'http://saltstack.org/'
#epub_tocdepth = 3
def skip_mod_init_member(app, what, name, obj, skip, options):
    '''
    ``autodoc-skip-member`` handler.

    Returns True (skip) for private names and for the Salt loader hook
    ``mod_init``; everything else is left for autodoc to document.
    '''
    if name.startswith('_'):
        return True
    is_mod_init = (isinstance(obj, types.FunctionType)
                   and obj.__name__ == 'mod_init')
    return is_mod_init
def _normalize_version(args):
_, path = args
return '.'.join([x.zfill(4) for x in (path.split('/')[-1].split('.'))])
class ReleasesTree(TocTree):
    '''``toctree`` directive variant that orders release notes newest-first.'''

    option_spec = dict(TocTree.option_spec)

    def run(self):
        result = super(ReleasesTree, self).run()
        # Rewrite the generated toctree's entry list in place, sorted by
        # zero-padded version number, descending.
        toc_entries = result[0][0]['entries']
        toc_entries[:] = sorted(toc_entries, key=_normalize_version,
                                reverse=True)
        return result
return rst
def setup(app):
    '''Sphinx extension entry point: register Salt doc customisations.'''
    app.add_directive('releasestree', ReleasesTree)
    app.connect('autodoc-skip-member', skip_mod_init_member)
|
# -*- coding: utf-8 -*-
#
# tox documentation build configuration file, created by
# sphinx-quickstart on Sat May 29 10:42:26 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tox'
copyright = u'2013, holger krekel and others'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
release = version = "1.7.0"
# The full version, including alpha/beta/rc tags.
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {'index': 'indexsidebar.html'}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'toxdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '12pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'tox.tex', u'tox Documentation',
u'holger krekel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tox', u'tox Documentation',
[u'holger krekel'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'tox'
epub_author = u'holger krekel'
epub_publisher = u'holger krekel'
epub_copyright = u'2010, holger krekel'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
def setup(app):
    '''Sphinx extension hook: register the ``confval`` description unit.'''
    #from sphinx.ext.autodoc import cut_lines
    #app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
    # ``confval`` lets the tox docs define and cross-reference
    # configuration values, with a "configuration value" index entry.
    app.add_description_unit('confval', 'confval',
        objname='configuration value',
        indextemplate='pair: %s; configuration value')
linkcheck_timeout = 30
linkcheck_ignore = [r'http://holgerkrekel.net']
bump doc version to 1.7.1
# -*- coding: utf-8 -*-
#
# tox documentation build configuration file, created by
# sphinx-quickstart on Sat May 29 10:42:26 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tox'
copyright = u'2013, holger krekel and others'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
release = version = "1.7.1"
# The full version, including alpha/beta/rc tags.
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {'index': 'indexsidebar.html'}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'toxdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '12pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'tox.tex', u'tox Documentation',
u'holger krekel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tox', u'tox Documentation',
[u'holger krekel'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'tox'
epub_author = u'holger krekel'
epub_publisher = u'holger krekel'
epub_copyright = u'2010, holger krekel'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
def setup(app):
#from sphinx.ext.autodoc import cut_lines
#app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
app.add_description_unit('confval', 'confval',
objname='configuration value',
indextemplate='pair: %s; configuration value')
linkcheck_timeout = 30
linkcheck_ignore = [r'http://holgerkrekel.net']
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# EVE SRP documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 26 18:10:59 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.graphviz',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'EVE SRP'
copyright = '2014, Will Ross'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Read the package source directly so building the docs does not require
# importing (and therefore installing) evesrp and its dependencies.
with open('../evesrp/__init__.py', 'r') as f:
    init_contents = f.read()
# Extract the short X.Y version and the full release string from the
# ``__version__`` assignment.  ``((?:\.\d+)+)?`` accepts any number of extra
# numeric components, so multi-part bugfix releases (e.g. 1.4.10 or 1.4.0.1)
# are captured in full; the previous ``(\.\d+)?`` allowed only one.
re_results = re.search(
    (r'^__version__ *= *u?[\'"](?P<release>(?P<version>\d+\.\d+)((?:\.\d+)+)?'
     r'(-\w+)?)[\'"]'),
    init_contents, re.MULTILINE)
if re_results:
    version = re_results.group('version')
    release = re_results.group('release')
else:
    # Fail the docs build loudly rather than publishing with a bogus version.
    raise Exception(u"Unable to find __version__ in evesrp/__init__.py")
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Interphinx setup -------------------
intersphinx_mapping = {
'principal': ('https://pythonhosted.org/Flask-Principal/', None),
'login': ('https://flask-login.readthedocs.org/en/latest/', None),
'sqlalchemy': ('http://docs.sqlalchemy.org/en/rel_0_9/', None),
'requests': ('http://docs.python-requests.org/en/latest/', None),
'python3': ('https://docs.python.org/3/', None),
'flasksqlalchemy': ('http://pythonhosted.org/Flask-SQLAlchemy/', None),
'six': ('https://pythonhosted.org/six/', None),
'flaskwtf': ('https://flask-wtf.readthedocs.org/en/v0.9.4/', None),
'flaskoauthlib': ('https://flask-oauthlib.readthedocs.org/en/latest/',
None),
'flask': ('http://flask.pocoo.org/docs/0.10/', None),
}
# -- Autodoc configuration --------------
autodoc_default_flags = ['members', 'special-members']
# -- Options for HTML output ----------------------------------------------
# Use ReadTheDocs' theme locally, and it defaults to it on their site
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'EVESRPdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'EVESRP.tex', 'EVE SRP Documentation',
'Will Ross', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'evesrp', 'EVE SRP Documentation',
['Will Ross'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'EVESRP', 'EVE SRP Documentation',
'Will Ross', 'EVESRP', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
Fix Sphinx config to detect bugfix releases
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# EVE SRP documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 26 18:10:59 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.graphviz',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'EVE SRP'
copyright = '2014, Will Ross'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
with open('../evesrp/__init__.py', 'r') as f:
init_contents = f.read()
re_results = re.search(
(r'^__version__ *= *u?[\'"](?P<release>(?P<version>\d+\.\d+)([\.\d]+)?'
r'(-\w+)?)[\'"]'),
init_contents, re.MULTILINE)
if re_results:
version = re_results.group('version')
release = re_results.group('release')
else:
raise Exception(u"Unable to find __version__ in evesrp/__init__.py")
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Interphinx setup -------------------
intersphinx_mapping = {
'principal': ('https://pythonhosted.org/Flask-Principal/', None),
'login': ('https://flask-login.readthedocs.org/en/latest/', None),
'sqlalchemy': ('http://docs.sqlalchemy.org/en/rel_0_9/', None),
'requests': ('http://docs.python-requests.org/en/latest/', None),
'python3': ('https://docs.python.org/3/', None),
'flasksqlalchemy': ('http://pythonhosted.org/Flask-SQLAlchemy/', None),
'six': ('https://pythonhosted.org/six/', None),
'flaskwtf': ('https://flask-wtf.readthedocs.org/en/v0.9.4/', None),
'flaskoauthlib': ('https://flask-oauthlib.readthedocs.org/en/latest/',
None),
'flask': ('http://flask.pocoo.org/docs/0.10/', None),
}
# -- Autodoc configuration --------------
autodoc_default_flags = ['members', 'special-members']
# -- Options for HTML output ----------------------------------------------
# Use ReadTheDocs' theme locally, and it defaults to it on their site
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'EVESRPdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'EVESRP.tex', 'EVE SRP Documentation',
'Will Ross', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'evesrp', 'EVE SRP Documentation',
['Will Ross'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'EVESRP', 'EVE SRP Documentation',
'Will Ross', 'EVESRP', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
# -*- coding: utf-8 -*-
import os
import sys
import alagitpull
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), "_ext")))
# package data
about = {}
with open("../unihan_etl/__about__.py") as fp:
exec(fp.read(), about)
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'releases',
'alagitpull',
'sphinxarg.ext' # sphinx-argparse
]
releases_unstable_prehistory = True
releases_document_name = "history"
releases_issue_uri = "https://github.com/cihai/unihan-etl/issues/%s"
releases_release_uri = "https://github.com/cihai/unihan-etl/tree/v%s"
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = about['__title__']
copyright = about['__copyright__']
# Short X.Y version: join the first two *components* of __version__.
# The old expression applied the [:2] slice to the rejoined string, which
# yielded the first two characters instead (e.g. '0.' for '0.10.1').
version = '.'.join(about['__version__'].split('.')[:2])
# Full release string, including any bugfix/pre-release parts.
release = '%s' % (about['__version__'])
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme_path = [alagitpull.get_path()]
html_static_path = ['_static']
html_favicon = 'favicon.ico'
html_theme = 'alagitpull'
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'more.html',
'searchbox.html',
]
}
html_theme_options = {
'logo': 'img/cihai.svg',
'github_user': 'cihai',
'github_repo': 'unihan-etl',
'github_type': 'star',
'github_banner': True,
'projects': alagitpull.projects,
'project_name': about['__title__'],
}
alagitpull_internal_hosts = [
'libtmux.git-pull.com',
'0.0.0.0',
]
alagitpull_external_hosts_new_window = True
htmlhelp_basename = '%sdoc' % about['__title__']
latex_documents = [
('index', '{0}.tex'.format(about['__package_name__']),
'{0} Documentation'.format(about['__title__']),
about['__author__'], 'manual'),
]
man_pages = [
('index', about['__package_name__'],
'{0} Documentation'.format(about['__title__']),
about['__author__'], 1),
]
texinfo_documents = [
('index', '{0}'.format(about['__package_name__']),
'{0} Documentation'.format(about['__title__']),
about['__author__'], about['__package_name__'],
about['__description__'], 'Miscellaneous'),
]
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'sphinx': ('http://sphinx.readthedocs.org/en/latest/', None),
'sqlalchemy': ('http://sqlalchemy.readthedocs.org/en/latest/', None),
}
enable napoleon in docs
# -*- coding: utf-8 -*-
import os
import sys
import alagitpull
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), "_ext")))
# package data
about = {}
with open("../unihan_etl/__about__.py") as fp:
exec(fp.read(), about)
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinxcontrib.napoleon',
'releases',
'alagitpull',
'sphinxarg.ext' # sphinx-argparse
]
releases_unstable_prehistory = True
releases_document_name = "history"
releases_issue_uri = "https://github.com/cihai/unihan-etl/issues/%s"
releases_release_uri = "https://github.com/cihai/unihan-etl/tree/v%s"
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = about['__title__']
copyright = about['__copyright__']
# Short X.Y version: join the first two *components* of __version__.
# The old expression applied the [:2] slice to the rejoined string, which
# yielded the first two characters instead (e.g. '0.' for '0.10.1').
version = '.'.join(about['__version__'].split('.')[:2])
# Full release string, including any bugfix/pre-release parts.
release = '%s' % (about['__version__'])
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme_path = [alagitpull.get_path()]
html_static_path = ['_static']
html_favicon = 'favicon.ico'
html_theme = 'alagitpull'
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'more.html',
'searchbox.html',
]
}
html_theme_options = {
'logo': 'img/cihai.svg',
'github_user': 'cihai',
'github_repo': 'unihan-etl',
'github_type': 'star',
'github_banner': True,
'projects': alagitpull.projects,
'project_name': about['__title__'],
}
alagitpull_internal_hosts = [
'libtmux.git-pull.com',
'0.0.0.0',
]
alagitpull_external_hosts_new_window = True
htmlhelp_basename = '%sdoc' % about['__title__']
latex_documents = [
('index', '{0}.tex'.format(about['__package_name__']),
'{0} Documentation'.format(about['__title__']),
about['__author__'], 'manual'),
]
man_pages = [
('index', about['__package_name__'],
'{0} Documentation'.format(about['__title__']),
about['__author__'], 1),
]
texinfo_documents = [
('index', '{0}'.format(about['__package_name__']),
'{0} Documentation'.format(about['__title__']),
about['__author__'], about['__package_name__'],
about['__description__'], 'Miscellaneous'),
]
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'sphinx': ('http://sphinx.readthedocs.org/en/latest/', None),
'sqlalchemy': ('http://sqlalchemy.readthedocs.org/en/latest/', None),
}
|
# -*- coding: utf-8 -*-
# pylint: disable=C0103,W0622
'''
Sphinx documentation for Salt
'''
import functools
import sys
import os
import types
import time
from sphinx.directives import TocTree
# pylint: disable=R0903
class Mock(object):
    '''
    Mock out specified imports.

    This allows autodoc to do its thing without having oodles of req'd
    installed libs. This doesn't work with ``import *`` imports.

    This Mock class can be configured to return a specific values at specific names, if required.

    http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
    '''
    def __init__(self, mapping=None, *args, **kwargs):
        """
        Mapping allows autodoc to bypass the Mock object, but actually assign
        a specific value, expected by a specific attribute returned.
        """
        self.__mapping = mapping or {}

    __all__ = []

    def __call__(self, *args, **kwargs):
        # If the mocked name is called (e.g. used as a decorator or as a
        # factory), hand back another Mock carrying the same mapping.
        # if args and callable(args[-1]):
        #     functools.update_wrapper(ret, args[0])
        return Mock(mapping=self.__mapping)

    def __getattr__(self, name):
        #__mapping = {'total': 0}
        data = None
        if name in self.__mapping:
            # An explicitly configured value takes precedence.
            data = self.__mapping.get(name)
        elif name in ('__file__', '__path__'):
            # Pretend to be a module that lives somewhere on disk.
            data = '/dev/null'
        elif name == '__qualname__':
            # Behave like a plain object without the attribute so tooling that
            # probes __qualname__ gets a normal AttributeError, not a Mock.
            raise AttributeError("'Mock' object has no attribute '__qualname__'")
        else:
            # Any other attribute is itself a Mock with the same mapping.
            data = Mock(mapping=self.__mapping)
        return data

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol: a Mock iterates as empty.
        raise StopIteration

    # Python 3 looks up ``__next__`` on the type and bypasses __getattr__
    # for implicit special-method lookup, so without this alias iterating a
    # Mock raises TypeError on Python 3.
    __next__ = next
# pylint: enable=R0903
# Module names to stub with Mock so autodoc can import salt code without the
# real third-party dependencies installed. Duplicate entries removed
# ('Crypto.Signature' and 'msgpack' were listed twice); the sys.modules
# assignment below is idempotent, so this is behavior-preserving.
MOCK_MODULES = [
    # Python stdlib
    'user',
    # salt core
    'Crypto',
    'Crypto.Signature',
    'Crypto.Cipher',
    'Crypto.Hash',
    'Crypto.PublicKey',
    'Crypto.Random',
    'Crypto.Signature.PKCS1_v1_5',
    'M2Crypto',
    'msgpack',
    'yaml',
    'yaml.constructor',
    'yaml.nodes',
    'yaml.parser',
    'yaml.scanner',
    'zmq',
    'zmq.eventloop',
    'zmq.eventloop.ioloop',
    # third-party libs for cloud modules
    'libcloud',
    'libcloud.compute',
    'libcloud.compute.base',
    'libcloud.compute.deployment',
    'libcloud.compute.providers',
    'libcloud.compute.types',
    'libcloud.loadbalancer',
    'libcloud.loadbalancer.types',
    'libcloud.loadbalancer.providers',
    'libcloud.common',
    'libcloud.common.google',
    # third-party libs for netapi modules
    'cherrypy',
    'cherrypy.lib',
    'cherrypy.process',
    'cherrypy.wsgiserver',
    'cherrypy.wsgiserver.ssl_builtin',
    'tornado',
    'tornado.concurrent',
    'tornado.escape',
    'tornado.gen',
    'tornado.httpclient',
    'tornado.httpserver',
    'tornado.httputil',
    'tornado.ioloop',
    'tornado.iostream',
    'tornado.netutil',
    'tornado.simple_httpclient',
    'tornado.stack_context',
    'tornado.web',
    'tornado.websocket',
    'tornado.locks',
    'ws4py',
    'ws4py.server',
    'ws4py.server.cherrypyserver',
    'ws4py.websocket',
    # modules, renderers, states, returners, et al
    'ClusterShell',
    'ClusterShell.NodeSet',
    'django',
    'libvirt',
    'MySQLdb',
    'MySQLdb.cursors',
    'nagios_json',
    'psutil',
    'pycassa',
    'pymongo',
    'rabbitmq_server',
    'redis',
    #'requests',
    #'requests.exceptions',
    'rpm',
    'rpmUtils',
    'rpmUtils.arch',
    'yum',
    'OpenSSL',
    'zfs',
    'salt.ext.six.moves.winreg',
    'win32security',
    'ntsecuritycon',
    'napalm',
    'dson',
    'jnpr',
    'json',
    'lxml',
    'lxml.etree',
    'jnpr.junos',
    'jnpr.junos.utils',
    'jnpr.junos.utils.config',
    'jnpr.junos.utils.sw',
    'dns',
    'dns.resolver',
    'keyring',
    'netaddr',
    'netaddr.IPAddress',
    'netaddr.core',
    'netaddr.core.AddrFormatError',
    'pyroute2',
    'pyroute2.ipdb',
    'avahi',
    'dbus',
    'twisted',
    'twisted.internet',
    'twisted.internet.protocol',
    'twisted.internet.protocol.DatagramProtocol',
]
# Install the stubs so autodoc can import salt modules without the real deps.
for mod_name in MOCK_MODULES:
    # psutil is special-cased: Sphinx crashes unless 'total' is a real number.
    overrides = {'total': 0} if mod_name == 'psutil' else None
    sys.modules[mod_name] = Mock(mapping=overrides)
def mock_decorator_with_params(*oargs, **okwargs):
    '''
    Optionally mock a decorator that takes parameters
    E.g.:
    @blah(stuff=True)
    def things():
        pass
    '''
    def wrap(target, *iargs, **ikwargs):
        # Decorating a callable returns it untouched; anything else (e.g. a
        # config value being "decorated") becomes a plain Mock.
        return target if hasattr(target, '__call__') else Mock()
    return wrap
# Define a fake version attribute for the following libs.
sys.modules['libcloud'].__version__ = '0.0.0'
sys.modules['msgpack'].version = (1, 0, 0)
sys.modules['psutil'].version_info = (3, 0, 0)
sys.modules['pymongo'].version = '0.0.0'
sys.modules['ntsecuritycon'].STANDARD_RIGHTS_REQUIRED = 0
sys.modules['ntsecuritycon'].SYNCHRONIZE = 0
# cherrypy.config is used as a decorator with parameters in the netapi
# modules, so it needs the decorator-aware mock rather than a plain Mock.
sys.modules['cherrypy'].config = mock_decorator_with_params
# -- Add paths to PYTHONPATH ---------------------------------------------------
try:
    docs_basepath = os.path.abspath(os.path.dirname(__file__))
except NameError:
    # sphinx-intl and six execute some code which will raise this NameError
    # assume we're in the doc/ directory
    docs_basepath = os.path.abspath(os.path.dirname('.'))
addtl_paths = (
    os.pardir,  # salt itself (for autodoc)
    '_ext',  # custom Sphinx extensions
)
for path in addtl_paths:
    sys.path.insert(0, os.path.abspath(os.path.join(docs_basepath, path)))
# We're now able to import salt
import salt.version
# NOTE(review): docs_basepath is absolute, so os.path.join discards the
# leading os.pardir and 'formulas' resolves inside docs_basepath. Possibly
# join(docs_basepath, os.pardir, 'formulas') was intended — confirm.
formulas_dir = os.path.join(os.pardir, docs_basepath, 'formulas')
# ----- Intersphinx Settings ------------------------------------------------>
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None)
}
# <---- Intersphinx Settings -------------------------------------------------
# -- General Configuration -----------------------------------------------------
# Set a var if we're building docs for the live site or not
on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
project = 'Salt'
# Version comes straight from the salt package imported above.
version = salt.version.__version__
latest_release = '2018.3.2'  # latest release
previous_release = '2017.7.6'  # latest release from previous branch
previous_release_dir = '2017.7'  # path on web server for previous branch
next_release = ''  # next release
next_release_dir = ''  # path on web server for next release branch
today = ''
copyright = ''
if on_saltstack:
    # Live-site builds stamp generation time and year into the page footer.
    today = "Generated on " + time.strftime("%B %d, %Y") + " at " + time.strftime("%X %Z") + "."
    copyright = time.strftime("%Y")
# < --- START do not merge these settings to other branches START ---> #
build_type = 'previous'  # latest, previous, develop, next
release = previous_release  # version, latest_release, previous_release
# < --- END do not merge these settings to other branches END ---> #
# Set google custom search engine
# Each Google CSE id below is scoped to the docs of one release line.
if release == latest_release:
    search_cx = '004624818632696854117:yfmprrbw3pk'  # latest
elif release.startswith('2014.7'):
    search_cx = '004624818632696854117:thhslradbru'  # 2014.7
elif release.startswith('2015.5'):
    search_cx = '004624818632696854117:ovogwef29do'  # 2015.5
elif release.startswith('2015.8'):
    search_cx = '004624818632696854117:aw_tegffouy'  # 2015.8
else:
    search_cx = '004624818632696854117:haj7bjntf4s'  # develop
needs_sphinx = '1.3'
spelling_lang = 'en_US'
language = 'en'
locale_dirs = [
    '_locale',
]
master_doc = 'contents'
templates_path = ['_templates']
exclude_patterns = ['_build', '_incl/*', 'ref/cli/_includes/*.rst']
extensions = [
    'saltdomain',  # Must come early
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx.ext.autosummary',
    'sphinx.ext.extlinks',
    'sphinx.ext.intersphinx',
    'httpdomain',
    'youtube',
    'saltautodoc',  # Must be AFTER autodoc
    'shorturls',
]
# Spell-checking is optional: enable the extension only when it is installed.
try:
    import sphinxcontrib.spelling
except ImportError:
    pass
else:
    extensions += ['sphinxcontrib.spelling']
modindex_common_prefix = ['salt.']
autosummary_generate = True
# Define a substitution for linking to the latest release tarball
rst_prolog = """\
.. |current_release_doc| replace:: :doc:`/topics/releases/{release}`
.. |saltrepo| replace:: https://github.com/saltstack/salt
.. _`salt-users`: https://groups.google.com/forum/#!forum/salt-users
.. _`salt-announce`: https://groups.google.com/forum/#!forum/salt-announce
.. _`salt-packagers`: https://groups.google.com/forum/#!forum/salt-packagers
.. _`salt-slack`: https://saltstackcommunity.herokuapp.com/
.. |windownload| raw:: html
<p>Python2 x86: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-x86-Setup.exe"><strong>Salt-Minion-{release}-x86-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-x86-Setup.exe.md5"><strong>md5</strong></a></p>
<p>Python2 AMD64: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-AMD64-Setup.exe"><strong>Salt-Minion-{release}-AMD64-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
<p>Python3 x86: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-x86-Setup.exe"><strong>Salt-Minion-{release}-x86-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-x86-Setup.exe.md5"><strong>md5</strong></a></p>
<p>Python3 AMD64: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-AMD64-Setup.exe"><strong>Salt-Minion-{release}-AMD64-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
.. |osxdownloadpy2| raw:: html
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-py2-x86_64.pkg"><strong>salt-{release}-py2-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-py2-x86_64.pkg.md5"><strong>md5</strong></a></p>
.. |osxdownloadpy3| raw:: html
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-py3-x86_64.pkg"><strong>salt-{release}-py3-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-py3-x86_64.pkg.md5"><strong>md5</strong></a></p>
""".format(release=release)
# A shortcut for linking to tickets on the GitHub issue tracker
extlinks = {
    'blob': ('https://github.com/saltstack/salt/blob/%s/%%s' % 'develop', None),
    'issue': ('https://github.com/saltstack/salt/issues/%s', 'issue #'),
    'pull': ('https://github.com/saltstack/salt/pull/%s', 'PR #'),
    'formula_url': ('https://github.com/saltstack-formulas/%s', ''),
}
# ----- Localization -------------------------------------------------------->
# NOTE(review): this clobbers the earlier ``locale_dirs = ['_locale']``
# assignment; only 'locale/' is effective as written — confirm intent.
locale_dirs = ['locale/']
gettext_compact = False
# <---- Localization ---------------------------------------------------------
### HTML options
html_theme = os.environ.get('HTML_THEME', 'saltstack2')  # set 'HTML_THEME=saltstack' to use previous theme
html_theme_path = ['_themes']
html_title = u''
html_short_title = 'Salt'
html_static_path = ['_static']
html_logo = None  # specified in the theme layout.html
html_favicon = 'favicon.ico'
html_use_smartypants = False
# Use Google customized search or use Sphinx built-in JavaScript search
if on_saltstack:
    html_search_template = 'googlesearch.html'
else:
    html_search_template = 'searchbox.html'
html_additional_pages = {
    '404': '404.html',
}
# Sidebar layout used by most pages; per-pattern overrides in html_sidebars.
html_default_sidebars = [
    html_search_template,
    'version.html',
    'localtoc.html',
    'relations.html',
    'sourcelink.html',
    'saltstack.html',
]
html_sidebars = {
    'ref/**/all/salt.*': [
        html_search_template,
        'version.html',
        'modules-sidebar.html',
        'localtoc.html',
        'relations.html',
        'sourcelink.html',
        'saltstack.html',
    ],
    'ref/formula/all/*': [
    ],
}
# Values exposed to the Jinja page templates.
html_context = {
    'on_saltstack': on_saltstack,
    'html_default_sidebars': html_default_sidebars,
    'github_base': 'https://github.com/saltstack/salt',
    'github_issues': 'https://github.com/saltstack/salt/issues',
    'github_downloads': 'https://github.com/saltstack/salt/downloads',
    'latest_release': latest_release,
    'previous_release': previous_release,
    'previous_release_dir': previous_release_dir,
    'next_release': next_release,
    'next_release_dir': next_release_dir,
    'search_cx': search_cx,
    'build_type': build_type,
    'today': today,
    'copyright': copyright,
}
html_use_index = True
html_last_updated_fmt = '%b %d, %Y'
html_show_sourcelink = False
html_show_sphinx = True
html_show_copyright = True
### Latex options
latex_documents = [
    ('contents', 'Salt.tex', 'Salt Documentation', 'SaltStack, Inc.', 'manual'),
]
latex_logo = '_static/salt-logo.png'
latex_elements = {
    'inputenc': '',  # use XeTeX instead of the inputenc LaTeX package.
    'utf8extra': '',
    # NOTE: the preamble must be a raw string. In a normal string literal,
    # the '\u' in '\usepackage' is parsed as a (malformed) unicode escape,
    # which is a SyntaxError on Python 3.
    'preamble': r'''
\usepackage{fontspec}
\setsansfont{Linux Biolinum O}
\setromanfont{Linux Libertine O}
\setmonofont{Source Code Pro}
''',
}
### Linux Biolinum, Linux Libertine: http://www.linuxlibertine.org/
### Source Code Pro: https://github.com/adobe-fonts/source-code-pro/releases
### Linkcheck options
# URL regexes that 'make linkcheck' should not try to fetch: local/example
# addresses, login-walled pages, and dash-feed pseudo-schemes.
linkcheck_ignore = [r'http://127.0.0.1',
                    r'http://salt:\d+',
                    r'http://local:\d+',
                    r'https://console.aws.amazon.com',
                    r'http://192.168.33.10',
                    r'http://domain:\d+',
                    r'http://123.456.789.012:\d+',
                    r'http://localhost',
                    r'https://groups.google.com/forum/#!forum/salt-users',
                    r'http://logstash.net/docs/latest/inputs/udp',
                    r'http://logstash.net/docs/latest/inputs/zeromq',
                    r'http://www.youtube.com/saltstack',
                    r'https://raven.readthedocs.io',
                    r'https://getsentry.com',
                    r'https://salt-cloud.readthedocs.io',
                    r'https://salt.readthedocs.io',
                    r'http://www.pip-installer.org/',
                    r'http://www.windowsazure.com/',
                    r'https://github.com/watching',
                    r'dash-feed://',
                    r'https://github.com/saltstack/salt/',
                    r'http://bootstrap.saltstack.org',
                    r'https://bootstrap.saltstack.com',
                    r'https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh',
                    r'media.readthedocs.org/dash/salt/latest/salt.xml',
                    r'https://portal.aws.amazon.com/gp/aws/securityCredentials',
                    r'https://help.github.com/articles/fork-a-repo',
                    r'dash-feed://https%3A//media.readthedocs.org/dash/salt/latest/salt.xml'
                    ]
linkcheck_anchors = False
### Manpage options
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
authors = [
    'Thomas S. Hatch <thatch45@gmail.com> and many others, please see the Authors file',
]
man_pages = [
    # Section 7 (misc) for the full docs; section 1 for each CLI tool.
    ('contents', 'salt', 'Salt Documentation', authors, 7),
    ('ref/cli/salt', 'salt', 'salt', authors, 1),
    ('ref/cli/salt-master', 'salt-master', 'salt-master Documentation', authors, 1),
    ('ref/cli/salt-minion', 'salt-minion', 'salt-minion Documentation', authors, 1),
    ('ref/cli/salt-key', 'salt-key', 'salt-key Documentation', authors, 1),
    ('ref/cli/salt-cp', 'salt-cp', 'salt-cp Documentation', authors, 1),
    ('ref/cli/salt-call', 'salt-call', 'salt-call Documentation', authors, 1),
    ('ref/cli/salt-proxy', 'salt-proxy', 'salt-proxy Documentation', authors, 1),
    ('ref/cli/salt-syndic', 'salt-syndic', 'salt-syndic Documentation', authors, 1),
    ('ref/cli/salt-run', 'salt-run', 'salt-run Documentation', authors, 1),
    ('ref/cli/salt-ssh', 'salt-ssh', 'salt-ssh Documentation', authors, 1),
    ('ref/cli/salt-cloud', 'salt-cloud', 'Salt Cloud Command', authors, 1),
    ('ref/cli/salt-api', 'salt-api', 'salt-api Command', authors, 1),
    ('ref/cli/salt-unity', 'salt-unity', 'salt-unity Command', authors, 1),
    ('ref/cli/spm', 'spm', 'Salt Package Manager Command', authors, 1),
]
### epub options
epub_title = 'Salt Documentation'
epub_author = 'SaltStack, Inc.'
epub_publisher = epub_author
epub_copyright = copyright
epub_scheme = 'URL'
epub_identifier = 'http://saltstack.com/'
#epub_tocdepth = 3
def skip_mod_init_member(app, what, name, obj, skip, options):
    '''
    autodoc-skip-member hook: hide private names (leading underscore) and
    per-module ``mod_init`` loader functions from the generated docs.
    '''
    if name.startswith('_'):
        return True
    return isinstance(obj, types.FunctionType) and obj.__name__ == 'mod_init'
def _normalize_version(args):
_, path = args
return '.'.join([x.zfill(4) for x in (path.split('/')[-1].split('.'))])
class ReleasesTree(TocTree):
    '''
    A ``toctree`` variant that orders release documents newest-first using
    version-aware comparison rather than plain string order.
    '''
    option_spec = dict(TocTree.option_spec)

    def run(self):
        result = super(ReleasesTree, self).run()
        toc = result[0][0]
        # Replace the entry list in place so the toctree node keeps its
        # original list object.
        toc['entries'][:] = sorted(toc['entries'],
                                   key=_normalize_version, reverse=True)
        return result
def setup(app):
    '''Sphinx extension entry point: register the ``releasestree`` directive
    and hook the autodoc member filter.'''
    app.add_directive('releasestree', ReleasesTree)
    app.connect('autodoc-skip-member', skip_mod_init_member)
Update release versions for the 2017.7 branch
# -*- coding: utf-8 -*-
# pylint: disable=C0103,W0622
'''
Sphinx documentation for Salt
'''
import functools
import sys
import os
import types
import time
from sphinx.directives import TocTree
# pylint: disable=R0903
class Mock(object):
    '''
    Mock out specified imports.
    This allows autodoc to do its thing without having oodles of req'd
    installed libs. This doesn't work with ``import *`` imports.
    This Mock class can be configured to return specific values at specific
    names, if required.
    http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
    '''
    def __init__(self, mapping=None, *args, **kwargs):
        """
        Mapping allows autodoc to bypass the Mock object, but actually assign
        a specific value, expected by a specific attribute returned.
        """
        # Stored under the mangled name _Mock__mapping; child Mocks created
        # in __call__/__getattr__ inherit the same overrides.
        self.__mapping = mapping or {}

    __all__ = []

    def __call__(self, *args, **kwargs):
        # Calling any mocked name yields another Mock carrying the same
        # overrides, so arbitrarily deep attribute/call chains keep working.
        return Mock(mapping=self.__mapping)

    def __getattr__(self, name):
        # Lookup priority: explicit override -> module-ish special attrs ->
        # a fresh nested Mock.
        if name in self.__mapping:
            return self.__mapping.get(name)
        if name in ('__file__', '__path__'):
            return '/dev/null'
        if name == '__qualname__':
            # Sphinx probes __qualname__ when formatting signatures; behave
            # like a plain object that simply lacks the attribute.
            raise AttributeError("'Mock' object has no attribute '__qualname__'")
        return Mock(mapping=self.__mapping)

    def __iter__(self):
        return self

    def __next__(self):
        # Iterating a Mock yields nothing. __next__ is required on Python 3:
        # special-method lookup happens on the type and bypasses __getattr__,
        # so without it list(Mock()) raised TypeError. (Fix: Py3 support.)
        raise StopIteration

    # Python 2 spells the iterator protocol method ``next``.
    next = __next__
# pylint: enable=R0903
# Module names to stub with Mock so autodoc can import salt code without the
# real third-party dependencies installed. Duplicate entries removed
# ('Crypto.Signature' and 'msgpack' were listed twice); the sys.modules
# assignment below is idempotent, so this is behavior-preserving.
MOCK_MODULES = [
    # Python stdlib
    'user',
    # salt core
    'Crypto',
    'Crypto.Signature',
    'Crypto.Cipher',
    'Crypto.Hash',
    'Crypto.PublicKey',
    'Crypto.Random',
    'Crypto.Signature.PKCS1_v1_5',
    'M2Crypto',
    'msgpack',
    'yaml',
    'yaml.constructor',
    'yaml.nodes',
    'yaml.parser',
    'yaml.scanner',
    'zmq',
    'zmq.eventloop',
    'zmq.eventloop.ioloop',
    # third-party libs for cloud modules
    'libcloud',
    'libcloud.compute',
    'libcloud.compute.base',
    'libcloud.compute.deployment',
    'libcloud.compute.providers',
    'libcloud.compute.types',
    'libcloud.loadbalancer',
    'libcloud.loadbalancer.types',
    'libcloud.loadbalancer.providers',
    'libcloud.common',
    'libcloud.common.google',
    # third-party libs for netapi modules
    'cherrypy',
    'cherrypy.lib',
    'cherrypy.process',
    'cherrypy.wsgiserver',
    'cherrypy.wsgiserver.ssl_builtin',
    'tornado',
    'tornado.concurrent',
    'tornado.escape',
    'tornado.gen',
    'tornado.httpclient',
    'tornado.httpserver',
    'tornado.httputil',
    'tornado.ioloop',
    'tornado.iostream',
    'tornado.netutil',
    'tornado.simple_httpclient',
    'tornado.stack_context',
    'tornado.web',
    'tornado.websocket',
    'tornado.locks',
    'ws4py',
    'ws4py.server',
    'ws4py.server.cherrypyserver',
    'ws4py.websocket',
    # modules, renderers, states, returners, et al
    'ClusterShell',
    'ClusterShell.NodeSet',
    'django',
    'libvirt',
    'MySQLdb',
    'MySQLdb.cursors',
    'nagios_json',
    'psutil',
    'pycassa',
    'pymongo',
    'rabbitmq_server',
    'redis',
    #'requests',
    #'requests.exceptions',
    'rpm',
    'rpmUtils',
    'rpmUtils.arch',
    'yum',
    'OpenSSL',
    'zfs',
    'salt.ext.six.moves.winreg',
    'win32security',
    'ntsecuritycon',
    'napalm',
    'dson',
    'jnpr',
    'json',
    'lxml',
    'lxml.etree',
    'jnpr.junos',
    'jnpr.junos.utils',
    'jnpr.junos.utils.config',
    'jnpr.junos.utils.sw',
    'dns',
    'dns.resolver',
    'keyring',
    'netaddr',
    'netaddr.IPAddress',
    'netaddr.core',
    'netaddr.core.AddrFormatError',
    'pyroute2',
    'pyroute2.ipdb',
    'avahi',
    'dbus',
    'twisted',
    'twisted.internet',
    'twisted.internet.protocol',
    'twisted.internet.protocol.DatagramProtocol',
]
# Install the stubs so autodoc can import salt modules without the real deps.
for mod_name in MOCK_MODULES:
    # psutil is special-cased: Sphinx crashes unless 'total' is a real number.
    overrides = {'total': 0} if mod_name == 'psutil' else None
    sys.modules[mod_name] = Mock(mapping=overrides)
def mock_decorator_with_params(*oargs, **okwargs):
    '''
    Optionally mock a decorator that takes parameters
    E.g.:
    @blah(stuff=True)
    def things():
        pass
    '''
    def wrap(target, *iargs, **ikwargs):
        # Decorating a callable returns it untouched; anything else (e.g. a
        # config value being "decorated") becomes a plain Mock.
        return target if hasattr(target, '__call__') else Mock()
    return wrap
# Define a fake version attribute for the following libs.
sys.modules['libcloud'].__version__ = '0.0.0'
sys.modules['msgpack'].version = (1, 0, 0)
sys.modules['psutil'].version_info = (3, 0, 0)
sys.modules['pymongo'].version = '0.0.0'
sys.modules['ntsecuritycon'].STANDARD_RIGHTS_REQUIRED = 0
sys.modules['ntsecuritycon'].SYNCHRONIZE = 0
# cherrypy.config is used as a decorator with parameters in the netapi
# modules, so it needs the decorator-aware mock rather than a plain Mock.
sys.modules['cherrypy'].config = mock_decorator_with_params
# -- Add paths to PYTHONPATH ---------------------------------------------------
try:
docs_basepath = os.path.abspath(os.path.dirname(__file__))
except NameError:
# sphinx-intl and six execute some code which will raise this NameError
# assume we're in the doc/ directory
docs_basepath = os.path.abspath(os.path.dirname('.'))
addtl_paths = (
os.pardir, # salt itself (for autodoc)
'_ext', # custom Sphinx extensions
)
for path in addtl_paths:
sys.path.insert(0, os.path.abspath(os.path.join(docs_basepath, path)))
# We're now able to import salt
import salt.version
formulas_dir = os.path.join(os.pardir, docs_basepath, 'formulas')
# ----- Intersphinx Settings ------------------------------------------------>
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None)
}
# <---- Intersphinx Settings -------------------------------------------------
# -- General Configuration -----------------------------------------------------
# Set a var if we're building docs for the live site or not
on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
project = 'Salt'
version = salt.version.__version__
latest_release = '2018.3.2' # latest release
previous_release = '2017.7.7' # latest release from previous branch
previous_release_dir = '2017.7' # path on web server for previous branch
next_release = '' # next release
next_release_dir = '' # path on web server for next release branch
today = ''
copyright = ''
if on_saltstack:
today = "Generated on " + time.strftime("%B %d, %Y") + " at " + time.strftime("%X %Z") + "."
copyright = time.strftime("%Y")
# < --- START do not merge these settings to other branches START ---> #
build_type = 'previous' # latest, previous, develop, next
release = previous_release # version, latest_release, previous_release
# < --- END do not merge these settings to other branches END ---> #
# Set google custom search engine
if release == latest_release:
search_cx = '004624818632696854117:yfmprrbw3pk' # latest
elif release.startswith('2014.7'):
search_cx = '004624818632696854117:thhslradbru' # 2014.7
elif release.startswith('2015.5'):
search_cx = '004624818632696854117:ovogwef29do' # 2015.5
elif release.startswith('2015.8'):
search_cx = '004624818632696854117:aw_tegffouy' # 2015.8
else:
search_cx = '004624818632696854117:haj7bjntf4s' # develop
needs_sphinx = '1.3'
spelling_lang = 'en_US'
language = 'en'
locale_dirs = [
'_locale',
]
master_doc = 'contents'
templates_path = ['_templates']
exclude_patterns = ['_build', '_incl/*', 'ref/cli/_includes/*.rst']
extensions = [
'saltdomain', # Must come early
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'httpdomain',
'youtube',
'saltautodoc', # Must be AFTER autodoc
'shorturls',
]
try:
import sphinxcontrib.spelling
except ImportError:
pass
else:
extensions += ['sphinxcontrib.spelling']
modindex_common_prefix = ['salt.']
autosummary_generate = True
# Define a substitution for linking to the latest release tarball
rst_prolog = """\
.. |current_release_doc| replace:: :doc:`/topics/releases/{release}`
.. |saltrepo| replace:: https://github.com/saltstack/salt
.. _`salt-users`: https://groups.google.com/forum/#!forum/salt-users
.. _`salt-announce`: https://groups.google.com/forum/#!forum/salt-announce
.. _`salt-packagers`: https://groups.google.com/forum/#!forum/salt-packagers
.. _`salt-slack`: https://saltstackcommunity.herokuapp.com/
.. |windownload| raw:: html
<p>Python2 x86: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-x86-Setup.exe"><strong>Salt-Minion-{release}-x86-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-x86-Setup.exe.md5"><strong>md5</strong></a></p>
<p>Python2 AMD64: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-AMD64-Setup.exe"><strong>Salt-Minion-{release}-AMD64-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
<p>Python3 x86: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-x86-Setup.exe"><strong>Salt-Minion-{release}-x86-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-x86-Setup.exe.md5"><strong>md5</strong></a></p>
<p>Python3 AMD64: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-AMD64-Setup.exe"><strong>Salt-Minion-{release}-AMD64-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
.. |osxdownloadpy2| raw:: html
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-py2-x86_64.pkg"><strong>salt-{release}-py2-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-py2-x86_64.pkg.md5"><strong>md5</strong></a></p>
.. |osxdownloadpy3| raw:: html
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-py3-x86_64.pkg"><strong>salt-{release}-py3-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-py3-x86_64.pkg.md5"><strong>md5</strong></a></p>
""".format(release=release)
# A shortcut for linking to tickets on the GitHub issue tracker
extlinks = {
'blob': ('https://github.com/saltstack/salt/blob/%s/%%s' % 'develop', None),
'issue': ('https://github.com/saltstack/salt/issues/%s', 'issue #'),
'pull': ('https://github.com/saltstack/salt/pull/%s', 'PR #'),
'formula_url': ('https://github.com/saltstack-formulas/%s', ''),
}
# ----- Localization -------------------------------------------------------->
locale_dirs = ['locale/']
gettext_compact = False
# <---- Localization ---------------------------------------------------------
### HTML options
html_theme = os.environ.get('HTML_THEME', 'saltstack2') # set 'HTML_THEME=saltstack' to use previous theme
html_theme_path = ['_themes']
html_title = u''
html_short_title = 'Salt'
html_static_path = ['_static']
html_logo = None # specified in the theme layout.html
html_favicon = 'favicon.ico'
html_use_smartypants = False
# Use Google customized search or use Sphinx built-in JavaScript search
if on_saltstack:
html_search_template = 'googlesearch.html'
else:
html_search_template = 'searchbox.html'
html_additional_pages = {
'404': '404.html',
}
html_default_sidebars = [
html_search_template,
'version.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
]
html_sidebars = {
'ref/**/all/salt.*': [
html_search_template,
'version.html',
'modules-sidebar.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
],
'ref/formula/all/*': [
],
}
html_context = {
'on_saltstack': on_saltstack,
'html_default_sidebars': html_default_sidebars,
'github_base': 'https://github.com/saltstack/salt',
'github_issues': 'https://github.com/saltstack/salt/issues',
'github_downloads': 'https://github.com/saltstack/salt/downloads',
'latest_release': latest_release,
'previous_release': previous_release,
'previous_release_dir': previous_release_dir,
'next_release': next_release,
'next_release_dir': next_release_dir,
'search_cx': search_cx,
'build_type': build_type,
'today': today,
'copyright': copyright,
}
html_use_index = True
html_last_updated_fmt = '%b %d, %Y'
html_show_sourcelink = False
html_show_sphinx = True
html_show_copyright = True
### Latex options
latex_documents = [
    ('contents', 'Salt.tex', 'Salt Documentation', 'SaltStack, Inc.', 'manual'),
]
latex_logo = '_static/salt-logo.png'
latex_elements = {
    'inputenc': '',  # use XeTeX instead of the inputenc LaTeX package.
    'utf8extra': '',
    # NOTE: the preamble must be a raw string. In a normal string literal,
    # the '\u' in '\usepackage' is parsed as a (malformed) unicode escape,
    # which is a SyntaxError on Python 3.
    'preamble': r'''
\usepackage{fontspec}
\setsansfont{Linux Biolinum O}
\setromanfont{Linux Libertine O}
\setmonofont{Source Code Pro}
''',
}
### Linux Biolinum, Linux Libertine: http://www.linuxlibertine.org/
### Source Code Pro: https://github.com/adobe-fonts/source-code-pro/releases
### Linkcheck options
linkcheck_ignore = [r'http://127.0.0.1',
r'http://salt:\d+',
r'http://local:\d+',
r'https://console.aws.amazon.com',
r'http://192.168.33.10',
r'http://domain:\d+',
r'http://123.456.789.012:\d+',
r'http://localhost',
r'https://groups.google.com/forum/#!forum/salt-users',
r'http://logstash.net/docs/latest/inputs/udp',
r'http://logstash.net/docs/latest/inputs/zeromq',
r'http://www.youtube.com/saltstack',
r'https://raven.readthedocs.io',
r'https://getsentry.com',
r'https://salt-cloud.readthedocs.io',
r'https://salt.readthedocs.io',
r'http://www.pip-installer.org/',
r'http://www.windowsazure.com/',
r'https://github.com/watching',
r'dash-feed://',
r'https://github.com/saltstack/salt/',
r'http://bootstrap.saltstack.org',
r'https://bootstrap.saltstack.com',
r'https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh',
r'media.readthedocs.org/dash/salt/latest/salt.xml',
r'https://portal.aws.amazon.com/gp/aws/securityCredentials',
r'https://help.github.com/articles/fork-a-repo',
r'dash-feed://https%3A//media.readthedocs.org/dash/salt/latest/salt.xml'
]
linkcheck_anchors = False
### Manpage options
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
authors = [
'Thomas S. Hatch <thatch45@gmail.com> and many others, please see the Authors file',
]
man_pages = [
('contents', 'salt', 'Salt Documentation', authors, 7),
('ref/cli/salt', 'salt', 'salt', authors, 1),
('ref/cli/salt-master', 'salt-master', 'salt-master Documentation', authors, 1),
('ref/cli/salt-minion', 'salt-minion', 'salt-minion Documentation', authors, 1),
('ref/cli/salt-key', 'salt-key', 'salt-key Documentation', authors, 1),
('ref/cli/salt-cp', 'salt-cp', 'salt-cp Documentation', authors, 1),
('ref/cli/salt-call', 'salt-call', 'salt-call Documentation', authors, 1),
('ref/cli/salt-proxy', 'salt-proxy', 'salt-proxy Documentation', authors, 1),
('ref/cli/salt-syndic', 'salt-syndic', 'salt-syndic Documentation', authors, 1),
('ref/cli/salt-run', 'salt-run', 'salt-run Documentation', authors, 1),
('ref/cli/salt-ssh', 'salt-ssh', 'salt-ssh Documentation', authors, 1),
('ref/cli/salt-cloud', 'salt-cloud', 'Salt Cloud Command', authors, 1),
('ref/cli/salt-api', 'salt-api', 'salt-api Command', authors, 1),
('ref/cli/salt-unity', 'salt-unity', 'salt-unity Command', authors, 1),
('ref/cli/spm', 'spm', 'Salt Package Manager Command', authors, 1),
]
### epub options
epub_title = 'Salt Documentation'
epub_author = 'SaltStack, Inc.'
epub_publisher = epub_author
epub_copyright = copyright
epub_scheme = 'URL'
epub_identifier = 'http://saltstack.com/'
#epub_tocdepth = 3
def skip_mod_init_member(app, what, name, obj, skip, options):
    '''
    autodoc-skip-member hook: hide private names (leading underscore) and
    per-module ``mod_init`` loader functions from the generated docs.
    '''
    if name.startswith('_'):
        return True
    return isinstance(obj, types.FunctionType) and obj.__name__ == 'mod_init'
def _normalize_version(args):
_, path = args
return '.'.join([x.zfill(4) for x in (path.split('/')[-1].split('.'))])
class ReleasesTree(TocTree):
    '''
    A ``toctree`` variant that orders release documents newest-first using
    version-aware comparison rather than plain string order.
    '''
    option_spec = dict(TocTree.option_spec)

    def run(self):
        result = super(ReleasesTree, self).run()
        toc = result[0][0]
        # Replace the entry list in place so the toctree node keeps its
        # original list object.
        toc['entries'][:] = sorted(toc['entries'],
                                   key=_normalize_version, reverse=True)
        return result
def setup(app):
    '''Sphinx extension entry point: register the ``releasestree`` directive
    and hook the autodoc member filter.'''
    app.add_directive('releasestree', ReleasesTree)
    app.connect('autodoc-skip-member', skip_mod_init_member)
|
# -*- coding: utf-8 -*-
import os
import sys
import alabaster
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Make the package importable for autodoc, plus local Sphinx extensions.
sys.path.insert(0, project_root)
sys.path.append(os.path.abspath(
    os.path.join(os.path.dirname(__file__), "_ext")))
# package data
# Execute the metadata module into ``about`` instead of importing the
# package, so doc builds don't pull in the package's runtime dependencies.
about = {}
with open("../cihai/__about__.py") as fp:
    exec(fp.read(), about)
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'releases',
    'alabaster',
]
# Configuration for the 'releases' changelog extension.
releases_unstable_prehistory = True
releases_document_name = "history"
releases_issue_uri = "https://github.com/cihai/cihai/issues/%s"
releases_release_uri = "https://github.com/cihai/cihai/tree/v%s"
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = about['__title__']
copyright = about['__copyright__']
# Short X.Y version: join the first two *components* of the version.
# (Previously the slice applied to the re-joined string, taking the first
# two characters — e.g. '0.8.1' produced '0.' instead of '0.8'.)
version = '%s' % ('.'.join(about['__version__'].split('.')[:2]))
release = '%s' % (about['__version__'])
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme_path = [alabaster.get_path()]
html_favicon = 'favicon.ico'
html_theme = 'alabaster'
html_sidebars = {
    '**': [
        'about.html',
        'star.html',
        'navigation.html',
        'relations.html',
        'more.html',
        'searchbox.html',
    ]
}
html_theme_options = {
    'logo': 'img/cihai.svg',
}
# NOTE(review): this reassignment clobbers the alabaster theme path set
# above, so only '_themes' is searched as written — confirm whether both
# paths were intended (e.g. appending instead of replacing).
html_theme_path = ['_themes']
html_static_path = ['_static']
htmlhelp_basename = '%sdoc' % about['__title__']
# LaTeX / PDF output.
latex_documents = [
    ('index', '{0}.tex'.format(about['__package_name__']),
     '{0} Documentation'.format(about['__title__']),
     about['__author__'], 'manual'),
]
# Manual pages: (source, name, description, authors, section).
man_pages = [
    ('index', about['__package_name__'],
     '{0} Documentation'.format(about['__title__']),
     about['__author__'], 1),
]
# Texinfo output.
texinfo_documents = [
    ('index', '{0}'.format(about['__package_name__']),
     '{0} Documentation'.format(about['__title__']),
     about['__author__'], about['__package_name__'],
     about['__description__'], 'Miscellaneous'),
]
# Targets for cross-project references (sphinx.ext.intersphinx).
intersphinx_mapping = {
    'python': ('http://docs.python.org/', None),
    'sphinx': ('http://www.sphinx-doc.org/en/stable/', None),
    'sqlalchemy': ('http://docs.sqlalchemy.org/en/latest/', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
    'unihan-tabular': ('https://unihan-tabular.git-pull.com/en/latest/', None)
}
set autodoc to sort methods by group
# -*- coding: utf-8 -*-
"""Sphinx configuration for the cihai documentation."""
import os
import sys

import alabaster

# Make the project root (the parent of this docs directory) and the local
# sphinx extension directory importable.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)
sys.path.append(os.path.abspath(
    os.path.join(os.path.dirname(__file__), "_ext")))

# Package metadata: execute cihai's __about__.py and collect its dunder
# attributes (title, version, author, ...) into a plain dict.
about = {}
with open("../cihai/__about__.py") as fp:
    exec(fp.read(), about)

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'releases',
    'alabaster',
]

# Settings for the 'releases' changelog extension.
releases_unstable_prehistory = True
releases_document_name = "history"
releases_issue_uri = "https://github.com/cihai/cihai/issues/%s"
releases_release_uri = "https://github.com/cihai/cihai/tree/v%s"

templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

project = about['__title__']
copyright = about['__copyright__']

# Short X.Y version and full release string.
# BUG FIX: the original sliced the re-joined *string* to two characters
# ('.'.join(v.split('.'))[:2] -> '0.' for '0.8.1'); slice the component
# list instead so '0.8.1' yields '0.8'.
version = '.'.join(about['__version__'].split('.')[:2])
release = about['__version__']

exclude_patterns = ['_build']
pygments_style = 'sphinx'

# BUG FIX: html_theme_path was assigned twice and the later assignment
# (['_themes']) silently discarded alabaster.get_path(); keep both theme
# search paths in a single list.
html_theme_path = [alabaster.get_path(), '_themes']
html_favicon = 'favicon.ico'
html_theme = 'alabaster'
html_sidebars = {
    '**': [
        'about.html',
        'star.html',
        'navigation.html',
        'relations.html',
        'more.html',
        'searchbox.html',
    ]
}
html_theme_options = {
    'logo': 'img/cihai.svg',
}
html_static_path = ['_static']
htmlhelp_basename = '%sdoc' % about['__title__']

latex_documents = [
    ('index', '{0}.tex'.format(about['__package_name__']),
     '{0} Documentation'.format(about['__title__']),
     about['__author__'], 'manual'),
]
man_pages = [
    ('index', about['__package_name__'],
     '{0} Documentation'.format(about['__title__']),
     about['__author__'], 1),
]
texinfo_documents = [
    ('index', '{0}'.format(about['__package_name__']),
     '{0} Documentation'.format(about['__title__']),
     about['__author__'], about['__package_name__'],
     about['__description__'], 'Miscellaneous'),
]

intersphinx_mapping = {
    'python': ('http://docs.python.org/', None),
    'sphinx': ('http://www.sphinx-doc.org/en/stable/', None),
    'sqlalchemy': ('http://docs.sqlalchemy.org/en/latest/', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
    'unihan-tabular': ('https://unihan-tabular.git-pull.com/en/latest/', None),
}

# Sort autodoc members by member type (groupwise) rather than source order.
autodoc_member_order = 'groupwise'
|
# -*- coding: utf-8 -*-
#
# GPy documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 18 15:30:28 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
#Mocking uninstalled modules: https://read-the-docs.readthedocs.org/en/latest/faq.html
#class Mock(object):
#__all__ = []
#def __init__(self, *args, **kwargs):
#for key, value in kwargs.iteritems():
#setattr(self, key, value)
#def __call__(self, *args, **kwargs):
#return Mock()
#__add__ = __mul__ = __getitem__ = __setitem__ = \
#__delitem__ = __sub__ = __floordiv__ = __mod__ = __divmod__ = \
#__pow__ = __lshift__ = __rshift__ = __and__ = __xor__ = __or__ = \
#__rmul__ = __rsub__ = __rfloordiv__ = __rmod__ = __rdivmod__ = \
#__rpow__ = __rlshift__ = __rrshift__ = __rand__ = __rxor__ = __ror__ = \
#__imul__ = __isub__ = __ifloordiv__ = __imod__ = __idivmod__ = \
#__ipow__ = __ilshift__ = __irshift__ = __iand__ = __ixor__ = __ior__ = \
#__neg__ = __pos__ = __abs__ = __invert__ = __call__
#def __getattr__(self, name):
#if name in ('__file__', '__path__'):
#return '/dev/null'
#if name == 'sqrt':
#return math.sqrt
#elif name[0] != '_' and name[0] == name[0].upper():
#return type(name, (), {})
#else:
#return Mock(**vars(self))
#def __lt__(self, *args, **kwargs):
#return True
#__nonzero__ = __le__ = __eq__ = __ne__ = __gt__ = __ge__ = __contains__ = \
#__lt__
#def __repr__(self):
## Use _mock_repr to fake the __repr__ call
#res = getattr(self, "_mock_repr")
#return res if isinstance(res, str) else "Mock"
#def __hash__(self):
#return 1
#__len__ = __int__ = __long__ = __index__ = __hash__
#def __oct__(self):
#return '01'
#def __hex__(self):
#return '0x1'
#def __float__(self):
#return 0.1
#def __complex__(self):
#return 1j
#MOCK_MODULES = [
#'pylab', 'scipy', 'matplotlib', 'matplotlib.pyplot', 'pyfits',
#'scipy.constants.constants', 'matplotlib.cm',
#'matplotlib.image', 'matplotlib.colors', 'sunpy.cm',
#'pandas', 'pandas.io', 'pandas.io.parsers',
#'suds', 'matplotlib.ticker', 'matplotlib.colorbar',
#'matplotlib.dates', 'scipy.optimize', 'scipy.ndimage',
#'matplotlib.figure', 'scipy.ndimage.interpolation', 'bs4']
#for mod_name in MOCK_MODULES:
#sys.modules[mod_name] = Mock()
#sys.modules['numpy'] = Mock(pi=math.pi, G=6.67364e-11,
#ndarray=type('ndarray', (), {}),
#dtype=lambda _: Mock(_mock_repr='np.dtype(\'float32\')'))
#sys.modules['scipy.constants'] = Mock(pi=math.pi, G=6.67364e-11)
##############################################################################
##
## Mock out imports with C dependencies because ReadTheDocs can't build them.
class Mock(object):
    """Stand-in for modules with C dependencies that ReadTheDocs cannot build."""

    def __init__(self, *args, **kwargs):
        # Accept and ignore any constructor arguments.
        pass

    def __call__(self, *args, **kwargs):
        # Calling a mock yields another mock, so chained calls keep working.
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        # Sphinx probes __file__/__path__ during import; hand back a harmless path.
        if name in ('__file__', '__path__'):
            return '/dev/null'
        # A capitalised attribute is assumed to be a class: fabricate a type
        # of that name so "from mocked import SomeClass" keeps working.
        if name[0] == name[0].upper():
            fake_cls = type(name, (), {})
            fake_cls.__module__ = __name__
            return fake_cls
        # Anything else becomes a fresh mock.
        return Mock()
#sys.path.append("../GPy")
#import mock
# Install the Mock class above for every heavyweight third-party module so
# the docs build without those packages installed.
print "Mocking"
MOCK_MODULES = ['pylab', 'matplotlib', 'sympy', 'sympy.utilities', 'sympy.utilities.codegen', 'sympy.core.cache', 'sympy.core', 'sympy.parsing', 'sympy.parsing.sympy_parser']#'matplotlib', 'matplotlib.color', 'matplotlib.pyplot', 'pylab' ]
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('..'))
print "Adding path"
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('./sphinxext'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
print "Importing extensions"
# NOTE: 'sphinxext.ipython_directive' is the locally vendored copy on the
# path appended above.
extensions = ['sphinxext.ipython_directive',
              'sphinx.ext.autodoc', 'sphinx.ext.viewcode'
              #'matplotlib.sphinxext.mathmpl',
              #'matplotlib.sphinxext.only_directives',
              #'matplotlib.sphinxext.plot_directive',
              #'ipython_directive'
              ]
#'sphinx.ext.doctest',
#'ipython_console_highlighting',
#'inheritance_diagram',
#'numpydoc']
print "finished importing"
# ----------------------- READTHEDOCS ------------------
# On a ReadTheDocs build, regenerate the API .rst files with sphinx-apidoc,
# since they are not committed to the repository.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    sys.path.append("../GPy")
    os.system("pwd")
    os.system("sphinx-apidoc -f -o . ../GPy")
#os.system("cd ..")
#os.system("cd ./docs")
print "Compiled files"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GPy'
copyright = u'2013, Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# This is to revert to the default theme on readthedocs
html_style = '/default.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GPydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'GPy.tex', u'GPy Documentation',
u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gpy', u'GPy Documentation',
[u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GPy', u'GPy Documentation',
u'Author', 'GPy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'GPy'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2013, Author'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
#############################################################################
#
# Include constructors in all the docs
# Got this method from:
# http://stackoverflow.com/questions/5599254/how-to-use-sphinxs-autodoc-to-document-a-classs-init-self-method
#def skip(app, what, name, obj, skip, options):
#if name == "__init__":
#return False
#return skip
#def setup(app):
#app.connect("autodoc-skip-member", skip)
Changed to matplotlib sphinxext
# -*- coding: utf-8 -*-
#
# GPy documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 18 15:30:28 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
#Mocking uninstalled modules: https://read-the-docs.readthedocs.org/en/latest/faq.html
#class Mock(object):
#__all__ = []
#def __init__(self, *args, **kwargs):
#for key, value in kwargs.iteritems():
#setattr(self, key, value)
#def __call__(self, *args, **kwargs):
#return Mock()
#__add__ = __mul__ = __getitem__ = __setitem__ = \
#__delitem__ = __sub__ = __floordiv__ = __mod__ = __divmod__ = \
#__pow__ = __lshift__ = __rshift__ = __and__ = __xor__ = __or__ = \
#__rmul__ = __rsub__ = __rfloordiv__ = __rmod__ = __rdivmod__ = \
#__rpow__ = __rlshift__ = __rrshift__ = __rand__ = __rxor__ = __ror__ = \
#__imul__ = __isub__ = __ifloordiv__ = __imod__ = __idivmod__ = \
#__ipow__ = __ilshift__ = __irshift__ = __iand__ = __ixor__ = __ior__ = \
#__neg__ = __pos__ = __abs__ = __invert__ = __call__
#def __getattr__(self, name):
#if name in ('__file__', '__path__'):
#return '/dev/null'
#if name == 'sqrt':
#return math.sqrt
#elif name[0] != '_' and name[0] == name[0].upper():
#return type(name, (), {})
#else:
#return Mock(**vars(self))
#def __lt__(self, *args, **kwargs):
#return True
#__nonzero__ = __le__ = __eq__ = __ne__ = __gt__ = __ge__ = __contains__ = \
#__lt__
#def __repr__(self):
## Use _mock_repr to fake the __repr__ call
#res = getattr(self, "_mock_repr")
#return res if isinstance(res, str) else "Mock"
#def __hash__(self):
#return 1
#__len__ = __int__ = __long__ = __index__ = __hash__
#def __oct__(self):
#return '01'
#def __hex__(self):
#return '0x1'
#def __float__(self):
#return 0.1
#def __complex__(self):
#return 1j
#MOCK_MODULES = [
#'pylab', 'scipy', 'matplotlib', 'matplotlib.pyplot', 'pyfits',
#'scipy.constants.constants', 'matplotlib.cm',
#'matplotlib.image', 'matplotlib.colors', 'sunpy.cm',
#'pandas', 'pandas.io', 'pandas.io.parsers',
#'suds', 'matplotlib.ticker', 'matplotlib.colorbar',
#'matplotlib.dates', 'scipy.optimize', 'scipy.ndimage',
#'matplotlib.figure', 'scipy.ndimage.interpolation', 'bs4']
#for mod_name in MOCK_MODULES:
#sys.modules[mod_name] = Mock()
#sys.modules['numpy'] = Mock(pi=math.pi, G=6.67364e-11,
#ndarray=type('ndarray', (), {}),
#dtype=lambda _: Mock(_mock_repr='np.dtype(\'float32\')'))
#sys.modules['scipy.constants'] = Mock(pi=math.pi, G=6.67364e-11)
##############################################################################
##
## Mock out imports with C dependencies because ReadTheDocs can't build them.
class Mock(object):
    """Stand-in for modules with C dependencies that ReadTheDocs cannot build."""

    def __init__(self, *args, **kwargs):
        # Accept and ignore any constructor arguments.
        pass

    def __call__(self, *args, **kwargs):
        # Calling a mock yields another mock, so chained calls keep working.
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        # Sphinx probes __file__/__path__ during import; hand back a harmless path.
        if name in ('__file__', '__path__'):
            return '/dev/null'
        # A capitalised attribute is assumed to be a class: fabricate a type
        # of that name so "from mocked import SomeClass" keeps working.
        if name[0] == name[0].upper():
            fake_cls = type(name, (), {})
            fake_cls.__module__ = __name__
            return fake_cls
        # Anything else becomes a fresh mock.
        return Mock()
#sys.path.append("../GPy")
#import mock
# Install the Mock class above for every heavyweight third-party module so
# the docs build without those packages installed.
print "Mocking"
MOCK_MODULES = ['pylab', 'matplotlib', 'sympy', 'sympy.utilities', 'sympy.utilities.codegen', 'sympy.core.cache', 'sympy.core', 'sympy.parsing', 'sympy.parsing.sympy_parser']#'matplotlib', 'matplotlib.color', 'matplotlib.pyplot', 'pylab' ]
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('..'))
print "Adding path"
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('./sphinxext'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
print "Importing extensions"
print sys.path
# NOTE: here the ipython directive shipped with matplotlib is used (the
# matplotlib module itself is mocked above, but the sphinxext subpackage is
# resolved at Sphinx load time).
extensions = ['matplotlib.sphinxext.ipython_directive',
              'sphinx.ext.autodoc', 'sphinx.ext.viewcode'
              #'matplotlib.sphinxext.mathmpl',
              #'matplotlib.sphinxext.only_directives',
              #'matplotlib.sphinxext.plot_directive',
              #'ipython_directive'
              ]
#'sphinx.ext.doctest',
#'ipython_console_highlighting',
#'inheritance_diagram',
#'numpydoc']
print "finished importing"
# ----------------------- READTHEDOCS ------------------
# On a ReadTheDocs build, regenerate the API .rst files with sphinx-apidoc,
# since they are not committed to the repository.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    sys.path.append("../GPy")
    os.system("pwd")
    os.system("sphinx-apidoc -f -o . ../GPy")
#os.system("cd ..")
#os.system("cd ./docs")
print "Compiled files"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GPy'
copyright = u'2013, Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# This is to revert to the default theme on readthedocs
html_style = '/default.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GPydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'GPy.tex', u'GPy Documentation',
u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gpy', u'GPy Documentation',
[u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GPy', u'GPy Documentation',
u'Author', 'GPy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'GPy'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2013, Author'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
#############################################################################
#
# Include constructors in all the docs
# Got this method from:
# http://stackoverflow.com/questions/5599254/how-to-use-sphinxs-autodoc-to-document-a-classs-init-self-method
#def skip(app, what, name, obj, skip, options):
#if name == "__init__":
#return False
#return skip
#def setup(app):
#app.connect("autodoc-skip-member", skip)
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
python src/histology_analyser.py -i ~/data/medical/data_orig/jatra_mikro_data/Nejlepsi_rozliseni_nevycistene -t 6800 -cr 0 -1 100 300 100 300
"""
import sys
import os.path
path_to_script = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path_to_script, "../extern/dicom2fem/src"))
import logging
logger = logging.getLogger(__name__)
import argparse
import numpy as np
import scipy.ndimage
import misc
import datareader
import SimpleITK as sitk
import scipy.ndimage
from PyQt4.QtGui import QApplication
import seed_editor_qt as seqt
import skelet3d
import segmentation
GAUSSIAN_SIGMA = 1
class HistologyAnalyser:
def __init__ (self, data3d, metadata, threshold):
self.data3d = data3d
self.metadata = metadata
self.threshold = threshold
def run(self):
import pdb; pdb.set_trace()
#self.preprocessing()
data3d_thr = segmentation.vesselSegmentation(
self.data3d,
segmentation = np.ones(self.data3d.shape, dtype='int8'),
#segmentation = oseg.orig_scale_segmentation,
threshold = -1,
inputSigma = 0.15,
dilationIterations = 2,
nObj = 1,
biggestObjects = False,
# dataFiltering = True,
interactivity = True,
binaryClosingIterations = 5,
binaryOpeningIterations = 1)
#self.data3d_thri = self.muxImage(
# self.data3d_thr2.astype(np.uint16),
# metadata
# )
#sitk.Show(self.data3d_thri)
#self.data3di = self.muxImage(
# self.data3d.astype(np.uint16),
# metadata
# )
#sitk.Show(self.data3di)
app = QApplication(sys.argv)
#app.exec_()
print "skelet"
data3d_skel = skelet3d.skelet3d(data3d_thr)
pyed = seqt.QTSeedEditor(
data3d,
contours=data3d_thr.astype(np.int8),
seeds=data3d_skel.astype(np.int8)
)
#app.exec_()
data3d_nodes = skeleton_nodes(data3d_skel, data3d_thr)
skeleton_analysis(data3d_nodes)
data3d_nodes[data3d_nodes==3] = 2
print "skelet with nodes"
import pdb; pdb.set_trace()
pyed = seqt.QTSeedEditor(
data3d,
seeds=(data3d_nodes).astype(np.int8),
contours=data3d_thr.astype(np.int8)
)
import pdb; pdb.set_trace()
def preprocessing(self):
self.data3d = scipy.ndimage.filters.gaussian_filter(
self.data3d,
GAUSSIAN_SIGMA
)
self.data3d_thr = self.data3d > self.threshold
self.data3d_thr2 = scipy.ndimage.morphology.binary_opening(
self.data3d_thr
)
#gf = sitk.SmoothingRecursiveGaussianImageFilter()
#gf.SetSigma(5)
#gf = sitk.DiscreteGaussianImageFilter()
#gf.SetVariance(1.0)
#self.data3di2 = gf.Execute(self.data3di)#, 5)
pass
def muxImage(self, data3d, metadata):
data3di = sitk.GetImageFromArray(data3d)
data3di.SetSpacing(metadata['voxelsize_mm'])
return data3di
#sitk.
def show(self):
app = QApplication(sys.argv)
seqt.QTSeedEditor(self.output.astype(np.int16))
app.exec_()
def skeleton_nodes(data3d_skel, data3d_thr):
    """
    Return 3d ndarray where 0 is background, 1 is skeleton, 2 is node
    and 3 is terminal node
    """
    # @TODO remove data3d_thr
    # ----------------- get nodes --------------------------
    # Full 3x3x3 kernel: convolving the binary skeleton with it counts
    # each voxel's occupied 26-neighbourhood, including the voxel itself.
    kernel = np.ones([3,3,3])
    #kernel[0,0,0]=0
    #kernel[0,0,2]=0
    #kernel[0,2,0]=0
    #kernel[0,2,2]=0
    #kernel[2,0,0]=0
    #kernel[2,0,2]=0
    #kernel[2,2,0]=0
    #kernel[2,2,2]=0
    #kernel = np.zeros([3,3,3])
    #kernel[1,1,:] = 1
    #kernel[1,:,1] = 1
    #kernel[:,1,1] = 1
    #data3d_skel = np.zeros([40,40,40])
    #data3d_skel[10,10,10] = 1
    #data3d_skel[10,11,10] = 1
    #data3d_skel[10,12,10] = 1
    #data3d_skel = data3d_skel.astype(np.int8)
    # "mocnost" (Czech: degree) = neighbour count, kept on skeleton voxels only.
    mocnost = scipy.ndimage.filters.convolve(data3d_skel, kernel) * data3d_skel
    #import pdb; pdb.set_trace()
    # >3 means self + more than 2 neighbours -> branching node;
    # ==2 means self + exactly 1 neighbour -> terminal voxel.
    nodes = (mocnost > 3).astype(np.int8)
    terminals = (mocnost == 2).astype(np.int8)
    nt = nodes - terminals  # +1 at nodes, -1 at terminals, used as seeds
    # Interactive inspection; blocks in the Qt editor, then in pdb.
    pyed = seqt.QTSeedEditor(
        mocnost,
        contours=data3d_thr.astype(np.int8),
        seeds=nt
    )
    import pdb; pdb.set_trace()
    # Relabel in place: 2 = node, 3 = terminal (mutates the input array).
    data3d_skel[nodes==1] = 2
    data3d_skel[terminals==1] = 3
    return data3d_skel
def element_analysis(sklabel, el_number):
    """Inspect one labelled skeleton element (work in progress).

    sklabel: labelled skeleton (edges carry positive labels, nodes negative)
    el_number: positive label of the edge element to analyse
    Currently returns None; analysis is done interactively in pdb.
    """
    element = (sklabel == el_number)  # binary mask of this element
    # Dilate so the mask overlaps the labels of directly adjacent elements.
    dilat_element = scipy.ndimage.morphology.binary_dilation(element)
    print 'element_analysis'
    import pdb; pdb.set_trace()  # unfinished: explore interactively
    # element dilate * sklabel[sklabel < 0]
    pass
def connection_analysis(sklabel, el_number):
    """Placeholder for element-connection analysis; not implemented yet.

    sklabel: labelled skeleton array; el_number: label of the element.
    Always returns None for now.
    """
    mask = sklabel == el_number  # mask of the requested element (unused so far)
    # element dilate * sklabel[sklabel < 0]
    return None
def skeleton_analysis(skelet_nodes, volume_data = None):
    """Label skeleton edges/nodes and collect per-edge statistics.

    skelet_nodes: output of skeleton_nodes(); 1 = edge voxel,
        2/3 = node or terminal voxel, 0 = background.
    volume_data: optional raw volume, currently unused.
    Returns a list with one element_analysis() result per edge element.
    """
    full_connectivity = np.ones([3,3,3])
    # Edges get positive labels and nodes negative ones, so both kinds of
    # component fit into a single labelled array.
    sklabel_edg, len_edg = scipy.ndimage.label(skelet_nodes == 1, structure=full_connectivity)
    sklabel_nod, len_nod = scipy.ndimage.label(skelet_nodes > 1, structure=full_connectivity)
    sklabel = sklabel_edg - sklabel_nod
    stats = []
    # BUG FIX: ndimage.label numbers components 1..len_edg inclusive;
    # the original range(1, len_edg) silently skipped the last edge.
    for i in range(1, len_edg + 1):
        elst = element_analysis(sklabel, i)
        stats.append(elst)
    return stats
# NOTE(review): removed a stray module-level `import pdb; pdb.set_trace()`
# that dropped every importer of this module into the debugger.

if __name__ == "__main__":
    import misc

    # Root logger: warnings and above go to stderr.
    logger = logging.getLogger()
    logger.setLevel(logging.WARNING)
    ch = logging.StreamHandler()
    logger.addHandler(ch)
    #logger.debug('input params')

    # input parser
    parser = argparse.ArgumentParser(description='\
            3D visualization of segmentation\n\
            \npython show_segmentation.py\n\
            \npython show_segmentation.py -i resection.pkl -l 2 3 4 -d 4')
    parser.add_argument('-i', '--inputfile',
            default='organ.pkl',
            help='input file')
    parser.add_argument('-t', '--threshold', type=int,
            default=6600,
            help='data threshold, default 6600')  # help text matched to the real default
    parser.add_argument('-cr', '--crop', type=int, metavar='N', nargs='+',
            default=[0,-1,0,-1,0,-1],
            # help string fixed: this option crops the volume, it does not
            # select segmentation labels
            help='crop bounds as z1 z2 y1 y2 x1 x2, default whole volume')
    args = parser.parse_args()

    #data = misc.obj_from_file(args.inputfile, filetype = 'pickle')
    dr = datareader.DataReader()
    data3d, metadata = dr.Get3DData(args.inputfile)

    # crop data
    cr = args.crop
    data3d = data3d[cr[0]:cr[1], cr[2]:cr[3], cr[4]:cr[5]]

    ha = HistologyAnalyser(data3d, metadata, args.threshold)
    ha.run()
    ha.show()
almost edge connections
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
python src/histology_analyser.py -i ~/data/medical/data_orig/jatra_mikro_data/Nejlepsi_rozliseni_nevycistene -t 6800 -cr 0 -1 100 300 100 300
"""
import sys
import os.path
path_to_script = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path_to_script, "../extern/dicom2fem/src"))
import logging
logger = logging.getLogger(__name__)
import argparse
import numpy as np
import scipy.ndimage
import misc
import datareader
import SimpleITK as sitk
import scipy.ndimage
from PyQt4.QtGui import QApplication
import seed_editor_qt as seqt
import skelet3d
import segmentation
# Sigma (in voxels) of the Gaussian smoothing applied in
# HistologyAnalyser.preprocessing().
GAUSSIAN_SIGMA = 1
class HistologyAnalyser:
    """Exploratory pipeline for vessel analysis of a 3-D histology volume.

    Segments vessels, skeletonizes the mask and labels skeleton nodes,
    opening interactive Qt editors (and pdb breakpoints) along the way.
    """

    def __init__ (self, data3d, metadata, threshold):
        # data3d: 3-D ndarray with the image volume
        # metadata: dict; 'voxelsize_mm' is read by muxImage()
        # threshold: intensity threshold used by preprocessing()
        self.data3d = data3d
        self.metadata = metadata
        self.threshold = threshold

    def run(self):
        """Run segmentation, skeletonization and skeleton analysis.

        NOTE(review): contains a pdb breakpoint and a blocking Qt
        segmentation dialog; intended for interactive use.
        """
        #self.preprocessing()
        # Interactive vessel segmentation over the whole volume.
        data3d_thr = segmentation.vesselSegmentation(
            self.data3d,
            segmentation = np.ones(self.data3d.shape, dtype='int8'),
            #segmentation = oseg.orig_scale_segmentation,
            threshold = -1,
            inputSigma = 0.15,
            dilationIterations = 2,
            nObj = 1,
            biggestObjects = False,
            # dataFiltering = True,
            interactivity = True,
            binaryClosingIterations = 5,
            binaryOpeningIterations = 1)
        #self.data3d_thri = self.muxImage(
        #        self.data3d_thr2.astype(np.uint16),
        #        metadata
        #        )
        #sitk.Show(self.data3d_thri)
        #self.data3di = self.muxImage(
        #        self.data3d.astype(np.uint16),
        #        metadata
        #        )
        #sitk.Show(self.data3di)
        # Qt application required by the (currently commented-out) viewers.
        app = QApplication(sys.argv)
        #app.exec_()
        print "skelet"
        # Skeletonize the binary vessel mask.
        data3d_skel = skelet3d.skelet3d(data3d_thr)
        # pyed = seqt.QTSeedEditor(
        #         data3d,
        #         contours=data3d_thr.astype(np.int8),
        #         seeds=data3d_skel.astype(np.int8)
        #         )
        #app.exec_()
        # Mark nodes (2) and terminals (3) on the skeleton, then analyse it.
        data3d_nodes = skeleton_nodes(data3d_skel, data3d_thr)
        skeleton_analysis(data3d_nodes)
        data3d_nodes[data3d_nodes==3] = 2  # collapse terminals into nodes for display
        print "skelet with nodes"
        import pdb; pdb.set_trace()  # deliberate debug stop
        # pyed = seqt.QTSeedEditor(
        #         data3d,
        #         seeds=(data3d_nodes).astype(np.int8),
        #         contours=data3d_thr.astype(np.int8)
        #         )
        # import pdb; pdb.set_trace()

    def preprocessing(self):
        """Smooth, threshold and morphologically open the volume.

        Results are stored on self: data3d (smoothed), data3d_thr
        (binary threshold mask), data3d_thr2 (opened mask).
        """
        self.data3d = scipy.ndimage.filters.gaussian_filter(
            self.data3d,
            GAUSSIAN_SIGMA
        )
        self.data3d_thr = self.data3d > self.threshold
        self.data3d_thr2 = scipy.ndimage.morphology.binary_opening(
            self.data3d_thr
        )
        #gf = sitk.SmoothingRecursiveGaussianImageFilter()
        #gf.SetSigma(5)
        #gf = sitk.DiscreteGaussianImageFilter()
        #gf.SetVariance(1.0)
        #self.data3di2 = gf.Execute(self.data3di)#, 5)
        pass

    def muxImage(self, data3d, metadata):
        """Wrap an ndarray as a SimpleITK image with voxel spacing set."""
        data3di = sitk.GetImageFromArray(data3d)
        data3di.SetSpacing(metadata['voxelsize_mm'])
        return data3di
        #sitk.

    def show(self):
        """Display self.output in the Qt seed editor (blocking)."""
        app = QApplication(sys.argv)
        seqt.QTSeedEditor(self.output.astype(np.int16))
        app.exec_()
def skeleton_nodes(data3d_skel, data3d_thr):
    """
    Return 3d ndarray where 0 is background, 1 is skeleton, 2 is node
    and 3 is terminal node
    """
    # @TODO remove data3d_thr
    # ----------------- get nodes --------------------------
    # Full 3x3x3 kernel: convolving the binary skeleton with it counts
    # each voxel's occupied 26-neighbourhood, including the voxel itself.
    kernel = np.ones([3,3,3])
    #kernel[0,0,0]=0
    #kernel[0,0,2]=0
    #kernel[0,2,0]=0
    #kernel[0,2,2]=0
    #kernel[2,0,0]=0
    #kernel[2,0,2]=0
    #kernel[2,2,0]=0
    #kernel[2,2,2]=0
    #kernel = np.zeros([3,3,3])
    #kernel[1,1,:] = 1
    #kernel[1,:,1] = 1
    #kernel[:,1,1] = 1
    #data3d_skel = np.zeros([40,40,40])
    #data3d_skel[10,10,10] = 1
    #data3d_skel[10,11,10] = 1
    #data3d_skel[10,12,10] = 1
    #data3d_skel = data3d_skel.astype(np.int8)
    # "mocnost" (Czech: degree) = neighbour count, kept on skeleton voxels only.
    mocnost = scipy.ndimage.filters.convolve(data3d_skel, kernel) * data3d_skel
    #import pdb; pdb.set_trace()
    # >3 means self + more than 2 neighbours -> branching node;
    # ==2 means self + exactly 1 neighbour -> terminal voxel.
    nodes = (mocnost > 3).astype(np.int8)
    terminals = (mocnost == 2).astype(np.int8)
    nt = nodes - terminals  # +1 at nodes, -1 at terminals, used as seeds
    # Interactive inspection; blocks in the Qt editor, then in pdb.
    pyed = seqt.QTSeedEditor(
        mocnost,
        contours=data3d_thr.astype(np.int8),
        seeds=nt
    )
    import pdb; pdb.set_trace()
    # Relabel in place: 2 = node, 3 = terminal (mutates the input array).
    data3d_skel[nodes==1] = 2
    data3d_skel[terminals==1] = 3
    return data3d_skel
def node_analysis(sklabel):
    """Placeholder for per-node statistics; computes nothing yet."""
    return None
def element_neighbors(sklabel, el_number):
    """
    Return the labels of skeleton elements adjacent to element `el_number`.

    sklabel: labelled skeleton where edge elements carry positive labels
        and node elements carry negative labels.
    el_number: label of the element whose neighbours are wanted.
    Returns an ndarray of the negative (node) labels touching the element.
    """
    element = (sklabel == el_number)
    # One-voxel dilation in full 26-connectivity makes the mask overlap
    # directly adjacent elements.
    dilat_element = scipy.ndimage.morphology.binary_dilation(
        element,
        structure=np.ones([3,3,3])
    )
    neighborhood = sklabel * dilat_element
    # Compute np.unique once (the original evaluated it twice) and keep
    # only negative labels, i.e. neighbouring nodes.
    unique_labels = np.unique(neighborhood)
    neighbors = unique_labels[unique_labels < 0]
    return neighbors
def edge_analysis(sklabel, edg_number):
    """Analyse one skeleton edge element (work in progress).

    sklabel: labelled skeleton (edges positive, nodes negative)
    edg_number: positive label of the edge to analyse
    Currently returns None; the analysis is explored interactively in pdb.
    """
    print 'element_analysis'
    # Labels of the node elements touching this edge.
    elneigh = element_neighbors(sklabel,edg_number)
    import pdb; pdb.set_trace()  # unfinished: explore interactively
    # element dilate * sklabel[sklabel < 0]
    pass
def connection_analysis(sklabel, el_number):
    """Stub: will analyse how element `el_number` connects to its neighbours.

    Returns None until implemented.
    """
    selected = (sklabel == el_number)
    # element dilate * sklabel[sklabel < 0]
def skeleton_analysis(skelet_nodes, volume_data = None):
    """
    Glossary:
    element (edge): line structure of the skeleton connected to a node at
        each end
    node: connection point of elements; typically one or a few voxels
    terminal: node at which an element ends

    skelet_nodes: output of skeleton_nodes() (1 = edge, 2/3 = node/terminal).
    volume_data: optional raw volume, currently unused.
    Returns a list with one edge_analysis() result per edge element.
    """
    connectivity = np.ones([3,3,3])
    # Edges get positive labels and nodes negative ones, so both kinds of
    # component fit into a single labelled array.
    sklabel_edg, len_edg = scipy.ndimage.label(skelet_nodes == 1, structure=connectivity)
    sklabel_nod, len_nod = scipy.ndimage.label(skelet_nodes > 1, structure=connectivity)
    sklabel = sklabel_edg - sklabel_nod
    stats = []
    # BUG FIX: ndimage.label numbers components 1..len_edg inclusive;
    # the original range(1, len_edg) silently skipped the last edge.
    for i in range(1, len_edg + 1):
        edgst = edge_analysis(sklabel, i)
        stats.append(edgst)
    return stats
# NOTE(review): removed a stray module-level `import pdb; pdb.set_trace()`
# that dropped every importer of this module into the debugger.

if __name__ == "__main__":
    import misc

    # Root logger: warnings and above go to stderr.
    logger = logging.getLogger()
    logger.setLevel(logging.WARNING)
    ch = logging.StreamHandler()
    logger.addHandler(ch)
    #logger.debug('input params')

    # input parser
    parser = argparse.ArgumentParser(description='\
            3D visualization of segmentation\n\
            \npython show_segmentation.py\n\
            \npython show_segmentation.py -i resection.pkl -l 2 3 4 -d 4')
    parser.add_argument('-i', '--inputfile',
            default='organ.pkl',
            help='input file')
    parser.add_argument('-t', '--threshold', type=int,
            default=6600,
            help='data threshold, default 6600')  # help text matched to the real default
    parser.add_argument('-cr', '--crop', type=int, metavar='N', nargs='+',
            default=[0,-1,0,-1,0,-1],
            # help string fixed: this option crops the volume, it does not
            # select segmentation labels
            help='crop bounds as z1 z2 y1 y2 x1 x2, default whole volume')
    args = parser.parse_args()

    #data = misc.obj_from_file(args.inputfile, filetype = 'pickle')
    dr = datareader.DataReader()
    data3d, metadata = dr.Get3DData(args.inputfile)

    # crop data
    cr = args.crop
    data3d = data3d[cr[0]:cr[1], cr[2]:cr[3], cr[4]:cr[5]]

    ha = HistologyAnalyser(data3d, metadata, args.threshold)
    ha.run()
    ha.show()
|
# -*- coding: utf-8 -*-
#
# DataViews documentation build configuration file, created by
# sphinx-quickstart on Wed May 14 14:25:57 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../param/'))
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('sphinxext'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.inheritance_diagram'
]
inheritance_graph_attrs = dict(rankdir="LR", size='"8.0, 10.0"', fontsize=20)
default_edge_attrs = {
'arrowsize': 1.0,
'style': '"setlinewidth(0.5)"',
}
inheritance_node_attrs = dict(fontsize=20, height=1.0)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DataViews'
copyright = u'2014, IOAM: Jean-Luc Stevens and Philipp Rudiger'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2014.05.14'
# The full version, including alpha/beta/rc tags.
release = '2014.05.14'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'test_data', 'reference_data', 'nbpublisher',
'extensions']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'DataViews'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static','Reference_Manual']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DataViewsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'DataViews.tex', u'DataViews Documentation',
u'IOAM: Jean-Luc Stevens and Philipp Rudiger', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dataviews', u'DataViews Documentation',
[u'IOAM: Jean-Luc Stevens and Philipp Rudiger'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DataViews', u'DataViews Documentation',
u'IOAM: Jean-Luc Stevens and Philipp Rudiger', 'DataViews', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
from extensions.paramdoc import param_formatter
def setup(app):
    # Run param_formatter over every docstring so param parameter
    # declarations are rendered in the generated documentation.
    app.connect('autodoc-process-docstring', param_formatter)
Added necessary paths to sys.path and PYTHONPATH in Sphinx conf.py
# -*- coding: utf-8 -*-
#
# DataViews documentation build configuration file, created by
# sphinx-quickstart on Wed May 14 14:25:57 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
paths = ['../param/', '.', '..']
for path in paths:
abs_path = os.path.abspath(path)
sys.path.insert(0, abs_path)
os.environ["PYTHONPATH"] = abs_path # PYTHONPATH needs to be set for runipy
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.inheritance_diagram'
]
inheritance_graph_attrs = dict(rankdir="LR", size='"8.0, 10.0"', fontsize=20)
default_edge_attrs = {
'arrowsize': 1.0,
'style': '"setlinewidth(0.5)"',
}
inheritance_node_attrs = dict(fontsize=20, height=1.0)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DataViews'
copyright = u'2014, IOAM: Jean-Luc Stevens and Philipp Rudiger'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2014.05.14'
# The full version, including alpha/beta/rc tags.
release = '2014.05.14'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'test_data', 'reference_data', 'nbpublisher',
'extensions']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'DataViews'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static','Reference_Manual']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DataViewsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'DataViews.tex', u'DataViews Documentation',
u'IOAM: Jean-Luc Stevens and Philipp Rudiger', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dataviews', u'DataViews Documentation',
[u'IOAM: Jean-Luc Stevens and Philipp Rudiger'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DataViews', u'DataViews Documentation',
u'IOAM: Jean-Luc Stevens and Philipp Rudiger', 'DataViews', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
from extensions.paramdoc import param_formatter
def setup(app):
    # Run param_formatter over every docstring so param parameter
    # declarations are rendered in the generated documentation.
    app.connect('autodoc-process-docstring', param_formatter)
|
from django.contrib.auth.models import User
from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
# Fallback address values (Tijuana, Baja California, Mexico) used as
# field defaults by AddressModel below.
defaults = {
    'location': Point(-117.0382, 32.5149, srid=4326),
    'state': 'BC',
    'city': 'Tijuana',
    'country': 'MX',
}
class AddressModel(models.Model):
    """
    An abstract base class model that provides address fields.
    """
    number = models.CharField(max_length=30, default = "")
    street = models.CharField(max_length=254, default = "")
    unit = models.CharField(max_length=30, null=True, blank=True)
    # BUG FIX: `defaults` is a plain dict, so attribute access
    # (defaults.city etc.) raised AttributeError at import time;
    # dict item access is required.
    city = models.CharField(max_length=100, default = defaults['city'])
    state = models.CharField(max_length=2, default = defaults['state'])
    zipcode = models.CharField(max_length=12, default = "")
    country = models.CharField(max_length=2, default = defaults['country'])
    location = models.PointField(srid=4326, default = defaults['location'])

    class Meta:
        abstract = True  # no table for this model; inherit the fields
class UpdatedByModel(models.Model):
    """Abstract base adding audit fields: who changed the row, when, and why."""
    # Free-form note describing the change.
    comment = models.CharField(max_length=254, default="")
    # User who made the last change; rows are removed when the user is deleted.
    updated_by = models.ForeignKey(User,
                                   on_delete=models.CASCADE)
    # Refreshed automatically on every save().
    updated_on = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True  # no table for this model; inherit the fields
Hospital webapp
from django.contrib.auth.models import User
from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
# Fallback address values (Tijuana, Baja California, Mexico) used as
# field defaults by AddressModel below.
defaults = {
    'location': Point(-117.0382, 32.5149, srid=4326),
    'state': 'BC',
    'city': 'Tijuana',
    'country': 'MX',
}
class AddressModel(models.Model):
    """
    An abstract base class model that provides address fields.
    """
    number = models.CharField(max_length=30, default = "")
    street = models.CharField(max_length=254, default = "")
    unit = models.CharField(max_length=30, null=True, blank=True)
    # City/state/country/location fall back to the module-level `defaults`
    # dict (Tijuana, BC, MX) when not supplied.
    city = models.CharField(max_length=100, default = defaults['city'])
    state = models.CharField(max_length=2, default = defaults['state'])
    zipcode = models.CharField(max_length=12, default = "")
    country = models.CharField(max_length=2, default = defaults['country'])
    location = models.PointField(srid=4326, default = defaults['location'])

    class Meta:
        abstract = True  # no table for this model; inherit the fields
class UpdatedByModel(models.Model):
    """Abstract base adding audit fields: who changed the row, when, and why."""
    # Free-form note describing the change.
    comment = models.CharField(max_length=254, default="")
    # User who made the last change; rows are removed when the user is deleted.
    updated_by = models.ForeignKey(User,
                                   on_delete=models.CASCADE)
    # Refreshed automatically on every save().
    updated_on = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True  # no table for this model; inherit the fields
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
from datetime import datetime, timezone
import faulthandler
import gc
import os
import subprocess
import sys
import time
import warnings
import numpy as np
import matplotlib
import sphinx
from sphinx_gallery.sorting import FileNameSortKey, ExplicitOrder
from numpydoc import docscrape
import mne
from mne.fixes import _compare_version
from mne.tests.test_docstring_parameters import error_ignores
from mne.utils import (linkcode_resolve, # noqa, analysis:ignore
_assert_no_instances, sizeof_fmt, run_subprocess)
from mne.viz import Brain # noqa
matplotlib.use('agg')
faulthandler.enable()
os.environ['_MNE_BROWSER_NO_BLOCK'] = 'true'
os.environ['MNE_BROWSER_OVERVIEW_MODE'] = 'hidden'
os.environ['MNE_BROWSER_THEME'] = 'light'
os.environ['MNE_3D_OPTION_THEME'] = 'light'
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curdir = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne')))
sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))
# -- Project information -----------------------------------------------------
project = 'MNE'
td = datetime.now(tz=timezone.utc)
# We need to triage which date type we use so that incremental builds work
# (Sphinx looks at variable changes and rewrites all files if some change)
copyright = (
f'2012–{td.year}, MNE Developers. Last updated <time datetime="{td.isoformat()}" class="localized">{td.strftime("%Y-%m-%d %H:%M %Z")}</time>\n' # noqa: E501
'<script type="text/javascript">$(function () { $("time.localized").each(function () { var el = $(this); el.text(new Date(el.attr("datetime")).toLocaleString([], {dateStyle: "medium", timeStyle: "long"})); }); } )</script>') # noqa: E501
if os.getenv('MNE_FULL_DATE', 'false').lower() != 'true':
copyright = f'2012–{td.year}, MNE Developers. Last updated locally.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = mne.__version__
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.linkcode',
    'sphinx.ext.mathjax',
    'sphinx.ext.todo',
    'sphinx.ext.graphviz',
    'numpydoc',
    'sphinx_gallery.gen_gallery',
    # local extensions from ./sphinxext (added to sys.path above)
    'gen_commands',
    'gh_substitutions',
    'mne_substitutions',
    'newcontrib_substitutions',
    'gen_names',
    'sphinxcontrib.bibtex',
    'sphinx_copybutton',
    'sphinx_design',
    'sphinxcontrib.youtube'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_includes']

# The suffix of source filenames.
source_suffix = '.rst'

# The main toctree document.
master_doc = 'index'

# List of documents that shouldn't be included in the build.
unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "py:obj"

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mne.']

# -- Sphinx-Copybutton configuration -----------------------------------------
# Strip Python/IPython/shell prompts from copied code snippets.
copybutton_prompt_text = r">>> |\.\.\. |\$ "
copybutton_prompt_is_regexp = True
# -- Intersphinx configuration -----------------------------------------------
# Map project names to (docs URL, objects.inv location); None means the
# inventory lives at the default <URL>/objects.inv location.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
    'numpy': ('https://numpy.org/devdocs', None),
    'scipy': ('https://scipy.github.io/devdocs', None),
    'matplotlib': ('https://matplotlib.org/stable', None),
    'sklearn': ('https://scikit-learn.org/stable', None),
    'numba': ('https://numba.pydata.org/numba-doc/latest', None),
    'joblib': ('https://joblib.readthedocs.io/en/latest', None),
    'nibabel': ('https://nipy.org/nibabel', None),
    'nilearn': ('http://nilearn.github.io', None),
    'nitime': ('https://nipy.org/nitime/', None),
    'surfer': ('https://pysurfer.github.io/', None),
    'mne_bids': ('https://mne.tools/mne-bids/stable', None),
    'mne-connectivity': ('https://mne.tools/mne-connectivity/stable', None),
    'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
    'seaborn': ('https://seaborn.pydata.org/', None),
    'statsmodels': ('https://www.statsmodels.org/dev', None),
    'patsy': ('https://patsy.readthedocs.io/en/latest', None),
    'pyvista': ('https://docs.pyvista.org', None),
    'imageio': ('https://imageio.readthedocs.io/en/latest', None),
    'mne_realtime': ('https://mne.tools/mne-realtime', None),
    'picard': ('https://pierreablin.github.io/picard/', None),
    'qdarkstyle': ('https://qdarkstylesheet.readthedocs.io/en/latest', None),
    'eeglabio': ('https://eeglabio.readthedocs.io/en/latest', None),
    # dipy hosts its inventory at a non-default location
    'dipy': ('https://dipy.org/documentation/latest/',
             'https://dipy.org/documentation/latest/objects.inv/'),
    'pooch': ('https://www.fatiando.org/pooch/latest/', None),
    'pybv': ('https://pybv.readthedocs.io/en/latest/', None),
}
# NumPyDoc configuration -----------------------------------------------------

# Define what extra methods numpydoc will document
docscrape.ClassDoc.extra_public_methods = mne.utils._doc_special_members
# Don't create a per-class toctree entry for every method/attribute.
numpydoc_class_members_toctree = False
# Render attributes in the Parameters-style list rather than autosummary.
numpydoc_attributes_as_param_list = True
# Turn parameter types in docstrings into cross-references.
numpydoc_xref_param_type = True
# Cross-reference aliases: map the short type names used in docstrings to
# fully qualified targets for numpydoc's parameter-type cross-referencing.
# Fix: the original dict defined 'SourceMorph' and 'Forward' twice; the later
# duplicates (with identical values) silently overwrote the earlier entries
# and have been removed.
numpydoc_xref_aliases = {
    # Python
    'file-like': ':term:`file-like <python:file object>`',
    'iterator': ':term:`iterator <python:iterator>`',
    'path-like': ':term:`path-like`',
    'array-like': ':term:`array-like`',
    'Path': ':class:`python:pathlib.Path`',
    # Matplotlib
    'colormap': ':doc:`colormap <matplotlib:tutorials/colors/colormaps>`',
    'color': ':doc:`color <matplotlib:api/colors_api>`',
    'Axes': 'matplotlib.axes.Axes',
    'Figure': 'matplotlib.figure.Figure',
    'Axes3D': 'mpl_toolkits.mplot3d.axes3d.Axes3D',
    'ColorbarBase': 'matplotlib.colorbar.ColorbarBase',
    # sklearn
    'LeaveOneOut': 'sklearn.model_selection.LeaveOneOut',
    # joblib
    'joblib.Parallel': 'joblib.Parallel',
    # nibabel
    'Nifti1Image': 'nibabel.nifti1.Nifti1Image',
    'Nifti2Image': 'nibabel.nifti2.Nifti2Image',
    'SpatialImage': 'nibabel.spatialimages.SpatialImage',
    # MNE
    'Label': 'mne.Label', 'Forward': 'mne.Forward', 'Evoked': 'mne.Evoked',
    'Info': 'mne.Info', 'SourceSpaces': 'mne.SourceSpaces',
    'SourceMorph': 'mne.SourceMorph',
    'Epochs': 'mne.Epochs', 'Layout': 'mne.channels.Layout',
    'EvokedArray': 'mne.EvokedArray', 'BiHemiLabel': 'mne.BiHemiLabel',
    'AverageTFR': 'mne.time_frequency.AverageTFR',
    'EpochsTFR': 'mne.time_frequency.EpochsTFR',
    'Raw': 'mne.io.Raw', 'ICA': 'mne.preprocessing.ICA',
    'Covariance': 'mne.Covariance', 'Annotations': 'mne.Annotations',
    'DigMontage': 'mne.channels.DigMontage',
    'VectorSourceEstimate': 'mne.VectorSourceEstimate',
    'VolSourceEstimate': 'mne.VolSourceEstimate',
    'VolVectorSourceEstimate': 'mne.VolVectorSourceEstimate',
    'MixedSourceEstimate': 'mne.MixedSourceEstimate',
    'MixedVectorSourceEstimate': 'mne.MixedVectorSourceEstimate',
    'SourceEstimate': 'mne.SourceEstimate', 'Projection': 'mne.Projection',
    'ConductorModel': 'mne.bem.ConductorModel',
    'Dipole': 'mne.Dipole', 'DipoleFixed': 'mne.DipoleFixed',
    'InverseOperator': 'mne.minimum_norm.InverseOperator',
    'CrossSpectralDensity': 'mne.time_frequency.CrossSpectralDensity',
    'Xdawn': 'mne.preprocessing.Xdawn',
    'Report': 'mne.Report',
    'TimeDelayingRidge': 'mne.decoding.TimeDelayingRidge',
    'Vectorizer': 'mne.decoding.Vectorizer',
    'UnsupervisedSpatialFilter': 'mne.decoding.UnsupervisedSpatialFilter',
    'TemporalFilter': 'mne.decoding.TemporalFilter',
    'SSD': 'mne.decoding.SSD',
    'Scaler': 'mne.decoding.Scaler', 'SPoC': 'mne.decoding.SPoC',
    'PSDEstimator': 'mne.decoding.PSDEstimator',
    'LinearModel': 'mne.decoding.LinearModel',
    'FilterEstimator': 'mne.decoding.FilterEstimator',
    'EMS': 'mne.decoding.EMS', 'CSP': 'mne.decoding.CSP',
    'Beamformer': 'mne.beamformer.Beamformer',
    'Transform': 'mne.transforms.Transform',
    'Coregistration': 'mne.coreg.Coregistration',
    'Figure3D': 'mne.viz.Figure3D',
    # dipy
    'dipy.align.AffineMap': 'dipy.align.imaffine.AffineMap',
    'dipy.align.DiffeomorphicMap': 'dipy.align.imwarp.DiffeomorphicMap',
}
# Words and shape names that must NOT be turned into cross-references.
numpydoc_xref_ignore = {
    # words
    'instance', 'instances', 'of', 'default', 'shape', 'or',
    'with', 'length', 'pair', 'matplotlib', 'optional', 'kwargs', 'in',
    'dtype', 'object',
    # shapes
    'n_vertices', 'n_faces', 'n_channels', 'm', 'n', 'n_events', 'n_colors',
    'n_times', 'obj', 'n_chan', 'n_epochs', 'n_picks', 'n_ch_groups',
    'n_dipoles', 'n_ica_components', 'n_pos', 'n_node_names', 'n_tapers',
    'n_signals', 'n_step', 'n_freqs', 'wsize', 'Tx', 'M', 'N', 'p', 'q', 'r',
    'n_observations', 'n_regressors', 'n_cols', 'n_frequencies', 'n_tests',
    'n_samples', 'n_permutations', 'nchan', 'n_points', 'n_features',
    'n_parts', 'n_features_new', 'n_components', 'n_labels', 'n_events_in',
    'n_splits', 'n_scores', 'n_outputs', 'n_trials', 'n_estimators', 'n_tasks',
    'nd_features', 'n_classes', 'n_targets', 'n_slices', 'n_hpi', 'n_fids',
    'n_elp', 'n_pts', 'n_tris', 'n_nodes', 'n_nonzero', 'n_events_out',
    'n_segments', 'n_orient_inv', 'n_orient_fwd', 'n_orient', 'n_dipoles_lcmv',
    'n_dipoles_fwd', 'n_picks_ref', 'n_coords', 'n_meg', 'n_good_meg',
    'n_moments', 'n_patterns', 'n_new_events',
    # Undocumented (on purpose)
    'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi',
    'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', 'RawBOXY',
    'RawPersyst', 'RawNihon', 'RawNedf', 'RawHitachi',
    # sklearn subclasses
    'mapping', 'to', 'any',
    # unlinkable
    'CoregistrationUI',
    'IntracranialElectrodeLocator',
    'mne_qt_browser.figure.MNEQtBrowser',
}
# Validate every docstring against all numpydoc checks except the ones the
# project opts out of.  NOTE(review): ``error_ignores`` is defined earlier in
# this file, outside this excerpt.
numpydoc_validate = True
numpydoc_validation_checks = {'all'} | set(error_ignores)
numpydoc_validation_exclude = {  # set of regex
    # dict subclasses
    r'\.clear', r'\.get$', r'\.copy$', r'\.fromkeys', r'\.items', r'\.keys',
    r'\.pop', r'\.popitem', r'\.setdefault', r'\.update', r'\.values',
    # list subclasses
    r'\.append', r'\.count', r'\.extend', r'\.index', r'\.insert', r'\.remove',
    r'\.sort',
    # we currently don't document these properly (probably okay)
    r'\.__getitem__', r'\.__contains__', r'\.__hash__', r'\.__mul__',
    r'\.__sub__', r'\.__add__', r'\.__iter__', r'\.__div__', r'\.__neg__',
    # copied from sklearn
    r'mne\.utils\.deprecated',
    # deprecations
    r'mne\.connectivity\.degree', r'mne\.connectivity\.seed_target_indices',
    r'mne\.viz\.plot_sensors_connectivity',
    r'mne\.viz\.plot_connectivity_circle',
}
# -- Sphinx-gallery configuration --------------------------------------------
class Resetter:
    """Per-script cleanup hook for Sphinx-Gallery builds.

    The ``repr`` is deliberately static so that ``str(obj)`` of this config
    value does not change between runs and invalidate Sphinx's build
    environment hash.
    """

    def __init__(self):
        # Wall-clock start time, used by the optional SG_STAMP_STARTS output.
        self.t0 = time.time()

    def __repr__(self):
        return f'<{self.__class__.__name__}>'

    def __call__(self, gallery_conf, fname, when):
        import matplotlib.pyplot as plt
        # Classes whose instances we assert are all garbage-collected between
        # scripts; each may be absent from the build environment.
        Plotter = BackgroundPlotter = vtkPolyData = MNEQtBrowser = None
        try:
            from pyvista import Plotter  # noqa
        except ImportError:
            pass
        try:
            from pyvistaqt import BackgroundPlotter  # noqa
        except ImportError:
            pass
        try:
            from vtkmodules.vtkCommonDataModel import vtkPolyData  # noqa
        except ImportError:
            pass
        try:
            from mne_qt_browser._pg_figure import MNEQtBrowser
        except ImportError:
            pass
        from mne.viz.backends.renderer import backend
        _Renderer = None if backend is None else backend._Renderer
        reset_warnings(gallery_conf, fname)
        # in case users have interactive mode turned on in matplotlibrc,
        # turn it off here (otherwise the build can be very slow)
        plt.ioff()
        plt.rcParams['animation.embed_limit'] = 30.
        # neo holds on to an exception, which in turn holds a stack frame,
        # which will keep alive the global vars during SG execution
        try:
            import neo
            neo.io.stimfitio.STFIO_ERR = None
        except Exception:
            pass
        gc.collect()
        when = f'mne/conf.py:Resetter.__call__:{when}:{fname}'
        # Support stuff like
        # MNE_SKIP_INSTANCE_ASSERTIONS="Brain,Plotter,BackgroundPlotter,vtkPolyData,_Renderer" make html_dev-memory  # noqa: E501
        # to just test MNEQtBrowser
        skip_env = os.getenv('MNE_SKIP_INSTANCE_ASSERTIONS', '').lower()
        prefix = ''
        if skip_env not in ('true', '1', 'all'):
            prefix = 'Clean '
            skipped = skip_env.split(',')
            if 'brain' not in skipped:
                _assert_no_instances(Brain, when)  # calls gc.collect()
            if Plotter is not None and 'plotter' not in skipped:
                _assert_no_instances(Plotter, when)
            if BackgroundPlotter is not None and \
                    'backgroundplotter' not in skipped:
                _assert_no_instances(BackgroundPlotter, when)
            if vtkPolyData is not None and 'vtkpolydata' not in skipped:
                _assert_no_instances(vtkPolyData, when)
            if '_renderer' not in skipped:
                _assert_no_instances(_Renderer, when)
            if MNEQtBrowser is not None and 'mneqtbrowser' not in skipped:
                # Ensure any manual fig.close() events get properly handled
                from mne_qt_browser._pg_figure import QApplication
                app = QApplication.instance()
                if app is not None:
                    for _ in range(2):
                        app.processEvents()
                _assert_no_instances(MNEQtBrowser, when)
        # This will overwrite some Sphinx printing but it's useful
        # for memory timestamps
        if os.getenv('SG_STAMP_STARTS', '').lower() == 'true':
            import psutil
            rss = sizeof_fmt(psutil.Process(os.getpid()).memory_info().rss)
            print(f'{prefix}{time.time() - self.t0:6.1f} s : {rss}'.ljust(22))
examples_dirs = ['../tutorials', '../examples']
gallery_dirs = ['auto_tutorials', 'auto_examples']
# Signal to MNE itself that we are building the documentation.
os.environ['_MNE_BUILDING_DOC'] = 'true'
scrapers = ('matplotlib',)
try:
    # Probe whether a working 3D backend is available.
    mne.viz.set_3d_backend(mne.viz.get_3d_backend())
except Exception:
    report_scraper = None
else:
    backend = mne.viz.get_3d_backend()
    # NOTE(review): indentation below reconstructed from a whitespace-mangled
    # source — the pyvista scrapers are only added for pyvista-based backends.
    if backend in ('notebook', 'pyvistaqt'):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            import pyvista
        pyvista.OFF_SCREEN = False
        scrapers += (
            mne.gui._GUIScraper(),
            mne.viz._brain._BrainScraper(),
            'pyvista',
        )
    report_scraper = mne.report._ReportScraper()
    scrapers += (report_scraper,)
    del backend
try:
    import mne_qt_browser
    _min_ver = _compare_version(mne_qt_browser.__version__, '>=', '0.2')
    if mne.viz.get_browser_backend() == 'qt' and _min_ver:
        scrapers += (mne.viz._scraper._MNEQtBrowserScraper(),)
except ImportError:
    pass
compress_images = ('images', 'thumbnails')
# let's make things easier on Windows users
# (on Linux and macOS it's easy enough to require this)
if sys.platform.startswith('win'):
    try:
        subprocess.check_call(['optipng', '--version'])
    except Exception:
        # optipng unavailable — skip image compression rather than failing
        compress_images = ()
sphinx_gallery_conf = {
    'doc_module': ('mne',),
    'reference_url': dict(mne=None),
    'examples_dirs': examples_dirs,
    # Explicit display order of gallery subsections.
    'subsection_order': ExplicitOrder(['../examples/io/',
                                       '../examples/simulation/',
                                       '../examples/preprocessing/',
                                       '../examples/visualization/',
                                       '../examples/time_frequency/',
                                       '../examples/stats/',
                                       '../examples/decoding/',
                                       '../examples/connectivity/',
                                       '../examples/forward/',
                                       '../examples/inverse/',
                                       '../examples/realtime/',
                                       '../examples/datasets/',
                                       '../tutorials/intro/',
                                       '../tutorials/io/',
                                       '../tutorials/raw/',
                                       '../tutorials/preprocessing/',
                                       '../tutorials/epochs/',
                                       '../tutorials/evoked/',
                                       '../tutorials/time-freq/',
                                       '../tutorials/forward/',
                                       '../tutorials/inverse/',
                                       '../tutorials/stats-sensor-space/',
                                       '../tutorials/stats-source-space/',
                                       '../tutorials/machine-learning/',
                                       '../tutorials/clinical/',
                                       '../tutorials/simulation/',
                                       '../tutorials/sample-datasets/',
                                       '../tutorials/misc/']),
    'gallery_dirs': gallery_dirs,
    'default_thumb_file': os.path.join('_static', 'mne_helmet.png'),
    'backreferences_dir': 'generated',
    'plot_gallery': 'True',  # Avoid annoying Unicode/bool default warning
    'thumbnail_size': (160, 112),
    'remove_config_comments': True,
    'min_reported_time': 1.,
    'abort_on_example_error': False,
    'reset_modules': ('matplotlib', Resetter()),  # called w/each script
    'reset_modules_order': 'both',
    'image_scrapers': scrapers,
    'show_memory': not sys.platform.startswith(('win', 'darwin')),
    'line_numbers': False,  # messes with style
    'within_subsection_order': FileNameSortKey,
    'capture_repr': ('_repr_html_',),
    'junit': os.path.join('..', 'test-results', 'sphinx-gallery', 'junit.xml'),
    'matplotlib_animations': True,
    'compress_images': compress_images,
    # run every example except those whose filename contains "sgskip"
    'filename_pattern': '^((?!sgskip).)*$',
}
# Files were renamed from plot_* with:
# find . -type f -name 'plot_*.py' -exec sh -c 'x="{}"; xn=`basename "${x}"`; git mv "$x" `dirname "${x}"`/${xn:5}' \; # noqa
def append_attr_meth_examples(app, what, name, obj, options, lines):
    """Append SG examples backreferences to method and attr docstrings."""
    # NumpyDoc nicely embeds method and attribute docstrings for us, but it
    # does not respect the autodoc templates that would otherwise insert
    # the .. include:: lines, so we need to do it.
    # Eventually this could perhaps live in SG.
    if what not in ('attribute', 'method'):
        return
    examples_path = os.path.join(
        os.path.dirname(__file__), 'generated', '%s.examples' % (name,))
    # Only append a mini-gallery when the backreference file is non-empty.
    if os.path.getsize(examples_path) > 0:
        rst = """
.. _sphx_glr_backreferences_{1}:
.. rubric:: Examples using ``{0}``:
.. minigallery:: {1}
""".format(name.split('.')[-1], name)
        lines += rst.split('\n')
# -- Other extension configuration -------------------------------------------

# Pretend to be a browser; some sites 403 the default urllib user agent.
linkcheck_request_headers = dict(user_agent='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36')  # noqa: E501
linkcheck_ignore = [  # will be compiled to regex
    r'https://datashare.is.ed.ac.uk/handle/10283/2189\?show=full',  # noqa Max retries exceeded with url: /handle/10283/2189?show=full (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1123)')))
    'https://doi.org/10.1002/mds.870120629',  # Read timed out.
    'https://doi.org/10.1088/0031-9155/32/1/004',  # noqa Read timed out. (read timeout=15)
    'https://doi.org/10.1088/0031-9155/40/3/001',  # noqa Read timed out. (read timeout=15)
    'https://doi.org/10.1088/0031-9155/51/7/008',  # noqa Read timed out. (read timeout=15)
    'https://doi.org/10.1088/0031-9155/57/7/1937',  # noqa Read timed out. (read timeout=15)
    'https://doi.org/10.1088/0967-3334/22/4/305',  # noqa Read timed out. (read timeout=15)
    'https://doi.org/10.1088/1741-2552/aacfe4',  # noqa Read timed out. (read timeout=15)
    'https://doi.org/10.1093/sleep/18.7.557',  # noqa 403 Client Error: Forbidden for url: https://academic.oup.com/sleep/article-lookup/doi/10.1093/sleep/18.7.557
    'https://doi.org/10.1162/089976699300016719',  # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/neco/article/11/2/417-441/6242
    'https://doi.org/10.1162/jocn.1993.5.2.162',  # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/jocn/article/5/2/162-176/3095
    'https://doi.org/10.1162/neco.1995.7.6.1129',  # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/neco/article/7/6/1129-1159/5909
    'https://doi.org/10.1162/jocn_a_00405',  # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/jocn/article/25/9/1477-1492/27980
    'https://doi.org/10.1167/15.6.4',  # noqa 403 Client Error: Forbidden for url: https://jov.arvojournals.org/article.aspx?doi=10.1167/15.6.4
    'https://doi.org/10.7488/ds/1556',  # noqa Max retries exceeded with url: /handle/10283/2189 (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1122)')))
    'https://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach',  # noqa Max retries exceeded with url: /imaging/MniTalairach (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1122)')))
    'https://www.nyu.edu/',  # noqa Max retries exceeded with url: / (Caused by SSLError(SSLError(1, '[SSL: DH_KEY_TOO_SMALL] dh key too small (_ssl.c:1122)')))
    'https://docs.python.org/3/library/.*',  # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
    'https://hal.archives-ouvertes.fr/hal-01848442.*',  # noqa Sometimes: 503 Server Error: Service Unavailable for url: https://hal.archives-ouvertes.fr/hal-01848442/
    'http://www.cs.ucl.ac.uk/staff/d.barber/brml.*',  # noqa Sometimes: Read timed out
    'https://compumedicsneuroscan.com/scan-acquire-configuration-files.*',  # noqa SSL certificate error as of 2021/09/28
    'https://chrisholdgraf.com',  # noqa Max retries exceeded sometimes
    'https://www.dtu.dk/english/service/phonebook/person.*',  # noqa Too slow
    'https://speakerdeck.com/dengemann/eeg-sensor-covariance-using-cross-validation',  # noqa Too slow
    'https://doi.org/10.1002/hbm.10024',  # noqa Too slow sometimes
    'https://www.researchgate.net',  # noqa As of 2022/05/31 we get "403 Forbidden" errors, might have to do with https://stackoverflow.com/questions/72347165 but not worth the effort to fix
]
linkcheck_anchors = False  # saves a bit of time
linkcheck_timeout = 15  # some can be quite slow

# autodoc / autosummary
autosummary_generate = True
autodoc_default_options = {'inherited-members': None}

# sphinxcontrib-bibtex
bibtex_bibfiles = ['./references.bib']
bibtex_style = 'unsrt'
bibtex_footbibliography_header = ''

# -- Nitpicky ----------------------------------------------------------------
# Warn on every unresolved reference, except the known-unresolvable targets
# below (mostly docstrings inherited from dict).
nitpicky = True
nitpick_ignore = [
    ("py:class", "None. Remove all items from D."),
    ("py:class", "a set-like object providing a view on D's items"),
    ("py:class", "a set-like object providing a view on D's keys"),
    ("py:class", "v, remove specified key and return the corresponding value."),  # noqa: E501
    ("py:class", "None. Update D from dict/iterable E and F."),
    ("py:class", "an object providing a view on D's values"),
    ("py:class", "a shallow copy of D"),
    ("py:class", "(k, v), remove and return some (key, value) pair as a"),
    ("py:class", "_FuncT"),  # type hint used in @verbose decorator
    ("py:class", "mne.utils._logging._FuncT"),
]
suppress_warnings = ['image.nonlocal_uri']  # we intentionally link outside
# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pydata_sphinx_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# Dev builds point the version switcher at 'dev'; releases use the X.Y version.
switcher_version_match = 'dev' if release.endswith('dev0') else version
html_theme_options = {
    'icon_links': [
        dict(name='GitHub',
             url='https://github.com/mne-tools/mne-python',
             icon='fab fa-github-square'),
        dict(name='Twitter',
             url='https://twitter.com/mne_python',
             icon='fab fa-twitter-square'),
        dict(name='Discourse',
             url='https://mne.discourse.group/',
             icon='fab fa-discourse'),
        dict(name='Discord',
             url='https://discord.gg/rKfvxTuATa',
             icon='fab fa-discord')
    ],
    'icon_links_label': 'Quick Links',  # for screen reader
    'use_edit_page_button': False,
    'navigation_with_keys': False,
    'show_toc_level': 1,
    'navbar_end': ['theme-switcher', 'version-switcher', 'navbar-icon-links'],
    'footer_items': ['copyright'],
    'google_analytics_id': 'UA-37225609-1',
    'switcher': {
        'json_url': 'https://mne.tools/dev/_static/versions.json',
        'version_match': switcher_version_match,
    }
}

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/mne_logo_small.svg"

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
    'style.css',
]

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = [
    'contributing.html',
    'documentation.html',
    'getting_started.html',
    'install_mne_python.html',
]

# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    'index': ['search-field.html', 'sidebar-quicklinks.html'],
}

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
html_copy_source = False

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False

# accommodate different logo shapes (width values in rem)
xs = '2'
sm = '2.5'
md = '3'
lg = '4.5'
xl = '5'
xxl = '6'
# variables to pass to HTML templating engine
html_context = {
    # NOTE(review): the env var arrives as a string; the False default only
    # works because int(False) == 0 — a '0' default would be clearer.
    'build_dev_html': bool(int(os.environ.get('BUILD_DEV_HTML', False))),
    'default_mode': 'auto',
    'pygment_light_style': 'tango',
    'pygment_dark_style': 'native',
    # Funder logos shown on the landing page; 'size' is a width in rem.
    'funders': [
        dict(img='nih.png', size='3', title='National Institutes of Health'),
        dict(img='nsf.png', size='3.5',
             title='US National Science Foundation'),
        dict(img='erc.svg', size='3.5', title='European Research Council'),
        dict(img='doe.svg', size='3', title='US Department of Energy'),
        dict(img='anr.svg', size='4.5',
             title='Agence Nationale de la Recherche'),
        dict(img='cds.png', size='2.25',
             title='Paris-Saclay Center for Data Science'),
        dict(img='google.svg', size='2.25', title='Google'),
        dict(img='amazon.svg', size='2.5', title='Amazon'),
        dict(img='czi.svg', size='2.5', title='Chan Zuckerberg Initiative'),
    ],
    # Institution logos; sizes use the xs/sm/md/lg/xl/xxl rem widths above.
    'institutions': [
        dict(name='Massachusetts General Hospital',
             img='MGH.svg',
             url='https://www.massgeneral.org/',
             size=sm),
        dict(name='Athinoula A. Martinos Center for Biomedical Imaging',
             img='Martinos.png',
             url='https://martinos.org/',
             size=md),
        dict(name='Harvard Medical School',
             img='Harvard.png',
             url='https://hms.harvard.edu/',
             size=sm),
        dict(name='Massachusetts Institute of Technology',
             img='MIT.svg',
             url='https://web.mit.edu/',
             size=md),
        dict(name='New York University',
             img='NYU.png',
             url='https://www.nyu.edu/',
             size=xs),
        dict(name='Commissariat à l´énergie atomique et aux énergies alternatives',  # noqa E501
             img='CEA.png',
             url='http://www.cea.fr/',
             size=md),
        dict(name='Aalto-yliopiston perustieteiden korkeakoulu',
             img='Aalto.svg',
             url='https://sci.aalto.fi/',
             size=md),
        dict(name='Télécom ParisTech',
             img='Telecom_Paris_Tech.svg',
             url='https://www.telecom-paris.fr/',
             size=md),
        dict(name='University of Washington',
             img='Washington.png',
             url='https://www.washington.edu/',
             size=md),
        dict(name='Institut du Cerveau et de la Moelle épinière',
             img='ICM.jpg',
             url='https://icm-institute.org/',
             size=md),
        dict(name='Boston University',
             img='BU.svg',
             url='https://www.bu.edu/',
             size=lg),
        dict(name='Institut national de la santé et de la recherche médicale',
             img='Inserm.svg',
             url='https://www.inserm.fr/',
             size=xl),
        dict(name='Forschungszentrum Jülich',
             img='Julich.svg',
             url='https://www.fz-juelich.de/',
             size=xl),
        dict(name='Technische Universität Ilmenau',
             img='Ilmenau.gif',
             url='https://www.tu-ilmenau.de/',
             size=xxl),
        dict(name='Berkeley Institute for Data Science',
             img='BIDS.png',
             url='https://bids.berkeley.edu/',
             size=lg),
        dict(name='Institut national de recherche en informatique et en automatique',  # noqa E501
             img='inria.png',
             url='https://www.inria.fr/',
             size=xl),
        dict(name='Aarhus Universitet',
             img='Aarhus.png',
             url='https://www.au.dk/',
             size=xl),
        dict(name='Karl-Franzens-Universität Graz',
             img='Graz.jpg',
             url='https://www.uni-graz.at/',
             size=md),
        dict(name='SWPS Uniwersytet Humanistycznospołeczny',
             img='SWPS.svg',
             url='https://www.swps.pl/',
             size=xl),
        dict(name='Max-Planck-Institut für Bildungsforschung',
             img='MPIB.svg',
             url='https://www.mpib-berlin.mpg.de/',
             size=xxl),
        dict(name='Macquarie University',
             img='Macquarie.png',
             url='https://www.mq.edu.au/',
             size=lg),
        dict(name='Children’s Hospital of Philadelphia Research Institute',
             img='CHOP.svg',
             url='https://imaging.research.chop.edu/',
             size=xxl),
    ],
    # \u00AD is an optional hyphen (not rendered unless needed)
    # If these are changed, the Makefile should be updated, too
    'carousel': [
        dict(title='Source Estimation',
             text='Distributed, sparse, mixed-norm, beam\u00ADformers, dipole fitting, and more.',  # noqa E501
             url='auto_tutorials/inverse/30_mne_dspm_loreta.html',
             img='sphx_glr_30_mne_dspm_loreta_008.gif',
             alt='dSPM'),
        dict(title='Machine Learning',
             text='Advanced decoding models including time general\u00ADiza\u00ADtion.',  # noqa E501
             url='auto_tutorials/machine-learning/50_decoding.html',
             img='sphx_glr_50_decoding_006.png',
             alt='Decoding'),
        dict(title='Encoding Models',
             text='Receptive field estima\u00ADtion with optional smooth\u00ADness priors.',  # noqa E501
             url='auto_tutorials/machine-learning/30_strf.html',
             img='sphx_glr_30_strf_001.png',
             alt='STRF'),
        dict(title='Statistics',
             text='Parametric and non-parametric, permutation tests and clustering.',  # noqa E501
             url='auto_tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.html',  # noqa E501
             img='sphx_glr_20_cluster_1samp_spatiotemporal_001.png',
             alt='Clusters'),
        dict(title='Connectivity',
             text='All-to-all spectral and effective connec\u00ADtivity measures.',  # noqa E501
             url='https://mne.tools/mne-connectivity/stable/auto_examples/mne_inverse_label_connectivity.html',  # noqa E501
             img='https://mne.tools/mne-connectivity/stable/_images/sphx_glr_mne_inverse_label_connectivity_001.png',  # noqa E501
             alt='Connectivity'),
        dict(title='Data Visualization',
             text='Explore your data from multiple perspectives.',
             url='auto_tutorials/evoked/20_visualize_evoked.html',
             img='sphx_glr_20_visualize_evoked_007.png',
             alt='Visualization'),
    ]
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'mne-doc'

# -- Options for LaTeX output ------------------------------------------------

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = []

# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo.png"

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_toplevel_sectioning = 'part'

# Snapshot NumPy's default print options so reset_warnings() can restore them
# after any tutorial calls np.set_printoptions().
_np_print_defaults = np.get_printoptions()
# -- Warnings management -----------------------------------------------------
def reset_warnings(gallery_conf, fname):
    """Ensure we are future compatible and ignore silly warnings."""
    # In principle, our examples should produce no warnings.
    # Here we cause warnings to become errors, with a few exceptions.
    # This list should be considered alongside
    # setup.cfg -> [tool:pytest] -> filterwarnings
    #
    # NOTE: filter order matters — each filterwarnings() call is inserted at
    # the FRONT of the filter list, so later calls take precedence over
    # earlier ones (and all of them over the 'error' catch-all below).

    # remove tweaks from other module imports or example runs
    warnings.resetwarnings()
    # restrict
    warnings.filterwarnings('error')
    # allow these, but show them
    warnings.filterwarnings('always', '.*non-standard config type: "foo".*')
    warnings.filterwarnings('always', '.*config type: "MNEE_USE_CUUDAA".*')
    warnings.filterwarnings('always', '.*cannot make axes width small.*')
    warnings.filterwarnings('always', '.*Axes that are not compatible.*')
    warnings.filterwarnings('always', '.*FastICA did not converge.*')
    # ECoG BIDS spec violations:
    warnings.filterwarnings('always', '.*Fiducial point nasion not found.*')
    warnings.filterwarnings('always', '.*DigMontage is only a subset of.*')
    warnings.filterwarnings(  # xhemi morph (should probably update sample)
        'always', '.*does not exist, creating it and saving it.*')
    warnings.filterwarnings('default', module='sphinx')  # internal warnings
    warnings.filterwarnings(
        'always', '.*converting a masked element to nan.*')  # matplotlib?
    # allow these warnings, but don't show them
    warnings.filterwarnings(
        'ignore', '.*OpenSSL\\.rand is deprecated.*')
    warnings.filterwarnings('ignore', '.*is currently using agg.*')
    warnings.filterwarnings(  # SciPy-related warning (maybe 1.2.0 will fix it)
        'ignore', '.*the matrix subclass is not the recommended.*')
    warnings.filterwarnings(  # some joblib warning
        'ignore', '.*semaphore_tracker: process died unexpectedly.*')
    warnings.filterwarnings(  # needed until SciPy 1.2.0 is released
        'ignore', '.*will be interpreted as an array index.*', module='scipy')
    warnings.filterwarnings(
        'ignore', '.*invalid escape sequence.*', lineno=90)  # quantities
    warnings.filterwarnings(
        'ignore', '.*invalid escape sequence.*', lineno=14)  # mne-connectivity
    warnings.filterwarnings(
        'ignore', '.*invalid escape sequence.*', lineno=281)  # mne-conn
    warnings.filterwarnings(
        'ignore', '.*"is not" with a literal.*', module='nilearn')
    warnings.filterwarnings(  # scikit-learn FastICA whiten=True deprecation
        'ignore', r'.*From version 1\.3 whiten.*')
    warnings.filterwarnings(  # seaborn -> pandas
        'ignore', '.*iteritems is deprecated and will be.*')
    warnings.filterwarnings(  # PyOpenGL for macOS
        'ignore', '.*PyOpenGL was not found.*')
    warnings.filterwarnings(  # macOS Epochs
        'ignore', '.*Plotting epochs on MacOS.*')
    # Bulk-ignore DeprecationWarnings whose message matches any of these keys.
    for key in ('HasTraits', r'numpy\.testing', 'importlib', r'np\.loads',
                'Using or importing the ABCs from',  # internal modules on 3.7
                r"it will be an error for 'np\.bool_'",  # ndimage
                "DocumenterBridge requires a state object",  # sphinx dev
                "'U' mode is deprecated",  # sphinx io
                r"joblib is deprecated in 0\.21",  # nilearn
                'The usage of `cmp` is deprecated and will',  # sklearn/pytest
                'scipy.* is deprecated and will be removed in',  # dipy
                r'Converting `np\.character` to a dtype is deprecated',  # vtk
                r'sphinx\.util\.smartypants is deprecated',
                'is a deprecated alias for the builtin',  # NumPy
                'the old name will be removed',  # Jinja, via sphinx
                r'Passing a schema to Validator\.iter_errors',  # jsonschema
                "default value of type 'dict' in an Any trait will",  # traits
                'rcParams is deprecated',  # PyVista rcParams -> global_theme
                'to mean no clipping',
                r'the `scipy\.ndimage.*` namespace is deprecated',  # Dipy
                '`np.MachAr` is deprecated',  # Numba
                'distutils Version classes are deprecated',  # pydata-sphinx-th
                'The module matplotlib.tight_layout is deprecated',  # nilearn
                ):
        warnings.filterwarnings(  # deal with other modules having bad imports
            'ignore', message=".*%s.*" % key, category=DeprecationWarning)
    warnings.filterwarnings(  # deal with bootstrap-theme bug
        'ignore', message=".*modify script_files in the theme.*",
        category=Warning)
    warnings.filterwarnings(  # nilearn
        'ignore', message=r'The sklearn.* module is.*', category=FutureWarning)
    warnings.filterwarnings(  # nilearn
        'ignore', message=r'Fetchers from the nilea.*', category=FutureWarning)
    warnings.filterwarnings(  # deal with other modules having bad imports
        'ignore', message=".*ufunc size changed.*", category=RuntimeWarning)
    warnings.filterwarnings(  # realtime
        'ignore', message=".*unclosed file.*", category=ResourceWarning)
    warnings.filterwarnings('ignore', message='Exception ignored in.*')
    # allow this ImportWarning, but don't show it
    warnings.filterwarnings(
        'ignore', message="can't resolve package from", category=ImportWarning)
    warnings.filterwarnings(
        'ignore', message='.*mne-realtime.*', category=DeprecationWarning)
    warnings.filterwarnings(
        'ignore', message=r'numpy\.ndarray size changed.*',
        category=RuntimeWarning)
    warnings.filterwarnings(
        'ignore', message=r'.*Setting theme=.*6 in qdarkstyle.*',
        category=RuntimeWarning)
    # In case we use np.set_printoptions in any tutorials, we only
    # want it to affect those:
    np.set_printoptions(**_np_print_defaults)


# Apply the filters for the conf.py import itself (Sphinx-Gallery re-applies
# them around every example script via the Resetter).
reset_warnings(None, None)
# -- Fontawesome support -----------------------------------------------------
# here the "fab" and "fas" refer to "brand" and "solid" (determines which font
# file to look in). "fw" indicates fixed width.
brand_icons = ('apple', 'linux', 'windows', 'discourse', 'python')
fixed_icons = (
    # homepage:
    'book', 'code-branch', 'newspaper', 'question-circle', 'quote-left',
    # contrib guide:
    'bug', 'comment', 'hand-sparkles', 'magic', 'pencil-alt', 'remove-format',
    'universal-access', 'discourse', 'python',
)
other_icons = (
    'hand-paper', 'question', 'rocket', 'server', 'code', 'desktop',
    'terminal', 'cloud-download-alt', 'wrench',
)
# Map each icon name to its CSS classes: brand vs. solid font, plus 'fa-fw'
# for the fixed-width ones (duplicate names collapse into a single entry).
icons = {
    icon: ('fab' if icon in brand_icons else 'fas',)
    + (('fa-fw',) if icon in fixed_icons else ())
    for icon in brand_icons + fixed_icons + other_icons
}
# Build the rST prolog: one |icon| raw-HTML substitution per entry.
chunks = []
for icon, classes in icons.items():
    tag = f'<i class="{" ".join(classes)} fa-{icon}"></i>'
    chunks.append(f'\n.. |{icon}| raw:: html\n{tag}\n')
prolog = ''.join(chunks)
prolog += '''
.. |fix-bug| raw:: html
<span class="fa-stack small-stack">
<i class="fas fa-bug fa-stack-1x"></i>
<i class="fas fa-ban fa-stack-2x"></i>
</span>
'''
prolog += '''
.. |ensp| unicode:: U+2002 .. EN SPACE
'''
# -- Dependency info ----------------------------------------------------------
# Determine the minimum supported Python version from the installed mne
# package metadata, exposed to rST as |min_python_version|.
try:
    from importlib.metadata import metadata  # new in Python 3.8
    min_py = metadata('mne')['Requires-Python']
except ModuleNotFoundError:
    # Fallback for Python < 3.8 via (deprecated) pkg_resources.
    from pkg_resources import get_distribution
    info = get_distribution('mne').get_metadata_lines('PKG-INFO')
    # NOTE(review): if no 'Requires-Python' line exists, min_py stays unbound
    # and the lstrip below raises NameError — verify this cannot happen.
    for line in info:
        if line.strip().startswith('Requires-Python'):
            min_py = line.split(':')[1]
# Strip comparison operators and whitespace, e.g. '>= 3.7' -> '3.7'.
min_py = min_py.lstrip(' =<>')
prolog += f'\n.. |min_python_version| replace:: {min_py}\n'
# -- website redirects --------------------------------------------------------
# Static list created 2021/04/13 based on what we needed to redirect,
# since we don't need to add redirects for examples added after this date.
# Bare filenames of gallery scripts that were renamed from ``plot_<name>.py``
# to ``<name>.py``; ``make_redirects`` below writes a ``plot_*.html`` redirect
# stub next to each rendered page.
needed_plot_redirects = {
    # tutorials
    '10_epochs_overview.py', '10_evoked_overview.py', '10_overview.py',
    '10_preprocessing_overview.py', '10_raw_overview.py',
    '10_reading_meg_data.py', '15_handling_bad_channels.py',
    '20_event_arrays.py', '20_events_from_raw.py', '20_reading_eeg_data.py',
    '20_rejecting_bad_data.py', '20_visualize_epochs.py',
    '20_visualize_evoked.py', '30_annotate_raw.py', '30_epochs_metadata.py',
    '30_filtering_resampling.py', '30_info.py', '30_reading_fnirs_data.py',
    '35_artifact_correction_regression.py', '40_artifact_correction_ica.py',
    '40_autogenerate_metadata.py', '40_sensor_locations.py',
    '40_visualize_raw.py', '45_projectors_background.py',
    '50_artifact_correction_ssp.py', '50_configure_mne.py',
    '50_epochs_to_data_frame.py', '55_setting_eeg_reference.py',
    '59_head_positions.py', '60_make_fixed_length_epochs.py',
    '60_maxwell_filtering_sss.py', '70_fnirs_processing.py',
    # examples
    '3d_to_2d.py', 'brainstorm_data.py', 'channel_epochs_image.py',
    'cluster_stats_evoked.py', 'compute_csd.py',
    'compute_mne_inverse_epochs_in_label.py',
    'compute_mne_inverse_raw_in_label.py', 'compute_mne_inverse_volume.py',
    'compute_source_psd_epochs.py', 'covariance_whitening_dspm.py',
    'custom_inverse_solver.py',
    'decoding_csp_eeg.py', 'decoding_csp_timefreq.py',
    'decoding_spatio_temporal_source.py', 'decoding_spoc_CMC.py',
    'decoding_time_generalization_conditions.py',
    'decoding_unsupervised_spatial_filter.py', 'decoding_xdawn_eeg.py',
    'define_target_events.py', 'dics_source_power.py', 'eeg_csd.py',
    'eeg_on_scalp.py', 'eeglab_head_sphere.py', 'elekta_epochs.py',
    'ems_filtering.py', 'eog_artifact_histogram.py', 'evoked_arrowmap.py',
    'evoked_ers_source_power.py', 'evoked_topomap.py', 'evoked_whitening.py',
    'fdr_stats_evoked.py', 'find_ref_artifacts.py',
    'fnirs_artifact_removal.py', 'forward_sensitivity_maps.py',
    'gamma_map_inverse.py', 'hf_sef_data.py', 'ica_comparison.py',
    'interpolate_bad_channels.py', 'label_activation_from_stc.py',
    'label_from_stc.py', 'label_source_activations.py',
    'left_cerebellum_volume_source.py', 'limo_data.py',
    'linear_model_patterns.py', 'linear_regression_raw.py',
    'meg_sensors.py', 'mixed_norm_inverse.py',
    'mixed_source_space_inverse.py',
    'mne_cov_power.py', 'mne_helmet.py', 'mne_inverse_coherence_epochs.py',
    'mne_inverse_envelope_correlation.py',
    'mne_inverse_envelope_correlation_volume.py',
    'mne_inverse_psi_visual.py',
    'morph_surface_stc.py', 'morph_volume_stc.py', 'movement_compensation.py',
    'movement_detection.py', 'multidict_reweighted_tfmxne.py',
    'muscle_detection.py', 'opm_data.py', 'otp.py', 'parcellation.py',
    'psf_ctf_label_leakage.py', 'psf_ctf_vertices.py',
    'psf_ctf_vertices_lcmv.py', 'publication_figure.py', 'rap_music.py',
    'read_inverse.py', 'read_neo_format.py', 'read_noise_covariance_matrix.py',
    'read_stc.py', 'receptive_field_mtrf.py', 'resolution_metrics.py',
    'resolution_metrics_eegmeg.py', 'roi_erpimage_by_rt.py',
    'sensor_noise_level.py',
    'sensor_permutation_test.py', 'sensor_regression.py',
    'shift_evoked.py', 'simulate_evoked_data.py', 'simulate_raw_data.py',
    'simulated_raw_data_using_subject_anatomy.py', 'snr_estimate.py',
    'source_label_time_frequency.py', 'source_power_spectrum.py',
    'source_power_spectrum_opm.py', 'source_simulator.py',
    'source_space_morphing.py', 'source_space_snr.py',
    'source_space_time_frequency.py', 'ssd_spatial_filters.py',
    'ssp_projs_sensitivity_map.py', 'temporal_whitening.py',
    'time_frequency_erds.py', 'time_frequency_global_field_power.py',
    'time_frequency_mixed_norm_inverse.py', 'time_frequency_simulated.py',
    'topo_compare_conditions.py', 'topo_customized.py',
    'vector_mne_solution.py', 'virtual_evoked.py', 'xdawn_denoising.py',
    'xhemi.py',
}
# Short aliases to keep the (long-URL) redirect table below readable.
ex = 'auto_examples'
co = 'connectivity'  # gallery section that moved to the mne-connectivity pkg
mne_conn = 'https://mne.tools/mne-connectivity/stable'
tu = 'auto_tutorials'
di = 'discussions'
sm = 'source-modeling'
fw = 'forward'
nv = 'inverse'
sn = 'stats-sensor-space'
sr = 'stats-source-space'
sd = 'sample-datasets'
ml = 'machine-learning'
tf = 'time-freq'
si = 'simulation'
# Old HTML path -> new HTML path (relative to outdir, or an absolute URL for
# pages that moved to other sites); consumed by ``make_redirects`` below.
custom_redirects = {
    # Custom redirects (one HTML path to another, relative to outdir)
    # can be added here as fr->to key->value mappings
    f'{tu}/evoked/plot_eeg_erp.html': f'{tu}/evoked/30_eeg_erp.html',
    f'{tu}/evoked/plot_whitened.html': f'{tu}/evoked/40_whitened.html',
    f'{tu}/misc/plot_modifying_data_inplace.html': f'{tu}/intro/15_inplace.html',  # noqa E501
    f'{tu}/misc/plot_report.html': f'{tu}/intro/70_report.html',
    f'{tu}/misc/plot_seeg.html': f'{tu}/clinical/20_seeg.html',
    f'{tu}/misc/plot_ecog.html': f'{tu}/clinical/30_ecog.html',
    f'{tu}/{ml}/plot_receptive_field.html': f'{tu}/{ml}/30_strf.html',
    f'{tu}/{ml}/plot_sensors_decoding.html': f'{tu}/{ml}/50_decoding.html',
    f'{tu}/{sm}/plot_background_freesurfer.html': f'{tu}/{fw}/10_background_freesurfer.html',  # noqa E501
    f'{tu}/{sm}/plot_source_alignment.html': f'{tu}/{fw}/20_source_alignment.html',  # noqa E501
    f'{tu}/{sm}/plot_forward.html': f'{tu}/{fw}/30_forward.html',
    f'{tu}/{sm}/plot_eeg_no_mri.html': f'{tu}/{fw}/35_eeg_no_mri.html',
    f'{tu}/{sm}/plot_background_freesurfer_mne.html': f'{tu}/{fw}/50_background_freesurfer_mne.html',  # noqa E501
    f'{tu}/{sm}/plot_fix_bem_in_blender.html': f'{tu}/{fw}/80_fix_bem_in_blender.html',  # noqa E501
    f'{tu}/{sm}/plot_compute_covariance.html': f'{tu}/{fw}/90_compute_covariance.html',  # noqa E501
    f'{tu}/{sm}/plot_object_source_estimate.html': f'{tu}/{nv}/10_stc_class.html',  # noqa E501
    f'{tu}/{sm}/plot_dipole_fit.html': f'{tu}/{nv}/20_dipole_fit.html',
    f'{tu}/{sm}/plot_mne_dspm_source_localization.html': f'{tu}/{nv}/30_mne_dspm_loreta.html',  # noqa E501
    f'{tu}/{sm}/plot_dipole_orientations.html': f'{tu}/{nv}/35_dipole_orientations.html',  # noqa E501
    f'{tu}/{sm}/plot_mne_solutions.html': f'{tu}/{nv}/40_mne_fixed_free.html',
    f'{tu}/{sm}/plot_beamformer_lcmv.html': f'{tu}/{nv}/50_beamformer_lcmv.html',  # noqa E501
    f'{tu}/{sm}/plot_visualize_stc.html': f'{tu}/{nv}/60_visualize_stc.html',
    f'{tu}/{sm}/plot_eeg_mri_coords.html': f'{tu}/{nv}/70_eeg_mri_coords.html',
    f'{tu}/{sd}/plot_brainstorm_phantom_elekta.html': f'{tu}/{nv}/80_brainstorm_phantom_elekta.html',  # noqa E501
    f'{tu}/{sd}/plot_brainstorm_phantom_ctf.html': f'{tu}/{nv}/85_brainstorm_phantom_ctf.html',  # noqa E501
    f'{tu}/{sd}/plot_phantom_4DBTi.html': f'{tu}/{nv}/90_phantom_4DBTi.html',
    f'{tu}/{sd}/plot_brainstorm_auditory.html': f'{tu}/io/60_ctf_bst_auditory.html',  # noqa E501
    f'{tu}/{sd}/plot_sleep.html': f'{tu}/clinical/60_sleep.html',
    f'{tu}/{di}/plot_background_filtering.html': f'{tu}/preprocessing/25_background_filtering.html',  # noqa E501
    f'{tu}/{di}/plot_background_statistics.html': f'{tu}/{sn}/10_background_stats.html',  # noqa E501
    f'{tu}/{sn}/plot_stats_cluster_erp.html': f'{tu}/{sn}/20_erp_stats.html',
    f'{tu}/{sn}/plot_stats_cluster_1samp_test_time_frequency.html': f'{tu}/{sn}/40_cluster_1samp_time_freq.html',  # noqa E501
    f'{tu}/{sn}/plot_stats_cluster_time_frequency.html': f'{tu}/{sn}/50_cluster_between_time_freq.html',  # noqa E501
    f'{tu}/{sn}/plot_stats_spatio_temporal_cluster_sensors.html': f'{tu}/{sn}/75_cluster_ftest_spatiotemporal.html',  # noqa E501
    f'{tu}/{sr}/plot_stats_cluster_spatio_temporal.html': f'{tu}/{sr}/20_cluster_1samp_spatiotemporal.html',  # noqa E501
    f'{tu}/{sr}/plot_stats_cluster_spatio_temporal_2samp.html': f'{tu}/{sr}/30_cluster_ftest_spatiotemporal.html',  # noqa E501
    f'{tu}/{sr}/plot_stats_cluster_spatio_temporal_repeated_measures_anova.html': f'{tu}/{sr}/60_cluster_rmANOVA_spatiotemporal.html',  # noqa E501
    f'{tu}/{sr}/plot_stats_cluster_time_frequency_repeated_measures_anova.html': f'{tu}/{sr}/70_cluster_rmANOVA_time_freq.html',  # noqa E501
    f'{tu}/{tf}/plot_sensors_time_frequency.html': f'{tu}/{tf}/20_sensors_time_frequency.html',  # noqa E501
    f'{tu}/{tf}/plot_ssvep.html': f'{tu}/{tf}/50_ssvep.html',
    f'{tu}/{si}/plot_creating_data_structures.html': f'{tu}/{si}/10_array_objs.html',  # noqa E501
    f'{tu}/{si}/plot_point_spread.html': f'{tu}/{si}/70_point_spread.html',
    f'{tu}/{si}/plot_dics.html': f'{tu}/{si}/80_dics.html',
    f'{ex}/{co}/mne_inverse_label_connectivity.html': f'{mne_conn}/{ex}/mne_inverse_label_connectivity.html',  # noqa E501
    f'{ex}/{co}/cwt_sensor_connectivity.html': f'{mne_conn}/{ex}/cwt_sensor_connectivity.html',  # noqa E501
    f'{ex}/{co}/mixed_source_space_connectivity.html': f'{mne_conn}/{ex}/mixed_source_space_connectivity.html',  # noqa E501
    f'{ex}/{co}/mne_inverse_coherence_epochs.html': f'{mne_conn}/{ex}/mne_inverse_coherence_epochs.html',  # noqa E501
    f'{ex}/{co}/mne_inverse_connectivity_spectrum.html': f'{mne_conn}/{ex}/mne_inverse_connectivity_spectrum.html',  # noqa E501
    f'{ex}/{co}/mne_inverse_envelope_correlation_volume.html': f'{mne_conn}/{ex}/mne_inverse_envelope_correlation_volume.html',  # noqa E501
    f'{ex}/{co}/mne_inverse_envelope_correlation.html': f'{mne_conn}/{ex}/mne_inverse_envelope_correlation.html',  # noqa E501
    f'{ex}/{co}/mne_inverse_psi_visual.html': f'{mne_conn}/{ex}/mne_inverse_psi_visual.html',  # noqa E501
    f'{ex}/{co}/sensor_connectivity.html': f'{mne_conn}/{ex}/sensor_connectivity.html',  # noqa E501
}
def make_redirects(app, exception):
    """Write HTML redirect stubs for renamed tutorial/example pages.

    Connected to the Sphinx ``build-finished`` event; does nothing unless
    the standalone HTML builder completed without error.
    """
    # https://www.sphinx-doc.org/en/master/extdev/appapi.html
    # Adapted from sphinxcontrib/redirects (BSD-2-Clause)
    if not (isinstance(app.builder,
                       sphinx.builders.html.StandaloneHTMLBuilder) and
            exception is None):
        return
    logger = sphinx.util.logging.getLogger('mne')
    # Minimal page that bounces the browser to {to} (meta refresh + JS).
    TEMPLATE = """\
<!DOCTYPE HTML>
<html lang="en-US">
    <head>
        <meta charset="UTF-8">
        <meta http-equiv="refresh" content="1; url={to}">
        <script type="text/javascript">
            window.location.href = "{to}"
        </script>
        <title>Page Redirection</title>
    </head>
    <body>
        If you are not redirected automatically, follow this <a href='{to}'>link</a>.
    </body>
</html>"""  # noqa: E501
    sphinx_gallery_conf = app.config['sphinx_gallery_conf']
    for src_dir, out_dir in zip(sphinx_gallery_conf['examples_dirs'],
                                sphinx_gallery_conf['gallery_dirs']):
        root = os.path.abspath(os.path.join(app.srcdir, src_dir))
        # Gallery scripts in this tree that used to carry a plot_ prefix.
        fnames = [os.path.join(os.path.relpath(dirpath, root), fname)
                  for dirpath, _, fnames in os.walk(root)
                  for fname in fnames
                  if fname in needed_plot_redirects]
        # plot_ redirects
        for fname in fnames:
            dirname = os.path.join(app.outdir, out_dir, os.path.dirname(fname))
            to_fname = os.path.splitext(os.path.basename(fname))[0] + '.html'
            fr_fname = f'plot_{to_fname}'
            to_path = os.path.join(dirname, to_fname)
            fr_path = os.path.join(dirname, fr_fname)
            # The rendered target must already exist or the redirect is bogus.
            assert os.path.isfile(to_path), (fname, to_path)
            with open(fr_path, 'w') as fid:
                fid.write(TEMPLATE.format(to=to_fname))
        logger.info(
            f'Added {len(fnames):3d} HTML plot_* redirects for {out_dir}')
    # custom redirects
    for fr, to in custom_redirects.items():
        if not to.startswith('http'):
            assert os.path.isfile(os.path.join(app.outdir, to)), to
            # handle links to sibling folders
            path_parts = to.split('/')
            assert tu in path_parts, path_parts  # need to refactor otherwise
            path_parts = ['..'] + path_parts[(path_parts.index(tu) + 1):]
            to = os.path.join(*path_parts)
        assert to.endswith('html'), to
        fr_path = os.path.join(app.outdir, fr)
        assert fr_path.endswith('html'), fr_path
        # allow overwrite if existing file is just a redirect
        if os.path.isfile(fr_path):
            with open(fr_path, 'r') as fid:
                # NOTE(review): assumes <title>Page Redirection</title> sits on
                # line 9 of an existing stub — matches TEMPLATE above; confirm
                # if the template layout ever changes.
                for _ in range(8):
                    next(fid)
                line = fid.readline()
                assert 'Page Redirection' in line, line
        # handle folders that no longer exist
        if fr_path.split('/')[-2] in (
                'misc', 'discussions', 'source-modeling', 'sample-datasets',
                'connectivity'):
            os.makedirs(os.path.dirname(fr_path), exist_ok=True)
        with open(fr_path, 'w') as fid:
            fid.write(TEMPLATE.format(to=to))
    logger.info(
        f'Added {len(custom_redirects):3d} HTML custom redirects')
def make_version(app, exception):
    """Write the current git commit hash to ``_version.txt`` in the build."""
    # Only act when the standalone HTML builder finished successfully.
    built_html = isinstance(
        app.builder, sphinx.builders.html.StandaloneHTMLBuilder)
    if exception is not None or not built_html:
        return
    logger = sphinx.util.logging.getLogger('mne')
    try:
        stdout, _ = run_subprocess(['git', 'rev-parse', 'HEAD'], verbose=False)
    except Exception as exc:
        # Best effort: building outside a git checkout should not fail.
        logger.warning(f'Failed to write _version.txt: {exc}')
        return
    version_path = os.path.join(app.outdir, '_version.txt')
    with open(version_path, 'w') as fid:
        fid.write(stdout)
    logger.info(f'Added "{stdout.rstrip()}" > _version.txt')
# -- Connect our handlers to the main Sphinx app ---------------------------


def setup(app):
    """Set up the Sphinx app."""
    # Inject sphinx-gallery minigallery backreferences into docstrings.
    app.connect('autodoc-process-docstring', append_attr_meth_examples)
    if report_scraper is not None:
        # NOTE(review): rst_prolog and the copyfiles hook are only wired up
        # when the 3D backend loaded (report_scraper is set above) — confirm
        # that prolog being skipped otherwise is intended.
        report_scraper.app = app
        app.config.rst_prolog = prolog
        app.connect('builder-inited', report_scraper.copyfiles)
    sphinx_logger = sphinx.util.logging.getLogger('mne')
    sphinx_logger.info(
        f'Building documentation for MNE {release} ({mne.__file__})')
    sphinx_logger.info(f'Building with scrapers={scrapers}')
    # Post-build steps: HTML redirects and the git version stamp.
    app.connect('build-finished', make_redirects)
    app.connect('build-finished', make_version)
[MAINT, MRG] Fix nilearn intersphinx (#10825)
* fix warning
* fix nilearn, moved to stable
* revert
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
from datetime import datetime, timezone
import faulthandler
import gc
import os
import subprocess
import sys
import time
import warnings
import numpy as np
import matplotlib
import sphinx
from sphinx_gallery.sorting import FileNameSortKey, ExplicitOrder
from numpydoc import docscrape
import mne
from mne.fixes import _compare_version
from mne.tests.test_docstring_parameters import error_ignores
from mne.utils import (linkcode_resolve, # noqa, analysis:ignore
_assert_no_instances, sizeof_fmt, run_subprocess)
from mne.viz import Brain # noqa
matplotlib.use('agg')  # non-interactive backend: the doc build has no display
faulthandler.enable()  # dump tracebacks on hard crashes (e.g. segfaults)
# Configure the MNE data browser / 3D viewer via environment variables so the
# gallery screenshots are deterministic (presumably read inside mne — verify).
os.environ['_MNE_BROWSER_NO_BLOCK'] = 'true'
os.environ['MNE_BROWSER_OVERVIEW_MODE'] = 'hidden'
os.environ['MNE_BROWSER_THEME'] = 'light'
os.environ['MNE_3D_OPTION_THEME'] = 'light'
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curdir = os.path.dirname(__file__)
# Make the mne package and the local sphinxext/ helpers importable.
for _parts in (('..', 'mne'), ('sphinxext',)):
    sys.path.append(os.path.abspath(os.path.join(curdir, *_parts)))
# -- Project information -----------------------------------------------------
project = 'MNE'
td = datetime.now(tz=timezone.utc)
# We need to triage which date type we use so that incremental builds work
# (Sphinx looks at variable changes and rewrites all files if some change)
# Full variant: embeds the build timestamp plus a JS snippet that localizes it
# in the reader's browser.
copyright = (
    f'2012–{td.year}, MNE Developers. Last updated <time datetime="{td.isoformat()}" class="localized">{td.strftime("%Y-%m-%d %H:%M %Z")}</time>\n'  # noqa: E501
    '<script type="text/javascript">$(function () { $("time.localized").each(function () { var el = $(this); el.text(new Date(el.attr("datetime")).toLocaleString([], {dateStyle: "medium", timeStyle: "long"})); }); } )</script>')  # noqa: E501
# Date-free variant keeps incremental local builds from rebuilding everything.
if os.getenv('MNE_FULL_DATE', 'false').lower() != 'true':
    copyright = f'2012–{td.year}, MNE Developers. Last updated locally.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = mne.__version__
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.linkcode',
    'sphinx.ext.mathjax',
    'sphinx.ext.todo',
    'sphinx.ext.graphviz',
    'numpydoc',
    'sphinx_gallery.gen_gallery',
    # local extensions living in doc/sphinxext/ (added to sys.path above)
    'gen_commands',
    'gh_substitutions',
    'mne_substitutions',
    'newcontrib_substitutions',
    'gen_names',
    'sphinxcontrib.bibtex',
    'sphinx_copybutton',
    'sphinx_design',
    'sphinxcontrib.youtube'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_includes']

# The suffix of source filenames.
source_suffix = '.rst'

# The main toctree document.
master_doc = 'index'

# List of documents that shouldn't be included in the build.
unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "py:obj"

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mne.']

# -- Sphinx-Copybutton configuration -----------------------------------------
# Strip Python/IPython/shell prompts from copied code snippets.
copybutton_prompt_text = r">>> |\.\.\. |\$ "
copybutton_prompt_is_regexp = True
# -- Intersphinx configuration -----------------------------------------------
# Project name -> (docs base URL, inventory file); ``None`` means use the
# default ``objects.inv`` at the base URL.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
    'numpy': ('https://numpy.org/devdocs', None),
    'scipy': ('https://scipy.github.io/devdocs', None),
    'matplotlib': ('https://matplotlib.org/stable', None),
    'sklearn': ('https://scikit-learn.org/stable', None),
    'numba': ('https://numba.pydata.org/numba-doc/latest', None),
    'joblib': ('https://joblib.readthedocs.io/en/latest', None),
    'nibabel': ('https://nipy.org/nibabel', None),
    'nilearn': ('http://nilearn.github.io/stable', None),
    'nitime': ('https://nipy.org/nitime/', None),
    'surfer': ('https://pysurfer.github.io/', None),
    'mne_bids': ('https://mne.tools/mne-bids/stable', None),
    'mne-connectivity': ('https://mne.tools/mne-connectivity/stable', None),
    'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
    'seaborn': ('https://seaborn.pydata.org/', None),
    'statsmodels': ('https://www.statsmodels.org/dev', None),
    'patsy': ('https://patsy.readthedocs.io/en/latest', None),
    'pyvista': ('https://docs.pyvista.org', None),
    'imageio': ('https://imageio.readthedocs.io/en/latest', None),
    'mne_realtime': ('https://mne.tools/mne-realtime', None),
    'picard': ('https://pierreablin.github.io/picard/', None),
    'qdarkstyle': ('https://qdarkstylesheet.readthedocs.io/en/latest', None),
    'eeglabio': ('https://eeglabio.readthedocs.io/en/latest', None),
    # dipy hosts its inventory at a non-default location
    'dipy': ('https://dipy.org/documentation/latest/',
             'https://dipy.org/documentation/latest/objects.inv/'),
    'pooch': ('https://www.fatiando.org/pooch/latest/', None),
    'pybv': ('https://pybv.readthedocs.io/en/latest/', None),
}
# NumPyDoc configuration -----------------------------------------------------

# Define what extra methods numpydoc will document
docscrape.ClassDoc.extra_public_methods = mne.utils._doc_special_members
numpydoc_class_members_toctree = False
numpydoc_attributes_as_param_list = True
# Turn parameter types in docstrings into intersphinx cross-references.
numpydoc_xref_param_type = True
# Short names used in docstrings -> fully qualified targets for
# cross-reference linking.
numpydoc_xref_aliases = {
    # Python
    'file-like': ':term:`file-like <python:file object>`',
    'iterator': ':term:`iterator <python:iterator>`',
    'path-like': ':term:`path-like`',
    'array-like': ':term:`array-like`',
    'Path': ':class:`python:pathlib.Path`',
    # Matplotlib
    'colormap': ':doc:`colormap <matplotlib:tutorials/colors/colormaps>`',
    'color': ':doc:`color <matplotlib:api/colors_api>`',
    'Axes': 'matplotlib.axes.Axes',
    'Figure': 'matplotlib.figure.Figure',
    'Axes3D': 'mpl_toolkits.mplot3d.axes3d.Axes3D',
    'ColorbarBase': 'matplotlib.colorbar.ColorbarBase',
    # sklearn
    'LeaveOneOut': 'sklearn.model_selection.LeaveOneOut',
    # joblib
    'joblib.Parallel': 'joblib.Parallel',
    # nibabel
    'Nifti1Image': 'nibabel.nifti1.Nifti1Image',
    'Nifti2Image': 'nibabel.nifti2.Nifti2Image',
    'SpatialImage': 'nibabel.spatialimages.SpatialImage',
    # MNE
    'Label': 'mne.Label', 'Forward': 'mne.Forward', 'Evoked': 'mne.Evoked',
    'Info': 'mne.Info', 'SourceSpaces': 'mne.SourceSpaces',
    'SourceMorph': 'mne.SourceMorph',
    'Epochs': 'mne.Epochs', 'Layout': 'mne.channels.Layout',
    'EvokedArray': 'mne.EvokedArray', 'BiHemiLabel': 'mne.BiHemiLabel',
    'AverageTFR': 'mne.time_frequency.AverageTFR',
    'EpochsTFR': 'mne.time_frequency.EpochsTFR',
    'Raw': 'mne.io.Raw', 'ICA': 'mne.preprocessing.ICA',
    'Covariance': 'mne.Covariance', 'Annotations': 'mne.Annotations',
    'DigMontage': 'mne.channels.DigMontage',
    'VectorSourceEstimate': 'mne.VectorSourceEstimate',
    'VolSourceEstimate': 'mne.VolSourceEstimate',
    'VolVectorSourceEstimate': 'mne.VolVectorSourceEstimate',
    'MixedSourceEstimate': 'mne.MixedSourceEstimate',
    'MixedVectorSourceEstimate': 'mne.MixedVectorSourceEstimate',
    'SourceEstimate': 'mne.SourceEstimate', 'Projection': 'mne.Projection',
    'ConductorModel': 'mne.bem.ConductorModel',
    'Dipole': 'mne.Dipole', 'DipoleFixed': 'mne.DipoleFixed',
    'InverseOperator': 'mne.minimum_norm.InverseOperator',
    'CrossSpectralDensity': 'mne.time_frequency.CrossSpectralDensity',
    # NOTE: duplicate 'SourceMorph' and 'Forward' entries removed here; they
    # repeated earlier keys with identical values, so dict semantics (last
    # key wins) are unchanged.
    'Xdawn': 'mne.preprocessing.Xdawn',
    'Report': 'mne.Report',
    'TimeDelayingRidge': 'mne.decoding.TimeDelayingRidge',
    'Vectorizer': 'mne.decoding.Vectorizer',
    'UnsupervisedSpatialFilter': 'mne.decoding.UnsupervisedSpatialFilter',
    'TemporalFilter': 'mne.decoding.TemporalFilter',
    'SSD': 'mne.decoding.SSD',
    'Scaler': 'mne.decoding.Scaler', 'SPoC': 'mne.decoding.SPoC',
    'PSDEstimator': 'mne.decoding.PSDEstimator',
    'LinearModel': 'mne.decoding.LinearModel',
    'FilterEstimator': 'mne.decoding.FilterEstimator',
    'EMS': 'mne.decoding.EMS', 'CSP': 'mne.decoding.CSP',
    'Beamformer': 'mne.beamformer.Beamformer',
    'Transform': 'mne.transforms.Transform',
    'Coregistration': 'mne.coreg.Coregistration',
    'Figure3D': 'mne.viz.Figure3D',
    # dipy
    'dipy.align.AffineMap': 'dipy.align.imaffine.AffineMap',
    'dipy.align.DiffeomorphicMap': 'dipy.align.imwarp.DiffeomorphicMap',
}
# Words/identifiers that look like types in docstrings but should never be
# turned into cross-references.
numpydoc_xref_ignore = {
    # words
    'instance', 'instances', 'of', 'default', 'shape', 'or',
    'with', 'length', 'pair', 'matplotlib', 'optional', 'kwargs', 'in',
    'dtype', 'object',
    # shapes
    'n_vertices', 'n_faces', 'n_channels', 'm', 'n', 'n_events', 'n_colors',
    'n_times', 'obj', 'n_chan', 'n_epochs', 'n_picks', 'n_ch_groups',
    'n_dipoles', 'n_ica_components', 'n_pos', 'n_node_names', 'n_tapers',
    'n_signals', 'n_step', 'n_freqs', 'wsize', 'Tx', 'M', 'N', 'p', 'q', 'r',
    'n_observations', 'n_regressors', 'n_cols', 'n_frequencies', 'n_tests',
    'n_samples', 'n_permutations', 'nchan', 'n_points', 'n_features',
    'n_parts', 'n_features_new', 'n_components', 'n_labels', 'n_events_in',
    'n_splits', 'n_scores', 'n_outputs', 'n_trials', 'n_estimators', 'n_tasks',
    'nd_features', 'n_classes', 'n_targets', 'n_slices', 'n_hpi', 'n_fids',
    'n_elp', 'n_pts', 'n_tris', 'n_nodes', 'n_nonzero', 'n_events_out',
    'n_segments', 'n_orient_inv', 'n_orient_fwd', 'n_orient', 'n_dipoles_lcmv',
    'n_dipoles_fwd', 'n_picks_ref', 'n_coords', 'n_meg', 'n_good_meg',
    'n_moments', 'n_patterns', 'n_new_events',
    # Undocumented (on purpose)
    'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi',
    'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', 'RawBOXY',
    'RawPersyst', 'RawNihon', 'RawNedf', 'RawHitachi',
    # sklearn subclasses
    'mapping', 'to', 'any',
    # unlinkable
    'CoregistrationUI',
    'IntracranialElectrodeLocator',
    'mne_qt_browser.figure.MNEQtBrowser',
}
# Run numpydoc's docstring validation during the build; error_ignores comes
# from mne's own docstring test suite so the two stay in sync.
numpydoc_validate = True
numpydoc_validation_checks = {'all'} | set(error_ignores)
numpydoc_validation_exclude = {  # set of regex
    # dict subclasses
    r'\.clear', r'\.get$', r'\.copy$', r'\.fromkeys', r'\.items', r'\.keys',
    r'\.pop', r'\.popitem', r'\.setdefault', r'\.update', r'\.values',
    # list subclasses
    r'\.append', r'\.count', r'\.extend', r'\.index', r'\.insert', r'\.remove',
    r'\.sort',
    # we currently don't document these properly (probably okay)
    r'\.__getitem__', r'\.__contains__', r'\.__hash__', r'\.__mul__',
    r'\.__sub__', r'\.__add__', r'\.__iter__', r'\.__div__', r'\.__neg__',
    # copied from sklearn
    r'mne\.utils\.deprecated',
    # deprecations
    r'mne\.connectivity\.degree', r'mne\.connectivity\.seed_target_indices',
    r'mne\.viz\.plot_sensors_connectivity',
    r'mne\.viz\.plot_connectivity_circle',
}
# -- Sphinx-gallery configuration --------------------------------------------


class Resetter(object):
    """Simple class to make the str(obj) static for Sphinx build env hash."""

    def __init__(self):
        # Build start time, used for the optional timing/memory stamps below.
        self.t0 = time.time()

    def __repr__(self):
        # Static repr keeps the Sphinx build-environment hash stable.
        return f'<{self.__class__.__name__}>'

    def __call__(self, gallery_conf, fname, when):
        """Reset state around each gallery script and check for leaked objects."""
        import matplotlib.pyplot as plt
        # Optional heavy dependencies: treat each as absent when unimportable
        # so the corresponding leak check is skipped.
        try:
            from pyvista import Plotter  # noqa
        except ImportError:
            Plotter = None  # noqa
        try:
            from pyvistaqt import BackgroundPlotter  # noqa
        except ImportError:
            BackgroundPlotter = None  # noqa
        try:
            from vtkmodules.vtkCommonDataModel import vtkPolyData  # noqa
        except ImportError:
            vtkPolyData = None  # noqa
        try:
            from mne_qt_browser._pg_figure import MNEQtBrowser
        except ImportError:
            MNEQtBrowser = None
        from mne.viz.backends.renderer import backend
        _Renderer = backend._Renderer if backend is not None else None
        reset_warnings(gallery_conf, fname)
        # in case users have interactive mode turned on in matplotlibrc,
        # turn it off here (otherwise the build can be very slow)
        plt.ioff()
        plt.rcParams['animation.embed_limit'] = 30.
        # neo holds on to an exception, which in turn holds a stack frame,
        # which will keep alive the global vars during SG execution
        try:
            import neo
            neo.io.stimfitio.STFIO_ERR = None
        except Exception:
            pass
        gc.collect()
        when = f'mne/conf.py:Resetter.__call__:{when}:{fname}'
        # Support stuff like
        # MNE_SKIP_INSTANCE_ASSERTIONS="Brain,Plotter,BackgroundPlotter,vtkPolyData,_Renderer" make html_dev-memory  # noqa: E501
        # to just test MNEQtBrowser
        skips = os.getenv('MNE_SKIP_INSTANCE_ASSERTIONS', '').lower()
        prefix = ''
        if skips not in ('true', '1', 'all'):
            prefix = 'Clean '
            skips = skips.split(',')
            if 'brain' not in skips:
                _assert_no_instances(Brain, when)  # calls gc.collect()
            if Plotter is not None and 'plotter' not in skips:
                _assert_no_instances(Plotter, when)
            if BackgroundPlotter is not None and \
                    'backgroundplotter' not in skips:
                _assert_no_instances(BackgroundPlotter, when)
            if vtkPolyData is not None and 'vtkpolydata' not in skips:
                _assert_no_instances(vtkPolyData, when)
            if '_renderer' not in skips:
                _assert_no_instances(_Renderer, when)
            if MNEQtBrowser is not None and \
                    'mneqtbrowser' not in skips:
                # Ensure any manual fig.close() events get properly handled
                from mne_qt_browser._pg_figure import QApplication
                inst = QApplication.instance()
                if inst is not None:
                    for _ in range(2):
                        inst.processEvents()
                _assert_no_instances(MNEQtBrowser, when)
        # This will overwrite some Sphinx printing but it's useful
        # for memory timestamps
        if os.getenv('SG_STAMP_STARTS', '').lower() == 'true':
            import psutil
            process = psutil.Process(os.getpid())
            mem = sizeof_fmt(process.memory_info().rss)
            print(f'{prefix}{time.time() - self.t0:6.1f} s : {mem}'.ljust(22))
# Source and output directories for sphinx-gallery (parallel lists).
examples_dirs = ['../tutorials', '../examples']
gallery_dirs = ['auto_tutorials', 'auto_examples']
# Signal to mne that a doc build is running (presumably checked inside the
# package to adjust defaults — verify against mne.utils).
os.environ['_MNE_BUILDING_DOC'] = 'true'
# Figure scrapers for sphinx-gallery; start with matplotlib and add 3D/Qt
# scrapers only when the corresponding backends actually load here.
scrapers = ('matplotlib',)
try:
    mne.viz.set_3d_backend(mne.viz.get_3d_backend())
except Exception:
    # No usable 3D backend: report scraping is disabled (checked in setup()).
    report_scraper = None
else:
    backend = mne.viz.get_3d_backend()
    if backend in ('notebook', 'pyvistaqt'):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            import pyvista
        pyvista.OFF_SCREEN = False
        scrapers += (
            mne.gui._GUIScraper(),
            mne.viz._brain._BrainScraper(),
            'pyvista',
        )
    report_scraper = mne.report._ReportScraper()
    scrapers += (report_scraper,)
    del backend
# Scrape the Qt data browser too, but only for mne-qt-browser >= 0.2.
try:
    import mne_qt_browser
    _min_ver = _compare_version(mne_qt_browser.__version__, '>=', '0.2')
    if mne.viz.get_browser_backend() == 'qt' and _min_ver:
        scrapers += (mne.viz._scraper._MNEQtBrowserScraper(),)
except ImportError:
    pass
compress_images = ('images', 'thumbnails')
# let's make things easier on Windows users
# (on Linux and macOS it's easy enough to require this)
if sys.platform.startswith('win'):
    try:
        subprocess.check_call(['optipng', '--version'])
    except Exception:
        # optipng not installed: skip image compression rather than failing.
        compress_images = ()
# Main sphinx-gallery configuration; see the sphinx-gallery docs for the
# meaning of each key.
sphinx_gallery_conf = {
    'doc_module': ('mne',),
    'reference_url': dict(mne=None),
    'examples_dirs': examples_dirs,
    # Fixed ordering of gallery sections in the rendered docs.
    'subsection_order': ExplicitOrder(['../examples/io/',
                                       '../examples/simulation/',
                                       '../examples/preprocessing/',
                                       '../examples/visualization/',
                                       '../examples/time_frequency/',
                                       '../examples/stats/',
                                       '../examples/decoding/',
                                       '../examples/connectivity/',
                                       '../examples/forward/',
                                       '../examples/inverse/',
                                       '../examples/realtime/',
                                       '../examples/datasets/',
                                       '../tutorials/intro/',
                                       '../tutorials/io/',
                                       '../tutorials/raw/',
                                       '../tutorials/preprocessing/',
                                       '../tutorials/epochs/',
                                       '../tutorials/evoked/',
                                       '../tutorials/time-freq/',
                                       '../tutorials/forward/',
                                       '../tutorials/inverse/',
                                       '../tutorials/stats-sensor-space/',
                                       '../tutorials/stats-source-space/',
                                       '../tutorials/machine-learning/',
                                       '../tutorials/clinical/',
                                       '../tutorials/simulation/',
                                       '../tutorials/sample-datasets/',
                                       '../tutorials/misc/']),
    'gallery_dirs': gallery_dirs,
    'default_thumb_file': os.path.join('_static', 'mne_helmet.png'),
    'backreferences_dir': 'generated',
    'plot_gallery': 'True',  # Avoid annoying Unicode/bool default warning
    'thumbnail_size': (160, 112),
    'remove_config_comments': True,
    'min_reported_time': 1.,
    'abort_on_example_error': False,
    'reset_modules': ('matplotlib', Resetter()),  # called w/each script
    'reset_modules_order': 'both',
    'image_scrapers': scrapers,
    'show_memory': not sys.platform.startswith(('win', 'darwin')),
    'line_numbers': False,  # messes with style
    'within_subsection_order': FileNameSortKey,
    'capture_repr': ('_repr_html_',),
    'junit': os.path.join('..', 'test-results', 'sphinx-gallery', 'junit.xml'),
    'matplotlib_animations': True,
    'compress_images': compress_images,
    # Run every script except those whose name contains "sgskip".
    'filename_pattern': '^((?!sgskip).)*$',
}
# Files were renamed from plot_* with:
# find . -type f -name 'plot_*.py' -exec sh -c 'x="{}"; xn=`basename "${x}"`; git mv "$x" `dirname "${x}"`/${xn:5}' \; # noqa
def append_attr_meth_examples(app, what, name, obj, options, lines):
    """Append SG examples backreferences to method and attr docstrings.

    Connected to the ``autodoc-process-docstring`` event; mutates ``lines``
    in place to add a mini-gallery of examples using the documented object.
    """
    # NumpyDoc nicely embeds method and attribute docstrings for us, but it
    # does not respect the autodoc templates that would otherwise insert
    # the .. include:: lines, so we need to do it.
    # Eventually this could perhaps live in SG.
    if what in ('attribute', 'method'):
        backref = os.path.join(
            os.path.dirname(__file__), 'generated', '%s.examples' % (name,))
        # The .examples file only exists once sphinx-gallery has generated
        # backreferences; guard against a missing file so a partial or
        # doc-only build does not crash with FileNotFoundError.
        size = os.path.getsize(backref) if os.path.isfile(backref) else 0
        if size > 0:
            lines += """
.. _sphx_glr_backreferences_{1}:
.. rubric:: Examples using ``{0}``:
.. minigallery:: {1}
""".format(name.split('.')[-1], name).split('\n')
# -- Other extension configuration -------------------------------------------
# Use a browser-like UA; several linked sites return 403 for the default one.
linkcheck_request_headers = dict(user_agent='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36')  # noqa: E501
# URLs known to fail linkcheck for environmental reasons (SSL problems,
# timeouts, bot-blocking); each entry documents the observed error.
linkcheck_ignore = [  # will be compiled to regex
    r'https://datashare.is.ed.ac.uk/handle/10283/2189\?show=full',  # noqa Max retries exceeded with url: /handle/10283/2189?show=full (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1123)')))
    'https://doi.org/10.1002/mds.870120629',  # Read timed out.
    'https://doi.org/10.1088/0031-9155/32/1/004',  # noqa Read timed out. (read timeout=15)
    'https://doi.org/10.1088/0031-9155/40/3/001',  # noqa Read timed out. (read timeout=15)
    'https://doi.org/10.1088/0031-9155/51/7/008',  # noqa Read timed out. (read timeout=15)
    'https://doi.org/10.1088/0031-9155/57/7/1937',  # noqa Read timed out. (read timeout=15)
    'https://doi.org/10.1088/0967-3334/22/4/305',  # noqa Read timed out. (read timeout=15)
    'https://doi.org/10.1088/1741-2552/aacfe4',  # noqa Read timed out. (read timeout=15)
    'https://doi.org/10.1093/sleep/18.7.557',  # noqa 403 Client Error: Forbidden for url: https://academic.oup.com/sleep/article-lookup/doi/10.1093/sleep/18.7.557
    'https://doi.org/10.1162/089976699300016719',  # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/neco/article/11/2/417-441/6242
    'https://doi.org/10.1162/jocn.1993.5.2.162',  # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/jocn/article/5/2/162-176/3095
    'https://doi.org/10.1162/neco.1995.7.6.1129',  # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/neco/article/7/6/1129-1159/5909
    'https://doi.org/10.1162/jocn_a_00405',  # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/jocn/article/25/9/1477-1492/27980
    'https://doi.org/10.1167/15.6.4',  # noqa 403 Client Error: Forbidden for url: https://jov.arvojournals.org/article.aspx?doi=10.1167/15.6.4
    'https://doi.org/10.7488/ds/1556',  # noqa Max retries exceeded with url: /handle/10283/2189 (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1122)')))
    'https://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach',  # noqa Max retries exceeded with url: /imaging/MniTalairach (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1122)')))
    'https://www.nyu.edu/',  # noqa Max retries exceeded with url: / (Caused by SSLError(SSLError(1, '[SSL: DH_KEY_TOO_SMALL] dh key too small (_ssl.c:1122)')))
    'https://docs.python.org/3/library/.*',  # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
    'https://hal.archives-ouvertes.fr/hal-01848442.*',  # noqa Sometimes: 503 Server Error: Service Unavailable for url: https://hal.archives-ouvertes.fr/hal-01848442/
    'http://www.cs.ucl.ac.uk/staff/d.barber/brml.*',  # noqa Sometimes: Read timed out
    'https://compumedicsneuroscan.com/scan-acquire-configuration-files.*',  # noqa SSL certificate error as of 2021/09/28
    'https://chrisholdgraf.com',  # noqa Max retries exceeded sometimes
    'https://www.dtu.dk/english/service/phonebook/person.*',  # noqa Too slow
    'https://speakerdeck.com/dengemann/eeg-sensor-covariance-using-cross-validation',  # noqa Too slow
    'https://doi.org/10.1002/hbm.10024',  # noqa Too slow sometimes
    'https://www.researchgate.net',  # noqa As of 2022/05/31 we get "403 Forbidden" errors, might have to do with https://stackoverflow.com/questions/72347165 but not worth the effort to fix
]
linkcheck_anchors = False  # saves a bit of time
linkcheck_timeout = 15  # some can be quite slow
# autodoc / autosummary
autosummary_generate = True
autodoc_default_options = {'inherited-members': None}
# sphinxcontrib-bibtex
bibtex_bibfiles = ['./references.bib']
bibtex_style = 'unsrt'
bibtex_footbibliography_header = ''
# -- Nitpicky ----------------------------------------------------------------
# Fail on broken cross-references, except for the entries below, which are
# docstring summary fragments from inherited builtin dict/set methods that
# Sphinx mistakes for class names, plus internal type-hint aliases.
nitpicky = True
nitpick_ignore = [
    ("py:class", "None. Remove all items from D."),
    ("py:class", "a set-like object providing a view on D's items"),
    ("py:class", "a set-like object providing a view on D's keys"),
    ("py:class", "v, remove specified key and return the corresponding value."),  # noqa: E501
    ("py:class", "None. Update D from dict/iterable E and F."),
    ("py:class", "an object providing a view on D's values"),
    ("py:class", "a shallow copy of D"),
    ("py:class", "(k, v), remove and return some (key, value) pair as a"),
    ("py:class", "_FuncT"),  # type hint used in @verbose decorator
    ("py:class", "mne.utils._logging._FuncT"),
]
suppress_warnings = ['image.nonlocal_uri']  # we intentionally link outside
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pydata_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# The version switcher shows 'dev' for development releases, otherwise the
# release's own version string.
switcher_version_match = 'dev' if release.endswith('dev0') else version
html_theme_options = {
    'icon_links': [
        dict(name='GitHub',
             url='https://github.com/mne-tools/mne-python',
             icon='fab fa-github-square'),
        dict(name='Twitter',
             url='https://twitter.com/mne_python',
             icon='fab fa-twitter-square'),
        dict(name='Discourse',
             url='https://mne.discourse.group/',
             icon='fab fa-discourse'),
        dict(name='Discord',
             url='https://discord.gg/rKfvxTuATa',
             icon='fab fa-discord')
    ],
    'icon_links_label': 'Quick Links',  # for screen reader
    'use_edit_page_button': False,
    'navigation_with_keys': False,
    'show_toc_level': 1,
    'navbar_end': ['theme-switcher', 'version-switcher', 'navbar-icon-links'],
    'footer_items': ['copyright'],
    'google_analytics_id': 'UA-37225609-1',
    # version switcher dropdown; always fetches the dev site's version list
    'switcher': {
        'json_url': 'https://mne.tools/dev/_static/versions.json',
        'version_match': switcher_version_match,
    }
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/mne_logo_small.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
    'style.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = [
    'contributing.html',
    'documentation.html',
    'getting_started.html',
    'install_mne_python.html',
]
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    'index': ['search-field.html', 'sidebar-quicklinks.html'],
}
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
html_copy_source = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# accommodate different logo shapes (width values in rem);
# these size aliases are referenced by the html_context entries below
xs = '2'
sm = '2.5'
md = '3'
lg = '4.5'
xl = '5'
xxl = '6'
# variables to pass to HTML templating engine
html_context = {
    'build_dev_html': bool(int(os.environ.get('BUILD_DEV_HTML', False))),
    'default_mode': 'auto',
    'pygment_light_style': 'tango',
    'pygment_dark_style': 'native',
    # funder logos shown on the site; 'size' is a width in rem
    'funders': [
        dict(img='nih.png', size='3', title='National Institutes of Health'),
        dict(img='nsf.png', size='3.5',
             title='US National Science Foundation'),
        dict(img='erc.svg', size='3.5', title='European Research Council'),
        dict(img='doe.svg', size='3', title='US Department of Energy'),
        dict(img='anr.svg', size='4.5',
             title='Agence Nationale de la Recherche'),
        dict(img='cds.png', size='2.25',
             title='Paris-Saclay Center for Data Science'),
        dict(img='google.svg', size='2.25', title='Google'),
        dict(img='amazon.svg', size='2.5', title='Amazon'),
        dict(img='czi.svg', size='2.5', title='Chan Zuckerberg Initiative'),
    ],
    # contributing-institution logos; sizes use the rem aliases defined above
    'institutions': [
        dict(name='Massachusetts General Hospital',
             img='MGH.svg',
             url='https://www.massgeneral.org/',
             size=sm),
        dict(name='Athinoula A. Martinos Center for Biomedical Imaging',
             img='Martinos.png',
             url='https://martinos.org/',
             size=md),
        dict(name='Harvard Medical School',
             img='Harvard.png',
             url='https://hms.harvard.edu/',
             size=sm),
        dict(name='Massachusetts Institute of Technology',
             img='MIT.svg',
             url='https://web.mit.edu/',
             size=md),
        dict(name='New York University',
             img='NYU.png',
             url='https://www.nyu.edu/',
             size=xs),
        dict(name='Commissariat à l´énergie atomique et aux énergies alternatives',  # noqa E501
             img='CEA.png',
             url='http://www.cea.fr/',
             size=md),
        dict(name='Aalto-yliopiston perustieteiden korkeakoulu',
             img='Aalto.svg',
             url='https://sci.aalto.fi/',
             size=md),
        dict(name='Télécom ParisTech',
             img='Telecom_Paris_Tech.svg',
             url='https://www.telecom-paris.fr/',
             size=md),
        dict(name='University of Washington',
             img='Washington.png',
             url='https://www.washington.edu/',
             size=md),
        dict(name='Institut du Cerveau et de la Moelle épinière',
             img='ICM.jpg',
             url='https://icm-institute.org/',
             size=md),
        dict(name='Boston University',
             img='BU.svg',
             url='https://www.bu.edu/',
             size=lg),
        dict(name='Institut national de la santé et de la recherche médicale',
             img='Inserm.svg',
             url='https://www.inserm.fr/',
             size=xl),
        dict(name='Forschungszentrum Jülich',
             img='Julich.svg',
             url='https://www.fz-juelich.de/',
             size=xl),
        dict(name='Technische Universität Ilmenau',
             img='Ilmenau.gif',
             url='https://www.tu-ilmenau.de/',
             size=xxl),
        dict(name='Berkeley Institute for Data Science',
             img='BIDS.png',
             url='https://bids.berkeley.edu/',
             size=lg),
        dict(name='Institut national de recherche en informatique et en automatique',  # noqa E501
             img='inria.png',
             url='https://www.inria.fr/',
             size=xl),
        dict(name='Aarhus Universitet',
             img='Aarhus.png',
             url='https://www.au.dk/',
             size=xl),
        dict(name='Karl-Franzens-Universität Graz',
             img='Graz.jpg',
             url='https://www.uni-graz.at/',
             size=md),
        dict(name='SWPS Uniwersytet Humanistycznospołeczny',
             img='SWPS.svg',
             url='https://www.swps.pl/',
             size=xl),
        dict(name='Max-Planck-Institut für Bildungsforschung',
             img='MPIB.svg',
             url='https://www.mpib-berlin.mpg.de/',
             size=xxl),
        dict(name='Macquarie University',
             img='Macquarie.png',
             url='https://www.mq.edu.au/',
             size=lg),
        dict(name='Children’s Hospital of Philadelphia Research Institute',
             img='CHOP.svg',
             url='https://imaging.research.chop.edu/',
             size=xxl),
    ],
    # \u00AD is an optional hyphen (not rendered unless needed)
    # If these are changed, the Makefile should be updated, too
    'carousel': [
        dict(title='Source Estimation',
             text='Distributed, sparse, mixed-norm, beam\u00ADformers, dipole fitting, and more.',  # noqa E501
             url='auto_tutorials/inverse/30_mne_dspm_loreta.html',
             img='sphx_glr_30_mne_dspm_loreta_008.gif',
             alt='dSPM'),
        dict(title='Machine Learning',
             text='Advanced decoding models including time general\u00ADiza\u00ADtion.',  # noqa E501
             url='auto_tutorials/machine-learning/50_decoding.html',
             img='sphx_glr_50_decoding_006.png',
             alt='Decoding'),
        dict(title='Encoding Models',
             text='Receptive field estima\u00ADtion with optional smooth\u00ADness priors.',  # noqa E501
             url='auto_tutorials/machine-learning/30_strf.html',
             img='sphx_glr_30_strf_001.png',
             alt='STRF'),
        dict(title='Statistics',
             text='Parametric and non-parametric, permutation tests and clustering.',  # noqa E501
             url='auto_tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.html',  # noqa E501
             img='sphx_glr_20_cluster_1samp_spatiotemporal_001.png',
             alt='Clusters'),
        dict(title='Connectivity',
             text='All-to-all spectral and effective connec\u00ADtivity measures.',  # noqa E501
             url='https://mne.tools/mne-connectivity/stable/auto_examples/mne_inverse_label_connectivity.html',  # noqa E501
             img='https://mne.tools/mne-connectivity/stable/_images/sphx_glr_mne_inverse_label_connectivity_001.png',  # noqa E501
             alt='Connectivity'),
        dict(title='Data Visualization',
             text='Explore your data from multiple perspectives.',
             url='auto_tutorials/evoked/20_visualize_evoked.html',
             img='sphx_glr_20_visualize_evoked_007.png',
             alt='Visualization'),
    ]
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'mne-doc'
# -- Options for LaTeX output ------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]). Empty: no LaTeX documents are built.
latex_documents = []
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_toplevel_sectioning = 'part'
# Capture NumPy's print options at import time so reset_warnings() below can
# restore them after any tutorial calls np.set_printoptions.
_np_print_defaults = np.get_printoptions()
# -- Warnings management -----------------------------------------------------
def reset_warnings(gallery_conf, fname):
    """Ensure we are future compatible and ignore silly warnings.

    Parameters
    ----------
    gallery_conf : dict | None
        Unused here; presumably present to match sphinx-gallery's reset
        callable signature — TODO confirm.
    fname : str | None
        Unused; present for the same reason.
    """
    # In principle, our examples should produce no warnings.
    # Here we cause warnings to become errors, with a few exceptions.
    # This list should be considered alongside
    # setup.cfg -> [tool:pytest] -> filterwarnings
    # NOTE: filter order matters — later filterwarnings() calls are inserted
    # at the front of the filter list and take precedence.
    # remove tweaks from other module imports or example runs
    warnings.resetwarnings()
    # restrict
    warnings.filterwarnings('error')
    # allow these, but show them
    warnings.filterwarnings('always', '.*non-standard config type: "foo".*')
    warnings.filterwarnings('always', '.*config type: "MNEE_USE_CUUDAA".*')
    warnings.filterwarnings('always', '.*cannot make axes width small.*')
    warnings.filterwarnings('always', '.*Axes that are not compatible.*')
    warnings.filterwarnings('always', '.*FastICA did not converge.*')
    # ECoG BIDS spec violations:
    warnings.filterwarnings('always', '.*Fiducial point nasion not found.*')
    warnings.filterwarnings('always', '.*DigMontage is only a subset of.*')
    warnings.filterwarnings(  # xhemi morph (should probably update sample)
        'always', '.*does not exist, creating it and saving it.*')
    warnings.filterwarnings('default', module='sphinx')  # internal warnings
    warnings.filterwarnings(
        'always', '.*converting a masked element to nan.*')  # matplotlib?
    # allow these warnings, but don't show them
    warnings.filterwarnings(
        'ignore', '.*OpenSSL\\.rand is deprecated.*')
    warnings.filterwarnings('ignore', '.*is currently using agg.*')
    warnings.filterwarnings(  # SciPy-related warning (maybe 1.2.0 will fix it)
        'ignore', '.*the matrix subclass is not the recommended.*')
    warnings.filterwarnings(  # some joblib warning
        'ignore', '.*semaphore_tracker: process died unexpectedly.*')
    warnings.filterwarnings(  # needed until SciPy 1.2.0 is released
        'ignore', '.*will be interpreted as an array index.*', module='scipy')
    # lineno-pinned filters target specific third-party source lines
    warnings.filterwarnings(
        'ignore', '.*invalid escape sequence.*', lineno=90)  # quantities
    warnings.filterwarnings(
        'ignore', '.*invalid escape sequence.*', lineno=14)  # mne-connectivity
    warnings.filterwarnings(
        'ignore', '.*invalid escape sequence.*', lineno=281)  # mne-conn
    warnings.filterwarnings(
        'ignore', '.*"is not" with a literal.*', module='nilearn')
    warnings.filterwarnings(  # scikit-learn FastICA whiten=True deprecation
        'ignore', r'.*From version 1\.3 whiten.*')
    warnings.filterwarnings(  # seaborn -> pandas
        'ignore', '.*iteritems is deprecated and will be.*')
    warnings.filterwarnings(  # PyOpenGL for macOS
        'ignore', '.*PyOpenGL was not found.*')
    warnings.filterwarnings(  # macOS Epochs
        'ignore', '.*Plotting epochs on MacOS.*')
    # bulk-ignore DeprecationWarnings from third-party modules by message
    for key in ('HasTraits', r'numpy\.testing', 'importlib', r'np\.loads',
                'Using or importing the ABCs from',  # internal modules on 3.7
                r"it will be an error for 'np\.bool_'",  # ndimage
                "DocumenterBridge requires a state object",  # sphinx dev
                "'U' mode is deprecated",  # sphinx io
                r"joblib is deprecated in 0\.21",  # nilearn
                'The usage of `cmp` is deprecated and will',  # sklearn/pytest
                'scipy.* is deprecated and will be removed in',  # dipy
                r'Converting `np\.character` to a dtype is deprecated',  # vtk
                r'sphinx\.util\.smartypants is deprecated',
                'is a deprecated alias for the builtin',  # NumPy
                'the old name will be removed',  # Jinja, via sphinx
                r'Passing a schema to Validator\.iter_errors',  # jsonschema
                "default value of type 'dict' in an Any trait will",  # traits
                'rcParams is deprecated',  # PyVista rcParams -> global_theme
                'to mean no clipping',
                r'the `scipy\.ndimage.*` namespace is deprecated',  # Dipy
                '`np.MachAr` is deprecated',  # Numba
                'distutils Version classes are deprecated',  # pydata-sphinx-th
                'The module matplotlib.tight_layout is deprecated',  # nilearn
                ):
        warnings.filterwarnings(  # deal with other modules having bad imports
            'ignore', message=".*%s.*" % key, category=DeprecationWarning)
    warnings.filterwarnings(  # deal with bootstrap-theme bug
        'ignore', message=".*modify script_files in the theme.*",
        category=Warning)
    warnings.filterwarnings(  # nilearn
        'ignore', message=r'The sklearn.* module is.*', category=FutureWarning)
    warnings.filterwarnings(  # nilearn
        'ignore', message=r'Fetchers from the nilea.*', category=FutureWarning)
    warnings.filterwarnings(  # deal with other modules having bad imports
        'ignore', message=".*ufunc size changed.*", category=RuntimeWarning)
    warnings.filterwarnings(  # realtime
        'ignore', message=".*unclosed file.*", category=ResourceWarning)
    warnings.filterwarnings('ignore', message='Exception ignored in.*')
    # allow this ImportWarning, but don't show it
    warnings.filterwarnings(
        'ignore', message="can't resolve package from", category=ImportWarning)
    warnings.filterwarnings(
        'ignore', message='.*mne-realtime.*', category=DeprecationWarning)
    warnings.filterwarnings(
        'ignore', message=r'numpy\.ndarray size changed.*',
        category=RuntimeWarning)
    warnings.filterwarnings(
        'ignore', message=r'.*Setting theme=.*6 in qdarkstyle.*',
        category=RuntimeWarning)
    # In case we use np.set_printoptions in any tutorials, we only
    # want it to affect those:
    np.set_printoptions(**_np_print_defaults)
# Apply the warning configuration for the conf build itself (args are unused).
reset_warnings(None, None)
# -- Fontawesome support -----------------------------------------------------
# here the "fab" and "fas" refer to "brand" and "solid" (determines which font
# file to look in). "fw" indicates fixed width.
brand_icons = ('apple', 'linux', 'windows', 'discourse', 'python')
fixed_icons = (
    # homepage:
    'book', 'code-branch', 'newspaper', 'question-circle', 'quote-left',
    # contrib guide:
    'bug', 'comment', 'hand-sparkles', 'magic', 'pencil-alt', 'remove-format',
    'universal-access', 'discourse', 'python',
)
other_icons = (
    'hand-paper', 'question', 'rocket', 'server', 'code', 'desktop',
    'terminal', 'cloud-download-alt', 'wrench',
)
# map icon name -> tuple of CSS classes (font family, optional fixed-width)
icons = dict()
for icon in brand_icons + fixed_icons + other_icons:
    font = ('fab' if icon in brand_icons else 'fas',)  # brand or solid font
    fw = ('fa-fw',) if icon in fixed_icons else ()  # fixed-width
    icons[icon] = font + fw
# Build reST substitutions (|icon-name| -> raw HTML <i> tag) for rst_prolog,
# which setup() below installs as app.config.rst_prolog.
prolog = ''
for icon, classes in icons.items():
    prolog += f'''
.. |{icon}| raw:: html
    <i class="{' '.join(classes)} fa-{icon}"></i>
'''
prolog += '''
.. |fix-bug| raw:: html
    <span class="fa-stack small-stack">
        <i class="fas fa-bug fa-stack-1x"></i>
        <i class="fas fa-ban fa-stack-2x"></i>
    </span>
'''
prolog += '''
.. |ensp| unicode:: U+2002 .. EN SPACE
'''
# -- Dependency info ----------------------------------------------------------
# Determine the minimum supported Python version from mne's own metadata and
# expose it as the |min_python_version| substitution.
try:
    from importlib.metadata import metadata  # new in Python 3.8
    min_py = metadata('mne')['Requires-Python']
except ModuleNotFoundError:
    # Reached on Python < 3.8 (no importlib.metadata) and also when mne's
    # metadata is missing, since PackageNotFoundError subclasses
    # ModuleNotFoundError; fall back to pkg_resources in both cases.
    from pkg_resources import get_distribution
    info = get_distribution('mne').get_metadata_lines('PKG-INFO')
    for line in info:
        if line.strip().startswith('Requires-Python'):
            min_py = line.split(':')[1]
# strip the comparison operators, e.g. '>=3.7' -> '3.7'
min_py = min_py.lstrip(' =<>')
prolog += f'\n.. |min_python_version| replace:: {min_py}\n'
# -- website redirects --------------------------------------------------------
# Static list created 2021/04/13 based on what we needed to redirect,
# since we don't need to add redirects for examples added after this date.
# Basenames of scripts that used to be named plot_<name>.py and need an HTML
# redirect from the old plot_*.html page (consumed by make_redirects below).
needed_plot_redirects = {
    # tutorials
    '10_epochs_overview.py', '10_evoked_overview.py', '10_overview.py',
    '10_preprocessing_overview.py', '10_raw_overview.py',
    '10_reading_meg_data.py', '15_handling_bad_channels.py',
    '20_event_arrays.py', '20_events_from_raw.py', '20_reading_eeg_data.py',
    '20_rejecting_bad_data.py', '20_visualize_epochs.py',
    '20_visualize_evoked.py', '30_annotate_raw.py', '30_epochs_metadata.py',
    '30_filtering_resampling.py', '30_info.py', '30_reading_fnirs_data.py',
    '35_artifact_correction_regression.py', '40_artifact_correction_ica.py',
    '40_autogenerate_metadata.py', '40_sensor_locations.py',
    '40_visualize_raw.py', '45_projectors_background.py',
    '50_artifact_correction_ssp.py', '50_configure_mne.py',
    '50_epochs_to_data_frame.py', '55_setting_eeg_reference.py',
    '59_head_positions.py', '60_make_fixed_length_epochs.py',
    '60_maxwell_filtering_sss.py', '70_fnirs_processing.py',
    # examples
    '3d_to_2d.py', 'brainstorm_data.py', 'channel_epochs_image.py',
    'cluster_stats_evoked.py', 'compute_csd.py',
    'compute_mne_inverse_epochs_in_label.py',
    'compute_mne_inverse_raw_in_label.py', 'compute_mne_inverse_volume.py',
    'compute_source_psd_epochs.py', 'covariance_whitening_dspm.py',
    'custom_inverse_solver.py',
    'decoding_csp_eeg.py', 'decoding_csp_timefreq.py',
    'decoding_spatio_temporal_source.py', 'decoding_spoc_CMC.py',
    'decoding_time_generalization_conditions.py',
    'decoding_unsupervised_spatial_filter.py', 'decoding_xdawn_eeg.py',
    'define_target_events.py', 'dics_source_power.py', 'eeg_csd.py',
    'eeg_on_scalp.py', 'eeglab_head_sphere.py', 'elekta_epochs.py',
    'ems_filtering.py', 'eog_artifact_histogram.py', 'evoked_arrowmap.py',
    'evoked_ers_source_power.py', 'evoked_topomap.py', 'evoked_whitening.py',
    'fdr_stats_evoked.py', 'find_ref_artifacts.py',
    'fnirs_artifact_removal.py', 'forward_sensitivity_maps.py',
    'gamma_map_inverse.py', 'hf_sef_data.py', 'ica_comparison.py',
    'interpolate_bad_channels.py', 'label_activation_from_stc.py',
    'label_from_stc.py', 'label_source_activations.py',
    'left_cerebellum_volume_source.py', 'limo_data.py',
    'linear_model_patterns.py', 'linear_regression_raw.py',
    'meg_sensors.py', 'mixed_norm_inverse.py',
    'mixed_source_space_inverse.py',
    'mne_cov_power.py', 'mne_helmet.py', 'mne_inverse_coherence_epochs.py',
    'mne_inverse_envelope_correlation.py',
    'mne_inverse_envelope_correlation_volume.py',
    'mne_inverse_psi_visual.py',
    'morph_surface_stc.py', 'morph_volume_stc.py', 'movement_compensation.py',
    'movement_detection.py', 'multidict_reweighted_tfmxne.py',
    'muscle_detection.py', 'opm_data.py', 'otp.py', 'parcellation.py',
    'psf_ctf_label_leakage.py', 'psf_ctf_vertices.py',
    'psf_ctf_vertices_lcmv.py', 'publication_figure.py', 'rap_music.py',
    'read_inverse.py', 'read_neo_format.py', 'read_noise_covariance_matrix.py',
    'read_stc.py', 'receptive_field_mtrf.py', 'resolution_metrics.py',
    'resolution_metrics_eegmeg.py', 'roi_erpimage_by_rt.py',
    'sensor_noise_level.py',
    'sensor_permutation_test.py', 'sensor_regression.py',
    'shift_evoked.py', 'simulate_evoked_data.py', 'simulate_raw_data.py',
    'simulated_raw_data_using_subject_anatomy.py', 'snr_estimate.py',
    'source_label_time_frequency.py', 'source_power_spectrum.py',
    'source_power_spectrum_opm.py', 'source_simulator.py',
    'source_space_morphing.py', 'source_space_snr.py',
    'source_space_time_frequency.py', 'ssd_spatial_filters.py',
    'ssp_projs_sensitivity_map.py', 'temporal_whitening.py',
    'time_frequency_erds.py', 'time_frequency_global_field_power.py',
    'time_frequency_mixed_norm_inverse.py', 'time_frequency_simulated.py',
    'topo_compare_conditions.py', 'topo_customized.py',
    'vector_mne_solution.py', 'virtual_evoked.py', 'xdawn_denoising.py',
    'xhemi.py',
}
# Short aliases to keep the custom_redirects mapping readable.
ex = 'auto_examples'
co = 'connectivity'
mne_conn = 'https://mne.tools/mne-connectivity/stable'
tu = 'auto_tutorials'
di = 'discussions'
sm = 'source-modeling'
fw = 'forward'
nv = 'inverse'
sn = 'stats-sensor-space'
sr = 'stats-source-space'
sd = 'sample-datasets'
ml = 'machine-learning'
tf = 'time-freq'
si = 'simulation'
custom_redirects = {
    # Custom redirects (one HTML path to another, relative to outdir)
    # can be added here as fr->to key->value mappings.
    # Values starting with 'http' redirect off-site (e.g. to mne-connectivity)
    # and are handled specially by make_redirects below.
    f'{tu}/evoked/plot_eeg_erp.html': f'{tu}/evoked/30_eeg_erp.html',
    f'{tu}/evoked/plot_whitened.html': f'{tu}/evoked/40_whitened.html',
    f'{tu}/misc/plot_modifying_data_inplace.html': f'{tu}/intro/15_inplace.html',  # noqa E501
    f'{tu}/misc/plot_report.html': f'{tu}/intro/70_report.html',
    f'{tu}/misc/plot_seeg.html': f'{tu}/clinical/20_seeg.html',
    f'{tu}/misc/plot_ecog.html': f'{tu}/clinical/30_ecog.html',
    f'{tu}/{ml}/plot_receptive_field.html': f'{tu}/{ml}/30_strf.html',
    f'{tu}/{ml}/plot_sensors_decoding.html': f'{tu}/{ml}/50_decoding.html',
    f'{tu}/{sm}/plot_background_freesurfer.html': f'{tu}/{fw}/10_background_freesurfer.html',  # noqa E501
    f'{tu}/{sm}/plot_source_alignment.html': f'{tu}/{fw}/20_source_alignment.html',  # noqa E501
    f'{tu}/{sm}/plot_forward.html': f'{tu}/{fw}/30_forward.html',
    f'{tu}/{sm}/plot_eeg_no_mri.html': f'{tu}/{fw}/35_eeg_no_mri.html',
    f'{tu}/{sm}/plot_background_freesurfer_mne.html': f'{tu}/{fw}/50_background_freesurfer_mne.html',  # noqa E501
    f'{tu}/{sm}/plot_fix_bem_in_blender.html': f'{tu}/{fw}/80_fix_bem_in_blender.html',  # noqa E501
    f'{tu}/{sm}/plot_compute_covariance.html': f'{tu}/{fw}/90_compute_covariance.html',  # noqa E501
    f'{tu}/{sm}/plot_object_source_estimate.html': f'{tu}/{nv}/10_stc_class.html',  # noqa E501
    f'{tu}/{sm}/plot_dipole_fit.html': f'{tu}/{nv}/20_dipole_fit.html',
    f'{tu}/{sm}/plot_mne_dspm_source_localization.html': f'{tu}/{nv}/30_mne_dspm_loreta.html',  # noqa E501
    f'{tu}/{sm}/plot_dipole_orientations.html': f'{tu}/{nv}/35_dipole_orientations.html',  # noqa E501
    f'{tu}/{sm}/plot_mne_solutions.html': f'{tu}/{nv}/40_mne_fixed_free.html',
    f'{tu}/{sm}/plot_beamformer_lcmv.html': f'{tu}/{nv}/50_beamformer_lcmv.html',  # noqa E501
    f'{tu}/{sm}/plot_visualize_stc.html': f'{tu}/{nv}/60_visualize_stc.html',
    f'{tu}/{sm}/plot_eeg_mri_coords.html': f'{tu}/{nv}/70_eeg_mri_coords.html',
    f'{tu}/{sd}/plot_brainstorm_phantom_elekta.html': f'{tu}/{nv}/80_brainstorm_phantom_elekta.html',  # noqa E501
    f'{tu}/{sd}/plot_brainstorm_phantom_ctf.html': f'{tu}/{nv}/85_brainstorm_phantom_ctf.html',  # noqa E501
    f'{tu}/{sd}/plot_phantom_4DBTi.html': f'{tu}/{nv}/90_phantom_4DBTi.html',
    f'{tu}/{sd}/plot_brainstorm_auditory.html': f'{tu}/io/60_ctf_bst_auditory.html',  # noqa E501
    f'{tu}/{sd}/plot_sleep.html': f'{tu}/clinical/60_sleep.html',
    f'{tu}/{di}/plot_background_filtering.html': f'{tu}/preprocessing/25_background_filtering.html',  # noqa E501
    f'{tu}/{di}/plot_background_statistics.html': f'{tu}/{sn}/10_background_stats.html',  # noqa E501
    f'{tu}/{sn}/plot_stats_cluster_erp.html': f'{tu}/{sn}/20_erp_stats.html',
    f'{tu}/{sn}/plot_stats_cluster_1samp_test_time_frequency.html': f'{tu}/{sn}/40_cluster_1samp_time_freq.html',  # noqa E501
    f'{tu}/{sn}/plot_stats_cluster_time_frequency.html': f'{tu}/{sn}/50_cluster_between_time_freq.html',  # noqa E501
    f'{tu}/{sn}/plot_stats_spatio_temporal_cluster_sensors.html': f'{tu}/{sn}/75_cluster_ftest_spatiotemporal.html',  # noqa E501
    f'{tu}/{sr}/plot_stats_cluster_spatio_temporal.html': f'{tu}/{sr}/20_cluster_1samp_spatiotemporal.html',  # noqa E501
    f'{tu}/{sr}/plot_stats_cluster_spatio_temporal_2samp.html': f'{tu}/{sr}/30_cluster_ftest_spatiotemporal.html',  # noqa E501
    f'{tu}/{sr}/plot_stats_cluster_spatio_temporal_repeated_measures_anova.html': f'{tu}/{sr}/60_cluster_rmANOVA_spatiotemporal.html',  # noqa E501
    f'{tu}/{sr}/plot_stats_cluster_time_frequency_repeated_measures_anova.html': f'{tu}/{sr}/70_cluster_rmANOVA_time_freq.html',  # noqa E501
    f'{tu}/{tf}/plot_sensors_time_frequency.html': f'{tu}/{tf}/20_sensors_time_frequency.html',  # noqa E501
    f'{tu}/{tf}/plot_ssvep.html': f'{tu}/{tf}/50_ssvep.html',
    f'{tu}/{si}/plot_creating_data_structures.html': f'{tu}/{si}/10_array_objs.html',  # noqa E501
    f'{tu}/{si}/plot_point_spread.html': f'{tu}/{si}/70_point_spread.html',
    f'{tu}/{si}/plot_dics.html': f'{tu}/{si}/80_dics.html',
    f'{ex}/{co}/mne_inverse_label_connectivity.html': f'{mne_conn}/{ex}/mne_inverse_label_connectivity.html',  # noqa E501
    f'{ex}/{co}/cwt_sensor_connectivity.html': f'{mne_conn}/{ex}/cwt_sensor_connectivity.html',  # noqa E501
    f'{ex}/{co}/mixed_source_space_connectivity.html': f'{mne_conn}/{ex}/mixed_source_space_connectivity.html',  # noqa E501
    f'{ex}/{co}/mne_inverse_coherence_epochs.html': f'{mne_conn}/{ex}/mne_inverse_coherence_epochs.html',  # noqa E501
    f'{ex}/{co}/mne_inverse_connectivity_spectrum.html': f'{mne_conn}/{ex}/mne_inverse_connectivity_spectrum.html',  # noqa E501
    f'{ex}/{co}/mne_inverse_envelope_correlation_volume.html': f'{mne_conn}/{ex}/mne_inverse_envelope_correlation_volume.html',  # noqa E501
    f'{ex}/{co}/mne_inverse_envelope_correlation.html': f'{mne_conn}/{ex}/mne_inverse_envelope_correlation.html',  # noqa E501
    f'{ex}/{co}/mne_inverse_psi_visual.html': f'{mne_conn}/{ex}/mne_inverse_psi_visual.html',  # noqa E501
    f'{ex}/{co}/sensor_connectivity.html': f'{mne_conn}/{ex}/sensor_connectivity.html',  # noqa E501
}
def make_redirects(app, exception):
    """Make HTML redirects.

    Connected to the ``build-finished`` event: writes small redirect pages
    into the HTML output directory for renamed examples/tutorials.
    """
    # https://www.sphinx-doc.org/en/master/extdev/appapi.html
    # Adapted from sphinxcontrib/redirects (BSD-2-Clause)
    # Only run after a successful standalone-HTML build.
    if not (isinstance(app.builder,
                       sphinx.builders.html.StandaloneHTMLBuilder) and
            exception is None):
        return
    logger = sphinx.util.logging.getLogger('mne')
    # Minimal redirect page: meta refresh plus JS fallback.
    TEMPLATE = """\
<!DOCTYPE HTML>
<html lang="en-US">
<head>
<meta charset="UTF-8">
<meta http-equiv="refresh" content="1; url={to}">
<script type="text/javascript">
window.location.href = "{to}"
</script>
<title>Page Redirection</title>
</head>
<body>
If you are not redirected automatically, follow this <a href='{to}'>link</a>.
</body>
</html>"""  # noqa: E501
    sphinx_gallery_conf = app.config['sphinx_gallery_conf']
    for src_dir, out_dir in zip(sphinx_gallery_conf['examples_dirs'],
                                sphinx_gallery_conf['gallery_dirs']):
        root = os.path.abspath(os.path.join(app.srcdir, src_dir))
        # collect the renamed scripts (relative to this gallery root)
        fnames = [os.path.join(os.path.relpath(dirpath, root), fname)
                  for dirpath, _, fnames in os.walk(root)
                  for fname in fnames
                  if fname in needed_plot_redirects]
        # plot_ redirects: old plot_<name>.html page -> new <name>.html page
        for fname in fnames:
            dirname = os.path.join(app.outdir, out_dir, os.path.dirname(fname))
            to_fname = os.path.splitext(os.path.basename(fname))[0] + '.html'
            fr_fname = f'plot_{to_fname}'
            to_path = os.path.join(dirname, to_fname)
            fr_path = os.path.join(dirname, fr_fname)
            assert os.path.isfile(to_path), (fname, to_path)
            with open(fr_path, 'w') as fid:
                fid.write(TEMPLATE.format(to=to_fname))
        logger.info(
            f'Added {len(fnames):3d} HTML plot_* redirects for {out_dir}')
    # custom redirects
    for fr, to in custom_redirects.items():
        if not to.startswith('http'):
            # local target must exist in the built output
            assert os.path.isfile(os.path.join(app.outdir, to)), to
            # handle links to sibling folders
            path_parts = to.split('/')
            assert tu in path_parts, path_parts  # need to refactor otherwise
            path_parts = ['..'] + path_parts[(path_parts.index(tu) + 1):]
            to = os.path.join(*path_parts)
        assert to.endswith('html'), to
        fr_path = os.path.join(app.outdir, fr)
        assert fr_path.endswith('html'), fr_path
        # allow overwrite if existing file is just a redirect
        if os.path.isfile(fr_path):
            with open(fr_path, 'r') as fid:
                # the title appears on the 9th line of TEMPLATE output
                for _ in range(8):
                    next(fid)
                line = fid.readline()
                assert 'Page Redirection' in line, line
        # handle folders that no longer exist
        if fr_path.split('/')[-2] in (
                'misc', 'discussions', 'source-modeling', 'sample-datasets',
                'connectivity'):
            os.makedirs(os.path.dirname(fr_path), exist_ok=True)
        with open(fr_path, 'w') as fid:
            fid.write(TEMPLATE.format(to=to))
    logger.info(
        f'Added {len(custom_redirects):3d} HTML custom redirects')
def make_version(app, exception):
    """Make a text file with the git version."""
    # Only emit the file for a successful standalone-HTML build.
    is_html_build = isinstance(app.builder,
                               sphinx.builders.html.StandaloneHTMLBuilder)
    if exception is not None or not is_html_build:
        return
    logger = sphinx.util.logging.getLogger('mne')
    try:
        stdout, _ = run_subprocess(['git', 'rev-parse', 'HEAD'], verbose=False)
    except Exception as exc:
        # Best effort: a missing git checkout must not fail the build.
        logger.warning(f'Failed to write _version.txt: {exc}')
        return
    out_path = os.path.join(app.outdir, '_version.txt')
    with open(out_path, 'w') as fid:
        fid.write(stdout)
    logger.info(f'Added "{stdout.rstrip()}" > _version.txt')
# -- Connect our handlers to the main Sphinx app ---------------------------
def setup(app):
    """Set up the Sphinx app."""
    app.connect('autodoc-process-docstring', append_attr_meth_examples)
    if report_scraper is not None:
        # The report scraper needs a handle on the app and copies its
        # assets in once the builder is initialised.
        report_scraper.app = app
        app.config.rst_prolog = prolog
        app.connect('builder-inited', report_scraper.copyfiles)
    logger = sphinx.util.logging.getLogger('mne')
    logger.info(f'Building documentation for MNE {release} ({mne.__file__})')
    logger.info(f'Building with scrapers={scrapers}')
    # Post-build hooks: HTML redirects first, then the _version.txt stamp.
    for hook in (make_redirects, make_version):
        app.connect('build-finished', hook)
|
# -*- coding: utf-8 -*-
#
# ZS documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 24 18:21:57 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# On readthedocs.org, the doc build is run inside a virtualenv, with zs
# installed, but the virtualenv bin/ dir is not on the path, so by default the
# 'zs' command is not available to the programoutput extension. But we want
# it to be. So fix that:
if (hasattr(sys, "real_prefix")
        or sys.prefix != getattr(sys, "base_prefix", sys.prefix)):
    # we're in a virtualenv and sys.prefix points to the virtualenv
    # directory. See:
    # https://stackoverflow.com/questions/1871549/python-determine-if-running-inside-virtualenv
    # (base_prefix is needed to also detect pyvenv environments -- future
    # proofing!)
    # BUG FIX: this previously read `sys.getattr(...)`; the sys module has
    # no `getattr` attribute, so pyvenv detection raised AttributeError.
    # The builtin getattr() is what was intended.
    os.environ["PATH"] = "%s/bin:%s" % (sys.prefix, os.environ["PATH"])
# And let's also make sure our example scratch directory starts out fresh,
# to avoid embarrassing failures later
import shutil
if os.path.exists("example/scratch"):
    shutil.rmtree("example/scratch")
os.mkdir("example/scratch")
shutil.copyfile("example/tiny-4grams.txt",
                "example/scratch/tiny-4grams.txt")
# And set the TIME variable to control the output format from 'time' (see
# index.rst)
os.environ["TIME"] = "\nReal time elapsed: %e seconds"
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinxcontrib.programoutput',
    'IPython.sphinxext.ipython_console_highlighting',
    'IPython.sphinxext.ipython_directive',
]
# IPython extension: don't bother with matplotlib, it probably isn't installed
# and anyway we don't need it.
ipython_mplbackend = None
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ZS'
copyright = u'2013-2014, Nathaniel J. Smith'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# (imported mid-file so the version always comes from the installed package)
import zs
version = zs.__version__
# The full version, including alpha/beta/rc tags.
#release = '0.0.0'
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ZSdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'ZS.tex', u'ZS Documentation',
     u'Nathaniel J. Smith', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'zs', u'ZS Documentation',
     [u'Nathaniel J. Smith'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'ZS', u'ZS Documentation',
     u'Nathaniel J. Smith', 'ZS', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): legacy positional intersphinx form ({target-url: inventory});
# newer Sphinx releases require named mappings -- confirm against the Sphinx
# version actually used for this build.
intersphinx_mapping = {'http://docs.python.org/': None}
Another RTD experiment.
# -*- coding: utf-8 -*-
#
# ZS documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 24 18:21:57 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# NOTE(review): temporary debugging for the readthedocs environment -- these
# shell out at import time and should be removed once the experiment is done.
os.system("apt-cache search python3")
os.system("dpkg -l python3*")
os.system("which python3")
# On readthedocs.org, the doc build is run inside a virtualenv, with zs
# installed, but the virtualenv bin/ dir is not on the path, so by default the
# 'zs' command is not available to the programoutput extension. But we want
# it to be. So fix that:
if (hasattr(sys, "real_prefix")
        or sys.prefix != getattr(sys, "base_prefix", sys.prefix)):
    # we're in a virtualenv and sys.prefix points to the virtualenv
    # directory. See:
    # https://stackoverflow.com/questions/1871549/python-determine-if-running-inside-virtualenv
    # (base_prefix is needed to also detect pyvenv environments -- future
    # proofing!)
    # BUG FIX: this previously read `sys.getattr(...)`; the sys module has
    # no `getattr` attribute, so pyvenv detection raised AttributeError.
    # The builtin getattr() is what was intended.
    os.environ["PATH"] = "%s/bin:%s" % (sys.prefix, os.environ["PATH"])
# And let's also make sure our example scratch directory starts out fresh,
# to avoid embarrassing failures later
import shutil
if os.path.exists("example/scratch"):
    shutil.rmtree("example/scratch")
os.mkdir("example/scratch")
shutil.copyfile("example/tiny-4grams.txt",
                "example/scratch/tiny-4grams.txt")
# And set the TIME variable to control the output format from 'time' (see
# index.rst)
os.environ["TIME"] = "\nReal time elapsed: %e seconds"
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinxcontrib.programoutput',
    'IPython.sphinxext.ipython_console_highlighting',
    'IPython.sphinxext.ipython_directive',
]
# IPython extension: don't bother with matplotlib, it probably isn't installed
# and anyway we don't need it.
ipython_mplbackend = None
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ZS'
copyright = u'2013-2014, Nathaniel J. Smith'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# (imported mid-file so the version always comes from the installed package)
import zs
version = zs.__version__
# The full version, including alpha/beta/rc tags.
#release = '0.0.0'
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ZSdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'ZS.tex', u'ZS Documentation',
     u'Nathaniel J. Smith', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'zs', u'ZS Documentation',
     [u'Nathaniel J. Smith'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'ZS', u'ZS Documentation',
     u'Nathaniel J. Smith', 'ZS', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): legacy positional intersphinx form ({target-url: inventory});
# newer Sphinx releases require named mappings -- confirm against the Sphinx
# version actually used for this build.
intersphinx_mapping = {'http://docs.python.org/': None}
|
# coding=utf-8
"""Custom WTForms fields for Abilian forms: model-aware field lists, file
uploads stored on related models, locale-aware date/datetime fields, and
Select2-backed select fields (static, query-driven, and AJAX)."""
from __future__ import absolute_import
import operator
import logging
from functools import partial
import datetime
import sqlalchemy as sa
from wtforms import (
ValidationError,
Field,
SelectMultipleField,
SelectField,
SelectFieldBase,
FormField,
)
from wtforms.validators import required, optional
from wtforms.compat import string_types, text_type
from wtforms.ext.sqlalchemy.fields import get_pk_from_identity, has_identity_key
from wtforms_alchemy import ModelFieldList as BaseModelFieldList
import babel
from flask.helpers import locked_cached_property
from flask_wtf.file import FileField as BaseFileField
from flask_babel import (
get_locale, get_timezone,
format_date, format_datetime
)
from abilian import i18n
from abilian.core.util import utc_dt
from abilian.core.extensions import db
from .widgets import DateTimeInput, DateInput, Select2, Select2Ajax, FileInput
from .util import babel2datetime
# Public names re-exported by this module.
# NOTE(review): DateTimeField and LocaleSelectField are defined below but are
# not listed here -- confirm whether that omission is intentional.
__all__ = ['ModelFieldList', 'FileField', 'DateField', 'Select2Field',
           'Select2MultipleField', 'QuerySelect2Field', 'JsonSelect2Field',
           'JsonSelect2MultipleField']
class ModelFieldList(BaseModelFieldList):
    """FieldList that silently discards entirely-empty rows before
    validation instead of reporting them as errors.

    A row counts as empty when every one of its inputs is falsy.
    """

    def validate(self, form, extra_validators=tuple()):
        # Iterate over a copy: the original code removed entries from
        # ``self.entries`` while iterating it directly, which makes the
        # iterator skip the entry following each removed one.
        for field in list(self.entries):
            is_subform = isinstance(field, FormField)
            data = field.data.values() if is_subform else [field.data]
            if not any(data):
                # all inputs empty: discard row
                self.entries.remove(field)
        return super(ModelFieldList, self).validate(form, extra_validators)
class FileField(BaseFileField):
    """File upload field that can also persist its content on a related model.

    Supports the 'multiple' attribute, enabling HTML5 multiple file input in
    the widget.

    :param multiple: render the input with the HTML5 ``multiple`` attribute.
    :param blob_attr: attribute name to store / retrieve the value on the
        related model. Used if `name` is a relationship on the model.
        Defaults to ``'value'``.
    :param allow_delete: honor the ``__<name>_delete__`` form flag and allow
        clearing the stored value.
    """
    # Class-level defaults; each may be overridden per instance via kwargs.
    multiple = False
    widget = FileInput()
    blob_attr = 'value'
    allow_delete = True

    def __init__(self, *args, **kwargs):
        try:
            self.multiple = kwargs.pop('multiple')
        except KeyError:
            pass
        self.blob_attr = kwargs.pop('blob_attr', self.__class__.blob_attr)
        self.allow_delete = kwargs.pop('allow_delete', self.__class__.allow_delete)
        BaseFileField.__init__(self, *args, **kwargs)

    def __call__(self, **kwargs):
        # Propagate the field's `multiple` setting to the rendered input
        # unless the caller explicitly overrides it.
        if 'multiple' not in kwargs and self.multiple:
            kwargs['multiple'] = 'multiple'
        return BaseFileField.__call__(self, **kwargs)

    def process(self, formdata, *args, **kwargs):
        # A companion form entry named '__<name>_delete__' signals that the
        # user asked to remove the currently stored file.
        delete_arg = u'__{name}_delete__'.format(name=self.name)
        self._delete_file = formdata and delete_arg in formdata
        return super(FileField, self).process(formdata, *args, **kwargs)

    def process_data(self, value):
        # When the bound value is a model instance, unwrap the actual blob
        # attribute before handing it to the base field.
        if isinstance(value, db.Model):
            value = getattr(value, self.blob_attr)
        return super(FileField, self).process_data(value)

    def populate_obj(self, obj, name):
        """Store the uploaded file content on `obj`.

        If `name` is a plain attribute, the value is stored directly. If it
        is a single-valued relationship, the content is stored on the related
        object's `blob_attr` attribute, creating the related object when
        needed. A requested delete stores an empty value.
        """
        if not self.has_file() and not (self.allow_delete and self._delete_file):
            return
        state = sa.inspect(obj)
        mapper = state.mapper
        if name not in mapper.relationships:
            # directly store in database
            return super(FileField, self).populate_obj(obj, name)
        rel = getattr(mapper.relationships, name)
        if rel.uselist:
            raise ValueError("Only single target supported; else use ModelFieldList")
        val = getattr(obj, name)
        if val is None:
            val = rel.mapper.class_()
            setattr(obj, name, val)
        # Empty string here means "deleted" (no file submitted).
        data = self.data.read() if self.has_file() else u''
        setattr(val, self.blob_attr, data)
class DateTimeField(Field):
    """Datetime field rendered with the :class:`DateTimeInput` widget.

    Formatting and parsing use the short date/time patterns of the current
    flask-babel locale; naive parsed values are localized to the request
    timezone and converted to UTC.
    """
    widget = DateTimeInput()

    def __init__(self, label=None, validators=None, **kwargs):
        super(DateTimeField, self).__init__(label, validators, **kwargs)

    def _value(self):
        # String shown in the widget: raw user input when present, else the
        # stored datetime formatted for the current locale.
        if self.raw_data:
            return ' '.join(self.raw_data)
        else:
            locale = get_locale()
            date_fmt = locale.date_formats['short'].pattern
            # force numerical months and 4 digit years
            # (the 'y' rewrites funnel any year count down to 'y', then
            # back up to exactly 'yyyy' -- order matters)
            date_fmt = date_fmt.replace('MMMM', 'MM')\
                .replace('MMM', 'MM')\
                .replace('yyyy', 'y')\
                .replace('yy', 'y')\
                .replace('y', 'yyyy')
            time_fmt = locale.time_formats['short']
            dt_fmt = locale.datetime_formats['short'].format(time_fmt, date_fmt)
            return format_datetime(self.data, dt_fmt) if self.data else ''

    def process_formdata(self, valuelist):
        if valuelist:
            date_str = ' '.join(valuelist)
            locale = get_locale()
            date_fmt = locale.date_formats['short']
            date_fmt = babel2datetime(date_fmt)
            date_fmt = date_fmt.replace('%B', '%m')\
                .replace('%b', '%m')  # force numerical months
            time_fmt = locale.time_formats['short']
            time_fmt = babel2datetime(time_fmt)
            # NOTE(review): the parse format joins date and time with ' | '
            # while the submitted values above are joined with a plain space;
            # this only matches if the widget supplies the ' | ' separator
            # itself -- confirm against DateTimeInput.
            datetime_fmt = u'{} | {}'.format(date_fmt, time_fmt)
            try:
                self.data = datetime.datetime.strptime(date_str, datetime_fmt)
                if not self.data.tzinfo:
                    # Interpret naive input in the user's timezone; store UTC.
                    self.data = utc_dt(get_timezone().localize(self.data))
            except ValueError:
                self.data = None
                raise ValueError(self.gettext('Not a valid datetime value'))
class DateField(DateTimeField):
    """Text field storing a date, formatted and parsed with the short
    date pattern of the current flask-babel locale."""
    widget = DateInput()

    def __init__(self, label=None, validators=None, **kwargs):
        super(DateField, self).__init__(label, validators, **kwargs)

    def _value(self):
        if self.raw_data:
            return ' '.join(self.raw_data)
        pattern = get_locale().date_formats['short'].pattern
        # force numerical months and 4 digit years (order matters: the
        # 'y' rewrites funnel any year count down to 'y', then up to 'yyyy')
        for old, new in (('MMMM', 'MM'), ('MMM', 'MM'),
                         ('yyyy', 'y'), ('yy', 'y'), ('y', 'yyyy')):
            pattern = pattern.replace(old, new)
        return format_date(self.data, pattern) if self.data else ''

    def process_formdata(self, valuelist):
        if not valuelist:
            return
        raw = ' '.join(valuelist)
        fmt = babel2datetime(get_locale().date_formats['short'])
        fmt = fmt.replace('%B', '%m').replace('%b', '%m')
        try:
            self.data = datetime.datetime.strptime(raw, fmt).date()
        except ValueError:
            self.data = None
            raise ValueError(self.gettext('Not a valid datetime value'))
class Select2Field(SelectField):
    """SelectField rendered with the Select2 widget.

    Unlike the stock field, ``choices`` may also be a callable returning
    the choice list; it is evaluated lazily on each access.
    """
    widget = Select2()

    @property
    def choices(self):
        stored = self._choices
        if callable(stored):
            return stored()
        return stored

    @choices.setter
    def choices(self, value):
        self._choices = value
class Select2MultipleField(SelectMultipleField):
    """Multi-valued variant of :class:`Select2Field`; ``choices`` may be a
    callable evaluated lazily on each access."""
    widget = Select2(multiple=True)
    multiple = True

    @property
    def choices(self):
        stored = self._choices
        if callable(stored):
            return stored()
        return stored

    @choices.setter
    def choices(self, value):
        self._choices = value
class QuerySelect2Field(SelectFieldBase):
    """
    COPY/PASTED (and patched) from WTForms!

    Will display a select drop-down field to choose between ORM results in a
    sqlalchemy `Query`. The `data` property actually will store/keep an ORM
    model instance, not the ID. Submitting a choice which is not in the query
    will result in a validation error.

    This field only works for queries on models whose primary key column(s)
    have a consistent string representation. This means it mostly only works
    for those composed of string, unicode, and integer types. For the most
    part, the primary keys will be auto-detected from the model, alternately
    pass a one-argument callable to `get_pk` which can return a unique
    comparable key.

    The `query` property on the field can be set from within a view to assign
    a query per-instance to the field. If the property is not set, the
    `query_factory` callable passed to the field constructor will be called to
    obtain a query.

    Specify `get_label` to customize the label associated with each option. If
    a string, this is the name of an attribute on the model object to use as
    the label text. If a one-argument callable, this callable will be passed
    model instance and expected to return the label text. Otherwise, the model
    object's `__str__` or `__unicode__` will be used.

    :param allow_blank: DEPRECATED. Use optional()/required() validators instead.
    """

    def __init__(self, label=None, validators=None, query_factory=None,
                 get_pk=None, get_label=None, allow_blank=False,
                 blank_text='', widget=None, multiple=False, **kwargs):
        if widget is None:
            widget = Select2(multiple=multiple)
        kwargs['widget'] = widget
        self.multiple = multiple
        # Work on a private copy so a caller-supplied list is never mutated,
        # and so `validators=None` no longer crashes: the original code
        # called `validators.append(...)` on None (AttributeError).
        validators = list(validators) if validators is not None else []
        if not any(isinstance(v, (optional, required)) for v in validators):
            logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
            logger.warning('Use deprecated parameter `allow_blank`.')
            validators.append(optional() if allow_blank else required())
        super(QuerySelect2Field, self).__init__(label, validators, **kwargs)
        # PATCHED!
        if query_factory:
            self.query_factory = query_factory
        if get_pk is None:
            if not has_identity_key:
                raise Exception('The sqlalchemy identity_key function could not be imported.')
            self.get_pk = get_pk_from_identity
        else:
            self.get_pk = get_pk
        if get_label is None:
            self.get_label = lambda x: x
        elif isinstance(get_label, string_types):
            self.get_label = operator.attrgetter(get_label)
        else:
            self.get_label = get_label
        self.allow_blank = allow_blank
        self.blank_text = blank_text
        self.query = None
        self._object_list = None

    def _get_data(self):
        # Lazily resolve the posted pk string(s) to ORM objects.
        formdata = self._formdata
        if formdata is not None:
            if not self.multiple:
                formdata = [formdata]
            formdata = set(formdata)
            data = [obj for pk, obj in self._get_object_list()
                    if pk in formdata]
            # Stable ordering: by .name when every object has one, else the
            # objects' natural sort order.
            if all(hasattr(x, 'name') for x in data):
                data = sorted(data, key=lambda x: x.name)
            else:
                data = sorted(data)
            if data:
                if not self.multiple:
                    data = data[0]
                self._set_data(data)
        return self._data

    def _set_data(self, data):
        self._data = data
        self._formdata = None

    data = property(_get_data, _set_data)

    def _get_object_list(self):
        # Cache (pk-string, object) pairs for this field instance.
        if self._object_list is None:
            query = self.query or self.query_factory()
            get_pk = self.get_pk
            self._object_list = list((text_type(get_pk(obj)), obj) for obj in query)
        return self._object_list

    def iter_choices(self):
        """Yield (value, label, selected) triples, with a leading blank
        choice when the field is not required."""
        if not self.flags.required:
            yield (None,
                   None,
                   self.data == [] if self.multiple else self.data is None,)
        predicate = (operator.contains
                     if (self.multiple and self.data is not None)
                     else operator.eq)
        # remember: operator.contains(b, a) ==> a in b
        # so: obj in data ==> contains(data, obj)
        predicate = partial(predicate, self.data)
        for pk, obj in self._get_object_list():
            yield (pk, self.get_label(obj), predicate(obj))

    def process_formdata(self, valuelist):
        if not valuelist:
            self.data = [] if self.multiple else None
        else:
            self._data = None
            if not self.multiple:
                valuelist = valuelist[0]
            self._formdata = valuelist

    def pre_validate(self, form):
        """Reject any submitted value that is not in the current query."""
        if not self.allow_blank or self.data is not None:
            data = set(self.data if self.multiple else [self.data])
            valid = {obj for pk, obj in self._get_object_list()}
            if (data - valid):
                raise ValidationError(self.gettext('Not a valid choice'))
class JsonSelect2Field(SelectFieldBase):
    """
    TODO: rewrite this docstring. This is copy-pasted from QuerySelectField

    Will display a select drop-down field whose options are fetched over
    AJAX (Select2). The `data` property stores/keeps ORM model instance(s),
    not the ID(s).

    Submitted primary keys are resolved through `model_class.query.get()`.

    If the field is not flagged required, a blank choice is offered and
    selecting it leaves `data` as `None`. The label for this blank choice can
    be set with the `blank_text` parameter.

    :param ajax_source: URL serving the Select2 AJAX results.
    :param model_class: can be an sqlalchemy model, or a string with model
        name. The model will be looked up in sqlalchemy class registry on
        first access. This allows to use a model when it cannot be imported
        during field declaration.
    :param multiple: accept and return a list of instances instead of one.
    """

    def __init__(self, label=None, validators=None, ajax_source=None, widget=None,
                 blank_text='', model_class=None, multiple=False, **kwargs):
        self.multiple = multiple
        if widget is None:
            widget = Select2Ajax(self.multiple)
        kwargs['widget'] = widget
        super(JsonSelect2Field, self).__init__(label, validators, **kwargs)
        self.ajax_source = ajax_source
        self._model_class = model_class
        # Blank is allowed iff no validator set the `required` flag.
        self.allow_blank = not self.flags.required
        self.blank_text = blank_text

    @locked_cached_property
    def model_class(self):
        """Resolve the target model, accepting either a class or its name."""
        cls = self._model_class
        if isinstance(cls, type) and issubclass(cls, db.Model):
            return cls
        reg = db.Model._decl_class_registry
        return reg[cls]

    def iter_choices(self):
        """Yield the currently-selected choices (plus a leading blank when
        the field is optional); the full option list comes via AJAX."""
        if not self.flags.required:
            yield (None, None, self.data is None,)
        data = self.data
        if not self.multiple:
            if data is None:
                # `return`, not `raise StopIteration`: PEP 479 turns an
                # explicit StopIteration inside a generator into a
                # RuntimeError on Python 3.7+.
                return
            data = [data]
        elif not data:
            return
        for obj in data:
            yield (obj.id, obj.name, True)

    def _get_data(self):
        # Lazily resolve posted pk string(s) to model instance(s).
        formdata = self._formdata
        if formdata:
            if not self.multiple:
                formdata = [formdata]
            # NOTE(review): int(pk) raises ValueError on malformed input;
            # presumably upstream widgets only post integer ids -- confirm.
            data = [self.model_class.query.get(int(pk)) for pk in formdata]
            if not self.multiple:
                data = data[0] if data else None
            self._set_data(data)
        return self._data

    def _set_data(self, data):
        self._data = data
        self._formdata = None

    data = property(_get_data, _set_data)

    def process_formdata(self, valuelist):
        if not valuelist:
            self.data = [] if self.multiple else None
        else:
            self._data = None
            if hasattr(self.widget, 'process_formdata'):
                # might need custom deserialization, i.e Select2 3.x with
                # multiple + ajax
                valuelist = self.widget.process_formdata(valuelist)
            if not self.multiple:
                valuelist = valuelist[0]
            self._formdata = valuelist
class JsonSelect2MultipleField(JsonSelect2Field):
    # Legacy alias kept for backward compatibility: use
    # JsonSelect2Field(multiple=True) in new code.
    # NOTE(review): this subclass does not force multiple=True by itself;
    # callers presumably still pass it — confirm before relying on the name.
    pass
class LocaleSelectField(SelectField):
    """Locale selection field rendered with Select2.

    Values are `babel.Locale` instances (via `coerce`); the submitted form
    value is the locale's language code.
    """
    widget = Select2()

    def __init__(self, *args, **kwargs):
        kwargs['coerce'] = babel.Locale.parse
        # FIX: materialize choices into a list. A generator expression is
        # exhausted after its first traversal (e.g. first validation),
        # leaving the field with no valid choices afterwards.
        kwargs['choices'] = [locale_info
                             for locale_info in i18n.supported_app_locales()]
        super(LocaleSelectField, self).__init__(*args, **kwargs)

    def iter_choices(self):
        # Blank choice first when the field is optional.
        if not self.flags.required:
            yield (None, None, self.data is None,)
        for locale, label in i18n.supported_app_locales():
            yield (locale.language, label.capitalize(), locale == self.data)
class TimezoneField(SelectField):
    """Timezone selection field rendered with Select2.

    Values are timezone objects (via `coerce`); the submitted form value is
    the timezone's zone name.
    """
    widget = Select2()

    def __init__(self, *args, **kwargs):
        kwargs['coerce'] = babel.dates.get_timezone
        # FIX: materialize choices into a list. A generator expression is
        # exhausted after its first traversal (e.g. first validation),
        # leaving the field with no valid choices afterwards.
        kwargs['choices'] = [tz_info for tz_info in i18n.timezones_choices()]
        super(TimezoneField, self).__init__(*args, **kwargs)

    def iter_choices(self):
        # Blank choice first when the field is optional.
        if not self.flags.required:
            yield (None, None, self.data is None,)
        for tz, label in i18n.timezones_choices():
            yield (tz.zone, label, tz == self.data)
JsonSelect2Field: fix args passed to default widget
# coding=utf-8
"""
"""
from __future__ import absolute_import
import operator
import logging
from functools import partial
import datetime
import sqlalchemy as sa
from wtforms import (
ValidationError,
Field,
SelectMultipleField,
SelectField,
SelectFieldBase,
FormField,
)
from wtforms.validators import required, optional
from wtforms.compat import string_types, text_type
from wtforms.ext.sqlalchemy.fields import get_pk_from_identity, has_identity_key
from wtforms_alchemy import ModelFieldList as BaseModelFieldList
import babel
from flask.helpers import locked_cached_property
from flask_wtf.file import FileField as BaseFileField
from flask_babel import (
get_locale, get_timezone,
format_date, format_datetime
)
from abilian import i18n
from abilian.core.util import utc_dt
from abilian.core.extensions import db
from .widgets import DateTimeInput, DateInput, Select2, Select2Ajax, FileInput
from .util import babel2datetime
__all__ = ['ModelFieldList', 'FileField', 'DateField', 'Select2Field',
'Select2MultipleField', 'QuerySelect2Field', 'JsonSelect2Field',
'JsonSelect2MultipleField']
class ModelFieldList(BaseModelFieldList):
    """Field list that discards empty rows before validating.

    A row whose inputs are all falsy (nothing entered) is removed from
    `self.entries` instead of triggering validation errors.
    """

    def validate(self, form, extra_validators=tuple()):
        # FIX: iterate over a snapshot of the list. Removing from the list
        # being iterated makes the iterator skip the entry that follows each
        # removed one, so consecutive empty rows were not all discarded.
        for field in list(self.entries):
            is_subform = isinstance(field, FormField)
            data = field.data.values() if is_subform else [field.data]
            if not any(data):
                # all inputs empty: discard row
                self.entries.remove(field)
        return super(ModelFieldList, self).validate(form, extra_validators)
class FileField(BaseFileField):
    """
    support 'multiple' attribute, enabling html5 multiple file input in widget.
    can store file using a related model

    :param blob_attr: attribute name to store / retrieve value on related model.
        Used if `name` is a relationship on model. Defauts to `'value'`
    :param multiple: enable html5 multiple file input rendering.
    :param allow_delete: honor the ``__<name>_delete__`` form flag to clear
        the stored content.
    """
    # class-level defaults; each may be overridden per-instance via __init__
    multiple = False
    widget = FileInput()
    blob_attr = 'value'
    allow_delete = True

    def __init__(self, *args, **kwargs):
        # `multiple` is popped so BaseFileField never sees it.
        try:
            self.multiple = kwargs.pop('multiple')
        except KeyError:
            pass
        self.blob_attr = kwargs.pop('blob_attr', self.__class__.blob_attr)
        self.allow_delete = kwargs.pop('allow_delete', self.__class__.allow_delete)
        BaseFileField.__init__(self, *args, **kwargs)

    def __call__(self, **kwargs):
        # Inject the html `multiple` attribute at render time unless the
        # caller set it explicitly.
        if 'multiple' not in kwargs and self.multiple:
            kwargs['multiple'] = 'multiple'
        return BaseFileField.__call__(self, **kwargs)

    def process(self, formdata, *args, **kwargs):
        # Detect the "delete current file" checkbox/flag in submitted data.
        delete_arg = u'__{name}_delete__'.format(name=self.name)
        self._delete_file = formdata and delete_arg in formdata
        return super(FileField, self).process(formdata, *args, **kwargs)

    def process_data(self, value):
        # When the bound object attribute is a related model instance,
        # display the blob attribute rather than the instance itself.
        if isinstance(value, db.Model):
            value = getattr(value, self.blob_attr)
        return super(FileField, self).process_data(value)

    def populate_obj(self, obj, name):
        """
        Store file content on `obj`, either directly or on a related
        model's `blob_attr` when `name` is a relationship.
        """
        # Nothing to do unless a file was uploaded or deletion was requested.
        if not self.has_file() and not (self.allow_delete and self._delete_file):
            return

        state = sa.inspect(obj)
        mapper = state.mapper
        if name not in mapper.relationships:
            # directly store in database
            return super(FileField, self).populate_obj(obj, name)

        rel = getattr(mapper.relationships, name)
        if rel.uselist:
            raise ValueError("Only single target supported; else use ModelFieldList")

        val = getattr(obj, name)
        if val is None:
            # create and attach the related instance on first store
            val = rel.mapper.class_()
            setattr(obj, name, val)

        # deletion stores an empty string rather than removing the instance
        data = self.data.read() if self.has_file() else u''
        setattr(val, self.blob_attr, data)
class DateTimeField(Field):
    """
    Datetime field rendered and parsed with the locale's *short* date and
    time formats (months forced numeric, years forced to 4 digits).
    Parsed values are converted to UTC-aware datetimes.
    """
    widget = DateTimeInput()

    def __init__(self, label=None, validators=None, **kwargs):
        super(DateTimeField, self).__init__(label, validators, **kwargs)

    def _value(self):
        # value used by the widget: echo raw input on failed submit,
        # else format stored data with the locale pattern
        if self.raw_data:
            return ' '.join(self.raw_data)
        else:
            locale = get_locale()
            date_fmt = locale.date_formats['short'].pattern
            # force numerical months and 4 digit years
            date_fmt = date_fmt.replace('MMMM', 'MM')\
                               .replace('MMM', 'MM')\
                               .replace('yyyy', 'y')\
                               .replace('yy', 'y')\
                               .replace('y', 'yyyy')
            time_fmt = locale.time_formats['short']
            dt_fmt = locale.datetime_formats['short'].format(time_fmt, date_fmt)
            return format_datetime(self.data, dt_fmt) if self.data else ''

    def process_formdata(self, valuelist):
        if valuelist:
            date_str = ' '.join(valuelist)
            locale = get_locale()
            date_fmt = locale.date_formats['short']
            date_fmt = babel2datetime(date_fmt)
            date_fmt = date_fmt.replace('%B', '%m')\
                               .replace('%b', '%m')  # force numerical months
            time_fmt = locale.time_formats['short']
            time_fmt = babel2datetime(time_fmt)
            # NOTE(review): the parse format joins date and time with ' | ',
            # which presumably matches how DateTimeInput serializes its two
            # inputs — confirm against the widget before changing.
            datetime_fmt = u'{} | {}'.format(date_fmt, time_fmt)
            try:
                self.data = datetime.datetime.strptime(date_str, datetime_fmt)
                if not self.data.tzinfo:
                    # naive input is assumed to be in the user's timezone,
                    # then normalized to UTC
                    self.data = utc_dt(get_timezone().localize(self.data))
            except ValueError:
                self.data = None
                raise ValueError(self.gettext('Not a valid datetime value'))
class DateField(DateTimeField):
    """
    Date field rendered and parsed with the locale's *short* date format,
    with months forced numeric and years forced to 4 digits. Stores a
    `datetime.date`.
    """
    widget = DateInput()

    def __init__(self, label=None, validators=None, **kwargs):
        super(DateField, self).__init__(label, validators, **kwargs)

    def _value(self):
        # On a failed submit, echo the raw input back to the widget.
        if self.raw_data:
            return ' '.join(self.raw_data)

        fmt = get_locale().date_formats['short'].pattern
        # force numerical months and 4 digit years
        for old, new in (('MMMM', 'MM'), ('MMM', 'MM'),
                         ('yyyy', 'y'), ('yy', 'y'), ('y', 'yyyy')):
            fmt = fmt.replace(old, new)
        return format_date(self.data, fmt) if self.data else ''

    def process_formdata(self, valuelist):
        if not valuelist:
            return

        raw = ' '.join(valuelist)
        fmt = babel2datetime(get_locale().date_formats['short'])
        fmt = fmt.replace('%B', '%m').replace('%b', '%m')  # numeric months
        try:
            self.data = datetime.datetime.strptime(raw, fmt).date()
        except ValueError:
            self.data = None
            raise ValueError(self.gettext('Not a valid datetime value'))
class Select2Field(SelectField):
    """SelectField rendered with the Select2 widget.

    Unlike a plain SelectField, `choices` may be a callable returning the
    iterable of choices; it is evaluated anew on each access.
    """
    widget = Select2()

    @property
    def choices(self):
        if callable(self._choices):
            return self._choices()
        return self._choices

    @choices.setter
    def choices(self, choices):
        self._choices = choices
class Select2MultipleField(SelectMultipleField):
    """Multiple-selection field rendered with the Select2 widget.

    `choices` may be a callable returning the iterable of choices; it is
    evaluated anew on each access.
    """
    widget = Select2(multiple=True)
    multiple = True

    @property
    def choices(self):
        if callable(self._choices):
            return self._choices()
        return self._choices

    @choices.setter
    def choices(self, choices):
        self._choices = choices
class QuerySelect2Field(SelectFieldBase):
    """
    COPY/PASTED (and patched) from WTForms!

    Will display a select drop-down field to choose between ORM results in a
    sqlalchemy `Query`. The `data` property actually will store/keep an ORM
    model instance, not the ID. Submitting a choice which is not in the query
    will result in a validation error.

    This field only works for queries on models whose primary key column(s)
    have a consistent string representation. This means it mostly only works
    for those composed of string, unicode, and integer types. For the most
    part, the primary keys will be auto-detected from the model, alternately
    pass a one-argument callable to `get_pk` which can return a unique
    comparable key.

    The `query` property on the field can be set from within a view to assign
    a query per-instance to the field. If the property is not set, the
    `query_factory` callable passed to the field constructor will be called to
    obtain a query.

    Specify `get_label` to customize the label associated with each option. If
    a string, this is the name of an attribute on the model object to use as
    the label text. If a one-argument callable, this callable will be passed
    model instance and expected to return the label text. Otherwise, the model
    object's `__str__` or `__unicode__` will be used.

    :param allow_blank: DEPRECATED. Use optional()/required() validators instead.
    """

    def __init__(self, label=None, validators=None, query_factory=None,
                 get_pk=None, get_label=None, allow_blank=False,
                 blank_text='', widget=None, multiple=False, **kwargs):
        if widget is None:
            widget = Select2(multiple=multiple)
        kwargs['widget'] = widget
        self.multiple = multiple

        # FIX: `validators` may be None; appending to it crashed with
        # AttributeError in exactly the branch meant to handle that case.
        if validators is None:
            validators = []
        if not any(isinstance(v, (optional, required)) for v in validators):
            logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
            # FIX: typo in log message ("paramater")
            logger.warning('Use deprecated parameter `allow_blank`.')
            validators.append(optional() if allow_blank else required())

        super(QuerySelect2Field, self).__init__(label, validators, **kwargs)

        # PATCHED!
        if query_factory:
            self.query_factory = query_factory

        if get_pk is None:
            if not has_identity_key:
                raise Exception('The sqlalchemy identity_key function could not be imported.')
            self.get_pk = get_pk_from_identity
        else:
            self.get_pk = get_pk

        if get_label is None:
            self.get_label = lambda x: x
        elif isinstance(get_label, string_types):
            self.get_label = operator.attrgetter(get_label)
        else:
            self.get_label = get_label

        self.allow_blank = allow_blank
        self.blank_text = blank_text
        self.query = None
        self._object_list = None

    def _get_data(self):
        # Lazily resolve submitted pk(s) against the query's object list.
        formdata = self._formdata
        if formdata is not None:
            if not self.multiple:
                formdata = [formdata]
            formdata = set(formdata)
            data = [obj for pk, obj in self._get_object_list()
                    if pk in formdata]
            # stable display order: by `name` when every object has one
            # NOTE(review): the fallback sorted(data) requires orderable
            # objects — may raise TypeError on arbitrary models.
            if all(hasattr(x, 'name') for x in data):
                data = sorted(data, key=lambda x: x.name)
            else:
                data = sorted(data)
            if data:
                if not self.multiple:
                    data = data[0]
                self._set_data(data)
        return self._data

    def _set_data(self, data):
        self._data = data
        self._formdata = None

    data = property(_get_data, _set_data)

    def _get_object_list(self):
        # (pk-string, object) pairs, computed once per request/field instance.
        if self._object_list is None:
            query = self.query or self.query_factory()
            get_pk = self.get_pk
            self._object_list = list((text_type(get_pk(obj)), obj) for obj in query)
        return self._object_list

    def iter_choices(self):
        if not self.flags.required:
            yield (None,
                   None,
                   self.data == [] if self.multiple else self.data is None,)

        predicate = (operator.contains
                     if (self.multiple and self.data is not None)
                     else operator.eq)
        # remember: operator.contains(b, a) ==> a in b
        # so: obj in data ==> contains(data, obj)
        predicate = partial(predicate, self.data)

        for pk, obj in self._get_object_list():
            yield (pk, self.get_label(obj), predicate(obj))

    def process_formdata(self, valuelist):
        if not valuelist:
            self.data = [] if self.multiple else None
        else:
            self._data = None
            if not self.multiple:
                valuelist = valuelist[0]
            self._formdata = valuelist

    def pre_validate(self, form):
        # Reject any submitted object that is not in the query results.
        if not self.allow_blank or self.data is not None:
            data = set(self.data if self.multiple else [self.data])
            valid = {obj for pk, obj in self._get_object_list()}
            if (data - valid):
                raise ValidationError(self.gettext('Not a valid choice'))
class JsonSelect2Field(SelectFieldBase):
    """Select field whose choices are fetched over ajax (Select2).

    The `data` property stores ORM model instance(s), not ids: submitted
    primary keys are resolved with ``model_class.query.get``.

    If the field is not flagged required, a blank choice is offered; selecting
    it results in the `data` property being `None`. Its label can be set with
    the `blank_text` parameter.

    :param ajax_source: url of the json endpoint queried by the widget.
    :param model_class: an sqlalchemy model, or a string with a model name.
        The model is then looked up in the sqlalchemy class registry on first
        access, which allows using a model that cannot be imported at field
        declaration time.
    :param multiple: if `True`, `data` is a list of instances.
    """

    def __init__(self, label=None, validators=None, ajax_source=None,
                 widget=None, blank_text='', model_class=None, multiple=False,
                 **kwargs):
        self.multiple = multiple
        if widget is None:
            widget = Select2Ajax(multiple=self.multiple)
        kwargs['widget'] = widget
        super(JsonSelect2Field, self).__init__(label, validators, **kwargs)
        self.ajax_source = ajax_source
        self._model_class = model_class
        self.allow_blank = not self.flags.required
        self.blank_text = blank_text

    @locked_cached_property
    def model_class(self):
        # Resolve lazily: `model_class` may have been given as a string.
        cls = self._model_class
        if isinstance(cls, type) and issubclass(cls, db.Model):
            return cls
        reg = db.Model._decl_class_registry
        return reg[cls]

    def iter_choices(self):
        if not self.flags.required:
            yield (None, None, self.data is None,)

        data = self.data
        if not self.multiple:
            if data is None:
                # FIX: `raise StopIteration` inside a generator is turned
                # into a RuntimeError since PEP 479 (Python 3.7+); a plain
                # `return` ends the generator cleanly.
                return
            data = [data]
        elif not data:
            return

        # Only the currently selected objects are rendered; the full choice
        # list comes from the ajax endpoint.
        for obj in data:
            yield (obj.id, obj.name, True)

    def _get_data(self):
        # Lazily resolve submitted pks into model instances.
        formdata = self._formdata
        if formdata:
            if not self.multiple:
                formdata = [formdata]
            data = [self.model_class.query.get(int(pk)) for pk in formdata]
            if not self.multiple:
                data = data[0] if data else None
            self._set_data(data)
        return self._data

    def _set_data(self, data):
        self._data = data
        self._formdata = None

    data = property(_get_data, _set_data)

    def process_formdata(self, valuelist):
        if not valuelist:
            self.data = [] if self.multiple else None
        else:
            self._data = None
            if hasattr(self.widget, 'process_formdata'):
                # might need custom deserialization, i.e Select2 3.x with
                # multiple + ajax
                valuelist = self.widget.process_formdata(valuelist)
            if not self.multiple:
                valuelist = valuelist[0]
            self._formdata = valuelist
class JsonSelect2MultipleField(JsonSelect2Field):
    # Legacy alias kept for backward compatibility: use
    # JsonSelect2Field(multiple=True) in new code.
    # NOTE(review): this subclass does not force multiple=True by itself;
    # callers presumably still pass it — confirm before relying on the name.
    pass
class LocaleSelectField(SelectField):
    """Locale selection field rendered with Select2.

    Values are `babel.Locale` instances (via `coerce`); the submitted form
    value is the locale's language code.
    """
    widget = Select2()

    def __init__(self, *args, **kwargs):
        kwargs['coerce'] = babel.Locale.parse
        # FIX: materialize choices into a list. A generator expression is
        # exhausted after its first traversal (e.g. first validation),
        # leaving the field with no valid choices afterwards.
        kwargs['choices'] = [locale_info
                             for locale_info in i18n.supported_app_locales()]
        super(LocaleSelectField, self).__init__(*args, **kwargs)

    def iter_choices(self):
        # Blank choice first when the field is optional.
        if not self.flags.required:
            yield (None, None, self.data is None,)
        for locale, label in i18n.supported_app_locales():
            yield (locale.language, label.capitalize(), locale == self.data)
class TimezoneField(SelectField):
    """Timezone selection field rendered with Select2.

    Values are timezone objects (via `coerce`); the submitted form value is
    the timezone's zone name.
    """
    widget = Select2()

    def __init__(self, *args, **kwargs):
        kwargs['coerce'] = babel.dates.get_timezone
        # FIX: materialize choices into a list. A generator expression is
        # exhausted after its first traversal (e.g. first validation),
        # leaving the field with no valid choices afterwards.
        kwargs['choices'] = [tz_info for tz_info in i18n.timezones_choices()]
        super(TimezoneField, self).__init__(*args, **kwargs)

    def iter_choices(self):
        # Blank choice first when the field is optional.
        if not self.flags.required:
            yield (None, None, self.data is None,)
        for tz, label in i18n.timezones_choices():
            yield (tz.zone, label, tz == self.data)
|
# coding=utf-8
import ibei

# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- Project information -----------------------------------------------------

project = 'ibei'
copyright = '2022, Joshua Ryan Smith'
author = 'Joshua Ryan Smith'
# Keep the documented version in sync with the installed package.
version = ibei.__version__

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinxcontrib.bibtex'
]

# Configuration for `autodoc`.
autodoc_member_order = "bysource"

# Configuration for `sphinxcontrib-bibtex`.
bibtex_bibfiles = ['bib.bib']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'

# FIX: removed the `html_sidebars` override ('about.html', 'navigation.html',
# 'searchbox.html'): those sidebar templates belong to the alabaster theme
# and are superfluous with sphinx_rtd_theme, which provides its own sidebar.

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
Remove superfluous config
# coding=utf-8
import ibei

# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- Project information -----------------------------------------------------

project = 'ibei'
copyright = '2022, Joshua Ryan Smith'
author = 'Joshua Ryan Smith'
# Keep the documented version in sync with the installed package.
version = ibei.__version__

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinxcontrib.bibtex'
]

# Configuration for `autodoc`.
autodoc_member_order = "bysource"

# Configuration for `sphinxcontrib-bibtex`.
bibtex_bibfiles = ['bib.bib']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. No html_sidebars override: sphinx_rtd_theme
# provides its own sidebar.
html_theme = 'sphinx_rtd_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
# coding=utf-8
"""
Class based views
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import logging
import sqlalchemy as sa
from flask import current_app, flash, g, redirect, render_template, request, \
url_for
from six import text_type
from werkzeug.exceptions import BadRequest, NotFound
from abilian.core.entities import ValidationError
from abilian.core.signals import activity
from abilian.i18n import _, _l
from abilian.services.security import CREATE, DELETE, READ, WRITE
from .. import csrf, forms, nav
from ..action import ButtonAction, Endpoint, actions
from .base import JSONView, View
logger = logging.getLogger(__name__)
class BaseObjectView(View):
    """
    Base class common to all database objects views.
    """
    #: form title
    title = None
    #: Model class
    Model = None
    #: primary key name to look for in url arguments
    pk = 'object_id'
    #: object instance for this view
    obj = None
    #: object id
    object_id = None
    #: template to render
    template = None
    #: default templates inherit from "base_template". This allows to use generic
    #: templates with a custom base
    base_template = "base.html"

    def __init__(self, Model=None, pk=None, base_template=None, *args,
                 **kwargs):
        View.__init__(self, *args, **kwargs)
        # constructor arguments override class-level defaults
        cls = self.__class__
        self.pk = pk if pk is not None else cls.pk
        self.Model = Model if Model is not None else cls.Model
        self.base_template = (base_template if base_template is not None else
                              cls.base_template)

    def prepare_args(self, args, kwargs):
        # Resolve the object from url arguments; 404 when not found.
        args, kwargs = self.init_object(args, kwargs)
        if self.obj is None:
            raise NotFound()
        return args, kwargs

    def breadcrumb(self):
        """
        Return :class:`..nav.BreadcrumbItem` instance for this object.

        This method may return a list of BreadcrumbItem instances. Return
        `None` if nothing.
        """
        return None

    def init_object(self, args, kwargs):
        """
        This method is reponsible for setting :attr:`obj`. It is called during
        :meth:`prepare_args`.
        """
        self.object_id = kwargs.pop(self.pk, None)
        if self.object_id is not None:
            self.obj = self.Model.query.get(self.object_id)
            # expose current object to the actions system
            actions.context['object'] = self.obj
        return args, kwargs

    def get(self, *args, **kwargs):
        bc = self.breadcrumb()
        if bc is not None:
            bc = [bc] if isinstance(bc, nav.BreadcrumbItem) else list(bc)
            assert all(isinstance(b, nav.BreadcrumbItem) for b in bc)
            g.breadcrumb.extend(bc)

        # NOTE: incoming kwargs are deliberately discarded; render context is
        # built solely from base_template + template_kwargs.
        kwargs = {'base_template': self.base_template}
        kwargs.update(self.template_kwargs)
        # forbid override "view"
        kwargs['view'] = self
        return render_template(self.template, **kwargs)

    @property
    def template_kwargs(self):
        """
        Template render arguments. You can override `base_template` for
        instance. Only `view` cannot be overriden.
        """
        return {}
class ObjectView(BaseObjectView):
    """View objects.
    """
    #: html template
    template = 'default/object_view.html'
    #: View form class. Form object used to show objects fields
    Form = None
    #: required permission. Must be an instance of
    #: :class:`abilian.services.security.Permission`
    permission = READ
    #: form instance for this view
    form = None

    def __init__(self,
                 Model=None,
                 pk=None,
                 Form=None,
                 template=None,
                 *args,
                 **kwargs):
        super(ObjectView, self).__init__(Model, pk, *args, **kwargs)
        cls = self.__class__
        self.Form = Form if Form is not None else cls.Form
        self.template = template if template is not None else cls.template

    def prepare_args(self, args, kwargs):
        """
        :attr:`form` is initialized here. See also :meth:`View.prepare_args`.
        """
        args, kwargs = super(ObjectView, self).prepare_args(args, kwargs)
        self.form = self.Form(**self.get_form_kwargs())
        return args, kwargs

    def get_form_kwargs(self):
        # abilian forms receive the permission so they can filter fields
        kw = dict(obj=self.obj)
        if issubclass(self.Form, forms.Form) and self.permission:
            kw['permission'] = self.permission
        return kw

    def index_url(self):
        return url_for('.index')

    def redirect_to_index(self):
        return redirect(self.index_url())

    @property
    def template_kwargs(self):
        """Provides :attr:`form` to templates
        """
        kw = super(ObjectView, self).template_kwargs
        kw['form'] = self.form
        return kw
#: Generic "Cancel" form button.
CANCEL_BUTTON = ButtonAction(
    'form',
    'cancel',
    title=_l(u'Cancel'),
    btn_class='default cancel'  # .cancel: if jquery.validate is used it will
)                               # properly skip validation

#: "Save" button shown on edit forms.
EDIT_BUTTON = ButtonAction(
    'form', 'edit', btn_class='primary', title=_l(u'Save'))

#: "Create and add another" button; shown only when the view opts in via
#: an `add_another_button` attribute.
ADD_ANOTHER_BUTTON = ButtonAction(
    'form',
    'create_add_another',
    btn_class='primary',
    title=_l(u'Create and add another'),
    condition=lambda ctx: getattr(ctx['view'], 'add_another_button', False),)
class ObjectEdit(ObjectView):
    """Edit objects.
    """
    template = 'default/object_edit.html'
    decorators = (csrf.support_graceful_failure,)
    permission = WRITE

    #: :class:ButtonAction instance to show on form
    _buttons = ()
    #: submitted form data
    data = None
    #: action name from form data
    action = None
    #: button clicked, corresponding to :attr:`action`.
    button = None
    #: verb used to describe activity
    activity_verb = 'update'
    #: UI flash message
    _message_success = _l(u"Entity successfully edited")
    #: endpoint used to redirect to the object view after edit
    view_endpoint = None

    def __init__(self,
                 Model=None,
                 pk=None,
                 Form=None,
                 template=None,
                 view_endpoint=None,
                 message_success=None,
                 *args,
                 **kwargs):
        ObjectView.__init__(
            self, Model, pk, Form, template=template, *args, **kwargs)
        if view_endpoint is not None:
            self.view_endpoint = view_endpoint
        if not self.view_endpoint:
            # default endpoint convention: ".<ModelName>_view"
            self.view_endpoint = '.{}_view'.format(self.Model.__name__)
        if message_success:
            self._message_success = message_success

    def post(self, *args, **kwargs):
        # conservative: no action submitted -> cancel
        action = self.data.get('__action', u'cancel')
        if action == u'cancel':
            return self.cancel()
        return self.handle_action(action)

    def put(self):
        # PUT is treated exactly like POST
        return self.post()

    def prepare_args(self, args, kwargs):
        args, kwargs = super(ObjectEdit, self).prepare_args(args, kwargs)
        self._buttons = self.get_form_buttons(*args, **kwargs)
        self.data = request.form
        return args, kwargs

    def get_form_buttons(self, *args, **kwargs):
        return [EDIT_BUTTON, CANCEL_BUTTON]

    @property
    def buttons(self):
        # buttons actually displayed: those available in current context
        return (button for button in self._buttons
                if button.available(actions.context))

    def view_url(self):
        kw = {self.pk: self.obj.id}
        return url_for(self.view_endpoint, **kw)

    def redirect_to_view(self):
        # a button may define its own redirect target
        if self.button:
            url = self.button.url(actions.context)
            if url:
                return redirect(url)
        return redirect(self.view_url())

    def message_success(self):
        return text_type(self._message_success)

    # actions
    def handle_action(self, action):
        # Dispatch the submitted `__action` to the method of the same name,
        # after checking the corresponding button exists and is available.
        for button in self._buttons:
            if action == button.name:
                if not button.available(dict(view=self)):
                    raise ValueError('Action "{}" not available'
                                     ''.format(action.encode('utf-8')))
                break
        else:
            raise ValueError(
                'Unknown action: "{}"'.format(action.encode('utf-8')))

        self.action = action
        self.button = button
        return getattr(self, action)()

    def cancel(self):
        return self.redirect_to_view()

    def edit(self, redirect_to=None):
        if self.validate():
            return self.form_valid(redirect_to=redirect_to)
        else:
            if request.csrf_failed:
                errors = self.form.errors
                csrf_failed = errors.pop('csrf_token', False)
                if csrf_failed and not errors:
                    # failed only because of invalid/expired csrf, no error on
                    # form
                    return self.form_csrf_invalid()

            resp = self.form_invalid()
            if resp:
                return resp

            flash(_(u"Please fix the error(s) below"), "error")

        # if we end here then something wrong has happened: show form with error
        # messages
        return self.get()

    def before_populate_obj(self):
        """
        This method is called after form has been validated and before calling
        `form.populate_obj()`. Sometimes one may want to remove a field from
        the form because it's non-sense to store it on edited object, and use it in
        a specific manner, for example::

            image = form.image
            del form.image
            store_image(image)
        """
        pass

    def after_populate_obj(self):
        """
        Called after `self.obj` values have been updated, and `self.obj`
        attached to an ORM session.
        """
        pass

    def handle_commit_exception(self, exc):
        """
        Hook point to handle exception that may happen during commit.

        It is the responsability of this method to perform a rollback if it is
        required for handling `exc`. If the method does not handle `exc` if should
        do nothing and return None.

        :returns: * a valid :class:`Response` if exception is handled.
                  * `None` if exception is not handled. Default handling happens.
        """
        return None

    def commit_success(self):
        """
        Called after object has been successfully saved to database
        """

    def validate(self):
        return self.form.validate()

    def form_valid(self, redirect_to=None):
        """Save object.

        Called when form is validated.

        :param redirect_to: real url (created with url_for) to redirect to,
            instead of the view by default.
        """
        session = current_app.db.session()

        # populate with autoflush disabled, so an incomplete object cannot be
        # accidentally flushed mid-update
        with session.no_autoflush:
            self.before_populate_obj()
            self.form.populate_obj(self.obj)
            session.add(self.obj)
            self.after_populate_obj()

        try:
            session.flush()
            self.send_activity()
            session.commit()
        except ValidationError as e:
            # domain-level validation failure: give hooks a chance, else
            # rollback and redisplay the form
            rv = self.handle_commit_exception(e)
            if rv is not None:
                return rv
            session.rollback()
            flash(e.message, "error")
            return self.get()
        except sa.exc.IntegrityError as e:
            rv = self.handle_commit_exception(e)
            if rv is not None:
                return rv
            session.rollback()
            logger.error(e)
            flash(_(u"An entity with this name already exists in the system."),
                  "error")
            return self.get()
        else:
            self.commit_success()
            flash(self.message_success(), "success")
            if redirect_to:
                return redirect(redirect_to)
            else:
                return self.redirect_to_view()

    def form_invalid(self):
        """
        When a form doesn't validate this method is called.

        It may return a :class:`Flask.Response` instance, to handle specific
        errors in custom screens.

        Else the edit form screen is returned with error(s) highlighted.

        This method is useful for detecting edition conflict using hidden fields
        and show a specific screen to help resolve the conflict.
        """
        return None

    def form_csrf_invalid(self):
        """
        Called when a form doesn't validate *only* because of csrf token expiration.

        This works only if form is an instance of :class:`flask_wtf.form.SecureForm`.
        Else default CSRF protection (before request) will take place.

        It must return a valid :class:`Flask.Response` instance. By default it
        returns to edit form screen with an informative message.
        """
        current_app.extensions['csrf-handler'].flash_csrf_failed_message()
        return self.get()

    def send_activity(self):
        activity.send(
            self,
            actor=g.user,
            verb=self.activity_verb,
            object=self.obj,
            target=self.activity_target)

    @property
    def activity_target(self):
        """
        Return `target` to use when creating activity.
        """
        return None
#: primary submit button on create forms; posts action name 'create'
CREATE_BUTTON = ButtonAction(
    'form', 'create', btn_class='primary', title=_l(u'Create'))
#: "create and add new" button: saves then redirects back to the same
#: create endpoint. Shown only when the view sets `chain_create_allowed`.
CHAIN_CREATE_BUTTON = ButtonAction(
    'form',
    'chain_create',
    btn_class='primary',
    title=_l(u'Create and add new'),
    endpoint=lambda ctx: Endpoint(request.endpoint, **request.view_args),
    condition=lambda ctx: getattr(ctx['view'], 'chain_create_allowed', False))
class ObjectCreate(ObjectEdit):
    """Create a new object.
    """
    permission = CREATE
    activity_verb = 'post'
    _message_success = _l(u"Entity successfully added")
    #: set to `True` to show 'Save and add new' button
    chain_create_allowed = False
    def __init__(self, *args, **kwargs):
        """Accept an optional `chain_create_allowed` keyword overriding the
        class-level default."""
        chain_create_allowed = kwargs.pop('chain_create_allowed', None)
        if chain_create_allowed is not None:
            self.chain_create_allowed = bool(chain_create_allowed)
        ObjectEdit.__init__(self, *args, **kwargs)
    def prepare_args(self, args, kwargs):
        # we must ensure that no flush() occurs and that obj is not registered in
        # session (to prevent accidental insert of an incomplete object)
        session = current_app.db.session()
        with session.no_autoflush:
            args, kwargs = super(ObjectCreate, self).prepare_args(args, kwargs)
            try:
                session.expunge(self.obj)
            except sa.exc.InvalidRequestError:
                # obj is not in session
                pass
        return args, kwargs
    def init_object(self, args, kwargs):
        """Instantiate a fresh, empty model instead of loading by primary
        key as the base class does."""
        self.obj = self.Model()
        return args, kwargs
    def get_form_kwargs(self):
        kw = super(ObjectCreate, self).get_form_kwargs()
        if request.method == 'GET':
            # when GET allow form prefill instead of empty/current object data
            # FIXME: filter allowed parameters on given a field flags (could be
            # 'allow_from_get'?)
            kw['formdata'] = request.args
        return kw
    def get_form_buttons(self, *args, **kwargs):
        return [CREATE_BUTTON, CHAIN_CREATE_BUTTON, CANCEL_BUTTON]
    def breadcrumb(self):
        return nav.BreadcrumbItem(label=CREATE_BUTTON.title)
    # actions
    def create(self):
        """'create' action: delegate to the inherited edit/save flow."""
        return self.edit()
    # 'chain_create' behaves like 'create'; the button's endpoint brings the
    # user back to the create form after saving
    chain_create = create
    def cancel(self):
        """On cancel there is no object to show: go back to the index."""
        return self.redirect_to_index()
#: submit button for delete confirmation forms; posts action name 'delete'
DELETE_BUTTON = ButtonAction('form', 'delete', title=_l(u'Delete'))
class ObjectDelete(ObjectEdit):
    """Delete object. Supports DELETE verb.
    """
    methods = ['POST']
    permission = DELETE
    activity_verb = 'delete'
    _message_success = _l(u"Entity deleted")
    # reuse the base lookup-by-pk: the object must exist before deletion
    init_object = BaseObjectView.init_object
    def get_form_buttons(self, *args, **kwargs):
        return [DELETE_BUTTON, CANCEL_BUTTON]
    def delete(self):
        """'delete' action: remove the object, emit the activity, commit."""
        session = current_app.db.session()
        session.delete(self.obj)
        # send the activity before commit so it is part of the same
        # transaction as the deletion
        activity.send(
            self,
            actor=g.user,
            verb="delete",
            object=self.obj,
            target=self.activity_target)
        try:
            session.commit()
        except sa.exc.IntegrityError as e:
            # a foreign key still references this object: roll back and
            # return to the object's view with an explanation
            rv = self.handle_commit_exception(e)
            if rv is not None:
                return rv
            session.rollback()
            logger.error(e)
            flash(_("This entity is referenced by another object and cannot be deleted."),
                  "error")
            return self.redirect_to_view()
        else:
            flash(self.message_success(), 'success')
        # FIXME: for DELETE verb response in case of success should be 200, 202
        # (accepted) or 204 (no content)
        return self.redirect_to_index()
class JSONBaseSearch(JSONView):
    """Base class for JSON search views: sanitizes the query string, enforces
    a minimum query length, and serializes results through :meth:`get_item`.
    """
    #: Model class searched by this view
    Model = None
    #: queries shorter than this raise BadRequest (0/None disables the check)
    minimum_input_length = 2
    def __init__(self, *args, **kwargs):
        Model = kwargs.pop('Model', self.Model)
        minimum_input_length = kwargs.pop('minimum_input_length',
                                          self.minimum_input_length)
        super(JSONBaseSearch, self).__init__(*args, **kwargs)
        self.Model = Model
        self.minimum_input_length = minimum_input_length
    def prepare_args(self, args, kwargs):
        args, kwargs = JSONView.prepare_args(self, args, kwargs)
        # strip SQL LIKE wildcard '%' and lowercase for case-insensitive match
        kwargs['q'] = kwargs.get("q", u'').replace(u"%", u" ").lower()
        return args, kwargs
    def data(self, q, *args, **kwargs):
        """Return the JSON payload: ``{'results': [item, ...]}``.
        :raises BadRequest: when `q` is shorter than `minimum_input_length`.
        """
        if self.minimum_input_length and len(q) < self.minimum_input_length:
            raise BadRequest('Minimum query length is {:d}'.format(
                self.minimum_input_length),)
        results = []
        for obj in self.get_results(q, **kwargs):
            results.append(self.get_item(obj))
        return dict(results=results)
    def get_results(self, q, *args, **kwargs):
        # subclasses must implement the actual search
        raise NotImplementedError
    def get_item(self, obj):
        """
        Return a result item
        :param obj: Instance object
        :returns: a dictionnary with at least `id` and `text` values
        """
        raise NotImplementedError
class JSONModelSearch(JSONBaseSearch):
    """
    Base class for json sqlalchemy model search, as used by select2 widgets for
    example
    """
    def get_results(self, q, *args, **kwargs):
        """Build the query through the overridable hooks and execute it."""
        base = self.options(self.Model.query)
        filtered = self.filter(base, q, **kwargs)
        ordered = self.order_by(filtered)
        if not (q or self.minimum_input_length):
            # empty query with no length constraint: cap the result size
            ordered = ordered.limit(50)
        return ordered.all()
    def options(self, query):
        """Skip relationship loading: only scalar columns are needed."""
        return query.options(sa.orm.noload('*'))
    def filter(self, query, q, **kwargs):
        """Keep rows whose lowercased name starts with `q`."""
        if q:
            return query.filter(sa.func.lower(self.Model.name).like(q + "%"))
        return query
    def order_by(self, query):
        """Sort results alphabetically by name."""
        return query.order_by(self.Model.name)
    def get_label(self, obj):
        """Text displayed for `obj` in the widget."""
        return obj.name
    def get_item(self, obj):
        """
        Return a result item.
        :param obj: Instance object
        :returns: a dictionnary with at least `id` and `text` values
        """
        label = self.get_label(obj)
        return {'id': obj.id, 'text': label, 'name': obj.name}
class JSONWhooshSearch(JSONBaseSearch):
    """
    Base class for JSON Whoosh search, as used by select2 widgets for example
    """
    def get_results(self, q, *args, **kwargs):
        """Query the 'indexing' service and return up to 50 hits.
        Hits are sorted on 'nom' (last name) when that field exists, else
        on 'name'; unsortable results are returned as-is.
        """
        svc = current_app.services['indexing']
        search_kwargs = {'limit': 50, 'Models': (self.Model,)}
        results = svc.search(q, **search_kwargs)
        # BUGFIX: itemkey must be bound *before* any statement that can
        # raise. Previously `results[0]` on an empty result set raised
        # IndexError before `itemkey` existed, and the except clause then
        # failed with "'itemkey' referenced before assignment".
        itemkey = None
        try:
            # 'nom' doesn't always exist but for Contacts, sorting on
            # the last name ('nom') feels more natural than 'name',
            # which starts with the first name ('prenom').
            res = results[0]
            fields = res.fields()
            if 'nom' in fields:
                itemkey = 'nom'
            elif 'name' in fields:
                itemkey = 'name'
            if itemkey:
                results = sorted(
                    results, key=lambda it: it.fields().get(itemkey))
        except Exception:
            # Only worth logging when a sort key was actually selected;
            # an empty result set is a normal, silent case.
            if itemkey is not None:
                logger.warning(
                    "we could not sort whoosh results on fields' key {}.".format(
                        itemkey))
        return results
    def get_item(self, hit):
        """Return a result item.
        :param hit: Hit object from Whoosh
        :returns: a dictionnary with at least `id` and `text` values
        """
        return dict(id=hit['id'], text=hit['name'], name=hit['name'])
fix error in logging
> 'itemkey' referenced before assignment.
This log message is relevant only when itemkey is either 'name' or
'nom'; there is nothing to log otherwise.
fixes https://sentry.io/abilian/extranet-mpr-demo/issues/255028004/
# coding=utf-8
"""
Class based views
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import logging
import sqlalchemy as sa
from flask import current_app, flash, g, redirect, render_template, request, \
url_for
from six import text_type
from werkzeug.exceptions import BadRequest, NotFound
from abilian.core.entities import ValidationError
from abilian.core.signals import activity
from abilian.i18n import _, _l
from abilian.services.security import CREATE, DELETE, READ, WRITE
from .. import csrf, forms, nav
from ..action import ButtonAction, Endpoint, actions
from .base import JSONView, View
logger = logging.getLogger(__name__)
class BaseObjectView(View):
    """
    Base class common to all database objects views.
    """
    #: form title
    title = None
    #: Model class
    Model = None
    #: primary key name to look for in url arguments
    pk = 'object_id'
    #: object instance for this view
    obj = None
    #: object id
    object_id = None
    #: template to render
    template = None
    #: default templates inherit from "base_template". This allows to use generic
    #: templates with a custom base
    base_template = "base.html"
    def __init__(self, Model=None, pk=None, base_template=None, *args,
                 **kwargs):
        """Constructor arguments default to the class-level attributes, so
        subclasses can configure either way."""
        View.__init__(self, *args, **kwargs)
        cls = self.__class__
        self.pk = pk if pk is not None else cls.pk
        self.Model = Model if Model is not None else cls.Model
        self.base_template = (base_template if base_template is not None else
                              cls.base_template)
    def prepare_args(self, args, kwargs):
        """Resolve :attr:`obj` from url arguments; 404 when not found."""
        args, kwargs = self.init_object(args, kwargs)
        if self.obj is None:
            raise NotFound()
        return args, kwargs
    def breadcrumb(self):
        """
        Return :class:`..nav.BreadcrumbItem` instance for this object.
        This method may return a list of BreadcrumbItem instances. Return
        `None` if nothing.
        """
        return None
    def init_object(self, args, kwargs):
        """
        This method is reponsible for setting :attr:`obj`. It is called during
        :meth:`prepare_args`.
        """
        # pop the pk from kwargs so the view method doesn't receive it twice
        self.object_id = kwargs.pop(self.pk, None)
        if self.object_id is not None:
            self.obj = self.Model.query.get(self.object_id)
            # expose the object to the actions system for this request
            actions.context['object'] = self.obj
        return args, kwargs
    def get(self, *args, **kwargs):
        """Render :attr:`template` with breadcrumbs and template kwargs."""
        bc = self.breadcrumb()
        if bc is not None:
            bc = [bc] if isinstance(bc, nav.BreadcrumbItem) else list(bc)
            assert all(isinstance(b, nav.BreadcrumbItem) for b in bc)
            g.breadcrumb.extend(bc)
        # NOTE: incoming kwargs are deliberately discarded; render arguments
        # come from base_template + template_kwargs only
        kwargs = {'base_template': self.base_template}
        kwargs.update(self.template_kwargs)
        # forbid override "view"
        kwargs['view'] = self
        return render_template(self.template, **kwargs)
    @property
    def template_kwargs(self):
        """
        Template render arguments. You can override `base_template` for
        instance. Only `view` cannot be overriden.
        """
        return {}
class ObjectView(BaseObjectView):
    """View objects.
    """
    #: html template
    template = 'default/object_view.html'
    #: View form class. Form object used to show objects fields
    Form = None
    #: required permission. Must be an instance of
    #: :class:`abilian.services.security.Permission`
    permission = READ
    #: form instance for this view
    form = None
    def __init__(self,
                 Model=None,
                 pk=None,
                 Form=None,
                 template=None,
                 *args,
                 **kwargs):
        """`Form` and `template` default to the class-level attributes."""
        super(ObjectView, self).__init__(Model, pk, *args, **kwargs)
        cls = self.__class__
        self.Form = Form if Form is not None else cls.Form
        self.template = template if template is not None else cls.template
    def prepare_args(self, args, kwargs):
        """
        :attr:`form` is initialized here. See also :meth:`View.prepare_args`.
        """
        args, kwargs = super(ObjectView, self).prepare_args(args, kwargs)
        self.form = self.Form(**self.get_form_kwargs())
        return args, kwargs
    def get_form_kwargs(self):
        """Keyword arguments used to instantiate :attr:`Form`."""
        kw = dict(obj=self.obj)
        # abilian forms support field-level permission filtering
        if issubclass(self.Form, forms.Form) and self.permission:
            kw['permission'] = self.permission
        return kw
    def index_url(self):
        """URL of the blueprint's index page."""
        return url_for('.index')
    def redirect_to_index(self):
        return redirect(self.index_url())
    @property
    def template_kwargs(self):
        """Provides :attr:`form` to templates
        """
        kw = super(ObjectView, self).template_kwargs
        kw['form'] = self.form
        return kw
#: cancel button shared by edit/create/delete forms
CANCEL_BUTTON = ButtonAction(
    'form',
    'cancel',
    title=_l(u'Cancel'),
    btn_class='default cancel' # .cancel: if jquery.validate is used it will
    )                          # properly skip validation
#: primary save button on edit forms; posts action name 'edit'
EDIT_BUTTON = ButtonAction(
    'form', 'edit', btn_class='primary', title=_l(u'Save'))
#: optional "create and add another" button, shown only when the view sets
#: `add_another_button`
ADD_ANOTHER_BUTTON = ButtonAction(
    'form',
    'create_add_another',
    btn_class='primary',
    title=_l(u'Create and add another'),
    condition=lambda ctx: getattr(ctx['view'], 'add_another_button', False),)
class ObjectEdit(ObjectView):
    """Edit objects.
    """
    template = 'default/object_edit.html'
    decorators = (csrf.support_graceful_failure,)
    permission = WRITE
    #: :class:ButtonAction instance to show on form
    _buttons = ()
    #: submitted form data
    data = None
    #: action name from form data
    action = None
    #: button clicked, corresponding to :attr:`action`.
    button = None
    #: verb used to describe activity
    activity_verb = 'update'
    #: UI flash message
    _message_success = _l(u"Entity successfully edited")
    #: endpoint of the object's "view" page; defaults to '.<Model>_view'
    view_endpoint = None
    def __init__(self,
                 Model=None,
                 pk=None,
                 Form=None,
                 template=None,
                 view_endpoint=None,
                 message_success=None,
                 *args,
                 **kwargs):
        """All parameters default to the corresponding class attributes."""
        ObjectView.__init__(
            self, Model, pk, Form, template=template, *args, **kwargs)
        if view_endpoint is not None:
            self.view_endpoint = view_endpoint
        if not self.view_endpoint:
            self.view_endpoint = '.{}_view'.format(self.Model.__name__)
        if message_success:
            self._message_success = message_success
    def post(self, *args, **kwargs):
        """Dispatch the submitted '__action' to the matching method."""
        # conservative: no action submitted -> cancel
        action = self.data.get('__action', u'cancel')
        if action == u'cancel':
            return self.cancel()
        return self.handle_action(action)
    def put(self):
        # PUT is treated exactly like POST
        return self.post()
    def prepare_args(self, args, kwargs):
        """Additionally collect form buttons and the submitted form data."""
        args, kwargs = super(ObjectEdit, self).prepare_args(args, kwargs)
        self._buttons = self.get_form_buttons(*args, **kwargs)
        self.data = request.form
        return args, kwargs
    def get_form_buttons(self, *args, **kwargs):
        """Buttons rendered on the form; subclasses override."""
        return [EDIT_BUTTON, CANCEL_BUTTON]
    @property
    def buttons(self):
        """Buttons actually available in the current actions context."""
        return (button for button in self._buttons
                if button.available(actions.context))
    def view_url(self):
        """URL of this object's "view" page."""
        kw = {self.pk: self.obj.id}
        return url_for(self.view_endpoint, **kw)
    def redirect_to_view(self):
        """Redirect to the clicked button's url if it has one, else to the
        object's view page."""
        if self.button:
            url = self.button.url(actions.context)
            if url:
                return redirect(url)
        return redirect(self.view_url())
    def message_success(self):
        # text_type() forces lazy-translated strings to plain unicode
        return text_type(self._message_success)
    # actions
    def handle_action(self, action):
        """Validate the requested `action` against :attr:`_buttons`, then
        call the method of the same name.
        :raises ValueError: when the action is unknown or unavailable.
        """
        for button in self._buttons:
            if action == button.name:
                if not button.available(dict(view=self)):
                    raise ValueError('Action "{}" not available'
                                     ''.format(action.encode('utf-8')))
                break
        else:
            # for/else: no button matched the submitted action name
            raise ValueError(
                'Unknown action: "{}"'.format(action.encode('utf-8')))
        self.action = action
        self.button = button
        return getattr(self, action)()
    def cancel(self):
        """'cancel' action: back to the object's view, nothing saved."""
        return self.redirect_to_view()
    def edit(self, redirect_to=None):
        """'edit' action: validate then save, or re-render with errors."""
        if self.validate():
            return self.form_valid(redirect_to=redirect_to)
        else:
            if request.csrf_failed:
                errors = self.form.errors
                csrf_failed = errors.pop('csrf_token', False)
                if csrf_failed and not errors:
                    # failed only because of invalid/expired csrf, no error on form
                    return self.form_csrf_invalid()
            resp = self.form_invalid()
            if resp:
                return resp
            flash(_(u"Please fix the error(s) below"), "error")
        # if we end here then something wrong has happened: show form with error
        # messages
        return self.get()
    def before_populate_obj(self):
        """
        This method is called after form has been validated and before calling
        `form.populate_obj()`. Sometimes one may want to remove a field from
        the form because it's non-sense to store it on edited object, and use it in
        a specific manner, for example::
        image = form.image
        del form.image
        store_image(image)
        """
        pass
    def after_populate_obj(self):
        """
        Called after `self.obj` values have been updated, and `self.obj`
        attached to an ORM session.
        """
        pass
    def handle_commit_exception(self, exc):
        """
        Hook point to handle exception that may happen during commit.
        It is the responsability of this method to perform a rollback if it is
        required for handling `exc`. If the method does not handle `exc` if should
        do nothing and return None.
        :returns: * a valid :class:`Response` if exception is handled.
                  * `None` if exception is not handled. Default handling happens.
        """
        return None
    def commit_success(self):
        """
        Called after object has been successfully saved to database
        """
    def validate(self):
        """Run form validation; subclasses may add cross-field checks."""
        return self.form.validate()
    def form_valid(self, redirect_to=None):
        """Save object.
        Called when form is validated.
        :param redirect_to: real url (created with url_for) to redirect to,
        instead of the view by default.
        """
        session = current_app.db.session()
        # populate the object without triggering premature flushes
        with session.no_autoflush:
            self.before_populate_obj()
            self.form.populate_obj(self.obj)
            session.add(self.obj)
            self.after_populate_obj()
        try:
            # flush first so database errors surface before the activity
            session.flush()
            self.send_activity()
            session.commit()
        except ValidationError as e:
            rv = self.handle_commit_exception(e)
            if rv is not None:
                return rv
            session.rollback()
            flash(e.message, "error")
            return self.get()
        except sa.exc.IntegrityError as e:
            rv = self.handle_commit_exception(e)
            if rv is not None:
                return rv
            session.rollback()
            logger.error(e)
            flash(_(u"An entity with this name already exists in the system."),
                  "error")
            return self.get()
        else:
            self.commit_success()
            flash(self.message_success(), "success")
            if redirect_to:
                return redirect(redirect_to)
            else:
                return self.redirect_to_view()
    def form_invalid(self):
        """
        When a form doesn't validate this method is called.
        It may return a :class:`Flask.Response` instance, to handle specific
        errors in custom screens.
        Else the edit form screen is returned with error(s) highlighted.
        This method is useful for detecting edition conflict using hidden fields
        and show a specific screen to help resolve the conflict.
        """
        return None
    def form_csrf_invalid(self):
        """
        Called when a form doesn't validate *only* because of csrf token expiration.
        This works only if form is an instance of :class:`flask_wtf.form.SecureForm`.
        Else default CSRF protection (before request) will take place.
        It must return a valid :class:`Flask.Response` instance. By default it
        returns to edit form screen with an informative message.
        """
        current_app.extensions['csrf-handler'].flash_csrf_failed_message()
        return self.get()
    def send_activity(self):
        """Emit the `activity` signal for this edit (actor, verb, object)."""
        activity.send(
            self,
            actor=g.user,
            verb=self.activity_verb,
            object=self.obj,
            target=self.activity_target)
    @property
    def activity_target(self):
        """
        Return `target` to use when creating activity.
        """
        return None
#: primary submit button on create forms; posts action name 'create'
CREATE_BUTTON = ButtonAction(
    'form', 'create', btn_class='primary', title=_l(u'Create'))
#: "create and add new" button: saves then redirects back to the same
#: create endpoint. Shown only when the view sets `chain_create_allowed`.
CHAIN_CREATE_BUTTON = ButtonAction(
    'form',
    'chain_create',
    btn_class='primary',
    title=_l(u'Create and add new'),
    endpoint=lambda ctx: Endpoint(request.endpoint, **request.view_args),
    condition=lambda ctx: getattr(ctx['view'], 'chain_create_allowed', False))
class ObjectCreate(ObjectEdit):
    """Create a new object.
    """
    permission = CREATE
    activity_verb = 'post'
    _message_success = _l(u"Entity successfully added")
    #: set to `True` to show 'Save and add new' button
    chain_create_allowed = False
    def __init__(self, *args, **kwargs):
        """Accept an optional `chain_create_allowed` keyword overriding the
        class-level default."""
        chain_create_allowed = kwargs.pop('chain_create_allowed', None)
        if chain_create_allowed is not None:
            self.chain_create_allowed = bool(chain_create_allowed)
        ObjectEdit.__init__(self, *args, **kwargs)
    def prepare_args(self, args, kwargs):
        # we must ensure that no flush() occurs and that obj is not registered in
        # session (to prevent accidental insert of an incomplete object)
        session = current_app.db.session()
        with session.no_autoflush:
            args, kwargs = super(ObjectCreate, self).prepare_args(args, kwargs)
            try:
                session.expunge(self.obj)
            except sa.exc.InvalidRequestError:
                # obj is not in session
                pass
        return args, kwargs
    def init_object(self, args, kwargs):
        """Instantiate a fresh, empty model instead of loading by primary
        key as the base class does."""
        self.obj = self.Model()
        return args, kwargs
    def get_form_kwargs(self):
        kw = super(ObjectCreate, self).get_form_kwargs()
        if request.method == 'GET':
            # when GET allow form prefill instead of empty/current object data
            # FIXME: filter allowed parameters on given a field flags (could be
            # 'allow_from_get'?)
            kw['formdata'] = request.args
        return kw
    def get_form_buttons(self, *args, **kwargs):
        return [CREATE_BUTTON, CHAIN_CREATE_BUTTON, CANCEL_BUTTON]
    def breadcrumb(self):
        return nav.BreadcrumbItem(label=CREATE_BUTTON.title)
    # actions
    def create(self):
        """'create' action: delegate to the inherited edit/save flow."""
        return self.edit()
    # 'chain_create' behaves like 'create'; the button's endpoint brings the
    # user back to the create form after saving
    chain_create = create
    def cancel(self):
        """On cancel there is no object to show: go back to the index."""
        return self.redirect_to_index()
#: submit button for delete confirmation forms; posts action name 'delete'
DELETE_BUTTON = ButtonAction('form', 'delete', title=_l(u'Delete'))
class ObjectDelete(ObjectEdit):
    """Delete object. Supports DELETE verb.
    """
    methods = ['POST']
    permission = DELETE
    activity_verb = 'delete'
    _message_success = _l(u"Entity deleted")
    # reuse the base lookup-by-pk: the object must exist before deletion
    init_object = BaseObjectView.init_object
    def get_form_buttons(self, *args, **kwargs):
        return [DELETE_BUTTON, CANCEL_BUTTON]
    def delete(self):
        """'delete' action: remove the object, emit the activity, commit."""
        session = current_app.db.session()
        session.delete(self.obj)
        # send the activity before commit so it is part of the same
        # transaction as the deletion
        activity.send(
            self,
            actor=g.user,
            verb="delete",
            object=self.obj,
            target=self.activity_target)
        try:
            session.commit()
        except sa.exc.IntegrityError as e:
            # a foreign key still references this object: roll back and
            # return to the object's view with an explanation
            rv = self.handle_commit_exception(e)
            if rv is not None:
                return rv
            session.rollback()
            logger.error(e)
            flash(_("This entity is referenced by another object and cannot be deleted."),
                  "error")
            return self.redirect_to_view()
        else:
            flash(self.message_success(), 'success')
        # FIXME: for DELETE verb response in case of success should be 200, 202
        # (accepted) or 204 (no content)
        return self.redirect_to_index()
class JSONBaseSearch(JSONView):
    """Base class for JSON search views: sanitizes the query string, enforces
    a minimum query length, and serializes results through :meth:`get_item`.
    """
    #: Model class searched by this view
    Model = None
    #: queries shorter than this raise BadRequest (0/None disables the check)
    minimum_input_length = 2
    def __init__(self, *args, **kwargs):
        Model = kwargs.pop('Model', self.Model)
        minimum_input_length = kwargs.pop('minimum_input_length',
                                          self.minimum_input_length)
        super(JSONBaseSearch, self).__init__(*args, **kwargs)
        self.Model = Model
        self.minimum_input_length = minimum_input_length
    def prepare_args(self, args, kwargs):
        args, kwargs = JSONView.prepare_args(self, args, kwargs)
        # strip SQL LIKE wildcard '%' and lowercase for case-insensitive match
        kwargs['q'] = kwargs.get("q", u'').replace(u"%", u" ").lower()
        return args, kwargs
    def data(self, q, *args, **kwargs):
        """Return the JSON payload: ``{'results': [item, ...]}``.
        :raises BadRequest: when `q` is shorter than `minimum_input_length`.
        """
        if self.minimum_input_length and len(q) < self.minimum_input_length:
            raise BadRequest('Minimum query length is {:d}'.format(
                self.minimum_input_length),)
        results = []
        for obj in self.get_results(q, **kwargs):
            results.append(self.get_item(obj))
        return dict(results=results)
    def get_results(self, q, *args, **kwargs):
        # subclasses must implement the actual search
        raise NotImplementedError
    def get_item(self, obj):
        """
        Return a result item
        :param obj: Instance object
        :returns: a dictionnary with at least `id` and `text` values
        """
        raise NotImplementedError
class JSONModelSearch(JSONBaseSearch):
    """
    Base class for json sqlalchemy model search, as used by select2 widgets for
    example
    """
    def get_results(self, q, *args, **kwargs):
        """Build the query via overridable hooks and execute it."""
        query = self.Model.query
        query = self.options(query)
        query = self.filter(query, q, **kwargs)
        query = self.order_by(query)
        if not q and not self.minimum_input_length:
            # empty query with no minimum length: cap the result size
            query = query.limit(50)
        return query.all()
    def options(self, query):
        # skip relationship loading: only scalar columns are needed here
        return query.options(sa.orm.noload('*'))
    def filter(self, query, q, **kwargs):
        """Keep rows whose lowercased name starts with `q`."""
        if not q:
            return query
        return query.filter(sa.func.lower(self.Model.name).like(q + "%"))
    def order_by(self, query):
        """Sort results alphabetically by name."""
        return query.order_by(self.Model.name)
    def get_label(self, obj):
        """Text displayed for `obj` in the widget."""
        return obj.name
    def get_item(self, obj):
        """
        Return a result item.
        :param obj: Instance object
        :returns: a dictionnary with at least `id` and `text` values
        """
        return dict(id=obj.id, text=self.get_label(obj), name=obj.name)
class JSONWhooshSearch(JSONBaseSearch):
    """
    Base class for JSON Whoosh search, as used by select2 widgets for example
    """
    def get_results(self, q, *args, **kwargs):
        """Query the 'indexing' service and return up to 50 hits, sorted on
        'nom' when present, else 'name'."""
        svc = current_app.services['indexing']
        search_kwargs = {'limit': 50, 'Models': (self.Model,)}
        results = svc.search(q, **search_kwargs)
        try:
            # 'nom' doesn't always exist but for Contacts, sorting on
            # the last name ('nom') feels more natural than 'name',
            # which starts with the first name ('prenom').
            # itemkey is bound before results[0] so the except clause below
            # can safely reference it when results is empty (EAFP).
            itemkey = None
            res = results[0]
            fields = res.fields()
            if 'nom' in fields:
                itemkey = 'nom'
            elif 'name' in fields:
                itemkey = 'name'
            if itemkey:
                results = sorted(
                    results, key=lambda it: it.fields().get(itemkey))
        except Exception:
            # only log when a sort key was actually selected; an empty
            # result set is a normal, silent case
            if itemkey is not None:
                logger.warning(
                    "we could not sort whoosh results on fields' key {}.".format(
                        itemkey))
        return results
    def get_item(self, hit):
        """Return a result item.
        :param hit: Hit object from Whoosh
        :returns: a dictionnary with at least `id` and `text` values
        """
        return dict(id=hit['id'], text=hit['name'], name=hit['name'])
|
# -*- coding: utf-8 -*-
#
# Poio documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 16 15:17:21 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE: 'numpydoc' and 'sphinxtogithub' are third-party extensions; they must
# be installed for the build to succeed.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage',
              'sphinx.ext.viewcode', 'numpydoc', 'sphinxtogithub']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Poio'
copyright = u'2012, Peter Bouda'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Poiodoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Poio.tex', u'Poio Documentation',
   u'Peter Bouda', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'poio', u'Poio Documentation',
     [u'Peter Bouda'], 1)
]
Changed copyright; removed the 'sphinxtogithub' extension dependency.
# -*- coding: utf-8 -*-
#
# Poio documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 16 15:17:21 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage',
'sphinx.ext.viewcode', 'numpydoc' ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Poio'
copyright = u'2012, Centro Interdisciplinar de Documentação Linguística e Social'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Poiodoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Poio.tex', u'Poio Documentation',
u'Peter Bouda', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'poio', u'Poio Documentation',
[u'Peter Bouda'], 1)
]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 jaidev <jaidev@newton>
#
# Distributed under terms of the MIT license.
"""
"""
from PySide import QtGui, QtCore
from data_frame_model import DataFrameModel
import pandas as pd
from misc import colnames
class QDelimtierSelectorBox(QtGui.QComboBox):
    """Combo box that lets the user pick the CSV delimiter for the preview."""

    def __init__(self, parent):
        super(QDelimtierSelectorBox, self).__init__(parent=parent)
        # Human-readable label -> actual delimiter character for read_csv.
        self.delimMap = {"Comma": ",",
                         "Tab": "\t",
                         "Space": " "}
        # Snapshot the keys ONCE: the original called dict.keys() twice and
        # indexed the result, which (a) is not guaranteed to yield the same
        # order twice on older interpreters and (b) raises TypeError on
        # Python 3, where dict.keys() is a non-indexable view.
        self.delimKeys = list(self.delimMap)
        self.addItems(self.delimKeys)
        self.currentIndexChanged.connect(self.changePreviewDelimiter)

    @QtCore.Slot(QtCore.QObject, int)
    def changePreviewDelimiter(self, newInt):
        """Push the newly selected delimiter to the parent and re-preview."""
        key = self.delimKeys[newInt]
        setattr(self.parent(), "SEP", self.delimMap[key])
        self.parent().changePreviewDelimiter()
class QIndexSelectorBox(QtGui.QComboBox):
    """Combo box for choosing which column becomes the preview index."""

    def __init__(self, parent, dfColumns):
        super(QIndexSelectorBox, self).__init__(parent=parent)
        # First entry is the literal string "None", meaning "no index column".
        choices = [str(None)]
        choices.extend(dfColumns.tolist())
        self.indexList = choices
        self.addItems(self.indexList)
        self.currentIndexChanged.connect(self.changePreviewIndex)

    @QtCore.Slot(QtCore.QObject, int)
    def changePreviewIndex(self, newInt):
        """Record the chosen index column on the parent and re-render."""
        chosen = self.indexList[newInt]
        setattr(self.parent(), "INDEX_COL", chosen)
        self.parent().changePreviewIndex(chosen)
class QNRowsSelectorBox(QtGui.QLineEdit):
    """Line edit accepting only a row count between 0 and the file's size."""

    def __init__(self, parent, orgNrows):
        super(QNRowsSelectorBox, self).__init__(parent=parent)
        # Default to every row in the file; the validator keeps manual
        # edits within [0, orgNrows].
        self.setText(str(orgNrows))
        self.setValidator(QtGui.QIntValidator(0, orgNrows, self))
class QParserButton(QtGui.QWidget):
    """Radio-button pair for switching between the C and Python CSV parsers."""

    def __init__(self, parent=None):
        super(QParserButton, self).__init__(parent)
        self.c_select = QtGui.QRadioButton("C", parent=self)
        self.py_select = QtGui.QRadioButton("Python", parent=self)
        self.c_select.setChecked(True)
        # Both toggle signals funnel into one handler, which inspects the
        # buttons to find the currently active engine.
        self.c_select.toggled.connect(self.changeParserEngine)
        self.py_select.toggled.connect(self.changeParserEngine)
        layout = QtGui.QHBoxLayout()
        layout.addWidget(self.c_select)
        layout.addWidget(self.py_select)
        self.setLayout(layout)

    @QtCore.Slot(QtCore.QObject, str)
    def changeParserEngine(self, toggled):
        """Propagate the checked engine's lowercase name to the parent."""
        engineBtn = self.py_select
        if self.c_select.isChecked():
            engineBtn = self.c_select
        setattr(self.parent(), "PARSER_ENGINE", engineBtn.text().lower())
        self.parent().preview()
class ColumnSelectorWidget(QtGui.QDialog):
    """Dialog with one checkbox per column plus an "All Columns" master box."""

    def __init__(self, colList, parent=None):
        super(ColumnSelectorWidget, self).__init__(parent=parent)
        self.colList = colList
        layout = QtGui.QVBoxLayout()

        # Master checkbox: while it is checked the per-column boxes are
        # disabled and forced on (i.e. keep every column).
        self.allSelector = QtGui.QCheckBox("All Columns")
        self.allSelector.setChecked(True)
        self.allSelector.stateChanged.connect(self.toggleCBoxList)
        layout.addWidget(self.allSelector)

        # One checkbox per column, initially checked but disabled.
        self.cBoxList = []
        for col in colList:
            box = QtGui.QCheckBox(str(col))
            box.setChecked(True)
            box.setEnabled(False)
            self.cBoxList.append(box)
        for box in self.cBoxList:
            layout.addWidget(box)

        # Ok/ Cancel Layout
        ok_pb = QtGui.QPushButton("OK")
        ok_pb.clicked.connect(self.accept)
        no_pb = QtGui.QPushButton("Cancel")
        no_pb.clicked.connect(self.reject)
        okCancelLayout = QtGui.QHBoxLayout()
        okCancelLayout.addWidget(ok_pb)
        okCancelLayout.addWidget(no_pb)
        layout.addLayout(okCancelLayout)
        self.setLayout(layout)

    def toggleCBoxList(self):
        """Enable per-column boxes unless "All Columns" is checked."""
        if self.allSelector.isChecked():
            for box in self.cBoxList:
                box.setEnabled(False)
                box.setChecked(True)
        else:
            # The current index column may not be deselected.
            for box in self.cBoxList:
                if box.text() != self.parent().INDEX_COL:
                    box.setEnabled(True)
class QImportWizard(QtGui.QDialog):
    """Modal dialog that previews a CSV file and collects read_csv options.

    The class attributes below double as the chosen import parameters:
    child widgets write their selections back onto the wizard via setattr.
    """

    # Initialize default constants
    PREVIEW_NROWS = 100      # rows parsed for the preview table
    SEP = ','                # field delimiter
    INDEX_COL = None         # column used as index; None = default range
    HEADER = 0               # header row number for read_csv
    PARSER_ENGINE = "c"      # pandas parser engine ("c" or "python")
    USECOLS = None           # subset of columns to load; None = all
    NROWS = None             # number of rows to import; None = all

    def __init__(self, parent, filepath=None):
        """Build the wizard for *filepath* (falls back to parent.filepath)."""
        super(QImportWizard, self).__init__(parent)
        self.setWindowTitle("Import Wizard")
        self.setModal(True)
        if filepath is None:
            self.filepath = self.parent().filepath
        else:
            self.filepath = filepath
        # Initial parse populates self.previewData before widgets need it.
        self.preview()
        # TableView widget
        self.tableView = QtGui.QTableView()
        self.previewModel = DataFrameModel(self.previewData)
        self.tableView.setModel(self.previewModel)
        # Layout for all parameters
        paramLayout = QtGui.QVBoxLayout()
        # Index selector widget
        self.indexSelectorBox = QIndexSelectorBox(self,
                                                  self.previewData.columns)
        indexSelectorLabel = QtGui.QLabel("Index Column")
        indexColLayout = QtGui.QHBoxLayout()
        indexColLayout.addWidget(indexSelectorLabel)
        indexColLayout.addWidget(self.indexSelectorBox)
        paramLayout.addLayout(indexColLayout)
        # Delimiter selector Widget
        self.delimiterSelectorBox = QDelimtierSelectorBox(self)
        delimiterSelectorLabel = QtGui.QLabel("Delimiter")
        delimLayout = QtGui.QHBoxLayout()
        delimLayout.addWidget(delimiterSelectorLabel)
        delimLayout.addWidget(self.delimiterSelectorBox)
        paramLayout.addLayout(delimLayout)
        # Parser Engine layout
        parserSelector = QParserButton(self)
        parserSelectorLabel = QtGui.QLabel("Parser Engine")
        parserEngineLayout = QtGui.QHBoxLayout()
        parserEngineLayout.addWidget(parserSelectorLabel)
        parserEngineLayout.addWidget(parserSelector)
        paramLayout.addLayout(parserEngineLayout)
        # Column select dialog
        self.colSelector = ColumnSelectorWidget(
            self.previewData.columns.tolist(),
            parent=self)
        selectColsBtn = QtGui.QPushButton("Select Columns")
        selectColsBtn.clicked.connect(self.showColumnSelector)
        paramLayout.addWidget(selectColsBtn)
        # Nrows selector widget
        nrows = self.getMaxRows()
        nrowsSelector = QNRowsSelectorBox(parent=self, orgNrows=nrows)
        # NOTE(review): nrowsSelector is never stored on self and its value
        # is never read back, so NROWS always stays None — confirm intended.
        nrowsSelectorLayout = QtGui.QHBoxLayout()
        nrowsSelectorLayout.addWidget(QtGui.QLabel("No. of rows"))
        nrowsSelectorLayout.addWidget(nrowsSelector)
        paramLayout.addLayout(nrowsSelectorLayout)
        # Ok/ Cancel Layout
        ok_pb = QtGui.QPushButton("OK")
        ok_pb.clicked.connect(self.accept)
        no_pb = QtGui.QPushButton("Cancel")
        no_pb.clicked.connect(self.reject)
        okCancelLayout = QtGui.QHBoxLayout()
        okCancelLayout.addWidget(ok_pb)
        okCancelLayout.addWidget(no_pb)
        paramLayout.addLayout(okCancelLayout)
        # Layout
        layout = QtGui.QHBoxLayout()
        layout.addWidget(self.tableView)
        layout.addLayout(paramLayout)
        self.setLayout(layout)

    def getMaxRows(self):
        """Return the total data-row count (parses a single column only)."""
        firstCol = colnames(self.filepath)[0]
        return pd.read_csv(self.filepath, usecols=[firstCol]).shape[0]

    def showColumnSelector(self):
        """Run the column dialog; on accept, record and preview the selection."""
        if self.colSelector.exec_() == QtGui.QDialog.Accepted:
            self.USECOLS = []
            for cBox in self.colSelector.cBoxList:
                if cBox.isChecked():
                    self.USECOLS.append(cBox.text())
            self.previewSelectedColumns()

    def preview(self):
        """(Re)parse the first PREVIEW_NROWS rows with the current options."""
        self.previewData = pd.read_csv(self.filepath,
                                       nrows=self.PREVIEW_NROWS, sep=self.SEP,
                                       index_col=self.INDEX_COL,
                                       header=self.HEADER,
                                       engine=self.PARSER_ENGINE,
                                       usecols=self.USECOLS)

    def changePreviewIndex(self, newCol):
        """Re-index the preview on *newCol*; "None" restores a range index."""
        if newCol == "None":
            self.previewData.index = range(self.previewData.shape[0])
        else:
            newIndex = self.previewData[newCol]
            self.previewData.set_index(newIndex, inplace=True)
        self.previewModel = DataFrameModel(self.previewData)
        self.tableView.setModel(self.previewModel)

    def changePreviewDelimiter(self):
        """Reparse with the new delimiter and refresh the table model."""
        self.preview()
        self.previewModel = DataFrameModel(self.previewData)
        self.tableView.setModel(self.previewModel)

    def previewSelectedColumns(self):
        """Show only the USECOLS subset in the preview table."""
        self.previewModel = DataFrameModel(self.previewData[self.USECOLS])
        self.tableView.setModel(self.previewModel)
if __name__ == '__main__':
    # Ad-hoc manual test: open the wizard on the bundled iris.csv.
    import os.path as op
    import sys
    filepath = op.join(op.dirname(__file__), "iris.csv")
    app = QtGui.QApplication(sys.argv)
    window = QImportWizard(None, filepath)
    window.show()
    # Propagate Qt's exit status to the shell; the original called
    # sys.exit() with no argument and discarded app.exec_()'s result.
    sys.exit(app.exec_())
Integrate nrows in import wiz with the app
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 jaidev <jaidev@newton>
#
# Distributed under terms of the MIT license.
"""
"""
from PySide import QtGui, QtCore
from data_frame_model import DataFrameModel
import pandas as pd
from misc import colnames
class QDelimtierSelectorBox(QtGui.QComboBox):
    """Combo box that lets the user pick the CSV delimiter for the preview."""

    def __init__(self, parent):
        super(QDelimtierSelectorBox, self).__init__(parent=parent)
        # Human-readable label -> actual delimiter character for read_csv.
        self.delimMap = {"Comma": ",",
                         "Tab": "\t",
                         "Space": " "}
        # Snapshot the keys ONCE: the original called dict.keys() twice and
        # indexed the result, which (a) is not guaranteed to yield the same
        # order twice on older interpreters and (b) raises TypeError on
        # Python 3, where dict.keys() is a non-indexable view.
        self.delimKeys = list(self.delimMap)
        self.addItems(self.delimKeys)
        self.currentIndexChanged.connect(self.changePreviewDelimiter)

    @QtCore.Slot(QtCore.QObject, int)
    def changePreviewDelimiter(self, newInt):
        """Push the newly selected delimiter to the parent and re-preview."""
        key = self.delimKeys[newInt]
        setattr(self.parent(), "SEP", self.delimMap[key])
        self.parent().changePreviewDelimiter()
class QIndexSelectorBox(QtGui.QComboBox):
    """Combo box for choosing which column becomes the preview index."""

    def __init__(self, parent, dfColumns):
        super(QIndexSelectorBox, self).__init__(parent=parent)
        # First entry is the literal string "None", meaning "no index column".
        self.indexList = [str(None)] + dfColumns.tolist()
        self.addItems(self.indexList)
        self.currentIndexChanged.connect(self.changePreviewIndex)

    @QtCore.Slot(QtCore.QObject, int)
    def changePreviewIndex(self, newInt):
        """Record the selected index column on the parent and re-render."""
        setattr(self.parent(), "INDEX_COL", self.indexList[newInt])
        self.parent().changePreviewIndex(self.indexList[newInt])
class QNRowsSelectorBox(QtGui.QLineEdit):
    """Line edit accepting only a row count between 0 and the file's size."""

    def __init__(self, parent, orgNrows):
        super(QNRowsSelectorBox, self).__init__(parent=parent)
        # Default to every row; the validator keeps edits within [0, orgNrows].
        self.setText(str(orgNrows))
        validator = QtGui.QIntValidator(0, orgNrows, self)
        self.setValidator(validator)
class QParserButton(QtGui.QWidget):
    """Radio-button pair for switching between the C and Python CSV parsers."""

    def __init__(self, parent=None):
        super(QParserButton, self).__init__(parent)
        layout = QtGui.QHBoxLayout()
        c_select = QtGui.QRadioButton("C", parent=self)
        py_select = QtGui.QRadioButton("Python", parent=self)
        c_select.setChecked(True)
        # Both toggle signals funnel into one handler; it inspects the
        # buttons to find which engine is currently active.
        c_select.toggled.connect(self.changeParserEngine)
        py_select.toggled.connect(self.changeParserEngine)
        layout.addWidget(c_select)
        layout.addWidget(py_select)
        self.setLayout(layout)
        self.c_select = c_select
        self.py_select = py_select

    @QtCore.Slot(QtCore.QObject, str)
    def changeParserEngine(self, toggled):
        """Propagate the checked engine's lowercase name to the parent."""
        # Falls through to py_select if the loop finds nothing checked.
        for engineBtn in (self.c_select, self.py_select):
            if engineBtn.isChecked():
                break
        parser = engineBtn.text().lower()
        setattr(self.parent(), "PARSER_ENGINE", parser)
        self.parent().preview()
class ColumnSelectorWidget(QtGui.QDialog):
    """Dialog with one checkbox per column plus an "All Columns" master box."""

    def __init__(self, colList, parent=None):
        super(ColumnSelectorWidget, self).__init__(parent=parent)
        self.colList = colList
        # Master checkbox: while checked, the per-column boxes are disabled
        # and forced on (i.e. keep every column).
        allSelector = QtGui.QCheckBox("All Columns")
        allSelector.setChecked(True)
        allSelector.stateChanged.connect(self.toggleCBoxList)
        self.allSelector = allSelector
        layout = QtGui.QVBoxLayout()
        layout.addWidget(allSelector)
        # One checkbox per column, initially checked but disabled.
        cBoxList = []
        for col in colList:
            cBox = QtGui.QCheckBox(str(col))
            cBox.setChecked(True)
            cBox.setEnabled(False)
            cBoxList.append(cBox)
        for cBox in cBoxList:
            layout.addWidget(cBox)
        self.cBoxList = cBoxList
        # Ok/ Cancel Layout
        ok_pb = QtGui.QPushButton("OK")
        ok_pb.clicked.connect(self.accept)
        no_pb = QtGui.QPushButton("Cancel")
        no_pb.clicked.connect(self.reject)
        okCancelLayout = QtGui.QHBoxLayout()
        okCancelLayout.addWidget(ok_pb)
        okCancelLayout.addWidget(no_pb)
        layout.addLayout(okCancelLayout)
        self.setLayout(layout)

    def toggleCBoxList(self):
        """Enable per-column boxes unless "All Columns" is checked."""
        if not self.allSelector.isChecked():
            # The current index column may not be deselected.
            for cBox in self.cBoxList:
                if cBox.text() != self.parent().INDEX_COL:
                    cBox.setEnabled(True)
        else:
            for cBox in self.cBoxList:
                cBox.setEnabled(False)
                cBox.setChecked(True)
class QImportWizard(QtGui.QDialog):
    """Modal dialog that previews a CSV file and collects read_csv options.

    The class attributes below double as the chosen import parameters:
    child widgets write their selections back onto the wizard via setattr,
    and accept() captures the requested row count into NROWS.
    """

    # Initialize default constants
    PREVIEW_NROWS = 100      # rows parsed for the preview table
    SEP = ','                # field delimiter
    INDEX_COL = None         # column used as index; None = default range
    HEADER = 0               # header row number for read_csv
    PARSER_ENGINE = "c"      # pandas parser engine ("c" or "python")
    USECOLS = None           # subset of columns to load; None = all
    NROWS = None             # number of rows to import; set in accept()

    def __init__(self, parent, filepath=None):
        """Build the wizard for *filepath* (falls back to parent.filepath)."""
        super(QImportWizard, self).__init__(parent)
        self.setWindowTitle("Import Wizard")
        self.setModal(True)
        if filepath is None:
            self.filepath = self.parent().filepath
        else:
            self.filepath = filepath
        # Initial parse populates self.previewData before widgets need it.
        self.preview()
        # TableView widget
        self.tableView = QtGui.QTableView()
        self.previewModel = DataFrameModel(self.previewData)
        self.tableView.setModel(self.previewModel)
        # Layout for all parameters
        paramLayout = QtGui.QVBoxLayout()
        # Index selector widget
        self.indexSelectorBox = QIndexSelectorBox(self,
                                                  self.previewData.columns)
        indexSelectorLabel = QtGui.QLabel("Index Column")
        indexColLayout = QtGui.QHBoxLayout()
        indexColLayout.addWidget(indexSelectorLabel)
        indexColLayout.addWidget(self.indexSelectorBox)
        paramLayout.addLayout(indexColLayout)
        # Delimiter selector Widget
        self.delimiterSelectorBox = QDelimtierSelectorBox(self)
        delimiterSelectorLabel = QtGui.QLabel("Delimiter")
        delimLayout = QtGui.QHBoxLayout()
        delimLayout.addWidget(delimiterSelectorLabel)
        delimLayout.addWidget(self.delimiterSelectorBox)
        paramLayout.addLayout(delimLayout)
        # Parser Engine layout
        parserSelector = QParserButton(self)
        parserSelectorLabel = QtGui.QLabel("Parser Engine")
        parserEngineLayout = QtGui.QHBoxLayout()
        parserEngineLayout.addWidget(parserSelectorLabel)
        parserEngineLayout.addWidget(parserSelector)
        paramLayout.addLayout(parserEngineLayout)
        # Column select dialog
        self.colSelector = ColumnSelectorWidget(
            self.previewData.columns.tolist(),
            parent=self)
        selectColsBtn = QtGui.QPushButton("Select Columns")
        selectColsBtn.clicked.connect(self.showColumnSelector)
        paramLayout.addWidget(selectColsBtn)
        # Nrows selector widget (kept on self so accept() can read it)
        nrows = self.getMaxRows()
        nrowsSelector = QNRowsSelectorBox(parent=self, orgNrows=nrows)
        nrowsSelectorLayout = QtGui.QHBoxLayout()
        nrowsSelectorLayout.addWidget(QtGui.QLabel("No. of rows"))
        nrowsSelectorLayout.addWidget(nrowsSelector)
        self.nrowsSelector = nrowsSelector
        paramLayout.addLayout(nrowsSelectorLayout)
        # Ok/ Cancel Layout
        ok_pb = QtGui.QPushButton("OK")
        ok_pb.clicked.connect(self.accept)
        no_pb = QtGui.QPushButton("Cancel")
        no_pb.clicked.connect(self.reject)
        okCancelLayout = QtGui.QHBoxLayout()
        okCancelLayout.addWidget(ok_pb)
        okCancelLayout.addWidget(no_pb)
        paramLayout.addLayout(okCancelLayout)
        # Layout
        layout = QtGui.QHBoxLayout()
        layout.addWidget(self.tableView)
        layout.addLayout(paramLayout)
        self.setLayout(layout)

    def getMaxRows(self):
        """Return the total data-row count (parses a single column only)."""
        firstCol = colnames(self.filepath)[0]
        return pd.read_csv(self.filepath, usecols=[firstCol]).shape[0]

    def showColumnSelector(self):
        """Run the column dialog; on accept, record and preview the selection."""
        if self.colSelector.exec_() == QtGui.QDialog.Accepted:
            self.USECOLS = []
            for cBox in self.colSelector.cBoxList:
                if cBox.isChecked():
                    self.USECOLS.append(cBox.text())
            self.previewSelectedColumns()

    def preview(self):
        """(Re)parse the first PREVIEW_NROWS rows with the current options."""
        self.previewData = pd.read_csv(self.filepath,
                                       nrows=self.PREVIEW_NROWS, sep=self.SEP,
                                       index_col=self.INDEX_COL,
                                       header=self.HEADER,
                                       engine=self.PARSER_ENGINE,
                                       usecols=self.USECOLS)

    def changePreviewIndex(self, newCol):
        """Re-index the preview on *newCol*; "None" restores a range index."""
        if newCol == "None":
            self.previewData.index = range(self.previewData.shape[0])
        else:
            newIndex = self.previewData[newCol]
            self.previewData.set_index(newIndex, inplace=True)
        self.previewModel = DataFrameModel(self.previewData)
        self.tableView.setModel(self.previewModel)

    def changePreviewDelimiter(self):
        """Reparse with the new delimiter and refresh the table model."""
        self.preview()
        self.previewModel = DataFrameModel(self.previewData)
        self.tableView.setModel(self.previewModel)

    def previewSelectedColumns(self):
        """Show only the USECOLS subset in the preview table."""
        self.previewModel = DataFrameModel(self.previewData[self.USECOLS])
        self.tableView.setModel(self.previewModel)

    def accept(self):
        """Capture the requested row count, then close the dialog as accepted."""
        self.NROWS = int(self.nrowsSelector.text())
        super(QImportWizard, self).accept()
if __name__ == '__main__':
    # Ad-hoc manual test: open the wizard on the bundled iris.csv.
    import os.path as op
    import sys
    filepath = op.join(op.dirname(__file__), "iris.csv")
    app = QtGui.QApplication(sys.argv)
    window = QImportWizard(None, filepath)
    window.show()
    # Propagate Qt's exit status to the shell; the original called
    # sys.exit() with no argument and discarded app.exec_()'s result.
    sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
# pylint: disable=C0103,W0622
'''
Sphinx documentation for Salt
'''
import functools
import sys
import os
import types
from sphinx.directives import TocTree
# pylint: disable=R0903
class Mock(object):
    '''
    Stand-in for modules that cannot be imported at doc-build time.

    Lets autodoc import salt modules without having oodles of req'd
    installed libs: every attribute access or call on a Mock simply
    produces another Mock. This doesn't work with ``import *`` imports.
    http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
    '''
    __all__ = []

    def __init__(self, *args, **kwargs):
        # Swallow any constructor signature.
        pass

    def __call__(self, *args, **kwargs):
        # Calling a mock yields a fresh mock so chained usage keeps working.
        # If mocked function is used as a decorator, expose decorated function.
        # if args and callable(args[-1]):
        #     functools.update_wrapper(ret, args[0])
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        # Sphinx probes __file__/__path__ when treating a Mock as a module.
        if name in ('__file__', '__path__'):
            return '/dev/null'
        return Mock()
# pylint: enable=R0903
MOCK_MODULES = [
# salt core
'Crypto',
'Crypto.Signature',
'Crypto.Cipher',
'Crypto.Hash',
'Crypto.PublicKey',
'Crypto.Random',
'Crypto.Signature',
'Crypto.Signature.PKCS1_v1_5',
'M2Crypto',
'msgpack',
'yaml',
'yaml.constructor',
'yaml.nodes',
'yaml.scanner',
'zmq',
'zmq.eventloop',
'zmq.eventloop.ioloop',
# third-party libs for cloud modules
'libcloud',
'libcloud.compute',
'libcloud.compute.base',
'libcloud.compute.deployment',
'libcloud.compute.providers',
'libcloud.compute.types',
'libcloud.loadbalancer',
'libcloud.loadbalancer.types',
'libcloud.loadbalancer.providers',
'libcloud.common',
'libcloud.common.google',
# third-party libs for netapi modules
'cherrypy',
'cherrypy.lib',
'cherrypy.process',
'cherrypy.wsgiserver',
'cherrypy.wsgiserver.ssl_builtin',
'tornado',
'tornado.concurrent',
'tornado.gen',
'tornado.httpclient',
'tornado.httpserver',
'tornado.httputil',
'tornado.ioloop',
'tornado.simple_httpclient',
'tornado.web',
'tornado.websocket',
'ws4py',
'ws4py.server',
'ws4py.server.cherrypyserver',
'ws4py.websocket',
# modules, renderers, states, returners, et al
'ClusterShell',
'ClusterShell.NodeSet',
'django',
'libvirt',
'MySQLdb',
'MySQLdb.cursors',
'nagios_json',
'psutil',
'pycassa',
'pymongo',
'rabbitmq_server',
'redis',
'requests',
'requests.exceptions',
'rpm',
'rpmUtils',
'rpmUtils.arch',
'yum',
'OpenSSL',
'zfs',
]
# Install the mocks before anything imports salt, so every optional or
# C-backed dependency resolves to a harmless stand-in.
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = Mock()
# Define a fake version attribute for the following libs.
# NOTE(review): presumably some salt imports read these version attributes
# at import time — confirm against the importing modules.
sys.modules['libcloud'].__version__ = '0.0.0'
sys.modules['pymongo'].version = '0.0.0'
# -- Add paths to PYTHONPATH ---------------------------------------------------
# Resolve this doc/ directory absolutely so the sys.path additions below
# are stable regardless of the build's working directory.
try:
    docs_basepath = os.path.abspath(os.path.dirname(__file__))
except NameError:
    # sphinx-intl and six execute some code which will raise this NameError
    # assume we're in the doc/ directory
    docs_basepath = os.path.abspath(os.path.dirname('.'))
addtl_paths = (
    os.pardir,  # salt itself (for autodoc)
    '_ext',  # custom Sphinx extensions
)
for path in addtl_paths:
    sys.path.insert(0, os.path.abspath(os.path.join(docs_basepath, path)))
# We're now able to import salt
import salt.version
# NOTE(review): os.path.join drops os.pardir here because docs_basepath is
# absolute, so this evaluates to <docs_basepath>/formulas — confirm that
# (and not ../<docs_basepath>/formulas) is the intended location.
formulas_dir = os.path.join(os.pardir, docs_basepath, 'formulas')
# ----- Intersphinx Settings ------------------------------------------------>
intersphinx_mapping = {
'python2': ('http://docs.python.org/2', None),
'python3': ('http://docs.python.org/3', None)
}
# <---- Intersphinx Settings -------------------------------------------------
# -- General Configuration -----------------------------------------------------
project = 'Salt'
copyright = '2015 SaltStack, Inc.'
version = salt.version.__version__
latest_release = '2015.8.1' # latest release
previous_release = '2015.5.6' # latest release from previous branch
previous_release_dir = '2015.5' # path on web server for previous branch
build_type = 'latest' # latest, previous, develop, inactive
# set release to 'version' for develop so sha is used
# - otherwise -
# set release to 'latest_release' or 'previous_release'
release = latest_release # version, latest_release, previous_release
# Set google custom search engine
if release == latest_release:
search_cx = '004624818632696854117:yfmprrbw3pk' # latest
elif release.startswith('2014.7'):
search_cx = '004624818632696854117:thhslradbru' # 2014.7
elif release.startswith('2015.5'):
search_cx = '004624818632696854117:ovogwef29do' # 2015.5
else:
search_cx = '004624818632696854117:haj7bjntf4s' # develop
needs_sphinx = '1.3'
spelling_lang = 'en_US'
language = 'en'
locale_dirs = [
'_locale',
]
master_doc = 'contents'
templates_path = ['_templates']
exclude_patterns = ['_build', '_incl/*', 'ref/cli/_includes/*.rst']
extensions = [
'saltdomain', # Must come early
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'httpdomain',
'youtube',
'saltautodoc', # Must be AFTER autodoc
'shorturls',
]
try:
import sphinxcontrib.spelling
except ImportError:
pass
else:
extensions += ['sphinxcontrib.spelling']
modindex_common_prefix = ['salt.']
autosummary_generate = True
# Define a substitution for linking to the latest release tarball
rst_prolog = """\
.. |current_release_doc| replace:: :doc:`/topics/releases/{release}`
.. |saltrepo| replace:: https://github.com/saltstack/salt
.. _`salt-users`: https://groups.google.com/forum/#!forum/salt-users
.. _`salt-announce`: https://groups.google.com/forum/#!forum/salt-announce
.. _`salt-packagers`: https://groups.google.com/forum/#!forum/salt-packagers
.. |windownload| raw:: html
<p>x86: <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-x86-Setup.exe"><strong>Salt-Minion-{release}-x86-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-x86-Setup.exe.md5"><strong>md5</strong></a></p>
<p>AMD64: <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-AMD64-Setup.exe"><strong>Salt-Minion-{release}-AMD64-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
""".format(release=release)
# A shortcut for linking to tickets on the GitHub issue tracker
extlinks = {
'blob': ('https://github.com/saltstack/salt/blob/%s/%%s' % 'develop', None),
'download': ('https://cloud.github.com/downloads/saltstack/salt/%s', None),
'issue': ('https://github.com/saltstack/salt/issues/%s', 'issue '),
'formula_url': ('https://github.com/saltstack-formulas/%s', ''),
}
# ----- Localization -------------------------------------------------------->
locale_dirs = ['locale/']
gettext_compact = False
# <---- Localization ---------------------------------------------------------
### HTML options
html_theme = 'saltstack2' #change to 'saltstack' to use previous theme
html_theme_path = ['_themes']
html_title = u''
html_short_title = 'Salt'
html_static_path = ['_static']
html_logo = None # specified in the theme layout.html
html_favicon = 'favicon.ico'
html_use_smartypants = False
# Set a var if we're building docs for the live site or not
on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
# Use Google customized search or use Sphinx built-in JavaScript search
if on_saltstack:
html_search_template = 'googlesearch.html'
else:
html_search_template = 'searchbox.html'
html_additional_pages = {
'404': '404.html',
}
html_default_sidebars = [
html_search_template,
'version.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
]
html_sidebars = {
'ref/**/all/salt.*': [
html_search_template,
'version.html',
'modules-sidebar.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
],
'ref/formula/all/*': [
],
}
html_context = {
'on_saltstack': on_saltstack,
'html_default_sidebars': html_default_sidebars,
'github_base': 'https://github.com/saltstack/salt',
'github_issues': 'https://github.com/saltstack/salt/issues',
'github_downloads': 'https://github.com/saltstack/salt/downloads',
'latest_release': latest_release,
'previous_release': previous_release,
'previous_release_dir': previous_release_dir,
'search_cx': search_cx,
'build_type': build_type,
}
html_use_index = True
html_last_updated_fmt = '%b %d, %Y'
html_show_sourcelink = False
html_show_sphinx = True
html_show_copyright = True
### Latex options
latex_documents = [
('contents', 'Salt.tex', 'Salt Documentation', 'SaltStack, Inc.', 'manual'),
]
latex_logo = '_static/salt-logo.png'
# LaTeX customization for the XeTeX build. The preamble is a RAW string:
# in a non-raw literal, '\u' (from \usepackage) is an invalid \uXXXX escape
# and a SyntaxError on Python 3; the raw form is byte-identical on Python 2.
latex_elements = {
    'inputenc': '',  # use XeTeX instead of the inputenc LaTeX package.
    'utf8extra': '',
    'preamble': r'''
\usepackage{fontspec}
\setsansfont{Linux Biolinum O}
\setromanfont{Linux Libertine O}
\setmonofont{Source Code Pro}
''',
}
### Linux Biolinum, Linux Libertine: http://www.linuxlibertine.org/
### Source Code Pro: https://github.com/adobe-fonts/source-code-pro/releases
### Linkcheck options
linkcheck_ignore = [r'http://127.0.0.1',
r'http://salt:\d+',
r'http://local:\d+',
r'https://console.aws.amazon.com',
r'http://192.168.33.10',
r'http://domain:\d+',
r'http://123.456.789.012:\d+',
r'http://localhost',
r'https://groups.google.com/forum/#!forum/salt-users',
r'http://logstash.net/docs/latest/inputs/udp',
r'http://logstash.net/docs/latest/inputs/zeromq',
r'http://www.youtube.com/saltstack',
r'http://raven.readthedocs.org',
r'https://getsentry.com',
r'http://salt-cloud.readthedocs.org',
r'http://salt.readthedocs.org',
r'http://www.pip-installer.org/',
r'http://www.windowsazure.com/',
r'https://github.com/watching',
r'dash-feed://',
r'https://github.com/saltstack/salt/',
r'http://bootstrap.saltstack.org',
r'https://bootstrap.saltstack.com',
r'https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh',
r'media.readthedocs.org/dash/salt/latest/salt.xml',
r'https://portal.aws.amazon.com/gp/aws/securityCredentials',
r'https://help.github.com/articles/fork-a-repo',
r'dash-feed://https%3A//media.readthedocs.org/dash/salt/latest/salt.xml'
]
linkcheck_anchors = False
### Manpage options
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
authors = [
'Thomas S. Hatch <thatch45@gmail.com> and many others, please see the Authors file',
]
man_pages = [
('contents', 'salt', 'Salt Documentation', authors, 7),
('ref/cli/salt', 'salt', 'salt', authors, 1),
('ref/cli/salt-master', 'salt-master', 'salt-master Documentation', authors, 1),
('ref/cli/salt-minion', 'salt-minion', 'salt-minion Documentation', authors, 1),
('ref/cli/salt-key', 'salt-key', 'salt-key Documentation', authors, 1),
('ref/cli/salt-cp', 'salt-cp', 'salt-cp Documentation', authors, 1),
('ref/cli/salt-call', 'salt-call', 'salt-call Documentation', authors, 1),
('ref/cli/salt-proxy', 'salt-proxy', 'salt-proxy Documentation', authors, 1),
('ref/cli/salt-syndic', 'salt-syndic', 'salt-syndic Documentation', authors, 1),
('ref/cli/salt-run', 'salt-run', 'salt-run Documentation', authors, 1),
('ref/cli/salt-ssh', 'salt-ssh', 'salt-ssh Documentation', authors, 1),
('ref/cli/salt-cloud', 'salt-cloud', 'Salt Cloud Command', authors, 1),
('ref/cli/salt-api', 'salt-api', 'salt-api Command', authors, 1),
('ref/cli/salt-unity', 'salt-unity', 'salt-unity Command', authors, 1),
('ref/cli/spm', 'spm', 'Salt Package Manager Command', authors, 1),
]
### epub options
epub_title = 'Salt Documentation'
epub_author = 'SaltStack, Inc.'
epub_publisher = epub_author
epub_copyright = copyright
epub_scheme = 'URL'
epub_identifier = 'http://saltstack.org/'
#epub_tocdepth = 3
def skip_mod_init_member(app, what, name, obj, skip, options):
    """autodoc-skip-member hook: hide private names and ``mod_init`` functions."""
    if name.startswith('_'):
        return True
    # Loader-only entry points named mod_init are noise in the rendered docs.
    return isinstance(obj, types.FunctionType) and obj.__name__ == 'mod_init'
def _normalize_version(args):
_, path = args
return '.'.join([x.zfill(4) for x in (path.split('/')[-1].split('.'))])
class ReleasesTree(TocTree):
    """A ``toctree`` variant that sorts release documents newest-first."""

    option_spec = dict(TocTree.option_spec)

    def run(self):
        rst = super(ReleasesTree, self).run()
        # Sort the toctree's (title, ref) entries by zero-padded version so
        # e.g. 2015.8.10 orders after 2015.8.2, descending.
        entries = rst[0][0]['entries'][:]
        entries.sort(key=_normalize_version, reverse=True)
        rst[0][0]['entries'][:] = entries
        return rst
def setup(app):
    # Sphinx extension entry point: register the custom releases directive
    # and the autodoc member filter defined above.
    app.add_directive('releasestree', ReleasesTree)
    app.connect('autodoc-skip-member', skip_mod_init_member)
updated version numbers
# -*- coding: utf-8 -*-
# pylint: disable=C0103,W0622
'''
Sphinx documentation for Salt
'''
import functools
import sys
import os
import types
from sphinx.directives import TocTree
# pylint: disable=R0903
class Mock(object):
    '''
    Stand-in for modules that cannot be imported at doc-build time.

    Lets autodoc import salt modules without having oodles of req'd
    installed libs: every attribute access or call on a Mock simply
    produces another Mock. This doesn't work with ``import *`` imports.
    http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
    '''
    __all__ = []

    def __init__(self, *args, **kwargs):
        # Swallow any constructor signature.
        pass

    def __call__(self, *args, **kwargs):
        # Calling a mock yields a fresh mock so chained usage keeps working.
        # If mocked function is used as a decorator, expose decorated function.
        # if args and callable(args[-1]):
        #     functools.update_wrapper(ret, args[0])
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        # Sphinx probes __file__/__path__ when treating a Mock as a module.
        if name in ('__file__', '__path__'):
            return '/dev/null'
        return Mock()
# pylint: enable=R0903
MOCK_MODULES = [
# salt core
'Crypto',
'Crypto.Signature',
'Crypto.Cipher',
'Crypto.Hash',
'Crypto.PublicKey',
'Crypto.Random',
'Crypto.Signature',
'Crypto.Signature.PKCS1_v1_5',
'M2Crypto',
'msgpack',
'yaml',
'yaml.constructor',
'yaml.nodes',
'yaml.scanner',
'zmq',
'zmq.eventloop',
'zmq.eventloop.ioloop',
# third-party libs for cloud modules
'libcloud',
'libcloud.compute',
'libcloud.compute.base',
'libcloud.compute.deployment',
'libcloud.compute.providers',
'libcloud.compute.types',
'libcloud.loadbalancer',
'libcloud.loadbalancer.types',
'libcloud.loadbalancer.providers',
'libcloud.common',
'libcloud.common.google',
# third-party libs for netapi modules
'cherrypy',
'cherrypy.lib',
'cherrypy.process',
'cherrypy.wsgiserver',
'cherrypy.wsgiserver.ssl_builtin',
'tornado',
'tornado.concurrent',
'tornado.gen',
'tornado.httpclient',
'tornado.httpserver',
'tornado.httputil',
'tornado.ioloop',
'tornado.simple_httpclient',
'tornado.web',
'tornado.websocket',
'ws4py',
'ws4py.server',
'ws4py.server.cherrypyserver',
'ws4py.websocket',
# modules, renderers, states, returners, et al
'ClusterShell',
'ClusterShell.NodeSet',
'django',
'libvirt',
'MySQLdb',
'MySQLdb.cursors',
'nagios_json',
'psutil',
'pycassa',
'pymongo',
'rabbitmq_server',
'redis',
'requests',
'requests.exceptions',
'rpm',
'rpmUtils',
'rpmUtils.arch',
'yum',
'OpenSSL',
'zfs',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# Define a fake version attribute for the following libs.
sys.modules['libcloud'].__version__ = '0.0.0'
sys.modules['pymongo'].version = '0.0.0'
# -- Add paths to PYTHONPATH ---------------------------------------------------
try:
docs_basepath = os.path.abspath(os.path.dirname(__file__))
except NameError:
# sphinx-intl and six execute some code which will raise this NameError
# assume we're in the doc/ directory
docs_basepath = os.path.abspath(os.path.dirname('.'))
addtl_paths = (
os.pardir, # salt itself (for autodoc)
'_ext', # custom Sphinx extensions
)
for path in addtl_paths:
sys.path.insert(0, os.path.abspath(os.path.join(docs_basepath, path)))
# We're now able to import salt
import salt.version
formulas_dir = os.path.join(os.pardir, docs_basepath, 'formulas')
# ----- Intersphinx Settings ------------------------------------------------>
intersphinx_mapping = {
'python2': ('http://docs.python.org/2', None),
'python3': ('http://docs.python.org/3', None)
}
# <---- Intersphinx Settings -------------------------------------------------
# -- General Configuration -----------------------------------------------------
project = 'Salt'
copyright = '2015 SaltStack, Inc.'
version = salt.version.__version__
latest_release = '2015.8.3' # latest release
previous_release = '2015.5.8' # latest release from previous branch
previous_release_dir = '2015.5' # path on web server for previous branch
build_type = 'latest' # latest, previous, develop, inactive
# set release to 'version' for develop so sha is used
# - otherwise -
# set release to 'latest_release' or 'previous_release'
release = latest_release # version, latest_release, previous_release
# Set google custom search engine
if release == latest_release:
search_cx = '004624818632696854117:yfmprrbw3pk' # latest
elif release.startswith('2014.7'):
search_cx = '004624818632696854117:thhslradbru' # 2014.7
elif release.startswith('2015.5'):
search_cx = '004624818632696854117:ovogwef29do' # 2015.5
else:
search_cx = '004624818632696854117:haj7bjntf4s' # develop
needs_sphinx = '1.3'
spelling_lang = 'en_US'
language = 'en'
locale_dirs = [
'_locale',
]
master_doc = 'contents'
templates_path = ['_templates']
exclude_patterns = ['_build', '_incl/*', 'ref/cli/_includes/*.rst']
extensions = [
'saltdomain', # Must come early
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'httpdomain',
'youtube',
'saltautodoc', # Must be AFTER autodoc
'shorturls',
]
try:
import sphinxcontrib.spelling
except ImportError:
pass
else:
extensions += ['sphinxcontrib.spelling']
modindex_common_prefix = ['salt.']
autosummary_generate = True
# Define a substitution for linking to the latest release tarball
rst_prolog = """\
.. |current_release_doc| replace:: :doc:`/topics/releases/{release}`
.. |saltrepo| replace:: https://github.com/saltstack/salt
.. _`salt-users`: https://groups.google.com/forum/#!forum/salt-users
.. _`salt-announce`: https://groups.google.com/forum/#!forum/salt-announce
.. _`salt-packagers`: https://groups.google.com/forum/#!forum/salt-packagers
.. |windownload| raw:: html
<p>x86: <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-x86-Setup.exe"><strong>Salt-Minion-{release}-x86-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-x86-Setup.exe.md5"><strong>md5</strong></a></p>
<p>AMD64: <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-AMD64-Setup.exe"><strong>Salt-Minion-{release}-AMD64-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
""".format(release=release)
# A shortcut for linking to tickets on the GitHub issue tracker
extlinks = {
'blob': ('https://github.com/saltstack/salt/blob/%s/%%s' % 'develop', None),
'download': ('https://cloud.github.com/downloads/saltstack/salt/%s', None),
'issue': ('https://github.com/saltstack/salt/issues/%s', 'issue '),
'formula_url': ('https://github.com/saltstack-formulas/%s', ''),
}
# ----- Localization -------------------------------------------------------->
locale_dirs = ['locale/']
gettext_compact = False
# <---- Localization ---------------------------------------------------------
### HTML options
html_theme = 'saltstack2' #change to 'saltstack' to use previous theme
html_theme_path = ['_themes']
html_title = u''
html_short_title = 'Salt'
html_static_path = ['_static']
html_logo = None # specified in the theme layout.html
html_favicon = 'favicon.ico'
html_use_smartypants = False
# Set a var if we're building docs for the live site or not
on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
# Use Google customized search or use Sphinx built-in JavaScript search
if on_saltstack:
html_search_template = 'googlesearch.html'
else:
html_search_template = 'searchbox.html'
html_additional_pages = {
'404': '404.html',
}
html_default_sidebars = [
html_search_template,
'version.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
]
html_sidebars = {
'ref/**/all/salt.*': [
html_search_template,
'version.html',
'modules-sidebar.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
],
'ref/formula/all/*': [
],
}
html_context = {
'on_saltstack': on_saltstack,
'html_default_sidebars': html_default_sidebars,
'github_base': 'https://github.com/saltstack/salt',
'github_issues': 'https://github.com/saltstack/salt/issues',
'github_downloads': 'https://github.com/saltstack/salt/downloads',
'latest_release': latest_release,
'previous_release': previous_release,
'previous_release_dir': previous_release_dir,
'search_cx': search_cx,
'build_type': build_type,
}
html_use_index = True
html_last_updated_fmt = '%b %d, %Y'
html_show_sourcelink = False
html_show_sphinx = True
html_show_copyright = True
### Latex options
latex_documents = [
('contents', 'Salt.tex', 'Salt Documentation', 'SaltStack, Inc.', 'manual'),
]
latex_logo = '_static/salt-logo.png'
latex_elements = {
'inputenc': '', # use XeTeX instead of the inputenc LaTeX package.
'utf8extra': '',
'preamble': '''
\usepackage{fontspec}
\setsansfont{Linux Biolinum O}
\setromanfont{Linux Libertine O}
\setmonofont{Source Code Pro}
''',
}
### Linux Biolinum, Linux Libertine: http://www.linuxlibertine.org/
### Source Code Pro: https://github.com/adobe-fonts/source-code-pro/releases
### Linkcheck options
linkcheck_ignore = [r'http://127.0.0.1',
r'http://salt:\d+',
r'http://local:\d+',
r'https://console.aws.amazon.com',
r'http://192.168.33.10',
r'http://domain:\d+',
r'http://123.456.789.012:\d+',
r'http://localhost',
r'https://groups.google.com/forum/#!forum/salt-users',
r'http://logstash.net/docs/latest/inputs/udp',
r'http://logstash.net/docs/latest/inputs/zeromq',
r'http://www.youtube.com/saltstack',
r'http://raven.readthedocs.org',
r'https://getsentry.com',
r'http://salt-cloud.readthedocs.org',
r'http://salt.readthedocs.org',
r'http://www.pip-installer.org/',
r'http://www.windowsazure.com/',
r'https://github.com/watching',
r'dash-feed://',
r'https://github.com/saltstack/salt/',
r'http://bootstrap.saltstack.org',
r'https://bootstrap.saltstack.com',
r'https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh',
r'media.readthedocs.org/dash/salt/latest/salt.xml',
r'https://portal.aws.amazon.com/gp/aws/securityCredentials',
r'https://help.github.com/articles/fork-a-repo',
r'dash-feed://https%3A//media.readthedocs.org/dash/salt/latest/salt.xml'
]
linkcheck_anchors = False
### Manpage options
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
authors = [
'Thomas S. Hatch <thatch45@gmail.com> and many others, please see the Authors file',
]
man_pages = [
('contents', 'salt', 'Salt Documentation', authors, 7),
('ref/cli/salt', 'salt', 'salt', authors, 1),
('ref/cli/salt-master', 'salt-master', 'salt-master Documentation', authors, 1),
('ref/cli/salt-minion', 'salt-minion', 'salt-minion Documentation', authors, 1),
('ref/cli/salt-key', 'salt-key', 'salt-key Documentation', authors, 1),
('ref/cli/salt-cp', 'salt-cp', 'salt-cp Documentation', authors, 1),
('ref/cli/salt-call', 'salt-call', 'salt-call Documentation', authors, 1),
('ref/cli/salt-proxy', 'salt-proxy', 'salt-proxy Documentation', authors, 1),
('ref/cli/salt-syndic', 'salt-syndic', 'salt-syndic Documentation', authors, 1),
('ref/cli/salt-run', 'salt-run', 'salt-run Documentation', authors, 1),
('ref/cli/salt-ssh', 'salt-ssh', 'salt-ssh Documentation', authors, 1),
('ref/cli/salt-cloud', 'salt-cloud', 'Salt Cloud Command', authors, 1),
('ref/cli/salt-api', 'salt-api', 'salt-api Command', authors, 1),
('ref/cli/salt-unity', 'salt-unity', 'salt-unity Command', authors, 1),
('ref/cli/spm', 'spm', 'Salt Package Manager Command', authors, 1),
]
### epub options
epub_title = 'Salt Documentation'
epub_author = 'SaltStack, Inc.'
epub_publisher = epub_author
epub_copyright = copyright
epub_scheme = 'URL'
epub_identifier = 'http://saltstack.org/'
#epub_tocdepth = 3
def skip_mod_init_member(app, what, name, obj, skip, options):
    '''
    Tell autodoc to skip underscore-prefixed members and any function
    named ``mod_init`` (Salt loader plumbing, not public API).
    '''
    private = name.startswith('_')
    mod_init_fn = isinstance(obj, types.FunctionType) and obj.__name__ == 'mod_init'
    return private or mod_init_fn
def _normalize_version(args):
_, path = args
return '.'.join([x.zfill(4) for x in (path.split('/')[-1].split('.'))])
class ReleasesTree(TocTree):
    # ``toctree`` subclass that orders its entries as version numbers,
    # newest first (sort key: _normalize_version).
    option_spec = dict(TocTree.option_spec)

    def run(self):
        # Produce the normal toctree, then rewrite its entry list sorted
        # by zero-padded version, descending.
        rst = super(ReleasesTree, self).run()
        entries = rst[0][0]['entries'][:]
        entries.sort(key=_normalize_version, reverse=True)
        rst[0][0]['entries'][:] = entries
        return rst
def setup(app):
    # Sphinx extension entry point: wire up the releases directive and
    # the autodoc skip hook defined above.
    app.add_directive('releasestree', ReleasesTree)
    app.connect('autodoc-skip-member', skip_mod_init_member)
|
import re
import csv
import urllib2
import imaplib
import email.parser
import events
from idiokit import threado, util
class IMAP(threado.ThreadedStream):
    # Threaded idiokit stream that polls an IMAP INBOX, finds URLs in
    # text/plain message parts, downloads each URL as CSV and emits one
    # events.Event per CSV row into the stream.
    def __init__(self, server, user, password, imapfilter,
                 url_rex="http://\S+", filename_rex=".*"):
        # server/user/password: IMAP-over-SSL login credentials.
        # imapfilter: IMAP SEARCH criteria string used by poll().
        # url_rex: pattern for locating URLs in message bodies.
        #   NOTE(review): non-raw string; "\S" works today but a raw
        #   string (r"http://\S+") would be safer.
        # filename_rex: matched against the downloaded file's name; its
        #   named groups are added to every emitted event.
        threado.ThreadedStream.__init__(self)
        self.mailbox = imaplib.IMAP4_SSL(server)
        self.mailbox.login(user, password)
        self.mailbox.select('INBOX', readonly=False)
        self.filter = imapfilter
        self.url_rex = re.compile(url_rex)
        self.filename_rex = re.compile(filename_rex)
        self.email_parser = email.parser.Parser()
        # Seconds to wait for inner stream items between mailbox polls.
        self.poll_frequency = 1.0
        self.start()

    def fetch_url(self, url):
        # Download <url>, parse it as CSV and emit one event per row.
        opened = urllib2.urlopen(url)
        try:
            # Parse the HTTP response headers as an email header block to
            # extract the served filename (Content-Disposition).
            info = str(opened.info())
            header = self.email_parser.parsestr(info, headersonly=True)
            filename = header.get_filename(None)
            groupdict = dict()
            if filename is not None:
                match = self.filename_rex.match(filename)
                if match is not None:
                    groupdict = match.groupdict()
            reader = csv.DictReader(opened)
            for row in reader:
                event = events.Event()
                # Attach filename-derived metadata to every event.
                for key, value in groupdict.items():
                    if key is None:
                        continue
                    if value is None:
                        continue
                    event.add(key, value)
                for key, value in row.items():
                    if key is None:
                        continue
                    # NOTE(review): value may be None here (extra CSV
                    # columns produce None values in DictReader rows) and
                    # would reach guess_encoding — confirm it copes.
                    key = util.guess_encoding(key).lower().strip()
                    value = util.guess_encoding(value).strip()
                    # Skip empty / placeholder values.
                    if not value or value == "-":
                        continue
                    event.add(key, value)
                self.inner.send(event)
        finally:
            opened.close()

    def find_payload(self, num, path=()):
        # Generator: walk the MIME part tree of message <num>, yielding
        # (part path string, content type) for each non-attachment leaf.
        path = list(path) + [0]
        while True:
            path[-1] += 1
            path_str = ".".join(map(str, path))
            # Fetch only the MIME headers of this part (PEEK keeps the
            # message unread).
            fetch = "(BODY.PEEK[%s.MIME])" % path_str
            result, data = self.mailbox.fetch(num, fetch)
            # No more parts at this level: stop the generator.
            if not data or not isinstance(data[0], tuple) or len(data[0]) < 2:
                return
            header = self.email_parser.parsestr(data[0][1], headersonly=True)
            disposition = header.get_params(list(), "content-disposition")
            # Skip attachments; we only scan inline body parts.
            if ("attachment", "") in disposition:
                continue
            if header.is_multipart():
                # Recurse into nested multipart containers.
                for result in self.find_payload(num, path):
                    yield result
            else:
                yield path_str, header.get_content_type()

    def poll(self):
        # Search the mailbox with the configured filter, scan matching
        # text/plain parts for URLs, fetch each URL and mark the message
        # as seen.
        self.mailbox.noop()
        result, data = self.mailbox.search(None, self.filter)
        if not data:
            return
        for num in data[0].split():
            for path, content_type in self.find_payload(num):
                if content_type != "text/plain":
                    continue
                # Fetch only the first 2 KiB of the part body.
                fetch = "(BODY.PEEK[%s]<0.2048>)" % path
                result, data = self.mailbox.fetch(num, fetch)
                for parts in data:
                    for part in parts:
                        matches = re.findall(self.url_rex, part)
                        for match in matches:
                            self.fetch_url(match)
            self.mailbox.store(num, "+FLAGS", "\\Seen")

    def run(self):
        # Thread main loop: wait briefly for inner items, then poll the
        # mailbox regardless of whether anything arrived.
        while True:
            try:
                item = self.inner.next(self.poll_frequency)
            except threado.Timeout:
                pass
            self.poll()
def main():
    # Example wiring: read feed mails over IMAP, convert the extracted
    # CSV rows to XMPP elements and relay them into a MUC room.
    from idiokit.xmpp import XMPP
    imap = IMAP("mail.example.com", "mailuser", "mailpassword",
                '(FROM "eventfeed.com" BODY "http://" UNSEEN)',
                url_rex=r"http://\S+", filename_rex=r"(?P<eventfile>.*)")
    xmpp = XMPP("user@example.com", "password")
    xmpp.connect()
    room = xmpp.muc.join("room@conference.example.com", "imapbot")
    # Pump the pipeline until it terminates; throws() presumably
    # re-raises pipeline errors — confirm against idiokit docs.
    for _ in imap | events.events_to_elements() | room | threado.throws():
        pass

if __name__ == "__main__":
    main()
Better fixes for the previous commit. Take None-values (in addition to keys) into account.
import re
import csv
import urllib2
import imaplib
import email.parser
import events
from idiokit import threado, util
class IMAP(threado.ThreadedStream):
    # idiokit stream: poll an IMAP INBOX for matching messages, pull
    # URLs out of their text/plain parts, download each URL as CSV and
    # emit one events.Event per row.
    def __init__(self, server, user, password, imapfilter,
                 url_rex="http://\S+", filename_rex=".*"):
        # server/user/password: IMAP-over-SSL credentials.
        # imapfilter: IMAP SEARCH criteria used by poll().
        # url_rex: pattern for URLs in message bodies.
        #   NOTE(review): non-raw string; prefer r"http://\S+".
        # filename_rex: matched against the downloaded file's name; its
        #   named groups are merged into every emitted event.
        threado.ThreadedStream.__init__(self)
        self.mailbox = imaplib.IMAP4_SSL(server)
        self.mailbox.login(user, password)
        self.mailbox.select('INBOX', readonly=False)
        self.filter = imapfilter
        self.url_rex = re.compile(url_rex)
        self.filename_rex = re.compile(filename_rex)
        self.email_parser = email.parser.Parser()
        # Seconds to wait for inner stream items between polls.
        self.poll_frequency = 1.0
        self.start()

    def fetch_url(self, url):
        # Download <url>, parse as CSV and emit one event per data row.
        opened = urllib2.urlopen(url)
        try:
            # Treat HTTP response headers as an email header block to
            # recover the served filename (Content-Disposition).
            info = str(opened.info())
            header = self.email_parser.parsestr(info, headersonly=True)
            filename = header.get_filename(None)
            groupdict = dict()
            if filename is not None:
                match = self.filename_rex.match(filename)
                if match is not None:
                    groupdict = match.groupdict()
            reader = csv.DictReader(opened)
            for row in reader:
                event = events.Event()
                # Filename-derived metadata; skip unset groups.
                for key, value in groupdict.items():
                    if None in (key, value):
                        continue
                    event.add(key, value)
                for key, value in row.items():
                    # DictReader yields key None for extra columns and
                    # value None for missing ones — skip both.
                    if None in (key, value):
                        continue
                    key = util.guess_encoding(key).lower().strip()
                    value = util.guess_encoding(value).strip()
                    # Drop empty / placeholder values.
                    if not value or value == "-":
                        continue
                    event.add(key, value)
                self.inner.send(event)
        finally:
            opened.close()

    def find_payload(self, num, path=()):
        # Generator over the MIME part tree of message <num>; yields
        # (part path string, content type) for non-attachment leaves.
        path = list(path) + [0]
        while True:
            path[-1] += 1
            path_str = ".".join(map(str, path))
            # PEEK fetch of the part's MIME headers only.
            fetch = "(BODY.PEEK[%s.MIME])" % path_str
            result, data = self.mailbox.fetch(num, fetch)
            # No part at this index: this level is exhausted.
            if not data or not isinstance(data[0], tuple) or len(data[0]) < 2:
                return
            header = self.email_parser.parsestr(data[0][1], headersonly=True)
            disposition = header.get_params(list(), "content-disposition")
            # Only inline parts are scanned; attachments are skipped.
            if ("attachment", "") in disposition:
                continue
            if header.is_multipart():
                # Descend into nested multipart containers.
                for result in self.find_payload(num, path):
                    yield result
            else:
                yield path_str, header.get_content_type()

    def poll(self):
        # One mailbox pass: search with the configured filter, scan the
        # text/plain parts of each hit for URLs, fetch them, then flag
        # the message as seen.
        self.mailbox.noop()
        result, data = self.mailbox.search(None, self.filter)
        if not data:
            return
        for num in data[0].split():
            for path, content_type in self.find_payload(num):
                if content_type != "text/plain":
                    continue
                # Only the first 2 KiB of the body is inspected.
                fetch = "(BODY.PEEK[%s]<0.2048>)" % path
                result, data = self.mailbox.fetch(num, fetch)
                for parts in data:
                    for part in parts:
                        matches = re.findall(self.url_rex, part)
                        for match in matches:
                            self.fetch_url(match)
            self.mailbox.store(num, "+FLAGS", "\\Seen")

    def run(self):
        # Thread main loop: wait up to poll_frequency seconds for inner
        # items, then poll the mailbox whether or not anything arrived.
        while True:
            try:
                item = self.inner.next(self.poll_frequency)
            except threado.Timeout:
                pass
            self.poll()
def main():
    # Demonstration pipeline: IMAP feed reader -> event-to-XML adapter
    # -> XMPP MUC room; throws() presumably surfaces pipeline errors —
    # confirm against idiokit documentation.
    from idiokit.xmpp import XMPP
    imap = IMAP("mail.example.com", "mailuser", "mailpassword",
                '(FROM "eventfeed.com" BODY "http://" UNSEEN)',
                url_rex=r"http://\S+", filename_rex=r"(?P<eventfile>.*)")
    xmpp = XMPP("user@example.com", "password")
    xmpp.connect()
    room = xmpp.muc.join("room@conference.example.com", "imapbot")
    # Drive the pipeline until it terminates.
    for _ in imap | events.events_to_elements() | room | threado.throws():
        pass

if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import importlib
import os
import shutil
import sys
import six.moves.urllib as urllib
from unittest.mock import MagicMock
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# We have to mock the ffi module
class Mock(MagicMock):
    # Stand-in for the compiled pywayland._ffi extension so autodoc can
    # import pywayland without the C module being built.
    @classmethod
    def __getattr__(cls, name):
        # Every attribute access yields another Mock, so arbitrarily
        # deep attribute chains resolve during the docs build.
        return Mock()
MOCK_MODULES = ['pywayland._ffi']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- Build pywayland.protocol w/docs --------------------------------------
protocol_build_dir = '../pywayland/protocol/'
protocol_doc_dir = 'module/protocol'
protocol_version = '1.7.0'
protocol_source = 'http://cgit.freedesktop.org/wayland/wayland/plain/protocol/wayland.xml?id={}'.format(protocol_version)
index_header = """\
.. _protocol:
Protocol Modules
================
Wayland protocols built against Wayland {}.
.. toctree::
:maxdepth: 2
""".format(protocol_version)
protocol_rst = """\
.. module:: pywayland.protocol.{mod_lower}
{mod_upper}
{empty:=^{len}}
.. wl_protocol:: pywayland.protocol.{mod_lower} {mod_upper}
"""
def protocol_build(output_dir):
    # Download the upstream wayland.xml protocol definition (pinned to
    # protocol_version via protocol_source) and generate the
    # pywayland.protocol modules into output_dir.
    from pywayland.scanner import Scanner
    protocol_dest = 'wayland.xml'
    urllib.request.urlretrieve(protocol_source, protocol_dest)
    scanner = Scanner(protocol_dest)
    scanner.scan()
    scanner.output(output_dir)
# There is probably a better way to do this in Sphinx, templating or something
# ... but this works
def protocol_doc(input_dir, output_dir):
    # Generate one .rst stub per generated protocol module, plus an
    # index.rst listing them.
    #   input_dir: directory of generated pywayland protocol .py files.
    #   output_dir: destination for the .rst documentation files.
    py_files = os.listdir(input_dir)
    # Module names: every non-private .py file, extension stripped.
    doc_files = [os.path.splitext(f)[0] for f in py_files
                 if f[0] != '_']
    # Write out the index file
    index_file = os.path.join(output_dir, 'index.rst')
    with open(index_file, 'w') as f:
        f.write(index_header)
        for d in doc_files:
            f.write(' {}\n'.format(d))
    for mod_lower in doc_files:
        # Find the CamelCase protocol class whose lowercased name matches
        # the module name; break leaves mod_upper bound to it.
        # NOTE(review): if no attribute matches, mod_upper keeps the last
        # name in dir(mod) (or is unbound for an empty module) — confirm.
        mod = importlib.import_module('pywayland.protocol.' + mod_lower)
        for mod_upper in dir(mod):
            if mod_upper.lower() == mod_lower:
                break
        mod_len = len(mod_lower)
        doc_file = os.path.join(output_dir, '{}.rst'.format(mod_lower))
        with open(doc_file, 'w') as f:
            # len/empty drive the RST title underline of matching width.
            f.write(protocol_rst.format(
                mod_lower=mod_lower,
                mod_upper=mod_upper,
                len=mod_len,
                empty=''
            ))
# Build the protocol directory on RTD, or if it does not exist
if os.environ.get('READTHEDOCS', None) or not os.path.exists(protocol_build_dir):
if not os.path.exists(protocol_build_dir):
os.makedirs(protocol_build_dir)
protocol_build(protocol_build_dir)
# Re-build the protocol documentation directory
if os.path.exists(protocol_doc_dir):
shutil.rmtree(protocol_doc_dir)
os.makedirs(protocol_doc_dir)
protocol_doc(protocol_build_dir, protocol_doc_dir)
# -- General configuration ------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx_wl_protocol'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pywayland'
copyright = '2015, Sean Vig'
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1a.dev5'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
# Set the html_theme when building locally
if not os.environ.get('READTHEDOCS', None):
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Output file base name for HTML help builder.
htmlhelp_basename = 'pywaylanddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pywayland.tex', 'pywayland Documentation',
'Sean Vig', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pywayland', 'pywayland Documentation',
['Sean Vig'], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pywayland', 'pywayland Documentation',
'Sean Vig', 'pywayland', 'Python bindings for the libwayland library',
'Miscellaneous'),
]
Fix the doc building
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import importlib
import os
import shutil
import sys
import six.moves.urllib as urllib
from unittest.mock import MagicMock
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# We have to mock the ffi module
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['pywayland._ffi']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- Build pywayland.protocol w/docs --------------------------------------
protocol_build_dir = '../pywayland/protocol/'
protocol_doc_dir = 'module/protocol'
protocol_version = '1.7.0'
protocol_source = 'http://cgit.freedesktop.org/wayland/wayland/plain/protocol/wayland.xml?id={}'.format(protocol_version)
index_header = """\
.. _protocol:
Protocol Modules
================
Wayland protocols built against Wayland {}.
.. toctree::
:maxdepth: 2
""".format(protocol_version)
protocol_rst = """\
.. module:: pywayland.protocol.{mod_lower}
{mod_upper}
{empty:=^{len}}
.. wl_protocol:: pywayland.protocol.{mod_lower} {mod_upper}
"""
def protocol_build(output_dir):
    # Download the upstream wayland.xml protocol definition (pinned via
    # protocol_source) and generate the pywayland.protocol modules into
    # output_dir. NOTE(review): no explicit scan() call here — presumably
    # Scanner.output() performs the scan itself; verify against the
    # pywayland.scanner API.
    from pywayland.scanner import Scanner
    protocol_dest = 'wayland.xml'
    urllib.request.urlretrieve(protocol_source, protocol_dest)
    scanner = Scanner(protocol_dest)
    scanner.output(output_dir)
# There is probably a better way to do this in Sphinx, templating or something
# ... but this works
def protocol_doc(input_dir, output_dir):
    # Emit an index.rst plus one .rst stub per generated protocol module.
    #   input_dir: directory of generated pywayland protocol .py files.
    #   output_dir: destination for the .rst documentation files.
    py_files = os.listdir(input_dir)
    # Non-private modules only, extensions stripped.
    doc_files = [os.path.splitext(f)[0] for f in py_files
                 if f[0] != '_']
    # Write out the index file
    index_file = os.path.join(output_dir, 'index.rst')
    with open(index_file, 'w') as f:
        f.write(index_header)
        for d in doc_files:
            f.write(' {}\n'.format(d))
    for mod_lower in doc_files:
        # Locate the CamelCase class matching the module name; the break
        # leaves mod_upper bound to it.
        # NOTE(review): with no match, mod_upper keeps the last dir()
        # entry (or is unbound for an empty module) — confirm.
        mod = importlib.import_module('pywayland.protocol.' + mod_lower)
        for mod_upper in dir(mod):
            if mod_upper.lower() == mod_lower:
                break
        mod_len = len(mod_lower)
        doc_file = os.path.join(output_dir, '{}.rst'.format(mod_lower))
        with open(doc_file, 'w') as f:
            # len/empty produce a title underline of matching width.
            f.write(protocol_rst.format(
                mod_lower=mod_lower,
                mod_upper=mod_upper,
                len=mod_len,
                empty=''
            ))
# Build the protocol directory on RTD, or if it does not exist
if os.environ.get('READTHEDOCS', None) or not os.path.exists(protocol_build_dir):
if not os.path.exists(protocol_build_dir):
os.makedirs(protocol_build_dir)
protocol_build(protocol_build_dir)
# Re-build the protocol documentation directory
if os.path.exists(protocol_doc_dir):
shutil.rmtree(protocol_doc_dir)
os.makedirs(protocol_doc_dir)
protocol_doc(protocol_build_dir, protocol_doc_dir)
# -- General configuration ------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx_wl_protocol'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pywayland'
copyright = '2015, Sean Vig'
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1a.dev5'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
# Set the html_theme when building locally
if not os.environ.get('READTHEDOCS', None):
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Output file base name for HTML help builder.
htmlhelp_basename = 'pywaylanddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pywayland.tex', 'pywayland Documentation',
'Sean Vig', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pywayland', 'pywayland Documentation',
['Sean Vig'], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pywayland', 'pywayland Documentation',
'Sean Vig', 'pywayland', 'Python bindings for the libwayland library',
'Miscellaneous'),
]
|
# -*- coding: utf-8 -*-
#
# electrical documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 22 15:47:53 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinxcontrib.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'corr'
copyright = u'NIST MGI'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
autoclass_content = 'both'
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'sphinxdoc'
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'navbar_title': "*",
'navbar_site_name': "More",
'navbar_links': [
("Native", "rst/native/README.html", True),
("Docker", "rst/docker/README.html", True),
("Setup", "rst/setup/README.html", True),
("Github", "https://github.com/usnistgov/corr-deploy/", True),
],
'navbar_pagenav': False,
'navbar_sidebarrel': False,
'globaltoc_depth': 1,
'source_link_position': '',
'bootswatch_theme': 'cosmo'
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "CoRR"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "CoRR"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'corrdeploydoc'
# -- Options for LaTeX output --------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'corr-deploy.tex', u'CoRR Deployment Documentation',
u'Faical Yannick P. Congo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'corr-deploy', u'CoRR Deployment Documentation',
[u'Faical Yannick P. Congo'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'corr-deploy', u'CoRR Deployment Documentation',
u'Faical Yannick P. Congo', 'corr-deploy', 'Cloud of Reproducible Records',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
source_parsers = {'.md' : CommonMarkParser}
source_suffix = ['.rst', '.md']
def url_resolver(url):
    """Resolve url for both documentation and Github online.

    If the url is an IPython notebook, link to the converted HTML page
    instead (the leading 4 characters, e.g. ``rst/``, are dropped).

    Args:
        url: the path to the link (not always a full url)

    Returns:
        a local url to either the documentation or the Github
    """
    return url[4:-6] + '.html' if url.endswith('.ipynb') else url
def setup(app):
    """Sphinx extension hook: configure recommonmark's AutoStructify."""
    config = {
        'url_resolver': url_resolver,
        'auto_toc_tree_section': 'Contents',
    }
    app.add_config_value('recommonmark_config', config, True)
    app.add_transform(AutoStructify)
import shutil, os, glob

# Mirror the repository's top-level markdown docs into the local rst/ tree
# so the Sphinx build (which only sees this directory) can include them.
rst_directory = 'rst'
native_directory = 'rst/native'
docker_directory = 'rst/docker'
setup_directory = 'rst/setup'

# Create the destination tree if it does not exist yet.
for directory in [rst_directory, native_directory, docker_directory, setup_directory]:
    if not os.path.exists(directory):
        os.makedirs(directory)

# Paths are relative to the repository root (one level up from this file).
files_to_copy = (
    'README.md',
    'native/README.md',
    'docker/README.md',
    'setup/README.md',
    'LICENSE'
)

for fpath in files_to_copy:
    for fpath_glob in glob.glob(os.path.join('..', fpath)):
        # Strip the leading '..' component to get the path below the repo root.
        # NOTE(review): the '/'-based split assumes POSIX path separators;
        # this would break on Windows — confirm the build hosts.
        fpath_glob_ = '/'.join(fpath_glob.split('/')[1:])
        shutil.copy(fpath_glob, os.path.join(rst_directory, fpath_glob_))
Fix CSS copy issues in the Sphinx documentation build.
# -*- coding: utf-8 -*-
#
# electrical documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 22 15:47:53 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinxcontrib.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'corr'
copyright = u'NIST MGI'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
autoclass_content = 'both'
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'sphinxdoc'
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'navbar_title': "*",
'navbar_site_name': "More",
'navbar_links': [
("Native", "rst/native/README.html", True),
("Docker", "rst/docker/README.html", True),
("Setup", "rst/setup/README.html", True),
("Github", "https://github.com/usnistgov/corr-deploy/", True),
],
'navbar_pagenav': False,
'navbar_sidebarrel': False,
'globaltoc_depth': 1,
'source_link_position': '',
'bootswatch_theme': 'cosmo'
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "CoRR"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "CoRR"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'corrdeploydoc'
# -- Options for LaTeX output --------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'corr-deploy.tex', u'CoRR Deployment Documentation',
u'Faical Yannick P. Congo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'corr-deploy', u'CoRR Deployment Documentation',
[u'Faical Yannick P. Congo'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'corr-deploy', u'CoRR Deployment Documentation',
u'Faical Yannick P. Congo', 'corr-deploy', 'Cloud of Reproducible Records',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
source_parsers = {'.md' : CommonMarkParser}
source_suffix = ['.rst', '.md']
def url_resolver(url):
    """Resolve url for both documentation and Github online.

    IPython notebook links are rewritten to the corresponding HTML page
    (dropping the first 4 characters, e.g. a ``rst/`` prefix); every other
    url is returned unchanged.

    Args:
        url: the path to the link (not always a full url)

    Returns:
        a local url to either the documentation or the Github
    """
    if not url.endswith('.ipynb'):
        return url
    return url[4:-6] + '.html'
def setup(app):
    """Sphinx extension hook: wire up AutoStructify and the project CSS."""
    recommonmark_config = {
        'url_resolver': url_resolver,
        'auto_toc_tree_section': 'Contents',
    }
    app.add_config_value('recommonmark_config', recommonmark_config, True)
    app.add_transform(AutoStructify)
    app.add_stylesheet('corr-deploy.css')
import shutil, os, glob

# Mirror the repository's top-level markdown docs into the local rst/ tree
# so the Sphinx build (which only sees this directory) can include them.
rst_directory = 'rst'
native_directory = 'rst/native'
docker_directory = 'rst/docker'
setup_directory = 'rst/setup'

# Create the destination tree if it does not exist yet.
for directory in [rst_directory, native_directory, docker_directory, setup_directory]:
    if not os.path.exists(directory):
        os.makedirs(directory)

# Paths are relative to the repository root (one level up from this file).
files_to_copy = (
    'README.md',
    'native/README.md',
    'docker/README.md',
    'setup/README.md',
    'LICENSE'
)

for fpath in files_to_copy:
    for fpath_glob in glob.glob(os.path.join('..', fpath)):
        # Strip the leading '..' component to get the path below the repo root.
        # NOTE(review): the '/'-based split assumes POSIX path separators;
        # this would break on Windows — confirm the build hosts.
        fpath_glob_ = '/'.join(fpath_glob.split('/')[1:])
        shutil.copy(fpath_glob, os.path.join(rst_directory, fpath_glob_))
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from scipy.ndimage import interpolation
#############################################################################
# hansenlaw - a recursive method forward/inverse Abel transform algorithm
#
# Stephen Gibson - Australian National University, Australia
# Jason Gascooke - Flinders University, Australia
#
# This algorithm is adapted by Jason Gascooke from the article
# E. W. Hansen and P-L. Law
# "Recursive methods for computing the Abel transform and its inverse"
# J. Opt. Soc. Am A2, 510-520 (1985) doi: 10.1364/JOSAA.2.000510
#
# J. R. Gascooke PhD Thesis:
# "Energy Transfer in Polyatomic-Rare Gas Collisions and Van Der Waals
# Molecule Dissociation", Flinders University, 2000.
#
# Implemented in Python, with image quadrant co-adding, by Steve Gibson
# 2018-03 : NB method applies to grid centered (even columns), not
# pixel-centered (odd column) image see #206, #211
# 2018-02 : Drop one array dimension, use numpy broadcast multiplication
# 2015-12-16: Modified to calculate the forward Abel transform
# 2015-12-03: Vectorization and code improvements Dan Hickstein and
# Roman Yurchak
# Previously the algorithm iterated over the rows of the image
# now all of the rows are calculated simultaneously, which provides
# the same result, but speeds up processing considerably.
#############################################################################
def hansenlaw_transform(IM, dr=1, direction='inverse', shift=-0.5, **kwargs):
    r"""Forward/Inverse Abel transformation using the algorithm of
    `Hansen and Law J. Opt. Soc. Am. A 2, 510-520 (1985)
    <http://dx.doi.org/10.1364/JOSAA.2.000510>`_ equation 2a:

    .. math::

        f(r) = -\frac{1}{\pi} \int_{r}^{\infty}
               \frac{g^\prime(R)}{\sqrt{R^2-r^2}} dR,

    where :math:`f(r)` is the reconstructed image (source) function, and
    :math:`g'(R)` is the derivative of the projection (measured) function.

    The Hansen and Law approach treats the Abel transform as a system modeled
    by a set of linear differential equations, with :math:`f(r)` (forward) or
    :math:`g'(R)` (inverse) the driving function.

    Evaluation follows Eqs. (15 or 17), using (16a), (16b), and (16c or 18) of
    the Hansen and Law paper. For the full image transform, use the
    class :class:``abel.Transform``.

    For the inverse Abel transform of image g: ::

        f = abel.Transform(g, direction="inverse", method="hansenlaw").transform

    For the forward Abel transform of image f: ::

        g = abel.Transform(r, direction="forward", method="hansenlaw").transform

    This function performs the Hansen-Law transform on only one "right-side"
    image, typically one quadrant of the full image: ::

        Qtrans = abel.hansenlaw.hansenlaw_transform(Q, direction="inverse")

    The recursion proceeds from the outer edge of the image toward the image
    centre (origin), i.e. when ``n=cols-1``, ``R=Rmax``, and when ``n=0``,
    ``R=0``. This fits well with processing the image one quadrant (chosen
    orientation to be rightside-top), or one right-half image at a time.

    Parameters
    ----------
    IM : 1D or 2D numpy array
        right-side half-image (or quadrant)
    dr : float
        sampling size (=1 for pixel images), used for Jacobian scaling
    direction : string ('forward' or 'inverse')
        ``forward`` or ``inverse`` Abel transform
    shift : float
        transform-pair agreement is better if the image is shifted across
        via `scipy.ndimage.shift(IM, (0, shift))`. Default `shift=-1/2` pixel

    Returns
    -------
    AIM : 1D or 2D numpy array
        forward/inverse Abel transform half-image

    .. note:: Image should be a right-side image, like this: ::

        . +--------      +--------+
        . |      *       |      * |
        . |      *       |      * |  <---------- IM
        . |      *       |      * |
        . +-------- o --------+
        . |      *       |      * |
        . |      *       |      * |
        . |      *       |      * |
        . +--------      +--------+

        In accordance with all PyAbel methods the image center ``o`` is
        defined to be mid-pixel, i.e. an odd number of columns, for the
        whole image.
    """
    # Hansen & Law parameters of the exponential approximation, Table 1.
    h = np.array([0.318, 0.19, 0.35, 0.82, 1.8, 3.9, 8.3, 19.6, 48.3])
    lam = np.array([0.0, -2.1, -6.2, -22.4, -92.5, -414.5, -1889.4, -8990.9,
                    -47391.1])

    IM = np.atleast_2d(IM)

    # shift image across (default -1/2 pixel): gives better transform-pair
    # agreement. interpolation.shift returns a new array, so IM is untouched.
    IMS = interpolation.shift(IM, (0, shift))

    AIM = np.empty_like(IM)  # forward/inverse Abel transform image

    rows, N = IM.shape  # shape of input quadrant (half)
    K = h.size  # using H&L nomenclature

    # enumerate columns: n = 0 is Rmax, the right side of image
    n = np.arange(N-1)  # n = 0, ..., N-2
    num = N - n
    denom = num - 1  # N-n-1 in Hansen & Law
    ratio = num/denom  # (N-n)/(N-n-1) = N/(N-1), ..., 4/3. 3/2, 2/1

    # phi array Eq (16a), diagonal array, for each pixel
    phi = np.empty((N-1, K))
    for k in range(K):
        phi[:, k] = ratio**lam[k]

    # Gamma array, Eq (16b), with gamma Eq (16c) forward, or Eq (18) inverse
    gamma = np.empty_like(phi)
    if direction == "forward":
        lam += 1  # Eq. (16c) uses lambda+1; mutates only the local array
        for k in range(K):
            gamma[:, k] = h[k]*2*denom*(1 - ratio**lam[k])/lam[k]  # (16c)
        gamma *= -np.pi*dr  # Jacobian - saves scaling the transform later
        # driving function = the (shifted) image intensity profile
        drive = IMS
    else:  # gamma for inverse transform
        gamma[:, 0] = -h[0]*np.log(ratio)  # Eq. (18 lamda=0)
        for k in range(1, K):
            gamma[:, k] = h[k]*(1 - phi[:, k])/lam[k]  # Eq. (18 lamda<0)
        # driving function = derivative of the image intensity profile
        drive = np.gradient(IMS, dr, axis=-1)

    # Hansen and Law Abel transform ---- Eq. (15) forward, or Eq. (17) inverse
    # transforms every image row during the column iteration;
    # pixelcol = -n-1 walks from the right edge (R=Rmax) toward the origin
    x = np.zeros((K, rows))
    for nindx, pixelcol in zip(n, -n-1):
        x = phi[nindx][:, None]*x + gamma[nindx][:, None]*drive[:, pixelcol]
        AIM[:, pixelcol] = x.sum(axis=0)

    # the recursion never evaluates the left-most column; copy its neighbour
    AIM[:, 0] = AIM[:, 1]

    if AIM.shape[0] == 1:
        AIM = AIM[0]  # flatten to a vector

    return AIM
Full circle: a (0, -1/2) pixel shift is all that is needed to make the hansenlaw method agree with the transform pairs and with the other methods — i.e. a -1/2 pixel shift should be applied to odd-column full images.
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from scipy.ndimage import interpolation
#############################################################################
# hansenlaw - a recursive method forward/inverse Abel transform algorithm
#
# Stephen Gibson - Australian National University, Australia
# Jason Gascooke - Flinders University, Australia
#
# This algorithm is adapted by Jason Gascooke from the article
# E. W. Hansen and P-L. Law
# "Recursive methods for computing the Abel transform and its inverse"
# J. Opt. Soc. Am A2, 510-520 (1985) doi: 10.1364/JOSAA.2.000510
#
# J. R. Gascooke PhD Thesis:
# "Energy Transfer in Polyatomic-Rare Gas Collisions and Van Der Waals
# Molecule Dissociation", Flinders University, 2000.
#
# Implemented in Python, with image quadrant co-adding, by Steve Gibson
# 2018-03 : NB method applies to grid centered (even columns), not
# pixel-centered (odd column) image see #206, #211
# Apply, -1/2 pixel shift for odd column full image
# 2018-02 : Drop one array dimension, use numpy broadcast multiplication
# 2015-12-16: Modified to calculate the forward Abel transform
# 2015-12-03: Vectorization and code improvements Dan Hickstein and
# Roman Yurchak
# Previously the algorithm iterated over the rows of the image
# now all of the rows are calculated simultaneously, which provides
# the same result, but speeds up processing considerably.
#############################################################################
def hansenlaw_transform(IM, dr=1, direction='inverse',
                        full_image_odd_cols=True, **kwargs):
    r"""Forward/Inverse Abel transformation using the algorithm of
    `Hansen and Law J. Opt. Soc. Am. A 2, 510-520 (1985)
    <http://dx.doi.org/10.1364/JOSAA.2.000510>`_ equation 2a:

    .. math::

        f(r) = -\frac{1}{\pi} \int_{r}^{\infty}
               \frac{g^\prime(R)}{\sqrt{R^2-r^2}} dR,

    where :math:`f(r)` is the reconstructed image (source) function, and
    :math:`g'(R)` is the derivative of the projection (measured) function.

    The Hansen and Law approach treats the Abel transform as a system modeled
    by a set of linear differential equations, with :math:`f(r)` (forward) or
    :math:`g'(R)` (inverse) the driving function.

    Evaluation follows Eqs. (15 or 17), using (16a), (16b), and (16c or 18) of
    the Hansen and Law paper. For the full image transform, use the
    class :class:``abel.Transform``.

    For the inverse Abel transform of image g: ::

        f = abel.Transform(g, direction="inverse", method="hansenlaw").transform

    For the forward Abel transform of image f: ::

        g = abel.Transform(r, direction="forward", method="hansenlaw").transform

    This function performs the Hansen-Law transform on only one "right-side"
    image, typically one quadrant of the full image: ::

        Qtrans = abel.hansenlaw.hansenlaw_transform(Q, direction="inverse")

    The recursion proceeds from the outer edge of the image toward the image
    centre (origin), i.e. when ``n=cols-1``, ``R=Rmax``, and when ``n=0``,
    ``R=0``. This fits well with processing the image one quadrant (chosen
    orientation to be rightside-top), or one right-half image at a time.

    Parameters
    ----------
    IM : 1D or 2D numpy array
        right-side half-image (or quadrant)
    dr : float
        sampling size (=1 for pixel images), used for Jacobian scaling
    direction : string ('forward' or 'inverse')
        ``forward`` or ``inverse`` Abel transform
    full_image_odd_cols : boolean
        odd-column width images are shifted by (0, -1/2) pixel.
        This improves the agreement with analytical transform-pair functions,
        and the other `PyAbel` transform methods. See extensive discussion in
        PR #211. If ``False``, the image is used as-is (no shift).

    Returns
    -------
    AIM : 1D or 2D numpy array
        forward/inverse Abel transform half-image

    .. note:: Image should be a right-side image, like this: ::

        . +--------      +--------+
        . |      *       |      * |
        . |      *       |      * |  <---------- IM
        . |      *       |      * |
        . +-------- o --------+
        . |      *       |      * |
        . |      *       |      * |
        . |      *       |      * |
        . +--------      +--------+

        In accordance with all PyAbel methods the image center ``o`` is
        defined to be mid-pixel, i.e. an odd number of columns, for the
        full image, not right side. Otherwise pass `full_image_odd_cols=False`
    """
    # Hansen & Law parameters of the exponential approximation, Table 1.
    h = np.array([0.318, 0.19, 0.35, 0.82, 1.8, 3.9, 8.3, 19.6, 48.3])
    lam = np.array([0.0, -2.1, -6.2, -22.4, -92.5, -414.5, -1889.4, -8990.9,
                    -47391.1])

    IM = np.atleast_2d(IM)

    # shift image left -1/2 pixel for odd-column-width full images, giving
    # better transform-pair agreement, see discussion in PR #211
    if full_image_odd_cols:
        IMS = interpolation.shift(IM, (0, -1/2))
    else:
        # BUGFIX: previously IMS was left undefined on this path, raising
        # NameError. The un-shifted input is used directly; it is only read
        # below, never written, so the caller's array is not mangled.
        IMS = IM

    AIM = np.empty_like(IMS)  # forward/inverse Abel transform image

    rows, N = IM.shape  # shape of input quadrant (half)
    K = h.size  # using H&L nomenclature

    # enumerate columns: n = 0 is Rmax, the right side of image
    n = np.arange(N-1)  # n = 0, ..., N-2
    num = N - n
    denom = num - 1  # N-n-1 in Hansen & Law
    ratio = num/denom  # (N-n)/(N-n-1) = N/(N-1), ..., 4/3. 3/2, 2/1

    # phi array Eq (16a), diagonal array, for each pixel
    phi = np.empty((N-1, K))
    for k in range(K):
        phi[:, k] = ratio**lam[k]

    # Gamma array, Eq (16b), with gamma Eq (16c) forward, or Eq (18) inverse
    gamma = np.empty_like(phi)
    if direction == "forward":
        lam += 1  # Eq. (16c) uses lambda+1; mutates only the local array
        for k in range(K):
            gamma[:, k] = h[k]*2*denom*(1 - ratio**lam[k])/lam[k]  # (16c)
        gamma *= -np.pi*dr  # Jacobian - saves scaling the transform later
        # driving function = the (possibly shifted) image intensity profile
        drive = IMS
    else:  # gamma for inverse transform
        gamma[:, 0] = -h[0]*np.log(ratio)  # Eq. (18 lamda=0)
        for k in range(1, K):
            gamma[:, k] = h[k]*(1 - phi[:, k])/lam[k]  # Eq. (18 lamda<0)
        # driving function = derivative of the image intensity profile
        drive = np.gradient(IMS, dr, axis=-1)

    # Hansen and Law Abel transform ---------------
    # Eq. (15) forward, or Eq. (17) inverse
    # transforms every row during the column iteration;
    # pixelcol = -n-1 walks from the right edge (R=Rmax) toward the origin
    x = np.zeros((K, rows))
    for nindx, pixelcol in zip(n, -n-1):
        x = phi[nindx][:, None]*x + gamma[nindx][:, None]*drive[:, pixelcol]
        AIM[:, pixelcol] = x.sum(axis=0)

    # the recursion never evaluates the left-most column; copy its neighbour
    AIM[:, 0] = AIM[:, 1]

    if AIM.shape[0] == 1:
        AIM = AIM[0]  # flatten to a vector

    return AIM
|
# -*- coding: utf-8 -*-
#
# Zephyr documentation build configuration file, created by
# sphinx-quickstart on Fri May 8 11:43:01 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# Add the 'extensions' directory to sys.path, to enable finding Sphinx
# extensions within.
sys.path.insert(0, os.path.join(os.path.abspath('.'), 'extensions'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'breathe', 'sphinx.ext.todo',
'sphinx.ext.extlinks',
'zephyr.application',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zephyr Project'
copyright = u'2015-2017 Zephyr Project members and individual contributors.'
author = u'many'
if "ZEPHYR_BASE" not in os.environ:
sys.stderr.write("$ZEPHYR_BASE environment variable undefined.\n")
exit(1)
ZEPHYR_BASE = os.environ["ZEPHYR_BASE"]
# The following code tries to extract the version information by reading the
# VERSION file, when Sphinx is run directly (e.g. by Read the Docs).
try:
    version_major = None
    version_minor = None
    patchlevel = None
    extraversion = None
    # 'with' ensures the VERSION file handle is closed (was leaked before).
    with open(os.path.join(ZEPHYR_BASE, 'VERSION')) as version_file:
        for line in version_file:
            # Skip blank/malformed lines; previously these raised ValueError
            # on unpacking and silently aborted the whole parse.
            if '=' not in line:
                continue
            # maxsplit=1 (was 2): a value containing '=' no longer yields
            # three fields and a ValueError on unpacking.
            key, val = [x.strip() for x in line.split('=', 1)]
            if key == 'VERSION_MAJOR':
                version_major = val
            elif key == 'VERSION_MINOR':
                version_minor = val
            elif key == 'PATCHLEVEL':
                patchlevel = val
            elif key == 'EXTRAVERSION':
                extraversion = val
            # EXTRAVERSION may legitimately be '', so test against None here
            # (the old truthiness test never broke early for releases).
            if version_major and version_minor and patchlevel and extraversion is not None:
                break
except Exception:
    # Best-effort: fall through to the "unknown version" branch below.
    pass
finally:
    if version_major and version_minor and patchlevel and extraversion is not None:
        version = release = version_major + '.' + version_minor + '.' + patchlevel
        if extraversion != '':
            version = release = version + '-' + extraversion
    else:
        sys.stderr.write('Warning: Could not extract kernel version\n')
        version = release = "unknown version"
# NOTE(review): this unconditionally overrides the value computed above —
# presumably intentional for CI builds where KERNELVERSION is set; confirm.
version = release = os.getenv('KERNELVERSION', '1.9.0')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Additional lexer for Pygments (syntax highlighting)
from lexer.DtsLexer import DtsLexer
from sphinx.highlighting import lexers
lexers['DTS'] = DtsLexer()
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
rst_epilog = """
.. include:: /substitutions.txt
"""
# -- Options for HTML output ----------------------------------------------
try:
import sphinx_rtd_theme
except ImportError:
html_theme = 'zephyr'
html_theme_path = ['./themes']
else:
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
if tags.has('daily') or tags.has('release'):
html_theme = 'zephyr-docs-theme'
html_theme_path = ['./themes']
if tags.has('release'):
is_release = True
docs_title = 'Docs / %s' %(version)
else:
is_release = False
docs_title = 'Docs / Latest'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Zephyr Project Documentation"
# This value determines the text for the permalink; it defaults to "¶".
# Set it to None or the empty string to disable permalinks.
#html_add_permalinks = ""
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants =
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = True
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink =
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = tags.has('development')
# If true, license is shown in the HTML footer. Default is True.
html_show_license = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
sourcelink_suffix = '.txt'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'zephyrdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'zephyr.tex', u'Zephyr Project Documentation',
u'many', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'zephyr', u'Zephyr Project Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'zephyr', u'Zephyr Project Documentation',
author, 'Zephyr', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
breathe_projects = {
"Zephyr": "doxygen/xml",
"doc-examples": "doxygen/xml"
}
breathe_default_project = "Zephyr"
# Qualifiers to a function are causing Sphihx/Breathe to warn about
# Error when parsing function declaration and more. This is a list
# of strings that the parser additionally should accept as
# attributes.
cpp_id_attributes = ['__syscall', '__syscall_inline', '__deprecated',
'__may_alias', '__used', '__unused', '__weak',
'__DEPRECATED_MACRO', 'FUNC_NORETURN' ]
# docs_title is used in the breadcrumb title in the zephyr docs theme
html_context = {
'show_license': html_show_license,
'docs_title': docs_title,
'is_release': is_release,
}
extlinks = {'jira': ('https://jira.zephyrproject.org/browse/%s', ''),
'github': ('https://github.com/zephyrproject-rtos/zephyr/issues/%s', '')
}
# some configuration for linkcheck builder
# noticed that we're getting false-positive link errors on JIRA, I suspect
# because it's taking too long for the server to respond so bump up the
# timeout (default=5) and turn off anchor checks (so only a HEAD request is
# done - much faster) Leave the ignore commented in case we want to remove
# jira link checks later...
linkcheck_timeout = 30
linkcheck_workers = 10
# linkcheck_ignore = [r'https://jira\.zephyrproject\.org/']
linkcheck_anchors = False
def setup(app):
app.add_stylesheet("zephyr-custom.css")
doc: conf.py: remove unused import
Make a pep8 linter happier.
Signed-off-by: Marti Bolivar <fd5da480b758a78208111e6ae5c6806e1f887137@opensourcefoundries.com>
# -*- coding: utf-8 -*-
#
# Zephyr documentation build configuration file, created by
# sphinx-quickstart on Fri May 8 11:43:01 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Add the 'extensions' directory to sys.path, to enable finding Sphinx
# extensions within.
sys.path.insert(0, os.path.join(os.path.abspath('.'), 'extensions'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'breathe', 'sphinx.ext.todo',
'sphinx.ext.extlinks',
'zephyr.application',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zephyr Project'
copyright = u'2015-2017 Zephyr Project members and individual contributors.'
author = u'many'
if "ZEPHYR_BASE" not in os.environ:
sys.stderr.write("$ZEPHYR_BASE environment variable undefined.\n")
exit(1)
ZEPHYR_BASE = os.environ["ZEPHYR_BASE"]
# The following code extracts the version information by reading the
# VERSION file, for when Sphinx is run directly (e.g. by Read the Docs).
try:
    version_major = None
    version_minor = None
    patchlevel = None
    extraversion = None
    for line in open(os.path.join(ZEPHYR_BASE, 'VERSION')):
        # Each line is "KEY = VALUE"; split on the first '=' only so a
        # value containing '=' cannot break the two-element unpacking.
        key, val = [x.strip() for x in line.split('=', 1)]
        if key == 'VERSION_MAJOR':
            version_major = val
        elif key == 'VERSION_MINOR':
            version_minor = val
        elif key == 'PATCHLEVEL':
            patchlevel = val
        elif key == 'EXTRAVERSION':
            extraversion = val
        # EXTRAVERSION may legitimately be the empty string, so test
        # against None instead of relying on truthiness.
        if None not in (version_major, version_minor, patchlevel, extraversion):
            break
except Exception:
    # Missing or malformed VERSION file; handled by the fallback below.
    pass
finally:
    if None not in (version_major, version_minor, patchlevel, extraversion):
        version = release = version_major + '.' + version_minor + '.' + patchlevel
        if extraversion != '':
            version = release = version + '-' + extraversion
    else:
        sys.stderr.write('Warning: Could not extract kernel version\n')
        version = release = "unknown version"
# An explicit $KERNELVERSION (set by the Makefile-driven build) takes
# precedence; otherwise keep the version extracted above. The previous
# code unconditionally overrode the result with a hard-coded default,
# which made the whole extraction above dead code.
version = release = os.getenv('KERNELVERSION', version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Additional lexer for Pygments (syntax highlighting)
from lexer.DtsLexer import DtsLexer
from sphinx.highlighting import lexers
lexers['DTS'] = DtsLexer()
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
rst_epilog = """
.. include:: /substitutions.txt
"""
# -- Options for HTML output ----------------------------------------------
try:
import sphinx_rtd_theme
except ImportError:
html_theme = 'zephyr'
html_theme_path = ['./themes']
else:
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
if tags.has('daily') or tags.has('release'):
html_theme = 'zephyr-docs-theme'
html_theme_path = ['./themes']
if tags.has('release'):
is_release = True
docs_title = 'Docs / %s' %(version)
else:
is_release = False
docs_title = 'Docs / Latest'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Zephyr Project Documentation"
# This value determines the text for the permalink; it defaults to "¶".
# Set it to None or the empty string to disable permalinks.
#html_add_permalinks = ""
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants =
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = True
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink =
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = tags.has('development')
# If true, license is shown in the HTML footer. Default is True.
html_show_license = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
sourcelink_suffix = '.txt'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'zephyrdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'zephyr.tex', u'Zephyr Project Documentation',
u'many', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'zephyr', u'Zephyr Project Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'zephyr', u'Zephyr Project Documentation',
author, 'Zephyr', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
breathe_projects = {
"Zephyr": "doxygen/xml",
"doc-examples": "doxygen/xml"
}
breathe_default_project = "Zephyr"
# Qualifiers to a function are causing Sphihx/Breathe to warn about
# Error when parsing function declaration and more. This is a list
# of strings that the parser additionally should accept as
# attributes.
cpp_id_attributes = ['__syscall', '__syscall_inline', '__deprecated',
'__may_alias', '__used', '__unused', '__weak',
'__DEPRECATED_MACRO', 'FUNC_NORETURN' ]
# docs_title is used in the breadcrumb title in the zephyr docs theme
html_context = {
'show_license': html_show_license,
'docs_title': docs_title,
'is_release': is_release,
}
extlinks = {'jira': ('https://jira.zephyrproject.org/browse/%s', ''),
'github': ('https://github.com/zephyrproject-rtos/zephyr/issues/%s', '')
}
# some configuration for linkcheck builder
# noticed that we're getting false-positive link errors on JIRA, I suspect
# because it's taking too long for the server to respond so bump up the
# timeout (default=5) and turn off anchor checks (so only a HEAD request is
# done - much faster) Leave the ignore commented in case we want to remove
# jira link checks later...
linkcheck_timeout = 30
linkcheck_workers = 10
# linkcheck_ignore = [r'https://jira\.zephyrproject\.org/']
linkcheck_anchors = False
def setup(app):
app.add_stylesheet("zephyr-custom.css")
|
# encoding: utf-8
import os.path
from datetime import datetime
from collections import defaultdict
from flask import json
from flask.ext.sqlalchemy import SQLAlchemy
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
prop_defs = {
'phone': "Telefon",
'email': "Email",
'website': "Website",
'facebook': "Facebook",
'twitter': "Twitter",
'address': "Adresa poștală",
}
meta_defs = ['office', 'college', 'hpol_id']
db = SQLAlchemy()
class User(db.Model):
    # Application user identified by an OpenID URL.
    id = db.Column(db.Integer, primary_key=True)
    # OpenID identity URL used for login; looked up by get_user().
    openid_url = db.Column(db.Text())
    name = db.Column(db.Text())
    email = db.Column(db.Text())
    # UTC timestamp of account creation (set in get_update_user).
    time_create = db.Column(db.DateTime)
class Person(db.Model):
    """A person with versioned JSON content and key/value metadata."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text())

    def get_content(self):
        """Return the content dict of the most recent version, or {}."""
        if self.versions:
            latest = sorted(self.versions, key=lambda v: v.time)[-1]
            return latest.get_content()
        return {}

    def save_content_version(self, new_content, user):
        """Add a new ContentVersion holding new_content (not committed here)."""
        version = ContentVersion(person=self, user=user,
                                 time=datetime.utcnow())
        version.content = json.dumps(new_content)
        db.session.add(version)
        # NOTE(review): version.id is still None until the session is
        # flushed/committed, so this log line records version_id=None.
        log.info("Content update for person id=%r version_id=%r",
                 self.id, version.id)

    def get_meta(self, key):
        """Return the value of the first meta entry matching key, else None."""
        for meta in self.meta:
            if meta.key == key:
                return meta.value
        return None

    @classmethod
    def objects_current(cls):
        """Query for persons not marked as removed via PersonMeta."""
        # Bug fix: the original combined the two column expressions with
        # the Python `and` operator, which does not build a SQL AND -- it
        # evaluates only one clause. Use db.and_() to get a real conjunction.
        return cls.query.filter(
            db.not_(
                cls.meta.any(
                    db.and_(
                        PersonMeta.key == 'removed',
                        PersonMeta.value == 'true',
                    )
                )
            )
        )
class ContentVersion(db.Model):
    # One immutable snapshot of a Person's JSON content.
    id = db.Column(db.Integer, primary_key=True)
    person_id = db.Column(db.Integer, db.ForeignKey('person.id'))
    person = db.relationship('Person', backref=db.backref('versions'))
    # JSON-serialized content blob (written by Person.save_content_version).
    content = db.Column(db.LargeBinary)
    # User who saved this version.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    user = db.relationship('User')
    # UTC time the version was saved.
    time = db.Column(db.DateTime)
    def get_content(self):
        # Deserialize the stored JSON blob back into Python objects.
        return json.loads(self.content)
class PersonMeta(db.Model):
    # Free-form key/value metadata attached to a Person
    # (keys seen in this module: meta_defs entries and the 'removed' flag).
    id = db.Column(db.Integer, primary_key=True)
    person_id = db.Column(db.Integer, db.ForeignKey('person.id'))
    person = db.relationship('Person', backref=db.backref('meta'))
    key = db.Column(db.Text)
    value = db.Column(db.Text)
def get_user(openid_url):
    """Return the User whose OpenID URL matches, or None if absent."""
    matching = User.query.filter_by(openid_url=openid_url)
    return matching.first()
def get_update_user(openid_url, name, email):
    """Fetch the user for openid_url, creating or updating it as needed.

    Commits when the user is new or the stored name/email differ, and
    returns the (possibly freshly created) User instance.
    """
    user = get_user(openid_url)
    created = user is None
    if created:
        user = User(openid_url=openid_url, time_create=datetime.utcnow())
        log.info("New user, openid_url=%r", openid_url)
    # Bug fix: a freshly created user must always be persisted, even when
    # the supplied name/email happen to equal the (None) defaults of the
    # new row; the original skipped add/commit in that case.
    if created or (name, email) != (user.name, user.email):
        user.name = name
        user.email = email
        db.session.add(user)
        db.session.commit()
        log.info("User data modified for openid_url=%r: name=%r, email=%r",
                 openid_url, name, email)
    return user
Use unicode string literals for the property labels (fixes the non-ASCII "Adresa poștală").
# encoding: utf-8
import os.path
from datetime import datetime
from collections import defaultdict
from flask import json
from flask.ext.sqlalchemy import SQLAlchemy
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
prop_defs = {
'phone': u"Telefon",
'email': u"Email",
'website': u"Website",
'facebook': u"Facebook",
'twitter': u"Twitter",
'address': u"Adresa poștală",
}
meta_defs = ['office', 'college', 'hpol_id']
db = SQLAlchemy()
class User(db.Model):
    # Application user identified by an OpenID URL.
    id = db.Column(db.Integer, primary_key=True)
    # OpenID identity URL used for login; looked up by get_user().
    openid_url = db.Column(db.Text())
    name = db.Column(db.Text())
    email = db.Column(db.Text())
    # UTC timestamp of account creation (set in get_update_user).
    time_create = db.Column(db.DateTime)
class Person(db.Model):
    """A person with versioned JSON content and key/value metadata."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text())

    def get_content(self):
        """Return the content dict of the most recent version, or {}."""
        if self.versions:
            latest = sorted(self.versions, key=lambda v: v.time)[-1]
            return latest.get_content()
        return {}

    def save_content_version(self, new_content, user):
        """Add a new ContentVersion holding new_content (not committed here)."""
        version = ContentVersion(person=self, user=user,
                                 time=datetime.utcnow())
        version.content = json.dumps(new_content)
        db.session.add(version)
        # NOTE(review): version.id is still None until the session is
        # flushed/committed, so this log line records version_id=None.
        log.info("Content update for person id=%r version_id=%r",
                 self.id, version.id)

    def get_meta(self, key):
        """Return the value of the first meta entry matching key, else None."""
        for meta in self.meta:
            if meta.key == key:
                return meta.value
        return None

    @classmethod
    def objects_current(cls):
        """Query for persons not marked as removed via PersonMeta."""
        # Bug fix: the original combined the two column expressions with
        # the Python `and` operator, which does not build a SQL AND -- it
        # evaluates only one clause. Use db.and_() to get a real conjunction.
        return cls.query.filter(
            db.not_(
                cls.meta.any(
                    db.and_(
                        PersonMeta.key == 'removed',
                        PersonMeta.value == 'true',
                    )
                )
            )
        )
class ContentVersion(db.Model):
    # One immutable snapshot of a Person's JSON content.
    id = db.Column(db.Integer, primary_key=True)
    person_id = db.Column(db.Integer, db.ForeignKey('person.id'))
    person = db.relationship('Person', backref=db.backref('versions'))
    # JSON-serialized content blob (written by Person.save_content_version).
    content = db.Column(db.LargeBinary)
    # User who saved this version.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    user = db.relationship('User')
    # UTC time the version was saved.
    time = db.Column(db.DateTime)
    def get_content(self):
        # Deserialize the stored JSON blob back into Python objects.
        return json.loads(self.content)
class PersonMeta(db.Model):
    # Free-form key/value metadata attached to a Person
    # (keys seen in this module: meta_defs entries and the 'removed' flag).
    id = db.Column(db.Integer, primary_key=True)
    person_id = db.Column(db.Integer, db.ForeignKey('person.id'))
    person = db.relationship('Person', backref=db.backref('meta'))
    key = db.Column(db.Text)
    value = db.Column(db.Text)
def get_user(openid_url):
    """Return the User whose OpenID URL matches, or None if absent."""
    matching = User.query.filter_by(openid_url=openid_url)
    return matching.first()
def get_update_user(openid_url, name, email):
    """Fetch the user for openid_url, creating or updating it as needed.

    Commits when the user is new or the stored name/email differ, and
    returns the (possibly freshly created) User instance.
    """
    user = get_user(openid_url)
    created = user is None
    if created:
        user = User(openid_url=openid_url, time_create=datetime.utcnow())
        log.info("New user, openid_url=%r", openid_url)
    # Bug fix: a freshly created user must always be persisted, even when
    # the supplied name/email happen to equal the (None) defaults of the
    # new row; the original skipped add/commit in that case.
    if created or (name, email) != (user.name, user.email):
        user.name = name
        user.email = email
        db.session.add(user)
        db.session.commit()
        log.info("User data modified for openid_url=%r: name=%r, email=%r",
                 openid_url, name, email)
    return user
|
import json
from dialog import ask_password, ask_add_password
from encryption import hash_binary, hash_hex
from encryption import encrypt_password, decrypt_password
from encryption import new_salt, new_random_password
import base64
import threading
import os
import traceback
MASTER_PASSWORD_IN_MEMORY_SECONDS = 10
class InvalidMasterPassword(Exception):
    """Raised when the entered master password does not match the stored hash."""
    pass
class TransactionAbort(Exception):
    """Raised when an exception aborts a Database transaction."""
    pass
class Cancel(Exception):
    """Raised when the user cancels the password dialog."""
    pass
class MasterPassword(object):
    """Holds a hash of the master password in memory for a limited time.

    Only a hash of the user's password is ever stored; the binary form is
    wiped by a timer after seconds_in_memory of inactivity, after which the
    user is prompted again on next use.
    """
    # threading.Timer that wipes _bytes after a period of inactivity.
    _deletion_timer = None
    # Binary hash of the password; passed to encrypt/decrypt as key material.
    _bytes = None
    # Hex digest of _bytes, used to verify re-entered passwords.
    _hash = None
    def __init__(self, hash = None):
        """hash must be a unicode hexdigest or None"""
        self.hash = hash
    @property
    def seconds_in_memory(self):
        # How long the password bytes stay cached after their last use.
        return MASTER_PASSWORD_IN_MEMORY_SECONDS
    def open_ask_dialog(self):
        # Show the blocking dialog; returns the entered string or None.
        return ask_password('master password:')
    def ask(self):
        """Prompt for the master password until it matches or is canceled.

        Raises Cancel when the dialog is dismissed; a wrong password
        re-prompts recursively.
        """
        password = self.open_ask_dialog()
        if password is None:
            raise Cancel("The dialog was canceled.")
        try:
            self.password = password
        except InvalidMasterPassword:
            self.ask()
    @property
    def password(self):
        # The clear-text password is intentionally inaccessible.
        raise TypeError('the password can not be used. Use bytes instead.')
    @password.setter
    def password(self, password):
        # do not use the original password but the hash instead
        password_bytes = password.encode('UTF-8')
        self.bytes = hash_binary(password_bytes)
        # Assigning hash verifies against any previously stored digest and
        # may raise InvalidMasterPassword (see the hash setter below).
        self.hash = hash_hex(self.bytes)
    @property
    def bytes(self):
        """Key material derived from the password; prompts if not cached."""
        if self._bytes is None:
            self.ask()
        else:
            self.refresh_timer()
        return self._bytes
    @bytes.setter
    def bytes(self, bytes):
        self.refresh_timer()
        self._bytes = bytes
    def has_bytes(self):
        # True while the key material is still cached in memory.
        return self._bytes is not None
    def refresh_timer(self):
        """Restart the countdown that wipes the cached password bytes."""
        if self._deletion_timer is not None:
            self._deletion_timer.cancel()
        self._deletion_timer = threading.Timer(self.seconds_in_memory,
                                               self.delete)
        self._deletion_timer.start()
    @property
    def hash(self):
        """Hex digest of the password hash; prompts if never entered."""
        if self._hash is None:
            self.ask()
        if self.has_bytes(): # negligible race condition
            assert self._hash == hash_hex(self.bytes)
        return self._hash
    @hash.setter
    def hash(self, hash):
        # Accept only None or a lowercase hex digest.
        assert hash is None or all(character in "0123456789abcdef" for character in hash)
        if self._hash is None:
            self._hash = hash
        elif self._hash != hash:
            raise InvalidMasterPassword("The entered master password does not match the original.")
    def delete(self):
        """Forget the cached password bytes (the hex digest is kept)."""
        self._bytes = None
    def decrypt_password(self, encrypted_password, salt):
        # Delegate to encryption.decrypt_password with the cached key bytes.
        return decrypt_password(encrypted_password, salt, self.bytes)
    def encrypt_password(self, password, salt):
        # Delegate to encryption.encrypt_password with the cached key bytes.
        return encrypt_password(password, salt, self.bytes)
class Database(object):
    """JSON-file-backed password store with transactional `with` access.

    Entering the instance as a context manager loads the JSON file into
    self.config; leaving the outermost context writes it back to disk, or
    raises TransactionAbort when an error occurred inside the block.
    """
    _config = None
    _master_password = None
    javascript_template_path = 'passwords_viewer_template.js'
    javascript_path = 'passwords_viewer.js'
    # When True, every successful save also regenerates the JS viewer file.
    auto_save_to_javascript = True
    # Factory for the MasterPassword object (overridable, e.g. for tests).
    new_master_password = MasterPassword
    def __init__(self, file_name):
        self.file_name = file_name
        # Depth of nested `with self:` blocks; file I/O happens at depth 0.
        self.with_nesting = 0
    def save_to_javascript(self):
        """Embed the raw JSON database into the JavaScript viewer template."""
        with open(self.file_name) as source_file, \
             open(self.javascript_template_path) as javascript_template_file, \
             open(self.javascript_path, 'w') as javascript_file:
            # Renamed from `json` to stop shadowing the json module.
            passwords_json = source_file.read()
            template = javascript_template_file.read()
            javascript_file.write(template.format(passwords = passwords_json))
    @property
    def master_password(self):
        """The MasterPassword of the database, created lazily from the
        stored hash (accessing .hash may prompt the user)."""
        if self._master_password is None:
            with self:
                hash = self.config.get('master_password_hash', None)
                self._master_password = self.new_master_password(hash)
                self.config['master_password_hash'] = self._master_password.hash
        return self._master_password
    @property
    def config(self):
        """The loaded JSON document; only valid inside a `with self:` block."""
        if self._config is None:
            raise ValueError('Use the with statement!')
        return self._config
    @config.setter
    def config(self, value):
        self._config = value
    def new_config(self):
        # Shape of an empty database document.
        return {u'passwords': []}
    def __enter__(self):
        # Load the file only when entering the outermost context.
        if self.with_nesting == 0:
            if os.path.isfile(self.file_name):
                with open(self.file_name) as file:
                    self.config = json.load(file)
            else:
                self.config = self.new_config()
        self.with_nesting += 1
    def __exit__(self, ty, err, tb):
        self.with_nesting -= 1
        assert self.with_nesting >= 0
        if self.with_nesting == 0:
            config = self.config
            self.config = None
            if ty is not None or err is not None or tb is not None:
                # NOTE(review): this replaces (masks) the original
                # exception with TransactionAbort; kept for backward
                # compatibility with existing callers.
                raise TransactionAbort('An error aborted the transaction.')
            with open(self.file_name, 'w') as file:
                json.dump(config, file)
            if self.auto_save_to_javascript:
                self.save_to_javascript()
    @property
    def _passwords(self):
        # Live list of entry dicts inside the loaded config.
        return self.config[u'passwords']
    def new_password_entry(self, entry = None):
        """Wrap `entry` (a dict; a fresh one by default) in a PasswordEntry.

        Bug fix: the default used to be the mutable literal `{}`, which is
        evaluated once and shared between all calls, so every entry created
        without an explicit dict aliased the same storage.
        """
        if entry is None:
            entry = {}
        return PasswordEntry(self, entry, self.master_password)
    def add_password_entry(self, entry):
        """Register an entry's backing dict in the store (idempotent)."""
        entry_dict = entry.asDict()
        if entry_dict not in self._passwords:
            self._passwords.append(entry_dict)
    def remove_password_entry(self, entry):
        """Remove every occurrence of an entry's backing dict."""
        entry_dict = entry.asDict()
        while entry_dict in self._passwords:
            self._passwords.remove(entry_dict)
    @property
    def passwords(self):
        """All entries in the database, sorted by name."""
        with self:
            entries = [self.new_password_entry(entry) for entry in self._passwords]
            entries.sort(key = lambda entry: entry.name)
            return entries
    def add_new_password_from_user(self):
        """Interactively create and store a new password entry."""
        return self.new_password_entry().fill_from_user()
    def add_new_password_from_export(self, export):
        """Create and store an entry from an exported dict."""
        return self.new_password_entry().fill_from_export(export)
    def export_all(self):
        """Decrypt and export all entries as a list of plain dicts."""
        return [entry.export() for entry in self.passwords]
    def export_all_json(self, file):
        """Write the decrypted export of all entries to `file` as JSON."""
        return json.dump(self.export_all(), file, sort_keys=True, indent=4)
    def import_all(self, passwords, log_file):
        """Best-effort import of exported dicts; outcomes go to log_file."""
        successes = []
        for password in passwords:
            with self:
                try:
                    with self:
                        entry = self.add_new_password_from_export(password)
                    successes.append(password.get("name", str(password)))
                except Exception:
                    # Bug fix: was a bare `except:`; keep the best-effort
                    # semantics but stop swallowing SystemExit and
                    # KeyboardInterrupt.
                    traceback.print_exc(file = log_file)
                    print("Failed: ", password, file = log_file)
        for success in successes:
            print("Success:", success, file = log_file)
    def import_all_json(self, file, log_file):
        """Read a JSON export from `file` and import it."""
        try:
            passwords = json.load(file)
            self.import_all(passwords, log_file)
        except Exception:
            traceback.print_exc(file = log_file)
class PasswordEntry(object):
    """One password record, stored as a dict inside the Database JSON."""
    # Names of persisted attributes; populated below as each property is
    # defined (class-body statements run at class creation time).
    attributes = set()
    def fill_from_user(self):
        """Ask the user for name/password/text; returns self, or None on cancel."""
        result = ask_add_password(password = new_random_password())
        if result is None:
            return
        name, password, text = result
        return self.fill_from_arguments(name = name,
                                        password = password,
                                        text = text,
                                        deleted = False)
    def fill_from_arguments(self, **attributes):
        return self.fill_from_dict(attributes)
    def fill_from_export(self, export):
        """Fill from an exported dict, silently dropping unknown keys."""
        export = export.copy()
        for attribute in list(export.keys()):
            if attribute not in self.attributes:
                export.pop(attribute)
        export['deleted'] = False
        return self.fill_from_dict(export)
    def fill_from_dict(self, dictionairy):
        """Set all known attributes from `dictionairy` and register self.

        Raises ValueError if it contains unknown attribute names.
        """
        unknown = set(dictionairy.keys()) - self.attributes
        if unknown:
            # Improvement: the debug print() calls were removed; the
            # exception message already carries the same information.
            raise ValueError('Unknown attributes {} should be {}'.format(
                ', '.join(unknown),
                ', '.join(self.attributes)))
        for attribute in self.attributes:
            if attribute in dictionairy:
                setattr(self, attribute, dictionairy[attribute])
        self.database.add_password_entry(self)
        return self
    def __init__(self, database, dictionairy, master_password):
        self.database = database
        # Backing dict; it is the same object stored in Database._passwords.
        self.dictionairy = dictionairy
        self.master_password = master_password
    @property
    def name(self):
        return self.dictionairy[u'name']
    @name.setter
    def name(self, value):
        with self.database:
            self.dictionairy[u'name'] = value
    attributes.add('name')
    @property
    def password(self):
        """Decrypt and return the clear-text password."""
        encrypted_password = self.dictionairy[u'encrypted_password']
        password_salt = self.dictionairy[u'password_salt']
        return self.master_password.decrypt_password(encrypted_password, password_salt)
    @password.setter
    def password(self, password):
        "encrypt the password"
        with self.database:
            password_salt = self.new_salt()
            encrypted_password = self.master_password.encrypt_password(password, password_salt)
            self.dictionairy[u'password_salt'] = password_salt
            self.dictionairy[u'encrypted_password'] = encrypted_password
    attributes.add('password')
    new_salt = staticmethod(new_salt)
    @property
    def text(self):
        return self.dictionairy[u'text']
    @text.setter
    def text(self, value):
        with self.database:
            self.dictionairy[u'text'] = value
    attributes.add('text')
    @property
    def deleted(self):
        return self.dictionairy[u'deleted']
    @deleted.setter
    def deleted(self, value):
        with self.database:
            self.dictionairy[u'deleted'] = value
    attributes.add('deleted')
    def asDict(self):
        """Return the live backing dict (not a copy)."""
        return self.dictionairy
    def remove(self):
        self.database.remove_password_entry(self)
    def export(self):
        """Export name, decrypted password and text as a plain dict."""
        return dict(name = self.name, password = self.password, text = self.text)
    def __eq__(self, other):
        # Entries compare equal to anything equal to their backing dict.
        return other == self.dictionairy
    # Bug fix: __hash__ previously returned hash(self.dictionairy), which
    # always raises TypeError because dicts are unhashable. Mark the class
    # explicitly unhashable instead, matching its mutable-dict semantics
    # (hash() still raises TypeError, now with a clear message).
    __hash__ = None
    def new_entry(self, entry = None):
        """Create a fresh entry in the same database.

        Bug fix: the default used to be the mutable literal `{}`, shared
        between all calls.
        """
        if entry is None:
            entry = {}
        return self.database.new_password_entry(entry)
    def duplicate(self, **attributes):
        """Copy this entry, overriding the given attributes."""
        new_entry = self.new_entry(self.dictionairy.copy())
        new_entry.fill_from_dict(attributes)
        return new_entry
Make password-entry sorting case-insensitive.
import json
from dialog import ask_password, ask_add_password
from encryption import hash_binary, hash_hex
from encryption import encrypt_password, decrypt_password
from encryption import new_salt, new_random_password
import base64
import threading
import os
import traceback
MASTER_PASSWORD_IN_MEMORY_SECONDS = 10
class InvalidMasterPassword(Exception):
    """Raised when an entered master password does not match the stored hash."""
    pass
class TransactionAbort(Exception):
    """Raised by Database.__exit__ when an error aborts a transaction."""
    pass
class Cancel(Exception):
    """Raised when the user cancels a password dialog."""
    pass
class MasterPassword(object):
    """Holds the (hashed) master password in memory for a limited time.

    Only hashes of the password are ever kept: `bytes` is a binary hash used
    as the key material, `hash` its hex digest used for verification.  A
    threading.Timer wipes `bytes` after `seconds_in_memory` seconds without
    use; the next access re-prompts the user.
    """
    _deletion_timer = None  # pending threading.Timer that will wipe _bytes
    _bytes = None           # binary hash of the password (the key material)
    _hash = None            # hex digest of _bytes, used for verification
    def __init__(self, hash = None):
        """hash must be a unicode hexdigest or None"""
        self.hash = hash
    @property
    def seconds_in_memory(self):
        # How long the key material stays cached before being wiped.
        return MASTER_PASSWORD_IN_MEMORY_SECONDS
    def open_ask_dialog(self):
        # Hook point: subclasses/tests can replace the UI prompt.
        return ask_password('master password:')
    def ask(self):
        """Prompt until a matching password is entered; raise Cancel on abort."""
        password = self.open_ask_dialog()
        if password is None:
            raise Cancel("The dialog was canceled.")
        try:
            self.password = password
        except InvalidMasterPassword:
            # Wrong password: prompt again (recursion depth == retry count).
            self.ask()
    @property
    def password(self):
        raise TypeError('the password can not be used. Use bytes instead.')
    @password.setter
    def password(self, password):
        # do not use the original password but the hash instead
        password_bytes = password.encode('UTF-8')
        self.bytes = hash_binary(password_bytes)
        self.hash = hash_hex(self.bytes)
    @property
    def bytes(self):
        """The binary key material; re-prompts the user if it was wiped."""
        if self._bytes is None:
            self.ask()
        else:
            # Each use keeps the key alive a little longer.
            self.refresh_timer()
        return self._bytes
    @bytes.setter
    def bytes(self, bytes):
        self.refresh_timer()
        self._bytes = bytes
    def has_bytes(self):
        # True while the key material is still cached in memory.
        return self._bytes is not None
    def refresh_timer(self):
        """(Re)start the countdown after which the key material is wiped."""
        if self._deletion_timer is not None:
            self._deletion_timer.cancel()
        self._deletion_timer = threading.Timer(self.seconds_in_memory,
            self.delete)
        self._deletion_timer.start()
    @property
    def hash(self):
        """Hex digest of the key material; prompts the user if unknown."""
        if self._hash is None:
            self.ask()
        if self.has_bytes(): # negligible race condition
            assert self._hash == hash_hex(self.bytes)
        return self._hash
    @hash.setter
    def hash(self, hash):
        # Once known, the hash can never be replaced by a different one;
        # a mismatch means the wrong master password was entered.
        assert hash is None or all(character in "0123456789abcdef" for character in hash)
        if self._hash is None:
            self._hash = hash
        elif self._hash != hash:
            raise InvalidMasterPassword("The entered master password does not match the original.")
    def delete(self):
        """Wipe the cached key material (the hex hash is kept)."""
        self._bytes = None
    def decrypt_password(self, encrypted_password, salt):
        return decrypt_password(encrypted_password, salt, self.bytes)
    def encrypt_password(self, password, salt):
        return encrypt_password(password, salt, self.bytes)
class Database(object):
    """JSON-file-backed password store.

    Instances are re-entrant context managers: `with db:` loads the JSON file
    into `config` on first entry and writes it back (plus the generated
    JavaScript viewer) when the outermost block exits cleanly.
    """
    _config = None
    _master_password = None
    javascript_template_path = 'passwords_viewer_template.js'
    javascript_path = 'passwords_viewer.js'
    auto_save_to_javascript = True
    new_master_password = MasterPassword  # factory; overridable in subclasses
    def __init__(self, file_name):
        self.file_name = file_name
        self.with_nesting = 0  # depth of nested `with self:` blocks
    def save_to_javascript(self):
        """Render the JSON database into the JavaScript viewer file."""
        with open(self.file_name) as source_file, \
                open(self.javascript_template_path) as javascript_template_file, \
                open(self.javascript_path, 'w') as javascript_file:
            # Renamed local (was `json`): it shadowed the json module.
            json_data = source_file.read()
            template = javascript_template_file.read()
            source = template.format(passwords = json_data)
            javascript_file.write(source)
    @property
    def master_password(self):
        """the master password of the database"""
        if self._master_password is None:
            with self:
                hash = self.config.get('master_password_hash', None)
                self._master_password = self.new_master_password(hash)
                self.config['master_password_hash'] = self._master_password.hash
        return self._master_password
    @property
    def config(self):
        # Only valid inside a `with self:` transaction.
        if self._config is None:
            raise ValueError('Use the with statement!')
        return self._config
    @config.setter
    def config(self, value):
        self._config = value
    def new_config(self):
        """Initial content for a database file that does not exist yet."""
        return {u'passwords':[]}
    def __enter__(self):
        if self.with_nesting == 0:
            if os.path.isfile(self.file_name):
                with open(self.file_name) as file:
                    self.config = json.load(file)
            else:
                self.config = self.new_config()
        self.with_nesting += 1
    def __exit__(self, ty, err, tb):
        self.with_nesting -= 1
        assert self.with_nesting >= 0
        if self.with_nesting == 0:
            config = self.config
            self.config = None
            # NOTE(review): raising here replaces the original exception with
            # TransactionAbort; kept as-is since callers catch it (import_all).
            if ty is not None or err is not None or tb is not None:
                raise TransactionAbort('An error aborted the transaction.')
            with open(self.file_name, 'w') as file:
                json.dump(config, file)
            if self.auto_save_to_javascript:
                self.save_to_javascript()
    @property
    def _passwords(self):
        return self.config[u'passwords']
    def new_password_entry(self, entry = None):
        # Fix: the default used to be a shared mutable dict (`entry = {}`),
        # so every default-constructed entry aliased the same storage.
        if entry is None:
            entry = {}
        return PasswordEntry(self, entry, self.master_password)
    def add_password_entry(self, entry):
        """Add the entry's backing dict to the store, ignoring duplicates."""
        dict = entry.asDict()
        if dict not in self._passwords:
            self._passwords.append(dict)
    def remove_password_entry(self, entry):
        """Remove every stored occurrence of the entry's backing dict."""
        dict = entry.asDict()
        while dict in self._passwords:
            self._passwords.remove(dict)
    @property
    def passwords(self):
        """the passwords in the database"""
        with self:
            entries = [self.new_password_entry(entry) for entry in self._passwords]
            entries.sort(key = lambda entry: str(entry.name).lower())
            return entries
    def add_new_password_from_user(self):
        """Interactively create a new entry; returns None if canceled."""
        return self.new_password_entry().fill_from_user()
    def add_new_password_from_export(self, export):
        return self.new_password_entry().fill_from_export(export)
    def export_all(self):
        """Export all entries with decrypted passwords."""
        return [entry.export() for entry in self.passwords]
    def export_all_json(self, file):
        return json.dump(self.export_all(), file, sort_keys=True, indent=4)
    def import_all(self, passwords, log_file):
        """Import exported entries one by one, logging failures to log_file."""
        successes = []
        for password in passwords:
            with self:
                try:
                    with self:
                        self.add_new_password_from_export(password)
                    successes.append(password.get("name", str(password)))
                # Fix: the bare `except:` also swallowed KeyboardInterrupt
                # and SystemExit; best-effort semantics are preserved.
                except Exception:
                    traceback.print_exc(file = log_file)
                    print("Failed: ", password, file = log_file)
        for success in successes:
            print("Success:", success, file = log_file)
    def import_all_json(self, file, log_file):
        try:
            passwords = json.load(file)
            self.import_all(passwords, log_file)
        except Exception:
            traceback.print_exc(file = log_file)
class PasswordEntry(object):
    """One password record backed by a plain dict that the Database persists.

    Attribute setters write through to the backing dict inside a database
    transaction.  The class-level `attributes` set is filled at class-body
    time by the `attributes.add(...)` calls after each setter and lists the
    keys accepted by fill_from_dict().
    """
    attributes = set()
    def fill_from_user(self):
        """Ask the user for a new entry; returns None if the dialog is canceled."""
        result = ask_add_password(password = new_random_password())
        if result is None:
            return
        name, password, text = result
        return self.fill_from_arguments(name = name,
                                        password = password,
                                        text = text,
                                        deleted = False)
    def fill_from_arguments(self, **attributes):
        return self.fill_from_dict(attributes)
    def fill_from_export(self, export):
        """Fill from an exported dict, silently dropping unknown keys."""
        export = export.copy()
        for attribute in list(export.keys()):
            if attribute not in self.attributes:
                export.pop(attribute)
        export['deleted'] = False
        return self.fill_from_dict(export)
    def fill_from_dict(self, dictionairy):
        """Set the given attributes and register the entry with the database.

        Raises ValueError if the dict contains keys outside `attributes`.
        """
        if not set(dictionairy.keys()) <= self.attributes:
            # The message names both sets; the debug prints that used to
            # precede it added nothing.
            raise ValueError('Unknown attributes {} should be {}'.format(
                ', '.join(set(dictionairy.keys()) - self.attributes),
                ', '.join(self.attributes)))
        for attribute in self.attributes:
            if attribute in dictionairy:
                setattr(self, attribute, dictionairy[attribute])
        self.database.add_password_entry(self)
        return self
    def __init__(self, database, dictionairy, master_password):
        self.database = database                # owning Database
        self.dictionairy = dictionairy          # backing dict persisted as JSON
        self.master_password = master_password  # MasterPassword for en-/decryption
    @property
    def name(self):
        """The entry's display name."""
        return self.dictionairy[u'name']
    @name.setter
    def name(self, value):
        with self.database:
            self.dictionairy[u'name'] = value
    attributes.add('name')
    @property
    def password(self):
        """The decrypted password."""
        encrypted_password = self.dictionairy[u'encrypted_password']
        password_salt = self.dictionairy[u'password_salt']
        return self.master_password.decrypt_password(encrypted_password, password_salt)
    @password.setter
    def password(self, password):
        "encrypt the password"
        with self.database:
            # a fresh salt per write so equal passwords encrypt differently
            password_salt = self.new_salt()
            encrypted_password = self.master_password.encrypt_password(password, password_salt)
            self.dictionairy[u'password_salt'] = password_salt
            self.dictionairy[u'encrypted_password'] = encrypted_password
    attributes.add('password')
    new_salt = staticmethod(new_salt)
    @property
    def text(self):
        """Free-form notes attached to the entry."""
        return self.dictionairy[u'text']
    @text.setter
    def text(self, value):
        with self.database:
            self.dictionairy[u'text'] = value
    attributes.add('text')
    @property
    def deleted(self):
        """Soft-delete flag."""
        return self.dictionairy[u'deleted']
    @deleted.setter
    def deleted(self, value):
        with self.database:
            self.dictionairy[u'deleted'] = value
    attributes.add('deleted')
    def asDict(self):
        """Return the raw backing dict (shared, not a copy)."""
        return self.dictionairy
    def remove(self):
        self.database.remove_password_entry(self)
    def export(self):
        """Return a plain dict with the decrypted password, for export."""
        return dict(name = self.name, password = self.password, text = self.text)
    def __eq__(self, other):
        # Equal to anything that compares equal to the backing dict.
        return other == self.dictionairy
    def __hash__(self):
        # Fix: hash(self.dictionairy) always raised TypeError because dicts
        # are unhashable.  Hash the item set instead, consistent with __eq__
        # for dict-backed entries.  Entries are mutable, so do not mutate one
        # while it is held in a set or used as a dict key.
        return hash(frozenset(self.dictionairy.items()))
    def new_entry(self, entry = None):
        # Fix: the default used to be a shared mutable dict (`entry = {}`),
        # so all default-constructed entries aliased the same storage.
        if entry is None:
            entry = {}
        return self.database.new_password_entry(entry)
    def duplicate(self, **attributes):
        """Copy this entry, overriding the given attributes."""
        new_entry = self.new_entry(self.dictionairy.copy())
        new_entry.fill_from_dict(attributes)
        return new_entry
|
""" ml.py
This file contains the ML algorithms themselves:
- AUCRegressor: a custom class that optimizes AUC directly
- MLR: a linear regression with non-negativity constraints
- StackedClassifier: a custom class that combines several models
And some related functions:
- find_params: sets the hyperparameters for a given model
Author: Paul Duan <email@paulduan.com>
"""
from __future__ import division
import cPickle as pickle
import itertools
import json
import logging
import multiprocessing
import scipy as sp
import numpy as np
from functools import partial
from operator import itemgetter
from sklearn.metrics import roc_curve, auc
from sklearn.grid_search import GridSearchCV
from sklearn import cross_validation, linear_model
from data import load_from_cache, get_dataset
from utils import stringify, compute_auc
logger = logging.getLogger(__name__)
# Number of trees used by every tree-ensemble model below.
N_TREES = 500

# Hand-tuned starting hyperparameters, keyed by sklearn estimator class name.
# find_params() uses these as the baseline before any grid search.
INITIAL_PARAMS = {
    'LogisticRegression': {'C': 2, 'penalty': 'l2', 'class_weight': 'auto'},
    'RandomForestClassifier': {
        'n_estimators': N_TREES, 'n_jobs': 4,
        'min_samples_leaf': 2, 'bootstrap': False,
        'max_depth': 30, 'min_samples_split': 5, 'max_features': .1
    },
    'ExtraTreesClassifier': {
        'n_estimators': N_TREES, 'n_jobs': 3, 'min_samples_leaf': 2,
        'max_depth': 30, 'min_samples_split': 5, 'max_features': .1,
        'bootstrap': False,
    },
    'GradientBoostingClassifier': {
        'n_estimators': N_TREES, 'learning_rate': .08, 'max_features': 7,
        'min_samples_leaf': 1, 'min_samples_split': 3, 'max_depth': 5,
    },
}

# Hyperparameter search space used by find_params() when grid_search is on.
PARAM_GRID = {
    'LogisticRegression': {'C': [1.5, 2, 2.5, 3, 3.5, 5, 5.5],
                           'class_weight': ['auto']},
    'RandomForestClassifier': {
        'n_jobs': [1], 'max_depth': [15, 20, 25, 30, 35, None],
        'min_samples_split': [1, 3, 5, 7],
        'max_features': [3, 8, 11, 15],
    },
    'ExtraTreesClassifier': {'min_samples_leaf': [2, 3],
                             'n_jobs': [1],
                             'min_samples_split': [1, 2, 5],
                             'bootstrap': [False],
                             'max_depth': [15, 20, 25, 30],
                             'max_features': [1, 3, 5, 11]},
    'GradientBoostingClassifier': {'max_features': [4, 5, 6, 7],
                                   'learning_rate': [.05, .08, .1],
                                   'max_depth': [8, 10, 13]},
}
class AUCRegressor(object):
    """Linear regressor whose coefficients are tuned to maximise AUC directly.

    fit() warm-starts from the ordinary least-squares solution and then runs
    a Nelder-Mead search on the negated AUC of the linear scores.
    """
    def __init__(self):
        self.coef_ = 0
    def _auc_loss(self, coef, X, y):
        # Negated AUC of the linear scores, so that a minimiser maximises AUC.
        fpr, tpr, _ = roc_curve(y, sp.dot(X, coef))
        return -auc(fpr, tpr)
    def fit(self, X, y):
        # Fix: import the submodule explicitly -- `import scipy as sp` alone
        # does not guarantee that `sp.optimize` is available.
        from scipy.optimize import fmin
        lr = linear_model.LinearRegression()
        auc_partial = partial(self._auc_loss, X=X, y=y)
        initial_coef = lr.fit(X, y).coef_
        self.coef_ = fmin(auc_partial, initial_coef)
    def predict(self, X):
        # Plain linear scores.
        return sp.dot(X, self.coef_)
    def score(self, X, y):
        # AUC of the linear scores on (X, y).
        fpr, tpr, _ = roc_curve(y, sp.dot(X, self.coef_))
        return auc(fpr, tpr)
class MLR(object):
    """Linear blender with non-negative coefficients that sum to one.

    fit() solves a non-negative least-squares problem and rescales the
    solution onto the simplex; predict() is then a plain dot product.
    """
    def __init__(self):
        self.coef_ = 0
    def fit(self, X, y):
        # Fix: import the submodule explicitly -- `import scipy as sp` alone
        # does not guarantee that `sp.optimize` is available.
        from scipy.optimize import nnls
        raw_coef = nnls(X, y)[0]
        # Fix: the old map()/lambda recomputed sum() once per element and
        # returned a lazy iterator under Python 3; vectorising fixes both
        # while normalising the weights to sum to 1 exactly as before.
        self.coef_ = np.asarray(raw_coef) / np.sum(raw_coef)
    def predict(self, X):
        # Row-wise weighted sum == matrix-vector product (same values as the
        # old map(sum, coef * X), without the Python-3 breakage).
        return np.dot(np.asarray(X), self.coef_)
    def score(self, X, y):
        fpr, tpr, _ = roc_curve(y, sp.dot(X, self.coef_))
        return auc(fpr, tpr)
class StackedClassifier(object):
"""
Implement stacking to combine several models.
The base (stage 0) models can be either combined through
simple averaging (fastest), or combined using a stage 1 generalizer
(requires computing CV predictions on the train set).
See http://ijcai.org/Past%20Proceedings/IJCAI-97-VOL2/PDF/011.pdf:
"Stacked generalization: when does it work?", Ting and Witten, 1997
For speed and convenience, both fitting and prediction are done
in the same method fit_predict; this is done in order to enable
one to compute metrics on the predictions after training each model without
having to wait for all the models to be trained.
Options:
------------------------------
- models: a list of (model, dataset) tuples that represent stage 0 models
- generalizer: an Estimator object. Must implement fit and predict
- model_selection: boolean. Whether to use brute force search to find the
optimal subset of models that produce the best AUC.
"""
def __init__(self, models, generalizer=None, model_selection=True,
stack=False, fwls=False, use_cached_models=True):
self.cache_dir = "main"
self.models = models
self.model_selection = model_selection
self.stack = stack
self.fwls = fwls
self.generalizer = linear_model.RidgeCV(
alphas=np.linspace(0, 200), cv=100)
self.use_cached_models = use_cached_models
def _combine_preds(self, X_train, X_cv, y, train=None, predict=None,
stack=False, fwls=False):
"""
Combine preds, returning in order:
- mean_preds: the simple average of all model predictions
- stack_preds: the predictions of the stage 1 generalizer
- fwls_preds: same as stack_preds, but optionally using more
complex blending schemes (meta-features, different
generalizers, etc.)
"""
mean_preds = np.mean(X_cv, axis=1)
stack_preds = None
fwls_preds = None
if stack:
self.generalizer.fit(X_train, y)
stack_preds = self.generalizer.predict(X_cv)
if self.fwls:
meta, meta_cv = get_dataset('metafeatures', train, predict)
fwls_train = np.hstack((X_train, meta))
fwls_cv = np.hstack((X_cv, meta))
self.generalizer.fit(fwls_train)
fwls_preds = self.generalizer.predict(fwls_cv)
return mean_preds, stack_preds, fwls_preds
def _find_best_subset(self, y, predictions_list):
"""Finds the combination of models that produce the best AUC."""
best_subset_indices = range(len(predictions_list))
pool = multiprocessing.Pool(processes=4)
partial_compute_subset_auc = partial(compute_subset_auc,
pred_set=predictions_list, y=y)
best_auc = 0
best_n = 0
best_indices = []
if len(predictions_list) == 1:
return [1]
for n in range(int(len(predictions_list)/2), len(predictions_list)):
cb = itertools.combinations(range(len(predictions_list)), n)
combination_results = pool.map(partial_compute_subset_auc, cb)
best_subset_auc, best_subset_indices = max(
combination_results, key=itemgetter(0))
print "- best subset auc (%d models): %.4f > %s" % (
n, best_subset_auc, n, list(best_subset_indices))
if best_subset_auc > best_auc:
best_auc = best_subset_auc
best_n = n
best_indices = list(best_subset_indices)
pool.terminate()
logger.info("best auc: %.4f", best_auc)
logger.info("best n: %d", best_n)
logger.info("best indices: %s", best_indices)
for i, (model, feature_set) in enumerate(self.models):
if i in best_subset_indices:
logger.info("> model: %s (%s)", model.__class__.__name__,
feature_set)
return best_subset_indices
def _get_model_preds(self, model, X_train, X_predict, y_train, cache_file):
"""
Return the model predictions on the prediction set,
using cache if possible.
"""
model_output = load_from_cache(
"models/%s/%s.pkl" % (self.cache_dir, cache_file),
self.use_cached_models)
model_params, model_preds = model_output \
if model_output is not None else (None, None)
if model_preds is None or model_params != model.get_params():
model.fit(X_train, y_train)
model_preds = model.predict_proba(X_predict)[:, 1]
with open("cache/models/%s/%s.pkl" % (
self.cache_dir, cache_file), 'w') as f:
pickle.dump((model.get_params(), model_preds), f)
return model_preds
def _get_model_cv_preds(self, model, X_train, y_train, cache_file):
"""
Return cross-validation predictions on the training set, using cache
if possible.
This is used if stacking is enabled (ie. a second model is used to
combine the stage 0 predictions).
"""
stack_preds = load_from_cache(
"models/%s/cv_preds/%s.pkl" % (self.cache_dir, cache_file),
self.use_cached_models)
if stack_preds is None:
kfold = cross_validation.StratifiedKFold(y_train, 4)
stack_preds = []
indexes_cv = []
for stage0, stack in kfold:
model.fit(X_train[stage0], y_train[stage0])
stack_preds.extend(list(model.predict_proba(
X_train[stack])[:, 1]))
indexes_cv.extend(list(stack))
stack_preds = np.array(stack_preds)[sp.argsort(indexes_cv)]
with open("cache/models/%s/cv_preds/%s%d.pkl" % (
self.cache_dir, cache_file), 'w') as f:
pickle.dump(stack_preds, f, pickle.HIGHEST_PROTOCOL)
return stack_preds
def fit_predict(self, y, train=None, predict=None, show_steps=True):
"""
Fit each model on the appropriate dataset, then return the average
of their individual predictions. If train is specified, use a subset
of the training set to train the models, then predict the outcome of
either the remaining samples or (if given) those specified in cv.
If train is omitted, train the models on the full training set, then
predict the outcome of the full test set.
Options:
------------------------------
- y: numpy array. The full vector of the ground truths.
- train: list. The indices of the elements to be used for training.
If None, take the entire training set.
- predict: list. The indices of the elements to be predicted.
- show_steps: boolean. Whether to compute metrics after each stage
of the computation.
"""
y_train = y[train] if train is not None else y
if train is not None and predict is None:
predict = [i for i in range(len(y)) if i not in train]
stage0_train = []
stage0_predict = []
for model, feature_set in self.models:
X_train, X_predict = get_dataset(feature_set, train, predict)
identifier = train[0] if train is not None else -1
cache_file = stringify(model, feature_set) + str(identifier)
model_preds = self._get_model_preds(
model, X_train, X_predict, y_train, cache_file)
stage0_predict.append(model_preds)
# if stacking, compute cross-validated predictions on the train set
if self.stack:
model_cv_preds = self._get_model_cv_preds(
model, X_train, y_train, cache_file)
stage0_train.append(model_cv_preds)
# verbose mode: compute metrics after every model computation
if show_steps:
if train is not None:
mean_preds, stack_preds, fwls_preds = self._combine_preds(
np.array(stage0_train).T, np.array(stage0_predict).T,
y_train, train, predict,
stack=self.stack, fwls=self.fwls)
model_auc = compute_auc(y[predict], stage0_predict[-1])
mean_auc = compute_auc(y[predict], mean_preds)
stack_auc = compute_auc(y[predict], stack_preds) \
if self.stack else 0
fwls_auc = compute_auc(y[predict], fwls_preds) \
if self.fwls else 0
logger.info(
"> AUC: %.4f (%.4f, %.4f, %.4f) [%s]", model_auc,
mean_auc, stack_auc, fwls_auc,
stringify(model, feature_set))
else:
logger.info("> used model %s:\n%s", stringify(
model, feature_set), model.get_params())
if self.model_selection and predict is not None:
best_subset = self._find_best_subset(y[predict], stage0_predict)
stage0_train = [pred for i, pred in enumerate(stage0_train)
if i in best_subset]
stage0_predict = [pred for i, pred in enumerate(stage0_predict)
if i in best_subset]
mean_preds, stack_preds, fwls_preds = self._combine_preds(
np.array(stage0_train).T, np.array(stage0_predict).T,
y_train, stack=self.stack, fwls=self.fwls)
if self.stack:
selected_preds = stack_preds if not self.fwls else fwls_preds
else:
selected_preds = mean_preds
return selected_preds
def compute_subset_auc(indices, pred_set, y):
    """Average the prediction vectors selected by *indices* and score them.

    Returns a (mean_auc, indices) tuple so callers can feed the results to
    max() on the score while keeping track of which subset produced it.
    """
    chosen = []
    for position, vector in enumerate(pred_set):
        if position in indices:
            chosen.append(vector)
    averaged = sp.mean(chosen, axis=0)
    return compute_auc(y, averaged), indices
def find_params(model, feature_set, y, subsample=None, grid_search=False):
    """
    Return parameter set for the model, either predefined
    or found through grid search.
    """
    model_name = model.__class__.__name__
    params = INITIAL_PARAMS.get(model_name, {})
    y = y if subsample is None else y[subsample]

    try:
        # Previously-found parameters are cached on disk between runs.
        with open('saved_params.json') as f:
            saved_params = json.load(f)
    except IOError:
        saved_params = {}

    if (grid_search and model_name in PARAM_GRID and stringify(
            model, feature_set) not in saved_params):
        # No cached result for this (model, feature set): run the search
        # and persist the winning parameters.
        X, _ = get_dataset(feature_set, subsample, [0])
        clf = GridSearchCV(model, PARAM_GRID[model_name], cv=10, n_jobs=6,
                           scoring="roc_auc")
        clf.fit(X, y)
        logger.info("found params (%s > %.4f): %s",
                    stringify(model, feature_set),
                    clf.best_score_, clf.best_params_)
        params.update(clf.best_params_)
        saved_params[stringify(model, feature_set)] = params
        with open('saved_params.json', 'w') as f:
            json.dump(saved_params, f, indent=4, separators=(',', ': '),
                      ensure_ascii=True, sort_keys=True)
    else:
        # Reuse cached parameters when available.
        params.update(saved_params.get(stringify(model, feature_set), {}))
        if grid_search:
            logger.info("using params %s: %s", stringify(model, feature_set),
                        params)

    return params
Updated the module docstring.
"""ml.py
This is the file that does the heavy lifting.
It contains the ML algorithms themselves:
- AUCRegressor: a custom class that optimizes AUC directly
- MLR: a linear regression with non-negativity constraints
- StackedClassifier: a custom class that combines several models
And some related functions:
- find_params: sets the hyperparameters for a given model
Author: Paul Duan <email@paulduan.com>
"""
from __future__ import division
import cPickle as pickle
import itertools
import json
import logging
import multiprocessing
import scipy as sp
import numpy as np
from functools import partial
from operator import itemgetter
from sklearn.metrics import roc_curve, auc
from sklearn.grid_search import GridSearchCV
from sklearn import cross_validation, linear_model
from data import load_from_cache, get_dataset
from utils import stringify, compute_auc
logger = logging.getLogger(__name__)
# Number of trees used by every tree-ensemble model below.
N_TREES = 500

# Hand-tuned starting hyperparameters, keyed by sklearn estimator class name.
# find_params() uses these as the baseline before any grid search.
INITIAL_PARAMS = {
    'LogisticRegression': {'C': 2, 'penalty': 'l2', 'class_weight': 'auto'},
    'RandomForestClassifier': {
        'n_estimators': N_TREES, 'n_jobs': 4,
        'min_samples_leaf': 2, 'bootstrap': False,
        'max_depth': 30, 'min_samples_split': 5, 'max_features': .1
    },
    'ExtraTreesClassifier': {
        'n_estimators': N_TREES, 'n_jobs': 3, 'min_samples_leaf': 2,
        'max_depth': 30, 'min_samples_split': 5, 'max_features': .1,
        'bootstrap': False,
    },
    'GradientBoostingClassifier': {
        'n_estimators': N_TREES, 'learning_rate': .08, 'max_features': 7,
        'min_samples_leaf': 1, 'min_samples_split': 3, 'max_depth': 5,
    },
}

# Hyperparameter search space used by find_params() when grid_search is on.
PARAM_GRID = {
    'LogisticRegression': {'C': [1.5, 2, 2.5, 3, 3.5, 5, 5.5],
                           'class_weight': ['auto']},
    'RandomForestClassifier': {
        'n_jobs': [1], 'max_depth': [15, 20, 25, 30, 35, None],
        'min_samples_split': [1, 3, 5, 7],
        'max_features': [3, 8, 11, 15],
    },
    'ExtraTreesClassifier': {'min_samples_leaf': [2, 3],
                             'n_jobs': [1],
                             'min_samples_split': [1, 2, 5],
                             'bootstrap': [False],
                             'max_depth': [15, 20, 25, 30],
                             'max_features': [1, 3, 5, 11]},
    'GradientBoostingClassifier': {'max_features': [4, 5, 6, 7],
                                   'learning_rate': [.05, .08, .1],
                                   'max_depth': [8, 10, 13]},
}
class AUCRegressor(object):
    """Linear regressor whose coefficients are tuned to maximise AUC directly.

    fit() warm-starts from the ordinary least-squares solution and then runs
    a Nelder-Mead search on the negated AUC of the linear scores.
    """
    def __init__(self):
        self.coef_ = 0
    def _auc_loss(self, coef, X, y):
        # Negated AUC of the linear scores, so that a minimiser maximises AUC.
        fpr, tpr, _ = roc_curve(y, sp.dot(X, coef))
        return -auc(fpr, tpr)
    def fit(self, X, y):
        # Fix: import the submodule explicitly -- `import scipy as sp` alone
        # does not guarantee that `sp.optimize` is available.
        from scipy.optimize import fmin
        lr = linear_model.LinearRegression()
        auc_partial = partial(self._auc_loss, X=X, y=y)
        initial_coef = lr.fit(X, y).coef_
        self.coef_ = fmin(auc_partial, initial_coef)
    def predict(self, X):
        # Plain linear scores.
        return sp.dot(X, self.coef_)
    def score(self, X, y):
        # AUC of the linear scores on (X, y).
        fpr, tpr, _ = roc_curve(y, sp.dot(X, self.coef_))
        return auc(fpr, tpr)
class MLR(object):
    """Linear blender with non-negative coefficients that sum to one.

    fit() solves a non-negative least-squares problem and rescales the
    solution onto the simplex; predict() is then a plain dot product.
    """
    def __init__(self):
        self.coef_ = 0
    def fit(self, X, y):
        # Fix: import the submodule explicitly -- `import scipy as sp` alone
        # does not guarantee that `sp.optimize` is available.
        from scipy.optimize import nnls
        raw_coef = nnls(X, y)[0]
        # Fix: the old map()/lambda recomputed sum() once per element and
        # returned a lazy iterator under Python 3; vectorising fixes both
        # while normalising the weights to sum to 1 exactly as before.
        self.coef_ = np.asarray(raw_coef) / np.sum(raw_coef)
    def predict(self, X):
        # Row-wise weighted sum == matrix-vector product (same values as the
        # old map(sum, coef * X), without the Python-3 breakage).
        return np.dot(np.asarray(X), self.coef_)
    def score(self, X, y):
        fpr, tpr, _ = roc_curve(y, sp.dot(X, self.coef_))
        return auc(fpr, tpr)
class StackedClassifier(object):
"""
Implement stacking to combine several models.
The base (stage 0) models can be either combined through
simple averaging (fastest), or combined using a stage 1 generalizer
(requires computing CV predictions on the train set).
See http://ijcai.org/Past%20Proceedings/IJCAI-97-VOL2/PDF/011.pdf:
"Stacked generalization: when does it work?", Ting and Witten, 1997
For speed and convenience, both fitting and prediction are done
in the same method fit_predict; this is done in order to enable
one to compute metrics on the predictions after training each model without
having to wait for all the models to be trained.
Options:
------------------------------
- models: a list of (model, dataset) tuples that represent stage 0 models
- generalizer: an Estimator object. Must implement fit and predict
- model_selection: boolean. Whether to use brute force search to find the
optimal subset of models that produce the best AUC.
"""
def __init__(self, models, generalizer=None, model_selection=True,
stack=False, fwls=False, use_cached_models=True):
self.cache_dir = "main"
self.models = models
self.model_selection = model_selection
self.stack = stack
self.fwls = fwls
self.generalizer = linear_model.RidgeCV(
alphas=np.linspace(0, 200), cv=100)
self.use_cached_models = use_cached_models
def _combine_preds(self, X_train, X_cv, y, train=None, predict=None,
stack=False, fwls=False):
"""
Combine preds, returning in order:
- mean_preds: the simple average of all model predictions
- stack_preds: the predictions of the stage 1 generalizer
- fwls_preds: same as stack_preds, but optionally using more
complex blending schemes (meta-features, different
generalizers, etc.)
"""
mean_preds = np.mean(X_cv, axis=1)
stack_preds = None
fwls_preds = None
if stack:
self.generalizer.fit(X_train, y)
stack_preds = self.generalizer.predict(X_cv)
if self.fwls:
meta, meta_cv = get_dataset('metafeatures', train, predict)
fwls_train = np.hstack((X_train, meta))
fwls_cv = np.hstack((X_cv, meta))
self.generalizer.fit(fwls_train)
fwls_preds = self.generalizer.predict(fwls_cv)
return mean_preds, stack_preds, fwls_preds
def _find_best_subset(self, y, predictions_list):
"""Finds the combination of models that produce the best AUC."""
best_subset_indices = range(len(predictions_list))
pool = multiprocessing.Pool(processes=4)
partial_compute_subset_auc = partial(compute_subset_auc,
pred_set=predictions_list, y=y)
best_auc = 0
best_n = 0
best_indices = []
if len(predictions_list) == 1:
return [1]
for n in range(int(len(predictions_list)/2), len(predictions_list)):
cb = itertools.combinations(range(len(predictions_list)), n)
combination_results = pool.map(partial_compute_subset_auc, cb)
best_subset_auc, best_subset_indices = max(
combination_results, key=itemgetter(0))
print "- best subset auc (%d models): %.4f > %s" % (
n, best_subset_auc, n, list(best_subset_indices))
if best_subset_auc > best_auc:
best_auc = best_subset_auc
best_n = n
best_indices = list(best_subset_indices)
pool.terminate()
logger.info("best auc: %.4f", best_auc)
logger.info("best n: %d", best_n)
logger.info("best indices: %s", best_indices)
for i, (model, feature_set) in enumerate(self.models):
if i in best_subset_indices:
logger.info("> model: %s (%s)", model.__class__.__name__,
feature_set)
return best_subset_indices
def _get_model_preds(self, model, X_train, X_predict, y_train, cache_file):
"""
Return the model predictions on the prediction set,
using cache if possible.
"""
model_output = load_from_cache(
"models/%s/%s.pkl" % (self.cache_dir, cache_file),
self.use_cached_models)
model_params, model_preds = model_output \
if model_output is not None else (None, None)
if model_preds is None or model_params != model.get_params():
model.fit(X_train, y_train)
model_preds = model.predict_proba(X_predict)[:, 1]
with open("cache/models/%s/%s.pkl" % (
self.cache_dir, cache_file), 'w') as f:
pickle.dump((model.get_params(), model_preds), f)
return model_preds
def _get_model_cv_preds(self, model, X_train, y_train, cache_file):
"""
Return cross-validation predictions on the training set, using cache
if possible.
This is used if stacking is enabled (ie. a second model is used to
combine the stage 0 predictions).
"""
stack_preds = load_from_cache(
"models/%s/cv_preds/%s.pkl" % (self.cache_dir, cache_file),
self.use_cached_models)
if stack_preds is None:
kfold = cross_validation.StratifiedKFold(y_train, 4)
stack_preds = []
indexes_cv = []
for stage0, stack in kfold:
model.fit(X_train[stage0], y_train[stage0])
stack_preds.extend(list(model.predict_proba(
X_train[stack])[:, 1]))
indexes_cv.extend(list(stack))
stack_preds = np.array(stack_preds)[sp.argsort(indexes_cv)]
with open("cache/models/%s/cv_preds/%s%d.pkl" % (
self.cache_dir, cache_file), 'w') as f:
pickle.dump(stack_preds, f, pickle.HIGHEST_PROTOCOL)
return stack_preds
def fit_predict(self, y, train=None, predict=None, show_steps=True):
"""
Fit each model on the appropriate dataset, then return the average
of their individual predictions. If train is specified, use a subset
of the training set to train the models, then predict the outcome of
either the remaining samples or (if given) those specified in cv.
If train is omitted, train the models on the full training set, then
predict the outcome of the full test set.
Options:
------------------------------
- y: numpy array. The full vector of the ground truths.
- train: list. The indices of the elements to be used for training.
If None, take the entire training set.
- predict: list. The indices of the elements to be predicted.
- show_steps: boolean. Whether to compute metrics after each stage
of the computation.
"""
y_train = y[train] if train is not None else y
if train is not None and predict is None:
predict = [i for i in range(len(y)) if i not in train]
stage0_train = []
stage0_predict = []
for model, feature_set in self.models:
X_train, X_predict = get_dataset(feature_set, train, predict)
identifier = train[0] if train is not None else -1
cache_file = stringify(model, feature_set) + str(identifier)
model_preds = self._get_model_preds(
model, X_train, X_predict, y_train, cache_file)
stage0_predict.append(model_preds)
# if stacking, compute cross-validated predictions on the train set
if self.stack:
model_cv_preds = self._get_model_cv_preds(
model, X_train, y_train, cache_file)
stage0_train.append(model_cv_preds)
# verbose mode: compute metrics after every model computation
if show_steps:
if train is not None:
mean_preds, stack_preds, fwls_preds = self._combine_preds(
np.array(stage0_train).T, np.array(stage0_predict).T,
y_train, train, predict,
stack=self.stack, fwls=self.fwls)
model_auc = compute_auc(y[predict], stage0_predict[-1])
mean_auc = compute_auc(y[predict], mean_preds)
stack_auc = compute_auc(y[predict], stack_preds) \
if self.stack else 0
fwls_auc = compute_auc(y[predict], fwls_preds) \
if self.fwls else 0
logger.info(
"> AUC: %.4f (%.4f, %.4f, %.4f) [%s]", model_auc,
mean_auc, stack_auc, fwls_auc,
stringify(model, feature_set))
else:
logger.info("> used model %s:\n%s", stringify(
model, feature_set), model.get_params())
if self.model_selection and predict is not None:
best_subset = self._find_best_subset(y[predict], stage0_predict)
stage0_train = [pred for i, pred in enumerate(stage0_train)
if i in best_subset]
stage0_predict = [pred for i, pred in enumerate(stage0_predict)
if i in best_subset]
mean_preds, stack_preds, fwls_preds = self._combine_preds(
np.array(stage0_train).T, np.array(stage0_predict).T,
y_train, stack=self.stack, fwls=self.fwls)
if self.stack:
selected_preds = stack_preds if not self.fwls else fwls_preds
else:
selected_preds = mean_preds
return selected_preds
def compute_subset_auc(indices, pred_set, y):
    """Average the prediction vectors selected by *indices* and score them.

    Returns a (mean_auc, indices) tuple so callers can feed the results to
    max() on the score while keeping track of which subset produced it.
    """
    chosen = []
    for position, vector in enumerate(pred_set):
        if position in indices:
            chosen.append(vector)
    averaged = sp.mean(chosen, axis=0)
    return compute_auc(y, averaged), indices
def find_params(model, feature_set, y, subsample=None, grid_search=False):
    """
    Return parameter set for the model, either predefined
    or found through grid search.

    Parameters:
        model: estimator; its class name keys into INITIAL_PARAMS/PARAM_GRID
        feature_set: feature-set identifier, part of the on-disk cache key
        y: target vector; restricted to *subsample* rows when given
        subsample: optional row indices used for the grid-search fit
        grid_search: when True, run GridSearchCV unless a cached result exists

    Returns:
        dict of model parameters (defaults overlaid with found/cached values).
    """
    model_name = model.__class__.__name__
    params = INITIAL_PARAMS.get(model_name, {})
    y = y if subsample is None else y[subsample]
    # Best parameters found previously are persisted on disk, keyed by
    # stringify(model, feature_set); a missing file just means no cache yet.
    try:
        with open('saved_params.json') as f:
            saved_params = json.load(f)
    except IOError:
        saved_params = {}
    if (grid_search and model_name in PARAM_GRID and stringify(
            model, feature_set) not in saved_params):
        # Not cached: run a 10-fold, AUC-scored grid search on this feature
        # set, then persist the winning parameters for future runs.
        X, _ = get_dataset(feature_set, subsample, [0])
        clf = GridSearchCV(model, PARAM_GRID[model_name], cv=10, n_jobs=6,
                           scoring="roc_auc")
        clf.fit(X, y)
        logger.info("found params (%s > %.4f): %s",
                    stringify(model, feature_set),
                    clf.best_score_, clf.best_params_)
        params.update(clf.best_params_)
        saved_params[stringify(model, feature_set)] = params
        with open('saved_params.json', 'w') as f:
            json.dump(saved_params, f, indent=4, separators=(',', ': '),
                      ensure_ascii=True, sort_keys=True)
    else:
        # Use cached parameters (if any) on top of the predefined defaults.
        params.update(saved_params.get(stringify(model, feature_set), {}))
        if grid_search:
            logger.info("using params %s: %s", stringify(model, feature_set),
                        params)
    return params
|
# -*- coding: utf-8 -*-
#
# privacyIDEA documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 13 07:31:01 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0.2'
# The full version, including alpha/beta/rc tags.
#release = '2.16dev5'
release = version
import sys
import os
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
#MOCK_MODULES = ['pandas', 'pyOpenSSL']
MOCK_MODULES = []
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# Monkey-patch functools.wraps
# http://stackoverflow.com/questions/28366818/preserve-default-arguments-of-wrapped-decorated-python-function-in-sphinx-docume
import functools
def no_op_wraps(func, assigned=None, updated=None):
    """Replaces functools.wraps in order to undo wrapping.

    Can be used to preserve the decorated function's signature
    in the documentation generated by Sphinx.

    Accepts (and ignores) *assigned* and *updated* so the replacement has
    the same signature as functools.wraps; callers passing those arguments
    previously crashed the docs build with a TypeError.
    """
    def wrapper(decorator):
        return func
    return wrapper
functools.wraps = no_op_wraps
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('_themes/flask-sphinx-themes'))
sys.path.insert(0, os.path.abspath('../privacyidea'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.imgmath', 'sphinx.ext.viewcode',
'sphinxcontrib.autohttp.flask']
http_index_ignore_prefixes = ['/token']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'privacyIDEA'
copyright = u'2014-2017, Cornelius Kölbel'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'sphinxdoc'
#html_theme = 'sphinx_rtd_theme'
#html_theme = 'agogo'
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes/flask-sphinx-themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "images/privacyidea-color.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'privacyIDEAdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'privacyIDEA.tex', u'privacyIDEA Authentication System',
u'Cornelius Kölbel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'privacyidea-server', u'privacyIDEA Authentication System',
[u'Cornelius Kölbel'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Fix typo in the Texinfo title: "AUthentication" -> "Authentication".
texinfo_documents = [
    ('index', 'privacyIDEA', u'privacyIDEA Authentication System',
     u'Cornelius Kölbel', 'privacyIDEA', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
Fix building of docs
* The mock module is not part of doc/requirements.txt, which breaks
the build. It is not needed.
* doc/conf.py replaces functools.wraps with a no-op. Up until now,
  the no-op did not have the same signature as functools.wraps,
  which caused the build to fail.
# -*- coding: utf-8 -*-
#
# privacyIDEA documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 13 07:31:01 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0.2'
# The full version, including alpha/beta/rc tags.
#release = '2.16dev5'
release = version
import sys
import os
# Monkey-patch functools.wraps
# http://stackoverflow.com/questions/28366818/preserve-default-arguments-of-wrapped-decorated-python-function-in-sphinx-docume
import functools
def no_op_wraps(func, assigned=None, updated=None):
    """Stand-in for functools.wraps that disables wrapping entirely.

    Returning the undecorated *func* preserves its original signature in
    the documentation generated by Sphinx. The *assigned* and *updated*
    arguments exist only for signature compatibility and are ignored.
    """
    def fake_wraps(decorator):
        # Discard the decorator and hand back the bare function.
        return func
    return fake_wraps
functools.wraps = no_op_wraps
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('_themes/flask-sphinx-themes'))
sys.path.insert(0, os.path.abspath('../privacyidea'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.imgmath', 'sphinx.ext.viewcode',
'sphinxcontrib.autohttp.flask']
http_index_ignore_prefixes = ['/token']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'privacyIDEA'
copyright = u'2014-2017, Cornelius Kölbel'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'sphinxdoc'
#html_theme = 'sphinx_rtd_theme'
#html_theme = 'agogo'
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes/flask-sphinx-themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "images/privacyidea-color.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'privacyIDEAdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'privacyIDEA.tex', u'privacyIDEA Authentication System',
u'Cornelius Kölbel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'privacyidea-server', u'privacyIDEA Authentication System',
[u'Cornelius Kölbel'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Fix typo in the Texinfo title: "AUthentication" -> "Authentication".
texinfo_documents = [
    ('index', 'privacyIDEA', u'privacyIDEA Authentication System',
     u'Cornelius Kölbel', 'privacyIDEA', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
#-------------------------------------------------------------------------------
import sqlite3
import bcrypt
import os
import shutil
import data
import htpasswd
import re
from email.utils import parseaddr
#-------------------------------------------------------------------------------
BCRYPT_ROUNDS = 5
template = 'template.db'
database = 'database.db'
passwdfile = 'htpasswd.db'
# JOB STATE
JOB_CREATED = 0 # Just Created
JOB_SUBMITTED = 1 # Submitted
JOB_RUNNING = 2 # Running
JOB_COMPLETED = 3 # Completed
# JOB FILE TYPE
FILEIN = 0
FILEOUT = 1
EMAIL_REGEX = re.compile(r"[^@ ]+@[^@ ]+\.[^@ ]+")
#-------------------------------------------------------------------------------
def mkEmptyDatabase( dbname ):
    """Create a fresh, empty job database at *dbname*.

    Any existing file at that path is removed first, then the full schema
    (user, file, job, jobslurm, jobfile) is created from scratch.
    """
    if os.path.isfile( dbname ):
        os.remove( dbname )
    conn = sqlite3.connect( dbname )
    cur = conn.cursor()
    schema = (
        "CREATE TABLE user (uid INTEGER PRIMARY KEY AUTOINCREMENT, name text, passwd text, email text, UNIQUE(name))",
        "CREATE TABLE file (fid INTEGER PRIMARY KEY AUTOINCREMENT, uid INTEGER, global INTEGER, filename text, filetype INTEGER)",
        "CREATE TABLE job (jid INTEGER PRIMARY KEY AUTOINCREMENT, uid INTEGER, state INTEGER)",
        "CREATE TABLE jobslurm (jid INTEGER, slurmid INTEGER, PRIMARY KEY(jid, slurmid), FOREIGN KEY(jid) REFERENCES job(jid) )",
        "CREATE TABLE jobfile (jid INTEGER, fid INTEGER, jobfiletype INTEGER, PRIMARY KEY(jid, fid) )",
    )
    for ddl in schema:
        cur.execute( ddl )
        conn.commit()
    conn.close()
#-------------------------------------------------------------------------------
def clearDB():
    """Recreate the template database and delete the working database."""
    mkEmptyDatabase( template )
    if os.path.isfile( database ):
        os.remove( database )
#-------------------------------------------------------------------------------
def init():
    """Ensure the template DB, working DB, htpasswd file and admin user exist.

    The working database is seeded by copying the template. When the
    htpasswd file is first created, an 'admin' user is inserted whose
    password equals the user name.
    """
    if not os.path.isfile( template ):
        clearDB()
    if not os.path.isfile( database ):
        shutil.copy( template, database )
    # create empty password file
    if not os.path.isfile( passwdfile ):
        open( passwdfile, 'w' ).close()
        name = 'admin' # same name and passwd
        insertUser( name, name, "j.smith@example.com" )
#-------------------------------------------------------------------------------
def insertUser( name, passwd, email ):
    """Create a user in both the sqlite database and the htpasswd file.

    Prints an error and returns without inserting when the email address
    is invalid or when the user already exists in either store.
    """
    # parseaddr splits "Real Name <addr>"; only the address part is used.
    # NOTE(review): checkedName is never used afterwards.
    checkedName, checkedEmail = parseaddr( email )
    if len( checkedEmail ) == 0 or not EMAIL_REGEX.match( checkedEmail):
        print "ERROR: Invalid email ", email
        return
    # bcrypt hash is stored in sqlite; htpasswd gets the plain password below.
    h = bcrypt.hashpw( passwd, bcrypt.gensalt(BCRYPT_ROUNDS) )
    conn = sqlite3.connect( database )
    try:
        with conn:
            conn.execute( 'INSERT INTO user VALUES (null,?,?,?)', (name,h,checkedEmail) )
    except sqlite3.IntegrityError:
        # UNIQUE(name) constraint violated -> user already present
        print "ERROR: User Already Exists ", name
        return
    try:
        with htpasswd.Basic( passwdfile ) as userdb:
            userdb.add( name, passwd )
    except htpasswd.basic.UserExists, e:
        print "ERROR: User Already Exists ", name, e
#-------------------------------------------------------------------------------
def changeUserPassword( name, newpass ):
    """Change *name*'s password in the htpasswd file and sqlite DB.

    Returns True on success, False when the user does not exist in the
    htpasswd file or the database update fails. The htpasswd file is
    updated first; on failure the sqlite hash is left untouched.
    """
    try:
        with htpasswd.Basic( passwdfile ) as userdb:
            userdb.change_password( name, newpass )
    except htpasswd.basic.UserNotExists, e:
        print "ERROR: User Not Exists ", name, e
        return False
    h = bcrypt.hashpw( newpass, bcrypt.gensalt(BCRYPT_ROUNDS) )
    conn = sqlite3.connect( database )
    try:
        with conn:
            conn.execute( 'UPDATE user SET passwd=? WHERE name=?', (h,name) )
    except:
        # NOTE(review): bare except hides the real error; consider narrowing.
        print "ERROR: changing password ", name
        return False
    return True
#-------------------------------------------------------------------------------
def checkUser( name, passwd ):
    """Return True when *passwd* matches the stored bcrypt hash for *name*."""
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'SELECT passwd FROM user WHERE name=?', (name,) )
    val = c.fetchone()
    conn.close()
    if val is not None:
        # Hashing the candidate with the stored hash as salt reproduces the
        # stored hash exactly iff the password is correct.
        return bcrypt.hashpw( passwd, val[0] ) == val[0]
    return False
#-------------------------------------------------------------------------------
def insertFile( user, filename ):
    """Register *filename* for *user* with the default input file type.

    Delegates to insertFileWithType (FILEIN == 0 matches the previously
    hard-coded filetype), removing duplicated lookup/insert logic.
    Raises DataBaseError when the user does not exist; silently does
    nothing when the (user, filename) pair is already registered.
    """
    insertFileWithType( user, filename, FILEIN )
#-------------------------------------------------------------------------------
def insertFileWithType( user, filename, filetype ):
    """Register *filename* of *filetype* for *user*, unless already present.

    Raises DataBaseError when the user does not exist; silently does
    nothing when the (user, filename) pair is already registered.
    """
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'SELECT uid FROM user WHERE name=?', (user,) )
    uid = c.fetchone()
    if uid is not None:
        c.execute( 'SELECT fid FROM file WHERE uid=? AND filename=?', (uid[0],filename) )
        exists = c.fetchone()
        if exists is None:
            # global flag 0: this is a per-user (non-shared) file
            c.execute( 'INSERT INTO file VALUES (null,?,?,?,?)', (uid[0],0,filename,filetype) )
            conn.commit()
        conn.close()
    else:
        conn.close()
        raise DataBaseError
#-------------------------------------------------------------------------------
def createFile( userid, filename ):
    """Create a non-global file row for *userid* and return the new fid.

    Delegates to createFileWithType (FILEIN == 0 matches the previously
    hard-coded filetype), removing duplicated insert logic.
    """
    return createFileWithType( userid, filename, FILEIN )
#-------------------------------------------------------------------------------
def createFileWithType( userid, filename, filetype ):
    """Insert a non-global file row for *userid*; return its new fid."""
    conn = sqlite3.connect( database )
    cur = conn.cursor()
    # global flag is 0: the file belongs to this user only
    cur.execute( 'INSERT INTO file VALUES (null,?,?,?,?)', (userid,0,filename,filetype) )
    cur.execute( 'SELECT last_insert_rowid() FROM file' )
    new_fid = cur.fetchone()[0]
    conn.commit()
    conn.close()
    return new_fid
#-------------------------------------------------------------------------------
def getUserFiles( user ):
    """Return [{'id': fid, 'file': name}, ...] visible to *user*.

    Includes the user's own files plus all global files; an unknown user
    yields an empty list.
    """
    conn = sqlite3.connect( database )
    cur = conn.cursor()
    cur.execute( 'SELECT uid FROM user WHERE name=?', (user,) )
    row = cur.fetchone()
    result = []
    if row is not None:
        cur.execute( 'SELECT fid,filename FROM file WHERE uid=? OR global=1', (row[0],) )
        result = [ {'id': fid, 'file': fname} for fid, fname in cur.fetchall() ]
    conn.close()
    return result
#-------------------------------------------------------------------------------
def getUserFilesWithType( user, filetype ):
    """Return [{'id': fid, 'file': name}, ...] of *filetype* visible to *user*.

    Covers the user's own files plus globals; an unknown user yields [].
    """
    conn = sqlite3.connect( database )
    cur = conn.cursor()
    cur.execute( 'SELECT uid FROM user WHERE name=?', (user,) )
    row = cur.fetchone()
    found = []
    if row is not None:
        cur.execute( 'SELECT fid,filename FROM file WHERE (uid=? OR global=1) AND filetype=?', (row[0],filetype) )
        found = [ {'id': fid, 'file': fname} for fid, fname in cur.fetchall() ]
    conn.close()
    return found
#-------------------------------------------------------------------------------
def getFileFullName( fid ):
    """Resolve file id *fid* to its on-disk path.

    Raises DataBaseError when the file or its owning user is missing.
    """
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'SELECT uid,filename FROM file WHERE fid=?', (fid,) )
    fdata = c.fetchone()
    if fdata is None:
        conn.close()
        raise DataBaseError
    c.execute('SELECT name FROM user WHERE uid=?', (fdata[0],) )
    udata = c.fetchone()
    if udata is None:
        conn.close()
        raise DataBaseError
    conn.close()
    # Path layout is delegated to the data module (user name + file name).
    return data.getUserFilename( udata[0], fdata[1] )
#-------------------------------------------------------------------------------
def isFileAllowedFromUser( fileid, user ):
    """Return True when *fileid* is a global file or is owned by *user*."""
    conn = sqlite3.connect( database )
    cur = conn.cursor()
    cur.execute( 'SELECT uid FROM user WHERE name=?', (user,) )
    owner = cur.fetchone()
    if owner is None:
        conn.close()
        return False
    cur.execute( 'SELECT uid,global FROM file WHERE fid=?', (fileid,) )
    frec = cur.fetchone()
    conn.close()
    if frec is None:
        return False
    # global files are readable by everyone; otherwise owner must match
    return frec[1] != 0 or frec[0] == owner[0]
#-------------------------------------------------------------------------------
def createJob( user ):
    """Create a job for *user* in the JOB_CREATED state; return its jid.

    Raises DataBaseError when the user does not exist. Uses the named
    state constant via a bind parameter instead of hard-coding 0 in the
    SQL text, matching the JOB_* constants defined at module level.
    """
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'SELECT uid FROM user WHERE name=?', (user,) )
    uid = c.fetchone()
    if uid is None:
        conn.close()
        raise DataBaseError
    c.execute( 'INSERT INTO job VALUES (null,?,?)', (uid[0], JOB_CREATED) )
    c.execute( 'SELECT last_insert_rowid() FROM job' )
    jobid = c.fetchone()[0]
    conn.commit()
    conn.close()
    return jobid
#-------------------------------------------------------------------------------
def getUserJobs( user ):
    """Return [{'id': jid}, ...] for every job belonging to *user*.

    An unknown user yields an empty list.
    """
    conn = sqlite3.connect( database )
    cur = conn.cursor()
    cur.execute( 'SELECT uid FROM user WHERE name=?', (user,) )
    row = cur.fetchone()
    jobs = []
    if row is not None:
        cur.execute( 'SELECT jid FROM job WHERE uid=?', (row[0],) )
        jobs = [ {'id': jid} for (jid,) in cur.fetchall() ]
    conn.close()
    return jobs
#-------------------------------------------------------------------------------
def addJobFile( jobid, fileid, jftype ):
    """Attach file *fileid* to job *jobid* with role *jftype* (FILEIN/FILEOUT)."""
    conn = sqlite3.connect( database )
    conn.execute( 'INSERT INTO jobfile VALUES (?,?,?)', (jobid,fileid,jftype) )
    conn.commit()
    conn.close()
#-------------------------------------------------------------------------------
def addJobSlurmRef( jobid, slurmid ):
    """Record that job *jobid* is backed by slurm job *slurmid*."""
    conn = sqlite3.connect( database )
    conn.execute( 'INSERT INTO jobslurm VALUES (?,?)', (jobid,slurmid) )
    conn.commit()
    conn.close()
#-------------------------------------------------------------------------------
def setJobSubmitted( jobid ):
    """Mark job *jobid* as submitted.

    Uses the module-level JOB_SUBMITTED constant via a bind parameter
    instead of hard-coding the state number in the SQL text.
    """
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'UPDATE job SET state=? WHERE jid=?', (JOB_SUBMITTED, jobid) )
    conn.commit()
    conn.close()
#-------------------------------------------------------------------------------
def setJobRunning( jobid ):
    """Mark job *jobid* as running.

    Uses the module-level JOB_RUNNING constant via a bind parameter
    instead of hard-coding the state number in the SQL text.
    """
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'UPDATE job SET state=? WHERE jid=?', (JOB_RUNNING, jobid) )
    conn.commit()
    conn.close()
#-------------------------------------------------------------------------------
def setJobCompleted( jobid ):
    """Mark job *jobid* as completed.

    Uses the module-level JOB_COMPLETED constant via a bind parameter
    instead of hard-coding the state number in the SQL text.
    """
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'UPDATE job SET state=? WHERE jid=?', (JOB_COMPLETED, jobid) )
    conn.commit()
    conn.close()
#-------------------------------------------------------------------------------
def getJobInfo( jobid ):
    """Return a full description of job *jobid*.

    Result: {'jobid', 'state', 'slurmids': [{'slurmid': n}, ...],
             'files': [{'fid', 'name', 'type'}, ...]}.
    Raises DataBaseError when the job, or any file referenced by it,
    does not exist.
    """
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute('SELECT state FROM job WHERE jid=?', (jobid,) )
    jdata = c.fetchone()
    if jdata is None:
        conn.close()
        raise DataBaseError
    # collect attached files, resolving each fid to its filename
    c.execute('SELECT fid,jobfiletype FROM jobfile WHERE jid=?', (jobid,) )
    jfiles = c.fetchall()
    files = []
    for jf in jfiles:
        c.execute('SELECT filename FROM file WHERE fid=?', (jf[0],) )
        fdata = c.fetchone()
        if fdata is None:
            conn.close()
            raise DataBaseError
        files.append( {'fid': jf[0], 'name': fdata[0], 'type': jf[1] } )
    # collect the slurm job ids backing this job
    c.execute('SELECT slurmid FROM jobslurm WHERE jid=?', (jobid,) )
    jslurms = c.fetchall()
    slurms = []
    for js in jslurms:
        slurms.append( {'slurmid': js[0] } )
    conn.close()
    return { 'jobid': jobid, 'state': jdata[0], 'slurmids': slurms, 'files': files }
#-------------------------------------------------------------------------------
def getActiveJobs():
    """Return [{'jid', 'uid', 'state'}, ...] for submitted or running jobs.

    Bug fix: the connection was previously never closed, leaking a database
    handle on every call. Also uses the named JOB_* state constants via
    bind parameters instead of literal state numbers in the SQL text.
    """
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'SELECT jid,uid,state FROM job WHERE state=? OR state=?',
               (JOB_SUBMITTED, JOB_RUNNING) )
    jdata = c.fetchall()
    conn.close()
    jobs = []
    for j in jdata:
        jobs.append( {'jid':j[0],'uid':j[1],'state':j[2]} )
    return jobs
#-------------------------------------------------------------------------------
def isJobFromUser( jobid, user ):
    """Return True when job *jobid* belongs to *user*."""
    conn = sqlite3.connect( database )
    cur = conn.cursor()
    cur.execute( 'SELECT uid FROM user WHERE name=?', (user,) )
    owner = cur.fetchone()
    if owner is None:
        conn.close()
        return False
    cur.execute( 'SELECT uid FROM job WHERE jid=?', (jobid,) )
    jrec = cur.fetchone()
    conn.close()
    if jrec is None:
        return False
    return jrec[0] == owner[0]
#-------------------------------------------------------------------------------
Add a query for retrieving jobs that are still in the just-created state
#-------------------------------------------------------------------------------
import sqlite3
import bcrypt
import os
import shutil
import data
import htpasswd
import re
from email.utils import parseaddr
#-------------------------------------------------------------------------------
BCRYPT_ROUNDS = 5
template = 'template.db'
database = 'database.db'
passwdfile = 'htpasswd.db'
# JOB STATE
JOB_CREATED = 0 # Just Created
JOB_SUBMITTED = 1 # Submitted
JOB_RUNNING = 2 # Running
JOB_COMPLETED = 3 # Completed
# JOB FILE TYPE
FILEIN = 0
FILEOUT = 1
EMAIL_REGEX = re.compile(r"[^@ ]+@[^@ ]+\.[^@ ]+")
#-------------------------------------------------------------------------------
def mkEmptyDatabase( dbname ):
    """Create an empty job database at *dbname*, replacing any existing file.

    Builds the complete schema: user, file, job, jobslurm and jobfile.
    """
    if os.path.isfile( dbname ):
        os.remove( dbname )
    conn = sqlite3.connect( dbname )
    c = conn.cursor()
    c.execute( "CREATE TABLE user (uid INTEGER PRIMARY KEY AUTOINCREMENT, name text, passwd text, email text, UNIQUE(name))" )
    c.execute( "CREATE TABLE file (fid INTEGER PRIMARY KEY AUTOINCREMENT, uid INTEGER, global INTEGER, filename text, filetype INTEGER)" )
    conn.commit()
    c.execute( "CREATE TABLE job (jid INTEGER PRIMARY KEY AUTOINCREMENT, uid INTEGER, state INTEGER)" )
    conn.commit()
    c.execute( "CREATE TABLE jobslurm (jid INTEGER, slurmid INTEGER, PRIMARY KEY(jid, slurmid), FOREIGN KEY(jid) REFERENCES job(jid) )" )
    conn.commit()
    c.execute( "CREATE TABLE jobfile (jid INTEGER, fid INTEGER, jobfiletype INTEGER, PRIMARY KEY(jid, fid) )" )
    conn.commit()
    conn.close()
#-------------------------------------------------------------------------------
def clearDB():
    """Rebuild the template database and remove the working database."""
    mkEmptyDatabase( template )
    if os.path.isfile( database ):
        os.remove( database )
#-------------------------------------------------------------------------------
def init():
    """One-time startup: ensure template, working database, password file
    and the default admin account all exist.  Safe to call repeatedly —
    insertUser merely reports an error when admin already exists."""
    if not os.path.isfile( template ):
        clearDB()
    if not os.path.isfile( database ):
        shutil.copy( template, database )
    # create empty password file
    if not os.path.isfile( passwdfile ):
        open( passwdfile, 'w' ).close()
    name = 'admin' # same name and passwd
    insertUser( name, name, "j.smith@example.com" )
#-------------------------------------------------------------------------------
def insertUser( name, passwd, email ):
checkedName, checkedEmail = parseaddr( email )
if len( checkedEmail ) == 0 or not EMAIL_REGEX.match( checkedEmail):
print "ERROR: Invalid email ", email
return
h = bcrypt.hashpw( passwd, bcrypt.gensalt(BCRYPT_ROUNDS) )
conn = sqlite3.connect( database )
try:
with conn:
conn.execute( 'INSERT INTO user VALUES (null,?,?,?)', (name,h,checkedEmail) )
except sqlite3.IntegrityError:
print "ERROR: User Already Exists ", name
return
try:
with htpasswd.Basic( passwdfile ) as userdb:
userdb.add( name, passwd )
except htpasswd.basic.UserExists, e:
print "ERROR: User Already Exists ", name, e
#-------------------------------------------------------------------------------
def changeUserPassword( name, newpass ):
try:
with htpasswd.Basic( passwdfile ) as userdb:
userdb.change_password( name, newpass )
except htpasswd.basic.UserNotExists, e:
print "ERROR: User Not Exists ", name, e
return False
h = bcrypt.hashpw( newpass, bcrypt.gensalt(BCRYPT_ROUNDS) )
conn = sqlite3.connect( database )
try:
with conn:
conn.execute( 'UPDATE user SET passwd=? WHERE name=?', (h,name) )
except:
print "ERROR: changing password ", name
return False
return True
#-------------------------------------------------------------------------------
def checkUser( name, passwd ):
    """Return True iff <passwd> matches the stored bcrypt hash for <name>.

    Unknown users yield False.
    """
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'SELECT passwd FROM user WHERE name=?', (name,) )
    val = c.fetchone()
    conn.close()
    if val is not None:
        # bcrypt.hashpw(pw, stored_hash) re-uses the salt embedded in the
        # stored hash, so equality means the password is correct.
        # NOTE(review): '==' is not constant-time; consider hmac.compare_digest.
        return bcrypt.hashpw( passwd, val[0] ) == val[0]
    return False
#-------------------------------------------------------------------------------
def insertFile( user, filename ):
    """Register <filename> as a private input file (global=0, filetype=0)
    of <user>.

    Raises DataBaseError if the user already registered that filename.
    Unknown users remain a silent no-op, but the connection is now closed
    on every path — the old code leaked it when the user was not found.
    """
    conn = sqlite3.connect( database )
    try:
        c = conn.cursor()
        c.execute( 'SELECT uid FROM user WHERE name=?', (user,) )
        uid = c.fetchone()
        if uid is None:
            return  # keep the historical silent no-op for unknown users
        c.execute( 'SELECT fid FROM file WHERE uid=? AND filename=?', (uid[0],filename) )
        if c.fetchone() is not None:
            raise DataBaseError  # duplicate filename for this user
        c.execute( 'INSERT INTO file VALUES (null,?,?,?,?)', (uid[0],0,filename,0) )
        conn.commit()
    finally:
        conn.close()
#-------------------------------------------------------------------------------
def insertFileWithType( user, filename, filetype ):
    """Register <filename> as a private file of <user> with the given
    <filetype> (FILEIN/FILEOUT).

    Raises DataBaseError if the user already registered that filename.
    Unknown users remain a silent no-op, but the connection is now closed
    on every path — the old code leaked it when the user was not found.
    """
    conn = sqlite3.connect( database )
    try:
        c = conn.cursor()
        c.execute( 'SELECT uid FROM user WHERE name=?', (user,) )
        uid = c.fetchone()
        if uid is None:
            return  # keep the historical silent no-op for unknown users
        c.execute( 'SELECT fid FROM file WHERE uid=? AND filename=?', (uid[0],filename) )
        if c.fetchone() is not None:
            raise DataBaseError  # duplicate filename for this user
        c.execute( 'INSERT INTO file VALUES (null,?,?,?,?)', (uid[0],0,filename,filetype) )
        conn.commit()
    finally:
        conn.close()
#-------------------------------------------------------------------------------
def createFile( userid, filename ):
    """Insert a private input file row (global=0, filetype=0) for <userid>
    and return the new fid.

    Uses cursor.lastrowid instead of the old
    ``SELECT last_insert_rowid() FROM file``, which evaluated the function
    once per row of the table.
    """
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'INSERT INTO file VALUES (null,?,?,?,?)', (userid,0,filename,0) )
    fileid = c.lastrowid
    conn.commit()
    conn.close()
    return fileid
#-------------------------------------------------------------------------------
def createFileWithType( userid, filename, filetype ):
    """Insert a private file row for <userid> with the given <filetype>
    (FILEIN/FILEOUT) and return the new fid.

    Uses cursor.lastrowid instead of the old
    ``SELECT last_insert_rowid() FROM file``, which evaluated the function
    once per row of the table.
    """
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'INSERT INTO file VALUES (null,?,?,?,?)', (userid,0,filename,filetype) )
    fileid = c.lastrowid
    conn.commit()
    conn.close()
    return fileid
#-------------------------------------------------------------------------------
def getUserFiles( user ):
    """Return [{'id': fid, 'file': filename}] for every file visible to
    <user>: the user's own files plus all global ones.  Unknown users get
    an empty list."""
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'SELECT uid FROM user WHERE name=?', (user,) )
    owner = c.fetchone()
    files = []
    if owner is not None:
        c.execute( 'SELECT fid,filename FROM file WHERE uid=? OR global=1', (owner[0],) )
        files = [ {'id': fid, 'file': fname} for fid, fname in c.fetchall() ]
    conn.close()
    return files
#-------------------------------------------------------------------------------
def getUserFilesWithType( user, filetype ):
    """Return [{'id': fid, 'file': filename}] for every file of the given
    <filetype> visible to <user> (own + global).  Unknown users get an
    empty list."""
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'SELECT uid FROM user WHERE name=?', (user,) )
    owner = c.fetchone()
    files = []
    if owner is not None:
        c.execute( 'SELECT fid,filename FROM file WHERE (uid=? OR global=1) AND filetype=?', (owner[0],filetype) )
        files = [ {'id': fid, 'file': fname} for fid, fname in c.fetchall() ]
    conn.close()
    return files
#-------------------------------------------------------------------------------
def getFileFullName( fid ):
    """Return the on-disk path of file <fid>, built by
    data.getUserFilename(owner_name, filename).

    Raises DataBaseError if the file row or its owning user is missing.
    """
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'SELECT uid,filename FROM file WHERE fid=?', (fid,) )
    fdata = c.fetchone()
    if fdata is None:
        conn.close()
        raise DataBaseError
    c.execute('SELECT name FROM user WHERE uid=?', (fdata[0],) )
    udata = c.fetchone()
    if udata is None:
        # dangling file row: owner uid no longer in the user table
        conn.close()
        raise DataBaseError
    conn.close()
    return data.getUserFilename( udata[0], fdata[1] )
#-------------------------------------------------------------------------------
def isFileAllowedFromUser( fileid, user ):
    """Return True iff <user> may access file <fileid>: the file is either
    marked global or owned by the user.  Unknown user or file gives False."""
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'SELECT uid FROM user WHERE name=?', (user,) )
    owner = c.fetchone()
    frow = None
    if owner is not None:
        c.execute( 'SELECT uid,global FROM file WHERE fid=?', (fileid,) )
        frow = c.fetchone()
    conn.close()
    if owner is None or frow is None:
        return False
    return frow[1] != 0 or frow[0] == owner[0]
#-------------------------------------------------------------------------------
def createJob( user ):
    """Create a new job for <user> in state JOB_CREATED and return its jid.

    Raises DataBaseError for unknown users.  Uses cursor.lastrowid instead
    of the old table-scanning ``SELECT last_insert_rowid() FROM job`` and
    the symbolic JOB_CREATED constant instead of a magic 0; the connection
    is closed on every path via try/finally.
    """
    conn = sqlite3.connect( database )
    try:
        c = conn.cursor()
        c.execute( 'SELECT uid FROM user WHERE name=?', (user,) )
        uid = c.fetchone()
        if uid is None:
            raise DataBaseError
        c.execute( 'INSERT INTO job VALUES (null,?,?)', (uid[0], JOB_CREATED) )
        jobid = c.lastrowid
        conn.commit()
        return jobid
    finally:
        conn.close()
#-------------------------------------------------------------------------------
def getUserJobs( user ):
    """Return [{'id': jid}] for every job owned by <user>.  Unknown users
    get an empty list."""
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'SELECT uid FROM user WHERE name=?', (user,) )
    owner = c.fetchone()
    jobs = []
    if owner is not None:
        c.execute( 'SELECT jid FROM job WHERE uid=?', (owner[0],) )
        jobs = [ {'id': row[0]} for row in c.fetchall() ]
    conn.close()
    return jobs
#-------------------------------------------------------------------------------
def addJobFile( jobid, fileid, jftype ):
    """Attach file <fileid> to job <jobid> in role <jftype> (FILEIN/FILEOUT)."""
    conn = sqlite3.connect( database )
    with conn:  # commits on success, exactly like the explicit commit did
        conn.execute( 'INSERT INTO jobfile VALUES (?,?,?)', (jobid,fileid,jftype) )
    conn.close()
#-------------------------------------------------------------------------------
def addJobSlurmRef( jobid, slurmid ):
    """Record that slurm job <slurmid> belongs to job <jobid>."""
    conn = sqlite3.connect( database )
    with conn:  # commits on success, exactly like the explicit commit did
        conn.execute( 'INSERT INTO jobslurm VALUES (?,?)', (jobid,slurmid) )
    conn.close()
#-------------------------------------------------------------------------------
def setJobSubmitted( jobid ):
    """Mark job <jobid> as submitted to the scheduler."""
    conn = sqlite3.connect( database )
    c = conn.cursor()
    # symbolic state constant instead of the magic literal 1
    c.execute( 'UPDATE job SET state=? WHERE jid=?', (JOB_SUBMITTED, jobid) )
    conn.commit()
    conn.close()
#-------------------------------------------------------------------------------
def setJobRunning( jobid ):
    """Mark job <jobid> as running."""
    conn = sqlite3.connect( database )
    c = conn.cursor()
    # symbolic state constant instead of the magic literal 2
    c.execute( 'UPDATE job SET state=? WHERE jid=?', (JOB_RUNNING, jobid) )
    conn.commit()
    conn.close()
#-------------------------------------------------------------------------------
def setJobCompleted( jobid ):
    """Mark job <jobid> as completed."""
    conn = sqlite3.connect( database )
    c = conn.cursor()
    # symbolic state constant instead of the magic literal 3
    c.execute( 'UPDATE job SET state=? WHERE jid=?', (JOB_COMPLETED, jobid) )
    conn.commit()
    conn.close()
#-------------------------------------------------------------------------------
def getJobInfo( jobid ):
    """Collect everything known about job <jobid>.

    Returns {'jobid', 'state', 'slurmids': [{'slurmid'}], 'files':
    [{'fid', 'name', 'type'}]}.  Raises DataBaseError if the job or any
    of its attached file rows does not exist.
    """
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute('SELECT state FROM job WHERE jid=?', (jobid,) )
    jdata = c.fetchone()
    if jdata is None:
        conn.close()
        raise DataBaseError
    c.execute('SELECT fid,jobfiletype FROM jobfile WHERE jid=?', (jobid,) )
    jfiles = c.fetchall()
    files = []
    # one lookup per attached file (N+1 query pattern; fine at this scale)
    for jf in jfiles:
        c.execute('SELECT filename FROM file WHERE fid=?', (jf[0],) )
        fdata = c.fetchone()
        if fdata is None:
            # dangling jobfile row: referenced file no longer exists
            conn.close()
            raise DataBaseError
        files.append( {'fid': jf[0], 'name': fdata[0], 'type': jf[1] } )
    c.execute('SELECT slurmid FROM jobslurm WHERE jid=?', (jobid,) )
    jslurms = c.fetchall()
    slurms = []
    for js in jslurms:
        slurms.append( {'slurmid': js[0] } )
    conn.close()
    return { 'jobid': jobid, 'state': jdata[0], 'slurmids': slurms, 'files': files }
#-------------------------------------------------------------------------------
def getJustCreatedJobs():
    """Return [{'jid','uid','state'}] for every job still in JOB_CREATED.

    Fixes a connection leak: the old code never closed the connection,
    leaking one handle per poll.
    """
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'SELECT jid,uid,state FROM job WHERE state=?', (JOB_CREATED,) )
    jdata = c.fetchall()
    conn.close()  # was missing
    return [ {'jid': j[0], 'uid': j[1], 'state': j[2]} for j in jdata ]
#-------------------------------------------------------------------------------
def getActiveJobs():
    """Return [{'jid','uid','state'}] for every submitted or running job.

    Fixes a connection leak: the old code never closed the connection,
    leaking one handle per poll.
    """
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'SELECT jid,uid,state FROM job WHERE state=? OR state=?',
               (JOB_SUBMITTED, JOB_RUNNING) )
    jdata = c.fetchall()
    conn.close()  # was missing
    return [ {'jid': j[0], 'uid': j[1], 'state': j[2]} for j in jdata ]
#-------------------------------------------------------------------------------
def isJobFromUser( jobid, user ):
    """Return True iff job <jobid> exists and is owned by <user>."""
    conn = sqlite3.connect( database )
    c = conn.cursor()
    c.execute( 'SELECT uid FROM user WHERE name=?', (user,) )
    owner = c.fetchone()
    job = None
    if owner is not None:
        c.execute( 'SELECT uid FROM job WHERE jid=?', (jobid,) )
        job = c.fetchone()
    conn.close()
    return owner is not None and job is not None and job[0] == owner[0]
#-------------------------------------------------------------------------------
|
################################################################################
#
# Functions to read column-separated files
#
################################################################################
import pandas as pd
import numpy as np
def pdread_2col(filename, noheader=False):
    """Read a whitespace-separated 2-column file with pandas.

    Parameters
    ----------
    filename: str
        Name of file to read.
    noheader: bool
        True when the file carries no column-name line. Default False.

    Returns
    -------
    col1, col2: ndarray
        The two columns as float64 arrays.
    """
    opts = dict(comment='#', names=["col1", "col2"], dtype=np.float64,
                delim_whitespace=True)
    if noheader:
        opts["header"] = None
    frame = pd.read_table(filename, **opts)
    return frame["col1"].values, frame["col2"].values
def pdread_3col(filename, noheader=False):
    """Read a whitespace-separated 3-column file with pandas.

    Parameters
    ----------
    filename: str
        Name of file to read.
    noheader: bool
        True when the file carries no column-name line.

    Returns
    -------
    col1, col2, col3: ndarray
        The three columns as float64 arrays.
    """
    opts = dict(comment='#', names=["col1", "col2", "col3"],
                dtype=np.float64, delim_whitespace=True)
    if noheader:
        opts["header"] = None
    frame = pd.read_table(filename, **opts)
    return frame["col1"].values, frame["col2"].values, frame["col3"].values
def pdread_4col(filename, noheader=False):
    """Read a whitespace-separated 4-column file with pandas.

    Parameters
    ----------
    filename: str
        Name of file to read.
    noheader: bool
        True when the file carries no column-name line.

    Returns
    -------
    col1, col2, col3, col4: ndarray
        The four columns as float64 arrays.
    """
    opts = dict(comment='#', names=["col1", "col2", "col3", "col4"],
                dtype=np.float64, delim_whitespace=True)
    if noheader:
        opts["header"] = None
    frame = pd.read_table(filename, **opts)
    return (frame["col1"].values, frame["col2"].values,
            frame["col3"].values, frame["col4"].values)
def read_col(filename):
    """Read a column-formatted text file.

    Returns a list of lists of *strings*, one sublist per line, split on
    whitespace.  Lines starting with '#' are skipped; blank lines yield
    an empty sublist.  The file is now opened via ``with`` so the handle
    is closed even when an exception occurs (the old readline loop leaked
    it), and the ``while 1``/readline idiom is replaced by direct
    iteration.
    """
    list_data = []
    with open(filename, "r") as f:
        for line in f:  # iteration ends at EOF, like the old "" check
            if line[0] == '#':
                continue
            list_data.append(line.strip().split())
    return list_data
def read_2col(filename):
    """Read a 2-column text file; return [col1, col2] as lists of floats.

    Robustness fix: blank lines produce empty sublists from read_col(),
    and the old ``list_data[i][0][0]`` test crashed on them with an
    IndexError — empty rows are now skipped.
    """
    col1 = []
    col2 = []
    for row in read_col(filename):
        # skip blank lines and stray comment rows
        if not row or row[0][0] == '#':
            continue
        col1.append(float(row[0]))
        col2.append(float(row[1]))
    return [col1, col2]
def read_3col(filename):
    """Read a 3-column text file; return [col1, col2, col3] as lists of
    floats.

    Robustness fix: empty rows (blank lines) no longer raise IndexError.
    """
    col1 = []
    col2 = []
    col3 = []
    for row in read_col(filename):
        # skip blank lines and stray comment rows
        if not row or row[0][0] == '#':
            continue
        col1.append(float(row[0]))
        col2.append(float(row[1]))
        col3.append(float(row[2]))
    return [col1, col2, col3]
def read_4col(filename):
    """Read a 4-column text file; return [col1, col2, col3, col4] as
    lists of floats.

    Robustness fix: empty rows (blank lines) no longer raise IndexError.
    """
    col1 = []
    col2 = []
    col3 = []
    col4 = []
    for row in read_col(filename):
        # skip blank lines and stray comment rows
        if not row or row[0][0] == '#':
            continue
        col1.append(float(row[0]))
        col2.append(float(row[1]))
        col3.append(float(row[2]))
        col4.append(float(row[3]))
    return [col1, col2, col3, col4]
################################################################################
#
# Functions to write files in column-separated formats
#
################################################################################
def pdwrite_2col(filename, data1, data2, sep="\t", header=False):
    """Write two data columns to <filename> with pandas.to_csv.

    Parameters
    ----------
    filename: str
        Output file name.
    data1, data2: array-like
        Column data.
    sep: str
        Field separator between values (default tab).
    header: list of str or False
        Column names to write, or False for no header line.

    Returns
    -------
    int
        0 on success.
    """
    names = ["# " + header[0], header[1]] if header else ["# x", "y"]
    frame = pd.DataFrame({names[0]: data1, names[1]: data2})
    frame.to_csv(filename, sep=sep, header=header, index=False)
    return 0
def pdwrite_3col(filename, data1, data2, data3, sep="\t", header=False):
    """Write three data columns to <filename> with pandas.to_csv.

    Parameters
    ----------
    filename: str
        Output file name.
    data1, data2, data3: array-like
        Column data.
    sep: str
        Field separator between values (default tab).
    header: list of str or False
        Column names to write, or False for no header line.

    Returns
    -------
    int
        0 on success.
    """
    if header:
        names = ["# " + header[0], header[1], header[2]]
    else:
        names = ["# x", "y", "z"]
    frame = pd.DataFrame({names[0]: data1, names[1]: data2, names[2]: data3})
    frame.to_csv(filename, sep=sep, header=header, index=False)
    return 0
def write_2col(filename, data1, data2):
    # Write paired values of data1/data2 as tab-separated rows of <filename>.
    with open(filename, "w") as out:
        for idx, left in enumerate(data1):
            out.write("\t{0}\t\t{1}\n".format(left, data2[idx]))
def write_3col(filename, data1, data2, data3):
    # Write triples from data1/data2/data3 as tab-separated rows of <filename>.
    with open(filename, "w") as out:
        for idx, left in enumerate(data1):
            out.write("\t{0}\t\t{1}\t\t{2}\n".format(left, data2[idx], data3[idx]))
def write_e_2col(filename, data1, data2):
    # Write pairs in scientific notation, tab-separated, to <filename>.
    with open(filename, "w") as out:
        for idx, left in enumerate(data1):
            out.write("\t%e\t\t%e\n" % (left, data2[idx]))
def write_e_3col(filename, data1, data2, data3):
    # Write triples in scientific notation, tab-separated, to <filename>.
    with open(filename, "w") as out:
        for idx, left in enumerate(data1):
            out.write("\t%e\t\t%e\t\t%e\n" % (left, data2[idx], data3[idx]))
def pdwrite_cols(filename, *data, **kwargs):
    """Write any number of data columns to a separated-value file via
    pandas.to_csv.

    Parameters
    ----------
    filename: str
        Output file name.
    *data: array-like
        Data columns, written in the order given.
    **kwargs:
        sep (str, default "\t"), header (list of str or False, default
        False), index (bool, default False).

    Returns
    -------
    int
        0 on success.

    Raises
    ------
    TypeError
        On unexpected keyword arguments.
    ValueError
        When header length or column lengths are inconsistent.
    """
    header = kwargs.pop('header', False)
    sep = kwargs.pop('sep', "\t")
    index = kwargs.pop('index', False)
    # TODO: See about passing any extra keywords into pandas call
    if kwargs:
        raise TypeError('Unexpected **kwargs: {!r}'.format(kwargs))
    if header and len(header) != len(data):
        raise ValueError("Size of data and header does not match.")
    columns = {}
    for pos, col in enumerate(data):
        columns[pos] = col
        if len(col) != len(data[0]):
            raise ValueError("The length of the data columns are not equal")
    frame = pd.DataFrame(columns)
    # integer keys double as the output column order
    frame.to_csv(filename, columns=range(len(data)), sep=sep, header=header,
                 index=index)
    return 0
Add floating point number format to pdwrite_cols
Keyword argument added to the other pdwriters.
Former-commit-id: 4f742572a8a5508bdb41081c702743d1b16f229a [formerly 0be7d2c24001de1f519d40fdd623480de87ae534] [formerly 38eeaf5954fb18ef2008d3edd9f623d59e2ab93a [formerly feb12ff1f8643018aa8d80ef3b385b1f944b40cc]]
Former-commit-id: 447f90ffa0f994ba4b1db1877bfa1747200847bb [formerly dc1f7c11465a3141a10a4357b2251e71a15e3942]
Former-commit-id: 20c8a6a00f5ab983ac5d94c78cd4ec88d64578b2
################################################################################
#
# Functions to read column-separated files
#
################################################################################
import pandas as pd
import numpy as np
def pdread_2col(filename, noheader=False):
    """Read a whitespace-separated 2-column file with pandas.

    Parameters
    ----------
    filename: str
        Name of file to read.
    noheader: bool
        True when the file carries no column-name line. Default False.

    Returns
    -------
    col1, col2: ndarray
        The two columns as float64 arrays.
    """
    opts = dict(comment='#', names=["col1", "col2"], dtype=np.float64,
                delim_whitespace=True)
    if noheader:
        opts["header"] = None
    frame = pd.read_table(filename, **opts)
    return frame["col1"].values, frame["col2"].values
def pdread_3col(filename, noheader=False):
    """Read a whitespace-separated 3-column file with pandas.

    Parameters
    ----------
    filename: str
        Name of file to read.
    noheader: bool
        True when the file carries no column-name line.

    Returns
    -------
    col1, col2, col3: ndarray
        The three columns as float64 arrays.
    """
    opts = dict(comment='#', names=["col1", "col2", "col3"],
                dtype=np.float64, delim_whitespace=True)
    if noheader:
        opts["header"] = None
    frame = pd.read_table(filename, **opts)
    return frame["col1"].values, frame["col2"].values, frame["col3"].values
def pdread_4col(filename, noheader=False):
    """Read a whitespace-separated 4-column file with pandas.

    Parameters
    ----------
    filename: str
        Name of file to read.
    noheader: bool
        True when the file carries no column-name line.

    Returns
    -------
    col1, col2, col3, col4: ndarray
        The four columns as float64 arrays.
    """
    opts = dict(comment='#', names=["col1", "col2", "col3", "col4"],
                dtype=np.float64, delim_whitespace=True)
    if noheader:
        opts["header"] = None
    frame = pd.read_table(filename, **opts)
    return (frame["col1"].values, frame["col2"].values,
            frame["col3"].values, frame["col4"].values)
def read_col(filename):
    """Read a column-formatted text file.

    Returns a list of lists of *strings*, one sublist per line, split on
    whitespace.  Lines starting with '#' are skipped; blank lines yield
    an empty sublist.  The file is now opened via ``with`` so the handle
    is closed even when an exception occurs (the old readline loop leaked
    it), and the ``while 1``/readline idiom is replaced by direct
    iteration.
    """
    list_data = []
    with open(filename, "r") as f:
        for line in f:  # iteration ends at EOF, like the old "" check
            if line[0] == '#':
                continue
            list_data.append(line.strip().split())
    return list_data
def read_2col(filename):
    """Read a 2-column text file; return [col1, col2] as lists of floats.

    Robustness fix: blank lines produce empty sublists from read_col(),
    and the old ``list_data[i][0][0]`` test crashed on them with an
    IndexError — empty rows are now skipped.
    """
    col1 = []
    col2 = []
    for row in read_col(filename):
        # skip blank lines and stray comment rows
        if not row or row[0][0] == '#':
            continue
        col1.append(float(row[0]))
        col2.append(float(row[1]))
    return [col1, col2]
def read_3col(filename):
    """Read a 3-column text file; return [col1, col2, col3] as lists of
    floats.

    Robustness fix: empty rows (blank lines) no longer raise IndexError.
    """
    col1 = []
    col2 = []
    col3 = []
    for row in read_col(filename):
        # skip blank lines and stray comment rows
        if not row or row[0][0] == '#':
            continue
        col1.append(float(row[0]))
        col2.append(float(row[1]))
        col3.append(float(row[2]))
    return [col1, col2, col3]
def read_4col(filename):
    """Read a 4-column text file; return [col1, col2, col3, col4] as
    lists of floats.

    Robustness fix: empty rows (blank lines) no longer raise IndexError.
    """
    col1 = []
    col2 = []
    col3 = []
    col4 = []
    for row in read_col(filename):
        # skip blank lines and stray comment rows
        if not row or row[0][0] == '#':
            continue
        col1.append(float(row[0]))
        col2.append(float(row[1]))
        col3.append(float(row[2]))
        col4.append(float(row[3]))
    return [col1, col2, col3, col4]
################################################################################
#
# Functions to write files in column-separated formats
#
################################################################################
def pdwrite_2col(filename, data1, data2, sep="\t", header=False, float_format=None):
    """Write two data columns to <filename> with pandas.to_csv.

    Parameters
    ----------
    filename: str
        Output file name.
    data1, data2: array-like
        Column data.
    sep: str
        Field separator between values (default tab).
    header: list of str or False
        Column names to write, or False for no header line.
    float_format: str, default None
        Floating point format string passed to to_csv.

    Returns
    -------
    int
        0 on success.
    """
    names = ["# " + header[0], header[1]] if header else ["# x", "y"]
    frame = pd.DataFrame({names[0]: data1, names[1]: data2})
    frame.to_csv(filename, sep=sep, header=header, index=False,
                 float_format=float_format)
    return 0
def pdwrite_3col(filename, data1, data2, data3, sep="\t", header=False, float_format=None):
    """Write three data columns to <filename> with pandas.to_csv.

    Parameters
    ----------
    filename: str
        Output file name.
    data1, data2, data3: array-like
        Column data.
    sep: str
        Field separator between values (default tab).
    header: list of str or False
        Column names to write, or False for no header line.
    float_format: str, default None
        Floating point format string passed to to_csv.

    Returns
    -------
    int
        0 on success.
    """
    if header:
        names = ["# " + header[0], header[1], header[2]]
    else:
        names = ["# x", "y", "z"]
    frame = pd.DataFrame({names[0]: data1, names[1]: data2, names[2]: data3})
    frame.to_csv(filename, sep=sep, header=header, index=False,
                 float_format=float_format)
    return 0
def write_2col(filename, data1, data2):
    # Write paired values of data1/data2 as tab-separated rows of <filename>.
    with open(filename, "w") as out:
        for idx, left in enumerate(data1):
            out.write("\t{0}\t\t{1}\n".format(left, data2[idx]))
def write_3col(filename, data1, data2, data3):
    # Write triples from data1/data2/data3 as tab-separated rows of <filename>.
    with open(filename, "w") as out:
        for idx, left in enumerate(data1):
            out.write("\t{0}\t\t{1}\t\t{2}\n".format(left, data2[idx], data3[idx]))
def write_e_2col(filename, data1, data2):
    # Write pairs in scientific notation, tab-separated, to <filename>.
    with open(filename, "w") as out:
        for idx, left in enumerate(data1):
            out.write("\t%e\t\t%e\n" % (left, data2[idx]))
def write_e_3col(filename, data1, data2, data3):
    # Write triples in scientific notation, tab-separated, to <filename>.
    with open(filename, "w") as out:
        for idx, left in enumerate(data1):
            out.write("\t%e\t\t%e\t\t%e\n" % (left, data2[idx], data3[idx]))
def pdwrite_cols(filename, *data, **kwargs):
    """Write any number of data columns to a separated-value file via
    pandas.to_csv.

    Parameters
    ----------
    filename: str
        Output file name.
    *data: array-like
        Data columns, written in the order given.
    **kwargs:
        sep (str, default "\t"), header (list of str or False, default
        False), index (bool, default False), float_format (default
        '{0:.6f}'.format).

    Returns
    -------
    int
        0 on success.

    Raises
    ------
    TypeError
        On unexpected keyword arguments.
    ValueError
        When header length or column lengths are inconsistent.
    """
    header = kwargs.pop('header', False)
    sep = kwargs.pop('sep', "\t")
    index = kwargs.pop('index', False)
    float_format = kwargs.pop('float_format', '{0:.6f}'.format)
    # TODO: See about passing any extra keywords into pandas call
    if kwargs:
        raise TypeError('Unexpected **kwargs: {!r}'.format(kwargs))
    if header and len(header) != len(data):
        raise ValueError("Size of data and header does not match.")
    columns = {}
    for pos, col in enumerate(data):
        columns[pos] = col
        if len(col) != len(data[0]):
            raise ValueError("The length of the data columns are not equal")
    frame = pd.DataFrame(columns)
    # integer keys double as the output column order
    frame.to_csv(filename, columns=range(len(data)), sep=sep, header=header,
                 index=index, float_format=float_format)
    return 0
|
# -*- coding: utf-8 -*-
# pylint: disable=C0103,W0622
'''
Sphinx documentation for Salt
'''
import functools
import sys
import os
import re
import types
import time
from sphinx.directives import TocTree
# pylint: disable=R0903
class Mock(object):
    '''
    Mock out specified imports.
    This allows autodoc to do its thing without having oodles of req'd
    installed libs. This doesn't work with ``import *`` imports.
    This Mock class can be configured to return a specific values at specific names, if required.
    http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
    '''
    def __init__(self, mapping=None, *args, **kwargs):
        """
        Mapping allows autodoc to bypass the Mock object, but actually assign
        a specific value, expected by a specific attribute returned.
        """
        self.__mapping = mapping or {}
    # Empty export list so ``from mocked_module import *`` imports nothing.
    __all__ = []
    def __call__(self, *args, **kwargs):
        # If mocked function is used as a decorator, expose decorated function.
        # if args and callable(args[-1]):
        #     functools.update_wrapper(ret, args[0])
        # Calling a mock yields another mock that carries the same mapping.
        return Mock(mapping=self.__mapping)
    def __getattr__(self, name):
        #__mapping = {'total': 0}
        # Only reached for attributes not found normally (the mapping set in
        # __init__ is found directly, so no recursion occurs here).
        data = None
        if name in self.__mapping:
            data = self.__mapping.get(name)
        elif name in ('__file__', '__path__'):
            # import machinery probes these; a plain string keeps it happy
            data = '/dev/null'
        elif name == '__qualname__':
            # inspect/Sphinx expect a real string here; returning a Mock
            # would break them, so behave like the attribute is absent
            raise AttributeError("'Mock' object has no attribute '__qualname__'")
        else:
            data = Mock(mapping=self.__mapping)
        return data
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol: mocked iterables are always empty.
        raise StopIteration
# pylint: enable=R0903
# Modules replaced by Mock instances in sys.modules (see the loop below) so
# autodoc can import Salt without the real libraries installed.
# Fix: removed the duplicate 'Crypto.Signature' and 'msgpack' entries.
MOCK_MODULES = [
    # Python stdlib
    'user',
    # salt core
    'Crypto',
    'Crypto.Signature',
    'Crypto.Cipher',
    'Crypto.Hash',
    'Crypto.PublicKey',
    'Crypto.Random',
    'Crypto.Signature.PKCS1_v1_5',
    'M2Crypto',
    'msgpack',
    'yaml',
    'yaml.constructor',
    'yaml.nodes',
    'yaml.parser',
    'yaml.scanner',
    'zmq',
    'zmq.eventloop',
    'zmq.eventloop.ioloop',
    # third-party libs for cloud modules
    'libcloud',
    'libcloud.compute',
    'libcloud.compute.base',
    'libcloud.compute.deployment',
    'libcloud.compute.providers',
    'libcloud.compute.types',
    'libcloud.loadbalancer',
    'libcloud.loadbalancer.types',
    'libcloud.loadbalancer.providers',
    'libcloud.common',
    'libcloud.common.google',
    # third-party libs for netapi modules
    'cherrypy',
    'cherrypy.lib',
    'cherrypy.process',
    'cherrypy.wsgiserver',
    'cherrypy.wsgiserver.ssl_builtin',
    'tornado',
    'tornado.concurrent',
    'tornado.escape',
    'tornado.gen',
    'tornado.httpclient',
    'tornado.httpserver',
    'tornado.httputil',
    'tornado.ioloop',
    'tornado.iostream',
    'tornado.netutil',
    'tornado.simple_httpclient',
    'tornado.stack_context',
    'tornado.web',
    'tornado.websocket',
    'tornado.locks',
    'ws4py',
    'ws4py.server',
    'ws4py.server.cherrypyserver',
    'ws4py.websocket',
    # modules, renderers, states, returners, et al
    'ClusterShell',
    'ClusterShell.NodeSet',
    'django',
    'libvirt',
    'MySQLdb',
    'MySQLdb.cursors',
    'nagios_json',
    'psutil',
    'pycassa',
    'pymongo',
    'rabbitmq_server',
    'redis',
    #'requests',
    #'requests.exceptions',
    'rpm',
    'rpmUtils',
    'rpmUtils.arch',
    'yum',
    'OpenSSL',
    'zfs',
    'salt.ext.six.moves.winreg',
    'win32security',
    'ntsecuritycon',
    'napalm',
    'dson',
    'hjson',
    'jnpr',
    'json',
    'lxml',
    'lxml.etree',
    'jnpr.junos',
    'jnpr.junos.utils',
    'jnpr.junos.utils.config',
    'jnpr.junos.utils.sw',
    'dns',
    'dns.resolver',
    'keyring',
    'netaddr',
    'netaddr.IPAddress',
    'netaddr.core',
    'netaddr.core.AddrFormatError',
    'pyroute2',
    'pyroute2.ipdb',
    'avahi',
    'dbus',
    'twisted',
    'twisted.internet',
    'twisted.internet.protocol',
    'twisted.internet.protocol.DatagramProtocol',
    'boto.regioninfo',
]
# Install a Mock in sys.modules for every listed name so later imports of
# these modules succeed without the real libraries being installed.
for mod_name in MOCK_MODULES:
    if mod_name == 'psutil':
        mock = Mock(mapping={'total': 0}) # Otherwise it will crash Sphinx
    else:
        mock = Mock()
    sys.modules[mod_name] = mock
def mock_decorator_with_params(*oargs, **okwargs):
    '''
    Mock out a decorator that itself takes parameters, e.g.::

        @blah(stuff=True)
        def things():
            pass

    The returned inner callable hands the decorated function back
    unchanged, or a fresh Mock when given a non-callable.
    '''
    def inner(fn, *iargs, **ikwargs):
        return fn if hasattr(fn, '__call__') else Mock()
    return inner
# Define a fake version attribute for the following libs.
sys.modules['libcloud'].__version__ = '0.0.0'
sys.modules['msgpack'].version = (1, 0, 0)
sys.modules['psutil'].version_info = (3, 0, 0)
sys.modules['pymongo'].version = '0.0.0'
sys.modules['ntsecuritycon'].STANDARD_RIGHTS_REQUIRED = 0
sys.modules['ntsecuritycon'].SYNCHRONIZE = 0
# Define a fake version attribute for the following libs.
sys.modules['cherrypy'].config = mock_decorator_with_params
sys.modules['tornado'].version_info = (0, 0, 0)
sys.modules['boto.regioninfo']._load_json_file = {'endpoints': None}
# -- Add paths to PYTHONPATH ---------------------------------------------------
try:
docs_basepath = os.path.abspath(os.path.dirname(__file__))
except NameError:
# sphinx-intl and six execute some code which will raise this NameError
# assume we're in the doc/ directory
docs_basepath = os.path.abspath(os.path.dirname('.'))
addtl_paths = (
os.pardir, # salt itself (for autodoc)
'_ext', # custom Sphinx extensions
)
for path in addtl_paths:
sys.path.insert(0, os.path.abspath(os.path.join(docs_basepath, path)))
# We're now able to import salt
import salt.version
formulas_dir = os.path.join(os.pardir, docs_basepath, 'formulas')
# ----- Intersphinx Settings ------------------------------------------------>
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None)
}
# <---- Intersphinx Settings -------------------------------------------------
# -- General Configuration -----------------------------------------------------
# Set a var if we're building docs for the live site or not
on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
project = 'Salt'
version = salt.version.__version__
latest_release = '2018.3.1' # latest release
previous_release = '2017.7.6' # latest release from previous branch
previous_release_dir = '2017.7' # path on web server for previous branch
next_release = '' # next release
next_release_dir = '' # path on web server for next release branch
today = ''
copyright = ''
if on_saltstack:
today = "Generated on " + time.strftime("%B %d, %Y") + " at " + time.strftime("%X %Z") + "."
copyright = time.strftime("%Y")
# < --- START do not merge these settings to other branches START ---> #
build_type = 'develop' # latest, previous, develop, next
release = version # version, latest_release, previous_release
# < --- END do not merge these settings to other branches END ---> #
# Set google custom search engine
if release == latest_release:
search_cx = '004624818632696854117:yfmprrbw3pk' # latest
elif release.startswith('2014.7'):
search_cx = '004624818632696854117:thhslradbru' # 2014.7
elif release.startswith('2015.5'):
search_cx = '004624818632696854117:ovogwef29do' # 2015.5
elif release.startswith('2015.8'):
search_cx = '004624818632696854117:aw_tegffouy' # 2015.8
else:
search_cx = '004624818632696854117:haj7bjntf4s' # develop
needs_sphinx = '1.3'
spelling_lang = 'en_US'
language = 'en'
locale_dirs = [
'_locale',
]
master_doc = 'contents'
templates_path = ['_templates']
exclude_patterns = ['_build', '_incl/*', 'ref/cli/_includes/*.rst']
extensions = [
'saltdomain', # Must come early
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'httpdomain',
'youtube',
'saltautodoc', # Must be AFTER autodoc
'shorturls',
]
try:
import sphinxcontrib.spelling
except ImportError:
pass
else:
extensions += ['sphinxcontrib.spelling']
modindex_common_prefix = ['salt.']
autosummary_generate = True
# strip git rev as there won't necessarily be a release based on it
stripped_release = re.sub(r'-\d+-g[0-9a-f]+$', '', release)
# Define a substitution for linking to the latest release tarball
rst_prolog = """\
.. |current_release_doc| replace:: :doc:`/topics/releases/{release}`
.. |saltrepo| replace:: https://github.com/saltstack/salt
.. _`salt-users`: https://groups.google.com/forum/#!forum/salt-users
.. _`salt-announce`: https://groups.google.com/forum/#!forum/salt-announce
.. _`salt-packagers`: https://groups.google.com/forum/#!forum/salt-packagers
.. _`salt-slack`: https://saltstackcommunity.herokuapp.com/
.. |windownload| raw:: html
<p>Python2 x86: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-x86-Setup.exe"><strong>Salt-Minion-{release}-x86-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-x86-Setup.exe.md5"><strong>md5</strong></a></p>
<p>Python2 AMD64: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-AMD64-Setup.exe"><strong>Salt-Minion-{release}-AMD64-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
<p>Python3 x86: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-x86-Setup.exe"><strong>Salt-Minion-{release}-x86-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-x86-Setup.exe.md5"><strong>md5</strong></a></p>
<p>Python3 AMD64: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-AMD64-Setup.exe"><strong>Salt-Minion-{release}-AMD64-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
.. |osxdownloadpy2| raw:: html
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-py2-x86_64.pkg"><strong>salt-{release}-py2-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-py2-x86_64.pkg.md5"><strong>md5</strong></a></p>
.. |osxdownloadpy3| raw:: html
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-py3-x86_64.pkg"><strong>salt-{release}-py3-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-py3-x86_64.pkg.md5"><strong>md5</strong></a></p>
""".format(release=stripped_release)
# A shortcut for linking to tickets on the GitHub issue tracker
extlinks = {
'blob': ('https://github.com/saltstack/salt/blob/%s/%%s' % 'develop', None),
'issue': ('https://github.com/saltstack/salt/issues/%s', 'issue #'),
'pull': ('https://github.com/saltstack/salt/pull/%s', 'PR #'),
'formula_url': ('https://github.com/saltstack-formulas/%s', ''),
}
# ----- Localization -------------------------------------------------------->
locale_dirs = ['locale/']
gettext_compact = False
# <---- Localization ---------------------------------------------------------
### HTML options
html_theme = os.environ.get('HTML_THEME', 'saltstack2') # set 'HTML_THEME=saltstack' to use previous theme
html_theme_path = ['_themes']
html_title = u''
html_short_title = 'Salt'
html_static_path = ['_static']
html_logo = None # specified in the theme layout.html
html_favicon = 'favicon.ico'
html_use_smartypants = False
# Use Google customized search or use Sphinx built-in JavaScript search
if on_saltstack:
html_search_template = 'googlesearch.html'
else:
html_search_template = 'searchbox.html'
html_additional_pages = {
'404': '404.html',
}
html_default_sidebars = [
html_search_template,
'version.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
]
html_sidebars = {
'ref/**/all/salt.*': [
html_search_template,
'version.html',
'modules-sidebar.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
],
'ref/formula/all/*': [
],
}
html_context = {
'on_saltstack': on_saltstack,
'html_default_sidebars': html_default_sidebars,
'github_base': 'https://github.com/saltstack/salt',
'github_issues': 'https://github.com/saltstack/salt/issues',
'github_downloads': 'https://github.com/saltstack/salt/downloads',
'latest_release': latest_release,
'previous_release': previous_release,
'previous_release_dir': previous_release_dir,
'next_release': next_release,
'next_release_dir': next_release_dir,
'search_cx': search_cx,
'build_type': build_type,
'today': today,
'copyright': copyright,
}
html_use_index = True
html_last_updated_fmt = '%b %d, %Y'
html_show_sourcelink = False
html_show_sphinx = True
html_show_copyright = True
### Latex options
latex_documents = [
    ('contents', 'Salt.tex', 'Salt Documentation', 'SaltStack, Inc.', 'manual'),
]

latex_logo = '_static/salt-logo.png'

latex_elements = {
    'inputenc': '',  # use XeTeX instead of the inputenc LaTeX package.
    'utf8extra': '',
    # Raw string: ``\usepackage`` in a normal string is an invalid ``\u``
    # escape and a SyntaxError on Python 3.
    'preamble': r'''
    \usepackage{fontspec}
    \setsansfont{Linux Biolinum O}
    \setromanfont{Linux Libertine O}
    \setmonofont{Source Code Pro}
    ''',
}
### Linux Biolinum, Linux Libertine: http://www.linuxlibertine.org/
### Source Code Pro: https://github.com/adobe-fonts/source-code-pro/releases
### Linkcheck options
linkcheck_ignore = [r'http://127.0.0.1',
r'http://salt:\d+',
r'http://local:\d+',
r'https://console.aws.amazon.com',
r'http://192.168.33.10',
r'http://domain:\d+',
r'http://123.456.789.012:\d+',
r'http://localhost',
r'https://groups.google.com/forum/#!forum/salt-users',
r'http://logstash.net/docs/latest/inputs/udp',
r'http://logstash.net/docs/latest/inputs/zeromq',
r'http://www.youtube.com/saltstack',
r'https://raven.readthedocs.io',
r'https://getsentry.com',
r'https://salt-cloud.readthedocs.io',
r'https://salt.readthedocs.io',
r'http://www.pip-installer.org/',
r'http://www.windowsazure.com/',
r'https://github.com/watching',
r'dash-feed://',
r'https://github.com/saltstack/salt/',
r'http://bootstrap.saltstack.org',
r'https://bootstrap.saltstack.com',
r'https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh',
r'media.readthedocs.org/dash/salt/latest/salt.xml',
r'https://portal.aws.amazon.com/gp/aws/securityCredentials',
r'https://help.github.com/articles/fork-a-repo',
r'dash-feed://https%3A//media.readthedocs.org/dash/salt/latest/salt.xml'
]
linkcheck_anchors = False
### Manpage options
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
authors = [
'Thomas S. Hatch <thatch45@gmail.com> and many others, please see the Authors file',
]
man_pages = [
('contents', 'salt', 'Salt Documentation', authors, 7),
('ref/cli/salt', 'salt', 'salt', authors, 1),
('ref/cli/salt-master', 'salt-master', 'salt-master Documentation', authors, 1),
('ref/cli/salt-minion', 'salt-minion', 'salt-minion Documentation', authors, 1),
('ref/cli/salt-key', 'salt-key', 'salt-key Documentation', authors, 1),
('ref/cli/salt-cp', 'salt-cp', 'salt-cp Documentation', authors, 1),
('ref/cli/salt-call', 'salt-call', 'salt-call Documentation', authors, 1),
('ref/cli/salt-proxy', 'salt-proxy', 'salt-proxy Documentation', authors, 1),
('ref/cli/salt-syndic', 'salt-syndic', 'salt-syndic Documentation', authors, 1),
('ref/cli/salt-run', 'salt-run', 'salt-run Documentation', authors, 1),
('ref/cli/salt-ssh', 'salt-ssh', 'salt-ssh Documentation', authors, 1),
('ref/cli/salt-cloud', 'salt-cloud', 'Salt Cloud Command', authors, 1),
('ref/cli/salt-api', 'salt-api', 'salt-api Command', authors, 1),
('ref/cli/salt-unity', 'salt-unity', 'salt-unity Command', authors, 1),
('ref/cli/spm', 'spm', 'Salt Package Manager Command', authors, 1),
]
### epub options
epub_title = 'Salt Documentation'
epub_author = 'SaltStack, Inc.'
epub_publisher = epub_author
epub_copyright = copyright
epub_scheme = 'URL'
epub_identifier = 'http://saltstack.com/'
epub_tocdup = False
#epub_tocdepth = 3
def skip_mod_init_member(app, what, name, obj, skip, options):
    """``autodoc-skip-member`` hook: hide private names and ``mod_init``.

    Returns True when the member should be skipped, False otherwise.
    """
    if name.startswith('_'):
        return True
    is_mod_init = (
        isinstance(obj, types.FunctionType) and obj.__name__ == 'mod_init'
    )
    if is_mod_init:
        return True
    return False
def _normalize_version(args):
_, path = args
return '.'.join([x.zfill(4) for x in (path.split('/')[-1].split('.'))])
class ReleasesTree(TocTree):
    # A ``toctree`` directive variant that reorders its entries by
    # zero-padded version number, newest release first.
    option_spec = dict(TocTree.option_spec)

    def run(self):
        # Let TocTree build the node tree, then sort a copy of the entry
        # list and slice-assign it back so the original list object (and
        # any references to it) stays in place.
        rst = super(ReleasesTree, self).run()
        entries = rst[0][0]['entries'][:]
        entries.sort(key=_normalize_version, reverse=True)
        rst[0][0]['entries'][:] = entries
        return rst
def setup(app):
    # Sphinx extension entry point: register the custom releases toctree
    # directive and the autodoc member-skipping hook defined above.
    app.add_directive('releasestree', ReleasesTree)
    app.connect('autodoc-skip-member', skip_mod_init_member)
Sort and update mocks in doc configuration
# -*- coding: utf-8 -*-
# pylint: disable=C0103,W0622
'''
Sphinx documentation for Salt
'''
import functools
import sys
import os
import re
import types
import time
from sphinx.directives import TocTree
# pylint: disable=R0903
class Mock(object):
    '''
    Mock out specified imports.

    This allows autodoc to do its thing without having oodles of req'd
    installed libs. This doesn't work with ``import *`` imports.

    This Mock class can be configured to return specific values at
    specific names, if required (see ``mapping``).

    http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
    '''
    __all__ = []

    def __init__(self, mapping=None, *args, **kwargs):
        """
        Mapping allows autodoc to bypass the Mock object, but actually assign
        a specific value, expected by a specific attribute returned.
        """
        self.__mapping = mapping or {}

    def __call__(self, *args, **kwargs):
        # If the mocked name is called (used as a decorator, factory, ...)
        # hand back another Mock that carries the same mapping.
        return Mock(mapping=self.__mapping)

    def __getattr__(self, name):
        # Resolution order: explicit mapping first, then the special
        # module attributes Sphinx probes, then a fresh Mock so
        # arbitrarily deep attribute chains keep working.
        if name in self.__mapping:
            return self.__mapping.get(name)
        if name in ('__file__', '__path__'):
            return '/dev/null'
        if name == '__qualname__':
            # Returning a Mock here confuses autodoc; fail the lookup.
            raise AttributeError("'Mock' object has no attribute '__qualname__'")
        return Mock(mapping=self.__mapping)

    def __iter__(self):
        return self

    def __next__(self):
        # Mocks iterate as empty sequences. The original defined only the
        # Python 2 ``next`` spelling, which made iteration raise TypeError
        # on Python 3; keep both names.
        raise StopIteration

    next = __next__
# pylint: enable=R0903
MOCK_MODULES = [
# Python stdlib
'user',
# salt core
'Crypto',
'Crypto.Signature',
'Crypto.Cipher',
'Crypto.Hash',
'Crypto.PublicKey',
'Crypto.Random',
'Crypto.Signature',
'Crypto.Signature.PKCS1_v1_5',
'M2Crypto',
'msgpack',
'yaml',
'yaml.constructor',
'yaml.nodes',
'yaml.parser',
'yaml.scanner',
'zmq',
'zmq.eventloop',
'zmq.eventloop.ioloop',
# third-party libs for cloud modules
'libcloud',
'libcloud.compute',
'libcloud.compute.base',
'libcloud.compute.deployment',
'libcloud.compute.providers',
'libcloud.compute.types',
'libcloud.loadbalancer',
'libcloud.loadbalancer.types',
'libcloud.loadbalancer.providers',
'libcloud.common',
'libcloud.common.google',
# third-party libs for netapi modules
'cherrypy',
'cherrypy.lib',
'cherrypy.process',
'cherrypy.wsgiserver',
'cherrypy.wsgiserver.ssl_builtin',
'tornado',
'tornado.concurrent',
'tornado.escape',
'tornado.gen',
'tornado.httpclient',
'tornado.httpserver',
'tornado.httputil',
'tornado.ioloop',
'tornado.iostream',
'tornado.netutil',
'tornado.simple_httpclient',
'tornado.stack_context',
'tornado.web',
'tornado.websocket',
'tornado.locks',
'ws4py',
'ws4py.server',
'ws4py.server.cherrypyserver',
'ws4py.websocket',
# modules, renderers, states, returners, et al
'ClusterShell',
'ClusterShell.NodeSet',
'MySQLdb',
'MySQLdb.cursors',
'OpenSSL',
'avahi',
'boto.regioninfo',
'concurrent',
'dbus',
'django',
'dns',
'dns.resolver',
'dson',
'hjson',
'jnpr',
'jnpr.junos',
'jnpr.junos.utils',
'jnpr.junos.utils.config',
'jnpr.junos.utils.sw',
'json',
'keyring',
'libvirt',
'lxml',
'lxml.etree',
'msgpack',
'nagios_json',
'napalm',
'netaddr',
'netaddr.IPAddress',
'netaddr.core',
'netaddr.core.AddrFormatError',
'ntsecuritycon',
'psutil',
'pycassa',
'pyconnman',
'pyiface',
'pymongo',
'pyroute2',
'pyroute2.ipdb',
'rabbitmq_server',
'redis',
'rpm',
'rpmUtils',
'rpmUtils.arch',
'salt.ext.six.moves.winreg',
'twisted',
'twisted.internet',
'twisted.internet.protocol',
'twisted.internet.protocol.DatagramProtocol',
'win32security',
'yum',
'zfs',
]
# Register a Mock under every listed module name so autodoc can import
# salt modules without their real third-party dependencies installed.
for mod_name in MOCK_MODULES:
    # psutil is special-cased: Sphinx crashes unless its mock exposes a
    # genuine ``total`` value.
    mapping = {'total': 0} if mod_name == 'psutil' else None
    mock = Mock(mapping=mapping)
    sys.modules[mod_name] = mock
def mock_decorator_with_params(*oargs, **okwargs):
    '''
    Optionally mock a decorator that takes parameters

    E.g.:

    @blah(stuff=True)
    def things():
        pass
    '''
    def inner(fn, *iargs, **ikwargs):
        # Decorating a real callable: hand it back unchanged; otherwise
        # substitute a Mock.
        if not hasattr(fn, '__call__'):
            return Mock()
        return fn
    return inner
# Define a fake version attribute for the following libs.
sys.modules['libcloud'].__version__ = '0.0.0'
sys.modules['msgpack'].version = (1, 0, 0)
sys.modules['psutil'].version_info = (3, 0, 0)
sys.modules['pymongo'].version = '0.0.0'
sys.modules['ntsecuritycon'].STANDARD_RIGHTS_REQUIRED = 0
sys.modules['ntsecuritycon'].SYNCHRONIZE = 0
# Define a fake version attribute for the following libs.
sys.modules['cherrypy'].config = mock_decorator_with_params
sys.modules['tornado'].version_info = (0, 0, 0)
sys.modules['boto.regioninfo']._load_json_file = {'endpoints': None}
# -- Add paths to PYTHONPATH ---------------------------------------------------
try:
docs_basepath = os.path.abspath(os.path.dirname(__file__))
except NameError:
# sphinx-intl and six execute some code which will raise this NameError
# assume we're in the doc/ directory
docs_basepath = os.path.abspath(os.path.dirname('.'))
addtl_paths = (
os.pardir, # salt itself (for autodoc)
'_ext', # custom Sphinx extensions
)
for path in addtl_paths:
sys.path.insert(0, os.path.abspath(os.path.join(docs_basepath, path)))
# We're now able to import salt
import salt.version
formulas_dir = os.path.join(os.pardir, docs_basepath, 'formulas')
# ----- Intersphinx Settings ------------------------------------------------>
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None)
}
# <---- Intersphinx Settings -------------------------------------------------
# -- General Configuration -----------------------------------------------------
# Set a var if we're building docs for the live site or not
on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
project = 'Salt'
version = salt.version.__version__
latest_release = '2018.3.1' # latest release
previous_release = '2017.7.6' # latest release from previous branch
previous_release_dir = '2017.7' # path on web server for previous branch
next_release = '' # next release
next_release_dir = '' # path on web server for next release branch
today = ''
copyright = ''
if on_saltstack:
today = "Generated on " + time.strftime("%B %d, %Y") + " at " + time.strftime("%X %Z") + "."
copyright = time.strftime("%Y")
# < --- START do not merge these settings to other branches START ---> #
build_type = 'develop' # latest, previous, develop, next
release = version # version, latest_release, previous_release
# < --- END do not merge these settings to other branches END ---> #
# Set google custom search engine
if release == latest_release:
search_cx = '004624818632696854117:yfmprrbw3pk' # latest
elif release.startswith('2014.7'):
search_cx = '004624818632696854117:thhslradbru' # 2014.7
elif release.startswith('2015.5'):
search_cx = '004624818632696854117:ovogwef29do' # 2015.5
elif release.startswith('2015.8'):
search_cx = '004624818632696854117:aw_tegffouy' # 2015.8
else:
search_cx = '004624818632696854117:haj7bjntf4s' # develop
needs_sphinx = '1.3'
spelling_lang = 'en_US'
language = 'en'
locale_dirs = [
'_locale',
]
master_doc = 'contents'
templates_path = ['_templates']
exclude_patterns = ['_build', '_incl/*', 'ref/cli/_includes/*.rst']
extensions = [
'saltdomain', # Must come early
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'httpdomain',
'youtube',
'saltautodoc', # Must be AFTER autodoc
'shorturls',
]
try:
import sphinxcontrib.spelling
except ImportError:
pass
else:
extensions += ['sphinxcontrib.spelling']
modindex_common_prefix = ['salt.']
autosummary_generate = True
# strip git rev as there won't necessarily be a release based on it
stripped_release = re.sub(r'-\d+-g[0-9a-f]+$', '', release)
# Define a substitution for linking to the latest release tarball
rst_prolog = """\
.. |current_release_doc| replace:: :doc:`/topics/releases/{release}`
.. |saltrepo| replace:: https://github.com/saltstack/salt
.. _`salt-users`: https://groups.google.com/forum/#!forum/salt-users
.. _`salt-announce`: https://groups.google.com/forum/#!forum/salt-announce
.. _`salt-packagers`: https://groups.google.com/forum/#!forum/salt-packagers
.. _`salt-slack`: https://saltstackcommunity.herokuapp.com/
.. |windownload| raw:: html
<p>Python2 x86: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-x86-Setup.exe"><strong>Salt-Minion-{release}-x86-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-x86-Setup.exe.md5"><strong>md5</strong></a></p>
<p>Python2 AMD64: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-AMD64-Setup.exe"><strong>Salt-Minion-{release}-AMD64-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py2-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
<p>Python3 x86: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-x86-Setup.exe"><strong>Salt-Minion-{release}-x86-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-x86-Setup.exe.md5"><strong>md5</strong></a></p>
<p>Python3 AMD64: <a
href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-AMD64-Setup.exe"><strong>Salt-Minion-{release}-AMD64-Setup.exe</strong></a>
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
.. |osxdownloadpy2| raw:: html
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-py2-x86_64.pkg"><strong>salt-{release}-py2-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-py2-x86_64.pkg.md5"><strong>md5</strong></a></p>
.. |osxdownloadpy3| raw:: html
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-py3-x86_64.pkg"><strong>salt-{release}-py3-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-py3-x86_64.pkg.md5"><strong>md5</strong></a></p>
""".format(release=stripped_release)
# A shortcut for linking to tickets on the GitHub issue tracker
extlinks = {
'blob': ('https://github.com/saltstack/salt/blob/%s/%%s' % 'develop', None),
'issue': ('https://github.com/saltstack/salt/issues/%s', 'issue #'),
'pull': ('https://github.com/saltstack/salt/pull/%s', 'PR #'),
'formula_url': ('https://github.com/saltstack-formulas/%s', ''),
}
# ----- Localization -------------------------------------------------------->
locale_dirs = ['locale/']
gettext_compact = False
# <---- Localization ---------------------------------------------------------
### HTML options
html_theme = os.environ.get('HTML_THEME', 'saltstack2') # set 'HTML_THEME=saltstack' to use previous theme
html_theme_path = ['_themes']
html_title = u''
html_short_title = 'Salt'
html_static_path = ['_static']
html_logo = None # specified in the theme layout.html
html_favicon = 'favicon.ico'
html_use_smartypants = False
# Use Google customized search or use Sphinx built-in JavaScript search
if on_saltstack:
html_search_template = 'googlesearch.html'
else:
html_search_template = 'searchbox.html'
html_additional_pages = {
'404': '404.html',
}
html_default_sidebars = [
html_search_template,
'version.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
]
html_sidebars = {
'ref/**/all/salt.*': [
html_search_template,
'version.html',
'modules-sidebar.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
],
'ref/formula/all/*': [
],
}
html_context = {
'on_saltstack': on_saltstack,
'html_default_sidebars': html_default_sidebars,
'github_base': 'https://github.com/saltstack/salt',
'github_issues': 'https://github.com/saltstack/salt/issues',
'github_downloads': 'https://github.com/saltstack/salt/downloads',
'latest_release': latest_release,
'previous_release': previous_release,
'previous_release_dir': previous_release_dir,
'next_release': next_release,
'next_release_dir': next_release_dir,
'search_cx': search_cx,
'build_type': build_type,
'today': today,
'copyright': copyright,
}
html_use_index = True
html_last_updated_fmt = '%b %d, %Y'
html_show_sourcelink = False
html_show_sphinx = True
html_show_copyright = True
### Latex options
latex_documents = [
    ('contents', 'Salt.tex', 'Salt Documentation', 'SaltStack, Inc.', 'manual'),
]

latex_logo = '_static/salt-logo.png'

latex_elements = {
    'inputenc': '',  # use XeTeX instead of the inputenc LaTeX package.
    'utf8extra': '',
    # Raw string: ``\usepackage`` in a normal string is an invalid ``\u``
    # escape and a SyntaxError on Python 3.
    'preamble': r'''
    \usepackage{fontspec}
    \setsansfont{Linux Biolinum O}
    \setromanfont{Linux Libertine O}
    \setmonofont{Source Code Pro}
    ''',
}
### Linux Biolinum, Linux Libertine: http://www.linuxlibertine.org/
### Source Code Pro: https://github.com/adobe-fonts/source-code-pro/releases
### Linkcheck options
linkcheck_ignore = [r'http://127.0.0.1',
r'http://salt:\d+',
r'http://local:\d+',
r'https://console.aws.amazon.com',
r'http://192.168.33.10',
r'http://domain:\d+',
r'http://123.456.789.012:\d+',
r'http://localhost',
r'https://groups.google.com/forum/#!forum/salt-users',
r'http://logstash.net/docs/latest/inputs/udp',
r'http://logstash.net/docs/latest/inputs/zeromq',
r'http://www.youtube.com/saltstack',
r'https://raven.readthedocs.io',
r'https://getsentry.com',
r'https://salt-cloud.readthedocs.io',
r'https://salt.readthedocs.io',
r'http://www.pip-installer.org/',
r'http://www.windowsazure.com/',
r'https://github.com/watching',
r'dash-feed://',
r'https://github.com/saltstack/salt/',
r'http://bootstrap.saltstack.org',
r'https://bootstrap.saltstack.com',
r'https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh',
r'media.readthedocs.org/dash/salt/latest/salt.xml',
r'https://portal.aws.amazon.com/gp/aws/securityCredentials',
r'https://help.github.com/articles/fork-a-repo',
r'dash-feed://https%3A//media.readthedocs.org/dash/salt/latest/salt.xml'
]
linkcheck_anchors = False
### Manpage options
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
authors = [
'Thomas S. Hatch <thatch45@gmail.com> and many others, please see the Authors file',
]
man_pages = [
('contents', 'salt', 'Salt Documentation', authors, 7),
('ref/cli/salt', 'salt', 'salt', authors, 1),
('ref/cli/salt-master', 'salt-master', 'salt-master Documentation', authors, 1),
('ref/cli/salt-minion', 'salt-minion', 'salt-minion Documentation', authors, 1),
('ref/cli/salt-key', 'salt-key', 'salt-key Documentation', authors, 1),
('ref/cli/salt-cp', 'salt-cp', 'salt-cp Documentation', authors, 1),
('ref/cli/salt-call', 'salt-call', 'salt-call Documentation', authors, 1),
('ref/cli/salt-proxy', 'salt-proxy', 'salt-proxy Documentation', authors, 1),
('ref/cli/salt-syndic', 'salt-syndic', 'salt-syndic Documentation', authors, 1),
('ref/cli/salt-run', 'salt-run', 'salt-run Documentation', authors, 1),
('ref/cli/salt-ssh', 'salt-ssh', 'salt-ssh Documentation', authors, 1),
('ref/cli/salt-cloud', 'salt-cloud', 'Salt Cloud Command', authors, 1),
('ref/cli/salt-api', 'salt-api', 'salt-api Command', authors, 1),
('ref/cli/salt-unity', 'salt-unity', 'salt-unity Command', authors, 1),
('ref/cli/spm', 'spm', 'Salt Package Manager Command', authors, 1),
]
### epub options
epub_title = 'Salt Documentation'
epub_author = 'SaltStack, Inc.'
epub_publisher = epub_author
epub_copyright = copyright
epub_scheme = 'URL'
epub_identifier = 'http://saltstack.com/'
epub_tocdup = False
#epub_tocdepth = 3
def skip_mod_init_member(app, what, name, obj, skip, options):
    """``autodoc-skip-member`` hook.

    Skip (return True for) any underscore-prefixed member and any plain
    function literally named ``mod_init``.
    """
    hidden = name.startswith('_') or (
        isinstance(obj, types.FunctionType) and obj.__name__ == 'mod_init'
    )
    return True if hidden else False
def _normalize_version(args):
_, path = args
return '.'.join([x.zfill(4) for x in (path.split('/')[-1].split('.'))])
class ReleasesTree(TocTree):
    """A ``toctree`` variant that lists release notes newest-first."""

    option_spec = dict(TocTree.option_spec)

    def run(self):
        rst = super(ReleasesTree, self).run()
        # Reorder the generated entries by zero-padded version, descending,
        # keeping the original entries list object in place.
        reordered = sorted(
            rst[0][0]['entries'], key=_normalize_version, reverse=True)
        rst[0][0]['entries'][:] = reordered
        return rst
def setup(app):
    # Sphinx extension entry point: wire in the custom ``releasestree``
    # directive and the autodoc member filter defined above.
    app.add_directive('releasestree', ReleasesTree)
    app.connect('autodoc-skip-member', skip_mod_init_member)
|
from datetime import date, timedelta, datetime
from math import ceil
import tinydb
from tinydb.storages import JSONStorage
from tinydb_serialization import SerializationMiddleware
from date_serializer import DateSerializer
DEFAULT_DATABASE = 'bangumi.db'
def opendb():
    """
    Open TinyDB database

    Returns a TinyDB handle backed by DEFAULT_DATABASE with a date
    serializer registered, so date fields survive the JSON round-trip.
    Callers are responsible for closing the returned handle.
    """
    # NOTE(review): SerializationMiddleware() is used without an explicit
    # storage class -- presumably it defaults to JSONStorage (which is
    # imported above but never referenced); verify against the
    # tinydb_serialization docs.
    serialization = SerializationMiddleware()
    serialization.register_serializer(DateSerializer(), 'Date Serializer')
    return tinydb.TinyDB(DEFAULT_DATABASE, storage=serialization)
def add_bangumis(bangumi_list):
    """
    Add bangumis provided by bangumi_list into database

    param:
        bangumi_list    list of bangumi records (dicts) to insert
    """
    db = opendb()
    try:
        db.insert_multiple(bangumi_list)
        print(
            '{0} bangumi/s has been insert into database.'.format(
                len(bangumi_list)
            )
        )
    finally:
        # Close the handle even when the insert raises; the original
        # leaked the database on any exception.
        db.close()
def update(bangumi):
    """
    Update bangumi information in database

    params:
        bangumi    object of Bangumi class; its ``dict()`` form replaces
                   the stored record matched by ``name``
    """
    db = opendb()
    query = tinydb.Query()
    db.update(bangumi.dict(), query.name == bangumi.name)
    db.close()
def unloaded_episodes():
    """
    Fetch undownloaded episodes

    return:
        list of dict of filemeta
    """
    db = opendb()
    bangumi = tinydb.Query()
    # Every bangumi whose next on-air date has already passed may have
    # new, not-yet-downloaded episodes.
    unloaded_bangumi = db.search(bangumi.next_onair_date <= date.today())
    db.close()
    unloaded_episodes = []
    for bangumi in unloaded_bangumi:
        start_date = bangumi['start_date']
        start_date_datetime = datetime(
            start_date.year, start_date.month, start_date.day)
        # Weeks elapsed since the premiere -> highest episode currently
        # on air, assuming one episode per week.
        now_air_episode = ceil((datetime.now()
                                - start_date_datetime).total_seconds()
                               / timedelta(days=1).total_seconds() / 7)
        print('Bangumi info: {0}'.format(bangumi['name']))
        print('Time interval to next on air day: {0}'.format(
            (datetime.now() - start_date_datetime).total_seconds()))
        print('Calculated days to next on air day: {0}'.format((datetime.now() - start_date_datetime
                                                                ).total_seconds() / timedelta(days=1).total_seconds()))
        print('Available episode:{0}'.format(now_air_episode))
        # NOTE(review): ``FileMeta`` is not imported anywhere in this
        # module -- reaching this loop would raise NameError. Confirm
        # where FileMeta lives or remove this function.
        for i in range(bangumi['dled_ep'] + 1, int(now_air_episode) + 1):
            unloaded_episodes.append(FileMeta(name=bangumi['name'],
                                              ep=i,
                                              translation_team=bangumi[
                                                  'translation_team'],
                                              url=''))
    return unloaded_episodes
def set_downloaded_episode(bangumi_name, episode):
    """
    Set downloaded episode record of specified bangumi

    params:
        bangumi_name    Name of bangumi
        episode         Episode number
    """
    # NOTE(review): ``episode`` is currently unused -- the stored counter
    # is incremented by one rather than set to ``episode``. Confirm the
    # intended semantics before changing this.
    db = opendb()
    try:
        bangumi = tinydb.Query()
        bangumi_info = db.get(bangumi.name == bangumi_name)
        bangumi_info['dled_ep'] += 1
        db.update(bangumi_info, bangumi.name == bangumi_name)
    finally:
        # The original never closed the handle; every other helper in
        # this module does.
        db.close()
def fetch_available_episodes():
    """
    Fetch available episodes at the time when the function is called

    return:
        list of dict of available episode(s)
    """
    db = opendb()
    animes = db.all()
    db.close()
    if not animes:
        print('There is no animes in database')
        return []
    avail_episodes = []
    day_seconds = timedelta(days=1).total_seconds()
    for anime in animes:
        print('anime: {0}'.format(anime['name']))
        premiere = anime['start_date']
        premiere_dt = datetime(premiere.year, premiere.month, premiere.day)
        elapsed_days = (datetime.now() - premiere_dt).total_seconds() \
            / day_seconds
        # One episode per week, plus a per-show episode-number offset.
        episode_now = int(ceil(elapsed_days / 7)) + anime['offset']
        print('days between now and start day:{0}'.format(elapsed_days))
        print('episode available now:{}'.format(episode_now))
        print('downloaded:{}\n\n'.format(anime['dled_ep']))
        if episode_now > anime['dled_ep']:
            avail_episodes.extend(
                {
                    'name': anime['name'],
                    'keyword': anime['keyword'],
                    'translation_team': anime['translation_team'],
                    'ep': ep,
                }
                for ep in range(anime['dled_ep'] + 1, episode_now + 1)
            )
    return avail_episodes
def update_anime_info(name, new):
    """
    Update information of anime with a dict contains new information

    params:
        name    anime name used as the lookup key
        new     dict of fields to overwrite on the stored record
    """
    db = opendb()
    anime = tinydb.Query()
    record = db.get(anime.name == name)
    record.update(new)
    db.update(record, anime.name == name)
    db.close()
Remove unloaded_episodes() and update() from database.py
from datetime import date, timedelta, datetime
from math import ceil
import tinydb
from tinydb.storages import JSONStorage
from tinydb_serialization import SerializationMiddleware
from date_serializer import DateSerializer
DEFAULT_DATABASE = 'bangumi.db'
def opendb():
    """
    Open TinyDB database

    Returns a TinyDB handle backed by DEFAULT_DATABASE with a date
    serializer registered, so date fields survive the JSON round-trip.
    Callers are responsible for closing the returned handle.
    """
    # NOTE(review): no explicit storage class is passed to
    # SerializationMiddleware -- presumably it wraps JSONStorage by
    # default (imported above but unused); verify.
    serialization = SerializationMiddleware()
    serialization.register_serializer(DateSerializer(), 'Date Serializer')
    return tinydb.TinyDB(DEFAULT_DATABASE, storage=serialization)
def add_bangumis(bangumi_list):
    """
    Add the bangumis in ``bangumi_list`` to the database.

    param:
        bangumi_list    list of bangumi dicts to insert
    """
    db = opendb()
    try:
        db.insert_multiple(bangumi_list)
        # Report how many records were inserted ("insert" -> "inserted"
        # fixes the grammar of the user-facing message).
        print(
            '{0} bangumi/s has been inserted into database.'.format(
                len(bangumi_list)
            )
        )
    finally:
        # Close even if the insert raises so the JSON file is not leaked.
        db.close()
def set_downloaded_episode(bangumi_name, episode):
    """
    Set the downloaded-episode record of the specified bangumi.

    params:
        bangumi_name    Name of bangumi
        episode         Episode number to record as downloaded
    """
    db = opendb()
    try:
        bangumi = tinydb.Query()
        bangumi_info = db.get(bangumi.name == bangumi_name)
        # Record the episode that was passed in. The old code ignored the
        # ``episode`` argument entirely and blindly incremented the
        # counter, contradicting both the name and the docstring.
        bangumi_info['dled_ep'] = episode
        db.update(bangumi_info, bangumi.name == bangumi_name)
    finally:
        # The original never closed the handle it opened.
        db.close()
def fetch_available_episodes():
    """
    Fetch available episodes at the time when the function is called.

    return:
        list of dict of available episode(s)
    """
    db = opendb()
    animes = db.all()
    db.close()
    if len(animes) == 0:
        print('There is no animes in database')
        return []
    available = []
    for entry in animes:
        print('anime: {0}'.format(entry['name']))
        start_date = entry['start_date']
        # Promote the stored date to a midnight datetime for subtraction.
        started_at = datetime(
            start_date.year, start_date.month, start_date.day)
        elapsed = datetime.now() - started_at
        elapsed_days = elapsed.total_seconds() \
            / timedelta(days=1).total_seconds()
        # One episode per week plus the per-show manual offset.
        current_ep = int(ceil(elapsed_days / 7)) + entry['offset']
        print('days between now and start day:{0}'.format(elapsed_days))
        print('episode available now:{}'.format(current_ep))
        print('downloaded:{}\n\n'.format(entry['dled_ep']))
        if current_ep > entry['dled_ep']:
            # Everything after the last downloaded episode, inclusive.
            available.extend({
                'name': entry['name'],
                'keyword': entry['keyword'],
                'translation_team': entry['translation_team'],
                'ep': ep,
            } for ep in range(entry['dled_ep'] + 1, current_ep + 1))
    return available
def update_anime_info(name, new):
    """
    Update the stored record of the anime called ``name``.

    params:
        name    Name of the anime to update
        new     dict of fields to overwrite on the stored record

    raises:
        KeyError if no anime with that name exists in the database
    """
    db = opendb()
    try:
        anime = tinydb.Query()
        info = db.get(anime.name == name)
        if info is None:
            # Fail with a clear error instead of the opaque TypeError that
            # subscripting None would raise below.
            raise KeyError('No anime named {0!r} in database'.format(name))
        info.update(new)
        db.update(info, anime.name == name)
    finally:
        # Close the handle even when the lookup/update fails.
        db.close()
|
from .models import Client
from .models import Company
from .models import Contact
from .models import Contract
from .models import ContractSettings
from .models import Estimate
from .models import File
from .models import Invoice
from .models import Newsletter
from .models import Note
from .models import Profile
from .models import Project
from .models import Proposal
from .models import Report
from .models import Service
from .models import AppSettings
from .models import Task
from .models import Time
from django import forms
from taggit.models import Tag
class AdminProfileForm(forms.ModelForm):
    """Admin form exposing every Profile field; bio edited via TinyMCE."""
    class Meta:
        model = Profile
        fields = '__all__'
        widgets = {
            'bio': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
class AdminTimeForm(forms.ModelForm):
    """Admin form for Time entries, including billing-related fields."""
    class Meta:
        model = Time
        fields = (
            'date',
            'hours',
            'log',
            'client',
            'estimate',
            'invoice',
            'project',
            'user',
            'task',
            'invoiced', )
class ClientForm(forms.ModelForm):
    """Form for Client records; notes edited via TinyMCE."""
    class Meta:
        model = Client
        fields = '__all__'
        widgets = {
            'notes': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
class CompanyForm(forms.ModelForm):
    """Form for Company records; notes edited via TinyMCE."""
    class Meta:
        model = Company
        fields = '__all__'
        widgets = {
            'notes': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
class ContactForm(forms.ModelForm):
    """Form for Contact records (uuid deliberately excluded)."""
    class Meta:
        model = Contact
        fields = ( # Exclude uuid instead of include everything else?
            'active',
            'subscribed',
            'first_name',
            'last_name',
            'title',
            'email',
            'mobile_phone',
            'office_phone',
            'fax',
            'address',
            'client',
            'notes', )
class ContractForm(forms.ModelForm):
    """Form for Contract records; body edited via TinyMCE."""
    class Meta:
        model = Contract
        fields = '__all__'
        widgets = {
            'body': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
class ContractSettingsForm(forms.ModelForm):
    """Form for ContractSettings; every clause field uses TinyMCE."""
    class Meta:
        model = ContractSettings
        fields = '__all__'
        widgets = {
            'parties': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'scope_of_work':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'payment_terms':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'timing_of_payment':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'contributor_assignment_agreement':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'authority_to_act':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'termination': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'governing_laws':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'period_of_agreement':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'confidentiality':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'taxes': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'limited_warranty':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'complete_agreement':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
class EstimateForm(forms.ModelForm):
    """Form exposing every Estimate field."""
    class Meta:
        model = Estimate
        fields = '__all__'
class FileForm(forms.ModelForm):
    """Form exposing every File field."""
    class Meta:
        model = File
        fields = '__all__'
class InvoiceForm(forms.ModelForm):
    """
    Issue Date, Last Payment Date, Invoice ID, PO Number, Client, Subject,
    Invoice Amount, Paid Amount, Balance, Subtotal, Discount, Tax, Tax2,
    Currency, Currency Symbol, Document Type
    """
    class Meta:
        model = Invoice
        fields = (
            'subject',
            'issue_date',
            'last_payment_date',
            'client',
            'project', )
class MailForm(forms.Form):
    """Plain (non-model) form used to compose an outgoing mail."""
    # 'test' toggles a dry-run send.
    test = forms.BooleanField(required=False)
    subject = forms.CharField(required=False)
    message = forms.CharField(widget=forms.Textarea(), required=False)
class NewsletterForm(forms.ModelForm):
    """Form for Newsletter; recipient contacts are chosen explicitly."""
    class Meta:
        model = Newsletter
        fields = '__all__'
        widgets = {
            'text': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
    # Only subscribed contacts that have an e-mail address are offered.
    contacts = forms.ModelMultipleChoiceField(
        queryset=Contact.objects.filter(
            subscribed=True).exclude(email='').order_by('first_name'),
        widget=forms.SelectMultiple(attrs={'size': '50'}))
class NoteForm(forms.ModelForm):
    """Form for Note records with a taggit tag selector."""
    class Meta:
        model = Note
        fields = (
            'active',
            'hidden',
            'title',
            'tags',
            'note',
            'due_date', )
        widgets = {
            'note': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
    tags = forms.ModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        widget=forms.SelectMultiple(attrs={'size': '5'}))
class ProfileForm(forms.ModelForm):
    """User-facing Profile form: only the billing rate is editable."""
    class Meta:
        model = Profile
        fields = ('rate', )
class ProjectForm(forms.ModelForm):
    """Form exposing every Project field."""
    class Meta:
        model = Project
        fields = '__all__'
class ProposalForm(forms.ModelForm):
    """Form for Proposal records; body edited via TinyMCE."""
    class Meta:
        model = Proposal
        fields = '__all__'
        widgets = {
            'body': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
class ReportForm(forms.ModelForm):
    """Form exposing every Report field."""
    class Meta:
        model = Report
        fields = '__all__'
class ServiceForm(forms.ModelForm):
    """Form for Service records; description edited via TinyMCE."""
    class Meta:
        model = Service
        fields = '__all__'
        widgets = {
            'description': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
class AppSettingsForm(forms.ModelForm):
    """Form exposing every AppSettings field."""
    class Meta:
        model = AppSettings
        fields = '__all__'
class TaskForm(forms.ModelForm):
    """Form exposing every Task field."""
    class Meta:
        model = Task
        fields = '__all__'
class TimeForm(forms.ModelForm):
    """User-facing Time form: only date, hours and log are editable."""
    class Meta:
        model = Time
        fields = ('date', 'hours', 'log')
Update
from .models import Client
from .models import Company
from .models import Contact
from .models import Contract
from .models import ContractSettings
from .models import Estimate
from .models import File
from .models import Invoice
from .models import Newsletter
from .models import Note
from .models import Profile
from .models import Project
from .models import Proposal
from .models import Report
from .models import Service
from .models import AppSettings
from .models import Task
from .models import Time
from django import forms
from taggit.models import Tag
class AdminProfileForm(forms.ModelForm):
    """Admin form exposing every Profile field; bio edited via TinyMCE."""
    class Meta:
        model = Profile
        fields = '__all__'
        widgets = {
            'bio': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
class AdminTimeForm(forms.ModelForm):
    """Admin form for Time entries, including billing-related fields."""
    class Meta:
        model = Time
        fields = (
            'date',
            'hours',
            'log',
            'client',
            'estimate',
            'invoice',
            'project',
            'user',
            'task',
            'invoiced', )
class ClientForm(forms.ModelForm):
    """Form for Client records; notes edited via TinyMCE."""
    class Meta:
        model = Client
        fields = '__all__'
        widgets = {
            'notes': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
class CompanyForm(forms.ModelForm):
    """Form for Company records; notes edited via TinyMCE."""
    class Meta:
        model = Company
        fields = '__all__'
        widgets = {
            'notes': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
class ContactForm(forms.ModelForm):
    """Form for Contact records (uuid deliberately excluded)."""
    class Meta:
        model = Contact
        fields = ( # Exclude uuid instead of include everything else?
            'active',
            'subscribed',
            'first_name',
            'last_name',
            'title',
            'email',
            'mobile_phone',
            'office_phone',
            'fax',
            'address',
            'client',
            'notes', )
class ContractForm(forms.ModelForm):
    """Form for Contract records; body edited via TinyMCE."""
    class Meta:
        model = Contract
        fields = '__all__'
        widgets = {
            'body': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
class ContractSettingsForm(forms.ModelForm):
    """Form for ContractSettings; every clause field uses TinyMCE."""
    class Meta:
        model = ContractSettings
        fields = '__all__'
        widgets = {
            'parties': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'scope_of_work':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'payment_terms':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'timing_of_payment':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'contributor_assignment_agreement':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'authority_to_act':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'termination': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'governing_laws':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'period_of_agreement':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'confidentiality':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'taxes': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'limited_warranty':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
            'complete_agreement':
            forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
class EstimateForm(forms.ModelForm):
    """Form exposing every Estimate field."""
    class Meta:
        model = Estimate
        fields = '__all__'
class FileForm(forms.ModelForm):
    """Form exposing every File field."""
    class Meta:
        model = File
        fields = '__all__'
class InvoiceForm(forms.ModelForm):
    """
    Issue Date, Last Payment Date, Invoice ID, PO Number, Client, Subject,
    Invoice Amount, Paid Amount, Balance, Subtotal, Discount, Tax, Tax2,
    Currency, Currency Symbol, Document Type
    """
    class Meta:
        model = Invoice
        fields = (
            'client',
            'project',
            'subject',
            'issue_date',
            'last_payment_date', )
class MailForm(forms.Form):
    """Plain (non-model) form used to compose an outgoing mail."""
    # 'test' toggles a dry-run send.
    test = forms.BooleanField(required=False)
    subject = forms.CharField(required=False)
    message = forms.CharField(widget=forms.Textarea(), required=False)
class NewsletterForm(forms.ModelForm):
    """Form for Newsletter; recipient contacts are chosen explicitly."""
    class Meta:
        model = Newsletter
        fields = '__all__'
        widgets = {
            'text': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
    # Only subscribed contacts that have an e-mail address are offered.
    contacts = forms.ModelMultipleChoiceField(
        queryset=Contact.objects.filter(
            subscribed=True).exclude(email='').order_by('first_name'),
        widget=forms.SelectMultiple(attrs={'size': '50'}))
class NoteForm(forms.ModelForm):
    """Form for Note records with a taggit tag selector."""
    class Meta:
        model = Note
        fields = (
            'active',
            'hidden',
            'title',
            'tags',
            'note',
            'due_date', )
        widgets = {
            'note': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
    tags = forms.ModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        widget=forms.SelectMultiple(attrs={'size': '5'}))
class ProfileForm(forms.ModelForm):
    """User-facing Profile form: only the billing rate is editable."""
    class Meta:
        model = Profile
        fields = ('rate', )
class ProjectForm(forms.ModelForm):
    """Form exposing every Project field."""
    class Meta:
        model = Project
        fields = '__all__'
class ProposalForm(forms.ModelForm):
    """Form for Proposal records; body edited via TinyMCE."""
    class Meta:
        model = Proposal
        fields = '__all__'
        widgets = {
            'body': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
class ReportForm(forms.ModelForm):
    """Form exposing every Report field."""
    class Meta:
        model = Report
        fields = '__all__'
class ServiceForm(forms.ModelForm):
    """Form for Service records; description edited via TinyMCE."""
    class Meta:
        model = Service
        fields = '__all__'
        widgets = {
            'description': forms.widgets.TextInput(attrs={'class': 'tinymce'}),
        }
class AppSettingsForm(forms.ModelForm):
    """Form exposing every AppSettings field."""
    class Meta:
        model = AppSettings
        fields = '__all__'
class TaskForm(forms.ModelForm):
    """Form exposing every Task field."""
    class Meta:
        model = Task
        fields = '__all__'
class TimeForm(forms.ModelForm):
    """User-facing Time form: only date, hours and log are editable."""
    class Meta:
        model = Time
        fields = ('date', 'hours', 'log')
|
import asyncio
import esipy
from discord.ext import commands
from requests.adapters import DEFAULT_POOLSIZE
from utils.log import get_logger
ESI_SWAGGER_JSON = 'https://esi.evetech.net/dev/swagger.json'
ESI_APP: esipy.App = None
ESI_CLIENT: esipy.EsiClient = None
ESI_CLIENT_SEMAPHORE = asyncio.Semaphore(DEFAULT_POOLSIZE)
ESI_ENDPOINT_LOCKS = {}
def get_esi_app():
    """Return the module-wide esipy App, creating it on first use."""
    global ESI_APP
    if ESI_APP:
        return ESI_APP
    ESI_APP = esipy.App.create(url=ESI_SWAGGER_JSON)
    return ESI_APP
def get_esi_client():
    """Return the module-wide esipy EsiClient, creating it on first use."""
    global ESI_CLIENT
    if ESI_CLIENT:
        return ESI_CLIENT
    ESI_CLIENT = esipy.EsiClient(retry_requests=True)
    return ESI_CLIENT
class EsiCog:
    """Discord cog base that eagerly builds the shared esipy App/EsiClient.

    Both objects are constructed in executor threads because their
    creation performs blocking network and parsing work.
    """

    def __init__(self, bot: commands.Bot):
        logger = get_logger(__name__, bot)
        logger.info("Creating esipy App...")
        self._esi_app_task = bot.loop.run_in_executor(None, get_esi_app)
        self._esi_app_task.add_done_callback(
            lambda f: logger.info("esipy App created"))
        logger.info("Creating esipy EsiClient...")
        self._esi_client_task = bot.loop.run_in_executor(None, get_esi_client)
        self._esi_client_task.add_done_callback(
            lambda f: logger.info("esipy EsiClient created"))

    def __unload(self):
        # Cancel BOTH startup tasks. The original cancelled
        # _esi_app_task twice and leaked _esi_client_task.
        self._esi_app_task.cancel()
        self._esi_client_task.cancel()

    async def get_esi_app(self):
        return await self._esi_app_task

    async def get_esi_client(self):
        return await self._esi_client_task

    async def esi_request(self, loop, client, operation):
        # Serialize requests per endpoint (one lock per cache key) and
        # bound total concurrency to the requests connection-pool size.
        key = esipy.utils.make_cache_key(operation[0])
        lock = ESI_ENDPOINT_LOCKS.setdefault(key, asyncio.Lock())
        async with ESI_CLIENT_SEMAPHORE:
            async with lock:
                return await loop.run_in_executor(None, client.request,
                                                  operation)
Fix typo in esicog unload
import asyncio
import esipy
from discord.ext import commands
from requests.adapters import DEFAULT_POOLSIZE
from utils.log import get_logger
ESI_SWAGGER_JSON = 'https://esi.evetech.net/dev/swagger.json'
ESI_APP: esipy.App = None
ESI_CLIENT: esipy.EsiClient = None
ESI_CLIENT_SEMAPHORE = asyncio.Semaphore(DEFAULT_POOLSIZE)
ESI_ENDPOINT_LOCKS = {}
def get_esi_app():
    """Return the module-wide esipy App, creating it from the swagger spec once."""
    global ESI_APP
    if not ESI_APP:
        ESI_APP = esipy.App.create(url=ESI_SWAGGER_JSON)
    return ESI_APP
def get_esi_client():
    """Return the module-wide esipy EsiClient, creating it on first use."""
    global ESI_CLIENT
    if not ESI_CLIENT:
        ESI_CLIENT = esipy.EsiClient(retry_requests=True)
    return ESI_CLIENT
class EsiCog:
    """Discord cog base that eagerly builds the shared esipy App/EsiClient.

    Both objects are constructed in executor threads because their
    creation performs blocking network and parsing work.
    """
    def __init__(self, bot: commands.Bot):
        logger = get_logger(__name__, bot)
        logger.info("Creating esipy App...")
        self._esi_app_task = bot.loop.run_in_executor(None, get_esi_app)
        self._esi_app_task.add_done_callback(
            lambda f: logger.info("esipy App created"))
        logger.info("Creating esipy EsiClient...")
        self._esi_client_task = bot.loop.run_in_executor(None, get_esi_client)
        self._esi_client_task.add_done_callback(
            lambda f: logger.info("esipy EsiClient created"))
    def __unload(self):
        # Cancel both startup tasks when the cog is unloaded.
        self._esi_app_task.cancel()
        self._esi_client_task.cancel()
    async def get_esi_app(self):
        return await self._esi_app_task
    async def get_esi_client(self):
        return await self._esi_client_task
    async def esi_request(self, loop, client, operation):
        # Serialize requests per endpoint (one lock per cache key) and
        # bound total concurrency to the requests connection-pool size.
        key = esipy.utils.make_cache_key(operation[0])
        lock = ESI_ENDPOINT_LOCKS.setdefault(key, asyncio.Lock())
        async with ESI_CLIENT_SEMAPHORE:
            async with lock:
                return await loop.run_in_executor(None, client.request,
                                                  operation)
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import cv2
import h5py
import numpy as np
import tensorflow as tf
from glob import glob
from tqdm import tqdm
from multiprocessing import Pool
from sklearn.model_selection import train_test_split
seed = 1337
def one_hot(labels_dense, num_classes=10):
    """Convert a vector of class indices into an (N, num_classes) one-hot matrix."""
    n = labels_dense.shape[0]
    encoded = np.zeros((n, num_classes))
    # Flat index of the hot position for every row.
    flat_positions = np.arange(n) * num_classes + labels_dense.ravel()
    encoded.flat[flat_positions] = 1
    return encoded
class DataSetLoader:
    """Load an image dataset from img/tfr/h5/npy and optionally convert it.

    The operation is encoded in ``name`` as '<ignored>_<dst>' (e.g.
    'to_tfr'); the source format is inferred from the file extension
    found at ``path``. Loaded pixels end up in ``self.raw_data``.
    """
    @staticmethod
    def get_extension(ext):
        # Map a file extension to one of the supported loader types.
        if ext in ['jpg', 'png']:
            return 'img'
        elif ext == 'tfr':
            return 'tfr'
        elif ext == 'h5':
            return 'h5'
        elif ext == 'npy':
            return 'npy'
        else:
            raise ValueError("[-] There'is no supporting file... [%s] :(" % ext)
    @staticmethod
    def get_img(path, size=(64, 64), interp=cv2.INTER_CUBIC):
        """Read an image as RGB and resize it to ``size`` if needed."""
        img = cv2.imread(path, cv2.IMREAD_COLOR)[..., ::-1] # BGR to RGB
        if img.shape[0] == size[0]:
            return img
        else:
            # NOTE(review): cv2.resize's 3rd positional arg is ``dst``,
            # not the interpolation flag — should be interpolation=interp;
            # confirm before relying on this path.
            return cv2.resize(img, size, interp)
    @staticmethod
    def parse_tfr_tf(record):
        # Graph-mode TFRecord parser: shape + raw uint8 payload.
        features = tf.parse_single_example(record, features={
            'shape': tf.FixedLenFeature([3], tf.int64),
            'data': tf.FixedLenFeature([], tf.string)})
        data = tf.decode_raw(features['data'], tf.uint8)
        return tf.reshape(data, features['shape'])
    @staticmethod
    def parse_tfr_np(record):
        # Pure-numpy TFRecord parser for the same shape/data layout.
        ex = tf.train.Example()
        ex.ParseFromString(record)
        shape = ex.features.feature['shape'].int64_list.value
        data = ex.features.feature['data'].bytes_list.value[0]
        return np.fromstring(data, np.uint8).reshape(shape)
    @staticmethod
    def img_scaling(img, scale='0,1'):
        """Normalize pixel values to [0, 1] or [-1, 1]."""
        if scale == '0,1':
            img /= 255.
        elif scale == '-1,1':
            img = (img / 127.5) - 1.
        else:
            raise ValueError("[-] Only '0,1' or '-1,1' please")
        return img
    def __init__(self, path, size=None, name='to_tfr', use_save=False, save_file_name='',
                 buffer_size=4096, n_threads=8,
                 use_image_scaling=True, image_scale='0,1', img_save_method=cv2.INTER_LINEAR, debug=True):
        # 'name' must look like '<src>_<dst>'; only the dst half is used.
        self.op = name.split('_')
        self.debug = debug
        try:
            assert len(self.op) == 2
        except AssertionError:
            raise AssertionError("[-] Invalid Target Types :(")
        self.size = size
        try:
            assert self.size
        except AssertionError:
            raise AssertionError("[-] Invalid Target Sizes :(")
        # To-DO
        # Supporting 4D Image
        self.height = size[0]
        self.width = size[1]
        self.channel = size[2]
        self.path = path
        try:
            assert os.path.exists(self.path)
        except AssertionError:
            raise AssertionError("[-] Path(%s) does not exist :(" % self.path)
        self.buffer_size = buffer_size
        self.n_threads = n_threads
        # A single file and a directory are both accepted as input.
        if os.path.isfile(self.path):
            self.file_list = [self.path]
            self.file_ext = self.path.split('.')[-1]
            self.file_names = [self.path]
        else:
            self.file_list = sorted(os.listdir(self.path))
            self.file_ext = self.file_list[0].split('.')[-1]
            self.file_names = glob(self.path + '/*')
        self.raw_data = np.ndarray([], dtype=np.uint8) # (N, H * W * C)
        if self.debug:
            print("[*] Detected Path is [%s]" % self.path)
            print("[*] Detected File Extension is [%s]" % self.file_ext)
            print("[*] Detected First File Name is [%s] (%d File(s))" % (self.file_names[0], len(self.file_names)))
        self.types = ('img', 'tfr', 'h5', 'npy') # Supporting Data Types
        self.op_src = self.get_extension(self.file_ext)
        self.op_dst = self.op[1]
        try:
            chk_src, chk_dst = False, False
            for t in self.types:
                if self.op_src == t:
                    chk_src = True
                if self.op_dst == t:
                    chk_dst = True
            assert chk_src and chk_dst
        except AssertionError:
            raise AssertionError("[-] Invalid Operation Types (%s, %s) :(" % (self.op_src, self.op_dst))
        self.img_save_method = img_save_method
        # Dispatch on the detected source format.
        if self.op_src == self.types[0]:
            self.load_img()
        elif self.op_src == self.types[1]:
            self.load_tfr()
        elif self.op_src == self.types[2]:
            self.load_h5()
        elif self.op_src == self.types[3]:
            self.load_npy()
        else:
            raise NotImplementedError("[-] Not Supported Type :(")
        # Random Shuffle (deterministic: fixed module-level seed)
        order = np.arange(self.raw_data.shape[0])
        np.random.RandomState(seed).shuffle(order)
        self.raw_data = self.raw_data[order]
        # Clip [0, 255]
        self.raw_data = np.rint(self.raw_data).clip(0, 255).astype(np.uint8)
        self.use_save = use_save
        self.save_file_name = save_file_name
        if self.use_save:
            try:
                assert self.save_file_name
            except AssertionError:
                raise AssertionError("[-] Empty save-file name :(")
            # Dispatch on the requested destination format.
            if self.op_dst == self.types[0]:
                self.convert_to_img()
            elif self.op_dst == self.types[1]:
                self.tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
                self.tfr_writer = tf.python_io.TFRecordWriter(self.save_file_name + ".tfrecords", self.tfr_opt)
                self.convert_to_tfr()
            elif self.op_dst == self.types[2]:
                self.convert_to_h5()
            elif self.op_dst == self.types[3]:
                self.convert_to_npy()
            else:
                raise NotImplementedError("[-] Not Supported Type :(")
        self.use_image_scaling = use_image_scaling
        self.img_scale = image_scale
        if self.use_image_scaling:
            self.raw_data = self.img_scaling(self.raw_data, self.img_scale)
    def load_img(self):
        """Load all images into a flattened (N, H*W*C) uint8 array."""
        self.raw_data = np.zeros((len(self.file_list), self.height * self.width * self.channel),
                                 dtype=np.uint8)
        for i, fn in tqdm(enumerate(self.file_names)):
            self.raw_data[i] = self.get_img(fn, (self.height, self.width), self.img_save_method).flatten()
            if self.debug: # just once
                print("[*] Image Shape   : ", self.raw_data[i].shape)
                print("[*] Image Size    : ", self.raw_data[i].size)
                print("[*] Image MIN/MAX : (%d, %d)" % (np.min(self.raw_data[i]), np.max(self.raw_data[i])))
                self.debug = False
    def load_tfr(self):
        # Here raw_data becomes a tf.data.Dataset, not a numpy array —
        # NOTE(review): the shuffle/clip steps in __init__ would fail on
        # this branch; confirm intended usage.
        self.raw_data = tf.data.TFRecordDataset(self.file_names, compression_type='', buffer_size=self.buffer_size)
        self.raw_data = self.raw_data.map(self.parse_tfr_tf, num_parallel_calls=self.n_threads)
    def load_h5(self, size=0, offset=0):
        """Load the 'images' dataset from one or more .h5 files, optionally chunked."""
        init = True
        for fl in self.file_list: # For multiple .h5 files
            with h5py.File(fl, 'r') as hf:
                data = hf['images']
                full_size = len(data)
                if size == 0:
                    size = full_size
                n_chunks = int(np.ceil(full_size / size))
                if offset >= n_chunks:
                    print("[*] Looping from back to start.")
                    offset %= n_chunks
                if offset == n_chunks - 1:
                    print("[-] Not enough data available, clipping to end.")
                    data = data[offset * size:]
                else:
                    data = data[offset * size:(offset + 1) * size]
                data = np.array(data, dtype=np.uint8)
                print("[+] ", fl, " => Image size : ", data.shape)
                if init:
                    self.raw_data = data
                    init = False
                    if self.debug: # just once
                        print("[*] Image Shape   : ", self.raw_data[0].shape)
                        print("[*] Image Size    : ", self.raw_data[0].size)
                        print("[*] Image MIN/MAX : (%d, %d)" % (np.min(self.raw_data[0]), np.max(self.raw_data[0])))
                        self.debug = False
                    continue
                else:
                    self.raw_data = np.concatenate((self.raw_data, data))
    def load_npy(self):
        # NOTE(review): np.load expects a single path but receives the
        # whole file_names list here — likely only works for one file;
        # confirm against callers.
        self.raw_data = np.rollaxis(np.squeeze(np.load(self.file_names), axis=0), 0, 3)
        if self.debug: # just once
            print("[*] Image Shape   : ", self.raw_data[0].shape)
            print("[*] Image Size    : ", self.raw_data[0].size)
            print("[*] Image MIN/MAX : (%d, %d)" % (np.min(self.raw_data[0]), np.max(self.raw_data[0])))
            self.debug = False
    def convert_to_img(self):
        """Dump raw_data as individual .png files using a worker pool."""
        def to_img(i):
            # NOTE(review): this writes the integer constant
            # cv2.COLOR_BGR2RGB instead of the i-th image, and a nested
            # function cannot be pickled by multiprocessing on
            # spawn-based platforms — this method looks broken as-is.
            cv2.imwrite('imgHQ%05d.png' % i, cv2.COLOR_BGR2RGB)
            return True
        raw_data_shape = self.raw_data.shape # (N, H * W * C)
        try:
            assert os.path.exists(self.save_file_name)
        except AssertionError:
            print("[-] There's no %s :(" % self.save_file_name)
            print("[*] Make directory at %s... " % self.save_file_name)
            os.mkdir(self.save_file_name)
        ii = [i for i in range(raw_data_shape[0])]
        pool = Pool(self.n_threads)
        print(pool.map(to_img, ii))
    def convert_to_tfr(self):
        """Serialize every image (shape + raw bytes) into the open TFRecord writer."""
        for data in self.raw_data:
            ex = tf.train.Example(features=tf.train.Features(feature={
                'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=data.shape)),
                'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[data.tostring()]))
            }))
            self.tfr_writer.write(ex.SerializeToString())
    def convert_to_h5(self):
        """Write raw_data to an .h5 file under the 'images' dataset."""
        with h5py.File(self.save_file_name, 'w') as f:
            f.create_dataset("images", data=self.raw_data)
    def convert_to_npy(self):
        """Write raw_data to a .npy file."""
        np.save(self.save_file_name, self.raw_data)
class MNISTDataSet:
    """Wrapper around the TF tutorial MNIST reader with an optional train/val split."""
    def __init__(self, use_split=False, split_rate=0.15, random_state=42, ds_path=None):
        self.use_split = use_split
        self.split_rate = split_rate
        self.random_state = random_state
        self.ds_path = ds_path
        try:
            assert self.ds_path
        except AssertionError:
            raise AssertionError("[-] MNIST DataSet Path is required!")
        # Imported lazily so TF's tutorial module is only needed here.
        from tensorflow.examples.tutorials.mnist import input_data
        self.data = input_data.read_data_sets(self.ds_path, one_hot=True) # download MNIST
        # training data
        self.train_data = self.data.train
        self.train_images = self.train_data.images
        self.train_labels = self.train_data.labels
        self.valid_images = None
        self.valid_labels = None
        # test data
        self.test_data = self.data.test
        self.test_images = self.test_data.images
        self.test_labels = self.test_data.labels
        # split training data set into train, valid
        if self.use_split:
            self.train_images, self.valid_images, self.train_labels, self.valid_labels = \
                train_test_split(self.train_images, self.train_labels,
                                 test_size=self.split_rate,
                                 random_state=self.random_state)
class CiFarDataSet:
    """Loader for the CIFAR-10/100 pickle batches with one-hot labels."""
    @staticmethod
    def unpickle(file):
        """Load one CIFAR pickle batch; keys come back as bytes."""
        import pickle as p
        # WARN: Only for python3, NOT FOR python2
        with open(file, 'rb') as f:
            return p.load(f, encoding='bytes')
    def __init__(self, height=32, width=32, channel=3,
                 use_split=False, split_rate=0.2, random_state=42, ds_name="cifar-10", ds_path=None):
        """
        # General Settings
        :param height: input image height, default 32
        :param width: input image width, default 32
        :param channel: input image channel, default 3 (RGB)
        - in case of CIFAR, image size is 32 x 32 x 3 (HWC).
        # Pre-Processing Option
        :param use_split: training DataSet splitting, default True
        :param split_rate: image split rate (into train & test), default 0.2
        :param random_state: random seed for shuffling, default 42
        # DataSet Option
        :param ds_name: DataSet's name, default cifar-10
        :param ds_path: DataSet's path, default None
        """
        self.height = height
        self.width = width
        self.channel = channel
        self.use_split = use_split
        self.split_rate = split_rate
        self.random_state = random_state
        self.ds_name = ds_name
        self.ds_path = ds_path # DataSet path
        self.n_classes = 10 # DataSet the number of classes, default 10
        self.train_images = None
        self.valid_images = None
        self.test_images = None
        self.train_labels = None
        self.valid_labels = None
        self.test_labels = None
        try:
            assert self.ds_path
        except AssertionError:
            raise AssertionError("[-] CIFAR10/100 DataSets' Path is required!")
        if self.ds_name == "cifar-10":
            self.cifar_10() # loading Cifar-10
        elif self.ds_name == "cifar-100":
            self.cifar_100() # loading Cifar-100
        else:
            raise NotImplementedError("[-] Only 'cifar-10' or 'cifar-100'")
    def cifar_10(self):
        """Load the five CIFAR-10 train batches plus the test batch."""
        self.n_classes = 10 # labels
        train_batch_1 = self.unpickle("{0}/data_batch_1".format(self.ds_path))
        train_batch_2 = self.unpickle("{0}/data_batch_2".format(self.ds_path))
        train_batch_3 = self.unpickle("{0}/data_batch_3".format(self.ds_path))
        train_batch_4 = self.unpickle("{0}/data_batch_4".format(self.ds_path))
        train_batch_5 = self.unpickle("{0}/data_batch_5".format(self.ds_path))
        # training data & label
        train_data = np.concatenate([
            train_batch_1[b'data'],
            train_batch_2[b'data'],
            train_batch_3[b'data'],
            train_batch_4[b'data'],
            train_batch_5[b'data'],
        ], axis=0)
        train_labels = np.concatenate([
            train_batch_1[b'labels'],
            train_batch_2[b'labels'],
            train_batch_3[b'labels'],
            train_batch_4[b'labels'],
            train_batch_5[b'labels'],
        ], axis=0)
        # Image size : 32x32x3
        train_images = np.swapaxes(train_data.reshape([-1,
                                                       self.height,
                                                       self.width,
                                                       self.channel], order='F'), 1, 2)
        # test data & label
        test_batch = self.unpickle("{0}/test_batch".format(self.ds_path))
        test_data = test_batch[b'data']
        test_labels = np.array(test_batch[b'labels'])
        # image size : 32x32x3
        test_images = np.swapaxes(test_data.reshape([-1,
                                                     self.height,
                                                     self.width,
                                                     self.channel], order='F'), 1, 2)
        # split training data set into train / val
        if self.use_split:
            train_images, valid_images, train_labels, valid_labels = \
                train_test_split(train_images, train_labels,
                                 test_size=self.split_rate,
                                 random_state=self.random_state)
            self.valid_images = valid_images
            self.valid_labels = one_hot(valid_labels, self.n_classes)
        self.train_images = train_images
        self.test_images = test_images
        self.train_labels = one_hot(train_labels, self.n_classes)
        self.test_labels = one_hot(test_labels, self.n_classes)
    def cifar_100(self):
        """Load the single CIFAR-100 train/test batches using fine labels."""
        self.n_classes = 100 # labels
        # training data & label
        train_batch = self.unpickle("{0}/train".format(self.ds_path))
        train_data = np.concatenate([train_batch[b'data']], axis=0)
        train_labels = np.concatenate([train_batch[b'fine_labels']], axis=0)
        train_images = np.swapaxes(train_data.reshape([-1,
                                                       self.height,
                                                       self.width,
                                                       self.channel], order='F'), 1, 2)
        # test data & label
        test_batch = self.unpickle("{0}/test".format(self.ds_path))
        test_data = np.concatenate([test_batch[b'data']], axis=0)
        test_labels = np.concatenate([test_batch[b'fine_labels']], axis=0)
        test_images = np.swapaxes(test_data.reshape([-1,
                                                     self.height,
                                                     self.width,
                                                     self.channel], order='F'), 1, 2)
        # split training data set into train / val
        if self.use_split:
            train_images, valid_images, train_labels, valid_labels = \
                train_test_split(train_images, train_labels,
                                 test_size=self.split_rate,
                                 random_state=self.random_state)
            self.valid_images = valid_images
            self.valid_labels = one_hot(valid_labels, self.n_classes)
        self.train_images = train_images
        self.test_images = test_images
        self.train_labels = one_hot(train_labels, self.n_classes)
        self.test_labels = one_hot(test_labels, self.n_classes)
class CelebADataSet:
"""
This Class for CelebA & CelebA-HQ DataSets.
- saving images as .h5 file for more faster loading.
- Actually, CelebA-HQ DataSet is kinda encrypted. So if u wanna use it, decrypt first!
There're a few codes that download & decrypt CelebA-HQ DataSet.
"""
def __init__(self,
height=64, width=64, channel=3, attr_labels=(),
n_threads=30, use_split=False, split_rate=0.2, random_state=42,
ds_image_path=None, ds_label_path=None, ds_type="CelebA", use_img_scale=True, img_scale="-1,1",
use_save=False, save_type='to_h5', save_file_name=None,
use_concat_data=False):
"""
# General Settings
:param height: image height
:param width: image width
:param channel: image channel
- in case of CelebA, image size is 64 x 64 x 3 (HWC)
- in case of CelebA-HQ, image size is 1024 x 1024 x 3 (HWC)
:param attr_labels: attributes of CelebA DataSet
- in case of CelebA, the number of attributes is 40
# Pre-Processing Option
:param n_threads: the number of threads
:param use_split: splitting train DataSet into train/val
:param split_rate: image split rate (into train & val)
:param random_state: random seed for shuffling, default 42
# DataSet Settings
:param ds_image_path: DataSet's Image Path
:param ds_label_path: DataSet's Label Path
:param ds_type: which DataSet is
:param use_img_scale: using img scaling?
:param img_scale: img normalize
:param use_save: saving into another file format
:param save_type: file format to save
:param save_file_name: file name to save
:param use_concat_data: concatenate images & labels
"""
self.height = height
self.width = width
self.channel = channel
'''
# Available attributes
[
5_o_Clock_Shadow, Arched_Eyebrows, Attractive, Bags_Under_Eyes, Bald, Bangs, Big_Lips, Big_Nose, Black_Hair,
Blond_Hair, Blurry, Brown_Hair, Bushy_Eyebrows, Chubby, Double_Chin, Eyeglasses, Goatee, Gray_Hair,
Heavy_Makeup, High_Cheekbones, Male, Mouth_Slightly_Open, Mustache, Narrow_Eyes, No_Beard, Oval_Face,
Pale_Skin, Pointy_Nose, Receding_Hairline, Rosy_Cheeks, Sideburns, Smiling, Straight_Hair, Wavy_Hair,
Wearing_Earrings, Wearing_Hat, Wearing_Lipstick, Wearing_Necklace, Wearing_Necktie, Young
]
'''
self.attr_labels = attr_labels
self.image_shape = (self.height, self.width, self.channel) # (H, W, C)
self.n_threads = n_threads
self.use_split = use_split
self.split_rate = split_rate
self.random_state = random_state
self.attr = [] # loaded labels
self.images = []
self.labels = {}
"""
Expected DataSet's Path Example
CelebA : CelebA/ (sub-folder : Anno/..., Img/... )
CelebA-HQ : CelebA-HQ/ (sub-folder : ...npy, ...png )
Labels : CelebA/Anno/...txt
Expected DatSet's Type
'CelebA' or 'CelebA-HQ'
"""
self.ds_image_path = ds_image_path
self.ds_label_path = ds_label_path
self.ds_type = ds_type
self.use_img_scale = use_img_scale
self.img_scale = img_scale
try:
assert self.ds_image_path and self.ds_label_path
except AssertionError:
raise AssertionError("[-] CelebA/CelebA-HQ DataSets' Path is required! (%s)")
if self.ds_type == "CelebA":
self.num_images = 202599 # the number of CelebA images
elif self.ds_type == "CelebA-HQ":
self.num_images = 30000 # the number of CelebA-HQ images
tmp_path = self.ds_image_path + "/imgHQ00000."
if os.path.exists(tmp_path + "dat"):
raise FileNotFoundError("[-] You need to decrypt .dat file first!\n" +
"[-] plz, use original PGGAN repo or"
" this repo https://github.com/nperraud/download-celebA-HQ")
else:
raise NotImplemented("[-] 'ds_type' muse be 'CelebA' or 'CelebA-HQ'")
self.use_save = use_save
self.save_type = save_type
self.save_file_name = save_file_name
self.use_concat_data = use_concat_data
try:
if self.use_save:
assert self.save_file_name
except AssertionError:
raise AssertionError("[-] save-file/folder-name is required!")
self.images = DataSetLoader(path=self.ds_image_path,
size=self.image_shape,
use_save=self.use_save,
name=self.save_type,
save_file_name=self.save_file_name,
use_image_scaling=use_img_scale,
image_scale=self.img_scale).raw_data # numpy arrays
self.labels = self.load_attr(path=self.ds_label_path)
if self.use_concat_data:
self.images = self.concat_data(self.images, self.labels)
# split training data set into train / val
if self.use_split:
self.train_images, self.valid_images, self.train_labels, self.valid_labels = \
train_test_split(self.images, self.labels,
test_size=self.split_rate,
random_state=self.random_state)
def load_attr(self, path):
    """Load per-image attribute labels from a CelebA-style annotation file.

    File layout: line 1 = image count, line 2 = space-separated attribute
    names, remaining lines = "<image_name> <v1> <v2> ..." with +1/-1 flags.

    :param path: path to the attribute annotation text file
    :return: np.ndarray of shape (num_images, len(self.attr_labels));
        1.0 where the attribute flag is +1, 0.0 otherwise
    """
    with open(path, 'r') as f:
        img_attr = []
        self.num_images = int(f.readline().strip())
        self.attr = (f.readline().strip()).split(' ')
        print("[*] the number of images : %d" % self.num_images)
        print("[*] the number of attributes : %d/%d" % (len(self.attr_labels), len(self.attr)))
        # PERF FIX: hoist the attribute-name -> column lookups out of the
        # per-row loop; list.index() is O(n) and the original re-scanned
        # self.attr for every one of the ~200k rows.
        selected = [self.attr.index(x) for x in self.attr_labels]
        for fn in f.readlines():
            row = fn.strip().split()
            # row[0] is the image file name; the rest are +1/-1 flags.
            attr = [int(x) for x in row[1:]]
            # one-hot labeling: +1 -> 1.0, anything else (-1) -> 0.0
            tmp = [1. if attr[i] == 1 else 0. for i in selected]
            img_attr.append(tmp)
    return np.asarray(img_attr)
def concat_data(self, img, label):
    """Append each image's label vector to every one of its pixels.

    :param img: image batch of shape (N, H, W, C)
    :param label: label batch of shape (N, n_attrs)
    :return: np.ndarray of shape (N, H, W, C + n_attrs)
    """
    n_attrs = len(self.attr_labels)
    # Turn each (n_attrs,) label vector into a full H x W plane stack.
    label_planes = np.reshape(label, [-1, 1, 1, n_attrs])
    label_planes = np.tile(label_planes, [1, self.height, self.width, 1])
    return np.concatenate([img, label_planes], axis=3)
class Pix2PixDataSet:
    """Loader for pix2pix / CycleGAN style image-to-image DataSets.

    DataSets come in two folder layouts: "single grid" (trainA/trainB,
    loaded here) and "double grid" (train/val, not implemented yet).
    """

    def __init__(self, height=64, width=64, channel=3,
                 use_split=False, split_rate=0.15, random_state=42, n_threads=8,
                 ds_path=None, ds_name=None, use_save=False, save_type='to_h5', save_file_name=None):
        """
        # General Settings
        :param height: image height, default 64
        :param width: image width, default 64
        :param channel: image channel, default 3 (RGB)
        # Pre-Processing Option
        :param use_split: using DataSet split, default False
        :param split_rate: image split rate (into train & test), default 0.15
        :param random_state: random seed for shuffling, default 42
        :param n_threads: the number of threads for multi-threading, default 8
        # DataSet Option
        :param ds_path: DataSet's Path (e.g. pix2pix/...), default None
        :param ds_name: DataSet's Name (e.g. apple2orange), default None
        :param use_save: saving into another file format
        :param save_type: file format to save
        :param save_file_name: file name to save
        """
        self.height, self.width, self.channel = height, width, channel
        self.image_shape = (self.height, self.width, self.channel)
        self.use_split = use_split
        self.split_rate = split_rate
        self.random_state = random_state
        self.n_threads = n_threads  # tune this for your own system
        self.ds_path = ds_path
        self.ds_name = ds_name
        # single grid : testA, testB, (trainA, trainB)
        # double grid : train, val, (test, sample)
        # NOTE(review): 'ukivoe2photo' looks like a typo for the upstream
        # CycleGAN DataSet name 'ukiyoe2photo' — confirm before changing.
        self.ds_single_grid = ['apple2orange', 'horse2zebra', 'monet2photo', 'summer2winter_yosemite', 'vangogh2photo',
                               'ae_photos', 'cezanne2photo', 'ukivoe2photo', 'iphone2dslr_flower']
        self.ds_double_grid = ['cityscapes', 'edges2handbags', 'edges2shoes', 'facades', 'maps']
        # Single grid DataSet - the number of images
        self.n_sg_images_a = 400
        self.n_sg_images_b = 6287
        # Double grid DataSet - the number of images
        self.n_dg_images_a = 0
        self.n_dg_images_b = 0
        self.use_save = use_save
        self.save_type = save_type
        self.save_file_name = save_file_name
        if self.use_save and not self.save_file_name:
            raise AssertionError("[-] save-file/folder-name is required!")
        if self.ds_name in self.ds_single_grid:
            def _load(sub_dir):
                # Shared loader for the trainA / trainB image folders.
                return DataSetLoader(path="%s/%s/%s/" % (self.ds_path, self.ds_name, sub_dir),
                                     size=self.image_shape,
                                     use_save=self.use_save,
                                     name=self.save_type,
                                     save_file_name=self.save_file_name,
                                     use_image_scaling=True,
                                     image_scale='0,1').raw_data  # numpy arrays
            self.images_a = _load("trainA")
            self.images_b = _load("trainB")
            self.n_images_a = self.n_sg_images_a
            self.n_images_b = self.n_sg_images_b
        elif self.ds_name in self.ds_double_grid:
            # To-Do: implement the double-grid (train/val) layout
            self.n_images_a = self.n_dg_images_a
            self.n_images_b = self.n_dg_images_b
        else:
            raise NotImplementedError("[-] Not Implemented yet")
class ImageNetDataSet:
    """Placeholder for an ImageNet DataSet loader (not implemented yet)."""

    def __init__(self):
        # Intentionally empty; loading logic is yet to be written.
        pass
class Div2KDataSet:
    """DIV2K super-resolution DataSet loader (paired HR / LR images)."""

    def __init__(self, hr_height=384, hr_width=384, lr_height=96, lr_width=96, channel=3,
                 use_split=False, split_rate=0.1, random_state=42, n_threads=8,
                 ds_path=None, ds_name=None, use_img_scale=True,
                 ds_hr_path=None, ds_lr_path=None,
                 use_save=False, save_type='to_h5', save_file_name=None):
        """
        # General Settings
        :param hr_height: input HR image height, default 384
        :param hr_width: input HR image width, default 384
        :param lr_height: input LR image height, default 96
        :param lr_width: input LR image width, default 96
        :param channel: input image channel, default 3 (RGB)
        - in case of Div2K - ds x4, image size is 384 x 384 x 3 (HWC).
        # Pre-Processing Option
        :param use_split: split the DataSet into train/val, default False
        :param split_rate: image split rate (into train & test), default 0.1
        :param random_state: random seed for shuffling, default 42
        :param n_threads: the number of threads for multi-threading, default 8
        # DataSet Option
        :param ds_path: DataSet's root Path (e.g. div2k/...), default None
        :param ds_name: DataSet's Name (e.g. X4), default None
        :param use_img_scale: using img scaling?
        :param ds_hr_path: DataSet High Resolution path
        :param ds_lr_path: DataSet Low Resolution path
        :param use_save: saving into another file format
        :param save_type: file format to save
        :param save_file_name: file name to save
        :raises AssertionError: when neither ds_path nor both HR/LR paths
            are given, or when use_save is set without a save_file_name
        """
        self.hr_height = hr_height
        self.hr_width = hr_width
        self.lr_height = lr_height
        self.lr_width = lr_width
        self.channel = channel
        self.hr_shape = (self.hr_height, self.hr_width, self.channel)
        self.lr_shape = (self.lr_height, self.lr_width, self.channel)
        self.use_split = use_split
        self.split_rate = split_rate
        self.random_state = random_state
        self.num_threads = n_threads  # change this value to the fitted value for ur system
        self.ds_path = ds_path
        self.ds_name = ds_name
        self.ds_hr_path = ds_hr_path
        self.ds_lr_path = ds_lr_path
        # Either a root ds_path or both explicit HR/LR paths must be given.
        if not self.ds_path and not (self.ds_hr_path and self.ds_lr_path):
            raise AssertionError("[-] DataSet's path is required!")
        self.use_save = use_save
        self.save_type = save_type
        self.save_file_name = save_file_name
        if self.use_save:
            if not self.save_file_name:
                raise AssertionError("[-] save-file/folder-name is required!")
        else:
            self.save_file_name = ""
        self.n_images = 800       # DIV2K train split size
        self.n_images_val = 100   # DIV2K validation split size
        self.use_img_scaling = use_img_scale
        if self.ds_path:  # derive HR/LR folders from the root path
            self.ds_hr_path = self.ds_path + "/DIV2K_train_HR/"
            self.ds_lr_path = self.ds_path + "/DIV2K_train_LR_bicubic/" + self.ds_name + "/"
        self.hr_images = DataSetLoader(path=self.ds_hr_path,
                                       size=self.hr_shape,
                                       use_save=self.use_save,
                                       name=self.save_type,
                                       save_file_name=self.save_file_name + "-hr.h5",
                                       use_image_scaling=self.use_img_scaling,
                                       # BUG FIX: was "-1.1" — img_scaling only accepts
                                       # '0,1' or '-1,1', so HR loading always raised
                                       # ValueError whenever scaling was enabled.
                                       image_scale="-1,1",
                                       img_save_method=cv2.INTER_LINEAR).raw_data  # numpy arrays
        self.lr_images = DataSetLoader(path=self.ds_lr_path,
                                       size=self.lr_shape,
                                       use_save=self.use_save,
                                       name=self.save_type,
                                       save_file_name=self.save_file_name + "-lr.h5",
                                       use_image_scaling=self.use_img_scaling,
                                       image_scale="0,1",
                                       img_save_method=cv2.INTER_CUBIC).raw_data  # numpy arrays
class UrbanSoundDataSet:
    """Placeholder for an UrbanSound DataSet loader (not implemented yet)."""

    def __init__(self):
        # Intentionally empty; loading logic is yet to be written.
        pass
class DataIterator:
    """Mini-batch iterator that reshuffles the data after each pass.

    :param x: sample array; the first axis is the example axis
    :param y: label array aligned with ``x`` (ignored when ``label_off``)
    :param batch_size: number of examples per batch
    :param label_off: when True, labels are neither stored nor returned
    """

    def __init__(self, x, y, batch_size, label_off=False):
        self.x = x
        self.label_off = label_off
        if not self.label_off:
            self.y = y
        self.batch_size = batch_size
        self.num_examples = x.shape[0]
        self.num_batches = self.num_examples // batch_size
        self.pointer = 0  # index of the first example of the next batch
        assert self.batch_size <= self.num_examples

    def next_batch(self):
        """Return the next batch, reshuffling once the data is exhausted."""
        begin = self.pointer
        self.pointer += self.batch_size
        if self.pointer > self.num_examples:
            # Epoch finished: shuffle x (and y in lock-step), then restart.
            order = np.arange(self.num_examples)
            np.random.shuffle(order)
            self.x = self.x[order]
            if not self.label_off:
                self.y = self.y[order]
            begin, self.pointer = 0, self.batch_size
        stop = self.pointer
        if self.label_off:
            return self.x[begin:stop]
        return self.x[begin:stop], self.y[begin:stop]

    def iterate(self):
        """Yield ``num_batches`` consecutive batches (one epoch)."""
        for _ in range(self.num_batches):
            yield self.next_batch()
add: include the input scale type in the error message
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import cv2
import h5py
import numpy as np
import tensorflow as tf
from glob import glob
from tqdm import tqdm
from multiprocessing import Pool
from sklearn.model_selection import train_test_split
seed = 1337
def one_hot(labels_dense, num_classes=10):
    """Convert a vector of integer class labels into a one-hot matrix.

    :param labels_dense: 1-D array of integer class labels
    :param num_classes: total number of classes, default 10
    :return: np.ndarray of shape (len(labels_dense), num_classes)
    """
    n = labels_dense.shape[0]
    encoded = np.zeros((n, num_classes))
    # Flat index of the "hot" cell for every row: row_offset + class_id.
    hot = np.arange(n) * num_classes + labels_dense.ravel()
    encoded.flat[hot] = 1
    return encoded
class DataSetLoader:
    """Load an image DataSet from img/tfr/h5/npy sources into numpy arrays.

    The constructor reads every file under ``path`` into ``self.raw_data``
    (uint8, shuffled with a fixed seed, clipped to [0, 255]) and, when
    ``use_save`` is set, converts it into the requested target format
    ('to_img', 'to_tfr', 'to_h5' or 'to_npy').
    """

    @staticmethod
    def get_extension(ext):
        """Map a file extension to one of the supported source types."""
        if ext in ['jpg', 'png']:
            return 'img'
        elif ext == 'tfr':
            return 'tfr'
        elif ext == 'h5':
            return 'h5'
        elif ext == 'npy':
            return 'npy'
        else:
            raise ValueError("[-] There'is no supporting file... [%s] :(" % ext)

    @staticmethod
    def get_img(path, size=(64, 64), interp=cv2.INTER_CUBIC):
        """Read an image file as RGB and resize it to ``size`` when needed.

        NOTE(review): only the height is compared before resizing, and
        cv2.resize expects (width, height) — fine for square targets,
        verify for non-square ones.
        """
        img = cv2.imread(path, cv2.IMREAD_COLOR)[..., ::-1]  # BGR to RGB
        if img.shape[0] == size[0]:
            return img
        else:
            # BUG FIX: cv2.resize's 3rd positional argument is ``dst``, not
            # the interpolation flag; it must be passed by keyword.
            return cv2.resize(img, size, interpolation=interp)

    @staticmethod
    def parse_tfr_tf(record):
        """Parse one serialized TFRecord into an image tensor (TF ops)."""
        features = tf.parse_single_example(record, features={
            'shape': tf.FixedLenFeature([3], tf.int64),
            'data': tf.FixedLenFeature([], tf.string)})
        data = tf.decode_raw(features['data'], tf.uint8)
        return tf.reshape(data, features['shape'])

    @staticmethod
    def parse_tfr_np(record):
        """Parse one serialized TFRecord into a numpy uint8 array."""
        ex = tf.train.Example()
        ex.ParseFromString(record)
        shape = ex.features.feature['shape'].int64_list.value
        data = ex.features.feature['data'].bytes_list.value[0]
        # np.fromstring is deprecated for binary input; frombuffer is the
        # supported zero-copy equivalent.
        return np.frombuffer(data, np.uint8).reshape(shape)

    @staticmethod
    def img_scaling(img, scale='0,1'):
        """Scale pixel values into [0, 1] ('0,1') or [-1, 1] ('-1,1').

        :raises ValueError: on any other ``scale`` string
        """
        if scale == '0,1':
            # BUG FIX: ``img /= 255.`` raises on the uint8 arrays this class
            # produces (in-place true division cannot be cast back to an
            # integer dtype); re-bind to a new float array instead.
            img = img / 255.
        elif scale == '-1,1':
            img = (img / 127.5) - 1.
        else:
            raise ValueError("[-] Only '0,1' or '-1,1' please - (%s)" % scale)
        return img

    def __init__(self, path, size=None, name='to_tfr', use_save=False, save_file_name='',
                 buffer_size=4096, n_threads=8,
                 use_image_scaling=True, image_scale='0,1', img_save_method=cv2.INTER_LINEAR, debug=True):
        """
        :param path: file or directory holding the source data (required)
        :param size: target image shape (H, W, C) (required)
        :param name: operation name, '<something>_to_<dst>'; only the part
            after the underscore (the destination type) is used
        :param use_save: convert/save into the destination format
        :param save_file_name: destination file/folder for the conversion
        :param buffer_size: TFRecord read buffer size
        :param n_threads: parallelism for TFRecord parsing
        :param use_image_scaling: apply ``img_scaling`` to the loaded data
        :param image_scale: '0,1' or '-1,1'
        :param img_save_method: cv2 interpolation flag used when resizing
        :param debug: print one-off diagnostics while loading
        """
        self.op = name.split('_')
        self.debug = debug
        if len(self.op) != 2:
            raise AssertionError("[-] Invalid Target Types :(")
        self.size = size
        if not self.size:
            raise AssertionError("[-] Invalid Target Sizes :(")
        # To-Do: support 4D images
        self.height = size[0]
        self.width = size[1]
        self.channel = size[2]
        self.path = path
        if not os.path.exists(self.path):
            raise AssertionError("[-] Path(%s) does not exist :(" % self.path)
        self.buffer_size = buffer_size
        self.n_threads = n_threads
        if os.path.isfile(self.path):
            self.file_list = [self.path]
            self.file_ext = self.path.split('.')[-1]
            self.file_names = [self.path]
        else:
            self.file_list = sorted(os.listdir(self.path))
            self.file_ext = self.file_list[0].split('.')[-1]
            self.file_names = glob(self.path + '/*')
        self.raw_data = np.ndarray([], dtype=np.uint8)  # (N, H * W * C)
        if self.debug:
            print("[*] Detected Path is [%s]" % self.path)
            print("[*] Detected File Extension is [%s]" % self.file_ext)
            print("[*] Detected First File Name is [%s] (%d File(s))" % (self.file_names[0], len(self.file_names)))
        self.types = ('img', 'tfr', 'h5', 'npy')  # Supporting Data Types
        self.op_src = self.get_extension(self.file_ext)
        self.op_dst = self.op[1]
        if self.op_src not in self.types or self.op_dst not in self.types:
            raise AssertionError("[-] Invalid Operation Types (%s, %s) :(" % (self.op_src, self.op_dst))
        self.img_save_method = img_save_method
        # Dispatch on the detected source type.
        if self.op_src == self.types[0]:
            self.load_img()
        elif self.op_src == self.types[1]:
            self.load_tfr()
        elif self.op_src == self.types[2]:
            self.load_h5()
        elif self.op_src == self.types[3]:
            self.load_npy()
        else:
            raise NotImplementedError("[-] Not Supported Type :(")
        # Random shuffle with a fixed seed so every run sees the same order.
        # NOTE(review): for a 'tfr' source raw_data is a tf Dataset and this
        # indexing cannot work — presumably tfr sources are loaded elsewhere.
        order = np.arange(self.raw_data.shape[0])
        np.random.RandomState(seed).shuffle(order)
        self.raw_data = self.raw_data[order]
        # Clip to the valid pixel range [0, 255].
        self.raw_data = np.rint(self.raw_data).clip(0, 255).astype(np.uint8)
        self.use_save = use_save
        self.save_file_name = save_file_name
        if self.use_save:
            if not self.save_file_name:
                raise AssertionError("[-] Empty save-file name :(")
            # Dispatch on the requested destination type.
            if self.op_dst == self.types[0]:
                self.convert_to_img()
            elif self.op_dst == self.types[1]:
                self.tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
                self.tfr_writer = tf.python_io.TFRecordWriter(self.save_file_name + ".tfrecords", self.tfr_opt)
                self.convert_to_tfr()
            elif self.op_dst == self.types[2]:
                self.convert_to_h5()
            elif self.op_dst == self.types[3]:
                self.convert_to_npy()
            else:
                raise NotImplementedError("[-] Not Supported Type :(")
        self.use_image_scaling = use_image_scaling
        self.img_scale = image_scale
        if self.use_image_scaling:
            self.raw_data = self.img_scaling(self.raw_data, self.img_scale)

    def load_img(self):
        """Read every image file into one flattened row of ``raw_data``."""
        self.raw_data = np.zeros((len(self.file_list), self.height * self.width * self.channel),
                                 dtype=np.uint8)
        for i, fn in tqdm(enumerate(self.file_names)):
            self.raw_data[i] = self.get_img(fn, (self.height, self.width), self.img_save_method).flatten()
            if self.debug:  # just once
                print("[*] Image Shape : ", self.raw_data[i].shape)
                print("[*] Image Size : ", self.raw_data[i].size)
                print("[*] Image MIN/MAX : (%d, %d)" % (np.min(self.raw_data[i]), np.max(self.raw_data[i])))
                self.debug = False

    def load_tfr(self):
        """Open the TFRecord files as a (lazily parsed) tf.data pipeline."""
        self.raw_data = tf.data.TFRecordDataset(self.file_names, compression_type='', buffer_size=self.buffer_size)
        self.raw_data = self.raw_data.map(self.parse_tfr_tf, num_parallel_calls=self.n_threads)

    def load_h5(self, size=0, offset=0):
        """Load image arrays from one or more .h5 files into ``raw_data``.

        :param size: chunk size to read per file; 0 means the whole file
        :param offset: chunk index to start from (wraps around)
        """
        init = True
        for fl in self.file_list:  # For multiple .h5 files
            # BUG FIX: file_list entries are bare names when ``path`` is a
            # directory; join them so the files resolve outside the cwd.
            fl = fl if os.path.isfile(fl) else os.path.join(self.path, fl)
            with h5py.File(fl, 'r') as hf:
                data = hf['images']
                full_size = len(data)
                if size == 0:
                    size = full_size
                n_chunks = int(np.ceil(full_size / size))
                if offset >= n_chunks:
                    print("[*] Looping from back to start.")
                    offset %= n_chunks
                if offset == n_chunks - 1:
                    print("[-] Not enough data available, clipping to end.")
                    data = data[offset * size:]
                else:
                    data = data[offset * size:(offset + 1) * size]
                data = np.array(data, dtype=np.uint8)
                print("[+] ", fl, " => Image size : ", data.shape)
                if init:
                    self.raw_data = data
                    init = False
                    if self.debug:  # just once
                        print("[*] Image Shape : ", self.raw_data[0].shape)
                        print("[*] Image Size : ", self.raw_data[0].size)
                        print("[*] Image MIN/MAX : (%d, %d)" % (np.min(self.raw_data[0]), np.max(self.raw_data[0])))
                        self.debug = False
                    continue
                else:
                    self.raw_data = np.concatenate((self.raw_data, data))

    def load_npy(self):
        """Load .npy file(s); each file is assumed to hold one (1, C, H, W) image."""
        # BUG FIX: np.load() takes a single path, not a list — the original
        # passed self.file_names (always a list) and raised. Load every file
        # and stack the HWC images along a new leading axis.
        imgs = [np.rollaxis(np.squeeze(np.load(fn), axis=0), 0, 3) for fn in self.file_names]
        self.raw_data = np.stack(imgs)
        if self.debug:  # just once
            print("[*] Image Shape : ", self.raw_data[0].shape)
            print("[*] Image Size : ", self.raw_data[0].size)
            print("[*] Image MIN/MAX : (%d, %d)" % (np.min(self.raw_data[0]), np.max(self.raw_data[0])))
            self.debug = False

    def convert_to_img(self):
        """Dump ``raw_data`` as individual .png files under ``save_file_name``."""
        raw_data_shape = self.raw_data.shape  # (N, H * W * C)
        if not os.path.exists(self.save_file_name):
            print("[-] There's no %s :(" % self.save_file_name)
            print("[*] Make directory at %s... " % self.save_file_name)
            os.mkdir(self.save_file_name)
        # BUG FIX: the original wrote the cv2.COLOR_BGR2RGB *constant* instead
        # of the image data, and pickled a local closure into a process Pool
        # (which fails). Write the actual pixels sequentially instead.
        for i in range(raw_data_shape[0]):
            img = self.raw_data[i].reshape((self.height, self.width, self.channel))
            cv2.imwrite(os.path.join(self.save_file_name, 'imgHQ%05d.png' % i),
                        img[..., ::-1])  # RGB back to BGR for OpenCV

    def convert_to_tfr(self):
        """Serialize every image (shape + raw bytes) into the TFRecord writer."""
        for data in self.raw_data:
            ex = tf.train.Example(features=tf.train.Features(feature={
                'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=data.shape)),
                'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[data.tostring()]))
            }))
            self.tfr_writer.write(ex.SerializeToString())

    def convert_to_h5(self):
        """Write ``raw_data`` into an 'images' dataset of a new .h5 file."""
        with h5py.File(self.save_file_name, 'w') as f:
            f.create_dataset("images", data=self.raw_data)

    def convert_to_npy(self):
        """Write ``raw_data`` into a single .npy file."""
        np.save(self.save_file_name, self.raw_data)
class MNISTDataSet:
    """Thin wrapper around the TF tutorial MNIST reader with an optional train/val split."""

    def __init__(self, use_split=False, split_rate=0.15, random_state=42, ds_path=None):
        """
        :param use_split: split the training set into train/valid, default False
        :param split_rate: fraction moved into the validation set, default 0.15
        :param random_state: random seed for shuffling, default 42
        :param ds_path: where MNIST is (or will be downloaded to); required
        :raises AssertionError: when ``ds_path`` is missing
        """
        self.use_split = use_split
        self.split_rate = split_rate
        self.random_state = random_state
        self.ds_path = ds_path
        if not self.ds_path:
            raise AssertionError("[-] MNIST DataSet Path is required!")
        from tensorflow.examples.tutorials.mnist import input_data
        self.data = input_data.read_data_sets(self.ds_path, one_hot=True)  # download MNIST
        # training data
        self.train_data = self.data.train
        self.train_images = self.train_data.images
        self.train_labels = self.train_data.labels
        self.valid_images, self.valid_labels = None, None
        # test data
        self.test_data = self.data.test
        self.test_images = self.test_data.images
        self.test_labels = self.test_data.labels
        # split training data set into train / valid
        if self.use_split:
            self.train_images, self.valid_images, self.train_labels, self.valid_labels = \
                train_test_split(self.train_images, self.train_labels,
                                 test_size=self.split_rate,
                                 random_state=self.random_state)
class CiFarDataSet:
    """CIFAR-10 / CIFAR-100 DataSet loader (from pickled batch files)."""

    @staticmethod
    def unpickle(file):
        """Load one pickled CIFAR batch file (keys are bytes)."""
        import pickle as p
        # WARN: Only for python3, NOT FOR python2
        with open(file, 'rb') as f:
            return p.load(f, encoding='bytes')

    def __init__(self, height=32, width=32, channel=3,
                 use_split=False, split_rate=0.2, random_state=42, ds_name="cifar-10", ds_path=None):
        """
        # General Settings
        :param height: input image height, default 32
        :param width: input image width, default 32
        :param channel: input image channel, default 3 (RGB)
        - in case of CIFAR, image size is 32 x 32 x 3 (HWC).
        # Pre-Processing Option
        :param use_split: split the training set into train/valid, default False
        :param split_rate: image split rate (into train & test), default 0.2
        :param random_state: random seed for shuffling, default 42
        # DataSet Option
        :param ds_name: DataSet's name, default cifar-10
        :param ds_path: DataSet's path, default None
        :raises AssertionError: when ``ds_path`` is missing
        :raises NotImplementedError: for an unknown ``ds_name``
        """
        self.height = height
        self.width = width
        self.channel = channel
        self.use_split = use_split
        self.split_rate = split_rate
        self.random_state = random_state
        self.ds_name = ds_name
        self.ds_path = ds_path   # DataSet path
        self.n_classes = 10      # number of classes, set by the loader below
        self.train_images = None
        self.valid_images = None
        self.test_images = None
        self.train_labels = None
        self.valid_labels = None
        self.test_labels = None
        if not self.ds_path:
            raise AssertionError("[-] CIFAR10/100 DataSets' Path is required!")
        if self.ds_name == "cifar-10":
            self.cifar_10()     # loading Cifar-10
        elif self.ds_name == "cifar-100":
            self.cifar_100()    # loading Cifar-100
        else:
            raise NotImplementedError("[-] Only 'cifar-10' or 'cifar-100'")

    def _to_images(self, data):
        """Reshape flat CIFAR rows (N, H*W*C) into (N, H, W, C) images.

        order='F' + swapaxes reproduces CIFAR's channel-major pixel layout.
        """
        return np.swapaxes(data.reshape([-1,
                                         self.height,
                                         self.width,
                                         self.channel], order='F'), 1, 2)

    def _finalize(self, train_images, train_labels, test_images, test_labels):
        """Optionally split off a validation set and one-hot encode all labels."""
        # split training data set into train / val
        if self.use_split:
            train_images, valid_images, train_labels, valid_labels = \
                train_test_split(train_images, train_labels,
                                 test_size=self.split_rate,
                                 random_state=self.random_state)
            self.valid_images = valid_images
            self.valid_labels = one_hot(valid_labels, self.n_classes)
        self.train_images = train_images
        self.test_images = test_images
        self.train_labels = one_hot(train_labels, self.n_classes)
        self.test_labels = one_hot(test_labels, self.n_classes)

    def cifar_10(self):
        """Load the five CIFAR-10 train batches and the test batch."""
        self.n_classes = 10  # labels
        batches = [self.unpickle("{0}/data_batch_{1}".format(self.ds_path, i))
                   for i in range(1, 6)]
        # training data & label
        train_data = np.concatenate([b[b'data'] for b in batches], axis=0)
        train_labels = np.concatenate([b[b'labels'] for b in batches], axis=0)
        # test data & label
        test_batch = self.unpickle("{0}/test_batch".format(self.ds_path))
        test_data = test_batch[b'data']
        test_labels = np.array(test_batch[b'labels'])
        # image size : 32x32x3
        self._finalize(self._to_images(train_data), train_labels,
                       self._to_images(test_data), test_labels)

    def cifar_100(self):
        """Load the CIFAR-100 train/test batches (fine labels)."""
        self.n_classes = 100  # labels
        # training data & label
        train_batch = self.unpickle("{0}/train".format(self.ds_path))
        train_data = np.asarray(train_batch[b'data'])
        train_labels = np.asarray(train_batch[b'fine_labels'])
        # test data & label
        test_batch = self.unpickle("{0}/test".format(self.ds_path))
        test_data = np.asarray(test_batch[b'data'])
        test_labels = np.asarray(test_batch[b'fine_labels'])
        # image size : 32x32x3
        self._finalize(self._to_images(train_data), train_labels,
                       self._to_images(test_data), test_labels)
class CelebADataSet:
    """
    This Class for CelebA & CelebA-HQ DataSets.
    - saving images as .h5 file for more faster loading.
    - Actually, CelebA-HQ DataSet is kinda encrypted. So if u wanna use it, decrypt first!
    There're a few codes that download & decrypt CelebA-HQ DataSet.
    """

    def __init__(self,
                 height=64, width=64, channel=3, attr_labels=(),
                 n_threads=30, use_split=False, split_rate=0.2, random_state=42,
                 ds_image_path=None, ds_label_path=None, ds_type="CelebA", use_img_scale=True, img_scale="-1,1",
                 use_save=False, save_type='to_h5', save_file_name=None,
                 use_concat_data=False):
        """
        # General Settings
        :param height: image height
        :param width: image width
        :param channel: image channel
        - in case of CelebA, image size is 64 x 64 x 3 (HWC)
        - in case of CelebA-HQ, image size is 1024 x 1024 x 3 (HWC)
        :param attr_labels: attributes of CelebA DataSet
        - in case of CelebA, the number of attributes is 40
        # Pre-Processing Option
        :param n_threads: the number of threads
        :param use_split: splitting train DataSet into train/val
        :param split_rate: image split rate (into train & val)
        :param random_state: random seed for shuffling, default 42
        # DataSet Settings
        :param ds_image_path: DataSet's Image Path
        :param ds_label_path: DataSet's Label Path
        :param ds_type: which DataSet is ('CelebA' or 'CelebA-HQ')
        :param use_img_scale: using img scaling?
        :param img_scale: img normalize
        :param use_save: saving into another file format
        :param save_type: file format to save
        :param save_file_name: file name to save
        :param use_concat_data: concatenate images & labels
        :raises AssertionError: on missing paths or a missing save file name
        :raises NotImplementedError: for an unknown ``ds_type``
        :raises FileNotFoundError: when CelebA-HQ is still encrypted (.dat)
        """
        self.height = height
        self.width = width
        self.channel = channel
        '''
        # Available attributes
        [
         5_o_Clock_Shadow, Arched_Eyebrows, Attractive, Bags_Under_Eyes, Bald, Bangs, Big_Lips, Big_Nose, Black_Hair,
         Blond_Hair, Blurry, Brown_Hair, Bushy_Eyebrows, Chubby, Double_Chin, Eyeglasses, Goatee, Gray_Hair,
         Heavy_Makeup, High_Cheekbones, Male, Mouth_Slightly_Open, Mustache, Narrow_Eyes, No_Beard, Oval_Face,
         Pale_Skin, Pointy_Nose, Receding_Hairline, Rosy_Cheeks, Sideburns, Smiling, Straight_Hair, Wavy_Hair,
         Wearing_Earrings, Wearing_Hat, Wearing_Lipstick, Wearing_Necklace, Wearing_Necktie, Young
        ]
        '''
        self.attr_labels = attr_labels
        self.image_shape = (self.height, self.width, self.channel)  # (H, W, C)
        self.n_threads = n_threads
        self.use_split = use_split
        self.split_rate = split_rate
        self.random_state = random_state
        self.attr = []    # attribute names parsed from the annotation file
        self.images = []
        self.labels = {}
        # Expected DataSet's Path Example
        #   CelebA    : CelebA/    (sub-folder : Anno/..., Img/...)
        #   CelebA-HQ : CelebA-HQ/ (sub-folder : ...npy, ...png)
        #   Labels    : CelebA/Anno/...txt
        self.ds_image_path = ds_image_path
        self.ds_label_path = ds_label_path
        self.ds_type = ds_type
        self.use_img_scale = use_img_scale
        self.img_scale = img_scale
        if not (self.ds_image_path and self.ds_label_path):
            # BUG FIX: the original message contained an unfilled %s placeholder.
            raise AssertionError("[-] CelebA/CelebA-HQ DataSets' Path is required! (%s, %s)"
                                 % (self.ds_image_path, self.ds_label_path))
        if self.ds_type == "CelebA":
            self.num_images = 202599  # the number of CelebA images
        elif self.ds_type == "CelebA-HQ":
            self.num_images = 30000  # the number of CelebA-HQ images
            tmp_path = self.ds_image_path + "/imgHQ00000."
            if os.path.exists(tmp_path + "dat"):
                # .dat files mean the DataSet is still encrypted.
                raise FileNotFoundError("[-] You need to decrypt .dat file first!\n" +
                                        "[-] plz, use original PGGAN repo or"
                                        " this repo https://github.com/nperraud/download-celebA-HQ")
        else:
            # BUG FIX: ``NotImplemented`` is a constant, not an exception —
            # raising it only triggers a TypeError. Also fixed the 'muse' typo.
            raise NotImplementedError("[-] 'ds_type' must be 'CelebA' or 'CelebA-HQ'")
        self.use_save = use_save
        self.save_type = save_type
        self.save_file_name = save_file_name
        self.use_concat_data = use_concat_data
        if self.use_save and not self.save_file_name:
            raise AssertionError("[-] save-file/folder-name is required!")
        self.images = DataSetLoader(path=self.ds_image_path,
                                    size=self.image_shape,
                                    use_save=self.use_save,
                                    name=self.save_type,
                                    save_file_name=self.save_file_name,
                                    use_image_scaling=use_img_scale,
                                    image_scale=self.img_scale).raw_data  # numpy arrays
        self.labels = self.load_attr(path=self.ds_label_path)
        if self.use_concat_data:
            self.images = self.concat_data(self.images, self.labels)
        # split training data set into train / val
        if self.use_split:
            self.train_images, self.valid_images, self.train_labels, self.valid_labels = \
                train_test_split(self.images, self.labels,
                                 test_size=self.split_rate,
                                 random_state=self.random_state)

    def load_attr(self, path):
        """Load per-image attribute labels from a CelebA-style annotation file.

        File layout: line 1 = image count, line 2 = space-separated attribute
        names, remaining lines = "<image_name> <v1> <v2> ..." with +1/-1 flags.

        :param path: path to the attribute annotation text file
        :return: np.ndarray of shape (num_images, len(self.attr_labels));
            1.0 where the attribute flag is +1, 0.0 otherwise
        """
        with open(path, 'r') as f:
            img_attr = []
            self.num_images = int(f.readline().strip())
            self.attr = (f.readline().strip()).split(' ')
            print("[*] the number of images : %d" % self.num_images)
            print("[*] the number of attributes : %d/%d" % (len(self.attr_labels), len(self.attr)))
            # PERF FIX: hoist the attribute-name -> column lookups out of the
            # per-row loop; the original re-ran list.index() for every row.
            selected = [self.attr.index(x) for x in self.attr_labels]
            for fn in f.readlines():
                row = fn.strip().split()
                # row[0] is the image file name; the rest are +1/-1 flags.
                attr = [int(x) for x in row[1:]]
                # one-hot labeling: +1 -> 1.0, anything else (-1) -> 0.0
                tmp = [1. if attr[i] == 1 else 0. for i in selected]
                img_attr.append(tmp)
        return np.asarray(img_attr)

    def concat_data(self, img, label):
        """Append each image's label vector to every one of its pixels.

        :param img: image batch of shape (N, H, W, C)
        :param label: label batch of shape (N, n_attrs)
        :return: np.ndarray of shape (N, H, W, C + n_attrs)
        """
        label = np.tile(np.reshape(label, [-1, 1, 1, len(self.attr_labels)]), [1, self.height, self.width, 1])
        return np.concatenate([img, label], axis=3)
class Pix2PixDataSet:
    """Loader for pix2pix / CycleGAN style image-to-image DataSets.

    DataSets come in two folder layouts: "single grid" (trainA/trainB,
    loaded here) and "double grid" (train/val, not implemented yet).
    """

    def __init__(self, height=64, width=64, channel=3,
                 use_split=False, split_rate=0.15, random_state=42, n_threads=8,
                 ds_path=None, ds_name=None, use_save=False, save_type='to_h5', save_file_name=None):
        """
        # General Settings
        :param height: image height, default 64
        :param width: image width, default 64
        :param channel: image channel, default 3 (RGB)
        # Pre-Processing Option
        :param use_split: using DataSet split, default False
        :param split_rate: image split rate (into train & test), default 0.15
        :param random_state: random seed for shuffling, default 42
        :param n_threads: the number of threads for multi-threading, default 8
        # DataSet Option
        :param ds_path: DataSet's Path (e.g. pix2pix/...), default None
        :param ds_name: DataSet's Name (e.g. apple2orange), default None
        :param use_save: saving into another file format
        :param save_type: file format to save
        :param save_file_name: file name to save
        """
        self.height, self.width, self.channel = height, width, channel
        self.image_shape = (self.height, self.width, self.channel)
        self.use_split = use_split
        self.split_rate = split_rate
        self.random_state = random_state
        self.n_threads = n_threads  # tune this for your own system
        self.ds_path = ds_path
        self.ds_name = ds_name
        # single grid : testA, testB, (trainA, trainB)
        # double grid : train, val, (test, sample)
        # NOTE(review): 'ukivoe2photo' looks like a typo for the upstream
        # CycleGAN DataSet name 'ukiyoe2photo' — confirm before changing.
        self.ds_single_grid = ['apple2orange', 'horse2zebra', 'monet2photo', 'summer2winter_yosemite', 'vangogh2photo',
                               'ae_photos', 'cezanne2photo', 'ukivoe2photo', 'iphone2dslr_flower']
        self.ds_double_grid = ['cityscapes', 'edges2handbags', 'edges2shoes', 'facades', 'maps']
        # Single grid DataSet - the number of images
        self.n_sg_images_a = 400
        self.n_sg_images_b = 6287
        # Double grid DataSet - the number of images
        self.n_dg_images_a = 0
        self.n_dg_images_b = 0
        self.use_save = use_save
        self.save_type = save_type
        self.save_file_name = save_file_name
        if self.use_save and not self.save_file_name:
            raise AssertionError("[-] save-file/folder-name is required!")
        if self.ds_name in self.ds_single_grid:
            def _load_grid(sub_dir):
                # Shared loader for the trainA / trainB image folders.
                return DataSetLoader(path="%s/%s/%s/" % (self.ds_path, self.ds_name, sub_dir),
                                     size=self.image_shape,
                                     use_save=self.use_save,
                                     name=self.save_type,
                                     save_file_name=self.save_file_name,
                                     use_image_scaling=True,
                                     image_scale='0,1').raw_data  # numpy arrays
            self.images_a = _load_grid("trainA")
            self.images_b = _load_grid("trainB")
            self.n_images_a = self.n_sg_images_a
            self.n_images_b = self.n_sg_images_b
        elif self.ds_name in self.ds_double_grid:
            # To-Do: implement the double-grid (train/val) layout
            self.n_images_a = self.n_dg_images_a
            self.n_images_b = self.n_dg_images_b
        else:
            raise NotImplementedError("[-] Not Implemented yet")
class ImageNetDataSet:
    """Placeholder for an ImageNet DataSet loader (not implemented yet)."""

    def __init__(self):
        # Intentionally empty; loading logic is yet to be written.
        pass
class Div2KDataSet:
    """DIV2K super-resolution DataSet loader (paired HR / LR images)."""

    def __init__(self, hr_height=384, hr_width=384, lr_height=96, lr_width=96, channel=3,
                 use_split=False, split_rate=0.1, random_state=42, n_threads=8,
                 ds_path=None, ds_name=None, use_img_scale=True,
                 ds_hr_path=None, ds_lr_path=None,
                 use_save=False, save_type='to_h5', save_file_name=None):
        """
        # General Settings
        :param hr_height: input HR image height, default 384
        :param hr_width: input HR image width, default 384
        :param lr_height: input LR image height, default 96
        :param lr_width: input LR image width, default 96
        :param channel: input image channel, default 3 (RGB)
        - in case of Div2K - ds x4, image size is 384 x 384 x 3 (HWC).
        # Pre-Processing Option
        :param use_split: split the DataSet into train/val, default False
        :param split_rate: image split rate (into train & test), default 0.1
        :param random_state: random seed for shuffling, default 42
        :param n_threads: the number of threads for multi-threading, default 8
        # DataSet Option
        :param ds_path: DataSet's root Path (e.g. div2k/...), default None
        :param ds_name: DataSet's Name (e.g. X4), default None
        :param use_img_scale: using img scaling?
        :param ds_hr_path: DataSet High Resolution path
        :param ds_lr_path: DataSet Low Resolution path
        :param use_save: saving into another file format
        :param save_type: file format to save
        :param save_file_name: file name to save
        :raises AssertionError: when neither ds_path nor both HR/LR paths
            are given, or when use_save is set without a save_file_name
        """
        self.hr_height = hr_height
        self.hr_width = hr_width
        self.lr_height = lr_height
        self.lr_width = lr_width
        self.channel = channel
        self.hr_shape = (self.hr_height, self.hr_width, self.channel)
        self.lr_shape = (self.lr_height, self.lr_width, self.channel)
        self.use_split = use_split
        self.split_rate = split_rate
        self.random_state = random_state
        self.num_threads = n_threads  # change this value to the fitted value for ur system
        self.ds_path = ds_path
        self.ds_name = ds_name
        self.ds_hr_path = ds_hr_path
        self.ds_lr_path = ds_lr_path
        # Either a root ds_path or both explicit HR/LR paths must be given.
        if not self.ds_path and not (self.ds_hr_path and self.ds_lr_path):
            raise AssertionError("[-] DataSet's path is required!")
        self.use_save = use_save
        self.save_type = save_type
        self.save_file_name = save_file_name
        if self.use_save:
            if not self.save_file_name:
                raise AssertionError("[-] save-file/folder-name is required!")
        else:
            self.save_file_name = ""
        self.n_images = 800       # DIV2K train split size
        self.n_images_val = 100   # DIV2K validation split size
        self.use_img_scaling = use_img_scale
        if self.ds_path:  # derive HR/LR folders from the root path
            self.ds_hr_path = self.ds_path + "/DIV2K_train_HR/"
            self.ds_lr_path = self.ds_path + "/DIV2K_train_LR_bicubic/" + self.ds_name + "/"
        self.hr_images = DataSetLoader(path=self.ds_hr_path,
                                       size=self.hr_shape,
                                       use_save=self.use_save,
                                       name=self.save_type,
                                       save_file_name=self.save_file_name + "-hr.h5",
                                       use_image_scaling=self.use_img_scaling,
                                       # BUG FIX: was "-1.1" — img_scaling only accepts
                                       # '0,1' or '-1,1', so HR loading always raised
                                       # ValueError whenever scaling was enabled.
                                       image_scale="-1,1",
                                       img_save_method=cv2.INTER_LINEAR).raw_data  # numpy arrays
        self.lr_images = DataSetLoader(path=self.ds_lr_path,
                                       size=self.lr_shape,
                                       use_save=self.use_save,
                                       name=self.save_type,
                                       save_file_name=self.save_file_name + "-lr.h5",
                                       use_image_scaling=self.use_img_scaling,
                                       image_scale="0,1",
                                       img_save_method=cv2.INTER_CUBIC).raw_data  # numpy arrays
class UrbanSoundDataSet:
    """Placeholder for an UrbanSound audio dataset loader — not implemented yet."""
    def __init__(self):
        pass
class DataIterator:
    """Sequential mini-batch iterator over numpy arrays.

    Serves batches of ``batch_size`` rows from ``x`` (and ``y`` unless
    ``label_off``). When the read pointer would run past the data, both
    arrays are reshuffled with the same permutation and serving restarts
    from the beginning.
    """

    def __init__(self, x, y, batch_size, label_off=False):
        self.x = x
        self.label_off = label_off
        if not self.label_off:
            self.y = y
        self.batch_size = batch_size
        self.num_examples = x.shape[0]
        self.num_batches = self.num_examples // batch_size
        self.pointer = 0
        # A batch must fit inside the dataset.
        assert (self.batch_size <= self.num_examples)

    def next_batch(self):
        """Return the next batch; reshuffle and wrap when data is exhausted."""
        begin = self.pointer
        self.pointer += self.batch_size
        if self.pointer > self.num_examples:
            # Epoch boundary: permute x (and y) identically, start over.
            order = np.arange(self.num_examples)
            np.random.shuffle(order)
            self.x = self.x[order]
            if not self.label_off:
                self.y = self.y[order]
            begin = 0
            self.pointer = self.batch_size
        finish = self.pointer
        if self.label_off:
            return self.x[begin:finish]
        return self.x[begin:finish], self.y[begin:finish]

    def iterate(self):
        """Yield one full epoch of batches."""
        for _ in range(self.num_batches):
            yield self.next_batch()
|
from boto.exception import BotoServerError
from collections import OrderedDict
from decimal import Decimal
from django.conf import settings as django_settings
from django.contrib.gis.geoip2 import GeoIP2
from django.core.mail import send_mail
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.db.models import F
from django.db.models import Sum
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils import timezone
from docx import Document
from docx.enum.text import WD_ALIGN_PARAGRAPH
from faker import Faker
from functools import reduce
from import_export import widgets
from hashlib import md5
from io import BytesIO
from io import StringIO
from lxml import etree
from matplotlib.dates import DateFormatter
from matplotlib.dates import date2num
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from operator import or_ as OR
# Shared fake-data generator (fallback usernames and company names).
fake = Faker()
# Models whose index views may exclude rows flagged hidden=True.
EXCLUDE_MODELS = ('note', 'project')
# Per-model URL names as (view, edit, index); '' where no such page exists.
URL_NAMES = {
    'Settings App': ('settings_app', 'settings_app_edit', ''), # custom meta
    'Settings Company': ('settings_company', 'settings_company_edit', ''),
    'Settings Contract': ('settings_contract', 'settings_contract_edit', ''),
    'client': ('client_view', 'client_edit', 'client_index'),
    'contact': ('contact_view', 'contact_edit', 'contact_index'),
    'contract': ('contract_view', 'contract_edit', 'contract_index'),
    'estimate': ('estimate_view', 'estimate_edit', 'estimate_index'),
    'file': ('file_view', 'file_edit', 'file_index'),
    'invoice': ('invoice_view', 'invoice_edit', 'invoice_index'),
    'log': ('log_view', 'log_edit', 'log_index'),
    'newsletter': ('newsletter_view', 'newsletter_edit', 'newsletter_index'),
    'note': ('note_view', 'note_edit', 'note_index'),
    'profile': ('user_view', 'user_edit', 'user_index'),
    'project': ('project_view', 'project_edit', 'project_index'),
    'proposal': ('proposal_view', 'proposal_edit', 'proposal_index'),
    'report': ('report_view', 'report_edit', 'report_index'),
    'service': ('', 'service_edit', ''),
    'task': ('task_view', 'task_edit', 'task_index'),
    'time': ('time_view', 'time_edit', 'time_index'),
    'user': ('user_view', 'user_edit', 'user_index'),
}
class BooleanWidget(widgets.Widget):
    """
    Import/export widget mapping the literal string 'Yes' to True;
    every other value becomes False.
    """
    def clean(self, value):
        return value == 'Yes'
class DecimalWidget(widgets.Widget):
    """
    Import/export widget parsing comma-grouped number strings
    (e.g. '1,234.56') into Decimal; empty values become Decimal(0).
    """
    def clean(self, value):
        if not value:
            return Decimal(0)
        return Decimal(value.replace(',', ''))
class UserWidget(widgets.Widget):
    """
    Pass-through widget: returns the imported value unchanged.
    """
    def clean(self, value):
        return value
def edit(request, **kwargs):
    """Shared create/edit view for every model in the app.

    Dispatches on the keyword arguments supplied by each URL config:
    ``model``/``form_model`` select what is edited, ``pk`` selects new
    (None) vs existing, and the remaining ``*_model`` kwargs are the
    sibling models needed for relationships, totals and mail.

    POST handling covers: copy/delete buttons, checkbox toggles,
    marking invoice times as invoiced, normal form save, and — via the
    AttributeError fallback — composing and sending mail.
    Returns a redirect on success or a rendered edit template.
    """
    context = {}
    obj = None
    app_settings_model = kwargs.get('app_settings_model')
    client_model = kwargs.get('client_model')
    company_model = kwargs.get('company_model')
    contact_model = kwargs.get('contact_model')
    estimate_model = kwargs.get('estimate_model')
    form_model = kwargs.get('form_model')
    invoice_model = kwargs.get('invoice_model')
    model = kwargs.get('model')
    note_model = kwargs.get('note_model')
    pk = kwargs.get('pk')
    project_model = kwargs.get('project_model')
    user_model = kwargs.get('user_model')
    if model:
        model_name = model._meta.verbose_name
        context['active_nav'] = model_name
    if pk is None: # New or mail
        form = get_form(
            client_model=client_model,
            form_model=form_model,
            invoice_model=invoice_model,
            model=model,
            user_model=user_model,
            request=request)
    else: # Existing
        if model_name == 'profile':
            model = user_model # Swap profile_model with user_model
            # for create_user
        obj = get_object_or_404(model, pk=pk)
        form = get_form(form_model=form_model, obj=obj)
    if request.method == 'POST':
        refer = request.META['HTTP_REFERER']
        if pk is None:
            form = form_model(request.POST)
        else:
            # Copy or delete
            copy = request.POST.get('copy')
            delete = request.POST.get('delete')
            if copy:
                return obj_copy(obj)
            if delete:
                return obj_remove(obj)
            # Check boxes
            query_checkbox = get_query(request, 'checkbox')
            if query_checkbox['condition']:
                return set_check_boxes(obj, query_checkbox, refer,
                                       app_settings_model)
            # Invoice sent
            invoice_sent = request.POST.get('invoice_sent')
            if invoice_sent:
                # Mark every attached time entry as invoiced.
                for time in obj.time_set.all():
                    time.invoiced = True
                    time.save()
            form = form_model(request.POST, instance=obj)
        if form.is_valid():
            try:
                obj = form.save()
                # set_relationship returns a user only when a profile was
                # created; otherwise None and we redirect to the object.
                user = set_relationship(
                    obj,
                    request,
                    client_model=client_model,
                    company_model=company_model,
                    estimate_model=estimate_model,
                    invoice_model=invoice_model,
                    model=model,
                    project_model=project_model,
                    user_model=user_model)
                if user:
                    return obj_edit(user.profile, pk=user.pk)
                else:
                    return obj_edit(obj, pk=pk)
            except AttributeError: # Mail
                # NOTE(review): mail forms have no .save(); the raised
                # AttributeError routes them here — confirm this is the
                # intended dispatch mechanism.
                obj = mail_obj(
                    request,
                    contact_model=contact_model,
                    estimate_model=estimate_model,
                    note_model=note_model)
                recipients = mail_recipients(obj)
                for first_name, email_address in recipients:
                    mail_send(
                        **mail_compose(
                            obj,
                            form=form,
                            first_name=first_name,
                            mail_to=email_address,
                            request=request))
                # if status:
                #     messages.add_message(request, messages.SUCCESS,
                #                          'Mail sent to %s!' %
                #                          ', '.join(recipients))
                # else:
                #     messages.add_message(request, messages.WARNING,
                #                          'Mail not sent to %s!' %
                #                          ', '.join(recipients))
    context['form'] = form
    context['is_staff'] = request.user.is_staff
    context['item'] = obj
    context['pk'] = pk
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    if invoice_model: # Dashboard totals for reporting
        invoices = invoice_model.objects.filter(last_payment_date=None)
        gross, net = get_invoice_totals(invoices)
        context['gross'] = gross
        context['net'] = net
    elif contact_model:
        model_name = contact_model._meta.verbose_name
    elif note_model:
        model_name = note_model._meta.verbose_name
    template_name = get_template_and_url(
        model_name=model_name, page_type='edit')
    return render(request, template_name, context)
def generate_doc(contract):
    """
    Build a .docx agreement document from *contract*.

    Emits a centered title plus client name/address headings, then
    converts the contract's HTML body: <h2> tags become level-2
    headings and <p> tags become paragraphs.

    https://stackoverflow.com/a/24122313/185820
    """
    document = Document()
    # Head
    task = contract.task if contract.task else ''
    title = document.add_heading(
        'ACLARK.NET, LLC %s AGREEMENT PREPARED FOR:' % task, level=1)
    title.alignment = WD_ALIGN_PARAGRAPH.CENTER
    if contract.client:
        for text in (contract.client.name, contract.client.address):
            heading = document.add_heading(text, level=1)
            heading.alignment = WD_ALIGN_PARAGRAPH.CENTER
    html_parser = etree.HTMLParser() # http://lxml.de/parsing.html
    tree = etree.parse(StringIO(contract.body), html_parser)
    # Body
    for element in tree.iter():
        if element.tag == 'h2':
            document.add_heading(element.text, level=2)
        elif element.tag == 'p':
            document.add_paragraph(element.text)
    return document
def get_client_city(request):
    """Geo-locate the requesting client's city via GeoIP2 (None when no IP)."""
    ip_address = get_client_ip(request)
    geo = GeoIP2()
    if not ip_address:
        return None
    return geo.city(ip_address)
# https://stackoverflow.com/a/4581997/185820
def get_client_ip(request):
    """Return the client IP forwarded by the proxy, or None when absent."""
    meta = request.META
    return meta.get('HTTP_X_REAL_IP')
def get_company_name(company):
    """Return an upper-cased, underscore-sanitized company name.

    Falls back to random text when ``company.name`` is empty.

    Bug fix: the sanitizing passes previously read ``company.name``
    again instead of the local ``company_name``, which discarded the
    fake-text fallback and raised AttributeError when ``name`` was None.
    """
    if company.name:
        company_name = company.name
    else:
        company_name = fake.text()
    # Ghetto sanitation: squash punctuation and spaces to underscores,
    # in the same order as before (', ' must go before single chars).
    for token in ('.', ', ', '#', '-', '(', ')', ' '):
        company_name = company_name.replace(token, '_')
    return company_name.upper()
def get_fields(items):
    """Attach an OrderedDict of concrete field name/value pairs to each item.

    Relational fields are skipped; each item gains a ``fields`` attribute
    used by the generic table templates. Returns the same list, mutated.
    """
    for entry in items:
        entry.fields = OrderedDict(
            (field.name, getattr(entry, field.name))
            for field in entry._meta._get_fields()
            if not field.is_relation)
    return items
def get_form(**kwargs):
    """
    Return appropriate form based on new or edit.

    Existing objects get a bound ModelForm (notes pre-populate their
    tags); new objects may be pre-populated per model: reports get the
    current gross/net totals, contacts inherit email/client from the
    ?user= / ?client= query strings; everything else starts from
    ``initial`` (send_html on, for the mail forms).
    """
    client_model = kwargs.get('client_model')
    form_model = kwargs.get('form_model')
    invoice_model = kwargs.get('invoice_model')
    model = kwargs.get('model')
    obj = kwargs.get('obj')
    request = kwargs.get('request')
    user_model = kwargs.get('user_model')
    query_client = None
    query_user = None
    if request:
        query_user = get_query(request, 'user')
        query_client = get_query(request, 'client')
    if obj: # Existing object
        model_name = obj._meta.verbose_name
        if model_name == 'note': # Populate form with tags already set
            form = form_model(initial={'tags': obj.tags.all()}, instance=obj)
        else:
            form = form_model(instance=obj)
    else: # New object or mail
        initial = {'send_html': True}
        if model:
            model_name = model._meta.verbose_name
            if model_name == 'report' and invoice_model: # Populate new report
                # with gross, net.
                invoices = invoice_model.objects.filter(last_payment_date=None)
                gross, net = get_invoice_totals(invoices)
                obj = model(gross=gross, net=net)
                form = form_model(instance=obj)
            elif model_name == 'contact': # Populate new contact
                # with appropriate fields
                if query_user:
                    user = get_object_or_404(user_model, pk=query_user)
                    obj = model(email=user.email)
                elif query_client:
                    client = get_object_or_404(client_model, pk=query_client)
                    obj = model(client=client)
                form = form_model(instance=obj)
            else:
                form = form_model(initial=initial)
        else:
            form = form_model(initial=initial)
    return form
def get_index_items(**kwargs):
    """
    Build the template context for a model's index (listing) page.

    POST requests are treated as searches and delegated to
    get_search_results; otherwise items are fetched (optionally
    excluding hidden rows), ordered, hidden from anonymous users,
    and paginated when the ?paginated query string allows it.
    """
    context = {}
    app_settings_model = kwargs.get('app_settings_model')
    columns_visible = kwargs.get('columns_visible')
    company_model = kwargs.get('company_model')
    model = kwargs.get('model')
    order_by = kwargs.get('order_by')
    page_size = kwargs.get('page_size')
    request = kwargs.get('request')
    search_fields = kwargs.get('search_fields')
    show_search = kwargs.get('show_search')
    model_name = model._meta.verbose_name
    edit_url = '%s_edit' % model_name
    if columns_visible:
        context['columns_visible'] = columns_visible
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    page = get_query(request, 'page')
    paginated = get_query(request, 'paginated')
    search = get_query(request, 'search')
    if request:
        context['is_staff'] = request.user.is_staff
        # Search is easy
        if request.method == 'POST':
            if search == u'': # Empty search returns none
                context['show_search'] = True
                return context
            else:
                return get_search_results(
                    context,
                    model,
                    search_fields,
                    search,
                    app_settings_model=app_settings_model,
                    edit_url=edit_url,
                    request=request)
    # Not a search
    if model_name in EXCLUDE_MODELS and get_setting(
            request, app_settings_model, 'exclude_hidden'):
        items = model.objects.exclude(hidden=True)
    else:
        items = model.objects.all()
    # Order items (http://stackoverflow.com/a/20257999/185820)
    if order_by is not None:
        items = items.order_by(*order_by)
    # Don't show items to anon
    if not request.user.is_authenticated:
        items = []
    # Per model extras
    if model_name == 'note':
        context['note_info'] = get_note_info(model)
    elif model_name == 'time':
        context['total_hours'] = get_total_hours(items)
    # Paginate if paginated
    if paginated:
        page_size = get_setting(
            request, app_settings_model, 'page_size', page_size=page_size)
        items = paginate(items, page, page_size)
    context['edit_url'] = edit_url
    context['icon_size'] = get_setting(request, app_settings_model,
                                       'icon_size')
    context['icon_color'] = get_setting(request, app_settings_model,
                                        'icon_color')
    context['page'] = page
    context['paginated'] = paginated
    context['show_search'] = show_search
    items = set_items_name(model_name, items=items)
    context['items'] = items
    context['active_nav'] = model_name
    return context
def get_invoice_totals(invoices):
    """Return (gross, net) across *invoices*; net is gross minus cost of goods."""
    gross = cogs = 0
    for invoice in invoices:
        if invoice.amount:
            gross += float(invoice.amount)
        if invoice.cog:
            cogs += float(invoice.cog)
    return gross, gross - cogs
def get_note_info(note_model):
    """Return dashboard counts of notes by state.

    Uses queryset ``.count()`` (a SQL COUNT) instead of ``len(...)``,
    which fetched every row into memory just to count it.
    """
    note_info = {}
    active = note_model.objects.filter(active=True).count()
    inactive = note_model.objects.filter(active=False).count()
    hidden = note_model.objects.filter(hidden=True).count()
    # NOTE(review): this equals "inactive and not hidden" only if every
    # hidden note is also inactive — confirm intent.
    not_hidden = inactive - hidden
    total = note_model.objects.all().count()
    note_info['active'] = active
    note_info['inactive'] = inactive
    note_info['hidden'] = hidden
    note_info['not_hidden'] = not_hidden
    note_info['total'] = total
    return note_info
def get_page_items(**kwargs):
    """Build the template context for a model's detail page (or the home
    dashboard).

    Dispatches on the verbose name of ``model``/``obj`` supplied by each
    URL config; the various ``*_model`` kwargs provide the sibling models
    each page needs. Returns a context dict for rendering.
    """
    app_settings_model = kwargs.get('app_settings_model')
    company_model = kwargs.get('company_model')
    columns_visible = kwargs.get('columns_visible')
    contact_model = kwargs.get('contact_model')
    contract_model = kwargs.get('contract_model')
    dashboard_item_model = kwargs.get('dashboard_item_model')
    estimate_model = kwargs.get('estimate_model')
    invoice_model = kwargs.get('invoice_model')
    model = kwargs.get('model')
    note_model = kwargs.get('note_model')
    obj = kwargs.get('obj')
    profile_model = kwargs.get('profile_model')
    project_model = kwargs.get('project_model')
    report_model = kwargs.get('report_model')
    request = kwargs.get('request')
    order_by = kwargs.get('order_by')
    pk = kwargs.get('pk')
    time_model = kwargs.get('time_model')
    user_model = kwargs.get('user_model')
    context = {}
    items = None
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    if columns_visible:
        context['columns_visible'] = columns_visible
    if model or obj:
        if model:
            model_name = model._meta.verbose_name
        elif obj:
            model_name = obj._meta.verbose_name
        context['model_name'] = model_name
        context['active_nav'] = model_name
        context['edit_url'] = '%s_edit' % model_name
        if model_name == 'Settings App':
            app_settings = app_settings_model.get_solo()
            context['items'] = get_fields([app_settings, ]) # table_items.html
        elif model_name == 'Settings Company':
            company_settings = model.get_solo()
            context['items'] = get_fields([company_settings,
                                           ]) # table_items.html
        elif model_name == 'Settings Contract':
            contract_settings = model.get_solo()
            context['items'] = get_fields([contract_settings,
                                           ]) # table_items.html
        elif model_name == 'client':
            client = get_object_or_404(model, pk=pk)
            contacts = contact_model.objects.filter(client=client)
            contracts = contract_model.objects.filter(client=client)
            projects = project_model.objects.filter(client=client)
            context['contacts'] = contacts
            context['contracts'] = contracts
            context['item'] = client
            context['notes'] = client.note.all()
            context['projects'] = projects
        elif model_name == 'contact':
            contact = get_object_or_404(model, pk=pk)
            context['items'] = get_fields([contact, ]) # table_items.html
            context['item'] = contact
        elif model_name == 'contract':
            contract = get_object_or_404(model, pk=pk)
            estimate = contract.statement_of_work
            if estimate:
                # Unattached client times plus times on this estimate.
                times_client = time_model.objects.filter(
                    client=estimate.client,
                    estimate=None,
                    project=None,
                    invoiced=False,
                    invoice=None)
                times_estimate = time_model.objects.filter(estimate=estimate)
                times = times_client | times_estimate
            else:
                times = None
            context['doc_type'] = model_name
            context['item'] = contract
            context['times'] = times
        elif model_name == 'estimate': # handle obj or model
            if not obj:
                estimate = get_object_or_404(model, pk=pk)
            else:
                estimate = obj
            if not estimate.is_sow:
                doc_type = model_name
            else:
                doc_type = 'statement of work'
            if not obj:
                times_client = time_model.objects.filter(
                    client=estimate.client,
                    estimate=None,
                    project=None,
                    invoiced=False,
                    invoice=None)
                times_estimate = time_model.objects.filter(estimate=estimate)
                times = times_client | times_estimate
            else:
                times = obj.times.all()
            if order_by:
                times = times.order_by(*order_by['time'])
            times = set_invoice_totals(times, estimate=estimate)
            context['doc_type'] = doc_type
            context['entries'] = times
            context['item'] = estimate
        # NOTE(review): this `if` restarts the elif chain, so any model
        # handled above (e.g. 'client', 'estimate') also falls through to
        # the `else: # home` branch below — looks like it should be
        # `elif`; confirm before changing.
        if model_name == 'file':
            file_obj = get_object_or_404(model, pk=pk)
            context['doc_type'] = model_name
            context['item'] = file_obj
        elif model_name == 'invoice':
            invoice = get_object_or_404(model, pk=pk)
            times = get_times_for_obj(invoice, time_model)
            times = times.order_by(*order_by['time'])
            times = set_invoice_totals(times, invoice=invoice)
            last_payment_date = invoice.last_payment_date
            total_hours = get_total_hours(times)
            context['doc_type'] = model_name
            context['entries'] = times
            context['item'] = invoice
            context['invoice'] = True
            context['last_payment_date'] = last_payment_date
            context['total_hours'] = total_hours
        elif model_name == 'newsletter':
            newsletter = get_object_or_404(model, pk=pk)
            context['doc_type'] = model_name
            context['item'] = newsletter
        elif model_name == 'note':
            note = get_object_or_404(model, pk=pk)
            context['item'] = note
        elif model_name == 'project':
            project = get_object_or_404(model, pk=pk)
            contacts = contact_model.objects.all()
            items = set_items_name('contact', items=contacts)
            estimates = estimate_model.objects.filter(project=project)
            items = set_items_name('estimate', items=estimates, _items=items)
            invoices = invoice_model.objects.filter(project=project)
            invoices = invoices.order_by('-issue_date')
            items = set_items_name('invoice', items=invoices, _items=items)
            times = get_times_for_obj(project, time_model)
            times = times.order_by(*order_by['time'])
            items = set_items_name('time', items=times, _items=items)
            users = user_model.objects.filter(project=project)
            items = set_items_name('user', items=users, _items=items)
            context['item'] = project
            context['items'] = items
        elif model_name == 'proposal':
            proposal = get_object_or_404(model, pk=pk)
            context['doc_type'] = model_name
            context['item'] = proposal
        elif model_name == 'report':
            report = get_object_or_404(model, pk=pk)
            # NOTE(review): this aggregate result is computed but never
            # used — `reports` is not read afterwards; confirm intent.
            reports = model.objects.filter(active=True)
            reports = reports.aggregate(
                gross=Sum(F('gross')), net=Sum(F('net')))
            context['cost'] = report.gross - report.net
            context['item'] = report
        elif model_name == 'task':
            task = get_object_or_404(model, pk=pk)
            context['item'] = task
        elif model_name == 'time':
            time_entry = get_object_or_404(model, pk=pk)
            context['item'] = time_entry
        elif model_name == 'user':
            user = get_object_or_404(model, pk=pk)
            # Ensure a profile row exists before rendering it.
            profile_model.objects.get_or_create(user=user)
            projects = project_model.objects.filter(
                team__in=[user, ], active=True)
            projects = projects.order_by(*order_by['project'])
            times = time_model.objects.filter(
                estimate=None, invoiced=False, user=user)
            times = times.order_by(*order_by['time'])
            contacts = contact_model.objects.all()
            context['item'] = user
            context['items'] = get_fields([user.profile, ]) # table_items.html
            context['projects'] = projects
            context['times'] = times
        else: # home
            if request:
                if request.user.is_authenticated:
                    # Dashboard
                    dashboard_choices = get_setting(request, app_settings_model,
                                                    'dashboard_choices')
                    dashboard_items = [
                        i.title.lower() for i in dashboard_item_model.objects.all()
                    ]
                    context['dashboard_choices'] = dashboard_choices
                    context['dashboard_items'] = dashboard_items
                    # Items
                    invoices = invoice_model.objects.filter(last_payment_date=None)
                    invoices = invoices.order_by(*order_by['invoice'])
                    items = set_items_name('invoice', items=invoices)
                    notes = note_model.objects.filter(active=True, hidden=False)
                    notes = notes.order_by(*order_by['note'])[:10]
                    items = set_items_name('note', items=notes, _items=items)
                    projects = project_model.objects.filter(
                        active=True, hidden=False)
                    projects = projects.order_by(*order_by['project'])
                    items = set_items_name('project', items=projects, _items=items)
                    times = time_model.objects.filter(
                        invoiced=False, user=request.user)
                    times = times.order_by(*order_by['time'])
                    items = set_items_name('time', items=times, _items=items)
                    # Plot
                    points = report_model.objects.filter(active=True)
                    # Totals
                    gross, net = get_invoice_totals(invoices)
                    context['gross'] = gross
                    context['invoices'] = invoices
                    context['city_data'] = get_client_city(request)
                    context['items'] = items
                    context['net'] = net
                    context['notes'] = notes
                    context['note_info'] = get_note_info(note_model)
                    context['points'] = points
                    context['projects'] = projects
                    context['times'] = times
                    total_hours = get_total_hours(times)
                    context['total_hours'] = total_hours
                    context['total_earned'] = get_total_earned(request,
                                                               total_hours)
    if request:
        context['icon_size'] = get_setting(request, app_settings_model,
                                           'icon_size')
        context['icon_color'] = get_setting(request, app_settings_model,
                                            'icon_color')
        doc = get_query(request, 'doc')
        pdf = get_query(request, 'pdf')
        context['doc'] = doc
        context['pdf'] = pdf
        context['is_staff'] = request.user.is_staff
    return context
def get_plot(request):
    """
    Render the ?points query-string data as a PNG line plot.

    Each point is (value, 'YYYY-MM-DD'); the x axis is formatted by
    month. http://stackoverflow.com/a/5515994/185820 and
    http://matplotlib.org/examples/api/date_demo.html
    """
    points = get_query(request, 'points')
    xs = [
        date2num(timezone.datetime.strptime(p[1], '%Y-%m-%d')) for p in points
    ]
    ys = [p[0] for p in points]
    figure = Figure()
    canvas = FigureCanvasAgg(figure)
    axes = figure.add_subplot(1, 1, 1)
    axes.grid(True)
    axes.plot(xs, ys)
    axes.xaxis.set_major_formatter(DateFormatter('%m'))
    # Rasterize to an in-memory PNG and hand the bytes to the browser.
    png = BytesIO()
    canvas.print_png(png)
    return HttpResponse(png.getvalue(), content_type="image/png")
def get_query(request, query):
    """
    Read and normalize a request parameter.

    Return type depends on *query*: bool for 'paginated'/'doc'/'pdf'/
    'test', str for a POSTed 'search', a list of [value, date] pairs
    for 'points', a dict for 'checkbox', and the raw GET value (default
    '') for everything else.
    """
    # Special handling for some query strings
    if query == 'paginated':
        return request.GET.get('paginated') != u'false'
    if query == 'search' and request.method == 'POST':
        return request.POST.get('search', '')
    if query == 'points': # plot
        raw = request.GET.get('points')
        tokens = raw.split(' ') if raw else []
        return [token.split(',') for token in tokens]
    if query == 'checkbox':
        active = request.POST.get('checkbox-active')
        subscribe = request.POST.get('checkbox-subscribe')
        return {
            'active': active,
            'subscribe': subscribe,
            # True if any toggle was submitted at all
            'condition': active in ('on', 'off') or subscribe in ('on', 'off'),
        }
    if query in ('doc', 'pdf', 'test'):
        return bool(request.GET.get(query))
    # Normal handling
    return request.GET.get(query, '')
def get_search_results(context,
                       model,
                       search_fields,
                       search,
                       app_settings_model=None,
                       edit_url=None,
                       request=None):
    """Populate *context* with items matching *search*.

    Builds a case-insensitive "contains" filter per field in
    *search_fields* and ORs them together.
    """
    model_name = model._meta.verbose_name
    filters = [Q(**{field + '__icontains': search}) for field in search_fields]
    matches = model.objects.filter(reduce(OR, filters))
    context['active_nav'] = model_name
    context['edit_url'] = edit_url
    context['icon_size'] = get_setting(request, app_settings_model,
                                       'icon_size')
    context['icon_color'] = get_setting(request, app_settings_model,
                                        'icon_color')
    context['show_search'] = True
    context['items'] = set_items_name(model_name, items=matches)
    return context
def get_setting(request, app_settings_model, setting, page_size=None):
    """
    Allow user to override global setting.

    Resolution order per setting: user-profile preference (when present
    and truthy), then — for 'page_size' — the view's *page_size*
    argument, then the global app_settings singleton. Returns None for
    anonymous users or unknown settings.
    """
    if not request.user.is_authenticated:
        return
    dashboard_override = user_pref = None
    app_settings = app_settings_model.get_solo()
    if setting == 'icon_size':
        if has_profile(request.user):
            user_pref = request.user.profile.icon_size
        if user_pref:
            return user_pref
        else:
            return app_settings.icon_size
    elif setting == 'icon_color':
        if has_profile(request.user):
            user_pref = request.user.profile.icon_color
        if user_pref:
            return user_pref
        else:
            return app_settings.icon_color
    elif setting == 'page_size':
        if has_profile(request.user):
            user_pref = request.user.profile.page_size
        if user_pref:
            return user_pref
        elif page_size: # View's page_size preference
            return page_size
        else:
            return app_settings.page_size
    elif setting == 'dashboard_choices':
        dashboard_choices = app_settings.dashboard_choices
        dashboard_override = False
        if has_profile(request.user):
            dashboard_override = request.user.profile.dashboard_override
        if has_profile(request.user) and dashboard_override:
            dashboard_choices = request.user.profile.dashboard_choices
        return dashboard_choices
    elif setting == 'exclude_hidden':
        return app_settings.exclude_hidden
def get_template_and_url(**kwargs):
    """
    Resolve template and/or URL names from URL_NAMES.

    Depending on *page_type*: 'view' returns (template_name, url_name);
    'copy', 'index' and 'home' return a URL name; 'edit' returns a
    template name. Unknown page types return None.
    """
    model_name = kwargs.get('model_name')
    page_type = kwargs.get('page_type')
    if page_type == 'home':
        return 'home'
    if page_type == 'view':
        url_name = URL_NAMES[model_name][0]
        return '%s.html' % url_name, url_name
    if page_type == 'copy':
        return URL_NAMES[model_name][1]
    if page_type == 'edit':
        return '%s.html' % URL_NAMES[model_name][1]
    if page_type == 'index':
        return URL_NAMES[model_name][2]
def get_times_for_obj(obj, time_model):
    """Return the time entries attached to an invoice or project.

    :raises ValueError: for any other model. (Previously an unmatched
        model crashed with an opaque UnboundLocalError on ``times``.)
    """
    model_name = obj._meta.verbose_name
    if model_name == 'invoice':
        times = time_model.objects.filter(
            estimate=None, invoice=obj)
    elif model_name == 'project':
        times = time_model.objects.filter(
            invoiced=False, estimate=None, project=obj)
    else:
        raise ValueError(
            "get_times_for_obj: unsupported model '%s'" % model_name)
    return times
def get_total_earned(request, total_hours):
    """Return hours * the user's profile rate, formatted to two decimals.

    Falls back to '0.00' when the user has no profile or no rate.
    """
    earned = 0
    if has_profile(request.user):
        rate = request.user.profile.rate
        if rate:
            earned = total_hours * rate
    return '%.2f' % earned
def get_total_hours(items):
    """Sum the `hours` field across a queryset of time entries.

    Returns None when the queryset is empty (Django Sum semantics).
    """
    aggregated = items.aggregate(hours=Sum(F('hours')))
    return aggregated['hours']
def gravatar_url(email):
    """
    MD5 hash of email address for use with Gravatar.

    Bug fix: ``hashlib.md5`` requires bytes on Python 3, so the
    lower-cased address is UTF-8 encoded before hashing (passing a str
    raised TypeError).
    """
    digest = md5(email.lower().encode('utf-8')).hexdigest()
    return django_settings.GRAVATAR_URL % digest
def has_profile(user):
    """Return True when *user* carries a related ``profile`` attribute."""
    try:
        user.profile
    except AttributeError:
        return False
    return True
def last_month():
    """
    Returns last day of last month.
    """
    start_of_this_month = timezone.now().replace(day=1)
    # One day before the 1st is always in the previous month.
    return start_of_this_month - timezone.timedelta(days=1)
def mail_compose(obj, **kwargs):
    """Build the kwargs dict consumed by mail_send for *obj*.

    Message/subject come from the contact form, a rendered estimate
    invoice, or the note body depending on obj's model. When *form* is
    absent the plain 'mail.html' template is used (management-command
    path).

    NOTE(review): if obj is none of contact/estimate/note, ``message``
    and ``subject`` are unbound and this raises UnboundLocalError —
    confirm callers only pass those three models.
    """
    context = {}
    first_name = kwargs.get('first_name')
    form = kwargs.get('form')
    mail_to = kwargs.get('mail_to')
    model_name = obj._meta.verbose_name
    if model_name == 'contact':
        message = form.cleaned_data['message']
        subject = form.cleaned_data['subject']
    elif model_name == 'estimate':
        message = render_to_string('pdf_invoice.html', get_page_items(obj=obj))
        subject = obj.subject
    elif model_name == 'note':
        message = obj.note
        subject = obj.title
    if first_name:
        # Personalize the greeting around the message body.
        message = render_to_string('first_name.html', {
            'first_name': first_name,
            'message': message,
        })
    if form: # http://stackoverflow.com/a/28476681/185820
        if 'send_html' in form.data:
            html_message = render_to_string(form.data['template'], {
                'message': message,
            })
            context['html_message'] = html_message
    else: # python manage.py send_note
        context['html_message'] = render_to_string('mail.html',
                                                   {'message': message, })
    context['mail_to'] = mail_to
    context['mail_from'] = django_settings.EMAIL_FROM
    context['message'] = message
    context['subject'] = subject
    return context
def mail_obj(request, **kwargs):
    """Look up the contact, note or estimate a mail action refers to.

    The target pk arrives in the ?contact= / ?note= / ?estimate= query
    string; the first matching (model, pk) pair wins.

    NOTE(review): if nothing matches, ``obj`` is unbound and this
    raises UnboundLocalError — confirm callers guarantee a match.
    """
    contact_model = kwargs.get('contact_model')
    estimate_model = kwargs.get('estimate_model')
    note_model = kwargs.get('note_model')
    pk_contact = get_query(request, 'contact')
    pk_estimate = get_query(request, 'estimate')
    pk_note = get_query(request, 'note')
    if contact_model and pk_contact:
        obj = contact_model.objects.get(pk=pk_contact)
    elif note_model and pk_note:
        obj = note_model.objects.get(pk=pk_note)
    elif estimate_model and pk_estimate:
        obj = estimate_model.objects.get(pk=pk_estimate)
    return obj
def mail_recipients(obj):
    """Return (first_name, email) pairs for whoever should receive *obj*.

    A contact is its own single recipient; estimates and notes go to
    all of their attached contacts. Other models yield None.
    """
    model_name = obj._meta.verbose_name
    if model_name == 'contact':
        return ((obj.first_name, obj.email), )
    if model_name in ('estimate', 'note'):
        return [(c.first_name, c.email) for c in obj.contacts.all()]
def mail_send(**kwargs):
    """Send one message via Django's send_mail.

    Returns True on success, False when Amazon SES rejects the send
    (BotoServerError); other exceptions propagate.
    """
    try:
        send_mail(
            kwargs.get('subject'),
            kwargs.get('message'),
            kwargs.get('mail_from'), (kwargs.get('mail_to'), ),
            fail_silently=False,
            html_message=kwargs.get('html_message'))
    except BotoServerError:
        return False
    return True
def obj_copy(obj):
    """Duplicate *obj* and redirect to the new copy's edit page.

    Clearing the pk before save() makes Django insert a fresh row
    (the standard model-copy idiom); obj then carries the new pk.
    """
    obj.pk = None
    obj.save()
    model_name = obj._meta.verbose_name
    url_name = get_template_and_url(model_name=model_name, page_type='copy')
    return HttpResponseRedirect(reverse(url_name, kwargs={'pk': obj.pk}))
def obj_edit(obj, pk=None):
    """Redirect to *obj*'s view page after a save.

    Singleton settings pages take no pk; profiles redirect to the
    owning user's pk; new objects (pk=None) use the pk assigned on
    save.
    """
    model_name = obj._meta.verbose_name
    template_name, url_name = get_template_and_url(
        model_name=model_name, page_type='view') # Redir to view
    # New or existing object
    kwargs = {}
    if pk: # Special cases for some objects e.g. settings, user
        if model_name == 'Settings App':
            return HttpResponseRedirect(reverse(url_name))
        elif model_name == 'Settings Company':
            return HttpResponseRedirect(reverse(url_name))
        elif model_name == 'Settings Contract':
            return HttpResponseRedirect(reverse(url_name))
        if model_name == 'profile': # Redir to user pk not profile pk
            kwargs['pk'] = obj.user.pk
        else:
            kwargs['pk'] = pk
    else: # New
        kwargs['pk'] = obj.pk
    return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
def obj_remove(obj):
    """Delete *obj*, then redirect to its index page (home for times)."""
    model_name = obj._meta.verbose_name
    # Time entries have no meaningful index destination after deletion.
    page_type = 'home' if model_name == 'time' else 'index'
    url_name = get_template_and_url(
        model_name=model_name, page_type=page_type)
    obj.delete()
    return HttpResponseRedirect(reverse(url_name))
def paginate(items, page, page_size):
    """
    Return the requested page of *items*, clamping bad page values:
    non-integer pages fall back to page 1, out-of-range pages to the
    last page. Fewer than 6 orphans are folded into the previous page.
    """
    paginator = Paginator(items, page_size, orphans=5)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)
def set_check_boxes(obj, query_checkbox, refer, app_settings_model):
    """Apply checkbox toggles to *obj*, save it and redirect back.

    *query_checkbox* is the dict from get_query(request, 'checkbox'):
    'active' and 'subscribe' each hold 'on', 'off' or None. Deactivated
    notes are auto-hidden when the app settings say so.

    Bug fix: the subscribe branch previously tested
    ``query_checkbox['active']`` (always None in that branch), so
    subscribing could never be switched on.
    """
    model_name = obj._meta.verbose_name
    if query_checkbox['active'] in ('on', 'off'): # Active
        if query_checkbox['active'] == 'on':
            obj.active = True
            obj.hidden = False
        else:
            obj.active = False
            # Auto-hide
            if model_name == 'note' and app_settings_model:
                app_settings = app_settings_model.get_solo()
                if app_settings.auto_hide:
                    obj.hidden = True
    elif query_checkbox['subscribe'] in ('on', 'off'): # Subscribe
        obj.subscribed = query_checkbox['subscribe'] == 'on'
    obj.save()
    return HttpResponseRedirect(refer)
def set_invoice_totals(times, estimate=None, invoice=None):
    """
    Set invoice, estimate and time totals.

    Each time entry gets string 'amount' (task rate * hours) and 'cog'
    (user profile rate * hours); the running sums are written back to
    the given invoice or estimate and saved.

    NOTE(review): when an entry has no task (or no user/rate), its
    time_entry_amount/time_entry_cog keep the PREVIOUS entry's value,
    which is then re-added to the totals — looks unintended; confirm
    before changing.
    """
    invoice_amount = invoice_cog = 0
    time_entry_amount = time_entry_cog = 0
    for time_entry in times:
        hours = time_entry.hours
        if time_entry.task:
            rate = time_entry.task.rate
            time_entry_amount = rate * hours
        if time_entry.user:
            rate = time_entry.user.profile.rate
            if rate:
                time_entry_cog = rate * hours
        time_entry.amount = '%.2f' % time_entry_amount
        time_entry.cog = '%.2f' % time_entry_cog
        invoice_amount += time_entry_amount
        invoice_cog += time_entry_cog
    if invoice:
        invoice.amount = '%.2f' % invoice_amount
        invoice.cog = '%.2f' % invoice_cog
        invoice.save()
    elif estimate:
        estimate.amount = '%.2f' % invoice_amount
        estimate.save()
    return times
def set_items_name(model_name, items=None, _items=None):
    """
    Share templates by returning dictionary of items e.g.
        for item in items.reports
    instead of:
        for item in reports

    Bug fix: ``_items`` previously defaulted to a mutable ``{}``, which
    Python evaluates once — every call without an explicit ``_items``
    shared (and accumulated into) the same dict across requests.
    """
    if _items is None:
        _items = {}
    _items['%ss' % model_name] = items
    return _items
def set_relationship(obj, request, **kwargs):
    """
    Sets relationships and returns None unless user is created then
    return user.

    Dispatches on ``obj``'s verbose_name and attaches the object to the
    related record named in the request's query string (client, project,
    company, estimate, invoice). The 'profile' branch creates a User and
    its profile and is the only branch that returns a value.
    """
    client_model = kwargs.get('client_model')
    company_model = kwargs.get('company_model')
    estimate_model = kwargs.get('estimate_model')
    invoice_model = kwargs.get('invoice_model')
    model = kwargs.get('model')
    project_model = kwargs.get('project_model')
    user_model = kwargs.get('user_model')
    model_name = obj._meta.verbose_name
    if model_name == 'contact':
        # Attach the contact to the client given via ?client=<pk>
        query_client = get_query(request, 'client')
        if query_client:
            client = get_object_or_404(client_model, pk=query_client)
            obj.client = client
            obj.save()
    elif model_name == 'estimate' or model_name == 'invoice':
        # Attach to the project (and its client) given via ?project=<pk>
        query_project = get_query(request, 'project')
        if query_project:
            project = get_object_or_404(project_model, pk=query_project)
            obj.client = project.client
            obj.project = project
            obj.save()
    elif model_name == 'note':
        # Notes attach to a client or to the singleton company
        query_client = get_query(request, 'client')
        query_company = get_query(request, 'company')
        if query_client:
            client = get_object_or_404(client_model, pk=query_client)
            client.note.add(obj)
            client.save()
        elif query_company:
            company = company_model.get_solo()
            company.note.add(obj)
            company.save()
    elif model_name == 'profile':
        if obj.preferred_username:
            username = obj.preferred_username
        else:
            # No username supplied: generate a placeholder (150-char cap)
            username = fake.text()[:150]
        user = user_model.objects.create_user(username=username)
        model.objects.get_or_create(user=user)  # Create profile
        return user  # Only condition that returns a value
    elif model_name == 'project':
        query_client = get_query(request, 'client')
        if query_client:
            client = get_object_or_404(client_model, pk=query_client)
            obj.client = client
            obj.save()
    elif model_name == 'time':
        obj.user = request.user
        query_estimate = get_query(request, 'estimate')
        query_invoice = get_query(request, 'invoice')
        query_project = get_query(request, 'project')
        if not request.user.is_staff:  # Staff have more than one project
            user_projects = project_model.objects.filter(team__in=[obj.user, ])
            if len(user_projects) > 0:
                obj.project = user_projects[0]
                obj.task = obj.project.task
        if query_estimate:
            estimate = get_object_or_404(estimate_model, pk=query_estimate)
            obj.estimate = estimate
        if query_invoice:
            invoice = get_object_or_404(invoice_model, pk=query_invoice)
            obj.invoice = invoice
        if query_project:
            # Explicit ?project=<pk> overrides the non-staff default above
            project = get_object_or_404(project_model, pk=query_project)
            obj.project = project
            obj.task = project.task
        obj.save()
# Update
from boto.exception import BotoServerError
from collections import OrderedDict
from decimal import Decimal
from django.conf import settings as django_settings
from django.contrib.gis.geoip2 import GeoIP2
from django.core.mail import send_mail
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.db.models import F
from django.db.models import Sum
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils import timezone
from docx import Document
from docx.enum.text import WD_ALIGN_PARAGRAPH
from faker import Faker
from functools import reduce
from import_export import widgets
from hashlib import md5
from io import BytesIO
from io import StringIO
from lxml import etree
from matplotlib.dates import DateFormatter
from matplotlib.dates import date2num
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from operator import or_ as OR
# Shared fake-data generator used for placeholder names/usernames
fake = Faker()
# Models whose index honors the 'exclude_hidden' app setting
EXCLUDE_MODELS = ('note', 'project')
# Map model verbose_name -> (view url name, edit url name, index url name).
# Empty strings mark pages that do not exist for that model.
URL_NAMES = {
    'Settings App': ('settings_app', 'settings_app_edit', ''),  # custom meta
    'Settings Company': ('settings_company', 'settings_company_edit', ''),
    'Settings Contract': ('settings_contract', 'settings_contract_edit', ''),
    'client': ('client_view', 'client_edit', 'client_index'),
    'contact': ('contact_view', 'contact_edit', 'contact_index'),
    'contract': ('contract_view', 'contract_edit', 'contract_index'),
    'estimate': ('estimate_view', 'estimate_edit', 'estimate_index'),
    'file': ('file_view', 'file_edit', 'file_index'),
    'invoice': ('invoice_view', 'invoice_edit', 'invoice_index'),
    'log': ('log_view', 'log_edit', 'log_index'),
    'newsletter': ('newsletter_view', 'newsletter_edit', 'newsletter_index'),
    'note': ('note_view', 'note_edit', 'note_index'),
    'profile': ('user_view', 'user_edit', 'user_index'),
    'project': ('project_view', 'project_edit', 'project_index'),
    'proposal': ('proposal_view', 'proposal_edit', 'proposal_index'),
    'report': ('report_view', 'report_edit', 'report_index'),
    'service': ('', 'service_edit', ''),
    'task': ('task_view', 'task_edit', 'task_index'),
    'time': ('time_view', 'time_edit', 'time_index'),
    'user': ('user_view', 'user_edit', 'user_index'),
}
class BooleanWidget(widgets.Widget):
    """
    Import/export widget that maps the literal string 'Yes' to True and
    every other value to False.
    """

    def clean(self, value):
        return value == 'Yes'
class DecimalWidget(widgets.Widget):
    """
    Import/export widget that parses comma-grouped number strings
    (e.g. '1,200.50') into Decimal, defaulting empty values to 0.
    """

    def clean(self, value):
        if not value:
            return Decimal(0)
        # Strip thousands separators before parsing
        return Decimal(value.replace(',', ''))
class UserWidget(widgets.Widget):
    """
    Pass-through widget: the imported value is returned unchanged.
    """
    def clean(self, value):
        # No conversion performed
        return value
def edit(request, **kwargs):
    """
    Shared create/edit view for all models.

    GET renders a bound (existing pk) or unbound (new/mail) form. POST
    dispatches, in order, to: copy, delete, checkbox toggles, 'invoice
    sent', and finally a normal form save — which falls back to sending
    mail when save() raises AttributeError (no instance involved).
    """
    context = {}
    obj = None
    app_settings_model = kwargs.get('app_settings_model')
    client_model = kwargs.get('client_model')
    company_model = kwargs.get('company_model')
    contact_model = kwargs.get('contact_model')
    estimate_model = kwargs.get('estimate_model')
    form_model = kwargs.get('form_model')
    invoice_model = kwargs.get('invoice_model')
    model = kwargs.get('model')
    note_model = kwargs.get('note_model')
    pk = kwargs.get('pk')
    project_model = kwargs.get('project_model')
    user_model = kwargs.get('user_model')
    if model:
        model_name = model._meta.verbose_name
        context['active_nav'] = model_name
        if pk is None:  # New or mail
            form = get_form(
                client_model=client_model,
                form_model=form_model,
                invoice_model=invoice_model,
                model=model,
                user_model=user_model,
                request=request)
        else:  # Existing
            if model_name == 'profile':
                model = user_model  # Swap profile_model with user_model
                # for create_user
            obj = get_object_or_404(model, pk=pk)
            form = get_form(form_model=form_model, obj=obj)
        if request.method == 'POST':
            ref = request.META['HTTP_REFERER']
            if pk is None:
                form = form_model(request.POST)
            else:
                # Copy or delete
                copy = request.POST.get('copy')
                delete = request.POST.get('delete')
                if copy:
                    return obj_copy(obj)
                if delete:
                    return obj_remove(obj)
                # Check boxes
                query_checkbox = get_query(request, 'checkbox')
                if query_checkbox['condition']:
                    return set_check_boxes(obj, query_checkbox, ref,
                                           app_settings_model)
                # Invoice sent
                invoice_sent = request.POST.get('invoice_sent')
                if invoice_sent:
                    return obj_sent(obj)
                form = form_model(request.POST, instance=obj)
            if form.is_valid():
                try:
                    obj = form.save()
                    # Returns a User only when a profile was created
                    user = set_relationship(
                        obj,
                        request,
                        client_model=client_model,
                        company_model=company_model,
                        estimate_model=estimate_model,
                        invoice_model=invoice_model,
                        model=model,
                        project_model=project_model,
                        user_model=user_model)
                    if user:
                        return obj_edit(user.profile, pk=user.pk)
                    else:
                        return obj_edit(obj, pk=pk)
                except AttributeError:  # Mail
                    obj = mail_obj(
                        request,
                        contact_model=contact_model,
                        estimate_model=estimate_model,
                        note_model=note_model)
                    recipients = mail_recipients(obj)
                    # Personalized message per recipient
                    for first_name, email_address in recipients:
                        mail_send(
                            **mail_compose(
                                obj,
                                form=form,
                                first_name=first_name,
                                mail_to=email_address,
                                request=request))
        context['form'] = form
        context['is_staff'] = request.user.is_staff
        context['item'] = obj
        context['pk'] = pk
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
        if invoice_model:  # Dashboard totals for reporting
            invoices = invoice_model.objects.filter(last_payment_date=None)
            gross, net = get_invoice_totals(invoices)
            context['gross'] = gross
            context['net'] = net
    elif contact_model:
        model_name = contact_model._meta.verbose_name
    elif note_model:
        model_name = note_model._meta.verbose_name
    template_name = get_template_and_url(
        model_name=model_name, page_type='edit')
    return render(request, template_name, context)
def generate_doc(contract):
    """
    Render a contract as a python-docx Document.

    https://stackoverflow.com/a/24122313/185820
    """
    document = Document()
    # Head
    task = ''
    if contract.task:
        task = contract.task
    title = document.add_heading(
        'ACLARK.NET, LLC %s AGREEMENT PREPARED FOR:' % task, level=1)
    title.alignment = WD_ALIGN_PARAGRAPH.CENTER
    if contract.client:
        client_name = document.add_heading(contract.client.name, level=1)
        client_name.alignment = WD_ALIGN_PARAGRAPH.CENTER
        client_address = document.add_heading(contract.client.address, level=1)
        client_address.alignment = WD_ALIGN_PARAGRAPH.CENTER
    parser = etree.HTMLParser()  # http://lxml.de/parsing.html
    tree = etree.parse(StringIO(contract.body), parser)
    # Body: map the contract's HTML <h2>/<p> elements to docx elements;
    # other tags are ignored
    for element in tree.iter():
        if element.tag == 'h2':
            document.add_heading(element.text, level=2)
        elif element.tag == 'p':
            document.add_paragraph(element.text)
    return document
def get_client_city(request):
    """Return GeoIP2 city data for the requesting IP, or None when the
    client IP cannot be determined."""
    geo = GeoIP2()
    ip_address = get_client_ip(request)
    if not ip_address:
        return None
    return geo.city(ip_address)
# https://stackoverflow.com/a/4581997/185820
def get_client_ip(request):
    """Return the client IP from the X-Real-IP header.

    May return None when the header is absent — presumably the proxy
    sets it in production; verify for local development.
    """
    return request.META.get('HTTP_X_REAL_IP')
def get_company_name(company):
    """
    Return an identifier-style company name: punctuation and spaces
    collapsed to underscores, upper-cased (e.g. 'ACLARK_NET_LLC').

    Falls back to generated placeholder text when the company has no
    name.
    """
    if company.name:
        company_name = company.name
    else:
        company_name = fake.text()
    # Bug fix: normalize the chosen value; the original re-read
    # ``company.name`` after taking the fallback and crashed on a
    # missing name.
    company_name = company_name.replace(', ', '_')
    for char in '.#-() ':
        company_name = company_name.replace(char, '_')
    return company_name.upper()
def get_fields(items):
    """Attach ``item.fields`` — an OrderedDict of non-relation field
    name/value pairs — to each item, for generic table templates."""
    for item in items:
        item.fields = OrderedDict(
            (field.name, getattr(item, field.name))
            for field in item._meta._get_fields()
            if not field.is_relation)
    return items
def get_form(**kwargs):
    """
    Return appropriate form based on new or edit

    Existing objects get a bound model form (notes pre-populate their
    tags); new objects may be pre-populated per model (report totals,
    contact email/client); everything else gets an unbound form with
    HTML mail enabled by default.
    """
    client_model = kwargs.get('client_model')
    form_model = kwargs.get('form_model')
    invoice_model = kwargs.get('invoice_model')
    model = kwargs.get('model')
    obj = kwargs.get('obj')
    request = kwargs.get('request')
    user_model = kwargs.get('user_model')
    query_client = None
    query_user = None
    if request:
        query_user = get_query(request, 'user')
        query_client = get_query(request, 'client')
    if obj:  # Existing object
        model_name = obj._meta.verbose_name
        if model_name == 'note':  # Populate form with tags already set
            form = form_model(initial={'tags': obj.tags.all()}, instance=obj)
        else:
            form = form_model(instance=obj)
    else:  # New object or mail
        initial = {'send_html': True}
        if model:
            model_name = model._meta.verbose_name
            if model_name == 'report' and invoice_model:  # Populate new report
                # with gross, net.
                invoices = invoice_model.objects.filter(last_payment_date=None)
                gross, net = get_invoice_totals(invoices)
                obj = model(gross=gross, net=net)
                form = form_model(instance=obj)
            elif model_name == 'contact':  # Populate new contact
                # with appropriate fields
                if query_user:
                    user = get_object_or_404(user_model, pk=query_user)
                    obj = model(email=user.email)
                elif query_client:
                    client = get_object_or_404(client_model, pk=query_client)
                    obj = model(client=client)
                form = form_model(instance=obj)
            else:
                form = form_model(initial=initial)
        else:
            form = form_model(initial=initial)
    return form
def get_index_items(**kwargs):
    """
    Build the context for a model's index (listing) page: search
    handling, ordering, pagination and per-model extras.
    """
    context = {}
    app_settings_model = kwargs.get('app_settings_model')
    columns_visible = kwargs.get('columns_visible')
    company_model = kwargs.get('company_model')
    model = kwargs.get('model')
    order_by = kwargs.get('order_by')
    page_size = kwargs.get('page_size')
    request = kwargs.get('request')
    search_fields = kwargs.get('search_fields')
    show_search = kwargs.get('show_search')
    model_name = model._meta.verbose_name
    edit_url = '%s_edit' % model_name
    if columns_visible:
        context['columns_visible'] = columns_visible
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    page = get_query(request, 'page')
    paginated = get_query(request, 'paginated')
    search = get_query(request, 'search')
    if request:
        context['is_staff'] = request.user.is_staff
        # Search is easy
        if request.method == 'POST':
            if search == u'':  # Empty search returns none
                context['show_search'] = True
                return context
            else:
                return get_search_results(
                    context,
                    model,
                    search_fields,
                    search,
                    app_settings_model=app_settings_model,
                    edit_url=edit_url,
                    request=request)
    # Not a search
    if model_name in EXCLUDE_MODELS and get_setting(
            request, app_settings_model, 'exclude_hidden'):
        items = model.objects.exclude(hidden=True)
    else:
        items = model.objects.all()
    # Order items (http://stackoverflow.com/a/20257999/185820)
    if order_by is not None:
        items = items.order_by(*order_by)
    # Don't show items to anon
    if not request.user.is_authenticated:
        items = []
    # Per model extras
    if model_name == 'note':
        context['note_info'] = get_note_info(model)
    elif model_name == 'time':
        context['total_hours'] = get_total_hours(items)
    # Paginate if paginated
    if paginated:
        page_size = get_setting(
            request, app_settings_model, 'page_size', page_size=page_size)
        items = paginate(items, page, page_size)
    context['edit_url'] = edit_url
    context['icon_size'] = get_setting(request, app_settings_model,
                                       'icon_size')
    context['icon_color'] = get_setting(request, app_settings_model,
                                        'icon_color')
    context['page'] = page
    context['paginated'] = paginated
    context['show_search'] = show_search
    items = set_items_name(model_name, items=items)
    context['items'] = items
    context['active_nav'] = model_name
    return context
def get_invoice_totals(invoices):
    """Return (gross, net) across the given invoices, skipping missing
    amount/cog values; net is gross minus cost of goods."""
    gross = cost = 0
    for invoice in invoices:
        if invoice.amount:
            gross += float(invoice.amount)
        if invoice.cog:
            cost += float(invoice.cog)
    return gross, gross - cost
def get_note_info(note_model):
    """
    Return dashboard counts of notes by state.

    Keys: active, inactive, hidden, not_hidden (inactive minus hidden)
    and total.
    """
    # Perf: count() lets the database count rows; len(queryset) fetched
    # every note into memory just to count them.
    active = note_model.objects.filter(active=True).count()
    inactive = note_model.objects.filter(active=False).count()
    hidden = note_model.objects.filter(hidden=True).count()
    total = note_model.objects.all().count()
    return {
        'active': active,
        'inactive': inactive,
        'hidden': hidden,
        'not_hidden': inactive - hidden,
        'total': total,
    }
def get_page_items(**kwargs):
    """
    Build the template context for a single object's detail page, or for
    the home/dashboard page when neither ``model`` nor ``obj`` is given.

    Dispatches on the model's verbose_name and gathers the related
    objects each page type needs.
    """
    app_settings_model = kwargs.get('app_settings_model')
    company_model = kwargs.get('company_model')
    columns_visible = kwargs.get('columns_visible')
    contact_model = kwargs.get('contact_model')
    contract_model = kwargs.get('contract_model')
    dashboard_item_model = kwargs.get('dashboard_item_model')
    estimate_model = kwargs.get('estimate_model')
    invoice_model = kwargs.get('invoice_model')
    model = kwargs.get('model')
    note_model = kwargs.get('note_model')
    obj = kwargs.get('obj')
    profile_model = kwargs.get('profile_model')
    project_model = kwargs.get('project_model')
    report_model = kwargs.get('report_model')
    request = kwargs.get('request')
    order_by = kwargs.get('order_by')
    pk = kwargs.get('pk')
    time_model = kwargs.get('time_model')
    user_model = kwargs.get('user_model')
    context = {}
    items = None
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    if columns_visible:
        context['columns_visible'] = columns_visible
    if model or obj:
        if model:
            model_name = model._meta.verbose_name
        elif obj:
            model_name = obj._meta.verbose_name
        context['model_name'] = model_name
        context['active_nav'] = model_name
        context['edit_url'] = '%s_edit' % model_name
        if model_name == 'Settings App':
            app_settings = app_settings_model.get_solo()
            context['items'] = get_fields([app_settings, ])  # table_items.html
        elif model_name == 'Settings Company':
            company_settings = model.get_solo()
            context['items'] = get_fields([company_settings,
                                           ])  # table_items.html
        elif model_name == 'Settings Contract':
            contract_settings = model.get_solo()
            context['items'] = get_fields([contract_settings,
                                           ])  # table_items.html
        elif model_name == 'client':
            client = get_object_or_404(model, pk=pk)
            contacts = contact_model.objects.filter(client=client)
            contracts = contract_model.objects.filter(client=client)
            projects = project_model.objects.filter(client=client)
            context['contacts'] = contacts
            context['contracts'] = contracts
            context['item'] = client
            context['notes'] = client.note.all()
            context['projects'] = projects
        elif model_name == 'contact':
            contact = get_object_or_404(model, pk=pk)
            context['items'] = get_fields([contact, ])  # table_items.html
            context['item'] = contact
        elif model_name == 'contract':
            contract = get_object_or_404(model, pk=pk)
            estimate = contract.statement_of_work
            if estimate:
                # Unassigned client time plus the estimate's own entries
                times_client = time_model.objects.filter(
                    client=estimate.client,
                    estimate=None,
                    project=None,
                    invoiced=False,
                    invoice=None)
                times_estimate = time_model.objects.filter(estimate=estimate)
                times = times_client | times_estimate
            else:
                times = None
            context['doc_type'] = model_name
            context['item'] = contract
            context['times'] = times
        elif model_name == 'estimate':  # handle obj or model
            if not obj:
                estimate = get_object_or_404(model, pk=pk)
            else:
                estimate = obj
            if not estimate.is_sow:
                doc_type = model_name
            else:
                doc_type = 'statement of work'
            if not obj:
                times_client = time_model.objects.filter(
                    client=estimate.client,
                    estimate=None,
                    project=None,
                    invoiced=False,
                    invoice=None)
                times_estimate = time_model.objects.filter(estimate=estimate)
                times = times_client | times_estimate
            else:
                times = obj.times.all()
            if order_by:
                times = times.order_by(*order_by['time'])
            times = set_invoice_totals(times, estimate=estimate)
            context['doc_type'] = doc_type
            context['entries'] = times
            context['item'] = estimate
        # NOTE(review): plain 'if' (not 'elif') restarts the chain here,
        # so the branches above fall through to these tests too — looks
        # accidental; confirm before relying on it.
        if model_name == 'file':
            file_obj = get_object_or_404(model, pk=pk)
            context['doc_type'] = model_name
            context['item'] = file_obj
        elif model_name == 'invoice':
            invoice = get_object_or_404(model, pk=pk)
            times = get_times_for_obj(invoice, time_model)
            times = times.order_by(*order_by['time'])
            times = set_invoice_totals(times, invoice=invoice)
            last_payment_date = invoice.last_payment_date
            total_hours = get_total_hours(times)
            context['doc_type'] = model_name
            context['entries'] = times
            context['item'] = invoice
            context['invoice'] = True
            context['last_payment_date'] = last_payment_date
            context['total_hours'] = total_hours
        elif model_name == 'newsletter':
            newsletter = get_object_or_404(model, pk=pk)
            context['doc_type'] = model_name
            context['item'] = newsletter
        elif model_name == 'note':
            note = get_object_or_404(model, pk=pk)
            context['item'] = note
        elif model_name == 'project':
            project = get_object_or_404(model, pk=pk)
            contacts = contact_model.objects.all()
            items = set_items_name('contact', items=contacts)
            estimates = estimate_model.objects.filter(project=project)
            items = set_items_name('estimate', items=estimates, _items=items)
            invoices = invoice_model.objects.filter(project=project)
            invoices = invoices.order_by('-issue_date')
            items = set_items_name('invoice', items=invoices, _items=items)
            times = get_times_for_obj(project, time_model)
            times = times.order_by(*order_by['time'])
            items = set_items_name('time', items=times, _items=items)
            users = user_model.objects.filter(project=project)
            items = set_items_name('user', items=users, _items=items)
            context['item'] = project
            context['items'] = items
        elif model_name == 'proposal':
            proposal = get_object_or_404(model, pk=pk)
            context['doc_type'] = model_name
            context['item'] = proposal
        elif model_name == 'report':
            report = get_object_or_404(model, pk=pk)
            reports = model.objects.filter(active=True)
            # NOTE(review): this aggregate result is never used below —
            # presumably dead code; confirm before removing.
            reports = reports.aggregate(
                gross=Sum(F('gross')), net=Sum(F('net')))
            context['cost'] = report.gross - report.net
            context['item'] = report
        elif model_name == 'task':
            task = get_object_or_404(model, pk=pk)
            context['item'] = task
        elif model_name == 'time':
            time_entry = get_object_or_404(model, pk=pk)
            context['item'] = time_entry
        elif model_name == 'user':
            user = get_object_or_404(model, pk=pk)
            profile_model.objects.get_or_create(user=user)
            projects = project_model.objects.filter(
                team__in=[user, ], active=True)
            projects = projects.order_by(*order_by['project'])
            times = time_model.objects.filter(
                estimate=None, invoiced=False, user=user)
            times = times.order_by(*order_by['time'])
            contacts = contact_model.objects.all()
            context['item'] = user
            context['items'] = get_fields([user.profile, ])  # table_items.html
            context['projects'] = projects
            context['times'] = times
    else:  # home
        if request:
            if request.user.is_authenticated:
                # Dashboard
                dashboard_choices = get_setting(request, app_settings_model,
                                                'dashboard_choices')
                dashboard_items = [
                    i.title.lower() for i in dashboard_item_model.objects.all()
                ]
                context['dashboard_choices'] = dashboard_choices
                context['dashboard_items'] = dashboard_items
                # Items
                invoices = invoice_model.objects.filter(last_payment_date=None)
                invoices = invoices.order_by(*order_by['invoice'])
                items = set_items_name('invoice', items=invoices)
                notes = note_model.objects.filter(active=True, hidden=False)
                notes = notes.order_by(*order_by['note'])[:10]
                items = set_items_name('note', items=notes, _items=items)
                projects = project_model.objects.filter(
                    active=True, hidden=False)
                projects = projects.order_by(*order_by['project'])
                items = set_items_name('project', items=projects, _items=items)
                times = time_model.objects.filter(
                    invoiced=False, user=request.user)
                times = times.order_by(*order_by['time'])
                items = set_items_name('time', items=times, _items=items)
                # Plot
                points = report_model.objects.filter(active=True)
                # Totals
                gross, net = get_invoice_totals(invoices)
                context['gross'] = gross
                context['invoices'] = invoices
                context['city_data'] = get_client_city(request)
                context['items'] = items
                context['net'] = net
                context['notes'] = notes
                context['note_info'] = get_note_info(note_model)
                context['points'] = points
                context['projects'] = projects
                context['times'] = times
                total_hours = get_total_hours(times)
                context['total_hours'] = total_hours
                context['total_earned'] = get_total_earned(request,
                                                           total_hours)
    if request:
        context['icon_size'] = get_setting(request, app_settings_model,
                                           'icon_size')
        context['icon_color'] = get_setting(request, app_settings_model,
                                            'icon_color')
        doc = get_query(request, 'doc')
        pdf = get_query(request, 'pdf')
        context['doc'] = doc
        context['pdf'] = pdf
        context['is_staff'] = request.user.is_staff
    return context
def get_plot(request):  # http://stackoverflow.com/a/5515994/185820
    """
    Render report points from the query string as a PNG line plot.

    Points arrive as "y,YYYY-MM-DD y,YYYY-MM-DD ..." pairs (parsed by
    get_query) and are plotted with the month number on the x axis.
    """
    points = get_query(request, 'points')
    # http://matplotlib.org/examples/api/date_demo.html
    x = [
        date2num(timezone.datetime.strptime(i[1], '%Y-%m-%d')) for i in points
    ]
    y = [i[0] for i in points]
    figure = Figure()
    canvas = FigureCanvasAgg(figure)
    axes = figure.add_subplot(1, 1, 1)
    axes.grid(True)
    axes.plot(x, y)
    axes.xaxis.set_major_formatter(DateFormatter('%m'))
    # write image data to a string buffer and get the PNG image bytes
    buf = BytesIO()
    canvas.print_png(buf)
    data = buf.getvalue()
    # write image bytes back to the browser
    return HttpResponse(data, content_type="image/png")
def get_query(request, query):
    """
    Pull a value out of the request, with special handling for several
    well-known query-string and form parameters; everything else falls
    through to a plain GET lookup.
    """
    if query == 'paginated':
        # Pagination is on unless explicitly disabled
        return request.GET.get('paginated') != u'false'
    if query == 'search' and request.method == 'POST':
        return request.POST.get('search', '')
    if query == 'points':  # plot: "y,date y,date ..."
        raw = request.GET.get('points')
        pairs = raw.split(' ') if raw else []
        return [pair.split(',') for pair in pairs]
    if query == 'checkbox':
        active = request.POST.get('checkbox-active')
        subscribe = request.POST.get('checkbox-subscribe')
        return {
            'active': active,
            'subscribe': subscribe,
            # True when any checkbox value was submitted at all
            'condition': (active in ('on', 'off') or
                          subscribe in ('on', 'off')),
        }
    if query in ('doc', 'pdf', 'test'):
        # Presence flags: any truthy query value means True
        return bool(request.GET.get(query))
    # Normal handling
    return request.GET.get(query, '')
def get_search_results(context,
                       model,
                       search_fields,
                       search,
                       app_settings_model=None,
                       edit_url=None,
                       request=None):
    """
    Filter ``model`` rows whose ``search_fields`` contain ``search``
    (case-insensitive, OR-ed together) and fill ``context`` for the
    index template.
    """
    model_name = model._meta.verbose_name
    filters = [Q(**{field + '__icontains': search})
               for field in search_fields]
    items = model.objects.filter(reduce(OR, filters))
    context['active_nav'] = model_name
    context['edit_url'] = edit_url
    context['icon_size'] = get_setting(request, app_settings_model,
                                       'icon_size')
    context['icon_color'] = get_setting(request, app_settings_model,
                                        'icon_color')
    context['show_search'] = True
    context['items'] = set_items_name(model_name, items=items)
    return context
def get_setting(request, app_settings_model, setting, page_size=None):
    """
    Allow user to override global setting

    Looks on the user's profile first (when present and truthy) and
    falls back to the solo app-settings row. Returns None for anonymous
    users and for unknown setting names.
    """
    if not request.user.is_authenticated:
        return
    dashboard_override = user_pref = None
    app_settings = app_settings_model.get_solo()
    if setting == 'icon_size':
        if has_profile(request.user):
            user_pref = request.user.profile.icon_size
        if user_pref:
            return user_pref
        else:
            return app_settings.icon_size
    elif setting == 'icon_color':
        if has_profile(request.user):
            user_pref = request.user.profile.icon_color
        if user_pref:
            return user_pref
        else:
            return app_settings.icon_color
    elif setting == 'page_size':
        # Priority: profile preference, then the view's argument, then
        # the global setting
        if has_profile(request.user):
            user_pref = request.user.profile.page_size
        if user_pref:
            return user_pref
        elif page_size:  # View's page_size preference
            return page_size
        else:
            return app_settings.page_size
    elif setting == 'dashboard_choices':
        dashboard_choices = app_settings.dashboard_choices
        dashboard_override = False
        if has_profile(request.user):
            dashboard_override = request.user.profile.dashboard_override
        if has_profile(request.user) and dashboard_override:
            dashboard_choices = request.user.profile.dashboard_choices
        return dashboard_choices
    elif setting == 'exclude_hidden':
        return app_settings.exclude_hidden
def get_template_and_url(**kwargs):
    """
    Resolve template/url names for a model and page type via URL_NAMES.

    Returns (template_name, url_name) for 'view'; a bare url_name for
    'copy', 'home' and 'index'; a bare template_name for 'edit'; None
    for anything else.
    """
    model_name = kwargs.get('model_name')
    page_type = kwargs.get('page_type')
    if page_type == 'view':
        url_name = URL_NAMES[model_name][0]
        return '%s.html' % url_name, url_name
    if page_type == 'copy':
        return URL_NAMES[model_name][1]
    if page_type == 'edit':
        return '%s.html' % URL_NAMES[model_name][1]
    if page_type == 'home':
        return 'home'
    if page_type == 'index':
        return URL_NAMES[model_name][2]
def get_times_for_obj(obj, time_model):
    """
    Return the time entries attached to an invoice or project.

    Invoices get their own non-estimate entries; projects get their
    uninvoiced, non-estimate entries.

    :raises ValueError: for any other model type (previously this
        crashed with an UnboundLocalError on ``times``).
    """
    model_name = obj._meta.verbose_name
    if model_name == 'invoice':
        return time_model.objects.filter(estimate=None, invoice=obj)
    if model_name == 'project':
        return time_model.objects.filter(
            invoiced=False, estimate=None, project=obj)
    raise ValueError('Unsupported model for time lookup: %s' % model_name)
def get_total_earned(request, total_hours):
    """Return the user's earnings (hours times profile rate) formatted
    as a '%.2f' string; '0.00' without a profile or rate."""
    total_earned = 0
    # hasattr check matches the has_profile() helper used elsewhere
    if hasattr(request.user, 'profile') and request.user.profile.rate:
        total_earned = total_hours * request.user.profile.rate
    return '%.2f' % total_earned
def get_total_hours(items):
    """Sum the ``hours`` field across a queryset.

    Returns None (Django aggregate behavior) when the queryset is empty.
    """
    return items.aggregate(hours=Sum(F('hours')))['hours']
def gravatar_url(email):
    """
    MD5 hash of email address for use with Gravatar

    The address is lower-cased per the Gravatar spec and encoded to
    bytes — hashlib.md5 requires bytes on Python 3, so the original
    ``md5(email.lower())`` raised TypeError.
    """
    digest = md5(email.lower().encode('utf-8')).hexdigest()
    return django_settings.GRAVATAR_URL % digest
def has_profile(user):
    """Return True when ``user`` has a ``profile`` attribute (e.g. a
    related profile object that can be accessed without raising)."""
    return hasattr(user, 'profile')
def last_month():
    """
    Returns last day of last month

    Computed as the first day of the current month minus one day.
    """
    start_of_month = timezone.now().replace(day=1)
    return start_of_month - timezone.timedelta(days=1)
def mail_compose(obj, **kwargs):
    """
    Build the kwargs dict for mail_send() from a contact, estimate or
    note.

    Chooses subject/message per model type, optionally personalizes the
    message with the recipient's first name, and renders an HTML
    alternative. NOTE(review): assumes obj is one of contact, estimate
    or note — any other type leaves ``message``/``subject`` unbound.
    """
    context = {}
    first_name = kwargs.get('first_name')
    form = kwargs.get('form')
    mail_to = kwargs.get('mail_to')
    model_name = obj._meta.verbose_name
    if model_name == 'contact':
        message = form.cleaned_data['message']
        subject = form.cleaned_data['subject']
    elif model_name == 'estimate':
        message = render_to_string('pdf_invoice.html', get_page_items(obj=obj))
        subject = obj.subject
    elif model_name == 'note':
        message = obj.note
        subject = obj.title
    if first_name:
        message = render_to_string('first_name.html', {
            'first_name': first_name,
            'message': message,
        })
    if form:  # http://stackoverflow.com/a/28476681/185820
        if 'send_html' in form.data:
            html_message = render_to_string(form.data['template'], {
                'message': message,
            })
            context['html_message'] = html_message
    else:  # python manage.py send_note
        context['html_message'] = render_to_string('mail.html',
                                                   {'message': message, })
    context['mail_to'] = mail_to
    context['mail_from'] = django_settings.EMAIL_FROM
    context['message'] = message
    context['subject'] = subject
    return context
def mail_obj(request, **kwargs):
    """
    Look up the object to mail (contact, note or estimate) from the
    request's query string.

    :raises ValueError: when no model/query pair matches (previously
        this crashed with an UnboundLocalError on ``obj``).
    """
    query_contact = get_query(request, 'contact')
    query_estimate = get_query(request, 'estimate')
    query_note = get_query(request, 'note')
    contact_model = kwargs.get('contact_model')
    estimate_model = kwargs.get('estimate_model')
    note_model = kwargs.get('note_model')
    if contact_model and query_contact:
        return contact_model.objects.get(pk=query_contact)
    if note_model and query_note:
        return note_model.objects.get(pk=query_note)
    if estimate_model and query_estimate:
        return estimate_model.objects.get(pk=query_estimate)
    raise ValueError('No mailable object found in request')
def mail_recipients(obj):
    """Return (first_name, email) pairs for the object's recipients."""
    model_name = obj._meta.verbose_name
    if model_name == 'contact':
        # A contact is its own single recipient
        return ((obj.first_name, obj.email), )
    if model_name in ('estimate', 'note'):
        return [(contact.first_name, contact.email)
                for contact in obj.contacts.all()]
def mail_send(**kwargs):
    """
    Send one email (plain text plus optional HTML alternative).

    Expects html_message, mail_from, mail_to, message and subject in
    kwargs (as produced by mail_compose). Returns True on success,
    False when the boto-backed mail backend rejects the send.
    """
    html_message = kwargs.get('html_message')
    mail_from = kwargs.get('mail_from')
    mail_to = kwargs.get('mail_to')
    message = kwargs.get('message')
    subject = kwargs.get('subject')
    try:
        send_mail(
            subject,
            message,
            mail_from, (mail_to, ),
            fail_silently=False,
            html_message=html_message)
        status = True
    except BotoServerError:
        # Report failure to the caller instead of raising
        status = False
    return status
def obj_copy(obj):
    """Duplicate ``obj`` and redirect to the copy's edit page."""
    model_name = obj._meta.verbose_name
    # Clearing the pk makes save() insert a new row; obj.pk then holds
    # the new row's pk
    obj.pk = None
    obj.save()
    url_name = get_template_and_url(model_name=model_name, page_type='copy')
    return HttpResponseRedirect(reverse(url_name, kwargs={'pk': obj.pk}))
def obj_edit(obj, pk=None):
    """
    Redirect to the object's view page after a save.

    Singleton settings pages take no pk; profile edits redirect to the
    owning user's pk; newly created objects use their fresh pk.
    """
    model_name = obj._meta.verbose_name
    template_name, url_name = get_template_and_url(
        model_name=model_name, page_type='view')  # Redir to view
    if pk:
        if model_name in ('Settings App', 'Settings Company',
                          'Settings Contract'):
            return HttpResponseRedirect(reverse(url_name))
        # Profiles redirect to the user pk, not the profile pk
        view_pk = obj.user.pk if model_name == 'profile' else pk
    else:  # New
        view_pk = obj.pk
    return HttpResponseRedirect(reverse(url_name, kwargs={'pk': view_pk}))
def obj_remove(obj):
    """Delete ``obj`` and redirect to its index (time entries redirect
    home instead)."""
    model_name = obj._meta.verbose_name
    page_type = 'home' if model_name == 'time' else 'index'
    url_name = get_template_and_url(
        model_name=model_name, page_type=page_type)
    obj.delete()
    return HttpResponseRedirect(reverse(url_name))
def obj_sent(obj, ref):
    """Mark every time entry on the invoice as invoiced and redirect
    back to ``ref``."""
    for entry in obj.time_set.all():
        entry.invoiced = True
        entry.save()
    return HttpResponseRedirect(ref)
def paginate(items, page, page_size):
    """
    Return one page of ``items``, clamping bad page values.

    Non-numeric pages fall back to page 1; out-of-range pages fall back
    to the last page.
    """
    paginator = Paginator(items, page_size, orphans=5)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)
def set_check_boxes(obj, query_checkbox, ref, app_settings_model):
    """
    Toggle ``active``/``subscribed`` flags on ``obj`` from checkbox query
    values ('on'/'off'), save, and redirect back to ``ref``.

    BUG FIX: the subscribe branch previously tested
    ``query_checkbox['active']`` instead of ``query_checkbox['subscribe']``,
    so subscribing could never set ``subscribed = True``.
    """
    model_name = obj._meta.verbose_name
    if (query_checkbox['active'] == 'on' or
            query_checkbox['active'] == 'off'):  # Active
        if query_checkbox['active'] == 'on':
            obj.active = True
            obj.hidden = False
        else:
            obj.active = False
            # Auto-hide deactivated notes when the app settings say so
            if model_name == 'note' and app_settings_model:
                app_settings = app_settings_model.get_solo()
                if app_settings.auto_hide:
                    obj.hidden = True
    elif (query_checkbox['subscribe'] == 'on' or
            query_checkbox['subscribe'] == 'off'):  # Subscribe
        if query_checkbox['subscribe'] == 'on':
            obj.subscribed = True
        else:
            obj.subscribed = False
    obj.save()
    return HttpResponseRedirect(ref)
def set_invoice_totals(times, estimate=None, invoice=None):
    """
    Set invoice, estimate and time totals.

    Annotates each time entry with ``amount`` (task rate * hours) and
    ``cog`` (user profile rate * hours), accumulates grand totals, and
    stores them on ``invoice`` (or ``estimate``) when given.

    BUG FIX: the per-entry accumulators were initialized once before the
    loop, so an entry without a task (or user rate) inherited the
    previous entry's amount/cog; they are now reset every iteration.
    """
    invoice_amount = invoice_cog = 0
    for time_entry in times:
        time_entry_amount = time_entry_cog = 0  # reset per entry
        hours = time_entry.hours
        if time_entry.task:
            rate = time_entry.task.rate
            time_entry_amount = rate * hours
        if time_entry.user:
            rate = time_entry.user.profile.rate
            if rate:
                time_entry_cog = rate * hours
        time_entry.amount = '%.2f' % time_entry_amount
        time_entry.cog = '%.2f' % time_entry_cog
        invoice_amount += time_entry_amount
        invoice_cog += time_entry_cog
    if invoice:
        invoice.amount = '%.2f' % invoice_amount
        invoice.cog = '%.2f' % invoice_cog
        invoice.save()
    elif estimate:
        estimate.amount = '%.2f' % invoice_amount
        estimate.save()
    return times
def set_items_name(model_name, items=None, _items={}):
    """
    Share templates by returning dictionary of items e.g.
        for item in items.reports
    instead of:
        for item in reports

    Maps the pluralized model name ('%ss') to ``items``.

    NOTE(review): ``_items`` is a mutable default, so keys accumulate
    across calls within the process; that appears to be how successive
    calls merge their entries — confirm before "fixing".
    """
    _items['%ss' % model_name] = items
    return _items
def set_relationship(obj, request, **kwargs):
    """
    Sets relationships and returns None unless user is created then
    return user.

    Wires ``obj`` to related objects based on its model type, locating
    the other side via query-string parameters on ``request``. The
    keyword arguments supply the model classes to look up against.
    """
    client_model = kwargs.get('client_model')
    company_model = kwargs.get('company_model')
    estimate_model = kwargs.get('estimate_model')
    invoice_model = kwargs.get('invoice_model')
    model = kwargs.get('model')
    project_model = kwargs.get('project_model')
    user_model = kwargs.get('user_model')
    model_name = obj._meta.verbose_name
    if model_name == 'contact':
        # Attach the contact to the client given by ?client=<pk>
        query_client = get_query(request, 'client')
        if query_client:
            client = get_object_or_404(client_model, pk=query_client)
            obj.client = client
            obj.save()
    elif model_name == 'estimate' or model_name == 'invoice':
        # Attach to a project (inheriting its client) from ?project=<pk>
        query_project = get_query(request, 'project')
        if query_project:
            project = get_object_or_404(project_model, pk=query_project)
            obj.client = project.client
            obj.project = project
            obj.save()
    elif model_name == 'note':
        # Notes attach to either a client or the singleton company
        query_client = get_query(request, 'client')
        query_company = get_query(request, 'company')
        if query_client:
            client = get_object_or_404(client_model, pk=query_client)
            client.note.add(obj)
            client.save()
        elif query_company:
            company = company_model.get_solo()
            company.note.add(obj)
            company.save()
    elif model_name == 'profile':
        # Create the backing auth user; random username when none given
        if obj.preferred_username:
            username = obj.preferred_username
        else:
            username = fake.text()[:150]
        user = user_model.objects.create_user(username=username)
        model.objects.get_or_create(user=user)  # Create profile
        return user  # Only condition that returns a value
    elif model_name == 'project':
        query_client = get_query(request, 'client')
        if query_client:
            client = get_object_or_404(client_model, pk=query_client)
            obj.client = client
            obj.save()
    elif model_name == 'time':
        obj.user = request.user
        query_estimate = get_query(request, 'estimate')
        query_invoice = get_query(request, 'invoice')
        query_project = get_query(request, 'project')
        if not request.user.is_staff:  # Staff have more than one project
            user_projects = project_model.objects.filter(team__in=[obj.user, ])
            if len(user_projects) > 0:
                obj.project = user_projects[0]
                obj.task = obj.project.task
        if query_estimate:
            estimate = get_object_or_404(estimate_model, pk=query_estimate)
            obj.estimate = estimate
        if query_invoice:
            invoice = get_object_or_404(invoice_model, pk=query_invoice)
            obj.invoice = invoice
        if query_project:
            project = get_object_or_404(project_model, pk=query_project)
            obj.project = project
            obj.task = project.task
        obj.save()
|
#!/usr/bin/env python -B
import mimetypes
import BaseHTTPServer
import SimpleHTTPServer
class StreamHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Static-file handler whose MIME map covers HTTP-streaming media
    (HLS playlists and MPEG-TS segments) plus common video containers."""
    if not mimetypes.inited:
        mimetypes.init()  # try to read system mime.types
    extensions_map = mimetypes.types_map.copy()
    extensions_map.update({
        '': 'application/octet-stream',  # Default
        '.m3u8': 'application/x-mpegURL',
        '.ts': 'video/MP2T',
        '.ogv': 'video/ogg',
        '.mp4': 'video/mp4',
        '.m4v': 'video/mp4',
        '.webm': 'video/webm'
    })
if __name__ == "__main__":
    # Delegate to SimpleHTTPServer's CLI runner (serves cwd; port from argv).
    SimpleHTTPServer.test(HandlerClass=StreamHTTPRequestHandler,
                          ServerClass=BaseHTTPServer.HTTPServer)
modified: src/http_stream_server.py
#!/usr/bin/env python -B
import mimetypes
import BaseHTTPServer
import SimpleHTTPServer
class StreamHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Static-file handler whose MIME map covers HTTP-streaming media
    (HLS playlists and MPEG-TS segments) plus common video containers.
    Types are lowercased in this revision."""
    if not mimetypes.inited:
        mimetypes.init()  # try to read system mime.types
    extensions_map = mimetypes.types_map.copy()
    extensions_map.update({
        '': 'application/octet-stream',  # Default
        '.m3u8': 'application/x-mpegurl',
        '.ts': 'video/mp2t',
        '.ogv': 'video/ogg',
        '.mp4': 'video/mp4',
        '.m4v': 'video/mp4',
        '.webm': 'video/webm'
    })
if __name__ == "__main__":
    # Delegate to SimpleHTTPServer's CLI runner (serves cwd; port from argv).
    SimpleHTTPServer.test(HandlerClass=StreamHTTPRequestHandler,
                          ServerClass=BaseHTTPServer.HTTPServer)
|
from collections import OrderedDict
from decimal import Decimal
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.mail import send_mail as django_send_mail
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.db.models import F
from django.db.models import Sum
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.html import strip_tags
from docx import Document
from functools import reduce
from import_export import widgets
from hashlib import md5
from operator import or_ as OR
from smtplib import SMTPSenderRefused
class BooleanWidget(widgets.Widget):
    """
    Import/export widget mapping the string 'Yes' to True, anything
    else to False.
    """
    def clean(self, value):
        return value == 'Yes'
class DecimalWidget(widgets.Widget):
    """
    Import/export widget parsing comma-grouped strings into Decimal;
    empty values import as zero.
    """
    def clean(self, value):
        if not value:
            return Decimal(0)
        return Decimal(value.replace(',', ''))
class UserWidget(widgets.Widget):
    """
    Pass-through import/export widget: returns the raw value unchanged.
    """
    def clean(self, value):
        return value
def add_user_to_contacts(request, model, pk=None):
    """
    Create a contact (an instance of ``model``) from an existing auth user.

    POST only. The user must have an email, first and last name; a
    contact with the same email is refused as a duplicate. A flash
    message reports the outcome in every case.
    """
    if request.method == 'POST':
        if pk is None:
            return HttpResponseRedirect(reverse('user_index'))
        else:
            user = get_object_or_404(User, pk=pk)
            if not user.email or not user.first_name or not user.last_name:
                messages.add_message(request, messages.WARNING,
                                     'No email no contact!')
                return HttpResponseRedirect(reverse('user_index'))
            contact = model.objects.filter(email=user.email)
            if contact:
                contact = contact[0].email
                messages.add_message(request, messages.WARNING,
                                     'Found duplicate: %s!' % contact)
                return HttpResponseRedirect(reverse('user_index'))
            contact = model(
                email=user.email,
                active=True,
                first_name=user.first_name,
                last_name=user.last_name)
            contact.save()
            messages.add_message(request, messages.INFO,
                                 'User added to contacts!')
            return HttpResponseRedirect(reverse('contact_index'))
def check_boxes(obj, checkbox, checkbox_subscribed, ref):
    """
    Toggle ``obj.active`` or ``obj.subscribed`` from checkbox POST
    values ('on'/'off'), save, and redirect back to ``ref``. The active
    checkbox takes precedence; with neither set, nothing happens.
    """
    if checkbox in ('on', 'off'):
        obj.active = checkbox == 'on'
        obj.save()
        return HttpResponseRedirect(ref)
    if checkbox_subscribed in ('on', 'off'):
        obj.subscribed = checkbox_subscribed == 'on'
        obj.save()
        return HttpResponseRedirect(ref)
def create_form(model,
                form_model,
                projects=[],
                project=None,
                clients=[],
                client=None,
                gross=None,
                net=None,
                tasks=[],
                task=None):
    """
    Build the unbound form for ``model``, limiting field querysets and
    pre-populating initial values by model type.

    NOTE(review): the [] defaults are shared mutable defaults; they are
    only read here, never mutated, so this is currently harmless.
    """
    form = form_model()
    # Populate new report with gross and net calculated
    # from active invoices
    if form._meta.model._meta.verbose_name == 'report':
        obj = model(gross=gross, net=net)
        form = form_model(instance=obj)
    # Limit time entry project, client
    # and task choices
    if form._meta.model._meta.verbose_name == 'time':
        form.fields['project'].queryset = projects
        form.fields['client'].queryset = clients
        form.fields['task'].queryset = tasks
    # Limit project client choices
    if form._meta.model._meta.verbose_name == 'project':
        form.fields['client'].queryset = clients
    # Populate time entry form fields with project, client
    # and task values
    if project and model._meta.verbose_name == 'time':
        entry = model(
            project=project, client=project.client, task=project.task)
        form = form_model(instance=entry)
    # Populate invoice with project
    elif project and model._meta.verbose_name == 'invoice':
        entry = model(project=project, client=project.client)
        form = form_model(instance=entry)
    # Populate time entry form fields with client and
    # task values
    elif client and task:
        entry = model(client=client, task=task)
        form = form_model(instance=entry)
    # Populate project entry form fields with client value
    elif client:
        entry = model(client=client)
        form = form_model(instance=entry)
    # Populate time entry form fields with task value
    elif task:
        entry = model(task=task)
        form = form_model(instance=entry)
    return form
def daily_burn(project):
    """
    Budgeted hours per day for ``project`` as a '%.2f' string.

    Returns '' when dates/budget are missing (TypeError) or the date
    range spans zero days (ZeroDivisionError).
    """
    try:
        span = (project.end_date - project.start_date).days
        return '%.2f' % (project.budget / span)
    except (TypeError, ZeroDivisionError):
        return ''
def dashboard_totals(model):
    """
    Gross and net totals across active (unpaid) invoices.

    Returns (gross, net, invoices_active): gross sums invoice subtotals,
    net sums invoice amounts, skipping null money fields.

    The original also built an OrderedDict of per-invoice figures that
    was never read or returned; that dead work is removed.
    """
    invoices_active = model.objects.filter(last_payment_date=None)
    invoices_active = invoices_active.order_by('-pk')
    gross = 0
    net = 0
    for invoice in invoices_active:
        # Skip null (unset) money fields rather than raising TypeError.
        if invoice.subtotal:
            gross += invoice.subtotal
        if invoice.amount:
            net += invoice.amount
    return gross, net, invoices_active
def edit_amounts(obj,
                 amount,
                 subtotal,
                 paid_amount,
                 paid,
                 kwargs={},
                 url_name=''):
    """
    Update money fields posted for an invoice/estimate and redirect to
    ``url_name``. Branches run most-specific first; marking ``paid``
    also stamps ``last_payment_date``.

    NOTE(review): the amount+subtotal and amount-only branches call
    reverse() without ``kwargs``, and None is returned when no amount
    was posted — confirm both are intentional.
    """
    if amount and subtotal and paid_amount and paid:
        obj.amount = amount
        obj.last_payment_date = timezone.now()
        obj.subtotal = subtotal
        obj.paid_amount = paid_amount
        obj.save()
        return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
    elif amount and subtotal and paid_amount:
        obj.amount = amount
        obj.subtotal = subtotal
        obj.paid_amount = paid_amount
        obj.save()
        return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
    elif amount and subtotal:
        obj.amount = amount
        obj.subtotal = subtotal
        obj.save()
        return HttpResponseRedirect(reverse(url_name))
    elif amount:
        obj.amount = amount
        obj.save()
        return HttpResponseRedirect(reverse(url_name))
def edit(
        request,
        form_model,
        model,
        url_name,
        template,
        amount=None,
        client=None,
        clients=[],
        company=None,
        contract_settings=None,
        context=None,
        gross=None,
        invoices_active=None,  # for reporting
        kwargs={},
        active_nav=None,
        net=None,
        pk=None,
        paid_amount=None,
        paid=None,
        project=None,
        projects=[],
        subtotal=None,
        task=None,
        tasks=[]):
    """
    Generic create/edit view body shared by all models.

    GET renders an (optionally pre-populated) form; POST dispatches to
    copy/delete/checkbox/amount handlers or validates and saves the
    form, then redirects via obj_edit().

    Fixes applied:
    * obj_edit() was called with only (obj, company, company_note=True),
      omitting the required ``contract_settings`` positional argument —
      a TypeError at runtime on the company-note path.
    * ``context`` was a shared mutable default ({}) that this function
      mutates; it now defaults to None and a fresh dict is built per
      call. Callers passing their own dict are unaffected.
    """
    if context is None:
        context = {}
    obj = None
    if pk is None:
        # New object: unbound, possibly pre-populated form
        form = create_form(
            model,
            form_model,
            projects=projects,
            project=project,
            clients=clients,
            client=client,
            gross=gross,
            net=net,
            tasks=tasks,
            task=task)
    else:
        obj = get_object_or_404(model, pk=pk)
        form = form_model(instance=obj)
    if request.method == 'POST':
        if pk is None:
            form = form_model(request.POST)
        else:
            checkbox = request.POST.get('checkbox')
            checkbox_subscribed = request.POST.get('checkbox-subscribed')
            company_note = request.GET.get('company')
            copy = request.POST.get('copy')
            delete = request.POST.get('delete')
            # Copy or delete
            if copy:
                return obj_copy(obj, url_name)
            if company_note:
                # BUG FIX: contract_settings (and request) are now passed.
                return obj_edit(
                    obj,
                    company,
                    contract_settings,
                    request=request,
                    company_note=True)
            if delete:
                return obj_delete(obj, company, request=request)
            # Check boxes
            if (checkbox == 'on' or checkbox == 'off' or
                    checkbox_subscribed == 'on' or
                    checkbox_subscribed == 'off'):
                ref = request.META['HTTP_REFERER']
                return check_boxes(obj, checkbox, checkbox_subscribed, ref)
            # Edit amounts
            if amount or subtotal or paid_amount or paid:
                return edit_amounts(
                    obj,
                    amount,
                    subtotal,
                    paid_amount,
                    paid,
                    kwargs=kwargs,
                    url_name=url_name)
            form = form_model(request.POST, instance=obj)
        if form.is_valid():
            obj = form.save()
            return obj_edit(
                obj,
                company,
                contract_settings,
                request=request,
                pk=pk,
                kwargs=kwargs,
                url_name=url_name)
    context['active_nav'] = active_nav
    context['form'] = form
    context['item'] = obj
    context['pk'] = pk
    return render(request, template, context)
def entries_total(queryset):
    """
    Add estimate and invoice time entries, could be an aggregate
    (https://docs.djangoproject.com/en/1.9/topics/db/aggregation/)

    Returns (entries, running_total_co, running_total_dev,
    running_total_hours, total): ``entries`` maps each time entry to its
    display fields, *_co totals are billed at the task rate, *_dev at
    the user's profile rate, and total = co - dev.
    """
    entries = OrderedDict()
    total = 0
    running_total_co = 0
    running_total_dev = 0
    running_total_hours = 0
    for entry in queryset:
        entries[entry] = {}
        hours = entry.hours
        if hours:
            running_total_hours += hours
        entries[entry]['date'] = entry.date
        entries[entry]['hours'] = hours
        entries[entry]['notes'] = entry.notes
        entries[entry]['pk'] = entry.pk
        entries[entry]['user'] = entry.user
        entries[entry]['task'] = entry.task
        line_total = 0
        line_total_co = 0
        line_total_dev = 0
        line_total_client = 0
        if entry.task:
            rate = entry.task.rate
            entries[entry]['rate'] = rate
            if rate:
                # Billed amount: task rate * hours
                line_total_co = rate * hours
                entries[entry]['line_total_co'] = line_total_co
                running_total_co += line_total_co
        if entry.user and entry.project:
            if hasattr(entry.user, 'profile'):
                if entry.user.profile.rate:
                    # Owed to the worker: profile rate * hours
                    line_total_dev = entry.user.profile.rate * hours
                    entries[entry]['line_total_dev'] = line_total_dev
                    running_total_dev += line_total_dev
        if entry.project:
            # Project entries: margin for us, full rate for the client
            line_total = line_total_co - line_total_dev
            line_total_client = line_total_co
            entries[entry]['line_total_client'] = '%.2f' % line_total_client
        else:
            line_total = line_total_co
        entries[entry]['line_total'] = '%.2f' % line_total
    total = running_total_co - running_total_dev
    return (entries, running_total_co, running_total_dev, running_total_hours,
            total)
def generate_doc(doc):
    """
    Build a python-docx Document from ``doc.body``: HTML tags are
    stripped so the Word file gets plain text, and a page break is
    appended after the body.

    https://stackoverflow.com/a/24122313/185820
    """
    document = Document()
    document.add_paragraph(strip_tags(doc.body))
    document.add_page_break()
    return document
def get_active_kwarg(model, active=False, user=None):
    """
    Build the ORM filter kwargs meaning "active" for the given model.

    What "active" means varies: unaccepted estimates, unpaid invoices,
    uninvoiced (and never estimated) times — restricted to the user's
    own entries for non-staff — profile-active users, and a plain
    ``active`` field for every other model.
    """
    verbose_name = model._meta.verbose_name
    kwargs = {}
    if verbose_name == 'estimate':
        if active:
            kwargs['accepted_date'] = None
    elif verbose_name == 'invoice':
        if active:
            kwargs['last_payment_date'] = None
    elif verbose_name == 'time':
        if not user.is_staff:
            kwargs['user'] = user
        kwargs['invoiced'] = not active
        kwargs['estimate'] = None
    elif verbose_name == 'user':
        kwargs['profile__active'] = active
    else:
        kwargs['active'] = active
    return kwargs
def get_filename(company):
    """Uppercased company name with '.' and ', ' squashed to underscores."""
    return company.name.replace('.', '_').replace(', ', '_').upper()
def get_setting(request, settings, setting, page_size=None):
    """
    Allow user to override global setting

    Resolution order: the user's profile preference, then (for
    'page_size') the view's own ``page_size`` argument, then the global
    app-settings value. Returns None for anonymous users or unknown
    setting names.
    """
    if not request.user.is_authenticated:
        return
    override = user_pref = None
    if setting == 'icon_size':
        if hasattr(request.user, 'profile'):
            user_pref = request.user.profile.icon_size
        if user_pref:
            return user_pref
        else:
            return settings.icon_size
    if setting == 'page_size':
        if hasattr(request.user, 'profile'):
            user_pref = request.user.profile.page_size
        if user_pref:
            return user_pref
        elif page_size:  # View's page_size preference
            return page_size
        else:
            return settings.page_size
    if setting == 'dashboard_choices':
        if hasattr(request.user, 'profile'):
            user_pref = request.user.profile.dashboard_choices
            override = request.user.profile.override_dashboard
        if override:
            return user_pref
        else:
            return settings.dashboard_choices
def get_query(request, query):
    """
    Pull ``query`` from the request, with special handling for a few keys.

    'paginated' -> bool (only the literal 'false' disables pagination);
    'search'    -> POST body field when the request is a POST;
    'values'    -> list of comma-split lists from a space-separated param;
    anything else -> the raw GET value ('' when absent).
    """
    if query == 'paginated':
        return request.GET.get('paginated') != u'false'
    if query == 'search' and request.method == 'POST':
        return request.POST.get('search', '')
    if query == 'values':
        raw = request.GET.get('values')
        groups = raw.split(' ') if raw else []
        return [group.split(',') for group in groups]
    # Normal handling
    return request.GET.get(query, '')
def get_search_results(model,
                       fields,
                       search,
                       active_nav='',
                       app_settings=None,
                       edit_url='',
                       order_by='-updated',
                       request=None):
    """
    Case-insensitive OR search for ``search`` across ``fields`` of
    ``model``; returns a template context holding the matching items.
    """
    context = {}
    query = []
    for field in fields:
        query.append(Q(**{field + '__icontains': search}))
    # OR all per-field conditions into a single filter
    items = model.objects.filter(reduce(OR, query)).order_by(order_by)
    context['active_nav'] = active_nav
    context['edit_url'] = edit_url
    context['icon_size'] = get_setting(request, app_settings, 'icon_size')
    context['items'] = items
    context['show_search'] = True
    return context
def gravatar_url(email):
    """
    Return the Gravatar avatar URL for ``email``.

    Gravatar hashes the lowercased address. md5() requires bytes on
    Python 3 (the previous str argument raises TypeError there); the
    encode is a no-op for ASCII addresses on Python 2.
    """
    digest = md5(email.lower().encode('utf-8')).hexdigest()
    return settings.GRAVATAR_URL % digest
def index_items(request,
                model,
                fields,
                filters={},
                order_by=(),
                app_settings=None,
                active_nav='',
                edit_url='',
                page_size=None,
                show_search=False):
    """
    Build the template context for a model's index (listing) page:
    POST search, optional filters and ordering, per-model extras
    (total hours for times, cost for reports), and pagination.

    NOTE(review): ``filters={}`` is a shared mutable default; it is only
    read here, so currently harmless.
    """
    context = {}
    page = get_query(request, 'page')
    paginated = get_query(request, 'paginated')
    search = get_query(request, 'search')
    # Search is easy
    if request.method == 'POST':
        if search == u'':  # Empty search returns none
            context['show_search'] = True
            return context
        else:
            return get_search_results(
                model,
                fields,
                search,
                active_nav=active_nav,
                app_settings=app_settings,
                edit_url=edit_url,
                request=request)
    # Not a search
    if filters:
        items = model.objects.filter(**filters)
    else:
        items = model.objects.all()
    # Reorder items
    if order_by:
        # http://stackoverflow.com/a/20257999/185820
        if len(order_by) > 1:
            items = items.order_by(order_by[0], order_by[1])
        else:
            items = items.order_by(order_by[0])
    # Calculate total hours
    if model._meta.verbose_name == 'time':
        total_hours = items.aggregate(hours=Sum(F('hours')))
        total_hours = total_hours['hours']
        context['total_hours'] = total_hours
    # Calculate cost per report
    if model._meta.verbose_name == 'report':
        for item in items:
            cost = item.gross - item.net
            item.cost = cost
            item.save()
    # Don't show items to anon
    if not request.user.is_authenticated:
        items = []
    # Paginate if paginated
    if paginated:
        page_size = get_setting(
            request, app_settings, 'page_size', page_size=page_size)
        items = paginate(items, page, page_size)
    context['active_nav'] = active_nav
    context['edit_url'] = edit_url
    context['icon_size'] = get_setting(request, app_settings, 'icon_size')
    context['items'] = items
    context['page'] = page
    context['paginated'] = paginated
    context['show_search'] = show_search
    return context
def last_month():
    """
    Returns last day of last month
    """
    first_of_month = timezone.now().replace(day=1)
    # Stepping one day back from the 1st lands on the previous month's last day.
    return first_of_month - timezone.timedelta(days=1)
def obj_copy(obj, url_name):
    """Duplicate ``obj`` (pk=None save trick) and redirect to the copy's
    edit page; the target URL name is derived from the model's verbose
    name rather than the ``url_name`` argument."""
    obj.pk = None
    obj.save()
    edit_url = url_name_from(obj._meta.verbose_name, page_type='edit')
    return HttpResponseRedirect(reverse(edit_url, kwargs={'pk': obj.pk}))
def obj_delete(obj, company, request=None):
    """
    Delete ``obj``, rolling back the company's invoice/estimate counter
    where applicable, then redirect to the model's index (or home for a
    non-staff user's time entry).
    """
    url_name = url_name_from(
        obj._meta.verbose_name, page_type='index')  # Redir to index
    # Decrement invoice counter
    if (obj._meta.verbose_name == 'invoice' and company.invoice_counter):
        company.invoice_counter -= 1
        company.save()
    # Decrement estimate counter
    if (obj._meta.verbose_name == 'estimate' and company.estimate_counter):
        company.estimate_counter -= 1
        company.save()
    if (obj._meta.verbose_name == 'time' and not request.user.is_staff):
        url_name = 'home'  # Redir to home
    obj.delete()
    return HttpResponseRedirect(reverse(url_name))
def obj_edit(obj,
             company,
             contract_settings,
             company_note=None,
             ref=None,
             request=None,
             kwargs={},
             pk=None,
             url_name=''):
    """
    Post-save hook: apply side effects for a just-created/edited object,
    then redirect to ``url_name``.

    Handles: assigning the user (plus notification mail) to new time
    entries, document counters for new invoices/estimates, inheriting
    the client from an invoice's project, seeding default contract body
    text, and attaching company notes.
    """
    # Time entry
    if obj._meta.verbose_name == 'time' and pk is None:
        # Assign user to time entry on creation
        obj.user = User.objects.get(username=request.user)
        obj.save()
        # Send mail when time entry created
        if hasattr(obj.user, 'profile'):
            if obj.user.profile.notify:
                subject = 'Time entry'
                message = '%s entered time! %s' % (
                    obj.user.username,
                    obj.get_absolute_url(request.get_host()))
                send_mail(request, subject, message, settings.EMAIL_FROM)
    # Assign and increment invoice counter
    if (obj._meta.verbose_name == 'invoice' and company.invoice_counter and
            pk is None):
        company.invoice_counter += 1
        company.save()
        obj.document_id = company.invoice_counter
        obj.save()
    # Assign and increment estimate counter
    if (obj._meta.verbose_name == 'estimate' and company.estimate_counter and
            pk is None):
        company.estimate_counter += 1
        company.save()
        obj.document_id = company.estimate_counter
        obj.save()
    # Assign client to invoice
    if obj._meta.verbose_name == 'invoice' and obj.project:
        if obj.project.client and not obj.client:
            obj.client = obj.project.client
            obj.save()
    # Redir to appropriate location
    if (obj._meta.verbose_name == 'time' and not request.user.is_staff):
        url_name = 'home'
    # Assign default contract fields
    if obj._meta.verbose_name == 'contract' and pk is None:
        text = ''
        # Concatenate every settings text field (except the body itself)
        # as "<h2>label</h2><p>default</p>" sections.
        for field in contract_settings._meta.fields:
            if field.description == 'Text' and field.name != 'body':
                text = ''.join([text, '<h2>', field.verbose_name, '</h2>'])
                text = ''.join([text, '<p>', field.get_default(), '</p>'])
        setattr(obj, 'body', text)
        obj.save()
    if obj._meta.verbose_name == 'note' and company_note:
        company.note.add(obj)
        company.save()
        url_name = 'company'
    return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
def paginate(items, page, page_size):
    """
    Return the requested page of ``items``, clamping bad page values:
    a non-integer page falls back to page 1, an out-of-range page to
    the last page.
    """
    paginator = Paginator(items, page_size, orphans=5)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)
def send_mail(request,
              subject,
              message,
              to,
              url=None,
              uuid=None,
              first_name=None):
    """
    Send a templated HTML email (cerberus-fluid) to a single recipient.

    Returns True on success; returns False and flashes a warning on
    ``request`` when the SMTP sender is refused.
    """
    recipients = []
    sender = settings.EMAIL_FROM
    recipients.append(to)
    # http://stackoverflow.com/a/28476681/185820
    if first_name:
        username = first_name
    else:
        # Fall back to the address itself as the display name
        username = to
    html_message = render_to_string('cerberus-fluid.html', {
        'username': username,
        'message': message,
        'url': url,
        'uuid': uuid,
    })
    try:
        django_send_mail(
            subject,
            message,
            sender,
            recipients,
            fail_silently=False,
            html_message=html_message)
        return True
    except SMTPSenderRefused:
        messages.add_message(request, messages.WARNING, 'SMTPSenderRefused!')
        return False
def url_name_from(verbose_name, page_type=None):
    """
    Map a model's verbose name to its edit or index URL pattern name.

    Returns None for any other ``page_type``; an unknown model name
    raises KeyError when a lookup actually happens.
    """
    url_names = {
        'client': ('client_edit', 'client_index'),
        'contact': ('contact_edit', 'contact_index'),
        'contract': ('contract_edit', 'contract_index'),
        'estimate': ('estimate_edit', 'estimate_index'),
        'invoice': ('invoice_edit', 'invoice_index'),
        'newsletter': ('newsletter_edit', 'newsletter_index'),
        'note': ('note_edit', 'note_index'),
        'project': ('project_edit', 'project_index'),
        'report': ('report_edit', 'report_index'),
        'task': ('task_edit', 'task_index'),
        'time': ('entry_edit', 'entry_index'),
    }
    if page_type == 'edit':
        return url_names[verbose_name][0]
    if page_type == 'index':
        return url_names[verbose_name][1]
Update
from collections import OrderedDict
from decimal import Decimal
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.mail import send_mail as django_send_mail
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.db.models import F
from django.db.models import Sum
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.html import strip_tags
from docx import Document
from functools import reduce
from import_export import widgets
from hashlib import md5
from operator import or_ as OR
from smtplib import SMTPSenderRefused
class BooleanWidget(widgets.Widget):
    """
    Import/export widget mapping the string 'Yes' to True, anything
    else to False.
    """
    def clean(self, value):
        return value == 'Yes'
class DecimalWidget(widgets.Widget):
    """
    Import/export widget parsing comma-grouped strings into Decimal;
    empty values import as zero.
    """
    def clean(self, value):
        if not value:
            return Decimal(0)
        return Decimal(value.replace(',', ''))
class UserWidget(widgets.Widget):
    """
    Pass-through import/export widget: returns the raw value unchanged.
    """
    def clean(self, value):
        return value
def add_user_to_contacts(request, model, pk=None):
    """
    Create a contact (an instance of ``model``) from an existing auth user.

    POST only. The user must have an email, first and last name; a
    contact with the same email is refused as a duplicate. A flash
    message reports the outcome in every case.
    """
    if request.method == 'POST':
        if pk is None:
            return HttpResponseRedirect(reverse('user_index'))
        else:
            user = get_object_or_404(User, pk=pk)
            if not user.email or not user.first_name or not user.last_name:
                messages.add_message(request, messages.WARNING,
                                     'No email no contact!')
                return HttpResponseRedirect(reverse('user_index'))
            contact = model.objects.filter(email=user.email)
            if contact:
                contact = contact[0].email
                messages.add_message(request, messages.WARNING,
                                     'Found duplicate: %s!' % contact)
                return HttpResponseRedirect(reverse('user_index'))
            contact = model(
                email=user.email,
                active=True,
                first_name=user.first_name,
                last_name=user.last_name)
            contact.save()
            messages.add_message(request, messages.INFO,
                                 'User added to contacts!')
            return HttpResponseRedirect(reverse('contact_index'))
def check_boxes(obj, checkbox, checkbox_subscribed, ref):
    """
    Toggle ``obj.active`` or ``obj.subscribed`` from checkbox POST
    values ('on'/'off'), save, and redirect back to ``ref``. The active
    checkbox takes precedence; with neither set, nothing happens.
    """
    if checkbox in ('on', 'off'):
        obj.active = checkbox == 'on'
        obj.save()
        return HttpResponseRedirect(ref)
    if checkbox_subscribed in ('on', 'off'):
        obj.subscribed = checkbox_subscribed == 'on'
        obj.save()
        return HttpResponseRedirect(ref)
def create_form(model,
                form_model,
                projects=[],
                project=None,
                clients=[],
                client=None,
                gross=None,
                net=None,
                tasks=[],
                task=None):
    """
    Build the unbound form for ``model``, limiting field querysets and
    pre-populating initial values by model type.

    NOTE(review): the [] defaults are shared mutable defaults; they are
    only read here, never mutated, so this is currently harmless.
    """
    form = form_model()
    # Populate new report with gross and net calculated
    # from active invoices
    if form._meta.model._meta.verbose_name == 'report':
        obj = model(gross=gross, net=net)
        form = form_model(instance=obj)
    # Limit time entry project, client
    # and task choices
    if form._meta.model._meta.verbose_name == 'time':
        form.fields['project'].queryset = projects
        form.fields['client'].queryset = clients
        form.fields['task'].queryset = tasks
    # Limit project client choices
    if form._meta.model._meta.verbose_name == 'project':
        form.fields['client'].queryset = clients
    # Populate time entry form fields with project, client
    # and task values
    if project and model._meta.verbose_name == 'time':
        entry = model(
            project=project, client=project.client, task=project.task)
        form = form_model(instance=entry)
    # Populate invoice with project
    elif project and model._meta.verbose_name == 'invoice':
        entry = model(project=project, client=project.client)
        form = form_model(instance=entry)
    # Populate time entry form fields with client and
    # task values
    elif client and task:
        entry = model(client=client, task=task)
        form = form_model(instance=entry)
    # Populate project entry form fields with client value
    elif client:
        entry = model(client=client)
        form = form_model(instance=entry)
    # Populate time entry form fields with task value
    elif task:
        entry = model(task=task)
        form = form_model(instance=entry)
    return form
def daily_burn(project):
    """
    Budgeted hours per day for ``project`` as a '%.2f' string.

    Returns '' when dates/budget are missing (TypeError) or the date
    range spans zero days (ZeroDivisionError).
    """
    try:
        span = (project.end_date - project.start_date).days
        return '%.2f' % (project.budget / span)
    except (TypeError, ZeroDivisionError):
        return ''
def dashboard_totals(model):
    """
    Gross and net totals across active (unpaid) invoices.

    Returns (gross, net, invoices_active): gross sums invoice subtotals,
    net sums invoice amounts, skipping null money fields.

    The original also built an OrderedDict of per-invoice figures that
    was never read or returned; that dead work is removed.
    """
    invoices_active = model.objects.filter(last_payment_date=None)
    invoices_active = invoices_active.order_by('-pk')
    gross = 0
    net = 0
    for invoice in invoices_active:
        # Skip null (unset) money fields rather than raising TypeError.
        if invoice.subtotal:
            gross += invoice.subtotal
        if invoice.amount:
            net += invoice.amount
    return gross, net, invoices_active
def edit_amounts(obj,
                 amount,
                 subtotal,
                 paid_amount,
                 paid,
                 kwargs={},
                 url_name=''):
    """
    Update money fields posted for an invoice/estimate and redirect to
    ``url_name``. Branches run most-specific first; marking ``paid``
    also stamps ``last_payment_date``.

    NOTE(review): the amount+subtotal and amount-only branches call
    reverse() without ``kwargs``, and None is returned when no amount
    was posted — confirm both are intentional.
    """
    if amount and subtotal and paid_amount and paid:
        obj.amount = amount
        obj.last_payment_date = timezone.now()
        obj.subtotal = subtotal
        obj.paid_amount = paid_amount
        obj.save()
        return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
    elif amount and subtotal and paid_amount:
        obj.amount = amount
        obj.subtotal = subtotal
        obj.paid_amount = paid_amount
        obj.save()
        return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
    elif amount and subtotal:
        obj.amount = amount
        obj.subtotal = subtotal
        obj.save()
        return HttpResponseRedirect(reverse(url_name))
    elif amount:
        obj.amount = amount
        obj.save()
        return HttpResponseRedirect(reverse(url_name))
def edit(
        request,
        form_model,
        model,
        url_name,
        template,
        amount=None,
        client=None,
        clients=[],
        company=None,
        contract_settings=None,
        context=None,
        gross=None,
        invoices_active=None,  # for reporting
        kwargs={},
        active_nav=None,
        net=None,
        pk=None,
        paid_amount=None,
        paid=None,
        project=None,
        projects=[],
        subtotal=None,
        task=None,
        tasks=[]):
    """
    Generic create/edit view body shared by all models.

    GET renders an (optionally pre-populated) form; POST dispatches to
    copy/delete/checkbox/amount handlers or validates and saves the
    form, then redirects via obj_edit().

    Fixes applied:
    * obj_edit() was called with only (obj, company, company_note=True),
      omitting the required ``contract_settings`` positional argument —
      a TypeError at runtime on the company-note path.
    * ``context`` was a shared mutable default ({}) that this function
      mutates; it now defaults to None and a fresh dict is built per
      call. Callers passing their own dict are unaffected.
    """
    if context is None:
        context = {}
    obj = None
    if pk is None:
        # New object: unbound, possibly pre-populated form
        form = create_form(
            model,
            form_model,
            projects=projects,
            project=project,
            clients=clients,
            client=client,
            gross=gross,
            net=net,
            tasks=tasks,
            task=task)
    else:
        obj = get_object_or_404(model, pk=pk)
        form = form_model(instance=obj)
    if request.method == 'POST':
        if pk is None:
            form = form_model(request.POST)
        else:
            checkbox = request.POST.get('checkbox')
            checkbox_subscribed = request.POST.get('checkbox-subscribed')
            company_note = request.GET.get('company')
            copy = request.POST.get('copy')
            delete = request.POST.get('delete')
            # Copy or delete
            if copy:
                return obj_copy(obj, url_name)
            if company_note:
                # BUG FIX: contract_settings (and request) are now passed.
                return obj_edit(
                    obj,
                    company,
                    contract_settings,
                    request=request,
                    company_note=True)
            if delete:
                return obj_delete(obj, company, request=request)
            # Check boxes
            if (checkbox == 'on' or checkbox == 'off' or
                    checkbox_subscribed == 'on' or
                    checkbox_subscribed == 'off'):
                ref = request.META['HTTP_REFERER']
                return check_boxes(obj, checkbox, checkbox_subscribed, ref)
            # Edit amounts
            if amount or subtotal or paid_amount or paid:
                return edit_amounts(
                    obj,
                    amount,
                    subtotal,
                    paid_amount,
                    paid,
                    kwargs=kwargs,
                    url_name=url_name)
            form = form_model(request.POST, instance=obj)
        if form.is_valid():
            obj = form.save()
            return obj_edit(
                obj,
                company,
                contract_settings,
                request=request,
                pk=pk,
                kwargs=kwargs,
                url_name=url_name)
    context['active_nav'] = active_nav
    context['form'] = form
    context['item'] = obj
    context['pk'] = pk
    return render(request, template, context)
def entries_total(queryset):
    """
    Add estimate and invoice time entries, could be an aggregate
    (https://docs.djangoproject.com/en/1.9/topics/db/aggregation/)

    Returns (entries, running_total_co, running_total_dev,
    running_total_hours, total): ``entries`` maps each time entry to its
    display fields, *_co totals are billed at the task rate, *_dev at
    the user's profile rate, and total = co - dev.
    """
    entries = OrderedDict()
    total = 0
    running_total_co = 0
    running_total_dev = 0
    running_total_hours = 0
    for entry in queryset:
        entries[entry] = {}
        hours = entry.hours
        if hours:
            running_total_hours += hours
        entries[entry]['date'] = entry.date
        entries[entry]['hours'] = hours
        entries[entry]['notes'] = entry.notes
        entries[entry]['pk'] = entry.pk
        entries[entry]['user'] = entry.user
        entries[entry]['task'] = entry.task
        line_total = 0
        line_total_co = 0
        line_total_dev = 0
        line_total_client = 0
        if entry.task:
            rate = entry.task.rate
            entries[entry]['rate'] = rate
            if rate:
                # Billed amount: task rate * hours
                line_total_co = rate * hours
                entries[entry]['line_total_co'] = line_total_co
                running_total_co += line_total_co
        if entry.user and entry.project:
            if hasattr(entry.user, 'profile'):
                if entry.user.profile.rate:
                    # Owed to the worker: profile rate * hours
                    line_total_dev = entry.user.profile.rate * hours
                    entries[entry]['line_total_dev'] = line_total_dev
                    running_total_dev += line_total_dev
        if entry.project:
            # Project entries: margin for us, full rate for the client
            line_total = line_total_co - line_total_dev
            line_total_client = line_total_co
            entries[entry]['line_total_client'] = '%.2f' % line_total_client
        else:
            line_total = line_total_co
        entries[entry]['line_total'] = '%.2f' % line_total
    total = running_total_co - running_total_dev
    return (entries, running_total_co, running_total_dev, running_total_hours,
            total)
def generate_doc(doc):
    """
    Build a python-docx Document containing the object's body text.

    https://stackoverflow.com/a/24122313/185820
    """
    word_doc = Document()
    word_doc.add_paragraph(doc.body)
    return word_doc
def get_active_kwarg(model, active=False, user=None):
    """
    Return queryset filter kwargs expressing "active" for *model*.

    What "active" means varies by model type; unknown types fall back
    to a plain boolean ``active`` field.
    """
    name = model._meta.verbose_name
    if name == 'estimate':
        # An estimate is "active" until it has been accepted.
        return {'accepted_date': None} if active else {}
    if name == 'invoice':
        # An invoice is "active" until it has been paid.
        return {'last_payment_date': None} if active else {}
    if name == 'time':
        kwargs = {}
        if not user.is_staff:
            # Non-staff users only ever see their own entries.
            kwargs['user'] = user
        # "Active" time entries are the uninvoiced ones; estimated
        # entries are never active.
        kwargs['invoiced'] = not active
        kwargs['estimate'] = None
        return kwargs
    if name == 'user':
        # Users delegate "active" to their related profile.
        return {'profile__active': active}
    # Default: models carrying their own ``active`` boolean field.
    return {'active': active}
def get_filename(company):
    """Derive an upper-cased, filename-safe string from the company name."""
    # Replace dots first, then comma-space, exactly as before.
    safe_name = company.name.replace('.', '_').replace(', ', '_')
    return safe_name.upper()
def get_setting(request, settings, setting, page_size=None):
    """
    Resolve a named setting, letting the authenticated user's profile
    override the global value.

    Returns None for anonymous users and for unknown setting names.
    """
    if not request.user.is_authenticated:
        return None
    if setting == 'icon_size':
        pref = None
        if hasattr(request.user, 'profile'):
            pref = request.user.profile.icon_size
        return pref if pref else settings.icon_size
    if setting == 'page_size':
        pref = None
        if hasattr(request.user, 'profile'):
            pref = request.user.profile.page_size
        if pref:
            return pref
        if page_size:  # the view's own page_size preference
            return page_size
        return settings.page_size
    if setting == 'dashboard_choices':
        pref = override = None
        if hasattr(request.user, 'profile'):
            pref = request.user.profile.dashboard_choices
            override = request.user.profile.override_dashboard
        return pref if override else settings.dashboard_choices
    return None
def get_query(request, query):
    """
    Extract a query parameter from the request, with special handling
    for a few well-known keys; everything else falls back to GET.
    """
    if query == 'paginated':
        # Anything other than the literal string "false" keeps
        # pagination enabled.
        return request.GET.get('paginated') != u'false'
    if query == 'search' and request.method == 'POST':
        return request.POST.get('search', '')
    if query == 'values':
        # "1,2 3,4" -> [['1', '2'], ['3', '4']]; missing -> [].
        raw = request.GET.get('values')
        tokens = raw.split(' ') if raw else []
        return [token.split(',') for token in tokens]
    return request.GET.get(query, '')
def get_search_results(model,
                       fields,
                       search,
                       active_nav='',
                       app_settings=None,
                       edit_url='',
                       order_by='-updated',
                       request=None):
    """
    Case-insensitive search for *search* across *fields*, OR-ing one Q
    object per field, and return a template context with the matches.
    """
    q_objects = [Q(**{'%s__icontains' % field: search}) for field in fields]
    items = model.objects.filter(reduce(OR, q_objects)).order_by(order_by)
    return {
        'active_nav': active_nav,
        'edit_url': edit_url,
        'icon_size': get_setting(request, app_settings, 'icon_size'),
        'items': items,
        'show_search': True,
    }
def gravatar_url(email):
    """
    Return the Gravatar URL for *email*.

    The address is lower-cased and UTF-8 encoded before hashing:
    Gravatar specifies hashing the trimmed, lower-cased address, and
    hashlib's md5 only accepts bytes under Python 3 (the original
    ``md5(email.lower())`` raised TypeError there).
    """
    digest = md5(email.lower().encode('utf-8')).hexdigest()
    return settings.GRAVATAR_URL % digest
def index_items(request,
                model,
                fields,
                filters=None,
                order_by=(),
                app_settings=None,
                active_nav='',
                edit_url='',
                page_size=None,
                show_search=False):
    """
    Build the template context for a model's index (listing) page.

    Handles POST search, optional filtering and ordering, per-model
    extras (total hours for time entries, cost for reports), hiding
    items from anonymous users, and pagination.

    :param filters: optional queryset filter kwargs. Was a mutable
        ``{}`` default (shared across calls — a classic Python pitfall);
        ``None`` is backward-compatible since the dict was only read.
    :param order_by: tuple of ordering expressions, all applied.
    """
    context = {}
    page = get_query(request, 'page')
    paginated = get_query(request, 'paginated')
    search = get_query(request, 'search')
    # A POST means a search was submitted
    if request.method == 'POST':
        if search == u'':  # Empty search returns no items
            context['show_search'] = True
            return context
        return get_search_results(
            model,
            fields,
            search,
            active_nav=active_nav,
            app_settings=app_settings,
            edit_url=edit_url,
            request=request)
    # Not a search
    if filters:
        items = model.objects.filter(**filters)
    else:
        items = model.objects.all()
    # Reorder items; order_by(*order_by) honors every key (the old code
    # silently dropped any key past the second).
    if order_by:
        items = items.order_by(*order_by)
    # Calculate total hours for time entries
    if model._meta.verbose_name == 'time':
        total_hours = items.aggregate(hours=Sum(F('hours')))
        context['total_hours'] = total_hours['hours']
    # Calculate and persist cost per report
    if model._meta.verbose_name == 'report':
        for item in items:
            item.cost = item.gross - item.net
            item.save()
    # Don't show items to anonymous users
    if not request.user.is_authenticated:
        items = []
    # Paginate if requested
    if paginated:
        page_size = get_setting(
            request, app_settings, 'page_size', page_size=page_size)
        items = paginate(items, page, page_size)
    context['active_nav'] = active_nav
    context['edit_url'] = edit_url
    context['icon_size'] = get_setting(request, app_settings, 'icon_size')
    context['items'] = items
    context['page'] = page
    context['paginated'] = paginated
    context['show_search'] = show_search
    return context
def last_month():
    """
    Returns a datetime on the last day of the previous month.
    """
    start_of_this_month = timezone.now().replace(day=1)
    # Stepping one day back from the 1st always lands in the prior month.
    return start_of_this_month - timezone.timedelta(days=1)
def obj_copy(obj, url_name):
    """
    Duplicate *obj* and redirect to the copy's edit page.

    NOTE(review): the ``url_name`` argument is dead — it is
    unconditionally recomputed from the object's verbose name below.
    Confirm callers before removing the parameter.
    """
    # Clearing the pk and saving inserts a new row (Django copy idiom);
    # ``dup`` is just an alias for readability.
    dup = obj
    dup.pk = None
    dup.save()
    kwargs = {}
    kwargs['pk'] = dup.pk
    url_name = url_name_from(obj._meta.verbose_name, page_type='edit')
    return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
def obj_delete(obj, company, request=None):
    """
    Delete *obj*, unwinding per-type side effects, then redirect.

    Deleting an invoice or estimate decrements the company's matching
    document counter; non-staff users deleting a time entry are sent
    home rather than to the entry index.

    NOTE(review): the ``request=None`` default is unsafe when *obj* is
    a time entry — ``request.user`` is dereferenced below. Confirm all
    such callers pass a request.
    """
    url_name = url_name_from(
        obj._meta.verbose_name, page_type='index') # Redir to index
    # Decrement invoice counter
    if (obj._meta.verbose_name == 'invoice' and company.invoice_counter):
        company.invoice_counter -= 1
        company.save()
    # Decrement estimate counter
    if (obj._meta.verbose_name == 'estimate' and company.estimate_counter):
        company.estimate_counter -= 1
        company.save()
    if (obj._meta.verbose_name == 'time' and not request.user.is_staff):
        url_name = 'home' # Redir to home
    obj.delete()
    return HttpResponseRedirect(reverse(url_name))
def obj_edit(obj,
             company,
             contract_settings,
             company_note=None,
             ref=None,
             request=None,
             kwargs={},
             pk=None,
             url_name=''):
    """
    Apply per-type side effects after an object is created or edited,
    then redirect. ``pk is None`` marks a freshly created object.

    NOTE(review): ``ref`` is never used in this body — confirm callers
    before removing. ``kwargs={}`` is a mutable default, harmless here
    because it is only read (passed to ``reverse``), never mutated.
    ``url_name=''`` assumes the caller supplies a real URL name or one
    of the branches below overrides it — ``reverse('')`` would fail.
    """
    # Time entry
    if obj._meta.verbose_name == 'time' and pk is None:
        # Assign user to time entry on creation
        obj.user = User.objects.get(username=request.user)
        obj.save()
        # Notify by mail when a time entry is created, if the user's
        # profile opted in
        if hasattr(obj.user, 'profile'):
            if obj.user.profile.notify:
                subject = 'Time entry'
                message = '%s entered time! %s' % (
                    obj.user.username,
                    obj.get_absolute_url(request.get_host()))
                send_mail(request, subject, message, settings.EMAIL_FROM)
    # Assign and increment invoice counter on creation
    if (obj._meta.verbose_name == 'invoice' and company.invoice_counter and
            pk is None):
        company.invoice_counter += 1
        company.save()
        obj.document_id = company.invoice_counter
        obj.save()
    # Assign and increment estimate counter on creation
    if (obj._meta.verbose_name == 'estimate' and company.estimate_counter and
            pk is None):
        company.estimate_counter += 1
        company.save()
        obj.document_id = company.estimate_counter
        obj.save()
    # Inherit the invoice's client from its project when unset
    if obj._meta.verbose_name == 'invoice' and obj.project:
        if obj.project.client and not obj.client:
            obj.client = obj.project.client
            obj.save()
    # Non-staff users editing time entries are redirected home
    if (obj._meta.verbose_name == 'time' and not request.user.is_staff):
        url_name = 'home'
    # Seed a new contract's body from the contract-settings text fields
    if obj._meta.verbose_name == 'contract' and pk is None:
        text = ''
        for field in contract_settings._meta.fields:
            if field.description == 'Text' and field.name != 'body':
                text = ''.join([text, '<h2>', field.verbose_name, '</h2>'])
                text = ''.join([text, '<p>', field.get_default(), '</p>'])
        setattr(obj, 'body', text)
        obj.save()
    # Attach a new note to the company and redirect to the company page
    if obj._meta.verbose_name == 'note' and company_note:
        company.note.add(obj)
        company.save()
        url_name = 'company'
    return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
def paginate(items, page, page_size):
    """
    Return the requested page of *items*, clamping bad input: a
    non-integer page number yields page 1, an out-of-range one yields
    the last page. Up to 5 orphans are folded into the final page.
    """
    paginator = Paginator(items, page_size, orphans=5)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)
def send_mail(request,
              subject,
              message,
              to,
              url=None,
              uuid=None,
              first_name=None):
    """
    Send a templated HTML email to a single recipient.

    Wraps Django's mail function (bound elsewhere in this module as
    ``django_send_mail`` to avoid clashing with this name). Returns
    True on success; on SMTPSenderRefused adds a warning flash to the
    request and returns False.
    """
    recipients = []
    sender = settings.EMAIL_FROM
    recipients.append(to)
    # Greet by first name when known, otherwise by the address itself
    # http://stackoverflow.com/a/28476681/185820
    if first_name:
        username = first_name
    else:
        username = to
    html_message = render_to_string('cerberus-fluid.html', {
        'username': username,
        'message': message,
        'url': url,
        'uuid': uuid,
    })
    try:
        django_send_mail(
            subject,
            message,
            sender,
            recipients,
            fail_silently=False,
            html_message=html_message)
        return True
    except SMTPSenderRefused:
        messages.add_message(request, messages.WARNING, 'SMTPSenderRefused!')
        return False
def url_name_from(verbose_name, page_type=None):
    """
    Map a model's verbose name to its edit/index URL pattern name.

    Returns None for an unknown ``page_type``; raises KeyError for an
    unknown ``verbose_name`` when ``page_type`` is recognized.
    """
    routes = {
        'client': ('client_edit', 'client_index'),
        'contact': ('contact_edit', 'contact_index'),
        'contract': ('contract_edit', 'contract_index'),
        'estimate': ('estimate_edit', 'estimate_index'),
        'invoice': ('invoice_edit', 'invoice_index'),
        'newsletter': ('newsletter_edit', 'newsletter_index'),
        'note': ('note_edit', 'note_index'),
        'project': ('project_edit', 'project_index'),
        'report': ('report_edit', 'report_index'),
        'task': ('task_edit', 'task_index'),
        'time': ('entry_edit', 'entry_index'),
    }
    slot = {'edit': 0, 'index': 1}.get(page_type)
    if slot is None:
        return None
    return routes[verbose_name][slot]
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.conf import settings
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.utils import timezone
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django_xhtml2pdf.utils import generate_pdf
from .forms import ClientForm
from .forms import ContactForm
from .forms import EstimateForm
from .forms import InvoiceForm
from .forms import MailForm
from .forms import ProjectForm
from .forms import TaskForm
from .forms import TimeForm
from .models import Client
from .models import Company
from .models import Contact
from .models import Estimate
from .models import Invoice
from .models import Project
from .models import Task
from .models import Time
# Create your views here.
@staff_member_required
def client(request, pk=None):
    """Client detail page, including the client's projects."""
    the_client = get_object_or_404(Client, pk=pk)
    context = {
        'client': the_client,
        'projects': Project.objects.filter(client=the_client),
    }
    return render(request, 'client.html', context)
@staff_member_required
def client_edit(request, pk=None):
    """Create (no pk) or edit (with pk) a client."""
    if pk is None:
        client = None
        form = ClientForm()
    else:
        client = get_object_or_404(Client, pk=pk)
        form = ClientForm(instance=client)
    if request.method == 'POST':
        form = (ClientForm(request.POST) if pk is None
                else ClientForm(request.POST, instance=client))
        if form.is_valid():
            client = form.save()
            return HttpResponseRedirect(reverse('client_index'))
    context = {'form': form}
    return render(request, 'client_edit.html', context)
@staff_member_required
def client_index(request):
    """List active clients, or every client when ?show-all is set."""
    show_all = request.GET.get('show-all', False)
    if show_all:
        queryset = Client.objects.all()
    else:
        queryset = Client.objects.filter(active=True)
    context = {'clients': queryset}
    return render(request, 'client_index.html', context)
@staff_member_required
def contact(request, pk=None):
context = {}
contact = get_object_or_404(Contact, pk=pk)
context['contact'] = contact
return render(request, 'contact.html', context)
@staff_member_required
def contact_edit(request, pk=None):
context = {}
contact = None
if pk is None:
form = ContactForm()
else:
contact = get_object_or_404(Contact, pk=pk)
form = ContactForm(instance=contact)
if request.method == 'POST':
if pk is None:
form = ContactForm(request.POST)
else:
form = ContactForm(request.POST, instance=contact)
if form.is_valid():
contact = form.save()
return HttpResponseRedirect(reverse('contact_index'))
context['contact'] = contact
context['form'] = form
return render(request, 'contact_edit.html', context)
@staff_member_required
def contact_index(request):
context = {}
show_all = request.GET.get('show-all', False)
if show_all:
contacts = Contact.objects.all()
else:
contacts = Contact.objects.filter(active=True)
context['contacts'] = contacts
return render(request, 'contact_index.html', context)
@staff_member_required
def estimate(request, pk=None):
    """
    Estimate detail page: the estimate plus a per-entry cost breakdown
    of the client's time entries (hours, task rate, line total).
    """
    # .get() with no filter assumes exactly one Company row exists.
    company = Company.objects.get()
    estimate = get_object_or_404(Estimate, pk=pk)
    entries = {}
    for entry in Time.objects.filter(client=estimate.client):
        detail = {'hours': entry.hours}
        entries[entry] = detail
        if entry.task:
            rate = entry.task.rate
            detail['rate'] = rate
            # NOTE(review): total_seconds() / 60 yields minutes; if the
            # task rate is hourly this overstates the line total by 60x
            # -- confirm the intended unit before changing.
            detail['total'] = float(rate) * float(
                entry.hours.total_seconds() / 60)
    context = {
        'company': company,
        'estimate': estimate,
        'entries': entries,
    }
    if company:
        context['company'] = company
    return render(request, 'estimate.html', context)
@staff_member_required
def estimate_pdf(request, pk=None):
context = {}
company = Company.objects.get()
estimate = get_object_or_404(Estimate, pk=pk)
context['entries'] = Time.objects.filter(client=estimate.client)
context['estimate'] = estimate
response = HttpResponse(content_type='application/pdf')
if company:
context['company'] = company
return generate_pdf('estimate_table.html',
context=context,
file_object=response)
@staff_member_required
def estimate_edit(request, client=None, pk=None):
context = {}
if pk is None:
if client is None:
form = EstimateForm()
else:
client = get_object_or_404(Client, pk=client)
estimate = Estimate(client=client)
form = EstimateForm(instance=estimate)
else:
estimate = get_object_or_404(Estimate, pk=pk)
form = EstimateForm(instance=estimate)
if request.method == 'POST':
if pk is None:
form = EstimateForm(request.POST)
else:
estimate = get_object_or_404(Estimate, pk=pk)
form = EstimateForm(request.POST, instance=estimate)
if form.is_valid():
estimate = form.save()
return HttpResponseRedirect(reverse('estimate_index'))
context['form'] = form
return render(request, 'estimate_edit.html', context)
@staff_member_required
def estimate_index(request):
context = {}
estimates = Estimate.objects.all()
context['estimates'] = estimates
return render(request, 'estimate_index.html', context)
def home(request):
    """Landing page listing every client."""
    context = {
        'request': request,
        'clients': Client.objects.all(),
    }
    return render(request, 'home.html', context)
@staff_member_required
def invoice(request, pk=None):
    """
    Invoice detail page: the invoice, its project queryset, the first
    matching client (if any), and all tasks.

    NOTE(review): despite the singular name, ``project`` is a QuerySet;
    passing it to ``Client.objects.filter(project=...)`` relies on
    Django accepting a queryset as the lookup value — confirm templates
    and the Django version both expect this.
    """
    client = None
    context = {}
    invoice = get_object_or_404(Invoice, pk=pk)
    project = Project.objects.filter(invoice=invoice)
    clients = Client.objects.filter(project=project)
    if len(clients) > 0:
        client = clients[0]
    tasks = Task.objects.all()
    context['client'] = client
    context['invoice'] = invoice
    context['project'] = project
    context['tasks'] = tasks
    return render(request, 'invoice.html', context)
@staff_member_required
def invoice_edit(request, client=None, pk=None):
context = {}
if pk is None:
if client is None:
form = InvoiceForm()
else:
client = get_object_or_404(Client, pk=client)
invoice = Invoice(client=client)
form = InvoiceForm(instance=invoice)
else:
invoice = get_object_or_404(Invoice, pk=pk)
form = InvoiceForm(instance=invoice)
if request.method == 'POST':
if pk is None:
form = InvoiceForm(request.POST)
else:
invoice = get_object_or_404(Invoice, pk=pk)
form = InvoiceForm(request.POST, instance=invoice)
if form.is_valid():
invoice = form.save()
return HttpResponseRedirect(reverse('invoice_index'))
context['form'] = form
return render(request, 'invoice_edit.html', context)
@staff_member_required
def invoice_index(request):
client = None
context = {}
invoices = []
for invoice in Invoice.objects.all():
clients = Client.objects.filter(project=invoice.project)
if len(clients) > 0:
client = clients[0]
invoices.append([invoice, client])
context['invoices'] = invoices
return render(request, 'invoice_index.html', context)
@staff_member_required
def invoice_pdf(request, pk=None):
    """Render an invoice as a downloadable PDF."""
    invoice = get_object_or_404(Invoice, pk=pk)
    response = HttpResponse(content_type='application/pdf')
    context = {'invoice': invoice}
    return generate_pdf('invoice.html', context=context, file_object=response)
@staff_member_required
def project(request, pk=None):
context = {}
project = get_object_or_404(Project, pk=pk)
times = Time.objects.filter(project=project)
context['project'] = project
context['times'] = times
return render(request, 'project.html', context)
@staff_member_required
def project_edit(request, pk=None):
context = {}
client = request.GET.get('client', None)
if pk is None:
if client is None:
form = ProjectForm()
else:
client = get_object_or_404(Client, pk=client)
project = Project(client=client)
form = ProjectForm(instance=project)
else:
project = get_object_or_404(Project, pk=pk)
form = ProjectForm(instance=project)
if request.method == 'POST':
if pk is None:
form = ProjectForm(request.POST)
else:
project = get_object_or_404(Project, pk=pk)
form = ProjectForm(request.POST, instance=project)
if form.is_valid():
project = form.save()
return HttpResponseRedirect(reverse('project_index'))
context['form'] = form
return render(request, 'project_edit.html', context)
@staff_member_required
def project_index(request, pk=None):
context = {}
projects = Project.objects.all()
context['projects'] = projects
return render(request, 'project_index.html', context)
@staff_member_required
def task(request, pk=None):
context = {}
task = get_object_or_404(Task, pk=pk)
context['task'] = task
return render(request, 'task.html', context)
@staff_member_required
def task_edit(request, pk=None):
context = {}
project = request.GET.get('project', None)
if pk is None:
if project is None:
form = TaskForm()
else:
project = get_object_or_404(Project, pk=project)
task = Task(project=project)
form = TaskForm(instance=task)
else:
task = get_object_or_404(Task, pk=pk)
form = TaskForm(instance=task)
if request.method == 'POST':
if pk is None:
form = TaskForm(request.POST)
else:
task = get_object_or_404(Task, pk=pk)
form = TaskForm(request.POST, instance=task)
if form.is_valid():
task = form.save()
return HttpResponseRedirect(reverse('task_index'))
context['form'] = form
return render(request, 'task_edit.html', context)
@staff_member_required
def task_index(request):
context = {}
tasks = Task.objects.all()
context['tasks'] = tasks
return render(request, 'task_index.html', context)
@login_required
def time(request, pk=None):
context = {}
entry = get_object_or_404(Time, pk=pk)
context['entry'] = entry
return render(request, 'time.html', context)
@login_required
def time_edit(request, pk=None):
context = {}
project = request.GET.get('project', None)
if pk is None:
if project is None:
form = TimeForm()
else:
project = get_object_or_404(Project, pk=project)
time = Time(project=project)
form = TimeForm(instance=time)
else:
time = get_object_or_404(Time, pk=pk)
form = TimeForm(instance=time)
if request.method == 'POST':
if pk is None:
form = TimeForm(request.POST)
else:
time = get_object_or_404(Time, pk=pk)
form = TimeForm(request.POST, instance=time)
if form.is_valid():
time = form.save()
return HttpResponseRedirect(reverse('entry_index'))
context['form'] = form
return render(request, 'time_edit.html', context)
@login_required
def time_index(request):
context = {}
entries = Time.objects.all()
context['entries'] = entries
return render(request, 'time_index.html', context)
@login_required
def user(request, pk=None):
    """User detail page; visible only to that user or to staff."""
    account = get_object_or_404(User, pk=pk)
    context = {
        'request': request,
        'user': account,
    }
    # int(pk) is evaluated first, matching the original short-circuit
    # order.
    if request.user.pk == int(pk) or request.user.is_staff:
        return render(request, 'user.html', context)
    return HttpResponseRedirect(reverse('home'))
@staff_member_required
def user_index(request):
context = {}
users = User.objects.all()
context['users'] = users
return render(request, 'user_index.html', context)
@staff_member_required
def user_mail(request, pk=None):
    """
    Compose and send an email to a single user.

    On a valid POST, sends via Django's ``send_mail`` (this module
    imports it directly) and redirects to the user index with a success
    flash; otherwise renders the compose form.
    """
    context = {}
    recipients = []
    user = get_object_or_404(User, pk=pk)
    if request.method == 'POST':
        form = MailForm(request.POST)
        if form.is_valid():
            sender = settings.DEFAULT_FROM_EMAIL
            subject = form.cleaned_data['subject']
            message = form.cleaned_data['message']
            recipients.append(user.email)
            # fail_silently=False: SMTP errors propagate to the caller
            send_mail(subject,
                      message,
                      sender,
                      recipients,
                      fail_silently=False)
            messages.add_message(request, messages.SUCCESS, 'Success!')
            return HttpResponseRedirect(reverse('user_index'))
    else:
        form = MailForm()
    context['form'] = form
    context['user'] = user
    return render(request, 'user_mail.html', context)
Update
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.conf import settings
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.utils import timezone
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django_xhtml2pdf.utils import generate_pdf
from .forms import ClientForm
from .forms import ContactForm
from .forms import EstimateForm
from .forms import InvoiceForm
from .forms import MailForm
from .forms import ProjectForm
from .forms import TaskForm
from .forms import TimeForm
from .models import Client
from .models import Company
from .models import Contact
from .models import Estimate
from .models import Invoice
from .models import Project
from .models import Task
from .models import Time
# Create your views here.
@staff_member_required
def client(request, pk=None):
context = {}
client = get_object_or_404(Client, pk=pk)
projects = Project.objects.filter(client=client)
context['client'] = client
context['projects'] = projects
return render(request, 'client.html', context)
@staff_member_required
def client_edit(request, pk=None):
context = {}
if pk is None:
form = ClientForm()
else:
client = get_object_or_404(Client, pk=pk)
form = ClientForm(instance=client)
if request.method == 'POST':
if pk is None:
form = ClientForm(request.POST)
else:
form = ClientForm(request.POST, instance=client)
if form.is_valid():
client = form.save()
return HttpResponseRedirect(reverse('client_index'))
context['form'] = form
return render(request, 'client_edit.html', context)
@staff_member_required
def client_index(request):
context = {}
show_all = request.GET.get('show-all', False)
if show_all:
clients = Client.objects.all()
else:
clients = Client.objects.filter(active=True)
context['clients'] = clients
return render(request, 'client_index.html', context)
@staff_member_required
def contact(request, pk=None):
context = {}
contact = get_object_or_404(Contact, pk=pk)
context['contact'] = contact
return render(request, 'contact.html', context)
@staff_member_required
def contact_edit(request, pk=None):
context = {}
contact = None
if pk is None:
form = ContactForm()
else:
contact = get_object_or_404(Contact, pk=pk)
form = ContactForm(instance=contact)
if request.method == 'POST':
if pk is None:
form = ContactForm(request.POST)
else:
form = ContactForm(request.POST, instance=contact)
if form.is_valid():
contact = form.save()
return HttpResponseRedirect(reverse('contact_index'))
context['contact'] = contact
context['form'] = form
return render(request, 'contact_edit.html', context)
@staff_member_required
def contact_index(request):
context = {}
show_all = request.GET.get('show-all', False)
if show_all:
contacts = Contact.objects.all()
else:
contacts = Contact.objects.filter(active=True)
context['contacts'] = contacts
return render(request, 'contact_index.html', context)
@staff_member_required
def estimate(request, pk=None):
context = {}
entries = {}
company = Company.objects.get()
estimate = get_object_or_404(Estimate, pk=pk)
context['company'] = company
context['estimate'] = estimate
for entry in Time.objects.filter(client=estimate.client):
entries[entry] = {}
hours = entry.hours
entries[entry]['hours'] = hours
if entry.task:
rate = entry.task.rate
entries[entry]['rate'] = rate
entries[entry]['total'] = float(rate) * float(
hours.total_seconds() / 60)
context['entries'] = entries
if company:
context['company'] = company
return render(request, 'estimate.html', context)
@staff_member_required
def estimate_pdf(request, pk=None):
context = {}
company = Company.objects.get()
estimate = get_object_or_404(Estimate, pk=pk)
context['entries'] = Time.objects.filter(client=estimate.client)
context['estimate'] = estimate
response = HttpResponse(content_type='application/pdf')
if company:
context['company'] = company
return generate_pdf('estimate_table.html',
context=context,
file_object=response)
@staff_member_required
def estimate_edit(request, client=None, pk=None):
context = {}
if pk is None:
if client is None:
form = EstimateForm()
else:
client = get_object_or_404(Client, pk=client)
estimate = Estimate(client=client)
form = EstimateForm(instance=estimate)
else:
estimate = get_object_or_404(Estimate, pk=pk)
form = EstimateForm(instance=estimate)
if request.method == 'POST':
if pk is None:
form = EstimateForm(request.POST)
else:
estimate = get_object_or_404(Estimate, pk=pk)
form = EstimateForm(request.POST, instance=estimate)
if form.is_valid():
estimate = form.save()
return HttpResponseRedirect(reverse('estimate_index'))
context['form'] = form
return render(request, 'estimate_edit.html', context)
@staff_member_required
def estimate_index(request):
context = {}
estimates = Estimate.objects.all()
context['estimates'] = estimates
return render(request, 'estimate_index.html', context)
def home(request):
context = {}
clients = Client.objects.all()
context['request'] = request
context['clients'] = clients
return render(request, 'home.html', context)
@staff_member_required
def invoice(request, pk=None):
client = None
context = {}
invoice = get_object_or_404(Invoice, pk=pk)
project = Project.objects.filter(invoice=invoice)
clients = Client.objects.filter(project=project)
if len(clients) > 0:
client = clients[0]
tasks = Task.objects.all()
context['client'] = client
context['invoice'] = invoice
context['project'] = project
context['tasks'] = tasks
return render(request, 'invoice.html', context)
@staff_member_required
def invoice_edit(request, client=None, pk=None):
context = {}
if pk is None:
if client is None:
form = InvoiceForm()
else:
client = get_object_or_404(Client, pk=client)
invoice = Invoice(client=client)
form = InvoiceForm(instance=invoice)
else:
invoice = get_object_or_404(Invoice, pk=pk)
form = InvoiceForm(instance=invoice)
if request.method == 'POST':
if pk is None:
form = InvoiceForm(request.POST)
else:
invoice = get_object_or_404(Invoice, pk=pk)
form = InvoiceForm(request.POST, instance=invoice)
if form.is_valid():
invoice = form.save()
return HttpResponseRedirect(reverse('invoice_index'))
context['form'] = form
return render(request, 'invoice_edit.html', context)
@staff_member_required
def invoice_index(request):
client = None
context = {}
invoices = []
for invoice in Invoice.objects.all():
clients = Client.objects.filter(project=invoice.project)
if len(clients) > 0:
client = clients[0]
invoices.append([invoice, client])
context['invoices'] = invoices
return render(request, 'invoice_index.html', context)
@staff_member_required
def invoice_pdf(request, pk=None):
invoice = get_object_or_404(Invoice, pk=pk)
context = {}
context['invoice'] = invoice
response = HttpResponse(content_type='application/pdf')
return generate_pdf('invoice.html', context=context, file_object=response)
@staff_member_required
def project(request, pk=None):
context = {}
project = get_object_or_404(Project, pk=pk)
times = Time.objects.filter(project=project)
context['project'] = project
context['times'] = times
return render(request, 'project.html', context)
@staff_member_required
def project_edit(request, pk=None):
context = {}
client = request.GET.get('client', None)
if pk is None:
if client is None:
form = ProjectForm()
else:
client = get_object_or_404(Client, pk=client)
project = Project(client=client)
form = ProjectForm(instance=project)
else:
project = get_object_or_404(Project, pk=pk)
form = ProjectForm(instance=project)
if request.method == 'POST':
if pk is None:
form = ProjectForm(request.POST)
else:
project = get_object_or_404(Project, pk=pk)
form = ProjectForm(request.POST, instance=project)
if form.is_valid():
project = form.save()
return HttpResponseRedirect(reverse('project_index'))
context['form'] = form
return render(request, 'project_edit.html', context)
@staff_member_required
def project_index(request, pk=None):
context = {}
projects = Project.objects.all()
context['projects'] = projects
return render(request, 'project_index.html', context)
@staff_member_required
def task(request, pk=None):
context = {}
task = get_object_or_404(Task, pk=pk)
context['task'] = task
return render(request, 'task.html', context)
@staff_member_required
def task_edit(request, pk=None):
context = {}
project = request.GET.get('project', None)
if pk is None:
if project is None:
form = TaskForm()
else:
project = get_object_or_404(Project, pk=project)
task = Task(project=project)
form = TaskForm(instance=task)
else:
task = get_object_or_404(Task, pk=pk)
form = TaskForm(instance=task)
if request.method == 'POST':
if pk is None:
form = TaskForm(request.POST)
else:
task = get_object_or_404(Task, pk=pk)
form = TaskForm(request.POST, instance=task)
if form.is_valid():
task = form.save()
return HttpResponseRedirect(reverse('task_index'))
context['form'] = form
return render(request, 'task_edit.html', context)
@staff_member_required
def task_index(request):
context = {}
tasks = Task.objects.all()
context['tasks'] = tasks
return render(request, 'task_index.html', context)
@login_required
def time(request, pk=None):
context = {}
entry = get_object_or_404(Time, pk=pk)
context['entry'] = entry
return render(request, 'time.html', context)
@login_required
def time_edit(request, pk=None):
context = {}
project = request.GET.get('project', None)
if pk is None:
if project is None:
form = TimeForm()
else:
project = get_object_or_404(Project, pk=project)
time = Time(project=project)
form = TimeForm(instance=time)
else:
time = get_object_or_404(Time, pk=pk)
form = TimeForm(instance=time)
if request.method == 'POST':
if pk is None:
form = TimeForm(request.POST)
else:
time = get_object_or_404(Time, pk=pk)
form = TimeForm(request.POST, instance=time)
if form.is_valid():
time = form.save()
return HttpResponseRedirect(reverse('entry_index'))
context['form'] = form
return render(request, 'time_edit.html', context)
@login_required
def time_index(request):
context = {}
entries = Time.objects.all()
context['entries'] = entries
return render(request, 'time_index.html', context)
@login_required
def user(request, pk=None):
context = {}
user = get_object_or_404(User, pk=pk)
context['request'] = request
context['user'] = user
if request.user.pk == int(pk) or request.user.is_staff:
return render(request, 'user.html', context)
else:
return HttpResponseRedirect(reverse('home'))
@staff_member_required
def user_index(request):
context = {}
users = User.objects.all()
context['users'] = users
return render(request, 'user_index.html', context)
@staff_member_required
def user_mail(request, pk=None):
context = {}
recipients = []
user = get_object_or_404(User, pk=pk)
if request.method == 'POST':
form = MailForm(request.POST)
if form.is_valid():
sender = settings.DEFAULT_FROM_EMAIL
subject = form.cleaned_data['subject']
message = form.cleaned_data['message']
recipients.append(user.email)
send_mail(subject,
message,
sender,
recipients,
fail_silently=False)
messages.add_message(request, messages.SUCCESS, 'Success!')
return HttpResponseRedirect(reverse('user_index'))
else:
form = MailForm()
context['form'] = form
context['user'] = user
return render(request, 'user_mail.html', context)
|
from .forms import AppSettingsForm
from .forms import ClientForm
from .forms import CompanyForm
from .forms import ContactForm
from .forms import ContractForm
from .forms import ContractSettingsForm
from .forms import EstimateForm
from .forms import FileForm
from .forms import InvoiceForm
from .forms import MailForm
from .forms import NewsletterForm
from .forms import NoteForm
from .forms import ProfileForm
from .forms import ProjectForm
from .forms import ProposalForm
from .forms import ReportForm
from .forms import ServiceForm
from .forms import TaskForm
from .forms import TimeForm
from .models import AppSettings
from .models import Client
from .models import Company
from .models import Contact
from .models import Contract
from .models import ContractSettings
from .models import Estimate
from .models import File
from .models import Invoice
from .models import Log
from .models import Newsletter
from .models import Note
from .models import Profile
from .models import Project
from .models import Proposal
from .models import Report
from .models import Service
from .models import Testimonial
from .models import Task
from .models import Time
from .serializers import ClientSerializer
from .serializers import ProfileSerializer
from .serializers import ServiceSerializer
from .serializers import TestimonialSerializer
from .utils import add_user_to_contacts
from .utils import get_index_items
from .utils import get_page_items
from .utils import create_and_send_mail
from .utils import edit
from .utils import generate_doc
from .utils import get_client_city
from .utils import get_company_name
from .utils import get_setting
from .utils import get_template_and_url_names
from .utils import get_query
from .utils import send_mail
from datetime import datetime
from django.contrib import messages
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import F, Sum
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django_xhtml2pdf.utils import generate_pdf
from io import BytesIO
from matplotlib.dates import DateFormatter
from matplotlib.dates import MonthLocator
from matplotlib.dates import date2num
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from rest_framework import viewsets
# Create your views here.
class ClientViewSet(viewsets.ModelViewSet):
    """REST API endpoint exposing published clients, ordered by name."""
    queryset = Client.objects.filter(published=True).order_by('name')
    serializer_class = ClientSerializer
class ServiceViewSet(viewsets.ModelViewSet):
    """REST API endpoint exposing active services, ordered by name."""
    queryset = Service.objects.filter(active=True).order_by('name')
    serializer_class = ServiceSerializer
class TestimonialViewSet(viewsets.ModelViewSet):
    """REST API endpoint exposing active testimonials, newest first."""
    queryset = Testimonial.objects.filter(active=True).order_by('-issue_date')
    serializer_class = TestimonialSerializer
class ProfileViewSet(viewsets.ModelViewSet):
    """REST API endpoint exposing published profiles, ordered by first name."""
    queryset = Profile.objects.filter(
        published=True).order_by('user__first_name')
    serializer_class = ProfileSerializer
@staff_member_required
def client(request, pk=None):
    """Show one client with its contacts, contracts and projects."""
    return render(request, 'client.html', get_page_items(
        request,
        app_settings_model=AppSettings,
        contact_model=Contact,
        contract_model=Contract,
        model=Client,
        pk=pk,
        project_model=Project))
@staff_member_required
def client_edit(request, pk=None):
    """Create or update a client via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'client', page_type='edit')
    return edit(
        request, ClientForm, Client, url_name, template_name,
        active_nav='client', pk=pk)
@staff_member_required
def client_index(request):
    """List clients, searchable by address and name."""
    return render(request, 'client_index.html', get_index_items(
        request,
        Client,
        active_nav='client',
        app_settings_model=AppSettings,
        edit_url='client_edit',  # used by the delete modal
        order_by=('-active', 'name'),
        search_fields=('address', 'name'),
        show_search=True))
@staff_member_required
def company_edit(request, pk=None):
    """Edit the singleton company record (pk is always 1)."""
    return edit(
        request, CompanyForm, Company, 'company', 'company_edit.html',
        active_nav='dropdown', pk=1)
@staff_member_required
def company(request):
    """Show the singleton company with its services and notes."""
    org = Company.get_solo()
    context = {
        'active_nav': 'dropdown',
        'active_tab': 'company',
        'company': org,
        'notes': org.note.all(),
        'services': org.service_set.all(),
    }
    return render(request, 'company.html', context)
@staff_member_required
def contact(request, pk=None):
    """Show a single contact."""
    context = {
        'active_nav': 'contact',
        'edit_url': 'contact_edit',  # used by the delete modal
        'item': get_object_or_404(Contact, pk=pk),
    }
    return render(request, 'contact.html', context)
@staff_member_required
def contact_edit(request, pk=None):
    """Create or update a contact via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'contact', page_type='edit')
    return edit(
        request, ContactForm, Contact, url_name, template_name,
        active_nav='contact', pk=pk)
@staff_member_required
def contact_index(request):
    """List contacts, searchable by name, email and notes."""
    return render(request, 'contact_index.html', get_index_items(
        request,
        Contact,
        active_nav='contact',
        app_settings_model=AppSettings,
        edit_url='contact_edit',  # used by the delete modal
        order_by=('-active', 'last_name', 'first_name'),
        search_fields=('first_name', 'last_name', 'email', 'notes'),
        show_search=True))
@staff_member_required
def contact_mail(request, pk=None):
    """Mail a contact; on success redirect back to the contact page."""
    recipient = get_object_or_404(Contact, pk=pk)
    sent = request.method == 'POST' and create_and_send_mail(
        request, Log, mail_form=MailForm, contact=recipient, pk=pk)
    if sent:
        return HttpResponseRedirect(reverse('contact', kwargs={'pk': pk}))
    context = {
        'active_nav': 'contact',
        'contact': recipient,
        'form': MailForm(),
    }
    return render(request, 'contact_mail.html', context)
def contact_unsubscribe(request, pk=None):
    """Unsubscribe a contact when the uuid in the query string matches."""
    contact = get_object_or_404(Contact, pk=pk)
    # Guard clause: reject mismatched/missing uuid first.
    if request.GET.get('id') != contact.uuid:
        messages.add_message(request, messages.WARNING, 'Nothing to see here.')
        return HttpResponseRedirect(reverse('home'))
    contact.subscribed = False
    contact.save()
    messages.add_message(request, messages.SUCCESS,
                         'You have been unsubscribed!')
    log = Log(entry='%s unsubscribed.' % contact.email)
    log.save()
    return HttpResponseRedirect(reverse('home'))
@staff_member_required
def contract(request, pk=None):
    """Show a contract; '?pdf' or '?doc' query downloads it instead.

    Bug fix: the PDF and DOC branches referenced `company` and `contract`
    without ever defining them, raising NameError on every export.
    """
    context = get_page_items(
        request, company_model=Company, model=Contract, pk=pk, time_model=Time)
    if context['pdf']:
        company = Company.get_solo()  # was undefined (NameError)
        response = HttpResponse(content_type='application/pdf')
        filename = get_company_name(company)
        response['Content-Disposition'] = 'filename=%s.pdf' % filename
        return generate_pdf(
            'pdf_contract.html', context=context, file_object=response)
    if context['doc']:
        # https://stackoverflow.com/a/24122313/185820
        contract = get_object_or_404(Contract, pk=pk)  # was undefined
        company = Company.get_solo()  # was undefined
        document = generate_doc(contract)
        filename = get_company_name(company)
        f = BytesIO()
        document.save(f)
        length = f.tell()
        f.seek(0)
        content_type = 'application/vnd.openxmlformats-'
        content_type += 'officedocument.wordprocessingml.document'
        response = HttpResponse(f.getvalue(), content_type=content_type)
        response['Content-Disposition'] = 'filename=%s.docx' % filename
        response['Content-Length'] = length
        return response
    else:
        return render(request, 'contract.html', context)
@staff_member_required
def contract_edit(request, pk=None):
    """Create or update a contract via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'contract', page_type='edit')
    return edit(
        request, ContractForm, Contract, url_name, template_name,
        active_nav='contract', pk=pk)
@staff_member_required
def contract_index(request):
    """List contracts, most recently updated first."""
    return render(request, 'contract_index.html', get_index_items(
        request,
        Contract,
        active_nav='contract',
        order_by=('-updated', ),
        app_settings_model=AppSettings))
@staff_member_required
def contract_settings(request):
    """Show the text fields of the contract settings singleton."""
    contract_settings = ContractSettings.get_solo()
    fields = {}
    for field in contract_settings._meta.fields:
        # Only plain text fields are listed; 'body' is excluded.
        if field.description == 'Text' and field.name != 'body':
            fields[field.name] = {
                'name': field.verbose_name,
                'value': getattr(contract_settings, field.name),
            }
    context = {
        'fields': fields,
        'active_tab': 'contract',
        'active_nav': 'dropdown',
    }
    return render(request, 'contract_settings.html', context)
@staff_member_required
def contract_settings_edit(request, pk=None):
    """Edit the singleton contract settings (pk is always 1)."""
    return edit(
        request, ContractSettingsForm, ContractSettings,
        'contract_settings', 'contract_settings_edit.html',
        pk=1, active_nav='dropdown')
@staff_member_required
def estimate(request, pk=None):
    """Show an estimate; '?pdf' query downloads it as a PDF."""
    context = get_page_items(
        request, company_model=Company, model=Estimate, pk=pk, time_model=Time)
    if not context['pdf']:
        return render(request, 'estimate.html', context)
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'filename=%s.pdf' % '-'.join(
        ['estimate', pk])
    return generate_pdf(
        'pdf_invoice.html', context=context, file_object=response)
@staff_member_required
def estimate_edit(request, pk=None):
    """Create or update an estimate via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'estimate', page_type='edit')
    return edit(
        request, EstimateForm, Estimate, url_name, template_name,
        active_nav='estimate', company_model=Company, pk=pk)
@staff_member_required
def estimate_index(request):
    """List estimates, searchable by subject.

    Bug fix: `context['company'] = company` referenced an undefined name
    and raised NameError; fetch the singleton company instead.
    """
    context = get_index_items(
        request,
        Estimate,
        active_nav='estimate',
        app_settings_model=AppSettings,
        edit_url='estimate_edit',  # Delete modal
        order_by=('-updated', ),
        search_fields=('subject', ),
        show_search=True)
    context['company'] = Company.get_solo()  # was: undefined `company`
    return render(request, 'estimate_index.html', context)
@staff_member_required
def estimate_mail(request, pk=None):
    """Mail an estimate and return to its detail page.

    Bug fix: when create_and_send_mail() returned falsy the view fell off
    the end and returned None, producing a 500; now it always redirects.
    """
    estimate = get_object_or_404(Estimate, pk=pk)
    create_and_send_mail(
        request, Log, estimate=estimate, profile_model=Profile)
    return HttpResponseRedirect(reverse('estimate', kwargs={'pk': pk}))
@staff_member_required
def file_view(request, pk=None):
    """Show a single file record."""
    return render(request, 'file.html', get_page_items(
        request,
        app_settings_model=AppSettings,
        company_model=Company,
        model=File,
        pk=pk))
@staff_member_required
def file_edit(request, pk=None):
    """Create or update a file record via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'file', page_type='edit')
    return edit(
        request, FileForm, File, url_name, template_name,
        active_nav='dropdown', company_model=Company, pk=pk)
@staff_member_required
def file_index(request):
    """List files, most recently updated first."""
    return render(request, 'file_index.html', get_index_items(
        request,
        File,
        active_nav='dropdown',
        app_settings_model=AppSettings,
        order_by=('-updated', )))
def home(request):
    """Render the dashboard: recent notes, projects, invoices and reports."""
    return render(request, 'home.html', get_page_items(
        request,
        app_settings_model=AppSettings,
        invoice_model=Invoice,
        note_model=Note,
        order_by={
            'note': ('-updated', ),
            'project': ('-updated', ),
        },
        project_model=Project,
        report_model=Report))
@staff_member_required
def invoice(request, pk=None):
    """Show an invoice with its time entries; '?pdf' downloads a PDF."""
    context = get_page_items(
        request,
        company_model=Company,
        model=Invoice,
        order_by={'time': ('date', )},  # ordering for time entries
        pk=pk,
        time_model=Time)
    if not context['pdf']:
        return render(request, 'invoice.html', context)
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'filename=%s.pdf' % '_'.join(
        ['invoice', pk])
    return generate_pdf(
        'pdf_invoice.html', context=context, file_object=response)
@staff_member_required
def invoice_edit(request, pk=None):
    """Create or update an invoice via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'invoice', page_type='edit')
    return edit(
        request, InvoiceForm, Invoice, url_name, template_name,
        active_nav='invoice', company_model=Company, pk=pk)
@staff_member_required
def invoice_index(request):
    """List invoices, searchable across client, project and metadata."""
    return render(request, 'invoice_index.html', get_index_items(
        request,
        Invoice,
        active_nav='invoice',
        app_settings_model=AppSettings,
        edit_url='invoice_edit',  # used by the delete modal
        order_by=('-updated', ),
        search_fields=(
            'client__name',
            'document_id',
            'issue_date',
            'project__name',
            'subject', ),
        show_search=True))
def login(request):
    """Authenticate a user; log the originating city on success."""
    context = {'login': True}
    if request.method == 'POST':
        user = authenticate(
            request,
            username=request.POST['username'],
            password=request.POST['password'])
        if user is None:
            messages.add_message(request, messages.WARNING, 'Login failed.')
            return HttpResponseRedirect(reverse('home'))
        # https://stackoverflow.com/a/39316967/185820
        auth_login(request, user)
        city_data = get_client_city(request)
        log = Log(entry='%s logged in from %s' % (user, city_data))
        log.save()
        return HttpResponseRedirect(reverse('home'))
    return render(request, 'login.html', context)
@staff_member_required
def log_index(request):
    """List log entries, most recently updated first."""
    return render(request, 'log_index.html', get_index_items(
        request,
        Log,
        active_nav='dropdown',
        app_settings_model=AppSettings,
        order_by=('-updated', ),
        search_fields=('entry', )))
@staff_member_required
def newsletter(request, pk=None):
    """Show a newsletter and its recipient contacts."""
    item = get_object_or_404(Newsletter, pk=pk)
    context = {
        'active_nav': 'dropdown',
        'contacts': item.contacts.all(),
        'edit_url': 'newsletter_edit',
        'item': item,
    }
    return render(request, 'newsletter.html', context)
@staff_member_required
def newsletter_edit(request, pk=None):
    """Create or update a newsletter via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'newsletter', page_type='edit')
    return edit(
        request, NewsletterForm, Newsletter, url_name, template_name,
        active_nav='dropdown', pk=pk)
@staff_member_required
def newsletter_index(request, pk=None):
    """List newsletters, most recently updated first."""
    return render(request, 'newsletter_index.html', get_index_items(
        request,
        Newsletter,
        active_nav='dropdown',
        app_settings_model=AppSettings,
        order_by=('-updated', ),
        search_fields=('text', )))
@staff_member_required
def newsletter_send(request, pk=None):
    """Send a newsletter to every one of its contacts.

    Bug fix: the 'Batch mail sent!' flash message was added inside the
    per-contact loop, so it appeared once per successful recipient (and
    never distinguished a zero-send batch). Flash exactly once, after the
    loop, and only when at least one mail went out.
    """
    context = {}
    newsletter = get_object_or_404(Newsletter, pk=pk)
    contacts = newsletter.contacts.all()
    sent = 0
    for contact in contacts:
        # Per-contact unsubscribe link embedded in the mail.
        url = reverse('contact_unsubscribe', kwargs={'pk': contact.pk})
        url = ''.join([request.get_host(), url])
        if send_mail(
                request,
                newsletter.subject,
                newsletter.text,
                contact.email,
                url=url,
                uuid=contact.uuid,
                first_name=contact.first_name):
            log = Log(entry='Mail sent to %s.' % contact.email)
            log.save()
            sent += 1
    if sent:
        messages.add_message(request, messages.SUCCESS, 'Batch mail sent!')
    context['active_nav'] = 'newsletter'
    context['contacts'] = contacts
    context['edit_url'] = 'newsletter_edit'
    context['item'] = newsletter
    return render(request, 'newsletter.html', context)
@staff_member_required
def note(request, pk=None):
    """Show a note; '?pdf' query downloads it as a PDF."""
    pdf = get_query(request, 'pdf')
    context = {
        'pdf': pdf,
        'active_nav': 'note',
        'edit_url': 'note_edit',
        'item': get_object_or_404(Note, pk=pk),
    }
    if pdf:
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'filename=note-%s.pdf' % pk
        return generate_pdf(
            'pdf_note.html', context=context, file_object=response)
    return render(request, 'note.html', context)
# https://stackoverflow.com/a/42038839/185820
@staff_member_required(login_url='login')
def note_edit(request, pk=None):
    """Create or update a note via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'note', page_type='edit')
    return edit(
        request, NoteForm, Note, url_name, template_name,
        active_nav='note',
        app_settings_model=AppSettings,
        client_model=Client,
        company_model=Company,
        pk=pk)
@staff_member_required
def note_index(request, pk=None):
    """List visible (non-hidden) notes."""
    context = get_index_items(
        request,
        Note,
        active_nav='note',
        app_settings_model=AppSettings,
        filters={'hidden': False, },
        order_by=('-active', '-updated'),
        search_fields=('note', 'title'),
        show_search=True)
    context['edit_url'] = 'note_edit'  # used by the delete modal
    return render(request, 'note_index.html', context)
@staff_member_required
def project(request, pk=None):
    """Show a project with its estimates, invoices and time entries."""
    return render(request, 'project.html', get_page_items(
        request,
        app_settings_model=AppSettings,
        model=Project,
        estimate_model=Estimate,
        invoice_model=Invoice,
        order_by={'time': ('date', )},  # ordering for time entries
        time_model=Time,
        pk=pk))
@staff_member_required
def project_edit(request, pk=None):
    """Create or update a project via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'project', page_type='edit')
    return edit(
        request, ProjectForm, Project, url_name, template_name,
        active_nav='project', pk=pk)
@staff_member_required
def project_index(request, pk=None):
    """List projects, searchable by id and name."""
    return render(request, 'project_index.html', get_index_items(
        request,
        Project,
        active_nav='project',
        app_settings_model=AppSettings,
        edit_url='project_edit',  # used by the delete modal
        order_by=('-updated', ),
        search_fields=('id', 'name'),
        show_search=True))
@staff_member_required
def proposal(request, pk=None):
    """Show a proposal; '?pdf' query downloads it as a PDF."""
    context = get_page_items(
        request, company_model=Company, model=Proposal, pk=pk)
    if not context['pdf']:
        return render(request, 'proposal.html', context)
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'filename=proposal-%s.pdf' % pk
    return generate_pdf(
        'pdf_proposal.html', context=context, file_object=response)
# Consistency fix: every other *_edit view in this module is staff-only;
# this one was left unprotected, letting any visitor edit proposals.
@staff_member_required
def proposal_edit(request, pk=None):
    """Create or update a proposal via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'proposal', page_type='edit')
    return edit(
        request,
        ProposalForm,
        Proposal,
        url_name,
        template_name,
        active_nav='proposal',
        company_model=Company,
        pk=pk)
@staff_member_required
def proposal_index(request, pk=None):
    """List proposals, most recently updated first."""
    context = get_index_items(
        request,
        Proposal,
        active_nav='proposal',
        app_settings_model=AppSettings,
        order_by=('-updated', ),
        show_search=True)
    context['edit_url'] = 'proposal_edit'  # used by the delete modal
    return render(request, 'proposal_index.html', context)
@staff_member_required
def report(request, pk=None):
    """Show one report plus totals over all active reports; '?pdf' downloads."""
    item = get_object_or_404(Report, pk=pk)
    totals = Report.objects.filter(active=True).aggregate(
        gross=Sum(F('gross')), net=Sum(F('net')))
    pdf = get_query(request, 'pdf')
    context = {
        'pdf': pdf,
        'active_nav': 'dropdown',
        'company': Company.get_solo(),
        'cost': item.gross - item.net,
        'edit_url': 'report_edit',  # used by the delete modal
        'item': item,
        'reports': totals,
    }
    if pdf:
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'filename=report-%s.pdf' % pk
        return generate_pdf(
            'pdf_report.html', context=context, file_object=response)
    return render(request, 'report.html', context)
@staff_member_required
def report_edit(request, pk=None):
    """Create or update a report via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'report', page_type='edit')
    return edit(
        request, ReportForm, Report, url_name, template_name,
        active_nav='dropdown', invoice_model=Invoice, pk=pk)
@staff_member_required
def report_index(request):
    """List reports with aggregate gross/net/cost and an optional plot.

    Bug fix: a bare positional `search_fields` (an undefined name) was
    passed to get_index_items() in addition to the keyword argument of the
    same name, raising NameError on every request. Removed.
    """
    reports_qs = Report.objects.filter(active=True)
    plot_items = reports_qs  # saved before aggregation for plotting
    reports = reports_qs.aggregate(gross=Sum(F('gross')), net=Sum(F('net')))
    company = Company.get_solo()
    context = get_index_items(
        request,
        Report,
        active_nav='dropdown',
        app_settings_model=AppSettings,
        edit_url='report_edit',  # Delete modal
        order_by=('-updated', ),
        search_fields=('id', 'name', 'gross', 'net'),
        show_search=True)
    # Sum() returns None over an empty queryset; normalize to zero.
    if reports['gross'] is not None and reports['net'] is not None:
        cost = reports['gross'] - reports['net']
    else:
        reports['gross'] = 0
        reports['net'] = 0
        cost = 0
    # Only plot when there are at least two data points.
    show_plot = 'items' in context and len(context['items']) > 1
    context['reports'] = reports
    context['company'] = company
    context['cost'] = cost
    context['show_plot'] = show_plot
    context['plot_items'] = plot_items
    return render(request, 'report_index.html', context)
def report_plot(request):  # http://stackoverflow.com/a/5515994/185820
    """Render the (amount, date) pairs from the query string as a PNG plot."""
    values = get_query(request, 'values')
    # http://matplotlib.org/examples/api/date_demo.html
    dates = [date2num(datetime.strptime(v[1], '%Y-%m-%d')) for v in values]
    amounts = [v[0] for v in values]
    figure = Figure()
    canvas = FigureCanvasAgg(figure)
    axes = figure.add_subplot(1, 1, 1)
    axes.grid(True)
    axes.plot(dates, amounts)
    axes.xaxis.set_major_locator(MonthLocator())
    axes.xaxis.set_major_formatter(DateFormatter('%m'))
    # Serialize the canvas to PNG bytes and return them directly.
    png = BytesIO()
    canvas.print_png(png)
    return HttpResponse(png.getvalue(), content_type="image/png")
# https://stackoverflow.com/a/42038839/185820
@staff_member_required(login_url='login')
def service_edit(request, pk=None):
    """Create or update a service via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'service', page_type='edit')
    return edit(
        request, ServiceForm, Service, url_name, template_name,
        active_nav='dropdown', company_model=Company, pk=pk)
@staff_member_required
def settings(request):
    """Show the application settings singleton."""
    context = {
        'settings': AppSettings.get_solo(),
        'active_tab': 'system',
        'active_nav': 'dropdown',
    }
    return render(request, 'settings.html', context)
@staff_member_required
def settings_edit(request, pk=None):
    """Edit the singleton application settings (pk is always 1)."""
    return edit(
        request, AppSettingsForm, AppSettings,
        'settings', 'settings_edit.html',
        active_nav='dropdown', pk=1)
@staff_member_required
def task(request, pk=None):
    """Show a single task."""
    context = {
        'active_nav': 'task',
        'edit_url': 'task_edit',  # used by the delete modal
        'item': get_object_or_404(Task, pk=pk),
    }
    return render(request, 'task.html', context)
@staff_member_required
def task_edit(request, pk=None):
    """Create or update a task via the shared edit helper.

    Consistency fix: the view computed template_name via
    get_template_and_url_names() but then passed the hard-coded literal
    'task_edit.html'; use the computed name like every sibling *_edit view.
    """
    template_name, url_name = get_template_and_url_names(
        'task', page_type='edit')
    return edit(
        request,
        TaskForm,
        Task,
        url_name,
        template_name,  # was the hard-coded 'task_edit.html'
        active_nav='task',
        pk=pk)
@staff_member_required
def task_index(request):
    """List tasks, searchable by name."""
    return render(request, 'task_index.html', get_index_items(
        request,
        Task,
        active_nav='task',
        app_settings_model=AppSettings,
        edit_url='task_edit',  # used by the delete modal
        order_by=('-updated', ),
        search_fields=('name', ),
        show_search=True))
@login_required
def time(request, pk=None):
    """Show a time entry; only its owner or staff may view it."""
    entry = get_object_or_404(Time, pk=pk)
    # Entries with no owner are staff-only.
    if not entry.user and not request.user.is_staff:
        return HttpResponseRedirect(reverse('login'))
    if entry.user:
        if (entry.user.username != request.user.username and
                not request.user.is_staff):
            return HttpResponseRedirect(reverse('login'))
    context = {
        'active_nav': 'time',
        'edit_url': 'time_edit',  # used by the delete modal
        'item': entry,
    }
    return render(request, 'time.html', context)
@login_required
def time_edit(request, pk=None):
    """Create or update a time entry via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'time', page_type='edit')
    return edit(
        request, TimeForm, Time, url_name, template_name,
        active_nav='time',
        invoice_model=Invoice,
        estimate_model=Estimate,
        project_model=Project,
        task_model=Task,
        time_model=Time,
        pk=pk)
@login_required
def time_index(request):
    """List time entries; non-staff users are bounced to login."""
    search_fields = ('client__name', 'date', 'log', 'pk', 'project__name',
                     'invoice__document_id', 'user__username')
    context = get_index_items(
        request,
        Time,
        active_nav='time',
        app_settings_model=AppSettings,
        edit_url='time_edit',  # used by the delete modal
        order_by=('-updated', ),
        search_fields=search_fields,
        show_search=True)
    if request.user.is_staff:
        return render(request, 'time_index.html', context)
    return HttpResponseRedirect(reverse('login'))
@login_required
def user(request, pk=None):
    """Show a user's page with their profile, projects and time entries."""
    filters = {
        'estimate': None,
        'user': None,  # filled in later (by get_page_items)
    }
    return render(request, 'user.html', get_page_items(
        request,
        app_settings_model=AppSettings,
        contact_model=Contact,
        filters=filters,
        model=User,
        order_by=('-updated', ),
        profile_model=Profile,
        project_model=Project,
        time_model=Time,
        pk=pk))
@staff_member_required
def user_contact(request, pk=None):
    """Copy a user into the contacts table via the shared helper."""
    response = add_user_to_contacts(request, Contact, pk=pk)
    return response
@login_required
def user_edit(request, pk=None):
    """Edit a user's profile via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'user', page_type='edit')
    return edit(
        request, ProfileForm, Profile, url_name, template_name,
        active_nav='dropdown', pk=pk)
@staff_member_required
def user_index(request):
    """List users and flag which ones already exist as contacts.

    Bug fix: `context['company'] = company` referenced an undefined name
    (NameError); fetch the singleton. Also build the contact-email set
    once instead of re-scanning the contact list per user (O(n*m) -> O(n+m)).
    """
    context = get_index_items(
        request,
        User,
        active_nav='dropdown',
        app_settings_model=AppSettings,
        order_by=('-updated', ),
        show_search=False)
    context['company'] = Company.get_solo()  # was: undefined `company`
    # Check if each user is already a contact (by email).
    contact_emails = {c.email for c in Contact.objects.all()}
    items = context['items']
    for item in items:
        item.is_contact = item.email in contact_emails
    context['items'] = items
    return render(request, 'user_index.html', context)
# Update  (dataset artifact: stray commit message, commented out to keep the module importable)
from .forms import AppSettingsForm
from .forms import ClientForm
from .forms import CompanyForm
from .forms import ContactForm
from .forms import ContractForm
from .forms import ContractSettingsForm
from .forms import EstimateForm
from .forms import FileForm
from .forms import InvoiceForm
from .forms import MailForm
from .forms import NewsletterForm
from .forms import NoteForm
from .forms import ProfileForm
from .forms import ProjectForm
from .forms import ProposalForm
from .forms import ReportForm
from .forms import ServiceForm
from .forms import TaskForm
from .forms import TimeForm
from .models import AppSettings
from .models import Client
from .models import Company
from .models import Contact
from .models import Contract
from .models import ContractSettings
from .models import Estimate
from .models import File
from .models import Invoice
from .models import Log
from .models import Newsletter
from .models import Note
from .models import Profile
from .models import Project
from .models import Proposal
from .models import Report
from .models import Service
from .models import Testimonial
from .models import Task
from .models import Time
from .serializers import ClientSerializer
from .serializers import ProfileSerializer
from .serializers import ServiceSerializer
from .serializers import TestimonialSerializer
from .utils import add_user_to_contacts
from .utils import get_index_items
from .utils import get_page_items
from .utils import create_and_send_mail
from .utils import edit
from .utils import generate_doc
from .utils import get_client_city
from .utils import get_company_name
from .utils import get_setting
from .utils import get_template_and_url_names
from .utils import get_query
from .utils import send_mail
from datetime import datetime
from django.contrib import messages
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import F, Sum
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django_xhtml2pdf.utils import generate_pdf
from io import BytesIO
from matplotlib.dates import DateFormatter
from matplotlib.dates import MonthLocator
from matplotlib.dates import date2num
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from rest_framework import viewsets
# Create your views here.
class ClientViewSet(viewsets.ModelViewSet):
    """REST API endpoint exposing published clients, ordered by name."""
    queryset = Client.objects.filter(published=True).order_by('name')
    serializer_class = ClientSerializer
class ServiceViewSet(viewsets.ModelViewSet):
    """REST API endpoint exposing active services, ordered by name."""
    queryset = Service.objects.filter(active=True).order_by('name')
    serializer_class = ServiceSerializer
class TestimonialViewSet(viewsets.ModelViewSet):
    """REST API endpoint exposing active testimonials, newest first."""
    queryset = Testimonial.objects.filter(active=True).order_by('-issue_date')
    serializer_class = TestimonialSerializer
class ProfileViewSet(viewsets.ModelViewSet):
    """REST API endpoint exposing published profiles, ordered by first name."""
    queryset = Profile.objects.filter(
        published=True).order_by('user__first_name')
    serializer_class = ProfileSerializer
@staff_member_required
def client(request, pk=None):
    """Show one client with its contacts, contracts and projects."""
    return render(request, 'client.html', get_page_items(
        request,
        app_settings_model=AppSettings,
        contact_model=Contact,
        contract_model=Contract,
        model=Client,
        pk=pk,
        project_model=Project))
@staff_member_required
def client_edit(request, pk=None):
    """Create or update a client via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'client', page_type='edit')
    return edit(
        request, ClientForm, Client, url_name, template_name,
        active_nav='client', pk=pk)
@staff_member_required
def client_index(request):
    """List clients, searchable by address and name."""
    return render(request, 'client_index.html', get_index_items(
        request,
        Client,
        active_nav='client',
        app_settings_model=AppSettings,
        edit_url='client_edit',  # used by the delete modal
        order_by=('-active', 'name'),
        search_fields=('address', 'name'),
        show_search=True))
@staff_member_required
def company_edit(request, pk=None):
    """Edit the singleton company record (pk is always 1)."""
    return edit(
        request, CompanyForm, Company, 'company', 'company_edit.html',
        active_nav='dropdown', pk=1)
@staff_member_required
def company(request):
    """Show the singleton company with its services and notes."""
    org = Company.get_solo()
    context = {
        'active_nav': 'dropdown',
        'active_tab': 'company',
        'company': org,
        'notes': org.note.all(),
        'services': org.service_set.all(),
    }
    return render(request, 'company.html', context)
@staff_member_required
def contact(request, pk=None):
    """Show a single contact."""
    context = {
        'active_nav': 'contact',
        'edit_url': 'contact_edit',  # used by the delete modal
        'item': get_object_or_404(Contact, pk=pk),
    }
    return render(request, 'contact.html', context)
@staff_member_required
def contact_edit(request, pk=None):
    """Create or update a contact via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'contact', page_type='edit')
    return edit(
        request, ContactForm, Contact, url_name, template_name,
        active_nav='contact', pk=pk)
@staff_member_required
def contact_index(request):
    """List contacts, searchable by name, email and notes."""
    return render(request, 'contact_index.html', get_index_items(
        request,
        Contact,
        active_nav='contact',
        app_settings_model=AppSettings,
        edit_url='contact_edit',  # used by the delete modal
        order_by=('-active', 'last_name', 'first_name'),
        search_fields=('first_name', 'last_name', 'email', 'notes'),
        show_search=True))
@staff_member_required
def contact_mail(request, pk=None):
    """Mail a contact; on success redirect back to the contact page."""
    recipient = get_object_or_404(Contact, pk=pk)
    sent = request.method == 'POST' and create_and_send_mail(
        request, Log, mail_form=MailForm, contact=recipient, pk=pk)
    if sent:
        return HttpResponseRedirect(reverse('contact', kwargs={'pk': pk}))
    context = {
        'active_nav': 'contact',
        'contact': recipient,
        'form': MailForm(),
    }
    return render(request, 'contact_mail.html', context)
def contact_unsubscribe(request, pk=None):
    """Unsubscribe a contact when the uuid in the query string matches."""
    contact = get_object_or_404(Contact, pk=pk)
    # Guard clause: reject mismatched/missing uuid first.
    if request.GET.get('id') != contact.uuid:
        messages.add_message(request, messages.WARNING, 'Nothing to see here.')
        return HttpResponseRedirect(reverse('home'))
    contact.subscribed = False
    contact.save()
    messages.add_message(request, messages.SUCCESS,
                         'You have been unsubscribed!')
    log = Log(entry='%s unsubscribed.' % contact.email)
    log.save()
    return HttpResponseRedirect(reverse('home'))
@staff_member_required
def contract(request, pk=None):
    """Show a contract; '?pdf' or '?doc' query downloads it instead.

    Bug fix: the PDF and DOC branches referenced `company` and `contract`
    without ever defining them, raising NameError on every export.
    """
    context = get_page_items(
        request, company_model=Company, model=Contract, pk=pk, time_model=Time)
    if context['pdf']:
        company = Company.get_solo()  # was undefined (NameError)
        response = HttpResponse(content_type='application/pdf')
        filename = get_company_name(company)
        response['Content-Disposition'] = 'filename=%s.pdf' % filename
        return generate_pdf(
            'pdf_contract.html', context=context, file_object=response)
    if context['doc']:
        # https://stackoverflow.com/a/24122313/185820
        contract = get_object_or_404(Contract, pk=pk)  # was undefined
        company = Company.get_solo()  # was undefined
        document = generate_doc(contract)
        filename = get_company_name(company)
        f = BytesIO()
        document.save(f)
        length = f.tell()
        f.seek(0)
        content_type = 'application/vnd.openxmlformats-'
        content_type += 'officedocument.wordprocessingml.document'
        response = HttpResponse(f.getvalue(), content_type=content_type)
        response['Content-Disposition'] = 'filename=%s.docx' % filename
        response['Content-Length'] = length
        return response
    else:
        return render(request, 'contract.html', context)
@staff_member_required
def contract_edit(request, pk=None):
    """Create or update a contract via the shared edit helper."""
    template_name, url_name = get_template_and_url_names(
        'contract', page_type='edit')
    return edit(
        request, ContractForm, Contract, url_name, template_name,
        active_nav='contract', pk=pk)
@staff_member_required
def contract_index(request):
    """List contracts, most recently updated first."""
    return render(request, 'contract_index.html', get_index_items(
        request,
        Contract,
        active_nav='contract',
        order_by=('-updated', ),
        app_settings_model=AppSettings))
@staff_member_required
def contract_settings(request):
context = {}
fields = {}
contract_settings = ContractSettings.get_solo()
for field in contract_settings._meta.fields:
if field.description == 'Text' and field.name != 'body':
fields[field.name] = {}
fields[field.name]['name'] = field.verbose_name
fields[field.name]['value'] = getattr(contract_settings,
field.name)
context['fields'] = fields
context['active_tab'] = 'contract'
context['active_nav'] = 'dropdown'
return render(request, 'contract_settings.html', context)
@staff_member_required
def contract_settings_edit(request, pk=None):
return edit(
request,
ContractSettingsForm,
ContractSettings,
'contract_settings',
'contract_settings_edit.html',
pk=1,
active_nav='dropdown')
@staff_member_required
def estimate(request, pk=None):
    """Display an estimate, or export it as PDF via ``?pdf=``."""
    context = get_page_items(
        request, company_model=Company, model=Estimate, pk=pk, time_model=Time)
    if context['pdf']:
        response = HttpResponse(content_type='application/pdf')
        filename = '-'.join(['estimate', pk])
        response['Content-Disposition'] = 'filename=%s.pdf' % filename
        # NOTE(review): renders 'pdf_invoice.html' rather than a
        # dedicated estimate template -- looks like deliberate reuse,
        # but confirm.
        return generate_pdf(
            'pdf_invoice.html', context=context, file_object=response)
    else:
        return render(request, 'estimate.html', context)


@staff_member_required
def estimate_edit(request, pk=None):
    """Create (pk=None) or edit an estimate via the shared ``edit`` helper."""
    template_name, url_name = get_template_and_url_names(
        'estimate', page_type='edit')
    return edit(
        request,
        EstimateForm,
        Estimate,
        url_name,
        template_name,
        active_nav='estimate',
        company_model=Company,
        pk=pk)
@staff_member_required
def estimate_index(request):
    """List estimates with subject search, most recently updated first."""
    context = get_index_items(
        request,
        Estimate,
        active_nav='estimate',
        app_settings_model=AppSettings,
        edit_url='estimate_edit',  # Delete modal
        order_by=('-updated', ),
        search_fields=('subject', ),
        show_search=True)
    # BUG FIX: the original referenced an undefined ``company`` local
    # (NameError); fetch the singleton explicitly like the report views.
    context['company'] = Company.get_solo()
    return render(request, 'estimate_index.html', context)
@staff_member_required
def estimate_mail(request, pk=None):
    """Mail an estimate, then return to its detail page.

    BUG FIX: the original returned ``None`` (an invalid Django view
    response, causing a 500) when ``create_and_send_mail`` reported
    failure; redirect back to the estimate in both cases. The helper is
    still expected to surface success/failure via messages/logging.
    """
    estimate = get_object_or_404(Estimate, pk=pk)
    create_and_send_mail(
        request, Log, estimate=estimate, profile_model=Profile)
    return HttpResponseRedirect(reverse('estimate', kwargs={'pk': pk}))
@staff_member_required
def file_view(request, pk=None):
    """Display a single uploaded file's detail page."""
    context = get_page_items(
        request,
        app_settings_model=AppSettings,
        company_model=Company,
        model=File,
        pk=pk)
    return render(request, 'file.html', context)


@staff_member_required
def file_edit(request, pk=None):
    """Create (pk=None) or edit a file entry."""
    template_name, url_name = get_template_and_url_names(
        'file', page_type='edit')
    return edit(
        request,
        FileForm,
        File,
        url_name,
        template_name,
        active_nav='dropdown',
        company_model=Company,
        pk=pk, )


@staff_member_required
def file_index(request):
    """List files, most recently updated first."""
    context = get_index_items(
        request,
        File,
        active_nav='dropdown',
        app_settings_model=AppSettings,
        order_by=('-updated', ))
    return render(request, 'file_index.html', context)


def home(request):
    """Dashboard: recent notes, projects, invoices and reports."""
    context = get_page_items(
        request,
        app_settings_model=AppSettings,
        invoice_model=Invoice,
        note_model=Note,
        order_by={
            'note': ('-updated', ),
            'project': ('-updated', ),
        },
        project_model=Project,
        report_model=Report)
    return render(request, 'home.html', context)
@staff_member_required
def invoice(request, pk=None):
    """Display an invoice, or export it as PDF via ``?pdf=``."""
    context = get_page_items(
        request,
        company_model=Company,
        model=Invoice,
        order_by={'time': ('date', )},  # For time entries
        pk=pk,
        time_model=Time)
    if context['pdf']:
        response = HttpResponse(content_type='application/pdf')
        filename = '_'.join(['invoice', pk])
        response['Content-Disposition'] = 'filename=%s.pdf' % filename
        return generate_pdf(
            'pdf_invoice.html', context=context, file_object=response)
    else:
        return render(request, 'invoice.html', context)


@staff_member_required
def invoice_edit(request, pk=None):
    """Create (pk=None) or edit an invoice."""
    template_name, url_name = get_template_and_url_names(
        'invoice', page_type='edit')
    return edit(
        request,
        InvoiceForm,
        Invoice,
        url_name,
        template_name,
        active_nav='invoice',
        company_model=Company,
        pk=pk, )


@staff_member_required
def invoice_index(request):
    """List invoices, searchable by client, id, date, project or subject."""
    search_fields = (
        'client__name',
        'document_id',
        'issue_date',
        'project__name',
        'subject', )
    context = get_index_items(
        request,
        Invoice,
        active_nav='invoice',
        app_settings_model=AppSettings,
        edit_url='invoice_edit',  # Delete modal
        order_by=('-updated', ),
        search_fields=search_fields,
        show_search=True)
    return render(request, 'invoice_index.html', context)
def login(request):
    """Authenticate and log a user in, recording the client's city."""
    context = {}
    context['login'] = True
    if request.method == 'POST':
        # BUG FIX: .get() instead of [] -- a malformed POST missing
        # either field previously raised MultiValueDictKeyError (HTTP
        # 500); now it simply fails authentication.
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(request, username=username, password=password)
        if user is not None:
            # https://stackoverflow.com/a/39316967/185820
            auth_login(request, user)
            city_data = get_client_city(request)
            log = Log(entry='%s logged in from %s' % (user, city_data))
            log.save()
            return HttpResponseRedirect(reverse('home'))
        else:
            messages.add_message(request, messages.WARNING, 'Login failed.')
            return HttpResponseRedirect(reverse('home'))
    return render(request, 'login.html', context)
@staff_member_required
def log_index(request):
    """List log entries, newest first, searchable by entry text."""
    context = get_index_items(
        request,
        Log,
        active_nav='dropdown',
        app_settings_model=AppSettings,
        order_by=('-updated', ),
        search_fields=('entry', ))
    return render(request, 'log_index.html', context)


@staff_member_required
def newsletter(request, pk=None):
    """Display a newsletter and its recipient contacts."""
    context = {}
    newsletter = get_object_or_404(Newsletter, pk=pk)
    context['active_nav'] = 'dropdown'
    context['contacts'] = newsletter.contacts.all()
    context['edit_url'] = 'newsletter_edit'
    context['item'] = newsletter
    return render(request, 'newsletter.html', context)


@staff_member_required
def newsletter_edit(request, pk=None):
    """Create (pk=None) or edit a newsletter."""
    template_name, url_name = get_template_and_url_names(
        'newsletter', page_type='edit')
    return edit(
        request,
        NewsletterForm,
        Newsletter,
        url_name,
        template_name,
        active_nav='dropdown',
        pk=pk)


@staff_member_required
def newsletter_index(request, pk=None):
    """List newsletters, newest first, searchable by body text."""
    context = get_index_items(
        request,
        Newsletter,
        active_nav='dropdown',
        app_settings_model=AppSettings,
        order_by=('-updated', ),
        search_fields=('text', ))
    return render(request, 'newsletter_index.html', context)
@staff_member_required
def newsletter_send(request, pk=None):
    """Send a newsletter to each of its contacts.

    Every message carries a per-contact unsubscribe link keyed on the
    contact's uuid; each successful send is logged.
    """
    context = {}
    newsletter = get_object_or_404(Newsletter, pk=pk)
    contacts = newsletter.contacts.all()
    for contact in contacts:
        url = reverse('contact_unsubscribe', kwargs={'pk': contact.pk})
        # BUG FIX: build_absolute_uri includes the scheme. The original
        # joined get_host() + path, yielding "example.com/..." which
        # mail clients do not render as a link.
        url = request.build_absolute_uri(url)
        to = contact.email
        first_name = contact.first_name
        subject = newsletter.subject
        message = newsletter.text
        if send_mail(
                request,
                subject,
                message,
                to,
                url=url,
                uuid=contact.uuid,
                first_name=first_name):
            log = Log(entry='Mail sent to %s.' % to)
            log.save()
    messages.add_message(request, messages.SUCCESS, 'Batch mail sent!')
    context['active_nav'] = 'newsletter'
    context['contacts'] = contacts
    context['edit_url'] = 'newsletter_edit'
    context['item'] = newsletter
    return render(request, 'newsletter.html', context)
@staff_member_required
def note(request, pk=None):
    """Display a note, or export it as PDF via ``?pdf=``."""
    context = {}
    pdf = get_query(request, 'pdf')
    context['pdf'] = pdf
    note = get_object_or_404(Note, pk=pk)
    context['active_nav'] = 'note'
    context['edit_url'] = 'note_edit'
    context['item'] = note
    if pdf:
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'filename=note-%s.pdf' % pk
        return generate_pdf(
            'pdf_note.html', context=context, file_object=response)
    else:
        return render(request, 'note.html', context)
# https://stackoverflow.com/a/42038839/185820
@staff_member_required(login_url='login')
def note_edit(request, pk=None):
    """Create (pk=None) or edit a note."""
    template_name, url_name = get_template_and_url_names(
        'note', page_type='edit')
    return edit(
        request,
        NoteForm,
        Note,
        url_name,
        template_name,
        active_nav='note',
        app_settings_model=AppSettings,
        client_model=Client,
        company_model=Company,
        pk=pk)


@staff_member_required
def note_index(request, pk=None):
    """List visible (non-hidden) notes; active ones sort to the top."""
    filters = {'hidden': False, }
    context = get_index_items(
        request,
        Note,
        active_nav='note',
        app_settings_model=AppSettings,
        filters=filters,
        order_by=('-active', '-updated'),
        search_fields=('note', 'title'),
        show_search=True)
    context['edit_url'] = 'note_edit'  # Delete modal
    return render(request, 'note_index.html', context)
@staff_member_required
def project(request, pk=None):
    """Display a project with its estimates, invoices and time entries."""
    context = get_page_items(
        request,
        app_settings_model=AppSettings,
        model=Project,
        estimate_model=Estimate,
        invoice_model=Invoice,
        order_by={'time': ('date', )},  # For time entries
        time_model=Time,
        pk=pk)
    return render(request, 'project.html', context)


@staff_member_required
def project_edit(request, pk=None):
    """Create (pk=None) or edit a project."""
    template_name, url_name = get_template_and_url_names(
        'project', page_type='edit')
    return edit(
        request,
        ProjectForm,
        Project,
        url_name,
        template_name,
        active_nav='project',
        pk=pk)


@staff_member_required
def project_index(request, pk=None):
    """List projects with search by id and name."""
    context = get_index_items(
        request,
        Project,
        active_nav='project',
        app_settings_model=AppSettings,
        edit_url='project_edit',  # Delete modal
        order_by=('-updated', ),
        search_fields=('id', 'name'),
        show_search=True)
    return render(request, 'project_index.html', context)
@staff_member_required
def proposal(request, pk=None):
    """Display a proposal, or export it as PDF via ``?pdf=``."""
    context = get_page_items(
        request, company_model=Company, model=Proposal, pk=pk)
    if context['pdf']:
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'filename=proposal-%s.pdf' % pk
        return generate_pdf(
            'pdf_proposal.html', context=context, file_object=response)
    else:
        return render(request, 'proposal.html', context)
# SECURITY FIX: every other *_edit view in this module is staff-only;
# the decorator was missing here, leaving proposal editing open to any
# authenticated-or-not visitor.
@staff_member_required
def proposal_edit(request, pk=None):
    """Create (pk=None) or edit a proposal."""
    template_name, url_name = get_template_and_url_names(
        'proposal', page_type='edit')
    return edit(
        request,
        ProposalForm,
        Proposal,
        url_name,
        template_name,
        active_nav='proposal',
        company_model=Company,
        pk=pk)
@staff_member_required
def proposal_index(request, pk=None):
    """List proposals, most recently updated first."""
    context = get_index_items(
        request,
        Proposal,
        active_nav='proposal',
        app_settings_model=AppSettings,
        order_by=('-updated', ),
        show_search=True)
    context['edit_url'] = 'proposal_edit'  # Delete modal
    return render(request, 'proposal_index.html', context)
@staff_member_required
def report(request, pk=None):
    """Display a report with cost (gross - net), or export as PDF."""
    company = Company.get_solo()
    context = {}
    pdf = get_query(request, 'pdf')
    context['pdf'] = pdf
    report = get_object_or_404(Report, pk=pk)
    # Totals across all active reports, shown alongside this one.
    reports = Report.objects.filter(active=True)
    reports = reports.aggregate(gross=Sum(F('gross')), net=Sum(F('net')))
    context['active_nav'] = 'dropdown'
    context['company'] = company
    context['cost'] = report.gross - report.net
    context['edit_url'] = 'report_edit'  # Delete modal
    context['item'] = report
    context['reports'] = reports
    if pdf:
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'filename=report-%s.pdf' % pk
        return generate_pdf(
            'pdf_report.html', context=context, file_object=response)
    else:
        return render(request, 'report.html', context)
@staff_member_required
def report_edit(request, pk=None):
    """Create (pk=None) or edit a report."""
    template_name, url_name = get_template_and_url_names(
        'report', page_type='edit')
    return edit(
        request,
        ReportForm,
        Report,
        url_name,
        template_name,
        active_nav='dropdown',
        invoice_model=Invoice,
        pk=pk)
@staff_member_required
def report_index(request):
    """List reports with aggregate gross/net/cost totals and optional plot."""
    show_plot = False
    reports = Report.objects.filter(active=True)
    plot_items = reports  # Save for plotting
    reports = reports.aggregate(gross=Sum(F('gross')), net=Sum(F('net')))
    company = Company.get_solo()
    # BUG FIX: the original passed a stray positional ``search_fields``
    # -- an undefined name, and a duplicate of the keyword argument
    # below -- raising NameError (then TypeError) on every request.
    context = get_index_items(
        request,
        Report,
        active_nav='dropdown',
        app_settings_model=AppSettings,
        edit_url='report_edit',  # Delete modal
        order_by=('-updated', ),
        search_fields=('id', 'name', 'gross', 'net'),
        show_search=True)
    if reports['gross'] is not None and reports['net'] is not None:
        cost = reports['gross'] - reports['net']
    else:
        # No active reports: fall back to zero totals for the template.
        reports['gross'] = 0
        reports['net'] = 0
        cost = 0
    # A trend plot needs at least two data points.
    if 'items' in context:
        if len(context['items']) > 1:
            show_plot = True
    context['reports'] = reports
    context['company'] = company
    context['cost'] = cost
    context['show_plot'] = show_plot
    context['plot_items'] = plot_items
    return render(request, 'report_index.html', context)
def report_plot(request):  # http://stackoverflow.com/a/5515994/185820
    """Render report values as a PNG time-series plot.

    Expects ``values`` from the query string as (value, 'YYYY-MM-DD')
    pairs -- presumably decoded by ``get_query``; confirm its format.
    """
    values = get_query(request, 'values')
    # http://matplotlib.org/examples/api/date_demo.html
    x = [date2num(datetime.strptime(i[1], '%Y-%m-%d')) for i in values]
    y = [i[0] for i in values]
    figure = Figure()
    canvas = FigureCanvasAgg(figure)
    axes = figure.add_subplot(1, 1, 1)
    axes.grid(True)
    axes.plot(x, y)
    # One tick per month, labelled with the month number.
    axes.xaxis.set_major_locator(MonthLocator())
    axes.xaxis.set_major_formatter(DateFormatter('%m'))
    # write image data to a string buffer and get the PNG image bytes
    buf = BytesIO()
    canvas.print_png(buf)
    data = buf.getvalue()
    # write image bytes back to the browser
    return HttpResponse(data, content_type="image/png")
# https://stackoverflow.com/a/42038839/185820
@staff_member_required(login_url='login')
def service_edit(request, pk=None):
    """Create (pk=None) or edit a service."""
    template_name, url_name = get_template_and_url_names(
        'service', page_type='edit')
    return edit(
        request,
        ServiceForm,
        Service,
        url_name,
        template_name,
        active_nav='dropdown',
        company_model=Company,
        pk=pk)


@staff_member_required
def settings(request):
    """Show the singleton application settings."""
    context = {}
    app_settings = AppSettings.get_solo()
    context['settings'] = app_settings
    context['active_tab'] = 'system'
    context['active_nav'] = 'dropdown'
    return render(request, 'settings.html', context)


@staff_member_required
def settings_edit(request, pk=None):
    """Edit the singleton application settings (always pk=1)."""
    return edit(
        request,
        AppSettingsForm,
        AppSettings,
        'settings',
        'settings_edit.html',
        active_nav='dropdown',
        pk=1)
@staff_member_required
def task(request, pk=None):
    """Display a single task."""
    context = {}
    task = get_object_or_404(Task, pk=pk)
    context['active_nav'] = 'task'
    context['edit_url'] = 'task_edit'  # Delete modal
    context['item'] = task
    return render(request, 'task.html', context)
@staff_member_required
def task_edit(request, pk=None):
    """Create (pk=None) or edit a task."""
    template_name, url_name = get_template_and_url_names(
        'task', page_type='edit')
    return edit(
        request,
        TaskForm,
        Task,
        url_name,
        # CONSISTENCY FIX: use the computed template name; the original
        # hard-coded 'task_edit.html', leaving template_name unused,
        # unlike every other *_edit view in this module.
        template_name,
        active_nav='task',
        pk=pk)
@staff_member_required
def task_index(request):
    """List tasks with search by name."""
    context = get_index_items(
        request,
        Task,
        active_nav='task',
        app_settings_model=AppSettings,
        edit_url='task_edit',  # Delete modal
        order_by=('-updated', ),
        search_fields=('name', ),
        show_search=True)
    return render(request, 'task_index.html', context)
@login_required
def time(request, pk=None):
    """Display a time entry; owner and staff only.

    Entries with no user, and entries owned by someone else, are
    visible only to staff; everyone else is bounced to the login page.
    """
    context = {}
    entry = get_object_or_404(Time, pk=pk)
    if not entry.user and not request.user.is_staff:
        return HttpResponseRedirect(reverse('login'))
    if entry.user:
        if (not entry.user.username == request.user.username and
                not request.user.is_staff):
            return HttpResponseRedirect(reverse('login'))
    context['active_nav'] = 'time'
    context['edit_url'] = 'time_edit'  # Delete modal
    context['item'] = entry
    return render(request, 'time.html', context)
@login_required
def time_edit(request, pk=None):
    """Create (pk=None) or edit a time entry."""
    template_name, url_name = get_template_and_url_names(
        'time', page_type='edit')
    return edit(
        request,
        TimeForm,
        Time,
        url_name,
        template_name,
        active_nav='time',
        invoice_model=Invoice,
        estimate_model=Estimate,
        project_model=Project,
        task_model=Task,
        time_model=Time,
        pk=pk, )
@login_required
def time_index(request):
    """Staff-only searchable list of time entries."""
    # PERF/CLARITY FIX: check permission before doing the queryset work;
    # the original built the full index context and then threw it away
    # for non-staff users.
    if not request.user.is_staff:
        return HttpResponseRedirect(reverse('login'))
    search_fields = ('client__name', 'date', 'log', 'pk', 'project__name',
                     'invoice__document_id', 'user__username')
    context = get_index_items(
        request,
        Time,
        active_nav='time',
        app_settings_model=AppSettings,
        edit_url='time_edit',  # Delete modal
        order_by=('-updated', ),
        search_fields=search_fields,
        show_search=True)
    return render(request, 'time_index.html', context)
@login_required
def user(request, pk=None):
    """Display a user's profile page with related projects and time."""
    filters = {
        'estimate': None,
        'user': None,  # fill in later
    }
    # BUG FIX: the original wrote ``order_by = {...},`` -- the trailing
    # comma bound a one-element tuple CONTAINING the dict, not the dict
    # itself, so get_page_items received the wrong type.
    order_by = {
        'time': ('-updated', ),
    }
    context = get_page_items(
        request,
        app_settings_model=AppSettings,
        contact_model=Contact,
        filters=filters,
        model=User,
        order_by=order_by,
        profile_model=Profile,
        project_model=Project,
        time_model=Time,
        pk=pk)
    return render(request, 'user.html', context)
@staff_member_required
def user_contact(request, pk=None):
    """Copy a user's details into the Contact table."""
    return add_user_to_contacts(request, Contact, pk=pk)


@login_required
def user_edit(request, pk=None):
    """Edit a user's profile."""
    template_name, url_name = get_template_and_url_names(
        'user', page_type='edit')
    return edit(
        request,
        ProfileForm,
        Profile,
        url_name,
        template_name,
        active_nav='dropdown',
        pk=pk)
@staff_member_required
def user_index(request):
    """List users, flagging those who are also newsletter contacts."""
    context = get_index_items(
        request,
        User,
        active_nav='dropdown',
        app_settings_model=AppSettings,
        order_by=('-updated', ),
        show_search=False)
    # BUG FIX: the original referenced an undefined ``company`` local
    # (NameError); fetch the singleton explicitly.
    context['company'] = Company.get_solo()
    # Check if user is contact. Build the email set once: O(n + m)
    # instead of rescanning the whole contact list for every user.
    contact_emails = {i.email for i in Contact.objects.all()}
    items = context['items']
    for item in items:
        item.is_contact = item.email in contact_emails
    context['items'] = items
    return render(request, 'user_index.html', context)
|
import asyncio
import inspect
missing = object()
__all__ = ["RestartableTask", "Dispatch", "EventHandler"]
class RestartableTask:
    def __init__(self, *, loop):
        '''
        Abstract task that ensures a previous run's shutdown logic has
        completed before the next start call is allowed to continue.

        RestartableTasks must call `await self._finish_shutdown()` when they
        have safely ended any running coroutines.
        '''
        self.loop = loop
        # True between a successful start() and the next stop().
        self.running = False
        # Two-phase shutdown handshake: started by stop(), finished by
        # the subclass via _finish_shutdown().
        # NOTE(review): asyncio.Event's ``loop`` argument was deprecated
        # in 3.8 and removed in 3.10 -- confirm the target Python version.
        self.__shutdown_started = asyncio.Event(loop=self.loop)
        self.__shutdown_finished = asyncio.Event(loop=self.loop)

    async def start(self):
        # Idempotent: start() while already running is a no-op.
        if self.running:
            return
        # If a previous run is mid-shutdown, wait for it to finish
        # before clearing the handshake flags and starting again.
        if self.__shutdown_started.is_set():
            await self.__shutdown_finished.wait()
        self.__shutdown_started.clear()
        self.__shutdown_finished.clear()
        self.running = True
        self.loop.create_task(self._task())

    async def stop(self):
        # Idempotent: stop() while not running is a no-op.
        if not self.running:
            return
        self.running = False
        # Kick off the shutdown process
        self.__shutdown_started.set()
        await self._start_shutdown()
        # Wait for the return signal that the shutdown is complete
        await self.__shutdown_finished.wait()

    async def _task(self):
        # Subclass hook: the long-running body scheduled by start().
        pass

    async def _start_shutdown(self):
        # Subclass hook: begin shutting down any running coroutines.
        pass

    async def _finish_shutdown(self):
        # Subclasses call this once shutdown is done to unblock stop().
        self.__shutdown_finished.set()
class Dispatch(RestartableTask):
    ''' Dispatch unpacked **kwargs to callbacks when events occur '''

    def __init__(self, loop):
        super().__init__(loop=loop)
        # event name -> EventHandler
        self._handlers = {}
        # FIFO of (event, params) tuples awaiting delivery.
        self._queue = asyncio.Queue(loop=self.loop)
        # Wakes the processor when it is idle (set by trigger/stop).
        self._resume_processing = asyncio.Event(loop=self.loop)

    def on(self, event):
        '''
        Returns a wrapper for the given event.

        Usage:

            @dispatch.on("my_event")
            def handle_my_event(foo, bar, baz):
                ...
        '''
        handler = self._handlers.get(event, None)
        if not handler:
            raise ValueError("Unknown event '{}'".format(event))
        return handler.register

    def register(self, event, params):
        '''
        Register a new event with available params.
        Raises ValueError when the event has already been registered.

        Usage:

            dispatch.register("my_event", ["foo", "bar", "baz"])
        '''
        handler = self._handlers.get(event, None)
        if handler is not None:
            raise ValueError("Event {} already registered".format(event))
        self._handlers[event] = EventHandler(event, params, self.loop)

    def unregister(self, event):
        '''
        Remove all registered handlers for an event.
        Silent return when event was not registered.

        Usage:

            dispatch.unregister("my_event")
            dispatch.unregister("my_event")  # no-op
        '''
        self._handlers.pop(event, None)

    def trigger(self, event, params):
        ''' Non-blocking enqueue of an event '''
        self._queue.put_nowait((event, params))
        self._resume_processing.set()

    @property
    def events(self):
        ''' Number of events currently enqueued '''
        return self._queue.qsize()

    def clear(self):
        '''
        Clear any enqueued events.
        Raises a RuntimeException if called while the Dispatcher is running
        '''
        if self.running:
            raise RuntimeError("Can't clear the queue while running")
        while self.events:
            self._queue.get_nowait()

    async def _task(self):
        ''' Main queue processor '''
        # Handlers only delegate while running, so start them first.
        for handler in self._handlers.values():
            await handler.start()
        while self.running:
            if self.events:
                event, params = await self._queue.get()
                handler = self._handlers.get(event, None)
                # Unknown events are silently dropped.
                if handler:
                    handler(params)
            else:
                # Resume on either the next `trigger` call or a `stop`
                await self._resume_processing.wait()
                self._resume_processing.clear()
        # Give all the handlers a chance to complete their pending tasks
        tasks = [handler.stop() for handler in self._handlers.values()]
        if tasks:
            await asyncio.wait(tasks, loop=self.loop)
        # Let the shutdown process continue
        await self._finish_shutdown()

    async def _start_shutdown(self):
        # The processor is waiting, resume so it can exit cleanly
        self._resume_processing.set()
        await super()._start_shutdown()
class EventHandler(RestartableTask):
    '''Fans one event's params out to all of its registered callbacks.'''

    def __init__(self, event, params, loop):
        super().__init__(loop=loop)
        self._event = event
        # Parameter names this event makes available to callbacks.
        self._params = params
        self._callbacks = []
        # id(task) -> task, for callback invocations still in flight.
        self._tasks = {}

    def __call__(self, params):
        # Don't handle the call if we're shutting down
        if not self.running:
            raise RuntimeError(
                "EventHandler must be running to delegate events")
        for callback in self._callbacks:
            task = self.loop.create_task(callback(params))
            self._tasks[id(task)] = task
            task.add_done_callback(self._delegate_done)

    def _delegate_done(self, task):
        '''
        When a callback is complete, remove it from the active task set.
        Don't raise if the task has already been removed
        '''
        self._tasks.pop(id(task), None)

    def register(self, callback):
        # Validate against the event's parameter list, then store a
        # partial-binding wrapper. The original callable is returned so
        # this works as a decorator.
        self._validate(callback)
        wrapped = self._wrap(callback)
        self._callbacks.append(wrapped)
        return callback

    async def _start_shutdown(self):
        # Give all active tasks a chance to complete
        active_tasks = list(self._tasks.values())
        if active_tasks:
            await asyncio.wait(active_tasks, loop=self.loop)
        await self._finish_shutdown()

    def _wrap(self, callback):
        return partial_bind(callback)

    def _validate(self, callback):
        validate_func(self._event, callback, self._params)
def validate_func(event, callback, params):
    """Validate that *callback* can receive *event*'s parameters.

    Raises ValueError when the callback declares a ``*args`` parameter,
    when its ``**kwargs`` name masks an event parameter, or when it
    expects a parameter the event does not provide.
    """
    sig = inspect.signature(callback)
    expected = set(sig.parameters)
    for param in sig.parameters.values():
        kind = param.kind
        if kind == inspect.Parameter.VAR_POSITIONAL:
            raise ValueError(
                ("function '{}' expects parameter {} to be VAR_POSITIONAL, "
                 "when it will always be a single value. This parameter "
                 "must be either POSITIONAL_ONLY, POSITIONAL_OR_KEYWORD, or "
                 "KEYWORD_ONLY (or omitted)").format(callback.__name__,
                                                     param.name))
        if kind == inspect.Parameter.VAR_KEYWORD:
            # **kwargs are ok, as long as the **name doesn't
            # mask an actual param that the event emits.
            if param.name in params:
                # BUG FIX: the original template had three placeholders
                # for four format() arguments, so the event's parameter
                # list was silently dropped from the error message.
                raise ValueError(
                    ("function '{}' expects parameter {} to be VAR_KEYWORD, "
                     "which masks an actual parameter for event {}. This "
                     "event has the following parameters, which must not be "
                     "used as the **VAR_KEYWORD argument. They may be "
                     "omitted: {}").format(
                        callback.__name__, param.name, event, params))
            else:
                # Pop from expected, this will gobble up any unused params
                expected.remove(param.name)
    available = set(params)
    unavailable = expected - available
    if unavailable:
        raise ValueError(
            ("function '{}' expects the following parameters for event {} "
             "that are not available: {}. Available parameters for this "
             "event are: {}").format(callback.__name__, event,
                                     unavailable, available))
def partial_bind(callback):
    """Wrap *callback* so it can be awaited with a single params dict.

    The returned coroutine function takes one mapping, fills in the
    callback's declared parameters (falling back to their defaults, or
    None when a parameter has no default), and awaits the result.
    """
    sig = inspect.signature(callback)
    # Wrap non-coroutines so we can always `await callback(**kw)`.
    # FIX: an inner ``async def`` replaces asyncio.coroutine, which was
    # deprecated in Python 3.8 and removed in 3.11.
    if not inspect.iscoroutinefunction(callback):
        sync_callback = callback

        async def callback(*args, **kwargs):
            return sync_callback(*args, **kwargs)

    # Local sentinel distinguishes "key absent" from a legitimate None.
    _missing = object()
    base = {}
    for key, param in sig.parameters.items():
        default = param.default
        # Param has no default - use equivalent of empty
        if default is inspect.Parameter.empty:
            base[key] = None
        else:
            base[key] = default

    async def wrapper(params):
        unbound = base.copy()
        # Only map params this callback expects
        for key in base:
            new_value = params.get(key, _missing)
            if new_value is not _missing:
                unbound[key] = new_value
        bound = sig.bind(**unbound)
        return await callback(*bound.args, **bound.kwargs)

    return wrapper
Refactor the two-step event into its own class; rename `finish` to `complete`.
import asyncio
import inspect
missing = object()
__all__ = ["RestartableTask", "Dispatch", "EventHandler"]
class Event:
    """Two-phase event: an operation is *started*, then *completed*.

    ``wait()`` returns only after both phases have been signalled, so a
    caller can block until a full start/complete handshake finishes.
    """

    def __init__(self, *, loop):
        self.loop = loop
        # NOTE(review): asyncio.Event's ``loop`` argument was deprecated
        # in 3.8 and removed in 3.10 -- confirm the target Python version.
        self._start = asyncio.Event(loop=self.loop)
        self._complete = asyncio.Event(loop=self.loop)

    @property
    def started(self):
        # True once start() has been called (until clear()).
        return self._start.is_set()

    @property
    def finished(self):
        # True once complete() has been called (until clear()).
        return self._complete.is_set()

    def start(self):
        self._start.set()

    def complete(self):
        self._complete.set()

    def clear(self):
        # Reset both phases so the event can be reused.
        self._start.clear()
        self._complete.clear()

    async def wait(self):
        # Wait for the full handshake: start, then completion.
        await self._start.wait()
        await self._complete.wait()
class RestartableTask:
    def __init__(self, *, loop):
        '''
        Abstract task that ensures a previous run's shutdown logic has
        completed before the next start call is allowed to continue.

        RestartableTasks must call `await self._complete_shutdown()` when they
        have safely ended any running coroutines.
        '''
        self.loop = loop
        # True between a successful start() and the next stop().
        self.running = False
        # Two-phase shutdown handshake: started by stop(), completed by
        # the subclass via _complete_shutdown().
        self._shutdown = Event(loop=self.loop)

    async def start(self):
        # Idempotent: start() while already running is a no-op.
        if self.running:
            return
        # If a previous run is mid-shutdown, wait for the handshake to
        # finish before re-arming it.
        if self._shutdown.started:
            await self._shutdown.wait()
        self._shutdown.clear()
        self.running = True
        self.loop.create_task(self._task())

    async def stop(self):
        # Idempotent: stop() while not running is a no-op.
        if not self.running:
            return
        self.running = False
        # Kick off the shutdown process
        self._shutdown.start()
        await self._start_shutdown()
        # Wait for the return signal that the shutdown is complete
        await self._shutdown.wait()

    async def _task(self):
        # Subclass hook: the long-running body scheduled by start().
        pass

    async def _start_shutdown(self):
        # Subclass hook: begin shutting down any running coroutines.
        pass

    async def _complete_shutdown(self):
        # Subclasses call this once shutdown is done to unblock stop().
        self._shutdown.complete()
class Dispatch(RestartableTask):
    ''' Dispatch unpacked **kwargs to callbacks when events occur '''

    def __init__(self, loop):
        super().__init__(loop=loop)
        # event name -> EventHandler
        self._handlers = {}
        # FIFO of (event, params) tuples awaiting delivery.
        self._queue = asyncio.Queue(loop=self.loop)
        # Wakes the processor when it is idle (set by trigger/stop).
        self._resume_processing = asyncio.Event(loop=self.loop)

    def on(self, event):
        '''
        Returns a wrapper for the given event.

        Usage:

            @dispatch.on("my_event")
            def handle_my_event(foo, bar, baz):
                ...
        '''
        handler = self._handlers.get(event, None)
        if not handler:
            raise ValueError("Unknown event '{}'".format(event))
        return handler.register

    def register(self, event, params):
        '''
        Register a new event with available params.
        Raises ValueError when the event has already been registered.

        Usage:

            dispatch.register("my_event", ["foo", "bar", "baz"])
        '''
        handler = self._handlers.get(event, None)
        if handler is not None:
            raise ValueError("Event {} already registered".format(event))
        self._handlers[event] = EventHandler(event, params, self.loop)

    def unregister(self, event):
        '''
        Remove all registered handlers for an event.
        Silent return when event was not registered.

        Usage:

            dispatch.unregister("my_event")
            dispatch.unregister("my_event")  # no-op
        '''
        self._handlers.pop(event, None)

    def trigger(self, event, params):
        ''' Non-blocking enqueue of an event '''
        self._queue.put_nowait((event, params))
        self._resume_processing.set()

    @property
    def events(self):
        ''' Number of events currently enqueued '''
        return self._queue.qsize()

    def clear(self):
        '''
        Clear any enqueued events.
        Raises a RuntimeException if called while the Dispatcher is running
        '''
        if self.running:
            raise RuntimeError("Can't clear the queue while running")
        while self.events:
            self._queue.get_nowait()

    async def _task(self):
        ''' Main queue processor '''
        # Handlers only delegate while running, so start them first.
        for handler in self._handlers.values():
            await handler.start()
        while self.running:
            if self.events:
                event, params = await self._queue.get()
                handler = self._handlers.get(event, None)
                # Unknown events are silently dropped.
                if handler:
                    handler(params)
            else:
                # Resume on either the next `trigger` call or a `stop`
                await self._resume_processing.wait()
                self._resume_processing.clear()
        # Give all the handlers a chance to complete their pending tasks
        tasks = [handler.stop() for handler in self._handlers.values()]
        if tasks:
            await asyncio.wait(tasks, loop=self.loop)
        # Let the shutdown process continue
        await self._complete_shutdown()

    async def _start_shutdown(self):
        # The processor is waiting, resume so it can exit cleanly
        self._resume_processing.set()
        await super()._start_shutdown()
class EventHandler(RestartableTask):
    '''Fans one event's params out to all of its registered callbacks.'''

    def __init__(self, event, params, loop):
        super().__init__(loop=loop)
        self._event = event
        # Parameter names this event makes available to callbacks.
        self._params = params
        self._callbacks = []
        # id(task) -> task, for callback invocations still in flight.
        self._tasks = {}

    def __call__(self, params):
        # Don't handle the call if we're shutting down
        if not self.running:
            raise RuntimeError(
                "EventHandler must be running to delegate events")
        for callback in self._callbacks:
            task = self.loop.create_task(callback(params))
            self._tasks[id(task)] = task
            task.add_done_callback(self._task_done)

    def _task_done(self, task):
        '''
        When a callback is complete, remove it from the active task set.
        Don't raise if the task has already been removed
        '''
        self._tasks.pop(id(task), None)

    def register(self, callback):
        # Validate against the event's parameter list, then store a
        # partial-binding wrapper. The original callable is returned so
        # this works as a decorator.
        self._validate(callback)
        wrapped = self._wrap(callback)
        self._callbacks.append(wrapped)
        return callback

    async def _start_shutdown(self):
        # Give all active tasks a chance to complete
        active_tasks = list(self._tasks.values())
        if active_tasks:
            await asyncio.wait(active_tasks, loop=self.loop)
        await self._complete_shutdown()

    def _wrap(self, callback):
        return partial_bind(callback)

    def _validate(self, callback):
        validate_func(self._event, callback, self._params)
def validate_func(event, callback, params):
    """Validate that *callback* can receive *event*'s parameters.

    Raises ValueError when the callback declares a ``*args`` parameter,
    when its ``**kwargs`` name masks an event parameter, or when it
    expects a parameter the event does not provide.
    """
    sig = inspect.signature(callback)
    expected = set(sig.parameters)
    for param in sig.parameters.values():
        kind = param.kind
        if kind == inspect.Parameter.VAR_POSITIONAL:
            raise ValueError(
                ("function '{}' expects parameter {} to be VAR_POSITIONAL, "
                 "when it will always be a single value. This parameter "
                 "must be either POSITIONAL_ONLY, POSITIONAL_OR_KEYWORD, or "
                 "KEYWORD_ONLY (or omitted)").format(callback.__name__,
                                                     param.name))
        if kind == inspect.Parameter.VAR_KEYWORD:
            # **kwargs are ok, as long as the **name doesn't
            # mask an actual param that the event emits.
            if param.name in params:
                # BUG FIX: the original template had three placeholders
                # for four format() arguments, so the event's parameter
                # list was silently dropped from the error message.
                raise ValueError(
                    ("function '{}' expects parameter {} to be VAR_KEYWORD, "
                     "which masks an actual parameter for event {}. This "
                     "event has the following parameters, which must not be "
                     "used as the **VAR_KEYWORD argument. They may be "
                     "omitted: {}").format(
                        callback.__name__, param.name, event, params))
            else:
                # Pop from expected, this will gobble up any unused params
                expected.remove(param.name)
    available = set(params)
    unavailable = expected - available
    if unavailable:
        raise ValueError(
            ("function '{}' expects the following parameters for event {} "
             "that are not available: {}. Available parameters for this "
             "event are: {}").format(callback.__name__, event,
                                     unavailable, available))
def partial_bind(callback):
    """Wrap *callback* so it can be awaited with a single params dict.

    The returned coroutine function takes one mapping, fills in the
    callback's declared parameters (falling back to their defaults, or
    None when a parameter has no default), and awaits the result.
    """
    sig = inspect.signature(callback)
    # Wrap non-coroutines so we can always `await callback(**kw)`.
    # FIX: an inner ``async def`` replaces asyncio.coroutine, which was
    # deprecated in Python 3.8 and removed in 3.11.
    if not inspect.iscoroutinefunction(callback):
        sync_callback = callback

        async def callback(*args, **kwargs):
            return sync_callback(*args, **kwargs)

    # Local sentinel distinguishes "key absent" from a legitimate None.
    _missing = object()
    base = {}
    for key, param in sig.parameters.items():
        default = param.default
        # Param has no default - use equivalent of empty
        if default is inspect.Parameter.empty:
            base[key] = None
        else:
            base[key] = default

    async def wrapper(params):
        unbound = base.copy()
        # Only map params this callback expects
        for key in base:
            new_value = params.get(key, _missing)
            if new_value is not _missing:
                unbound[key] = new_value
        bound = sig.bind(**unbound)
        return await callback(*bound.args, **bound.kwargs)

    return wrapper
|
from .forms import AdminProfileForm
from .forms import AdminTimeForm
from .forms import AppSettingsForm
from .forms import ClientForm
from .forms import CompanyForm
from .forms import ContactForm
from .forms import ContractForm
from .forms import ContractSettingsForm
from .forms import EstimateForm
from .forms import FileForm
from .forms import InvoiceForm
from .forms import MailForm
from .forms import NewsletterForm
from .forms import NoteForm
from .forms import ProfileForm
from .forms import ProjectForm
from .forms import ProposalForm
from .forms import ReportForm
from .forms import ServiceForm
from .forms import TaskForm
from .forms import TimeForm
from .models import AppSettings
from .models import Client
from .models import Company
from .models import Contact
from .models import Contract
from .models import ContractSettings
from .models import Estimate
from .models import File
from .models import Invoice
from .models import Log
from .models import Newsletter
from .models import Note
from .models import Profile
from .models import Project
from .models import Proposal
from .models import Report
from .models import Service
from .models import Testimonial
from .models import Task
from .models import Time
from .serializers import ClientSerializer
from .serializers import ProfileSerializer
from .serializers import ServiceSerializer
from .serializers import TestimonialSerializer
from .utils import edit
from .utils import generate_doc
from .utils import get_client_city
from .utils import get_company_name
from .utils import get_index_items
from .utils import get_page_items
from .utils import is_allowed_to_view
from django.contrib import messages
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import render
from django_xhtml2pdf.utils import generate_pdf
from io import BytesIO
from rest_framework import viewsets
# Create your views here.
class ClientViewSet(viewsets.ModelViewSet):
    """REST API endpoint (DRF ModelViewSet) for published clients, ordered by name."""
    queryset = Client.objects.filter(published=True).order_by('name')
    serializer_class = ClientSerializer
class ServiceViewSet(viewsets.ModelViewSet):
    """REST API endpoint (DRF ModelViewSet) for active services, ordered by name."""
    queryset = Service.objects.filter(active=True).order_by('name')
    serializer_class = ServiceSerializer
class TestimonialViewSet(viewsets.ModelViewSet):
    """REST API endpoint for active testimonials, newest issue_date first."""
    queryset = Testimonial.objects.filter(active=True).order_by('-issue_date')
    serializer_class = TestimonialSerializer
class ProfileViewSet(viewsets.ModelViewSet):
    """REST API endpoint for published profiles, ordered by the user's first name."""
    queryset = Profile.objects.filter(
        published=True).order_by('user__first_name')
    serializer_class = ProfileSerializer
@staff_member_required
def client(request, pk=None):
    """Staff-only client detail page, with related contacts, contracts
    and projects loaded into the template context."""
    context = get_page_items(
        request,
        app_settings_model=AppSettings,
        contact_model=Contact,
        contract_model=Contract,
        model=Client,
        pk=pk,
        project_model=Project)
    return render(request, 'client.html', context)
@staff_member_required
def client_edit(request, pk=None):
    """Create (pk=None) or edit an existing client via the generic edit view."""
    return edit(
        request,
        form_model=ClientForm,
        model=Client,
        active_nav='client',
        pk=pk)
@staff_member_required
def client_index(request):
    """Searchable staff index of clients (search on address/name),
    active and most recently updated first."""
    context = get_index_items(
        request,
        Client,
        active_nav='client',
        app_settings_model=AppSettings,
        edit_url='client_edit',
        order_by=('-active', '-updated', 'name'),
        search_fields=('address', 'name'),
        show_search=True)
    return render(request, 'client_index.html', context)
@staff_member_required
def contact(request, pk=None):
    """Staff-only contact detail page."""
    context = get_page_items(
        request, app_settings_model=AppSettings, model=Contact, pk=pk)
    return render(request, 'contact.html', context)
@staff_member_required
def contact_edit(request, pk=None):
    """Create or edit a contact; Client is passed for the related-client field."""
    return edit(
        request,
        form_model=ContactForm,
        model=Contact,
        active_nav='contact',
        client_model=Client,
        pk=pk)
@staff_member_required
def contact_index(request):
    """Searchable staff index of contacts, active and alphabetical first."""
    context = get_index_items(
        request,
        Contact,
        active_nav='contact',
        app_settings_model=AppSettings,
        edit_url='contact_edit',
        order_by=('-active', 'first_name'),
        search_fields=('first_name', 'last_name', 'email', 'notes', 'pk'),
        show_search=True)
    return render(request, 'contact_index.html', context)
@staff_member_required
def contract(request, pk=None):
    """Staff-only contract detail page with PDF and DOCX export.

    When the context flags ``pdf`` or ``doc`` are set (by
    get_page_items from the request), the contract is returned as a
    downloadable PDF (via xhtml2pdf) or Word document (via
    generate_doc); otherwise the normal HTML page is rendered.
    """
    company = Company.get_solo()
    context = get_page_items(
        request, company_model=Company, model=Contract, pk=pk, time_model=Time)
    if context['pdf']:
        response = HttpResponse(content_type='application/pdf')
        filename = get_company_name(company)
        response['Content-Disposition'] = 'filename=%s.pdf' % filename
        return generate_pdf(
            'pdf_contract.html', context=context, file_object=response)
    if context['doc']:
        # https://stackoverflow.com/a/24122313/185820
        # BUG FIX: the original called generate_doc(contract), passing
        # this view function itself.  Pass the Contract instance that
        # get_page_items placed in the context (cf. the invoice view,
        # which reads context['item'] the same way).
        document = generate_doc(context['item'])
        filename = get_company_name(company)
        f = BytesIO()
        document.save(f)
        length = f.tell()
        f.seek(0)
        content_type = 'application/vnd.openxmlformats-'
        content_type += 'officedocument.wordprocessingml.document'
        response = HttpResponse(f.getvalue(), content_type=content_type)
        response['Content-Disposition'] = 'filename=%s.docx' % filename
        response['Content-Length'] = length
        return response
    else:
        return render(request, 'contract.html', context)
@staff_member_required
def contract_edit(request, pk=None):
    """Create or edit a contract via the generic edit view."""
    return edit(
        request,
        form_model=ContractForm,
        model=Contract,
        active_nav='contract',
        pk=pk)
@staff_member_required
def contract_index(request):
    """Staff index of contracts, most recently updated first (no search)."""
    context = get_index_items(
        request,
        Contract,
        active_nav='contract',
        order_by=('-updated', ),
        app_settings_model=AppSettings)
    return render(request, 'contract_index.html', context)
@staff_member_required
def estimate(request, pk=None):
    """Staff-only estimate detail page.

    With the ``pdf`` context flag set, exports the estimate as a PDF
    download (reusing the invoice PDF template); otherwise renders the
    HTML page.  Time entries are ordered by date for display.
    """
    order_by = {'time': ('date', ), }
    context = get_page_items(
        request,
        app_settings_model=AppSettings,
        company_model=Company,
        model=Estimate,
        order_by=order_by,
        pk=pk,
        time_model=Time)
    if context['pdf']:
        response = HttpResponse(content_type='application/pdf')
        # str(pk): pk normally arrives as a string from the URLconf,
        # but guard against a None/int pk so join() cannot raise
        # TypeError.
        filename = '-'.join(['estimate', str(pk)])
        response['Content-Disposition'] = 'filename=%s.pdf' % filename
        return generate_pdf(
            'pdf_invoice.html', context=context, file_object=response)
    else:
        return render(request, 'estimate.html', context)
@staff_member_required
def estimate_edit(request, pk=None):
    """Create or edit an estimate via the generic edit view."""
    return edit(
        request,
        form_model=EstimateForm,
        model=Estimate,
        active_nav='estimate',
        company_model=Company,
        project_model=Project,
        pk=pk)
@staff_member_required
def estimate_index(request):
    """Searchable staff index of estimates, newest issue_date first.

    The singleton Company is added to the context for the template.
    """
    company = Company.get_solo()
    context = get_index_items(
        request,
        Estimate,
        active_nav='estimate',
        app_settings_model=AppSettings,
        edit_url='estimate_edit',
        order_by=('-issue_date', ),
        search_fields=('subject', ),
        show_search=True)
    context['company'] = company
    return render(request, 'estimate_index.html', context)
@staff_member_required
def file_view(request, pk=None):
    """Staff-only file detail page (named file_view, not file, to avoid
    shadowing the Python 2 builtin)."""
    context = get_page_items(
        request,
        app_settings_model=AppSettings,
        company_model=Company,
        model=File,
        pk=pk)
    return render(request, 'file.html', context)
@staff_member_required
def file_edit(request, pk=None):
    """Create or edit a file record via the generic edit view."""
    return edit(
        request,
        form_model=FileForm,
        model=File,
        active_nav='dropdown',
        company_model=Company,
        pk=pk, )
@staff_member_required
def file_index(request):
    """Staff index of files, most recently updated first (no search)."""
    context = get_index_items(
        request,
        File,
        active_nav='dropdown',
        app_settings_model=AppSettings,
        order_by=('-updated', ))
    return render(request, 'file_index.html', context)
def home(request):
    """Dashboard/home page (no staff requirement on this view).

    Builds a context with recent notes, projects, times, invoices and
    reports; note columns 'due' and 'hidden' are switched off for the
    dashboard table.
    """
    context = get_page_items(
        request,
        app_settings_model=AppSettings,
        company_model=Company,
        columns_visible={'note': {
            'due': 'false',
            'hidden': 'false'
        }, },
        invoice_model=Invoice,
        note_model=Note,
        order_by={
            'note': ('-updated', ),
            'project': ('-updated', ),
            'time': ('-updated', ),
        },
        project_model=Project,
        time_model=Time,
        report_model=Report)
    return render(request, 'home.html', context)
@staff_member_required
def invoice(request, pk=None):
    """Staff-only invoice detail page.

    With the ``pdf`` context flag set, exports the invoice as a PDF
    named ``<company>_<MODEL>_<document id>``; otherwise renders the
    HTML page.  Time entries are ordered by date for display.
    """
    context = get_page_items(
        request,
        app_settings_model=AppSettings,
        company_model=Company,
        model=Invoice,
        order_by={'time': ('date', )},  # For time entries
        pk=pk,
        time_model=Time)
    if not context['pdf']:
        return render(request, 'invoice.html', context)
    response = HttpResponse(content_type='application/pdf')
    # Fall back to the URL pk when the invoice has no document_id.
    doc_id = context['item'].document_id or pk
    parts = [
        get_company_name(context['company']),
        context['model_name'].upper(),
        str(doc_id),
    ]
    response['Content-Disposition'] = 'filename=%s.pdf' % '_'.join(parts)
    return generate_pdf(
        'pdf_invoice.html', context=context, file_object=response)
@staff_member_required
def invoice_edit(request, pk=None):
    """Create or edit an invoice via the generic edit view."""
    return edit(
        request,
        form_model=InvoiceForm,
        model=Invoice,
        active_nav='invoice',
        company_model=Company,
        project_model=Project,
        pk=pk, )
@staff_member_required
def invoice_index(request):
    """Searchable staff index of invoices, most recently updated first."""
    search_fields = (
        'client__name',
        'document_id',
        'issue_date',
        'project__name',
        'subject', )
    context = get_index_items(
        request,
        Invoice,
        active_nav='invoice',
        app_settings_model=AppSettings,
        edit_url='invoice_edit',
        order_by=('-updated', ),
        search_fields=search_fields,
        show_search=True)
    return render(request, 'invoice_index.html', context)
def login(request):
    """Log a user in from POSTed credentials.

    On success the login is recorded in the Log (with the caller's
    city, looked up from the request) and the user is redirected home;
    on failure a warning message is queued and the user is redirected
    home as well.  GET requests just render the login form.
    """
    context = {'login': True}
    if request.method != 'POST':
        return render(request, 'login.html', context)
    user = authenticate(
        request,
        username=request.POST['username'],
        password=request.POST['password'])
    if user is None:
        messages.add_message(request, messages.WARNING, 'Login failed.')
        return HttpResponseRedirect(reverse('home'))
    # https://stackoverflow.com/a/39316967/185820
    auth_login(request, user)
    city_data = get_client_city(request)
    log = Log(entry='%s logged in from %s' % (user, city_data))
    log.save()
    return HttpResponseRedirect(reverse('home'))
@staff_member_required
def log_index(request):
    """Staff index of log entries, most recently updated first."""
    context = get_index_items(
        request,
        Log,
        active_nav='dropdown',
        app_settings_model=AppSettings,
        order_by=('-updated', ),
        search_fields=('entry', ))
    return render(request, 'log_index.html', context)
@staff_member_required
def newsletter(request, pk=None):
    """Staff-only newsletter detail page."""
    context = get_page_items(
        request, app_settings_model=AppSettings, model=Newsletter, pk=pk)
    return render(request, 'newsletter.html', context)
@staff_member_required
def newsletter_edit(request, pk=None):
    """Create or edit a newsletter via the generic edit view."""
    return edit(
        request,
        form_model=NewsletterForm,
        model=Newsletter,
        active_nav='dropdown',
        pk=pk)
@staff_member_required
def newsletter_index(request, pk=None):
    """Staff index of newsletters, most recently updated first.

    The pk argument is accepted but unused.
    """
    context = get_index_items(
        request,
        Newsletter,
        active_nav='dropdown',
        app_settings_model=AppSettings,
        order_by=('-updated', ),
        search_fields=('text', ))
    return render(request, 'newsletter_index.html', context)
@staff_member_required
def note(request, pk=None):
    """Staff-only note detail page; with the ``pdf`` context flag set,
    exports the note as a PDF download instead."""
    context = get_page_items(
        request, app_settings_model=AppSettings, model=Note, pk=pk)
    if not context['pdf']:
        return render(request, 'note.html', context)
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'filename=note-%s.pdf' % pk
    return generate_pdf('pdf_note.html', context=context, file_object=response)
# https://stackoverflow.com/a/42038839/185820
@staff_member_required(login_url='login')
def note_edit(request, pk=None):
    """Create or edit a note; unauthenticated staff are sent to the
    app's own login view rather than the admin login (see SO link)."""
    return edit(
        request,
        form_model=NoteForm,
        model=Note,
        active_nav='note',
        app_settings_model=AppSettings,
        client_model=Client,
        company_model=Company,
        pk=pk)
@staff_member_required
def note_index(request, pk=None):
    """Searchable staff index of notes, active and most recent first.

    The pk argument is accepted but unused.
    """
    context = get_index_items(
        request,
        Note,
        active_nav='note',
        app_settings_model=AppSettings,
        order_by=('-active', '-updated'),
        search_fields=('note', 'title'),
        show_search=True)
    context['edit_url'] = 'note_edit'
    return render(request, 'note_index.html', context)
@staff_member_required
def project(request, pk=None):
    """Staff-only project detail page with related contacts, estimates,
    invoices, users and date-ordered time entries in context."""
    context = get_page_items(
        request,
        app_settings_model=AppSettings,
        model=Project,
        contact_model=Contact,
        estimate_model=Estimate,
        invoice_model=Invoice,
        user_model=User,
        order_by={'time': ('date', )},  # For time entries
        time_model=Time,
        pk=pk)
    return render(request, 'project.html', context)
@staff_member_required
def project_edit(request, pk=None):
    """Create or edit a project via the generic edit view."""
    return edit(
        request,
        form_model=ProjectForm,
        model=Project,
        client_model=Client,
        active_nav='project',
        pk=pk)
@staff_member_required
def project_index(request, pk=None):
    """Searchable staff index of projects, active and most recent first,
    with the notes column enabled.

    The pk argument is accepted but unused.
    """
    context = get_index_items(
        request,
        Project,
        active_nav='project',
        app_settings_model=AppSettings,
        columns_visible={'project': {
            'notes': 'true',
        }, },
        edit_url='project_edit',
        order_by=(
            '-active',
            '-updated', ),
        search_fields=('id', 'name'),
        show_search=True)
    return render(request, 'project_index.html', context)
@staff_member_required
def proposal(request, pk=None):
    """Staff-only proposal detail page; with the ``pdf`` context flag
    set, exports the proposal as a PDF download instead."""
    context = get_page_items(
        request, company_model=Company, model=Proposal, pk=pk)
    if context['pdf']:
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'filename=proposal-%s.pdf' % pk
        return generate_pdf(
            'pdf_proposal.html', context=context, file_object=response)
    else:
        return render(request, 'proposal.html', context)
@staff_member_required
def proposal_edit(request, pk=None):
    """Create or edit a proposal via the generic edit view."""
    return edit(
        request,
        form_model=ProposalForm,
        model=Proposal,
        active_nav='proposal',
        company_model=Company,
        pk=pk)
@staff_member_required
def proposal_index(request, pk=None):
    """Staff index of proposals, most recently updated first.

    The pk argument is accepted but unused.
    """
    context = get_index_items(
        request,
        Proposal,
        active_nav='proposal',
        app_settings_model=AppSettings,
        order_by=('-updated', ),
        show_search=True)
    context['edit_url'] = 'proposal_edit'
    return render(request, 'proposal_index.html', context)
@staff_member_required
def report(request, pk=None):
    """Staff-only report detail page; with the ``pdf`` context flag set,
    exports the report as a PDF download instead."""
    context = get_page_items(
        request, model=Report, app_settings_model=AppSettings, pk=pk)
    if context['pdf']:
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'filename=report-%s.pdf' % pk
        return generate_pdf(
            'pdf_report.html', context=context, file_object=response)
    else:
        return render(request, 'report.html', context)
@staff_member_required
def report_edit(request, pk=None):
    """Create or edit a report via the generic edit view."""
    return edit(
        request,
        form_model=ReportForm,
        model=Report,
        active_nav='dropdown',
        invoice_model=Invoice,
        pk=pk)
@staff_member_required
def report_index(request):
    """Searchable staff index of reports, most recent and active first."""
    context = get_index_items(
        request,
        Report,
        active_nav='dropdown',
        app_settings_model=AppSettings,
        edit_url='report_edit',
        order_by=('-updated', '-active'),
        search_fields=('id', 'name', 'gross', 'net'),
        show_search=True)
    return render(request, 'report_index.html', context)
@staff_member_required(login_url='login')
def mail(request):
    """Mail composition page, rendered through the generic edit view
    with page_type='edit'; contacts and notes are made available."""
    return edit(
        request,
        contact_model=Contact,
        form_model=MailForm,
        note_model=Note,
        page_type='edit')
# https://stackoverflow.com/a/42038839/185820
@staff_member_required(login_url='login')
def service_edit(request, pk=None):
    """Create or edit a service; unauthenticated staff are sent to the
    app's own login view rather than the admin login (see SO link)."""
    return edit(
        request,
        form_model=ServiceForm,
        model=Service,
        active_nav='dropdown',
        company_model=Company,
        pk=pk)
@staff_member_required
def settings_app(request):
    """Staff-only app-settings display page."""
    context = get_page_items(
        request, model=AppSettings, app_settings_model=AppSettings)
    return render(request, 'settings.html', context)
@staff_member_required
def settings_app_edit(request, pk=None):
    """Edit the app settings; pk is ignored and hard-wired to 1 because
    AppSettings is treated as a singleton."""
    return edit(
        request,
        form_model=AppSettingsForm,
        model=AppSettings,
        active_nav='dropdown',
        pk=1)
@staff_member_required
def settings_company_edit(request, pk=None):
    """Edit the company settings; pk is ignored and hard-wired to 1
    because Company is treated as a singleton."""
    return edit(
        request,
        form_model=CompanyForm,
        model=Company,
        active_nav='dropdown',
        pk=1)
@staff_member_required
def settings_company(request):
    """Staff-only company-settings display page."""
    context = get_page_items(
        request, app_settings_model=AppSettings, model=Company)
    return render(request, 'company.html', context)
@staff_member_required
def settings_contract(request):
    """Staff-only contract-settings display page."""
    context = get_page_items(
        request, model=ContractSettings, app_settings_model=AppSettings)
    return render(request, 'contract_settings.html', context)
@staff_member_required
def settings_contract_edit(request, pk=None):
    """Edit the contract settings; pk is ignored and hard-wired to 1
    because ContractSettings is treated as a singleton."""
    return edit(
        request,
        form_model=ContractSettingsForm,
        model=ContractSettings,
        pk=1,
        active_nav='dropdown')
@staff_member_required
def task(request, pk=None):
    """Staff-only task detail page."""
    context = get_page_items(
        request, model=Task, app_settings_model=AppSettings, pk=pk)
    return render(request, 'task.html', context)
@staff_member_required
def task_edit(request, pk=None):
    """Create or edit a task via the generic edit view."""
    return edit(
        request, form_model=TaskForm, model=Task, active_nav='task', pk=pk)
@staff_member_required
def task_index(request):
    """Searchable staff index of tasks, most recently updated first."""
    context = get_index_items(
        request,
        Task,
        active_nav='task',
        app_settings_model=AppSettings,
        edit_url='task_edit',
        order_by=('-updated', ),
        search_fields=('name', ),
        show_search=True)
    return render(request, 'task_index.html', context)
@login_required
def time(request, pk=None):
    """Time-entry detail page; is_allowed_to_view enforces per-user
    access (any logged-in user may reach this view)."""
    return is_allowed_to_view(
        Time,
        pk,
        request,
        app_settings_model=AppSettings,
        profile_model=Profile)
@login_required
def time_edit(request, pk=None):
    """Create or edit a time entry; staff users get the admin form
    variant, everyone else the restricted one."""
    form = AdminTimeForm if request.user.is_staff else TimeForm
    return edit(
        request,
        form_model=form,
        model=Time,
        active_nav='time',
        invoice_model=Invoice,
        estimate_model=Estimate,
        project_model=Project,
        task_model=Task,
        time_model=Time,
        pk=pk)
@login_required
def time_index(request):
    """Staff-only index of time entries; non-staff users are redirected
    to the login view.

    IMPROVEMENT: the staff check now runs before the index context is
    built -- the original constructed the full searchable context via
    get_index_items and then discarded it for non-staff requests.
    """
    if not request.user.is_staff:
        return HttpResponseRedirect(reverse('login'))
    search_fields = ('client__name', 'date', 'log', 'pk', 'project__name',
                     'invoice__document_id', 'user__username')
    context = get_index_items(
        request,
        Time,
        active_nav='time',
        app_settings_model=AppSettings,
        columns_visible={
            'time': {
                'invoiced': 'true',
                'invoice': 'true',
                'estimate': 'true',
            },
        },
        edit_url='time_edit',
        order_by=('-updated', ),
        search_fields=search_fields,
        show_search=True)
    return render(request, 'time_index.html', context)
@login_required
def user(request, pk=None):
    """User detail page with the user's profile, contacts, projects and
    time entries (most recently updated first) in context."""
    order_by = {
        'time': ('-updated', ),
        'project': ('-updated', ),
    }
    context = get_page_items(
        request,
        app_settings_model=AppSettings,
        contact_model=Contact,
        model=User,
        order_by=order_by,
        profile_model=Profile,
        project_model=Project,
        time_model=Time,
        pk=pk)
    return render(request, 'user.html', context)
@login_required
def user_edit(request, pk=None):
    """Edit a user's profile; staff users get the admin profile form,
    everyone else the restricted one."""
    form = AdminProfileForm if request.user.is_staff else ProfileForm
    return edit(
        request,
        form_model=form,
        model=Profile,
        active_nav='dropdown',
        pk=pk)
@staff_member_required
def user_index(request):
    """Staff index of users, active/most-recent profiles first (no search)."""
    context = get_index_items(
        request,
        User,
        active_nav='dropdown',
        app_settings_model=AppSettings,
        company_model=Company,
        contact_model=Contact,
        order_by=('-profile__active', '-profile__updated'),
        show_search=False)
    return render(request, 'user_index.html', context)
@staff_member_required
def plot(request):
"""
"""
Update
from .forms import AdminProfileForm
from .forms import AdminTimeForm
from .forms import AppSettingsForm
from .forms import ClientForm
from .forms import CompanyForm
from .forms import ContactForm
from .forms import ContractForm
from .forms import ContractSettingsForm
from .forms import EstimateForm
from .forms import FileForm
from .forms import InvoiceForm
from .forms import MailForm
from .forms import NewsletterForm
from .forms import NoteForm
from .forms import ProfileForm
from .forms import ProjectForm
from .forms import ProposalForm
from .forms import ReportForm
from .forms import ServiceForm
from .forms import TaskForm
from .forms import TimeForm
from .models import AppSettings
from .models import Client
from .models import Company
from .models import Contact
from .models import Contract
from .models import ContractSettings
from .models import Estimate
from .models import File
from .models import Invoice
from .models import Log
from .models import Newsletter
from .models import Note
from .models import Profile
from .models import Project
from .models import Proposal
from .models import Report
from .models import Service
from .models import Testimonial
from .models import Task
from .models import Time
from .serializers import ClientSerializer
from .serializers import ProfileSerializer
from .serializers import ServiceSerializer
from .serializers import TestimonialSerializer
from .utils import edit
from .utils import generate_doc
from .utils import get_client_city
from .utils import get_company_name
from .utils import get_index_items
from .utils import get_page_items
from .utils import is_allowed_to_view
from django.contrib import messages
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import render
from django_xhtml2pdf.utils import generate_pdf
from io import BytesIO
from rest_framework import viewsets
# Create your views here.
class ClientViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Client.objects.filter(published=True).order_by('name')
serializer_class = ClientSerializer
class ServiceViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Service.objects.filter(active=True).order_by('name')
serializer_class = ServiceSerializer
class TestimonialViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Testimonial.objects.filter(active=True).order_by('-issue_date')
serializer_class = TestimonialSerializer
class ProfileViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Profile.objects.filter(
published=True).order_by('user__first_name')
serializer_class = ProfileSerializer
@staff_member_required
def client(request, pk=None):
context = get_page_items(
request,
app_settings_model=AppSettings,
contact_model=Contact,
contract_model=Contract,
model=Client,
pk=pk,
project_model=Project)
return render(request, 'client.html', context)
@staff_member_required
def client_edit(request, pk=None):
return edit(
request,
form_model=ClientForm,
model=Client,
active_nav='client',
pk=pk)
@staff_member_required
def client_index(request):
context = get_index_items(
request,
Client,
active_nav='client',
app_settings_model=AppSettings,
edit_url='client_edit',
order_by=('-active', '-updated', 'name'),
search_fields=('address', 'name'),
show_search=True)
return render(request, 'client_index.html', context)
@staff_member_required
def contact(request, pk=None):
context = get_page_items(
request, app_settings_model=AppSettings, model=Contact, pk=pk)
return render(request, 'contact.html', context)
@staff_member_required
def contact_edit(request, pk=None):
return edit(
request,
form_model=ContactForm,
model=Contact,
active_nav='contact',
client_model=Client,
pk=pk)
@staff_member_required
def contact_index(request):
context = get_index_items(
request,
Contact,
active_nav='contact',
app_settings_model=AppSettings,
edit_url='contact_edit',
order_by=('-active', 'first_name'),
search_fields=('first_name', 'last_name', 'email', 'notes', 'pk'),
show_search=True)
return render(request, 'contact_index.html', context)
@staff_member_required
def contract(request, pk=None):
    """Staff-only contract detail page with PDF and DOCX export.

    When the context flags ``pdf`` or ``doc`` are set (by
    get_page_items from the request), the contract is returned as a
    downloadable PDF (via xhtml2pdf) or Word document (via
    generate_doc); otherwise the normal HTML page is rendered.
    """
    company = Company.get_solo()
    context = get_page_items(
        request, company_model=Company, model=Contract, pk=pk, time_model=Time)
    if context['pdf']:
        response = HttpResponse(content_type='application/pdf')
        filename = get_company_name(company)
        response['Content-Disposition'] = 'filename=%s.pdf' % filename
        return generate_pdf(
            'pdf_contract.html', context=context, file_object=response)
    if context['doc']:
        # https://stackoverflow.com/a/24122313/185820
        # BUG FIX: the original called generate_doc(contract), passing
        # this view function itself.  Pass the Contract instance that
        # get_page_items placed in the context (cf. the invoice view,
        # which reads context['item'] the same way).
        document = generate_doc(context['item'])
        filename = get_company_name(company)
        f = BytesIO()
        document.save(f)
        length = f.tell()
        f.seek(0)
        content_type = 'application/vnd.openxmlformats-'
        content_type += 'officedocument.wordprocessingml.document'
        response = HttpResponse(f.getvalue(), content_type=content_type)
        response['Content-Disposition'] = 'filename=%s.docx' % filename
        response['Content-Length'] = length
        return response
    else:
        return render(request, 'contract.html', context)
@staff_member_required
def contract_edit(request, pk=None):
"""
"""
return edit(
request,
form_model=ContractForm,
model=Contract,
active_nav='contract',
pk=pk)
@staff_member_required
def contract_index(request):
"""
"""
context = get_index_items(
request,
Contract,
active_nav='contract',
order_by=('-updated', ),
app_settings_model=AppSettings)
return render(request, 'contract_index.html', context)
@staff_member_required
def estimate(request, pk=None):
    """Staff-only estimate detail page.

    With the ``pdf`` context flag set, exports the estimate as a PDF
    download (reusing the invoice PDF template); otherwise renders the
    HTML page.  Time entries are ordered by date for display.
    """
    order_by = {'time': ('date', ), }
    context = get_page_items(
        request,
        app_settings_model=AppSettings,
        company_model=Company,
        model=Estimate,
        order_by=order_by,
        pk=pk,
        time_model=Time)
    if context['pdf']:
        response = HttpResponse(content_type='application/pdf')
        # str(pk): pk normally arrives as a string from the URLconf,
        # but guard against a None/int pk so join() cannot raise
        # TypeError.
        filename = '-'.join(['estimate', str(pk)])
        response['Content-Disposition'] = 'filename=%s.pdf' % filename
        return generate_pdf(
            'pdf_invoice.html', context=context, file_object=response)
    else:
        return render(request, 'estimate.html', context)
@staff_member_required
def estimate_edit(request, pk=None):
return edit(
request,
form_model=EstimateForm,
model=Estimate,
active_nav='estimate',
company_model=Company,
project_model=Project,
pk=pk)
@staff_member_required
def estimate_index(request):
company = Company.get_solo()
context = get_index_items(
request,
Estimate,
active_nav='estimate',
app_settings_model=AppSettings,
edit_url='estimate_edit',
order_by=('-issue_date', ),
search_fields=('subject', ),
show_search=True)
context['company'] = company
return render(request, 'estimate_index.html', context)
@staff_member_required
def file_view(request, pk=None):
context = get_page_items(
request,
app_settings_model=AppSettings,
company_model=Company,
model=File,
pk=pk)
return render(request, 'file.html', context)
@staff_member_required
def file_edit(request, pk=None):
return edit(
request,
form_model=FileForm,
model=File,
active_nav='dropdown',
company_model=Company,
pk=pk, )
@staff_member_required
def file_index(request):
context = get_index_items(
request,
File,
active_nav='dropdown',
app_settings_model=AppSettings,
order_by=('-updated', ))
return render(request, 'file_index.html', context)
def home(request):
context = get_page_items(
request,
app_settings_model=AppSettings,
company_model=Company,
columns_visible={'note': {
'due': 'false',
'hidden': 'false'
}, },
invoice_model=Invoice,
note_model=Note,
order_by={
'note': ('-updated', ),
'project': ('-updated', ),
'time': ('-updated', ),
},
project_model=Project,
time_model=Time,
report_model=Report)
return render(request, 'home.html', context)
@staff_member_required
def invoice(request, pk=None):
context = get_page_items(
request,
app_settings_model=AppSettings,
company_model=Company,
model=Invoice,
order_by={'time': ('date', )}, # For time entries
pk=pk,
time_model=Time)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
company_name = get_company_name(context['company'])
model_name = context['model_name'].upper()
doc_id = context['item'].document_id or pk
filename = '_'.join([company_name, model_name, str(doc_id)])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_invoice.html', context=context, file_object=response)
else:
return render(request, 'invoice.html', context)
@staff_member_required
def invoice_edit(request, pk=None):
return edit(
request,
form_model=InvoiceForm,
model=Invoice,
active_nav='invoice',
company_model=Company,
project_model=Project,
pk=pk, )
@staff_member_required
def invoice_index(request):
search_fields = (
'client__name',
'document_id',
'issue_date',
'project__name',
'subject', )
context = get_index_items(
request,
Invoice,
active_nav='invoice',
app_settings_model=AppSettings,
edit_url='invoice_edit',
order_by=('-updated', ),
search_fields=search_fields,
show_search=True)
return render(request, 'invoice_index.html', context)
def login(request):
context = {}
context['login'] = True
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
# https://stackoverflow.com/a/39316967/185820
auth_login(request, user)
city_data = get_client_city(request)
log = Log(entry='%s logged in from %s' % (user, city_data))
log.save()
return HttpResponseRedirect(reverse('home'))
else:
messages.add_message(request, messages.WARNING, 'Login failed.')
return HttpResponseRedirect(reverse('home'))
return render(request, 'login.html', context)
@staff_member_required
def log_index(request):
context = get_index_items(
request,
Log,
active_nav='dropdown',
app_settings_model=AppSettings,
order_by=('-updated', ),
search_fields=('entry', ))
return render(request, 'log_index.html', context)
@staff_member_required(login_url='login')
def mail(request):
"""
"""
return edit(
request,
contact_model=Contact,
form_model=MailForm,
note_model=Note,
page_type='edit')
@staff_member_required
def newsletter(request, pk=None):
"""
"""
context = get_page_items(
request, app_settings_model=AppSettings, model=Newsletter, pk=pk)
return render(request, 'newsletter.html', context)
@staff_member_required
def newsletter_edit(request, pk=None):
"""
"""
return edit(
request,
form_model=NewsletterForm,
model=Newsletter,
active_nav='dropdown',
pk=pk)
@staff_member_required
def newsletter_index(request, pk=None):
"""
"""
context = get_index_items(
request,
Newsletter,
active_nav='dropdown',
app_settings_model=AppSettings,
order_by=('-updated', ),
search_fields=('text', ))
return render(request, 'newsletter_index.html', context)
@staff_member_required
def note(request, pk=None):
context = get_page_items(
request, app_settings_model=AppSettings, model=Note, pk=pk)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=note-%s.pdf' % pk
return generate_pdf(
'pdf_note.html', context=context, file_object=response)
else:
return render(request, 'note.html', context)
# https://stackoverflow.com/a/42038839/185820
@staff_member_required(login_url='login')
def note_edit(request, pk=None):
return edit(
request,
form_model=NoteForm,
model=Note,
active_nav='note',
app_settings_model=AppSettings,
client_model=Client,
company_model=Company,
pk=pk)
@staff_member_required
def note_index(request, pk=None):
context = get_index_items(
request,
Note,
active_nav='note',
app_settings_model=AppSettings,
order_by=('-active', '-updated'),
search_fields=('note', 'title'),
show_search=True)
context['edit_url'] = 'note_edit'
return render(request, 'note_index.html', context)
@staff_member_required
def project(request, pk=None):
context = get_page_items(
request,
app_settings_model=AppSettings,
model=Project,
contact_model=Contact,
estimate_model=Estimate,
invoice_model=Invoice,
user_model=User,
order_by={'time': ('date', )}, # For time entries
time_model=Time,
pk=pk)
return render(request, 'project.html', context)
@staff_member_required
def project_edit(request, pk=None):
return edit(
request,
form_model=ProjectForm,
model=Project,
client_model=Client,
active_nav='project',
pk=pk)
@staff_member_required
def project_index(request, pk=None):
context = get_index_items(
request,
Project,
active_nav='project',
app_settings_model=AppSettings,
columns_visible={'project': {
'notes': 'true',
}, },
edit_url='project_edit',
order_by=(
'-active',
'-updated', ),
search_fields=('id', 'name'),
show_search=True)
return render(request, 'project_index.html', context)
@staff_member_required
def proposal(request, pk=None):
context = get_page_items(
request, company_model=Company, model=Proposal, pk=pk)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=proposal-%s.pdf' % pk
return generate_pdf(
'pdf_proposal.html', context=context, file_object=response)
else:
return render(request, 'proposal.html', context)
@staff_member_required
def proposal_edit(request, pk=None):
"""
"""
return edit(
request,
form_model=ProposalForm,
model=Proposal,
active_nav='proposal',
company_model=Company,
pk=pk)
@staff_member_required
def proposal_index(request, pk=None):
context = get_index_items(
request,
Proposal,
active_nav='proposal',
app_settings_model=AppSettings,
order_by=('-updated', ),
show_search=True)
context['edit_url'] = 'proposal_edit'
return render(request, 'proposal_index.html', context)
@staff_member_required
def report(request, pk=None):
context = get_page_items(
request, model=Report, app_settings_model=AppSettings, pk=pk)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=report-%s.pdf' % pk
return generate_pdf(
'pdf_report.html', context=context, file_object=response)
else:
return render(request, 'report.html', context)
@staff_member_required
def report_edit(request, pk=None):
return edit(
request,
form_model=ReportForm,
model=Report,
active_nav='dropdown',
invoice_model=Invoice,
pk=pk)
@staff_member_required
def report_index(request):
context = get_index_items(
request,
Report,
active_nav='dropdown',
app_settings_model=AppSettings,
edit_url='report_edit',
order_by=('-updated', '-active'),
search_fields=('id', 'name', 'gross', 'net'),
show_search=True)
return render(request, 'report_index.html', context)
# https://stackoverflow.com/a/42038839/185820
@staff_member_required(login_url='login')
def service_edit(request, pk=None):
    """Create or update a service via the shared ``edit`` view."""
    options = dict(
        active_nav='dropdown',
        company_model=Company,
        form_model=ServiceForm,
        model=Service,
        pk=pk,
    )
    return edit(request, **options)
@staff_member_required
def settings_app(request):
    """Display the application-wide settings page."""
    context = get_page_items(
        request, app_settings_model=AppSettings, model=AppSettings)
    return render(request, 'settings.html', context)
@staff_member_required
def settings_app_edit(request, pk=None):
    """Edit the application settings singleton.

    The ``pk`` argument is accepted for URL-pattern compatibility but
    ignored: settings always live at pk=1.
    """
    return edit(
        request,
        active_nav='dropdown',
        form_model=AppSettingsForm,
        model=AppSettings,
        pk=1)
@staff_member_required
def settings_company_edit(request, pk=None):
    """Edit the company settings singleton.

    The ``pk`` argument is accepted for URL-pattern compatibility but
    ignored: the company record always lives at pk=1.
    """
    return edit(
        request,
        active_nav='dropdown',
        form_model=CompanyForm,
        model=Company,
        pk=1)
@staff_member_required
def settings_company(request):
    """Display the company settings page."""
    context = get_page_items(
        request, model=Company, app_settings_model=AppSettings)
    return render(request, 'company.html', context)
@staff_member_required
def settings_contract(request):
    """Display the contract settings page."""
    context = get_page_items(
        request, app_settings_model=AppSettings, model=ContractSettings)
    return render(request, 'contract_settings.html', context)
@staff_member_required
def settings_contract_edit(request, pk=None):
    """Edit the contract settings singleton.

    The ``pk`` argument is accepted for URL-pattern compatibility but
    ignored: contract settings always live at pk=1.
    """
    return edit(
        request,
        active_nav='dropdown',
        form_model=ContractSettingsForm,
        model=ContractSettings,
        pk=1)
@staff_member_required
def task(request, pk=None):
    """Show a single task."""
    context = get_page_items(
        request, app_settings_model=AppSettings, model=Task, pk=pk)
    return render(request, 'task.html', context)
@staff_member_required
def task_edit(request, pk=None):
    """Create or update a task via the shared ``edit`` view."""
    return edit(
        request,
        active_nav='task',
        form_model=TaskForm,
        model=Task,
        pk=pk)
@staff_member_required
def task_index(request):
    """List tasks (most recently updated first) with name search."""
    options = dict(
        active_nav='task',
        app_settings_model=AppSettings,
        edit_url='task_edit',
        order_by=('-updated', ),
        search_fields=('name', ),
        show_search=True,
    )
    context = get_index_items(request, Task, **options)
    return render(request, 'task_index.html', context)
@login_required
def time(request, pk=None):
    """Show a single time entry, enforcing per-user visibility rules."""
    # Delegate the permission decision entirely to is_allowed_to_view.
    return is_allowed_to_view(
        Time,
        pk,
        request,
        profile_model=Profile,
        app_settings_model=AppSettings)
@login_required
def time_edit(request, pk=None):
    """Create or update a time entry; staff get the admin form variant."""
    form_class = AdminTimeForm if request.user.is_staff else TimeForm
    return edit(
        request,
        active_nav='time',
        estimate_model=Estimate,
        form_model=form_class,
        invoice_model=Invoice,
        model=Time,
        project_model=Project,
        task_model=Task,
        time_model=Time,
        pk=pk)
@login_required
def time_index(request):
    """List time entries for staff users.

    Non-staff users are redirected to the login page. The staff guard runs
    before the index context is built — the original built the full
    ``get_index_items`` context and then threw it away for non-staff users.
    """
    # Guard clause first: non-staff users never need the index query.
    if not request.user.is_staff:
        return HttpResponseRedirect(reverse('login'))
    search_fields = ('client__name', 'date', 'log', 'pk', 'project__name',
                     'invoice__document_id', 'user__username')
    context = get_index_items(
        request,
        Time,
        active_nav='time',
        app_settings_model=AppSettings,
        columns_visible={
            'time': {
                'invoiced': 'true',
                'invoice': 'true',
                'estimate': 'true',
            },
        },
        edit_url='time_edit',
        order_by=('-updated', ),
        search_fields=search_fields,
        show_search=True)
    return render(request, 'time_index.html', context)
@login_required
def user(request, pk=None):
    """Show a user's detail page with their projects and time entries."""
    # Per-model ordering for the related listings on the page.
    related_order = {
        'project': ('-updated', ),
        'time': ('-updated', ),
    }
    context = get_page_items(
        request,
        app_settings_model=AppSettings,
        contact_model=Contact,
        model=User,
        order_by=related_order,
        profile_model=Profile,
        project_model=Project,
        time_model=Time,
        pk=pk)
    return render(request, 'user.html', context)
@login_required
def user_edit(request, pk=None):
    """Edit a user's profile; staff get the admin form variant."""
    form_class = AdminProfileForm if request.user.is_staff else ProfileForm
    return edit(
        request,
        active_nav='dropdown',
        form_model=form_class,
        model=Profile,
        pk=pk)
@staff_member_required
def user_index(request):
    """List users, active profiles first, without the search box."""
    options = dict(
        active_nav='dropdown',
        app_settings_model=AppSettings,
        company_model=Company,
        contact_model=Contact,
        order_by=('-profile__active', '-profile__updated'),
        show_search=False,
    )
    context = get_index_items(request, User, **options)
    return render(request, 'user_index.html', context)
@staff_member_required
def plot(request):
"""
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.