CombinedText stringlengths 4 3.42M |
|---|
"""
"""
from Tkinter import *
from re import escape
import string
import mimetypes
class DataEvent(object):
    """
    Re-emit every keystroke on a widget as a <<Data>> virtual event
    whose data field carries the pressed key's character.
    """

    def __init__(self, widget):
        self.widget = widget
        # add=True keeps any previously installed <Key> handlers alive.
        self.widget.bind('<Key>', self.dispatch_data, add=True)

    def dispatch_data(self, event):
        # NOTE(review): Python 2 chr() only accepts 0..255; special keys
        # (arrows, function keys) carry larger keysym_num values --
        # confirm those never reach this handler.
        keysym = chr(event.keysym_num)
        self.widget.event_generate('<<Data>>', data=keysym)
class IdleEvent(object):
    """
    Generate an <<Idle>> virtual event on a widget once no <<Data>>
    event has been seen for `timeout` milliseconds.
    """

    def __init__(self, widget):
        # Bug fix: self.widget must be assigned before it is used to
        # install the binding. The original referenced self.widget
        # first, which only worked when a cooperating mixin (DataEvent)
        # had already set the attribute.
        self.widget = widget
        self.widget.bind('<<Data>>', self.dispatch_idle, add=True)
        # Quiet period (ms) before <<Idle>> fires.
        self.timeout = 400
        # Id of the pending after() callback; empty when none is scheduled.
        self.funcid = ''

    def dispatch_idle(self, event):
        """
        Restart the idle timer: cancel any pending callback and schedule
        a fresh one.
        """
        # Guard against cancelling before anything was ever scheduled
        # (the original passed '' to after_cancel on the first event).
        if self.funcid:
            self.widget.after_cancel(self.funcid)
        self.funcid = self.widget.after(self.timeout,
            lambda: self.widget.event_generate('<<Idle>>'))
class AreaVi(Text, DataEvent, IdleEvent):
    # The AreaVi instance plugins should act upon (see active()).
    ACTIVE = None

    def __init__(self, default_filename, *args, **kwargs):
        """
        This class receives all Text widget arguments
        and one named default_filename which means
        the filename that is saved when no filename
        is specified.

        default_filename:
        The default path file where contents are saved.
        """
        Text.__init__(self, *args, **kwargs)
        DataEvent.__init__(self, self)
        IdleEvent.__init__(self, self)

        # Mode table: mode id -> opt flag (see add_mode/chmode).
        self.setup = dict()

        # Maybe it should be?
        # abspath(default_filename)
        self.default_filename = default_filename

        # The file's path and name.
        self.filename = default_filename

        # Marks used by the movement/selection helpers below.
        self.mark_set('(CURSOR_LAST_COL)', '1.0')
        self.mark_set('(RANGE_SEL_MARK)', '1.0')
        self.mark_set('(BLOCK_SEL_MARK)', '1.0')

        # def cave(event):
        #     AreaVi.ACTIVE = event.widget
        # self.hook(-1, '<FocusIn>', cave)
        AreaVi.ACTIVE = self

        # Charset used by load_data/save_data; see decode().
        self.charset = 'utf-8'
        # Sequence-translation table consulted by hook(); see update_map().
        self.map = {}
    def update_map(self, keymap):
        """
        Merge keymap into the sequence-translation table consulted by
        hook(); keys are (mode, sequence) pairs.
        """
        self.map.update(keymap)
    def active(self):
        """
        Set this AreaVi instance as the global target (AreaVi.ACTIVE).

        Plugins that expose python functions to be executed from vy
        should access AreaVi.ACTIVE when having to manipulate the
        AreaVi instance that was set as target.
        """
        AreaVi.ACTIVE = self
    def chmode(self, id):
        """
        Switch this AreaVi instance to mode id.

        area = AreaVi('None')
        area.chmode('INSERT')

        puts area in INSERT mode. Generates <<Chmode>> and
        <<Chmode-ID>>.
        """
        opt = self.setup[id]
        self.id = id
        mode0 = 'mode%s-1' % self
        mode1 = 'mode%s%s' % (self, id)

        # The bindtag order decides which handlers run; the 'Text'
        # class tag is only kept when the mode was registered with
        # opt=True, so default Text editing bindings stay active.
        if opt: self.bindtags((mode0, mode1, self, 'Text', '.'))
        else: self.bindtags((mode0, mode1, self, '.'))

        self.event_generate('<<Chmode>>')
        self.event_generate('<<Chmode-%s>>' % id)
    def add_mode(self, id, opt=False):
        """
        Register a new mode. opt tells whether the mode propagates
        events to the default Text class bindings (see chmode).

        def install(area):
            area.add_mode('MODE')

        adds a mode named MODE. With opt=True printable keystrokes are
        dropped over the AreaVi instance, i.e. an editing mode:

        def install(area):
            area.add_mode('TYPING', opt=True)
        """
        self.setup[id] = opt

    def del_mode(self, id):
        """
        Remove a mode. Not implemented yet.
        """
        pass
    def hook(self, id, seq, callback, add=True):
        """
        Bind callback to the event sequence seq while in mode id:

        def callback(event):
            event.widget.insert('insert', 'An event happened!')

        def install(area):
            area.hook('INSERT', '<Key-i>', callback)

        Whenever <Key-i> happens in INSERT mode, callback is called
        with the event object.
        """
        # self.map may translate a (mode, seq) pair into one or more
        # replacement pairs; by default the pair binds to itself.
        for id, seq in self.map.get((id, seq), ((id, seq), )):
            self.bind_class('mode%s%s' % (self, id), seq, callback, add)
def unhook(self, id, seq):
"""
The opposite of AreaVi.hook.
area.unhook('mode' '<Event>')
"""
mode = 'mode%s%s' % (self, id)
self.unbind_class(mode, seq)
def install(self, *args):
"""
It is a shorthand for AreaVi.hook. It is used as follows:
def install(area):
area.install(('MODE1', '<Event1>', callback1),
('MODE2', '<Event2>', callback2),
('MODE3', '<Event3>', callback3), ...)
"""
for ind in args:
self.hook(*ind)
def uninstall(self, *args):
"""
The opposite of AreaVi.install.
area.uninstall(('mode', '<Event>'), ...)
"""
for id, seq, callback in args:
self.unhook(id, seq, callback)
    def append(self, data, *args):
        """
        Insert data at the end of the widget, tag the inserted region
        with every tag name given in args, then make the cursor
        visible.
        """
        # This is sort of odd, it seems that
        # i have to add -1l for it to work.
        # It shouldn't be necessary.
        index0 = self.index('end -1l')
        self.insert('end', data)

        for ind in args:
            self.tag_add(ind, index0,
                '%s +%sc' % (index0, len(data)))

        # self.mark_set('insert', 'end')
        self.see('insert')
def curline(self):
"""
This method returns the string that corresponds to the cursor line.
"""
return self.get('insert linestart', 'insert +1l linestart')
def tag_swap(self, name, index0, index1, *args):
"""
It removes a given tag from index0 to index1 and re adds
the tag to the ranges of text delimited in args.
Example:
DATA_X = 'It is black.\n'
DATA_Y = 'It is blue.\n'
text = Text()
text.pack()
text.insert('1.0', DATA_X)
text.insert('2.0', DATA_Y)
text.tag_add('X', '1.0', '1.0 lineend')
text.tag_add('Y', '2.0', '2.0 lineend')
text.tag_config('X', background='black')
text.tag_config('Y', foreground='blue')
text.tag_update(text, 'X', '1.0', 'end', ('2.0', '2.0 lineend'))
It removes the X tag from '1.0' to 'end' then adds
the X tag to the range '2.0' '2.0 lineend'.
"""
self.tag_remove(name, index0, index1)
for indi, indj in args:
self.tag_add(name, indi, indj)
    def insee(self, index, data):
        """
        Insert data at index, then scroll the cursor into view.
        """
        self.insert(index, data)
        self.see('insert')

    def cmd_like(self):
        """
        Return the contents of the cursor line and delete that line's
        text (the line itself stays, empty).
        """
        data = self.get('insert linestart', 'insert lineend')
        self.delete('insert linestart', 'insert lineend')
        return data
def indref(self, index):
"""
This is a short hand function. It is used to convert a Text index
into two integers like:
a, b = area.indref('insert')
Now, a and b can be manipulated
as numbers.
"""
a, b = self.index(index).split('.')
return int(a), int(b)
    def setcur(self, line, col='0'):
        """
        Place the cursor at (line, col) and scroll it into view.
        """
        self.mark_set('insert', '%s.%s' % (line, col))
        self.see('insert')

    def indint(self, index):
        """
        Split an index string like '2.3' into the integer pair (2, 3).
        Unlike indref, the index is not resolved by the widget first.
        """
        a, b = index.split('.')
        return int(a), int(b)
def indcur(self):
"""
It returns two integers that corresponds to the cursor
position line and col.
"""
a, b = self.indref('insert')
return int(a), int(b)
def seecur(self, index):
"""
Just a shorthand for:
area.mark_set('insert', index)
area.see('insert')
"""
self.mark_set('insert', index)
self.see('insert')
    def is_end(self):
        """
        Return True when the cursor line is NOT the last line of text,
        i.e. there is still room to move down. (The name suggests the
        opposite; kept as-is for compatibility with callers such as
        down().)
        """
        # I have to use 'end -1l linestart' since it seems the 'end' tag
        # corresponds to a one line after the last visible line.
        # So last line lineend != 'end'.
        return self.compare('insert linestart', '!=', 'end -1l linestart')

    def is_start(self):
        """
        Return True when the cursor line is NOT the first line ('1.0'),
        i.e. there is still room to move up. (The name suggests the
        opposite; kept as-is for compatibility with callers such as
        up().)
        """
        return self.compare('insert linestart', '!=', '1.0')
    def down(self):
        """
        Move the cursor one line down, restoring the column stored in
        the (CURSOR_LAST_COL) mark (updated by left()/right()).
        """
        # Already on the last line: nothing to do.
        if not self.is_end():
            return

        a, b = self.indref('(CURSOR_LAST_COL)')
        c, d = self.indcur()
        self.setcur(c + 1, b)

    def up(self):
        """
        Move the cursor one line up, restoring the column stored in
        the (CURSOR_LAST_COL) mark (updated by left()/right()).
        """
        # Already on the first line: nothing to do.
        if not self.is_start():
            return

        a, b = self.indref('(CURSOR_LAST_COL)')
        c, d = self.indcur()
        self.setcur(c - 1, b)
    def left(self):
        """
        Move the cursor one character left.
        """
        self.mark_set('insert', 'insert -1c')
        # The mark used by self.down, self.up.
        self.mark_set('(CURSOR_LAST_COL)', 'insert')

    def right(self):
        """
        Move the cursor one character right.
        """
        self.mark_set('insert', 'insert +1c')
        # The mark used by self.down, self.up.
        self.mark_set('(CURSOR_LAST_COL)', 'insert')

    def start_selection(self):
        """
        Anchor a range selection at the cursor
        (see sel_up/sel_down/sel_left/sel_right).
        """
        self.mark_set('(RANGE_SEL_MARK)', 'insert')

    def start_block_selection(self):
        """
        Anchor a block (rectangular) selection at the cursor
        (see block_up/block_down/block_left/block_right).
        """
        self.mark_set('(BLOCK_SEL_MARK)', 'insert')
def rmsel(self, index0, index1):
"""
It removes the tag sel from the range that is delimited by index0 and index1
regardless whether index0 <= index1.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
self.tag_remove('sel', index2, index3)
def addsel(self, index0, index1):
"""
It adds the tag sel to the range delimited by index0 and index1 regardless
whether index0 <= index1.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
self.tag_add('sel', index2, index3)
def min(self, index0, index1):
"""
It returns the min between index0 and index1.
"""
if self.compare(index0, '<=', index1):
return index0
else:
return index1
def max(self, index0, index1):
"""
It returns the max between index0 and index1.
"""
if self.compare(index0, '<=', index1):
return index1
else:
return index0
    def sel_up(self):
        """
        Extend/shrink the range selection one line up, moving the
        cursor along. The anchor is the (RANGE_SEL_MARK) mark set by
        start_selection().
        """
        self.rmsel('(RANGE_SEL_MARK)', 'insert')
        self.up()
        self.addsel('(RANGE_SEL_MARK)', 'insert')

    def sel_down(self):
        """
        Extend/shrink the range selection one line down.
        """
        self.rmsel('(RANGE_SEL_MARK)', 'insert')
        self.down()
        self.addsel('(RANGE_SEL_MARK)', 'insert')

    def sel_right(self):
        """
        Extend/shrink the range selection one character right.
        """
        self.rmsel('(RANGE_SEL_MARK)', 'insert')
        self.right()
        self.addsel('(RANGE_SEL_MARK)', 'insert')

    def sel_left(self):
        """
        Extend/shrink the range selection one character left.
        """
        self.rmsel('(RANGE_SEL_MARK)', 'insert')
        self.left()
        self.addsel('(RANGE_SEL_MARK)', 'insert')
    def addblock(self, index0, index1):
        """
        Add a rectangular 'sel' block whose corners are index0 and
        index1.
        """
        index2 = self.min(index0, index1)
        index3 = self.max(index0, index1)
        a, b = self.indint(index2)
        c, d = self.indint(index3)
        # One sel range per line, spanning the two corner columns.
        for ind in xrange(a, c + 1):
            self.addsel('%s.%s' % (ind, min(b, d)), '%s.%s' % (ind, max(b, d)))

    def rmblock(self, index0, index1):
        """
        Remove a rectangular 'sel' block whose corners are index0 and
        index1.
        """
        index2 = self.min(index0, index1)
        index3 = self.max(index0, index1)
        a, b = self.indint(index2)
        c, d = self.indint(index3)
        for ind in xrange(a, c + 1):
            self.rmsel('%s.%s' % (ind, min(b, d)), '%s.%s' % (ind, max(b, d)))
    def block_down(self):
        """
        Extend/shrink the rectangular selection one line down. The
        anchor corner is the (BLOCK_SEL_MARK) mark set by
        start_block_selection().
        """
        a, b = self.indref('(CURSOR_LAST_COL)')
        c, d = self.indcur()
        index = self.index('(BLOCK_SEL_MARK)')

        self.rmblock(index, '%s.%s' % (c, b))
        self.down()

        a, b = self.indref('(CURSOR_LAST_COL)')
        c, d = self.indcur()
        self.addblock(index, '%s.%s' % (c, b))

    def block_up(self):
        """
        Extend/shrink the rectangular selection one line up.
        """
        a, b = self.indref('(CURSOR_LAST_COL)')
        c, d = self.indcur()
        index = self.index('(BLOCK_SEL_MARK)')

        self.rmblock(index, '%s.%s' % (c, b))
        self.up()

        a, b = self.indref('(CURSOR_LAST_COL)')
        c, d = self.indcur()
        self.addblock(index, '%s.%s' % (c, b))

    def is_line_start(self):
        """
        Return True when the cursor is NOT at the start of its line.
        (The name suggests the opposite; kept as-is for compatibility.)
        """
        return self.compare('insert', '!=', 'insert linestart')

    def block_left(self):
        """
        Extend/shrink the rectangular selection one character left.
        """
        a, b = self.indref('(CURSOR_LAST_COL)')
        c, d = self.indcur()
        index = self.index('(BLOCK_SEL_MARK)')

        self.rmblock(index, '%s.%s' % (c, b))
        self.left()

        a, b = self.indref('(CURSOR_LAST_COL)')
        c, d = self.indcur()
        self.addblock(index, '%s.%s' % (c, b))

    def is_line_end(self):
        """
        Return True when the cursor is NOT at the end of its line.
        (The name suggests the opposite; kept as-is for compatibility.)
        """
        return self.compare('insert', '!=', 'insert lineend')

    def block_right(self):
        """
        Extend/shrink the rectangular selection one character right.
        """
        a, b = self.indref('(CURSOR_LAST_COL)')
        c, d = self.indcur()
        index = self.index('(BLOCK_SEL_MARK)')

        self.rmblock(index, '%s.%s' % (c, b))
        self.right()

        a, b = self.indref('(CURSOR_LAST_COL)')
        c, d = self.indcur()
        self.addblock(index, '%s.%s' % (c, b))
def clear_selection(self):
"""
Unselect all text.
"""
try:
self.tag_remove('sel', 'sel.first', 'sel.last')
except Exception:
pass
    def select_char(self):
        """
        Add 'sel' to the char under the cursor.
        """
        self.addsel('insert', 'insert +1c')

    def unselect_char(self):
        """
        Remove 'sel' from the char under the cursor.
        """
        self.rmsel('insert', 'insert +1c')

    def del_char(self):
        """
        Delete the char under the cursor (undo-separated).
        """
        self.edit_separator()
        self.delete('insert', 'insert +1c')

    def echo(self, data):
        """
        Insert data at the cursor position.
        """
        self.insert('insert', data)

    def backspace(self):
        """
        Delete the char before the cursor.
        """
        self.delete('insert -1c', 'insert')
    def do_undo(self):
        """
        Undo the last edit; the TclError raised when the undo stack is
        empty is ignored.
        """
        try:
            self.edit_undo()
        except TclError:
            pass

    def do_redo(self):
        """
        Redo the last undone edit; the TclError raised when there is
        nothing to redo is ignored.
        """
        try:
            self.edit_redo()
        except TclError:
            pass
    def sel_text_start(self):
        """
        Select from the cursor position back to the start of the text,
        leaving the cursor at '1.0'.
        """
        index = self.index('insert')
        self.go_text_start()
        self.addsel(index, 'insert')

    def sel_text_end(self):
        """
        Select from the cursor position to the end of the text,
        leaving the cursor there.
        """
        index = self.index('insert')
        self.go_text_end()
        self.addsel(index, 'insert')

    def go_text_start(self):
        """
        Place the cursor at the beginning of the text ('1.0').
        """
        self.mark_set('insert', '1.0')
        self.see('insert')

    def go_text_end(self):
        """
        Place the cursor at the start of the last ('end') line.
        """
        self.mark_set('insert', 'end linestart')
        self.see('insert')
    def sel_line_start(self):
        """
        Select from the cursor position back to the start of the line,
        leaving the cursor there.
        """
        index = self.index('insert')
        self.go_line_start()
        self.addsel(index, 'insert')

    def sel_line_end(self):
        """
        Select from the cursor position to the end of the line,
        leaving the cursor there.
        """
        index = self.index('insert')
        self.go_line_end()
        self.addsel(index, 'insert')

    def go_line_start(self):
        """
        Place the cursor at the beginning of the line.
        """
        self.mark_set('insert', 'insert linestart')

    def go_line_end(self):
        """
        Place the cursor at the end of the line.
        """
        self.mark_set('insert', 'insert lineend')
def go_next_word(self):
"""
Place the cursor at the next word.
"""
self.iseek('\M', index='insert', stopindex='end')
def go_prev_word(self):
"""
Place the cursor at the previous word.
"""
self.iseek('\M', backwards=True, index='insert', stopindex='1.0')
def go_next_sym(self, chars):
"""
Place the cursor at the next occurrence of one of the chars.
"""
chars = map(lambda ind: escape(ind), chars)
REG = '|'.join(chars)
self.iseek(REG, index='insert', stopindex='end')
def go_prev_sym(self, chars):
"""
Place the cursor at the previous occurrence of one of the chars.
"""
chars = map(lambda ind: escape(ind), chars)
REG = '|'.join(chars)
self.iseek(REG, backwards=True, stopindex='1.0')
    def del_line(self):
        """
        Delete the whole cursor line (undo-separated) and keep the
        cursor visible.
        """
        self.edit_separator()
        self.delete('insert linestart', 'insert +1l linestart')
        self.see('insert')
    def cpsel(self, sep=''):
        """
        Copy the selected text to the clipboard (each sel range is
        followed by sep) and unselect it.

        NOTE(review): tag_remove with 'sel.first' raises TclError when
        there is no selection -- confirm callers only invoke this with
        an active selection.
        """
        data = self.join_ranges('sel', sep)
        self.clipboard_clear()
        self.clipboard_append(data)
        self.tag_remove('sel', 'sel.first', 'sel.last')

    def ctsel(self, sep=''):
        """
        Cut the selected text to the clipboard (undo-separated).
        """
        data = self.join_ranges('sel', sep)
        self.clipboard_clear()
        self.clipboard_append(data)
        self.edit_separator()
        self.delete_ranges('sel')

    def del_sel(self):
        """
        Delete all selected text (undo-separated).
        """
        self.edit_separator()
        self.delete_ranges('sel')
    def ptsel(self):
        """
        Paste clipboard text at the cursor position (undo-separated).
        """
        data = self.clipboard_get()
        self.edit_separator()
        self.insert('insert', data)

    def ptsel_after(self):
        """
        Paste clipboard text at the start of the line below the cursor
        (undo-separated).
        """
        data = self.clipboard_get()
        self.edit_separator()
        self.insert('insert +1l linestart', data)

    def ptsel_before(self):
        """
        Paste clipboard text at the start of the cursor line
        (undo-separated).
        """
        data = self.clipboard_get()
        self.edit_separator()
        self.insert('insert linestart', data)
    def select_line(self):
        """
        Add 'sel' over the whole cursor line (newline included).
        """
        self.tag_add('sel', 'insert linestart', 'insert +1l linestart')

    def unselect_line(self):
        """
        Remove 'sel' from the whole cursor line.
        """
        self.tag_remove('sel', 'insert linestart', 'insert +1l linestart')

    def toggle_line_selection(self):
        """
        Toggle 'sel' on the cursor line.
        """
        self.toggle_sel('insert linestart', 'insert +1l linestart')

    def toggle_sel(self, index0, index1):
        """
        Toggle 'sel' over the range (index0, index1).
        """
        self.toggle_range('sel', index0, index1)
def toggle_range(self, name, index0, index1):
"""
Toggle tag name in the range defined by index0 and index1.
It means it adds a tag name to the range index0 and index1 if there is no
tag mapped to that range otherwise it removes the tag name from the range.
"""
index2 = index0
index0 = self.min(index0, index1)
index1 = self.max(index2, index1)
map = self.is_tag_range(name, index0, index1)
if map:
self.tag_remove(name, index0, index1)
else:
self.tag_add(name, index0, index1)
    def select_word(self, index='insert'):
        """
        Select the word around index (default: the cursor).
        """
        index1, index2 = self.get_word_range(index)
        self.tag_add('sel', index1, index2)

    def get_word_range(self, index):
        # Word boundaries: nearest non-word char (\W) on each side,
        # limited to the current line.
        index1 = self.search('\W', index, regexp=True, stopindex='%s linestart' % index, backwards=True)
        index2 = self.search('\W', index, regexp=True, stopindex='%s lineend' % index)
        # Fall back to the line boundaries when no \W was found.
        index1 = '%s linestart' % index if not index1 else '%s +1c' % index1
        index2 = '%s lineend' % index if not index2 else index2
        return index1, index2

    def select_seq(self, index='insert'):
        """
        Select the sequence of non-blank characters around index
        (default: the cursor).
        """
        index1, index2 = self.get_seq_range(index)
        self.tag_add('sel', index1, index2)

    def get_seq_range(self, index):
        # Sequence boundaries: nearest space on each side, limited to
        # the current line.
        index1 = self.search(' ', index, regexp=True, stopindex='%s linestart' % index, backwards=True)
        index2 = self.search(' ', index, regexp=True, stopindex='%s lineend' % index)
        index1 = '%s linestart' % index if not index1 else '%s +1c' % index1
        index2 = '%s lineend' % index if not index2 else index2
        return index1, index2

    def get_seq(self, index='insert'):
        """
        Return the sequence of non-blank characters around index.
        """
        return self.get(*self.get_seq_range(index))

    def get_line(self, index='insert'):
        """
        Return the text of the line containing index (no newline).
        """
        return self.get('%s linestart' % index,
            '%s lineend' % index)
    def scroll_line_up(self):
        """
        Scroll the view one line up; when the cursor line scrolled out
        of the view, drag the cursor one line up to keep it visible.
        """
        # should be rewritten.
        # it fails with append.
        self.yview(SCROLL, -1, 'units')
        # dlineinfo returns None when the line is not displayed.
        is_visible = self.dlineinfo('insert')
        if not is_visible:
            self.mark_set('insert', 'insert -1l')

    def scroll_line_down(self):
        """
        Scroll the view one line down; drag the cursor along when it
        scrolled out of the view.
        """
        # should be rewritten.
        # it fails with append.
        self.yview(SCROLL, 1, 'units')
        is_visible = self.dlineinfo('insert')
        if not is_visible:
            self.mark_set('insert', 'insert +1l')

    def scroll_page_down(self):
        """
        Scroll one page down and move the cursor to the top visible
        corner ('@0,0').
        """
        self.yview(SCROLL, 1, 'page')
        self.mark_set('insert', '@0,0')

    def scroll_page_up(self):
        """
        Scroll one page up and move the cursor to the top visible
        corner ('@0,0').
        """
        self.yview(SCROLL, -1, 'page')
        self.mark_set('insert', '@0,0')
    def insert_line_down(self):
        """
        Open a new empty line below the cursor line and move onto it
        (undo-separated).
        """
        self.edit_separator()
        self.insert('insert +1l linestart', '\n')
        self.mark_set('insert', 'insert +1l linestart')
        self.see('insert')

    def select_all(self):
        """
        Select all text.
        """
        self.tag_add('sel', '1.0', 'end')

    def insert_line_up(self):
        """
        Open a new empty line above the cursor line and move onto it
        (undo-separated).
        """
        self.edit_separator()
        self.insert('insert linestart', '\n')
        self.mark_set('insert', 'insert -1l linestart')
        self.see('insert')
    def shift_sel_right(self, width, char):
        """
        Indent every line touched by the selection: width copies of
        char are inserted at each line start.
        """
        srow, scol = self.indref('sel.first')
        erow, ecol = self.indref('sel.last')
        self.shift_right(srow, erow, width, char)

    def shift_sel_left(self, width):
        """
        Dedent every line touched by the selection: the first width
        chars of each line are deleted.
        """
        srow, scol = self.indref('sel.first')
        erow, ecol = self.indref('sel.last')
        self.shift_left(srow, erow, width)

    def shift_right(self, srow, erow, width, char):
        """
        Shift the block of lines srow..erow to the right
        (undo-separated). Specially useful when working with source
        code files.
        """
        self.edit_separator()
        for ind in xrange(srow, erow + 1):
            self.insert('%s.0' % ind, width * char)

    def shift_left(self, srow, erow, width):
        """
        Shift the block of lines srow..erow to the left
        (undo-separated). Specially useful when working with source
        code files.
        """
        self.edit_separator()
        for ind in xrange(srow, erow + 1):
            self.delete('%s.0' % ind, '%s.%s' % (ind, width))
def collect(self, name, regex, *args, **kwargs):
"""
The code below would find for 'PATTERN' in all selected text of an
AreaVi instance:
for data, pos0, pos1 in area.collect('sel', 'PATTERN'):
pass
"""
# It should be built on top of nextrange.
map = self.tag_ranges(name)
for indi in range(0, len(map) - 1, 2):
seq = self.find(regex, map[indi], map[indi + 1], *args, **kwargs)
for indj in seq:
yield indj
def replace_ranges(self, name, regex, data, index='1.0', stopindex='end',
*args, **kwargs):
"""
It replaces all occurrences of regex in the ranges that are mapped to tag name.
name - Name of the tag.
regex - The pattern.
data - The data to replace.
args - Arguments given to AreaVi.find.
**kwargs - A dictionary of arguments given to AreaVi.find.
"""
while True:
map = self.tag_nextrange(name, index, stopindex)
if not map: break
index3, index4 = map
index = self.replace_all(regex, data, index3, index4, *args, **kwargs)
    def map_matches(self, name, matches):
        """
        Add tag name over every match range produced by AreaVi.find or
        AreaVi.collect.

        name    - The tag to be added.
        matches - An iterator of (chunk, index0, index1) triples from
                  AreaVi.find or AreaVi.collect.
        """
        for _, index0, index1 in matches:
            self.tag_add(name, index0, index1)
    def split(self, regex, index='1.0', stopindex='end', *args, **kwargs):
        """
        Tokenize the widget contents on regex, yielding
        (token, start, end) triples. The *args, **kwargs are the same
        passed to the AreaVi.find method.

        for token, index0, index1 in area.split(PATTERN):
            pass
        """
        index0 = index
        for chk, index1, index2 in self.find(regex, index, stopindex, *args, **kwargs):
            if self.compare(index1, '>', index0):
                # There is text between the previous boundary and this
                # match: yield it.
                yield(self.get(index0, index1), index0, index1)
                index0 = index2
            else:
                # NOTE(review): yields the matched chunk paired with
                # (index2, stopindex) -- asymmetric with the branch
                # above; confirm this is the intended contract.
                yield(chk, index2, stopindex)
def find(self, regex, index='1.0', stopindex='end', exact=None, regexp=True, nocase=None,
elide=None, nolinestop=None, step=''):
"""
It returns an iterator of matches. It is based on the Text.search method.
for match, index0, index1 in area.find('pattern'):
passs
"""
count = IntVar()
while True:
index = self.search(regex, index, stopindex, exact=exact, nocase=nocase,
nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)
# If regex = '' it ends in an infinite loop.
if not index or not regex:
break
len = count.get()
tmp = '%s +%sc' % (index, len)
chunk = self.get(index, tmp)
pos0 = self.index(index)
pos1 = self.index('%s +%sc' % (index, len))
index = '%s%s' % (tmp, step)
yield(chunk, pos0, pos1)
    def search(self, pattern, index, stopindex=None, forwards=None,
        backwards=None, exact=None, regexp=None, nocase=None,
        count=None, elide=None, nolinestop=None):
        """
        Standard search method, but with support for the nolinestop
        option which is new in tk 8.5 but not supported by tkinter out
        of the box.

        Returns the index of the first match, or '' when there is none.
        """
        # Build the raw Tcl command:
        # pathName search ?switches? pattern index ?stopIndex?
        args = [self._w, 'search']
        if forwards: args.append('-forwards')
        if backwards: args.append('-backwards')
        if exact: args.append('-exact')
        if regexp: args.append('-regexp')
        if nocase: args.append('-nocase')
        if elide: args.append('-elide')
        if nolinestop: args.append("-nolinestop")
        if count: args.append('-count'); args.append(count)
        # '--' stops switch parsing when the pattern itself starts with
        # a dash.
        if pattern and pattern[0] == '-': args.append('--')
        args.append(pattern)
        args.append(index)
        if stopindex: args.append(stopindex)
        return str(self.tk.call(tuple(args)))
    def iseek(self, regex, index='insert', stopindex='end', backwards=None, exact=None, regexp=True,
        nocase=None, elide=None, nolinestop=None):
        """
        Find regex backwards/forwards from index and move the insert
        mark to the previous/next match.

        Returns the (start, end) indexes of the match, or None when
        there is none.
        """
        count = IntVar()
        index = self.search(regex, index=index, stopindex=stopindex, regexp=regexp,
            exact=exact, nocase=nocase, elide=elide, nolinestop=nolinestop,
            backwards=backwards, count=count)

        if not index: return
        index0 = self.index('%s +%sc' % (index, count.get()))
        # Land on the start of the match when searching backwards,
        # otherwise just past its end.
        self.mark_set('insert', index if backwards else index0)
        self.see('insert')
        return index, index0
    def ipick(self, name, regex, index='insert', stopindex='end', verbose=False, backwards=None, exact=None, regexp=True,
        nocase=None, elide=None, nolinestop=None):
        """
        Jump to the next/previous match of regex and tag it with name,
        clearing any previous range of that tag. The search resumes
        from the edge of the currently tagged match, so repeated calls
        walk through the matches one by one.

        Returns the (start, end) pair of the match, or None.
        """
        # Force to do a search from index.
        if verbose: self.tag_remove(name, '1.0', 'end')

        # Find the currently highlighted match, if any, to resume from.
        if not backwards: ranges = self.tag_nextrange(name, index, 'end')
        else: ranges = self.tag_prevrange(name, index, '1.0')

        if ranges: index0, index1 = ranges[:2]
        else: index0 = index1 = index

        index = self.iseek(regex, index=index0 if backwards else index1, stopindex=stopindex,
            backwards=backwards, exact=exact, regexp=regexp, nocase=nocase,
            elide=elide, nolinestop=nolinestop)

        if not index:
            return
        self.tag_remove(name, '1.0', 'end')
        self.tag_add(name, *index)
        return index
    def replace(self, regex, data, index=None, stopindex=None, forwards=None,
        backwards=None, exact=None, regexp=True, nocase=None, elide=None, nolinestop=None):
        """
        Replace the next occurrence of regex with data. data may also
        be a callable; it is then invoked as
        data(matched_text, start, end) and its return value is
        inserted.

        Returns (match_index, len(replacement)), or None when there is
        no match.
        """
        count = IntVar()
        index = self.search(regex, index, stopindex, forwards=forwards, backwards=backwards, exact=exact, nocase=nocase,
            nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)

        if not index:
            return

        index0 = self.index('%s +%sc' % (index, count.get()))

        if callable(data):
            data = data(self.get(index, index0), index, index0)

        self.delete(index, index0)
        self.insert(index, data)
        return index, len(data)
def replace_all(self, regex, data, index='1.0', stopindex='end', exact=None, regexp=True, nocase=None,
elide=None, nolinestop=None):
"""
It is used to replace all occurrences of a given match in a range.
It accepts a callback function that determines what is replaced.
"""
# It is needed because the range will grow
# when data is inserted, the intent is searching
# over a pre defined range.
self.mark_set('(REP_STOPINDEX)', stopindex)
while True:
map = self.replace(regex, data, index, '(REP_STOPINDEX)', exact=exact, nocase=nocase,
nolinestop=nolinestop, regexp=regexp, elide=elide)
if not map:
return self.index('(REP_STOPINDEX)')
index, size = map
index = self.index('%s +%sc' % (index, size))
def get_paren_search_dir(self, index, start, end):
"""
"""
char = self.get(index, '%s +1c' % index)
if char == start:
return False
elif char == end:
return True
else:
return None
def get_paren_search_sign(self, index, start, end):
"""
"""
char = self.get(index, '%s +1c' % index)
if char == start:
return '+'
elif char == end:
return '-'
else:
return None
def sel_matching_pair_data(self, index, max=1500, pair=('(', ')')):
index = self.case_pair(index, max, *pair)
if not index: return
min = self.min(index, 'insert')
max = self.max(index, 'insert')
min = '%s +1c' % min
self.tag_add('sel', min, max)
def sel_matching_pair(self, index, max=1500, pair=('(', ')')):
"""
"""
index = self.case_pair(index, max, *pair)
if not index: return
min = self.min(index, 'insert')
max = self.max(index, 'insert')
max = '%s +1c' % max
self.tag_add('sel', min, max)
    def get_matching_pair(self, index, max, start='(', end=')'):
        """
        Return the (open, close) indexes of the start/end delimiter
        pair enclosing index, or None when no enclosing pair is found
        within max chars.
        """
        # Nearest opening delimiter before index and closing one after.
        index0 = self.search(start, regexp=False, index=index, backwards=True)
        if not index0: return

        index1 = self.search(end, regexp=False, index=index)
        if not index1: return

        # Partners of those two candidates.
        index2 = self.case_pair(index0, max, start, end)
        if not index2: return

        index3 = self.case_pair(index1, max, start, end)
        if not index3: return

        if self.is_in_range(index, index0, index2):
            return index0, index2
        elif self.is_in_range(index, index3, index1):
            return index3, index1
def case_pair(self, index, max, start='(', end=')'):
"""
Once this method is called, it returns an index for the next
matching parenthesis or None if the char over the cursor
isn't either '(' or ')'.
"""
dir = self.get_paren_search_dir(index, start, end)
# If dir is None then there is no match.
if dir == None: return ''
sign = self.get_paren_search_sign(index, start, end)
count = 0
# If we are searching fowards we don't need
# to add 1c.
index0 = '%s %s' % (index, '+1c' if dir else '')
size = IntVar(0)
while True:
index0 = self.search('\%s|\%s' % (start, end), index = index0,
stopindex = '%s %s%sc' % (index, sign, max),
count = size, backwards = dir, regexp = True)
if not index0: return ''
char = self.get(index0, '%s +1c' % index0)
count = count + (1 if char == start else -1)
if not count:
return index0
# When we are searching backwards we don't need
# to set a character back because index will point
# to the start of the match.
index0 = '%s %s' % (index0, '+1c' if not dir else '')
    def clear_data(self):
        """
        Wipe the widget contents, reset filename to the absolute form
        of the default filename and generate <<ClearData>>.
        """
        import os
        self.delete('1.0', 'end')
        self.filename = os.path.abspath(self.default_filename)
        self.event_generate('<<ClearData>>')
    def load_data(self, filename):
        """
        Replace the widget contents with the contents of the file and
        make it the current file.

        filename - Name of the file.

        Generates <<LoadData>> and <<Load-TYPE>> where TYPE is the
        guessed mime type (may be None, yielding '<<Load-None>>').
        """
        import os
        filename = os.path.abspath(filename)
        self.filename = filename

        fd = open(filename, 'r')
        data = fd.read()
        fd.close()

        # i could generate a tk event here.
        try:
            data = data.decode(self.charset)
        except UnicodeDecodeError:
            # NOTE(review): on decode failure the raw, undecoded data
            # is inserted and charset is cleared -- confirm save_data
            # copes with charset == ''.
            self.charset = ''

        self.delete('1.0', 'end')
        self.insert('1.0', data)
        self.event_generate('<<LoadData>>')

        type, _ = mimetypes.guess_type(self.filename)
        self.event_generate('<<Load-%s>>' % type)
    def decode(self, name):
        """
        Switch the charset and reload the current file with it.
        """
        self.charset = name
        self.load_data(self.filename)

    def save_data(self):
        """
        Write the widget contents to self.filename, encoded with
        self.charset. Generates <<Pre-SaveData>> / <<Pre-Save-TYPE>>
        before writing and <<SaveData>> / <<Save-TYPE>> afterwards,
        where TYPE is the guessed mime type.
        """
        type, _ = mimetypes.guess_type(self.filename)
        self.event_generate('<<Pre-SaveData>>')
        self.event_generate('<<Pre-Save-%s>>' % type)

        data = self.get('1.0', 'end')
        data = data.encode(self.charset)

        fd = open(self.filename, 'w')
        fd.write(data)
        fd.close()

        self.event_generate('<<SaveData>>')
        self.event_generate('<<Save-%s>>' % type)
    def save_data_as(self, filename):
        """
        Save the widget contents into filename and make it the current
        file.

        filename - Name of the file to save the data.
        """
        self.filename = filename
        self.save_data()
    def is_tag_range(self, name, index0, index1):
        """
        Return the (start, end) of the tag range that fully contains
        (index0, index1), or None when no range of tag name does.
        Despite the name, the truthy result is a range, not True.

        area.tag_add('tag', '2.0', '5.0')

        # Truthy: inside the tagged range.
        area.is_tag_range('tag', '2.0', '3.0')

        # None: crosses the range boundary.
        area.is_tag_range('tag', '1.0', '2.0')
        """
        ranges = self.tag_ranges(name)
        # tag_ranges is a flat (start, end, start, end, ...) sequence.
        for ind in xrange(0, len(ranges) - 1, 2):
            if self.is_subrange(index0, index1, ranges[ind].string,
                ranges[ind + 1].string):
                return ranges[ind].string, ranges[ind + 1].string
    def is_in_range(self, index, index0, index1):
        """
        Return True when index lies between index0 and index1
        (whatever their order), otherwise False.
        """
        index2 = self.min(index0, index1)
        index3 = self.max(index0, index1)
        r1 = self.compare(index2, '<=', index)
        r2 = self.compare(index3, '>=', index)
        return r1 and r2

    def is_subrange(self, index0, index1, index2, index3):
        """
        Return True when the range (index0, index1) lies entirely
        inside the range (index2, index3), otherwise False.
        """
        r1 = self.is_in_range(index0, index2, index3)
        r2 = self.is_in_range(index1, index2, index3)
        return r1 and r2
    def swap(self, data, index0, index1):
        """
        Replace the text in the range (index0, index1) with data.
        """
        self.delete(index0, index1)
        self.insert(index0, data)

    def swap_ranges(self, name, data, index0='1.0', index1='end'):
        """
        Replace every range of tag name between index0 and index1 with
        data.
        """
        while True:
            range = self.tag_nextrange(name, index0, index1)
            if not range: break
            self.swap(data, *range)

    def delete_ranges(self, name, index0='1.0', index1='end'):
        """
        Delete every range of tag name between index0 and index1.
        """
        self.swap_ranges(name, '', index0, index1)
def join_ranges(self, name, sep=''):
"""
Join ranges of text that corresponds to a tag defined by name using a seperator.
"""
data = ''
for ind in self.get_ranges(name):
data = data + ind + sep
return data
    def get_ranges(self, name):
        """
        Yield the text of every range mapped to tag name.
        """
        ranges = self.tag_ranges(name)
        # tag_ranges is a flat (start, end, start, end, ...) sequence.
        for ind in xrange(0, len(ranges) - 1, 2):
            data = self.get(ranges[ind], ranges[ind + 1])
            yield(data)
def tag_prev_occur(self, tag_names, index0, index1, default):
    """
    Return the end index of the closest previous range among the
    given tag names between index0 and index1; return `default`
    when none of the tags has such a range. (Should be renamed.)
    """
    for name in tag_names:
        span = self.tag_prevrange(name, index0, index1)
        if span:
            return span[1]
    return default
def tag_next_occur(self, tag_names, index0, index1, default):
    """
    Return the start index of the closest next range among the
    given tag names between index0 and index1; return `default`
    when none of the tags has such a range. (Should be renamed.)
    """
    for name in tag_names:
        span = self.tag_nextrange(name, index0, index1)
        if span:
            return span[0]
    return default
@staticmethod
def areavi_widgets(wid):
    """
    Yield every AreaVi instance found in wid's widget subtree.

    from vyapp.app import root
    for ind in AreaVi.areavi_widgets(root):
        ind.insert('end', 'FOO')

    The code above inserts 'FOO' at the end of every AreaVi widget
    that has root as one of its masters.
    """
    for child in wid.winfo_children():
        if isinstance(child, AreaVi):
            yield child
        else:
            # Recurse into non-AreaVi containers.
            for sub in AreaVi.areavi_widgets(child):
                yield sub
@staticmethod
def get_opened_files(wid):
    """
    Return a dict mapping the filename of every AreaVi descendant
    of wid to the instance itself:

    from vyapp.app import root
    files = AreaVi.get_opened_files(root)

    files = { '/home/tau/file.c': AreaVi_Instance,
              '/home/tau/file.b': AreaVi_Instance}
    """
    # dict(genexp) instead of a loop over a local named `map`,
    # which shadowed the builtin.
    return dict((ind.filename, ind) for ind in AreaVi.areavi_widgets(wid))
@staticmethod
def find_all(wid, regex, index='1.0', stopindex='end', *args, **kwargs):
    """
    Run AreaVi.find over every AreaVi descendant of wid:

    from vyapp.app import root
    for ind, (match, index0, index1) in AreaVi.find_all(root, 'pattern'):
        pass

    Yields (widget, (match, index0, index1)) pairs where widget is
    the AreaVi instance the pattern matched in.
    """
    for area in AreaVi.areavi_widgets(wid):
        for match in area.find(regex, index, stopindex, *args, **kwargs):
            yield area, match
Fixing small bug with vyirc.
"""
"""
from Tkinter import *
from re import escape
import string
import mimetypes
class DataEvent(object):
    """
    Translate every <Key> press on a widget into a <<Data>> virtual
    event whose data field carries the pressed character.
    """

    def __init__(self, widget):
        self.widget = widget
        self.widget.bind('<Key>', self.dispatch_data, add=True)

    def dispatch_data(self, event):
        """Re-emit the key press as a <<Data>> virtual event."""
        self.widget.event_generate('<<Data>>', data=chr(event.keysym_num))
class IdleEvent(object):
    """
    Fire a <<Idle>> virtual event when no <<Data>> event has been
    seen on the widget for `timeout` milliseconds.
    """

    def __init__(self, widget):
        self.widget = widget
        self.widget.bind('<<Data>>', self.dispatch_idle, add=True)
        # Milliseconds of silence before <<Idle>> fires.
        self.timeout = 400
        # Id of the pending `after` timer; '' when none was set yet.
        self.funcid = ''

    def dispatch_idle(self, event):
        """Restart the idle timer on every <<Data>> event."""
        emit = lambda: self.widget.event_generate('<<Idle>>')
        self.widget.after_cancel(self.funcid)
        self.funcid = self.widget.after(self.timeout, emit)
class AreaVi(Text, DataEvent, IdleEvent):
ACTIVE = None
def __init__(self, default_filename, *args, **kwargs):
    """
    This class receives all Text widget arguments
    and one named default_filename which means
    the filename that is saved when no filename
    is specified.

    default_filename:
        The default path file where contents are saved.
    """
    Text.__init__(self, *args, **kwargs)
    DataEvent.__init__(self, self)
    IdleEvent.__init__(self, self)
    # Maps mode name -> opt flag (whether Text class bindings stay
    # active in that mode); see add_mode/chmode.
    self.setup = dict()
    # Maybe it should be?
    # abspath(default_filename)
    self.default_filename = default_filename
    # The file's path and name.
    self.filename = default_filename
    # Marks used by the cursor-movement and selection helpers.
    self.mark_set('(CURSOR_LAST_COL)', '1.0')
    self.mark_set('(RANGE_SEL_MARK)', '1.0')
    self.mark_set('(BLOCK_SEL_MARK)', '1.0')
    # def cave(event):
    # AreaVi.ACTIVE = event.widget
    # self.hook(-1, '<FocusIn>', cave)
    AreaVi.ACTIVE = self
    # Charset used to decode/encode file contents on load/save.
    self.charset = 'utf-8'
    # Maps (id, seq) pairs to alternative binding tuples; see hook().
    self.map = {}
def update_map(self, keymap):
    # Merge keymap entries into the (id, seq) -> bindings table
    # that hook() consults.
    self.map.update(keymap)
def active(self):
    """
    Set this instance as the target AreaVi.

    Plugins that expose python functions to be executed from vy
    should access AreaVi.ACTIVE when having to manipulate some
    AreaVi instance content.
    """
    AreaVi.ACTIVE = self
def chmode(self, id):
    """
    Change this AreaVi instance's mode.

    id - The mode name, previously registered via add_mode.

    area = AreaVi('None')
    area.chmode('INSERT')

    It would make area be in INSERT mode. Generates the virtual
    events <<Chmode>> and <<Chmode-ID>>.
    """
    opt = self.setup[id]
    self.id = id
    # Bindtag names embed str(self) so each widget instance gets
    # its own private binding classes.
    mode0 = 'mode%s-1' % self
    mode1 = 'mode%s%s' % (self, id)
    # Modes registered with opt=True keep the default 'Text' class
    # bindings, so typing still edits the widget.
    if opt: self.bindtags((mode0, mode1, self, 'Text', '.'))
    else: self.bindtags((mode0, mode1, self, '.'))
    self.event_generate('<<Chmode>>')
    self.event_generate('<<Chmode-%s>>' % id)
def add_mode(self, id, opt=False):
    """
    It adds a new mode. The opt argument means whether
    it should propagate the event to the internal text widget callbacks.

    def install(area):
        area.add_mode('MODE')

    The code above would add a mode named MODE to the AreaVi instance.

    def install(area):
        area.add_mode('TYPING', opt=True)

    The code above would add a mode named 'TYPING' in which it is
    possible to edit the content of the AreaVi instance: keystrokes
    that map to printable characters are dropped into the widget
    that has focus.
    """
    # Only records the flag; the mode takes effect via chmode().
    self.setup[id] = opt
def del_mode(self, id):
    """
    Remove a mode. Not implemented yet; kept as the counterpart
    of add_mode.
    """
    pass
def hook(self, id, seq, callback, add=True):
    """
    Hook a callback to an event sequence in a given mode:

    def callback(event):
        event.widget.insert('An event happened!')

    def install(area):
        area.hook('INSERT', '<Key-i>', callback)

    In the example above, whenever the event <Key-i> happens in
    INSERT mode, callback is called with the event object.
    """
    # self.map may remap an (id, seq) pair to one or more alternative
    # (id, seq) pairs; the default is the pair itself.
    for id, seq in self.map.get((id, seq), ((id, seq), )):
        self.bind_class('mode%s%s' % (self, id), seq, callback, add)
def unhook(self, id, seq):
    """
    The opposite of AreaVi.hook.

    area.unhook('mode', '<Event>')
    """
    # Same widget-private binding class name that hook() used.
    mode = 'mode%s%s' % (self, id)
    self.unbind_class(mode, seq)
def install(self, *args):
    """
    Shorthand for several AreaVi.hook calls:

    def install(area):
        area.install(('MODE1', '<Event1>', callback1),
                     ('MODE2', '<Event2>', callback2),
                     ('MODE3', '<Event3>', callback3), ...)
    """
    for entry in args:
        self.hook(*entry)
def uninstall(self, *args):
    """
    The opposite of AreaVi.install.

    area.uninstall(('mode', '<Event>'), ...)
    """
    # Bug fix: this used to unpack three values per tuple and pass a
    # callback on to unhook, which only accepts (id, seq) -- every
    # call raised TypeError. Extra tuple items are now ignored so
    # old-style (id, seq, callback) tuples keep working too.
    for entry in args:
        self.unhook(entry[0], entry[1])
def append(self, data, *args):
    """
    Insert data at the end of the widget, tag the inserted region
    with each tag name in args, then make the insertion cursor
    visible.
    """
    # This is sort of odd, it seems that
    # i have to add -1l for it to work.
    # It shouldn't be necessary.
    index0 = self.index('end -1l')
    self.insert('end', data)
    for ind in args:
        self.tag_add(ind, index0, 'end -1c')
    # self.mark_set('insert', 'end')
    self.see('insert')
def curline(self):
    """
    Return the cursor line's text, including the trailing newline
    ('insert linestart' up to the next line's start).
    """
    return self.get('insert linestart', 'insert +1l linestart')
def tag_swap(self, name, index0, index1, *args):
    """
    Remove the tag `name` from the range (index0, index1), then
    re-add it to each (start, end) range given in args.

    Example:

    DATA_X = 'It is black.\n'
    DATA_Y = 'It is blue.\n'
    text = Text()
    text.pack()
    text.insert('1.0', DATA_X)
    text.insert('2.0', DATA_Y)

    text.tag_add('X', '1.0', '1.0 lineend')
    text.tag_add('Y', '2.0', '2.0 lineend')

    text.tag_config('X', background='black')
    text.tag_config('Y', foreground='blue')

    text.tag_swap('X', '1.0', 'end', ('2.0', '2.0 lineend'))

    It removes the X tag from '1.0' to 'end' then adds
    the X tag to the range '2.0' '2.0 lineend'.
    """
    self.tag_remove(name, index0, index1)
    for indi, indj in args:
        self.tag_add(name, indi, indj)
def insee(self, index, data):
"""
This method inserts data at index position then makes the cursor visible.
"""
self.insert(index, data)
self.see('insert')
def cmd_like(self):
    """
    Return the cursor line's contents and delete them (the line's
    newline itself is kept).
    """
    start, end = 'insert linestart', 'insert lineend'
    data = self.get(start, end)
    self.delete(start, end)
    return data
def indref(self, index):
    """
    Resolve a Text index into a pair of integers:

    a, b = area.indref('insert')

    Now, a and b can be manipulated as numbers (line, column).
    """
    # self.index normalizes any index expression to 'line.col'.
    a, b = self.index(index).split('.')
    return int(a), int(b)
def setcur(self, line, col='0'):
"""
It is used to set the cursor position at a given index using line
and col.
"""
self.mark_set('insert', '%s.%s' % (line, col))
self.see('insert')
def indint(self, index):
    """
    Split a literal 'line.col' index string into integers.

    Just a shorthand for:

    a, b = index.split('.')
    a, b = int(a), int(b)

    Unlike indref, the index is NOT resolved through self.index
    first, so it must already be in 'line.col' form.
    """
    a, b = index.split('.')
    return int(a), int(b)
def indcur(self):
"""
It returns two integers that corresponds to the cursor
position line and col.
"""
a, b = self.indref('insert')
return int(a), int(b)
def seecur(self, index):
"""
Just a shorthand for:
area.mark_set('insert', index)
area.see('insert')
"""
self.mark_set('insert', index)
self.see('insert')
def is_end(self):
    """
    Return True when the cursor is NOT on the last line of text,
    False when it is.

    NOTE(review): despite the name, the comparison is negated
    ('!='); callers such as AreaVi.down rely on this sense, so the
    code is kept and only the documentation corrected.
    """
    # I have to use 'end -1l linestart' since it seems the 'end' tag
    # corresponds to one line after the last visible line.
    # So last line lineend != 'end'.
    return self.compare('insert linestart', '!=', 'end -1l linestart')
def is_start(self):
    """
    Return True when the cursor line is NOT the first line ('1.0'),
    False when it is.

    NOTE(review): the name suggests the opposite; AreaVi.up depends
    on this negated sense, so only the documentation is corrected.
    """
    return self.compare('insert linestart', '!=', '1.0')
def down(self):
"""
It sets the cursor position one line down.
"""
if not self.is_end():
return
a, b = self.indref('(CURSOR_LAST_COL)')
c, d = self.indcur()
self.setcur(c + 1, b)
def up(self):
"""
It sets the cursor one line up.
"""
if not self.is_start():
return
a, b = self.indref('(CURSOR_LAST_COL)')
c, d = self.indcur()
self.setcur(c - 1, b)
def left(self):
"""
It moves the cursor one character left.
"""
self.mark_set('insert', 'insert -1c')
# The mark used by self.down, self.up.
self.mark_set('(CURSOR_LAST_COL)', 'insert')
def right(self):
"""
It moves the cursor one character right.
"""
self.mark_set('insert', 'insert +1c')
# The mark used by self.down, self.up.
self.mark_set('(CURSOR_LAST_COL)', 'insert')
def start_selection(self):
"""
Start range selection.
"""
self.mark_set('(RANGE_SEL_MARK)', 'insert')
def start_block_selection(self):
"""
Start block selection.
"""
self.mark_set('(BLOCK_SEL_MARK)', 'insert')
def rmsel(self, index0, index1):
"""
It removes the tag sel from the range that is delimited by index0 and index1
regardless whether index0 <= index1.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
self.tag_remove('sel', index2, index3)
def addsel(self, index0, index1):
"""
It adds the tag sel to the range delimited by index0 and index1 regardless
whether index0 <= index1.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
self.tag_add('sel', index2, index3)
def min(self, index0, index1):
    """
    Return whichever of index0/index1 comes first in the text.
    """
    return index0 if self.compare(index0, '<=', index1) else index1
def max(self, index0, index1):
    """
    Return whichever of index0/index1 comes last in the text.
    """
    return index1 if self.compare(index0, '<=', index1) else index0
def sel_up(self):
"""
It adds 'sel' one line up the 'insert' position
and sets the cursor one line up.
"""
self.rmsel('(RANGE_SEL_MARK)', 'insert')
self.up()
self.addsel('(RANGE_SEL_MARK)', 'insert')
def sel_down(self):
"""
It adds or removes selection one line down.
"""
self.rmsel('(RANGE_SEL_MARK)', 'insert')
self.down()
self.addsel('(RANGE_SEL_MARK)', 'insert')
def sel_right(self):
"""
It adds or removes selection one character right.
"""
self.rmsel('(RANGE_SEL_MARK)', 'insert')
self.right()
self.addsel('(RANGE_SEL_MARK)', 'insert')
def sel_left(self):
"""
It adds or removes selection one character left.
"""
self.rmsel('(RANGE_SEL_MARK)', 'insert')
self.left()
self.addsel('(RANGE_SEL_MARK)', 'insert')
def addblock(self, index0, index1):
"""
It adds block selection from index0 to index1.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
a, b = self.indint(index2)
c, d = self.indint(index3)
for ind in xrange(a, c + 1):
self.addsel('%s.%s' % (ind, min(b, d)), '%s.%s' % (ind, max(b, d)))
def rmblock(self, index0, index1):
"""
It removes block selection from index0 to index1.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
a, b = self.indint(index2)
c, d = self.indint(index3)
for ind in xrange(a, c + 1):
self.rmsel('%s.%s' % (ind, min(b, d)), '%s.%s' % (ind, max(b, d)))
def block_down(self):
"""
It adds or removes block selection one line down.
"""
a, b = self.indref('(CURSOR_LAST_COL)')
c, d = self.indcur()
index = self.index('(BLOCK_SEL_MARK)')
self.rmblock(index, '%s.%s' % (c, b))
self.down()
a, b = self.indref('(CURSOR_LAST_COL)')
c, d = self.indcur()
self.addblock(index, '%s.%s' % (c, b))
def block_up(self):
"""
It adds or removes block selection one line up.
"""
a, b = self.indref('(CURSOR_LAST_COL)')
c, d = self.indcur()
index = self.index('(BLOCK_SEL_MARK)')
self.rmblock(index, '%s.%s' % (c, b))
self.up()
a, b = self.indref('(CURSOR_LAST_COL)')
c, d = self.indcur()
self.addblock(index, '%s.%s' % (c, b))
def is_line_start(self):
    """
    Return True when the cursor is NOT at the start of its line.

    NOTE(review): negated relative to its name ('!='); kept as-is
    for caller compatibility, documentation corrected.
    """
    return self.compare('insert', '!=', 'insert linestart')
def block_left(self):
"""
It adds block selection to the left.
"""
a, b = self.indref('(CURSOR_LAST_COL)')
c, d = self.indcur()
index = self.index('(BLOCK_SEL_MARK)')
self.rmblock(index, '%s.%s' % (c, b))
self.left()
a, b = self.indref('(CURSOR_LAST_COL)')
c, d = self.indcur()
self.addblock(index, '%s.%s' % (c, b))
def is_line_end(self):
    """
    Return True when the cursor is NOT at the end of its line.

    NOTE(review): negated relative to its name ('!='); kept as-is
    for caller compatibility, documentation corrected.
    """
    return self.compare('insert', '!=', 'insert lineend')
def block_right(self):
"""
It adds/removes block selection to the right.
"""
a, b = self.indref('(CURSOR_LAST_COL)')
c, d = self.indcur()
index = self.index('(BLOCK_SEL_MARK)')
self.rmblock(index, '%s.%s' % (c, b))
self.right()
a, b = self.indref('(CURSOR_LAST_COL)')
c, d = self.indcur()
self.addblock(index, '%s.%s' % (c, b))
def clear_selection(self):
    """
    Unselect all text.
    """
    # tag_remove raises TclError when there is no current 'sel'
    # range; catch only that instead of swallowing every exception.
    try:
        self.tag_remove('sel', 'sel.first', 'sel.last')
    except TclError:
        pass
def select_char(self):
"""
Select the cursor char.
"""
self.addsel('insert', 'insert +1c')
def unselect_char(self):
"""
Unselect the cursor char.
"""
self.rmsel('insert', 'insert +1c')
def del_char(self):
"""
It deletes a char from the cursor position.
"""
self.edit_separator()
self.delete('insert', 'insert +1c')
def echo(self, data):
self.insert('insert', data)
def backspace(self):
"""
"""
self.delete('insert -1c', 'insert')
def do_undo(self):
"""
It does undo.
"""
try:
self.edit_undo()
except TclError:
pass
def do_redo(self):
"""
It redoes.
"""
try:
self.edit_redo()
except TclError:
pass
def sel_text_start(self):
"""
It selects all text from cursor position to the start position
of the text.
"""
index = self.index('insert')
self.go_text_start()
self.addsel(index, 'insert')
def sel_text_end(self):
"""
It selects all text from the cursor position to the end of the text.
"""
index = self.index('insert')
self.go_text_end()
self.addsel(index, 'insert')
def go_text_start(self):
"""
Place the cursor at the beginning of the file.
"""
self.mark_set('insert', '1.0')
self.see('insert')
def go_text_end(self):
"""
Place the cursor at the end of the file.
"""
self.mark_set('insert', 'end linestart')
self.see('insert')
def sel_line_start(self):
"""
It adds selection from the cursor position to the
start of the line.
"""
index = self.index('insert')
self.go_line_start()
self.addsel(index, 'insert')
def sel_line_end(self):
"""
It selects all text from the cursor position to the end of the line.
"""
index = self.index('insert')
self.go_line_end()
self.addsel(index, 'insert')
def go_line_start(self):
"""
Place the cursor at the beginning of the line.
"""
self.mark_set('insert', 'insert linestart')
def go_line_end(self):
"""
Place the cursor at the end of the line.
"""
self.mark_set('insert', 'insert lineend')
def go_next_word(self):
"""
Place the cursor at the next word.
"""
self.iseek('\M', index='insert', stopindex='end')
def go_prev_word(self):
"""
Place the cursor at the previous word.
"""
self.iseek('\M', backwards=True, index='insert', stopindex='1.0')
def go_next_sym(self, chars):
"""
Place the cursor at the next occurrence of one of the chars.
"""
chars = map(lambda ind: escape(ind), chars)
REG = '|'.join(chars)
self.iseek(REG, index='insert', stopindex='end')
def go_prev_sym(self, chars):
"""
Place the cursor at the previous occurrence of one of the chars.
"""
chars = map(lambda ind: escape(ind), chars)
REG = '|'.join(chars)
self.iseek(REG, backwards=True, stopindex='1.0')
def del_line(self):
"""
It deletes the cursor line, makes the cursor visible
and adds a separator to the undo stack.
"""
self.edit_separator()
self.delete('insert linestart', 'insert +1l linestart')
self.see('insert')
def cpsel(self, sep=''):
"""
Copy selected text to the clipboard.
"""
data = self.join_ranges('sel', sep)
self.clipboard_clear()
self.clipboard_append(data)
self.tag_remove('sel', 'sel.first', 'sel.last')
def ctsel(self, sep=''):
"""
It cuts the selected text.
"""
data = self.join_ranges('sel', sep)
self.clipboard_clear()
self.clipboard_append(data)
self.edit_separator()
self.delete_ranges('sel')
def del_sel(self):
"""
It deletes all selected text.
"""
self.edit_separator()
self.delete_ranges('sel')
def ptsel(self):
"""
Paste text at the cursor position.
"""
data = self.clipboard_get()
self.edit_separator()
self.insert('insert', data)
def ptsel_after(self):
"""
Paste text one line down the cursor position.
"""
data = self.clipboard_get()
self.edit_separator()
self.insert('insert +1l linestart', data)
def ptsel_before(self):
"""
Paste text one line up the cursor position.
"""
data = self.clipboard_get()
self.edit_separator()
self.insert('insert linestart', data)
def select_line(self):
"""
It adds selection to the cursor line.
"""
self.tag_add('sel', 'insert linestart', 'insert +1l linestart')
def unselect_line(self):
"""
It removes selection from the cursor line.
"""
self.tag_remove('sel', 'insert linestart', 'insert +1l linestart')
def toggle_line_selection(self):
"""
Toggle line selection.
"""
self.toggle_sel('insert linestart', 'insert +1l linestart')
def toggle_sel(self, index0, index1):
"""
Toggle selection in the range defined by index0 and index1.
"""
self.toggle_range('sel', index0, index1)
def toggle_range(self, name, index0, index1):
"""
Toggle tag name in the range defined by index0 and index1.
It means it adds a tag name to the range index0 and index1 if there is no
tag mapped to that range otherwise it removes the tag name from the range.
"""
index2 = index0
index0 = self.min(index0, index1)
index1 = self.max(index2, index1)
map = self.is_tag_range(name, index0, index1)
if map:
self.tag_remove(name, index0, index1)
else:
self.tag_add(name, index0, index1)
def select_word(self, index='insert'):
"""
Select the closest word from the cursor.
"""
index1, index2 = self.get_word_range(index)
self.tag_add('sel', index1, index2)
def get_word_range(self, index):
index1 = self.search('\W', index, regexp=True, stopindex='%s linestart' % index, backwards=True)
index2 = self.search('\W', index, regexp=True, stopindex='%s lineend' % index)
index1 = '%s linestart' % index if not index1 else '%s +1c' % index1
index2 = '%s lineend' % index if not index2 else index2
return index1, index2
def select_seq(self, index='insert'):
"""
Select the closest sequence of non blank characters from the cursor.
"""
index1, index2 = self.get_seq_range(index)
self.tag_add('sel', index1, index2)
def get_seq_range(self, index):
index1 = self.search(' ', index, regexp=True, stopindex='%s linestart' %index, backwards=True)
index2 = self.search(' ', index, regexp=True, stopindex='%s lineend' % index)
index1 = '%s linestart' % index if not index1 else '%s +1c' % index1
index2= '%s lineend' % index if not index2 else index2
return index1, index2
def get_seq(self, index='insert'):
return self.get(*self.get_seq_range(index))
def get_line(self, index='insert'):
return self.get('%s linestart' % index,
'%s lineend' % index)
def scroll_line_up(self):
"""
It scrolls one line up
"""
# should be rewritten.
# it fails with append.
self.yview(SCROLL, -1, 'units')
is_visible = self.dlineinfo('insert')
if not is_visible:
self.mark_set('insert', 'insert -1l')
def scroll_line_down(self):
"""
It scrolls one line down.
"""
# should be rewritten.
# it fails with append.
self.yview(SCROLL, 1, 'units')
is_visible = self.dlineinfo('insert')
if not is_visible:
self.mark_set('insert', 'insert +1l')
def scroll_page_down(self):
"""
It scrolls one page down.
"""
self.yview(SCROLL, 1, 'page')
self.mark_set('insert', '@0,0')
def scroll_page_up(self):
"""
It scrolls one page up.
"""
self.yview(SCROLL, -1, 'page')
self.mark_set('insert', '@0,0')
def insert_line_down(self):
"""
It inserts one line down from the cursor position.
"""
self.edit_separator()
self.insert('insert +1l linestart', '\n')
self.mark_set('insert', 'insert +1l linestart')
self.see('insert')
def select_all(self):
"""
It selects all text.
"""
self.tag_add('sel', '1.0', 'end')
def insert_line_up(self):
"""
It inserts one line up.
"""
self.edit_separator()
self.insert('insert linestart', '\n')
self.mark_set('insert', 'insert -1l linestart')
self.see('insert')
def shift_sel_right(self, width, char):
"""
Shift ranges of selected text to the right.
"""
srow, scol = self.indref('sel.first')
erow, ecol = self.indref('sel.last')
self.shift_right(srow, erow, width, char)
def shift_sel_left(self, width):
"""
Shift ranges of selected text to the left.
"""
srow, scol = self.indref('sel.first')
erow, ecol = self.indref('sel.last')
self.shift_left(srow, erow, width)
def shift_right(self, srow, erow, width, char):
"""
Given a start row and a end row it shifts
a block of text to the right.
This is specially useful when working with
source code files.
"""
self.edit_separator()
for ind in xrange(srow, erow + 1):
self.insert('%s.0' % ind, width * char)
def shift_left(self, srow, erow, width):
"""
Given a start row and a end row it shifts
a block of text to the left.
This is specially useful when working with
source code files.
"""
self.edit_separator()
for ind in xrange(srow, erow + 1):
self.delete('%s.0' % ind, '%s.%s' % (ind, width))
def collect(self, name, regex, *args, **kwargs):
    """
    Search regex inside every range tagged with `name`, yielding
    (match, index0, index1) tuples. The code below finds 'PATTERN'
    in all selected text of an AreaVi instance:

    for data, pos0, pos1 in area.collect('sel', 'PATTERN'):
        pass

    args/kwargs are forwarded to AreaVi.find.
    """
    # It should be built on top of nextrange.
    # `spans` instead of `map`: don't shadow the builtin.
    spans = self.tag_ranges(name)
    for indi in range(0, len(spans) - 1, 2):
        seq = self.find(regex, spans[indi], spans[indi + 1], *args, **kwargs)
        for indj in seq:
            yield indj
def replace_ranges(self, name, regex, data, index='1.0', stopindex='end',
                   *args, **kwargs):
    """
    Replace all occurrences of regex inside the ranges that are
    mapped to the tag name.

    name     - Name of the tag.
    regex    - The pattern.
    data     - The data to replace (a string or a callable; see
               AreaVi.replace).
    args     - Arguments given to AreaVi.find.
    kwargs   - A dictionary of arguments given to AreaVi.find.
    """
    while True:
        # `span` instead of `map`: don't shadow the builtin.
        span = self.tag_nextrange(name, index, stopindex)
        if not span:
            break
        index3, index4 = span
        index = self.replace_all(regex, data, index3, index4, *args, **kwargs)
def map_matches(self, name, matches):
    """
    Add the tag `name` to every (match, index0, index1) range coming
    from either AreaVi.find or AreaVi.collect.

    name    - The tag to be added.
    matches - An iterator from AreaVi.find or AreaVi.collect.
    """
    for _, index0, index1 in matches:
        self.tag_add(name, index0, index1)
def split(self, regex, index='1.0', stopindex='end', *args, **kwargs):
"""
It tokenizes the contents of an AreaVi widget based on a regex.
The *args, **kwargs are the same passed to AreaVi.find method.
for token, index0, index1 in area.tokenize(PATTERN):
pass
"""
index0 = index
for chk, index1, index2 in self.find(regex, index, stopindex, *args, **kwargs):
if self.compare(index1, '>', index0):
yield(self.get(index0, index1), index0, index1)
index0 = index2
else:
yield(chk, index2, stopindex)
def find(self, regex, index='1.0', stopindex='end', exact=None, regexp=True, nocase=None,
         elide=None, nolinestop=None, step=''):
    """
    Return an iterator of matches, based on the Text.search method.

    for match, index0, index1 in area.find('pattern'):
        pass

    step is appended to the next search start, so e.g. step=' +1l'
    skips the rest of the matched line between searches.
    """
    count = IntVar()
    while True:
        index = self.search(regex, index, stopindex, exact=exact, nocase=nocase,
                            nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)
        # If regex = '' it ends in an infinite loop.
        if not index or not regex:
            break
        # `size` instead of `len`: don't shadow the builtin.
        size = count.get()
        tmp = '%s +%sc' % (index, size)
        chunk = self.get(index, tmp)
        pos0 = self.index(index)
        pos1 = self.index('%s +%sc' % (index, size))
        index = '%s%s' % (tmp, step)
        yield(chunk, pos0, pos1)
def search(self, pattern, index, stopindex=None, forwards=None,
backwards=None, exact=None, regexp=None, nocase=None,
count=None, elide=None, nolinestop=None):
"""
Standard search method, but with support for the nolinestop
option which is new in tk 8.5 but not supported by tkinter out
of the box.
"""
args = [self._w, 'search']
if forwards: args.append('-forwards')
if backwards: args.append('-backwards')
if exact: args.append('-exact')
if regexp: args.append('-regexp')
if nocase: args.append('-nocase')
if elide: args.append('-elide')
if nolinestop: args.append("-nolinestop")
if count: args.append('-count'); args.append(count)
if pattern and pattern[0] == '-': args.append('--')
args.append(pattern)
args.append(index)
if stopindex: args.append(stopindex)
return str(self.tk.call(tuple(args)))
def iseek(self, regex, index='insert', stopindex='end', backwards=None, exact=None, regexp=True,
nocase=None, elide=None, nolinestop=None):
"""
Find regex backwards/fowards from index position and changes insert
mark to the prev/next match.
"""
count = IntVar()
index = self.search(regex, index=index, stopindex=stopindex, regexp=regexp,
exact=exact, nocase=nocase, elide=elide, nolinestop=nolinestop,
backwards=backwards, count=count)
if not index: return
index0 = self.index('%s +%sc' % (index, count.get()))
self.mark_set('insert', index if backwards else index0)
self.see('insert')
return index, index0
def ipick(self, name, regex, index='insert', stopindex='end', verbose=False, backwards=None, exact=None, regexp=True,
nocase=None, elide=None, nolinestop=None):
"""
"""
# Force to do a search from index.
if verbose: self.tag_remove(name, '1.0', 'end')
if not backwards: ranges = self.tag_nextrange(name, index, 'end')
else: ranges = self.tag_prevrange(name, index, '1.0')
if ranges: index0, index1 = ranges[:2]
else: index0 = index1 = index
index = self.iseek(regex, index=index0 if backwards else index1, stopindex=stopindex,
backwards=backwards, exact=exact, regexp=regexp, nocase=nocase,
elide=elide, nolinestop=nolinestop)
if not index:
return
self.tag_remove(name, '1.0', 'end')
self.tag_add(name, *index)
return index
def replace(self, regex, data, index=None, stopindex=None, forwards=None,
backwards=None, exact=None, regexp=True, nocase=None, elide=None, nolinestop=None):
"""
It is used to replace occurrences of a given match.
It is possible to use a callback function to return what is replaced
as well.
"""
count = IntVar()
index = self.search(regex, index, stopindex, forwards=forwards, backwards=backwards, exact=exact, nocase=nocase,
nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)
if not index:
return
index0 = self.index('%s +%sc' % (index, count.get()))
if callable(data):
data = data(self.get(index, index0), index, index0)
self.delete(index, index0)
self.insert(index, data)
return index, len(data)
def replace_all(self, regex, data, index='1.0', stopindex='end', exact=None, regexp=True, nocase=None,
elide=None, nolinestop=None):
"""
It is used to replace all occurrences of a given match in a range.
It accepts a callback function that determines what is replaced.
"""
# It is needed because the range will grow
# when data is inserted, the intent is searching
# over a pre defined range.
self.mark_set('(REP_STOPINDEX)', stopindex)
while True:
map = self.replace(regex, data, index, '(REP_STOPINDEX)', exact=exact, nocase=nocase,
nolinestop=nolinestop, regexp=regexp, elide=elide)
if not map:
return self.index('(REP_STOPINDEX)')
index, size = map
index = self.index('%s +%sc' % (index, size))
def get_paren_search_dir(self, index, start, end):
"""
"""
char = self.get(index, '%s +1c' % index)
if char == start:
return False
elif char == end:
return True
else:
return None
def get_paren_search_sign(self, index, start, end):
"""
"""
char = self.get(index, '%s +1c' % index)
if char == start:
return '+'
elif char == end:
return '-'
else:
return None
def sel_matching_pair_data(self, index, max=1500, pair=('(', ')')):
index = self.case_pair(index, max, *pair)
if not index: return
min = self.min(index, 'insert')
max = self.max(index, 'insert')
min = '%s +1c' % min
self.tag_add('sel', min, max)
def sel_matching_pair(self, index, max=1500, pair=('(', ')')):
"""
"""
index = self.case_pair(index, max, *pair)
if not index: return
min = self.min(index, 'insert')
max = self.max(index, 'insert')
max = '%s +1c' % max
self.tag_add('sel', min, max)
def get_matching_pair(self, index, max, start='(', end=')'):
"""
"""
index0 = self.search(start, regexp=False, index=index, backwards=True)
if not index0: return
index1 = self.search(end, regexp=False, index=index)
if not index1: return
index2 = self.case_pair(index0, max, start, end)
if not index2: return
index3 = self.case_pair(index1, max, start, end)
if not index3: return
if self.is_in_range(index, index0, index2):
return index0, index2
elif self.is_in_range(index, index3, index1):
return index3, index1
def case_pair(self, index, max, start='(', end=')'):
"""
Once this method is called, it returns an index for the next
matching parenthesis or None if the char over the cursor
isn't either '(' or ')'.
"""
dir = self.get_paren_search_dir(index, start, end)
# If dir is None then there is no match.
if dir == None: return ''
sign = self.get_paren_search_sign(index, start, end)
count = 0
# If we are searching fowards we don't need
# to add 1c.
index0 = '%s %s' % (index, '+1c' if dir else '')
size = IntVar(0)
while True:
index0 = self.search('\%s|\%s' % (start, end), index = index0,
stopindex = '%s %s%sc' % (index, sign, max),
count = size, backwards = dir, regexp = True)
if not index0: return ''
char = self.get(index0, '%s +1c' % index0)
count = count + (1 if char == start else -1)
if not count:
return index0
# When we are searching backwards we don't need
# to set a character back because index will point
# to the start of the match.
index0 = '%s %s' % (index0, '+1c' if not dir else '')
def clear_data(self):
"""
It clears all text inside an AreaVi instance.
"""
import os
self.delete('1.0', 'end')
self.filename = os.path.abspath(self.default_filename)
self.event_generate('<<ClearData>>')
def load_data(self, filename):
"""
It dumps all text from a file into an AreaVi instance.
filename - Name of the file.
"""
import os
filename = os.path.abspath(filename)
self.filename = filename
fd = open(filename, 'r')
data = fd.read()
fd.close()
# i could generate a tk event here.
try:
data = data.decode(self.charset)
except UnicodeDecodeError:
self.charset = ''
self.delete('1.0', 'end')
self.insert('1.0', data)
self.event_generate('<<LoadData>>')
type, _ = mimetypes.guess_type(self.filename)
self.event_generate('<<Load-%s>>' % type)
def decode(self, name):
self.charset = name
self.load_data(self.filename)
def save_data(self):
    """
    Write the widget's entire contents to self.filename, encoded
    with self.charset. Generates <<Pre-SaveData>>/<<Pre-Save-TYPE>>
    before the write and <<SaveData>>/<<Save-TYPE>> after it, where
    TYPE is the guessed mime type.
    """
    # `mime` instead of `type`: don't shadow the builtin.
    mime, _ = mimetypes.guess_type(self.filename)
    self.event_generate('<<Pre-SaveData>>')
    self.event_generate('<<Pre-Save-%s>>' % mime)
    data = self.get('1.0', 'end')
    data = data.encode(self.charset)
    # `with` guarantees the file is closed even if write() raises.
    with open(self.filename, 'w') as fd:
        fd.write(data)
    self.event_generate('<<SaveData>>')
    self.event_generate('<<Save-%s>>' % mime)
def save_data_as(self, filename):
"""
It saves the content of the given AreaVi instance into
a file whose name is specified in filename.
filename - Name of the file to save the data.
"""
self.filename = filename
self.save_data()
    def is_tag_range(self, name, index0, index1):
        """
        If (index0, index1) lies inside one of the ranges of the tag
        name, return that tag range as a pair of index strings
        (a truthy value); otherwise return None.

        Consider:
        area.tag_add('tag', '2.0', '5.0')

        # It returns ('2.0', '5.0') - truthy.
        area.is_tag_range('tag', '2.0', '3.0')

        # It returns None - falsy.
        area.is_tag_range('tag', '1.0', '2.0')
        """
        ranges = self.tag_ranges(name)
        # tag_ranges returns a flat start0, end0, start1, end1, ...
        # sequence; walk it two entries at a time.
        for ind in xrange(0, len(ranges) - 1, 2):
            if self.is_subrange(index0, index1, ranges[ind].string,
                                ranges[ind + 1].string):
                return ranges[ind].string, ranges[ind + 1].string
def is_in_range(self, index, index0, index1):
"""
It returns True if index0 <= index <= index1 otherwise
it returns False.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
r1 = self.compare(index2, '<=', index)
r2 = self.compare(index3, '>=', index)
return r1 and r2
    def is_subrange(self, index0, index1, index2, index3):
        """
        It returns True if both index0 and index1 lie inside the range
        delimited by index2 and index3, otherwise it returns False.
        (The original docstring read "index2 <= ... <= index2", a typo.)
        """
        r1 = self.is_in_range(index0, index2, index3)
        r2 = self.is_in_range(index1, index2, index3)
        return r1 and r2
    def swap(self, data, index0, index1):
        """
        Swap the text in the range index0, index1 for data.
        """
        # Delete first so index0 is exactly where data gets inserted.
        self.delete(index0, index1)
        self.insert(index0, data)
def swap_ranges(self, name, data, index0='1.0', index1='end'):
"""
It swaps ranges of text that are mapped to a tag name for data between index0
and index1.
"""
while True:
range = self.tag_nextrange(name, index0, index1)
if not range: break
self.swap(data, *range)
    def delete_ranges(self, name, index0='1.0', index1='end'):
        """
        It deletes ranges of text that are mapped to tag name between index0 and index1.
        """
        # Deleting is just swapping each tagged range for the empty string.
        self.swap_ranges(name, '', index0, index1)
def join_ranges(self, name, sep=''):
"""
Join ranges of text that corresponds to a tag defined by name using a seperator.
"""
data = ''
for ind in self.get_ranges(name):
data = data + ind + sep
return data
    def get_ranges(self, name):
        """
        It returns an iterator whose elements are ranges of text that
        corresponds to the ranges of the tag name.
        """
        ranges = self.tag_ranges(name)
        # tag_ranges returns a flat start0, end0, start1, end1, ...
        # sequence; consume it pairwise.
        for ind in xrange(0, len(ranges) - 1, 2):
            data = self.get(ranges[ind], ranges[ind + 1])
            yield(data)
def tag_prev_occur(self, tag_names, index0, index1, default):
"""
Should be renamed.
"""
for ind in tag_names:
pos = self.tag_prevrange(ind, index0, index1)
if pos: return pos[1]
return default
def tag_next_occur(self, tag_names, index0, index1, default):
"""
Should be renamed.
"""
for ind in tag_names:
pos = self.tag_nextrange(ind, index0, index1)
if pos: return pos[0]
return default
@staticmethod
def areavi_widgets(wid):
"""
This method is a static method that receives a widget as argument
then returns an iterator of AreaVi instances that have the wid paramater as
master widget. It is used like:
from vyapp.app import root
for ind in AreaVi.areavi_widgets(root):
ind.insert('end', 'FOO')
The code above would insert 'FOO' at the end of all AreaVi widgets
that have root as one of its master widget.
"""
for ind in wid.winfo_children():
if isinstance(ind, AreaVi):
yield ind
else:
for ind in AreaVi.areavi_widgets(ind):
yield ind
@staticmethod
def get_opened_files(wid):
"""
This method returns a dictionary that maps all AreaVi instances
that have widget as master like:
from vyapp.app import root
map = area.get_opened_files(root)
Where map is a dictionary like:
map = { '/home/tau/file.c':AreaVi_Instance,
'/home/tau/file.b': AreaVi_Instance}
"""
map = dict()
for ind in AreaVi.areavi_widgets(wid):
map[ind.filename] = ind
return map
@staticmethod
def find_all(wid, regex, index='1.0', stopindex='end', *args, **kwargs):
"""
This method is used to perform pattern searches over all AreaVi instances that have
wid as master. It basically returns an iterator that corresponds to:
from vyapp.app import root
for ind, (match, index0, index1) in area.find_all(root, 'pattern'):
pass
Where ind is the AreaVi widget that the pattern matched and match is the match,
index0 and index1 are the positions in the text.
"""
for indi in AreaVi.areavi_widgets(wid):
it = indi.find(regex, index, stopindex, *args, **kwargs)
for indj in it:
yield indi, indj
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rahul Handay <rahulha@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
ensure_in_syspath('../../')
# Import Salt Libs
from salt.states import network
# Globals
network.__salt__ = {}
network.__grains__ = {}
network.__opts__ = {}
class MockNetwork(object):
    '''
    Stand-in for salt.utils.network used by the tests below.
    '''
    def __init__(self):
        pass
    @staticmethod
    def interfaces():
        '''
        Return a canned interface table: a single 'salt' interface
        that is up.
        '''
        return dict(salt={'up': 1})
class MockGrains(object):
    '''
    Stand-in for the salt grains loader used by the tests below.
    '''
    def __init__(self):
        pass
    @staticmethod
    def grains(lis, bol):
        '''
        Return a canned grains mapping regardless of the arguments.
        '''
        return dict(A='B')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class NetworkTestCase(TestCase):
    '''
        Validate the network state
    '''
    # NOTE(review): every MagicMock side_effect list below hands out one
    # value (or exception) per call, so the assertions depend on being
    # executed in exactly this order.
    @patch('salt.states.network.salt.utils.network', MockNetwork())
    @patch('salt.states.network.salt.loader', MockGrains())
    def test_managed(self):
        '''
            Test to ensure that the named interface is configured properly
        '''
        ret = {'name': 'salt',
               'changes': {},
               'result': False,
               'comment': ''}
        change = {'interface': '--- \n+++ \n@@ -1 +1 @@\n-A\n+B',
                  'status': 'Interface salt restart to validate'}
        # First ip.get_interface call raises AttributeError; the
        # remaining ones return 'A'.
        mock = MagicMock(side_effect=[AttributeError, 'A', 'A', 'A', 'A', 'A'])
        with patch.dict(network.__salt__, {"ip.get_interface": mock}):
            self.assertDictEqual(network.managed('salt',
                                                 'stack', test='a'), ret)
            mock = MagicMock(return_value='B')
            with patch.dict(network.__salt__, {"ip.build_interface": mock}):
                mock = MagicMock(side_effect=AttributeError)
                with patch.dict(network.__salt__, {"ip.get_bond": mock}):
                    self.assertDictEqual(network.managed('salt',
                                                         'bond',
                                                         test='a'), ret)
                ret.update({'comment': 'Interface salt is set to be'
                            ' updated:\n--- \n+++ \n@@ -1 +1 @@\n-A\n+B',
                            'result': None})
                self.assertDictEqual(network.managed('salt', 'stack',
                                                     test='a'), ret)
                mock = MagicMock(return_value=True)
                with patch.dict(network.__salt__, {"ip.down": mock}):
                    with patch.dict(network.__salt__, {"ip.up": mock}):
                        ret.update({'comment': 'Interface salt updated.',
                                    'result': True,
                                    'changes': change})
                        self.assertDictEqual(network.managed('salt', 'stack'),
                                             ret)
                        with patch.dict(network.__grains__, {"A": True}):
                            with patch.dict(network.__salt__,
                                            {"saltutil.refresh_modules": mock}
                                            ):
                                ret.update({'result': True,
                                            'changes': {'interface': '--- \n+'
                                                        '++ \n@@ -1 +1 @@\n-A'
                                                        '\n+B',
                                                        'status': 'Interface'
                                                        ' salt down'}})
                                self.assertDictEqual(network.managed('salt',
                                                                     'stack',
                                                                     False),
                                                     ret)
                    ret.update({'changes': {'interface':
                                            '--- \n+++ \n@@ -1 +1 @@\n-A\n+B'},
                                'result': False,
                                'comment': "'ip.down'"})
                    self.assertDictEqual(network.managed('salt', 'stack'), ret)
    def test_routes(self):
        '''
            Test to manage network interface static routes.
        '''
        ret = {'name': 'salt',
               'changes': {},
               'result': False,
               'comment': ''}
        # ip.get_routes: error first, then a mix of falsy/truthy values.
        mock = MagicMock(side_effect=[AttributeError, False, False, "True",
                                      False, False])
        with patch.dict(network.__salt__, {"ip.get_routes": mock}):
            self.assertDictEqual(network.routes('salt'), ret)
            mock = MagicMock(side_effect=[False, True, '', True, True])
            with patch.dict(network.__salt__, {"ip.build_routes": mock}):
                ret.update({'result': True,
                            'comment': 'Interface salt routes are up to date.'
                            })
                self.assertDictEqual(network.routes('salt', test='a'), ret)
                ret.update({'comment': 'Interface salt routes are'
                            ' set to be added.',
                            'result': None})
                self.assertDictEqual(network.routes('salt', test='a'), ret)
                ret.update({'comment': 'Interface salt routes are set to be'
                            ' updated:\n--- \n+++ \n@@ -1,4 +0,0 @@\n-T\n-r'
                            '\n-u\n-e'})
                self.assertDictEqual(network.routes('salt', test='a'), ret)
                mock = MagicMock(side_effect=[AttributeError, True])
                with patch.dict(network.__salt__,
                                {"ip.apply_network_settings": mock}):
                    ret.update({'changes': {'network_routes':
                                            'Added interface salt routes.'},
                                'comment': '',
                                'result': False})
                    self.assertDictEqual(network.routes('salt'), ret)
                    ret.update({'changes': {'network_routes':
                                            'Added interface salt routes.'},
                                'comment': 'Interface salt routes added.',
                                'result': True})
                    self.assertDictEqual(network.routes('salt'), ret)
    def test_system(self):
        '''
            Test to ensure that global network settings
            are configured properly
        '''
        ret = {'name': 'salt',
               'changes': {},
               'result': False,
               'comment': ''}
        with patch.dict(network.__opts__, {"test": True}):
            mock = MagicMock(side_effect=[AttributeError, False, False, 'As'])
            with patch.dict(network.__salt__,
                            {"ip.get_network_settings": mock}):
                self.assertDictEqual(network.system('salt'), ret)
                mock = MagicMock(side_effect=[False, True, ''])
                with patch.dict(network.__salt__,
                                {"ip.build_network_settings": mock}):
                    ret.update({'comment': 'Global network settings'
                                ' are up to date.',
                                'result': True})
                    self.assertDictEqual(network.system('salt'), ret)
                    ret.update({'comment': 'Global network settings are set to'
                                ' be added.',
                                'result': None})
                    self.assertDictEqual(network.system('salt'), ret)
                    ret.update({'comment': 'Global network settings are set to'
                                ' be updated:\n--- \n+++ \n@@ -1,2 +0,0'
                                ' @@\n-A\n-s'})
                    self.assertDictEqual(network.system('salt'), ret)
        # __opts__['test'] = False exercises the branch where
        # ip.apply_network_settings is actually invoked.
        with patch.dict(network.__opts__, {"test": False}):
            mock = MagicMock(side_effect=[False, False])
            with patch.dict(network.__salt__,
                            {"ip.get_network_settings": mock}):
                mock = MagicMock(side_effect=[True, True])
                with patch.dict(network.__salt__,
                                {"ip.build_network_settings": mock}):
                    mock = MagicMock(side_effect=[AttributeError, True])
                    with patch.dict(network.__salt__,
                                    {"ip.apply_network_settings": mock}):
                        ret.update({'changes': {'network_settings':
                                                'Added global network'
                                                ' settings.'},
                                    'comment': '',
                                    'result': False})
                        self.assertDictEqual(network.system('salt'), ret)
                        ret.update({'changes': {'network_settings':
                                                'Added global network'
                                                ' settings.'},
                                    'comment': 'Global network settings'
                                    ' are up to date.',
                                    'result': True})
                        self.assertDictEqual(network.system('salt'), ret)
if __name__ == '__main__':
    # Run this module's tests through Salt's integration test runner
    # when it is executed directly.
    from integration import run_tests
    run_tests(NetworkTestCase, needs_daemon=False)
Skip the network state unit tests if the system is on Python 2.6
These tests need to be refactored to be able to run on Python 2.6
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rahul Handay <rahulha@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
import sys
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
ensure_in_syspath('../../')
# Import Salt Libs
from salt.states import network
# Globals
network.__salt__ = {}
network.__grains__ = {}
network.__opts__ = {}
class MockNetwork(object):
    '''
    Stand-in for salt.utils.network used by the tests below.
    '''
    def __init__(self):
        pass
    @staticmethod
    def interfaces():
        '''
        Return a canned interface table: a single 'salt' interface
        that is up.
        '''
        return dict(salt={'up': 1})
class MockGrains(object):
    '''
    Stand-in for the salt grains loader used by the tests below.
    '''
    def __init__(self):
        pass
    @staticmethod
    def grains(lis, bol):
        '''
        Return a canned grains mapping regardless of the arguments.
        '''
        return dict(A='B')
@skipIf(sys.version_info < (2, 7), 'This needs to be refactored to work with Python 2.6')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class NetworkTestCase(TestCase):
    '''
        Validate the network state
    '''
    # NOTE(review): every MagicMock side_effect list below hands out one
    # value (or exception) per call, so the assertions depend on being
    # executed in exactly this order.
    @patch('salt.states.network.salt.utils.network', MockNetwork())
    @patch('salt.states.network.salt.loader', MockGrains())
    def test_managed(self):
        '''
            Test to ensure that the named interface is configured properly
        '''
        ret = {'name': 'salt',
               'changes': {},
               'result': False,
               'comment': ''}
        change = {'interface': '--- \n+++ \n@@ -1 +1 @@\n-A\n+B',
                  'status': 'Interface salt restart to validate'}
        # First ip.get_interface call raises AttributeError; the
        # remaining ones return 'A'.
        mock = MagicMock(side_effect=[AttributeError, 'A', 'A', 'A', 'A', 'A'])
        with patch.dict(network.__salt__, {"ip.get_interface": mock}):
            self.assertDictEqual(network.managed('salt',
                                                 'stack', test='a'), ret)
            mock = MagicMock(return_value='B')
            with patch.dict(network.__salt__, {"ip.build_interface": mock}):
                mock = MagicMock(side_effect=AttributeError)
                with patch.dict(network.__salt__, {"ip.get_bond": mock}):
                    self.assertDictEqual(network.managed('salt',
                                                         'bond',
                                                         test='a'), ret)
                ret.update({'comment': 'Interface salt is set to be'
                            ' updated:\n--- \n+++ \n@@ -1 +1 @@\n-A\n+B',
                            'result': None})
                self.assertDictEqual(network.managed('salt', 'stack',
                                                     test='a'), ret)
                mock = MagicMock(return_value=True)
                with patch.dict(network.__salt__, {"ip.down": mock}):
                    with patch.dict(network.__salt__, {"ip.up": mock}):
                        ret.update({'comment': 'Interface salt updated.',
                                    'result': True,
                                    'changes': change})
                        self.assertDictEqual(network.managed('salt', 'stack'),
                                             ret)
                        with patch.dict(network.__grains__, {"A": True}):
                            with patch.dict(network.__salt__,
                                            {"saltutil.refresh_modules": mock}
                                            ):
                                ret.update({'result': True,
                                            'changes': {'interface': '--- \n+'
                                                        '++ \n@@ -1 +1 @@\n-A'
                                                        '\n+B',
                                                        'status': 'Interface'
                                                        ' salt down'}})
                                self.assertDictEqual(network.managed('salt',
                                                                     'stack',
                                                                     False),
                                                     ret)
                    ret.update({'changes': {'interface':
                                            '--- \n+++ \n@@ -1 +1 @@\n-A\n+B'},
                                'result': False,
                                'comment': "'ip.down'"})
                    self.assertDictEqual(network.managed('salt', 'stack'), ret)
    def test_routes(self):
        '''
            Test to manage network interface static routes.
        '''
        ret = {'name': 'salt',
               'changes': {},
               'result': False,
               'comment': ''}
        # ip.get_routes: error first, then a mix of falsy/truthy values.
        mock = MagicMock(side_effect=[AttributeError, False, False, "True",
                                      False, False])
        with patch.dict(network.__salt__, {"ip.get_routes": mock}):
            self.assertDictEqual(network.routes('salt'), ret)
            mock = MagicMock(side_effect=[False, True, '', True, True])
            with patch.dict(network.__salt__, {"ip.build_routes": mock}):
                ret.update({'result': True,
                            'comment': 'Interface salt routes are up to date.'
                            })
                self.assertDictEqual(network.routes('salt', test='a'), ret)
                ret.update({'comment': 'Interface salt routes are'
                            ' set to be added.',
                            'result': None})
                self.assertDictEqual(network.routes('salt', test='a'), ret)
                ret.update({'comment': 'Interface salt routes are set to be'
                            ' updated:\n--- \n+++ \n@@ -1,4 +0,0 @@\n-T\n-r'
                            '\n-u\n-e'})
                self.assertDictEqual(network.routes('salt', test='a'), ret)
                mock = MagicMock(side_effect=[AttributeError, True])
                with patch.dict(network.__salt__,
                                {"ip.apply_network_settings": mock}):
                    ret.update({'changes': {'network_routes':
                                            'Added interface salt routes.'},
                                'comment': '',
                                'result': False})
                    self.assertDictEqual(network.routes('salt'), ret)
                    ret.update({'changes': {'network_routes':
                                            'Added interface salt routes.'},
                                'comment': 'Interface salt routes added.',
                                'result': True})
                    self.assertDictEqual(network.routes('salt'), ret)
    def test_system(self):
        '''
            Test to ensure that global network settings
            are configured properly
        '''
        ret = {'name': 'salt',
               'changes': {},
               'result': False,
               'comment': ''}
        with patch.dict(network.__opts__, {"test": True}):
            mock = MagicMock(side_effect=[AttributeError, False, False, 'As'])
            with patch.dict(network.__salt__,
                            {"ip.get_network_settings": mock}):
                self.assertDictEqual(network.system('salt'), ret)
                mock = MagicMock(side_effect=[False, True, ''])
                with patch.dict(network.__salt__,
                                {"ip.build_network_settings": mock}):
                    ret.update({'comment': 'Global network settings'
                                ' are up to date.',
                                'result': True})
                    self.assertDictEqual(network.system('salt'), ret)
                    ret.update({'comment': 'Global network settings are set to'
                                ' be added.',
                                'result': None})
                    self.assertDictEqual(network.system('salt'), ret)
                    ret.update({'comment': 'Global network settings are set to'
                                ' be updated:\n--- \n+++ \n@@ -1,2 +0,0'
                                ' @@\n-A\n-s'})
                    self.assertDictEqual(network.system('salt'), ret)
        # __opts__['test'] = False exercises the branch where
        # ip.apply_network_settings is actually invoked.
        with patch.dict(network.__opts__, {"test": False}):
            mock = MagicMock(side_effect=[False, False])
            with patch.dict(network.__salt__,
                            {"ip.get_network_settings": mock}):
                mock = MagicMock(side_effect=[True, True])
                with patch.dict(network.__salt__,
                                {"ip.build_network_settings": mock}):
                    mock = MagicMock(side_effect=[AttributeError, True])
                    with patch.dict(network.__salt__,
                                    {"ip.apply_network_settings": mock}):
                        ret.update({'changes': {'network_settings':
                                                'Added global network'
                                                ' settings.'},
                                    'comment': '',
                                    'result': False})
                        self.assertDictEqual(network.system('salt'), ret)
                        ret.update({'changes': {'network_settings':
                                                'Added global network'
                                                ' settings.'},
                                    'comment': 'Global network settings'
                                    ' are up to date.',
                                    'result': True})
                        self.assertDictEqual(network.system('salt'), ret)
if __name__ == '__main__':
    # Run this module's tests through Salt's integration test runner
    # when it is executed directly.
    from integration import run_tests
    run_tests(NetworkTestCase, needs_daemon=False)
|
from collections import defaultdict
from uuid import uuid4
class _Packet:
_pid = 0
@classmethod
def _next_pid(cls):
from uuid import uuid4
return str(uuid4())
@classmethod
def ack(cls, request_id):
return {'pid': cls._next_pid(), 'type': 'ack', 'request_id': request_id}
@classmethod
def pong(cls, node_id, payload=None):
return cls._get_ping_pong(node_id, 'pong', payload=payload)
@classmethod
def ping(cls, node_id, payload=None):
return cls._get_ping_pong(node_id, 'ping', payload=payload)
@classmethod
def _get_ping_pong(cls, node_id, packet_type, payload=None):
if payload:
return {'pid': cls._next_pid(), 'type': packet_type, 'node_id': node_id, 'payload': payload}
return {'pid': cls._next_pid(), 'type': packet_type, 'node_id': node_id}
class ControlPacket(_Packet):
    """
    Packets exchanged with the registry for service discovery and
    bookkeeping: registration, instance queries and subscriptions.
    """
    @classmethod
    def registration(cls, ip: str, port: int, node_id, service: str, version: str, dependencies, service_type: str):
        """Packet announcing this service instance to the registry."""
        # Each dependency is reported as its (service, version) pair.
        v = [{'service': vendor.name, 'version': vendor.version} for vendor in dependencies]
        params = {'service': service,
                  'version': version,
                  'host': ip,
                  'port': port,
                  'node_id': node_id,
                  'dependencies': v,
                  'type': service_type}
        packet = {'pid': cls._next_pid(), 'type': 'register', 'params': params}
        return packet
    @classmethod
    def get_instances(cls, service, version):
        """Query packet asking for all instances of service/version."""
        params = {'service': service, 'version': version}
        packet = {'pid': cls._next_pid(),
                  'type': 'get_instances',
                  'service': service,
                  'version': version,
                  'params': params,
                  'request_id': str(uuid4())}
        return packet
    @classmethod
    def get_subscribers(cls, service, version, endpoint):
        """Query packet asking who subscribes to the given endpoint."""
        params = {'service': service, 'version': version, 'endpoint': endpoint}
        packet = {'pid': cls._next_pid(),
                  'type': 'get_subscribers',
                  'params': params,
                  'request_id': str(uuid4())}
        return packet
    @classmethod
    def send_instances(cls, service, version, request_id, instances):
        """Response packet listing instances.

        instances - iterable of (host, port, node, service_type) tuples.
        """
        instance_packet = [{'host': host, 'port': port, 'node': node, 'type': service_type} for
                           host, port, node, service_type in instances]
        instance_packet_params = {'service': service, 'version': version, 'instances': instance_packet}
        return {'pid': cls._next_pid(), 'type': 'instances', 'params': instance_packet_params, 'request_id': request_id}
    @classmethod
    # TODO : fix parsing on client side
    def deregister(cls, service, version, node_id):
        """Packet removing a node from the registry."""
        params = {'node_id': node_id, 'service': service, 'version': version}
        packet = {'pid': cls._next_pid(), 'type': 'deregister', 'params': params}
        return packet
    @classmethod
    def activated(cls, instances):
        """'registered' packet describing the available dependency nodes.

        instances - mapping of (name, version) keys to iterables of
        (host, port, node, service_type) tuples.
        """
        vendors_packet = []
        for k, v in instances.items():
            # defaultdict(list) makes 'addresses' start out as [].
            vendor_packet = defaultdict(list)
            vendor_packet['name'] = k[0]
            vendor_packet['version'] = k[1]
            for host, port, node, service_type in v:
                vendor_node_packet = {
                    'host': host,
                    'port': port,
                    'node_id': node,
                    'type': service_type
                }
                vendor_packet['addresses'].append(vendor_node_packet)
            vendors_packet.append(vendor_packet)
        params = {
            'vendors': vendors_packet
        }
        packet = {'pid': cls._next_pid(),
                  'type': 'registered',
                  'params': params}
        return packet
    @classmethod
    def xsubscribe(cls, service, version, host, port, node_id, endpoints):
        """Packet subscribing a node to a list of published events.

        endpoints - iterable of (service, version, endpoint, strategy)
        tuples.
        """
        params = {'service': service, 'version': version, 'host': host, 'port': port, 'node_id': node_id}
        events = [{'service': _service, 'version': _version, 'endpoint': endpoint, 'strategy': strategy} for
                  _service, _version, endpoint, strategy in endpoints]
        params['events'] = events
        packet = {'pid': cls._next_pid(),
                  'type': 'xsubscribe',
                  'params': params}
        return packet
    @classmethod
    def subscribers(cls, service, version, endpoint, request_id, subscribers):
        """Response packet listing the subscribers of an endpoint."""
        params = {'service': service, 'version': version, 'endpoint': endpoint}
        subscribers = [{'service': _service, 'version': _version, 'host': host, 'port': port, 'node_id': node_id,
                        'strategy': strategy} for _service, _version, host, port, node_id, strategy in subscribers]
        params['subscribers'] = subscribers
        packet = {'pid': cls._next_pid(),
                  'request_id': request_id,
                  'type': 'subscribers',
                  'params': params}
        return packet
    @classmethod
    def uptime(cls, uptimes):
        """Packet reporting uptimes (a mapping) to the registry."""
        packet = {'pid': cls._next_pid(),
                  'type': 'uptime_report',
                  'params': dict(uptimes)}
        return packet
    @classmethod
    def new_instance(cls, service_name, version, host, port, node_id, type):
        """Broadcast packet announcing a newly registered instance.

        NOTE(review): the 'type' parameter shadows the builtin; renaming
        it would break keyword callers, so it is left as-is.
        """
        params = {'service': service_name, 'version': version, 'host': host, 'port': port, 'node': node_id,
                  'type': type}
        return {'pid': cls._next_pid(),
                'type': 'new_instance',
                'params': params}
class MessagePacket(_Packet):
    """
    Packets carrying regular service traffic: requests and publications.
    """
    @classmethod
    def request(cls, name, version, app_name, packet_type, endpoint, params, entity):
        """Packet invoking an endpoint of a remote service."""
        packet = {'pid': cls._next_pid()}
        packet['app'] = app_name
        packet['service'] = name
        packet['version'] = version
        packet['entity'] = entity
        packet['endpoint'] = endpoint
        packet['type'] = packet_type
        packet['payload'] = params
        return packet
    @classmethod
    def publish(cls, publish_id, service, version, endpoint, payload):
        """Packet distributing a publication to subscribers."""
        packet = {'pid': cls._next_pid()}
        packet['type'] = 'publish'
        packet['service'] = service
        packet['version'] = version
        packet['endpoint'] = endpoint
        packet['payload'] = payload
        packet['publish_id'] = publish_id
        return packet
Reduced code repetition in _Packet._get_ping_pong
from collections import defaultdict
from uuid import uuid4
class _Packet:
_pid = 0
@classmethod
def _next_pid(cls):
from uuid import uuid4
return str(uuid4())
@classmethod
def ack(cls, request_id):
return {'pid': cls._next_pid(), 'type': 'ack', 'request_id': request_id}
@classmethod
def pong(cls, node_id, payload=None):
return cls._get_ping_pong(node_id, 'pong', payload=payload)
@classmethod
def ping(cls, node_id, payload=None):
return cls._get_ping_pong(node_id, 'ping', payload=payload)
@classmethod
def _get_ping_pong(cls, node_id, packet_type, payload=None):
return_dict = {'pid': cls._next_pid(), 'type': packet_type, 'node_id': node_id}
if payload:
return_dict['payload'] = payload
return return_dict
class ControlPacket(_Packet):
    """
    Packets exchanged with the registry for service discovery and
    bookkeeping: registration, instance queries and subscriptions.
    """
    @classmethod
    def registration(cls, ip: str, port: int, node_id, service: str, version: str, dependencies, service_type: str):
        """Packet announcing this service instance to the registry."""
        # Each dependency is reported as its (service, version) pair.
        v = [{'service': vendor.name, 'version': vendor.version} for vendor in dependencies]
        params = {'service': service,
                  'version': version,
                  'host': ip,
                  'port': port,
                  'node_id': node_id,
                  'dependencies': v,
                  'type': service_type}
        packet = {'pid': cls._next_pid(), 'type': 'register', 'params': params}
        return packet
    @classmethod
    def get_instances(cls, service, version):
        """Query packet asking for all instances of service/version."""
        params = {'service': service, 'version': version}
        packet = {'pid': cls._next_pid(),
                  'type': 'get_instances',
                  'service': service,
                  'version': version,
                  'params': params,
                  'request_id': str(uuid4())}
        return packet
    @classmethod
    def get_subscribers(cls, service, version, endpoint):
        """Query packet asking who subscribes to the given endpoint."""
        params = {'service': service, 'version': version, 'endpoint': endpoint}
        packet = {'pid': cls._next_pid(),
                  'type': 'get_subscribers',
                  'params': params,
                  'request_id': str(uuid4())}
        return packet
    @classmethod
    def send_instances(cls, service, version, request_id, instances):
        """Response packet listing instances.

        instances - iterable of (host, port, node, service_type) tuples.
        """
        instance_packet = [{'host': host, 'port': port, 'node': node, 'type': service_type} for
                           host, port, node, service_type in instances]
        instance_packet_params = {'service': service, 'version': version, 'instances': instance_packet}
        return {'pid': cls._next_pid(), 'type': 'instances', 'params': instance_packet_params, 'request_id': request_id}
    @classmethod
    # TODO : fix parsing on client side
    def deregister(cls, service, version, node_id):
        """Packet removing a node from the registry."""
        params = {'node_id': node_id, 'service': service, 'version': version}
        packet = {'pid': cls._next_pid(), 'type': 'deregister', 'params': params}
        return packet
    @classmethod
    def activated(cls, instances):
        """'registered' packet describing the available dependency nodes.

        instances - mapping of (name, version) keys to iterables of
        (host, port, node, service_type) tuples.
        """
        vendors_packet = []
        for k, v in instances.items():
            # defaultdict(list) makes 'addresses' start out as [].
            vendor_packet = defaultdict(list)
            vendor_packet['name'] = k[0]
            vendor_packet['version'] = k[1]
            for host, port, node, service_type in v:
                vendor_node_packet = {
                    'host': host,
                    'port': port,
                    'node_id': node,
                    'type': service_type
                }
                vendor_packet['addresses'].append(vendor_node_packet)
            vendors_packet.append(vendor_packet)
        params = {
            'vendors': vendors_packet
        }
        packet = {'pid': cls._next_pid(),
                  'type': 'registered',
                  'params': params}
        return packet
    @classmethod
    def xsubscribe(cls, service, version, host, port, node_id, endpoints):
        """Packet subscribing a node to a list of published events.

        endpoints - iterable of (service, version, endpoint, strategy)
        tuples.
        """
        params = {'service': service, 'version': version, 'host': host, 'port': port, 'node_id': node_id}
        events = [{'service': _service, 'version': _version, 'endpoint': endpoint, 'strategy': strategy} for
                  _service, _version, endpoint, strategy in endpoints]
        params['events'] = events
        packet = {'pid': cls._next_pid(),
                  'type': 'xsubscribe',
                  'params': params}
        return packet
    @classmethod
    def subscribers(cls, service, version, endpoint, request_id, subscribers):
        """Response packet listing the subscribers of an endpoint."""
        params = {'service': service, 'version': version, 'endpoint': endpoint}
        subscribers = [{'service': _service, 'version': _version, 'host': host, 'port': port, 'node_id': node_id,
                        'strategy': strategy} for _service, _version, host, port, node_id, strategy in subscribers]
        params['subscribers'] = subscribers
        packet = {'pid': cls._next_pid(),
                  'request_id': request_id,
                  'type': 'subscribers',
                  'params': params}
        return packet
    @classmethod
    def uptime(cls, uptimes):
        """Packet reporting uptimes (a mapping) to the registry."""
        packet = {'pid': cls._next_pid(),
                  'type': 'uptime_report',
                  'params': dict(uptimes)}
        return packet
    @classmethod
    def new_instance(cls, service_name, version, host, port, node_id, type):
        """Broadcast packet announcing a newly registered instance.

        NOTE(review): the 'type' parameter shadows the builtin; renaming
        it would break keyword callers, so it is left as-is.
        """
        params = {'service': service_name, 'version': version, 'host': host, 'port': port, 'node': node_id,
                  'type': type}
        return {'pid': cls._next_pid(),
                'type': 'new_instance',
                'params': params}
class MessagePacket(_Packet):
    """
    Packets carrying regular service traffic: requests and publications.
    """
    @classmethod
    def request(cls, name, version, app_name, packet_type, endpoint, params, entity):
        """Packet invoking an endpoint of a remote service."""
        packet = {'pid': cls._next_pid()}
        packet['app'] = app_name
        packet['service'] = name
        packet['version'] = version
        packet['entity'] = entity
        packet['endpoint'] = endpoint
        packet['type'] = packet_type
        packet['payload'] = params
        return packet
    @classmethod
    def publish(cls, publish_id, service, version, endpoint, payload):
        """Packet distributing a publication to subscribers."""
        packet = {'pid': cls._next_pid()}
        packet['type'] = 'publish'
        packet['service'] = service
        packet['version'] = version
        packet['endpoint'] = endpoint
        packet['payload'] = payload
        packet['publish_id'] = publish_id
        return packet
|
#!/usr/bin/env python
import fileinput
import re
import sys
signals = {}
signal_names = {}
def decode_instruction(opcode, ext):
    """
    Decode an AGC instruction word into its assembly mnemonic/operand.

    opcode - 15-bit instruction word; sq is its top 3 bits, qc the
             next 2, s the low 12 bits and es the low 10 bits.
    ext    - truthy when the EXTEND flag is active, selecting the
             extracode set (modeled here as sq values 0o10-0o17).

    Returns a string such as 'CA 0012', or '???' for unknown encodings.
    """
    sq = opcode >> 12
    qc = (opcode >> 10) & 0o3
    s = opcode & 0o7777
    es = s & 0o1777
    if ext:
        sq += 0o10
    if sq == 0o0:
        if s == 0o3:
            return 'RELINT'
        elif s == 0o4:
            return 'INHINT'
        elif s == 0o6:
            return 'EXTEND'
        else:
            return 'TC %04o' % s
    elif sq == 0o1:
        if qc == 0o0:
            return 'CCS %04o' % es
        else:
            return 'TCF %04o' % s
    elif sq == 0o2:
        if qc == 0o0:
            return 'DAS %04o' % (es-1)
        elif qc == 0o1:
            return 'LXCH %04o' % es
        elif qc == 0o2:
            return 'INCR %04o' % es
        elif qc == 0o3:
            return 'ADS %04o' % es
    elif sq == 0o3:
        return 'CA %04o' % s
    elif sq == 0o4:
        return 'CS %04o' % s
    elif sq == 0o5:
        if qc == 0o0:
            if s == 0o17:
                return 'RESUME'
            else:
                return 'INDEX %04o' % es
        elif qc == 0o1:
            return 'DXCH %04o' % (es-1)
        elif qc == 0o2:
            return 'TS %04o' % es
        elif qc == 0o3:
            return 'XCH %04o' % es
    elif sq == 0o6:
        return 'AD %04o' % s
    elif sq == 0o7:
        return 'MASK %04o' % s
    elif sq == 0o10:
        # Channel I/O instructions use bit 10 to split each qc pair.
        s10 = (opcode >> 9) & 0o1
        if qc == 0o0:
            if s10 == 0o0:
                return 'READ %04o' % (es & 0o777)
            else:
                return 'WRITE %04o' % (es & 0o777)
        elif qc == 0o1:
            if s10 == 0o0:
                return 'RAND %04o' % (es & 0o777)
            else:
                return 'WAND %04o' % (es & 0o777)
        elif qc == 0o2:
            if s10 == 0o0:
                return 'ROR %04o' % (es & 0o777)
            else:
                return 'WOR %04o' % (es & 0o777)
        elif qc == 0o3:
            if s10 == 0o0:
                return 'RXOR %04o' % (es & 0o777)
            else:
                return 'RUPT'
    elif sq == 0o11:
        if qc == 0o0:
            return 'DV %04o' % es
        else:
            return 'BZF %04o' % s
    elif sq == 0o12:
        if qc == 0o0:
            return 'MSU %04o' % es
        elif qc == 0o1:
            return 'QXCH %04o' % es
        elif qc == 0o2:
            return 'AUG %04o' % es
        elif qc == 0o3:
            return 'DIM %04o' % es
    elif sq == 0o13:
        return 'DCA %04o' % (s-1)
    elif sq == 0o14:
        return 'DCS %04o' % (s-1)
    elif sq == 0o15:
        return 'INDEX %04o' % s
    elif sq == 0o16:
        if qc == 0o0:
            return 'SU %04o' % es
        else:
            return 'BZMF %04o' % s
    elif sq == 0o17:
        # BUGFIX: this arm previously tested 'sq == 0o16' a second time,
        # which made it unreachable and decoded every MP as '???'.
        return 'MP %04o' % s
    return '???'
dump_lines = []
# Translate a VCD dump on stdin into a stream of decoded-instruction
# annotations on stdout. Each '$comment data_end' marker triggers a
# processing pass over everything buffered so far.
while True:
    time = 0
    staged_inst = None
    instruction_starting = False
    inkl_inst = None
    # Buffer up all the lines we need. Going on the fly is too slow
    line = sys.stdin.readline()
    if not line:
        break
    if not line.startswith('$comment data_end'):
        dump_lines.append(line)
        continue
    # BUGFIX: removed the debug dump to the hard-coded absolute path
    # /home/mike/agc_simulation/dbg.txt, which crashed whenever that
    # directory did not exist on the host.
    for line in dump_lines:
        if line.startswith('$'):
            if line.startswith('$var'):
                # Record the VCD id -> signal name mapping.
                idx = 2
                if 'var wire' in line:
                    idx += 1
                toks = line.split()
                sig_num = int(toks[idx])
                sig_name = re.match('^(?:__.*?__)?(.+?)\[', toks[idx+1]).groups()[0]
                signal_names[sig_num] = sig_name
                signals[sig_name] = 0
            elif line.startswith('$dumpvars'):
                print('$name Instruction')
                print('#0')
            continue
        if line.startswith('#'):
            # Apply staged changes
            if instruction_starting and staged_inst:
                print('#%u %s' % (time, staged_inst))
                instruction_starting = False
                if staged_inst == 'GOJAM':
                    staged_inst = 'TC 4000'
                else:
                    staged_inst = None
            time = int(line[1:])
            continue
        # 'z'/'x' (undriven/unknown) states are treated as 0.
        state = int(line[0]) if line[0] not in 'zx' else 0
        sig_num = int(line[1:])
        sig_name = signal_names[sig_num]
        signals[sig_name] = state
        if sig_name == 'T01' and state == 1 and signals['INKL'] == 0 and signals['STG1'] == 0 and signals['STG3'] == 0 and inkl_inst is None:
            if signals['STG2'] == 0 or (signals['STG2'] == 1 and staged_inst in ['RELINT', 'INHINT', 'EXTEND']):
                instruction_starting = True
        elif sig_name == 'WSQG_n' and state == 1:
            print('#%u' % time)
        elif sig_name == 'GOJAM' and state == 0:
            staged_inst = 'GOJAM'
            instruction_starting = True
        elif sig_name == 'T07' and state == 0:
            if signals['TSUDO_n'] == 0 or signals['IC2'] == 1:
                # G should be ready by now, we don't expect G to change during this time
                G = 0
                for i in range(1,16):
                    G = G | signals['G%02u' % i] << (i-1)
                staged_inst = decode_instruction(G, signals['FUTEXT'])
        elif sig_name == 'RPTFRC' and state == 1:
            staged_inst = 'RUPT'
        elif sig_name == 'PINC' and state == 1:
            print('#%u PINC' % time)
        elif sig_name == 'MINC' and state == 1:
            print('#%u MINC' % time)
        elif sig_name == 'DINC' and state == 1:
            print('#%u DINC' % time)
        elif sig_name == 'PCDU' and state == 1:
            print('#%u PCDU' % time)
        elif sig_name == 'MCDU' and state == 1:
            print('#%u MCDU' % time)
        elif sig_name == 'SHINC' and state == 1:
            print('#%u SHINC' % time)
        elif sig_name == 'SHANC' and state == 1:
            print('#%u SHANC' % time)
        elif sig_name == 'INKL' and state == 0 and inkl_inst is not None:
            staged_inst = inkl_inst
            inkl_inst = None
    print('$finish')
    sys.stdout.flush()
    dump_lines = []
Removed unneeded debug logging from the instruction decoder
#!/usr/bin/env python
import fileinput
import re
import sys
signals = {}
signal_names = {}
def decode_instruction(opcode, ext):
    """Decode a 15-bit AGC (Block II) instruction word into an assembly mnemonic.

    :param opcode: the 15-bit instruction word read from the G register
    :param ext: truthy when the EXTEND flag (FUTEXT) is set, selecting the
        extended bank of sequence codes
    :return: mnemonic string such as 'TC 1234', or '???' if undecodable
    """
    sq = opcode >> 12           # sequence (order-code) field
    qc = (opcode >> 10) & 0o3   # quarter-code field
    s = opcode & 0o7777         # 12-bit address field
    es = s & 0o1777             # 10-bit erasable address field
    if ext:
        # Extended instructions live in a second bank of sequence codes.
        sq += 0o10
    if sq == 0o0:
        if s == 0o3:
            return 'RELINT'
        elif s == 0o4:
            return 'INHINT'
        elif s == 0o6:
            return 'EXTEND'
        else:
            return 'TC %04o' % s
    elif sq == 0o1:
        if qc == 0o0:
            return 'CCS %04o' % es
        else:
            return 'TCF %04o' % s
    elif sq == 0o2:
        if qc == 0o0:
            # DAS assembles as address+1, so subtract 1 to display the operand.
            return 'DAS %04o' % (es-1)
        elif qc == 0o1:
            return 'LXCH %04o' % es
        elif qc == 0o2:
            return 'INCR %04o' % es
        elif qc == 0o3:
            return 'ADS %04o' % es
    elif sq == 0o3:
        return 'CA %04o' % s
    elif sq == 0o4:
        return 'CS %04o' % s
    elif sq == 0o5:
        if qc == 0o0:
            if s == 0o17:
                return 'RESUME'
            else:
                return 'INDEX %04o' % es
        elif qc == 0o1:
            # DXCH also assembles as address+1.
            return 'DXCH %04o' % (es-1)
        elif qc == 0o2:
            return 'TS %04o' % es
        elif qc == 0o3:
            return 'XCH %04o' % es
    elif sq == 0o6:
        return 'AD %04o' % s
    elif sq == 0o7:
        return 'MASK %04o' % s
    elif sq == 0o10:
        # Channel instructions: bit 10 selects between the read and write form.
        s10 = (opcode >> 9) & 0o1
        if qc == 0o0:
            if s10 == 0o0:
                return 'READ %04o' % (es & 0o777)
            else:
                return 'WRITE %04o' % (es & 0o777)
        elif qc == 0o1:
            if s10 == 0o0:
                return 'RAND %04o' % (es & 0o777)
            else:
                return 'WAND %04o' % (es & 0o777)
        elif qc == 0o2:
            if s10 == 0o0:
                return 'ROR %04o' % (es & 0o777)
            else:
                return 'WOR %04o' % (es & 0o777)
        elif qc == 0o3:
            if s10 == 0o0:
                return 'RXOR %04o' % (es & 0o777)
            else:
                return 'RUPT'
    elif sq == 0o11:
        if qc == 0o0:
            return 'DV %04o' % es
        else:
            return 'BZF %04o' % s
    elif sq == 0o12:
        if qc == 0o0:
            return 'MSU %04o' % es
        elif qc == 0o1:
            return 'QXCH %04o' % es
        elif qc == 0o2:
            return 'AUG %04o' % es
        elif qc == 0o3:
            return 'DIM %04o' % es
    elif sq == 0o13:
        return 'DCA %04o' % (s-1)
    elif sq == 0o14:
        return 'DCS %04o' % (s-1)
    elif sq == 0o15:
        return 'INDEX %04o' % s
    elif sq == 0o16:
        if qc == 0o0:
            return 'SU %04o' % es
        else:
            return 'BZMF %04o' % s
    elif sq == 0o17:
        # BUG FIX: this branch previously tested `sq == 0o16` a second time,
        # which could never be reached, so MP instructions decoded as '???'.
        return 'MP %04o' % s
    return '???'
dump_lines = []
# Outer loop: buffer one complete VCD dump from stdin, then emit an annotated
# instruction trace for it. Repeats until stdin is exhausted.
while True:
    # Per-dump decoder state.
    time = 0                        # current VCD timestamp
    staged_inst = None              # decoded instruction waiting to be printed
    instruction_starting = False    # next timestamp begins a new instruction
    # NOTE(review): inkl_inst is only ever read/cleared below, never assigned a
    # non-None value in this script -- the INKL restore branch looks dead; confirm.
    inkl_inst = None
    # Buffer up all the lines we need. Going on the fly is too slow
    line = sys.stdin.readline()
    if not line:
        break
    if not line.startswith('$comment data_end'):
        dump_lines.append(line)
        continue
    for line in dump_lines:
        if line.startswith('$'):
            # VCD header/meta line.
            if line.startswith('$var'):
                # Signal declaration: map the numeric VCD id to a short name.
                idx = 2
                if 'var wire' in line:
                    idx += 1
                toks = line.split()
                sig_num = int(toks[idx])
                # Strip an optional __...__ prefix and the trailing [msb:lsb] range.
                sig_name = re.match('^(?:__.*?__)?(.+?)\[', toks[idx+1]).groups()[0]
                signal_names[sig_num] = sig_name
                signals[sig_name] = 0
            elif line.startswith('$dumpvars'):
                # Start of the value section: announce our synthetic signal.
                print('$name Instruction')
                print('#0')
            continue
        if line.startswith('#'):
            # Timestamp line: emit any staged instruction first.
            # Apply staged changes
            if instruction_starting and staged_inst:
                print('#%u %s' % (time, staged_inst))
                instruction_starting = False
                if staged_inst == 'GOJAM':
                    # After a GOJAM the machine restarts at 4000 (presumably; verify).
                    staged_inst = 'TC 4000'
                else:
                    staged_inst = None
            time = int(line[1:])
            continue
        # Value-change line: '<state><signal id>'; treat z/x as 0.
        state = int(line[0]) if line[0] not in 'zx' else 0
        sig_num = int(line[1:])
        sig_name = signal_names[sig_num]
        signals[sig_name] = state
        # T01/STG*/INKL etc. appear to be AGC timing/stage pulses -- the exact
        # hardware semantics are assumed from signal names; confirm against the RTL.
        if sig_name == 'T01' and state == 1 and signals['INKL'] == 0 and signals['STG1'] == 0 and signals['STG3'] == 0 and inkl_inst is None:
            if signals['STG2'] == 0 or (signals['STG2'] == 1 and staged_inst in ['RELINT', 'INHINT', 'EXTEND']):
                instruction_starting = True
        elif sig_name == 'WSQG_n' and state == 1:
            # Emit a bare timestamp (no instruction) on WSQG_n rising.
            print('#%u' % time)
        elif sig_name == 'GOJAM' and state == 0:
            # GOJAM deasserting: mark the restart as an instruction boundary.
            staged_inst = 'GOJAM'
            instruction_starting = True
        elif sig_name == 'T07' and state == 0:
            if signals['TSUDO_n'] == 0 or signals['IC2'] == 1:
                # G should be ready by now, we don't expect G to change during this time
                G = 0
                for i in range(1,16):
                    # Assemble the 15-bit G register from its per-bit signals.
                    G = G | signals['G%02u' % i] << (i-1)
                staged_inst = decode_instruction(G, signals['FUTEXT'])
        elif sig_name == 'RPTFRC' and state == 1:
            staged_inst = 'RUPT'
        elif sig_name == 'PINC' and state == 1:
            # Counter cycles are reported directly at the current time.
            print('#%u PINC' % time)
        elif sig_name == 'MINC' and state == 1:
            print('#%u MINC' % time)
        elif sig_name == 'DINC' and state == 1:
            print('#%u DINC' % time)
        elif sig_name == 'PCDU' and state == 1:
            print('#%u PCDU' % time)
        elif sig_name == 'MCDU' and state == 1:
            print('#%u MCDU' % time)
        elif sig_name == 'SHINC' and state == 1:
            print('#%u SHINC' % time)
        elif sig_name == 'SHANC' and state == 1:
            print('#%u SHANC' % time)
        elif sig_name == 'INKL' and state == 0 and inkl_inst is not None:
            # Restore the instruction deferred by a counter sequence (see NOTE above).
            staged_inst = inkl_inst
            inkl_inst = None
    print('$finish')
    sys.stdout.flush()
    dump_lines = []
|
import os
import tempfile
import pexpect
from hamcrest import assert_that, equal_to
from cli_bdd.behave.steps import command as behave_command
from cli_bdd.core.steps.command import base_steps
from cli_bdd.lettuce.steps import command as lettuce_command
from testutils import (
BehaveStepsTestMixin,
LettuceStepsTestMixin,
StepsSentenceRegexTestMixin,
TestCase
)
class CommandStepsMixin(object):
    """Shared scenarios for the command steps.

    The concrete step runner (``execute_module_step``) is supplied by a
    framework mixin (behave or lettuce); this mixin only describes the
    behavior every implementation must satisfy.
    """

    def test_command_run(self):
        """`run_command` executes the given shell command."""
        file_path = os.path.join(tempfile.gettempdir(), 'test.txt')
        try:
            os.remove(file_path)
        except OSError:
            pass
        self.execute_module_step(
            'run_command',
            kwargs={
                'command': 'echo "hello" > %s' % file_path,
            }
        )
        # Close the handle explicitly instead of leaking it.
        with open(file_path) as result_file:
            assert_that(result_file.read(), equal_to('hello\n'))

    def test_successfully_run_command(self):
        """`successfully_run_command` passes on exit code 0, raises otherwise."""
        # no error
        self.execute_module_step(
            'successfully_run_command',
            kwargs={
                'command': 'echo "hello"',
            }
        )
        # with error
        try:
            self.execute_module_step(
                'successfully_run_command',
                kwargs={
                    'command': 'cat /',
                }
            )
        except Exception as e:
            assert_that(
                str(e),
                equal_to('cat: /: Is a directory\r\n (exit code 1)')
            )
        else:
            raise AssertionError(
                'Should fail when response is not successful'
            )

    def test_run_command_interactively(self):
        """`run_command_interactively` leaves the child open for input."""
        file_path = os.path.join(tempfile.gettempdir(), 'test_interactive.txt')
        # BUG FIX: mode was 'wr', which is invalid on Python 3 (ValueError);
        # plain 'w' is all that is needed to create the fixture file.
        with open(file_path, 'w') as ff:
            ff.write('Some text')
        context = self.execute_module_step(
            'run_command_interactively',
            kwargs={
                'command': 'rm -i %s' % file_path,
            }
        )
        # file should not be removed yet
        assert_that(os.path.isfile(file_path), equal_to(True))
        # let's communicate and say Yes
        context.command_response['child'].sendline('Y')
        context.command_response['child'].expect(pexpect.EOF)
        # file should be removed
        assert_that(os.path.isfile(file_path), equal_to(False))

    def test_type_into_command(self):
        """`type_into_command` forwards input to the interactive child."""
        file_path = os.path.join(tempfile.gettempdir(), 'test_interactive.txt')
        # BUG FIX: 'wr' -> 'w' (invalid mode on Python 3).
        with open(file_path, 'w') as ff:
            ff.write('Some text')
        context = self.execute_module_step(
            'run_command_interactively',
            kwargs={
                'command': 'rm -i %s' % file_path,
            }
        )
        # file should not be removed yet
        assert_that(os.path.isfile(file_path), equal_to(True))
        # let's communicate and say Yes via step
        self.execute_module_step(
            'type_into_command',
            context=context,
            kwargs={
                'input_': 'Y',
            }
        )
        context.command_response['child'].expect(pexpect.EOF)
        # file should be removed
        assert_that(os.path.isfile(file_path), equal_to(False))

    def test_got_interactive_dialog(self):
        """`got_interactive_dialog` matches the child's prompt within a timeout."""
        file_path = os.path.join(tempfile.gettempdir(), 'test_interactive.txt')
        # BUG FIX: 'wr' -> 'w' (invalid mode on Python 3).
        with open(file_path, 'w') as ff:
            ff.write('Some text')
        for matcher, valid in (
            ('remove test_interactive.txt', False),
            ('remove .*/test_interactive.txt', True)
        ):
            context = self.execute_module_step(
                'run_command_interactively',
                kwargs={
                    'command': 'rm -i %s' % file_path,
                }
            )
            # file should not be removed yet
            assert_that(os.path.isfile(file_path), equal_to(True))
            # let's wait for a dialog
            try:
                self.execute_module_step(
                    'got_interactive_dialog',
                    context=context,
                    kwargs={
                        'dialog_matcher': matcher,
                        'timeout': '0.1'
                    },
                )
            except AssertionError:
                if valid:
                    raise AssertionError(
                        'Should not fail with timeout '
                        'error for valid dialog match "%s"' % matcher
                    )
            else:
                if not valid:
                    raise AssertionError(
                        'Should fail with timeout '
                        'error for invalid dialog match "%s"' % matcher
                    )

    def test_output_should_contain_text__stdout(self):
        """Substring, exact and negated matching against stdout."""
        context = self.execute_module_step(
            'run_command',
            kwargs={
                'command': 'echo "hello"',
            }
        )
        # stdout contains text
        self.execute_module_step(
            'output_should_contain_text',
            context=context,
            kwargs={
                'output': 'output',
            },
            text='ell'
        )
        # stdout doesn't contain exact text
        try:
            self.execute_module_step(
                'output_should_contain_text',
                context=context,
                kwargs={
                    'output': 'output',
                    'exactly': True
                },
                text='ell'
            )
        except AssertionError as e:
            assert_that(
                str(e),
                equal_to(
                    'Comparison error. Diff:\n'
                    '*** \n'
                    '\n'
                    '--- \n'
                    '\n'
                    '***************\n'
                    '\n'
                    '*** 1,2 ****\n'
                    '\n'
                    '! hello\n'
                    '! \n'
                    '--- 1 ----\n'
                    '\n'
                    '! ell'
                )
            )
        else:
            raise AssertionError("stdout doesn't contain exact text")
        # stdout does contain exact text
        try:
            self.execute_module_step(
                'output_should_contain_text',
                context=context,
                kwargs={
                    'output': 'output',
                    'should_not': 'not'
                },
                text='ell'
            )
        except AssertionError:
            pass
        else:
            raise AssertionError("stdout does contain exact text")

    def test_output_should_contain_text__new_line_highlight(self):
        """Trailing-newline differences are made visible in the diff output."""
        # expected does not contain new line
        context = self.execute_module_step(
            'run_command',
            kwargs={
                'command': 'echo "hello"',
            }
        )
        try:
            self.execute_module_step(
                'output_should_contain_text',
                context=context,
                kwargs={
                    'output': 'output',
                    'exactly': True
                },
                text='hello'
            )
        except AssertionError as e:
            assert_that(
                str(e),
                equal_to(
                    'Comparison error. Diff:\n'
                    '*** \n'
                    '\n'
                    '--- \n'
                    '\n'
                    '***************\n'
                    '\n'
                    '*** 1,2 ****\n'
                    '\n'
                    ' hello\n'
                    '- \n'
                    '--- 1 ----\n'
                )
            )
        else:
            raise AssertionError("stdout does not contain exact text")
        # response does not contain new line
        context = self.execute_module_step(
            'run_command',
            kwargs={
                'command': 'printf "hello"',
            }
        )
        try:
            self.execute_module_step(
                'output_should_contain_text',
                context=context,
                kwargs={
                    'output': 'output',
                    'exactly': True
                },
                text='hello\n'
            )
        except AssertionError as e:
            assert_that(
                str(e),
                equal_to(
                    'Comparison error. Diff:\n'
                    '*** \n'
                    '\n'
                    '--- \n'
                    '\n'
                    '***************\n'
                    '\n'
                    '*** 1 ****\n'
                    '\n'
                    '--- 1,2 ----\n'
                    '\n'
                    ' hello\n'
                    '+ '
                )
            )
        else:
            raise AssertionError("stdout does not contain exact text")

    def test_output_should_contain_text__stderr(self):
        """Matching works against stderr too."""
        not_existing_file_path = os.path.join(
            tempfile.gettempdir(),
            'not_exists.txt'
        )
        # remove non existing file
        try:
            os.remove(not_existing_file_path)
        except OSError:
            pass
        error_context = self.execute_module_step(
            'run_command',
            kwargs={
                'command': 'rm %s' % not_existing_file_path,
            }
        )
        # stderr contains text
        self.execute_module_step(
            'output_should_contain_text',
            context=error_context,
            kwargs={
                'output': 'stderr',
            },
            text='No such file or directory'
        )

    def test_output_should_contain_lines__stdout(self):
        """Line counting with every comparison mode (exact/at least/up to/...)."""
        context = self.execute_module_step(
            'run_command',
            kwargs={
                'command': 'echo "hello\nworld\n"',
            }
        )
        # stdout contains lines
        self.execute_module_step(
            'output_should_contain_lines',
            context=context,
            kwargs={
                'output': 'output',
                'count': '2'
            }
        )
        # stdout does not contain lines
        try:
            self.execute_module_step(
                'output_should_contain_lines',
                context=context,
                kwargs={
                    'output': 'output',
                    'should_not': 'not',
                    'count': '2'
                }
            )
        except AssertionError as e:
            assert_that(
                str(e),
                equal_to(
                    '\n'
                    'Expected: not <2>\n'
                    '     but: was <2>\n'
                )
            )
        else:
            raise AssertionError("stdout contains exact number of lines")
        # stdout contains at least lines
        self.execute_module_step(
            'output_should_contain_lines',
            context=context,
            kwargs={
                'output': 'output',
                'comparison': ' at least',
                'count': '1'
            }
        )
        # stdout does not contain at least lines
        try:
            self.execute_module_step(
                'output_should_contain_lines',
                context=context,
                kwargs={
                    'output': 'output',
                    'comparison': ' at least',
                    'count': '3'
                }
            )
        except AssertionError as e:
            assert_that(
                str(e),
                equal_to(
                    '\n'
                    'Expected: a value greater than or equal to <3>\n'
                    '     but: was <2>\n'
                )
            )
        else:
            raise AssertionError("stdout contains less than 3 lines")
        # stdout contains up to lines
        self.execute_module_step(
            'output_should_contain_lines',
            context=context,
            kwargs={
                'output': 'output',
                'comparison': ' up to',
                'count': '2'
            }
        )
        # stdout does not contain up to lines
        try:
            self.execute_module_step(
                'output_should_contain_lines',
                context=context,
                kwargs={
                    'output': 'output',
                    'comparison': ' up to',
                    'count': '1'
                }
            )
        except AssertionError as e:
            assert_that(
                str(e),
                equal_to(
                    '\n'
                    'Expected: a value less than or equal to <1>\n'
                    '     but: was <2>\n'
                )
            )
        else:
            raise AssertionError("stdout contains more than 1 line")
        # stdout contains less than lines
        self.execute_module_step(
            'output_should_contain_lines',
            context=context,
            kwargs={
                'output': 'output',
                'comparison': ' less than',
                'count': '3'
            }
        )
        # stdout does not contain less than lines
        try:
            self.execute_module_step(
                'output_should_contain_lines',
                context=context,
                kwargs={
                    'output': 'output',
                    'comparison': ' less than',
                    'count': '2'
                }
            )
        except AssertionError as e:
            assert_that(
                str(e),
                equal_to(
                    '\n'
                    'Expected: a value less than <2>\n'
                    '     but: was <2>\n'
                )
            )
        else:
            raise AssertionError("stdout contains exact 2 lines")
        # stdout contains more than lines
        self.execute_module_step(
            'output_should_contain_lines',
            context=context,
            kwargs={
                'output': 'output',
                'comparison': ' more than',
                'count': '1'
            }
        )
        # stdout does not contain more than lines
        try:
            self.execute_module_step(
                'output_should_contain_lines',
                context=context,
                kwargs={
                    'output': 'output',
                    'comparison': ' more than',
                    'count': '2'
                }
            )
        except AssertionError as e:
            assert_that(
                str(e),
                equal_to(
                    '\n'
                    'Expected: a value greater than <2>\n'
                    '     but: was <2>\n'
                )
            )
        else:
            raise AssertionError("stdout contains exact 2 lines")

    def test_exit_status_should_be(self):
        """Positive and negated exit-status assertions."""
        not_existing_file_path = os.path.join(
            tempfile.gettempdir(),
            'not_exists.txt'
        )
        # remove non existing file
        try:
            os.remove(not_existing_file_path)
        except OSError:
            pass
        error_context = self.execute_module_step(
            'run_command',
            kwargs={
                'command': 'rm %s' % not_existing_file_path,
            }
        )
        # should be
        self.execute_module_step(
            'exit_status_should_be',
            context=error_context,
            kwargs={
                'exit_status': '1'
            }
        )
        # shouldn't be
        try:
            self.execute_module_step(
                'exit_status_should_be',
                context=error_context,
                kwargs={
                    'should_not': 'not',
                    'exit_status': '1'
                }
            )
        except AssertionError:
            pass
        else:
            raise AssertionError("exit status equals 1")
class TestCommandStepsSentenceRegex(StepsSentenceRegexTestMixin, TestCase):
    """Checks that each command-step sentence regex extracts the expected kwargs.

    ``step_experiments`` maps a step name to a list of cases; each case pairs
    an input sentence (``value``) with the kwargs the regex should capture
    (``expected``). The matching itself is driven by the mixin.
    """
    steps = base_steps
    step_experiments = {
        'run_command': [
            {
                'value': 'I run `sosisa`',
                'expected': {
                    'kwargs': {
                        'command': 'sosisa'
                    }
                }
            }
        ],
        'successfully_run_command': [
            {
                'value': 'I successfully run `sosisa`',
                'expected': {
                    'kwargs': {
                        'command': 'sosisa'
                    }
                }
            }
        ],
        'run_command_interactively': [
            {
                'value': 'I run `sosisa` interactively',
                'expected': {
                    'kwargs': {
                        'command': 'sosisa'
                    }
                }
            }
        ],
        'type_into_command': [
            {
                'value': 'I type "sosisa"',
                'expected': {
                    'kwargs': {
                        'input_': 'sosisa'
                    }
                }
            }
        ],
        'got_interactive_dialog': [
            {
                'value': (
                    'I got "Password:" for interactive dialog in 1 second'
                ),
                'expected': {
                    'kwargs': {
                        'dialog_matcher': 'Password:',
                        'timeout': '1'
                    }
                }
            },
            {
                'value': (
                    'I got "Name .*: " for interactive dialog in 0.05 seconds'
                ),
                'expected': {
                    'kwargs': {
                        'dialog_matcher': 'Name .*: ',
                        'timeout': '0.05'
                    }
                }
            },
            {
                # The timeout clause is optional; it parses as None when absent.
                'value': 'I got "Login:" for interactive dialog',
                'expected': {
                    'kwargs': {
                        'dialog_matcher': 'Login:',
                        'timeout': None
                    }
                }
            },
        ],
        'output_should_contain_text': [
            {
                'value': 'the output should contain',
                'expected': {
                    'kwargs': {
                        'output': 'output',
                        'should_not': None,
                        'exactly': None
                    }
                }
            },
            {
                'value': 'the stderr should not contain exactly',
                'expected': {
                    'kwargs': {
                        'output': 'stderr',
                        'should_not': 'not',
                        'exactly': 'exactly'
                    }
                }
            },
        ],
        'output_should_contain_lines': [
            {
                'value': 'the output should contain 3 lines',
                'expected': {
                    'kwargs': {
                        'output': 'output',
                        'should_not': None,
                        'comparison': None,
                        'count': '3'
                    }
                }
            },
            {
                'value': 'the stderr should not contain at least 3 lines',
                'expected': {
                    'kwargs': {
                        'output': 'stderr',
                        'should_not': 'not',
                        'comparison': 'at least',
                        'count': '3'
                    }
                }
            },
        ],
        'exit_status_should_be': [
            {
                'value': 'the exit status should be 1',
                'expected': {
                    'kwargs': {
                        'should_not': None,
                        'exit_status': '1'
                    }
                }
            },
            {
                'value': 'the exit status should not be 2',
                'expected': {
                    'kwargs': {
                        'should_not': 'not',
                        'exit_status': '2'
                    }
                }
            }
        ]
    }
class TestCommandBehaveSteps(BehaveStepsTestMixin,
                             CommandStepsMixin,
                             TestCase):
    """Runs the shared command-step scenarios against the behave bindings."""
    module = behave_command
class TestCommandLettuceSteps(LettuceStepsTestMixin,
                              CommandStepsMixin,
                              TestCase):
    """Runs the shared command-step scenarios against the lettuce bindings."""
    module = lettuce_command
Added a regex test for the singular "1 line" count in the output_should_contain_lines step sentence.
import os
import tempfile
import pexpect
from hamcrest import assert_that, equal_to
from cli_bdd.behave.steps import command as behave_command
from cli_bdd.core.steps.command import base_steps
from cli_bdd.lettuce.steps import command as lettuce_command
from testutils import (
BehaveStepsTestMixin,
LettuceStepsTestMixin,
StepsSentenceRegexTestMixin,
TestCase
)
class CommandStepsMixin(object):
    """Shared scenarios for the command steps.

    The concrete step runner (``execute_module_step``) is supplied by a
    framework mixin (behave or lettuce); this mixin only describes the
    behavior every implementation must satisfy.
    """

    def test_command_run(self):
        """`run_command` executes the given shell command."""
        file_path = os.path.join(tempfile.gettempdir(), 'test.txt')
        try:
            os.remove(file_path)
        except OSError:
            pass
        self.execute_module_step(
            'run_command',
            kwargs={
                'command': 'echo "hello" > %s' % file_path,
            }
        )
        # Close the handle explicitly instead of leaking it.
        with open(file_path) as result_file:
            assert_that(result_file.read(), equal_to('hello\n'))

    def test_successfully_run_command(self):
        """`successfully_run_command` passes on exit code 0, raises otherwise."""
        # no error
        self.execute_module_step(
            'successfully_run_command',
            kwargs={
                'command': 'echo "hello"',
            }
        )
        # with error
        try:
            self.execute_module_step(
                'successfully_run_command',
                kwargs={
                    'command': 'cat /',
                }
            )
        except Exception as e:
            assert_that(
                str(e),
                equal_to('cat: /: Is a directory\r\n (exit code 1)')
            )
        else:
            raise AssertionError(
                'Should fail when response is not successful'
            )

    def test_run_command_interactively(self):
        """`run_command_interactively` leaves the child open for input."""
        file_path = os.path.join(tempfile.gettempdir(), 'test_interactive.txt')
        # BUG FIX: mode was 'wr', which is invalid on Python 3 (ValueError);
        # plain 'w' is all that is needed to create the fixture file.
        with open(file_path, 'w') as ff:
            ff.write('Some text')
        context = self.execute_module_step(
            'run_command_interactively',
            kwargs={
                'command': 'rm -i %s' % file_path,
            }
        )
        # file should not be removed yet
        assert_that(os.path.isfile(file_path), equal_to(True))
        # let's communicate and say Yes
        context.command_response['child'].sendline('Y')
        context.command_response['child'].expect(pexpect.EOF)
        # file should be removed
        assert_that(os.path.isfile(file_path), equal_to(False))

    def test_type_into_command(self):
        """`type_into_command` forwards input to the interactive child."""
        file_path = os.path.join(tempfile.gettempdir(), 'test_interactive.txt')
        # BUG FIX: 'wr' -> 'w' (invalid mode on Python 3).
        with open(file_path, 'w') as ff:
            ff.write('Some text')
        context = self.execute_module_step(
            'run_command_interactively',
            kwargs={
                'command': 'rm -i %s' % file_path,
            }
        )
        # file should not be removed yet
        assert_that(os.path.isfile(file_path), equal_to(True))
        # let's communicate and say Yes via step
        self.execute_module_step(
            'type_into_command',
            context=context,
            kwargs={
                'input_': 'Y',
            }
        )
        context.command_response['child'].expect(pexpect.EOF)
        # file should be removed
        assert_that(os.path.isfile(file_path), equal_to(False))

    def test_got_interactive_dialog(self):
        """`got_interactive_dialog` matches the child's prompt within a timeout."""
        file_path = os.path.join(tempfile.gettempdir(), 'test_interactive.txt')
        # BUG FIX: 'wr' -> 'w' (invalid mode on Python 3).
        with open(file_path, 'w') as ff:
            ff.write('Some text')
        for matcher, valid in (
            ('remove test_interactive.txt', False),
            ('remove .*/test_interactive.txt', True)
        ):
            context = self.execute_module_step(
                'run_command_interactively',
                kwargs={
                    'command': 'rm -i %s' % file_path,
                }
            )
            # file should not be removed yet
            assert_that(os.path.isfile(file_path), equal_to(True))
            # let's wait for a dialog
            try:
                self.execute_module_step(
                    'got_interactive_dialog',
                    context=context,
                    kwargs={
                        'dialog_matcher': matcher,
                        'timeout': '0.1'
                    },
                )
            except AssertionError:
                if valid:
                    raise AssertionError(
                        'Should not fail with timeout '
                        'error for valid dialog match "%s"' % matcher
                    )
            else:
                if not valid:
                    raise AssertionError(
                        'Should fail with timeout '
                        'error for invalid dialog match "%s"' % matcher
                    )

    def test_output_should_contain_text__stdout(self):
        """Substring, exact and negated matching against stdout."""
        context = self.execute_module_step(
            'run_command',
            kwargs={
                'command': 'echo "hello"',
            }
        )
        # stdout contains text
        self.execute_module_step(
            'output_should_contain_text',
            context=context,
            kwargs={
                'output': 'output',
            },
            text='ell'
        )
        # stdout doesn't contain exact text
        try:
            self.execute_module_step(
                'output_should_contain_text',
                context=context,
                kwargs={
                    'output': 'output',
                    'exactly': True
                },
                text='ell'
            )
        except AssertionError as e:
            assert_that(
                str(e),
                equal_to(
                    'Comparison error. Diff:\n'
                    '*** \n'
                    '\n'
                    '--- \n'
                    '\n'
                    '***************\n'
                    '\n'
                    '*** 1,2 ****\n'
                    '\n'
                    '! hello\n'
                    '! \n'
                    '--- 1 ----\n'
                    '\n'
                    '! ell'
                )
            )
        else:
            raise AssertionError("stdout doesn't contain exact text")
        # stdout does contain exact text
        try:
            self.execute_module_step(
                'output_should_contain_text',
                context=context,
                kwargs={
                    'output': 'output',
                    'should_not': 'not'
                },
                text='ell'
            )
        except AssertionError:
            pass
        else:
            raise AssertionError("stdout does contain exact text")

    def test_output_should_contain_text__new_line_highlight(self):
        """Trailing-newline differences are made visible in the diff output."""
        # expected does not contain new line
        context = self.execute_module_step(
            'run_command',
            kwargs={
                'command': 'echo "hello"',
            }
        )
        try:
            self.execute_module_step(
                'output_should_contain_text',
                context=context,
                kwargs={
                    'output': 'output',
                    'exactly': True
                },
                text='hello'
            )
        except AssertionError as e:
            assert_that(
                str(e),
                equal_to(
                    'Comparison error. Diff:\n'
                    '*** \n'
                    '\n'
                    '--- \n'
                    '\n'
                    '***************\n'
                    '\n'
                    '*** 1,2 ****\n'
                    '\n'
                    ' hello\n'
                    '- \n'
                    '--- 1 ----\n'
                )
            )
        else:
            raise AssertionError("stdout does not contain exact text")
        # response does not contain new line
        context = self.execute_module_step(
            'run_command',
            kwargs={
                'command': 'printf "hello"',
            }
        )
        try:
            self.execute_module_step(
                'output_should_contain_text',
                context=context,
                kwargs={
                    'output': 'output',
                    'exactly': True
                },
                text='hello\n'
            )
        except AssertionError as e:
            assert_that(
                str(e),
                equal_to(
                    'Comparison error. Diff:\n'
                    '*** \n'
                    '\n'
                    '--- \n'
                    '\n'
                    '***************\n'
                    '\n'
                    '*** 1 ****\n'
                    '\n'
                    '--- 1,2 ----\n'
                    '\n'
                    ' hello\n'
                    '+ '
                )
            )
        else:
            raise AssertionError("stdout does not contain exact text")

    def test_output_should_contain_text__stderr(self):
        """Matching works against stderr too."""
        not_existing_file_path = os.path.join(
            tempfile.gettempdir(),
            'not_exists.txt'
        )
        # remove non existing file
        try:
            os.remove(not_existing_file_path)
        except OSError:
            pass
        error_context = self.execute_module_step(
            'run_command',
            kwargs={
                'command': 'rm %s' % not_existing_file_path,
            }
        )
        # stderr contains text
        self.execute_module_step(
            'output_should_contain_text',
            context=error_context,
            kwargs={
                'output': 'stderr',
            },
            text='No such file or directory'
        )

    def test_output_should_contain_lines__stdout(self):
        """Line counting with every comparison mode (exact/at least/up to/...)."""
        context = self.execute_module_step(
            'run_command',
            kwargs={
                'command': 'echo "hello\nworld\n"',
            }
        )
        # stdout contains lines
        self.execute_module_step(
            'output_should_contain_lines',
            context=context,
            kwargs={
                'output': 'output',
                'count': '2'
            }
        )
        # stdout does not contain lines
        try:
            self.execute_module_step(
                'output_should_contain_lines',
                context=context,
                kwargs={
                    'output': 'output',
                    'should_not': 'not',
                    'count': '2'
                }
            )
        except AssertionError as e:
            assert_that(
                str(e),
                equal_to(
                    '\n'
                    'Expected: not <2>\n'
                    '     but: was <2>\n'
                )
            )
        else:
            raise AssertionError("stdout contains exact number of lines")
        # stdout contains at least lines
        self.execute_module_step(
            'output_should_contain_lines',
            context=context,
            kwargs={
                'output': 'output',
                'comparison': ' at least',
                'count': '1'
            }
        )
        # stdout does not contain at least lines
        try:
            self.execute_module_step(
                'output_should_contain_lines',
                context=context,
                kwargs={
                    'output': 'output',
                    'comparison': ' at least',
                    'count': '3'
                }
            )
        except AssertionError as e:
            assert_that(
                str(e),
                equal_to(
                    '\n'
                    'Expected: a value greater than or equal to <3>\n'
                    '     but: was <2>\n'
                )
            )
        else:
            raise AssertionError("stdout contains less than 3 lines")
        # stdout contains up to lines
        self.execute_module_step(
            'output_should_contain_lines',
            context=context,
            kwargs={
                'output': 'output',
                'comparison': ' up to',
                'count': '2'
            }
        )
        # stdout does not contain up to lines
        try:
            self.execute_module_step(
                'output_should_contain_lines',
                context=context,
                kwargs={
                    'output': 'output',
                    'comparison': ' up to',
                    'count': '1'
                }
            )
        except AssertionError as e:
            assert_that(
                str(e),
                equal_to(
                    '\n'
                    'Expected: a value less than or equal to <1>\n'
                    '     but: was <2>\n'
                )
            )
        else:
            raise AssertionError("stdout contains more than 1 line")
        # stdout contains less than lines
        self.execute_module_step(
            'output_should_contain_lines',
            context=context,
            kwargs={
                'output': 'output',
                'comparison': ' less than',
                'count': '3'
            }
        )
        # stdout does not contain less than lines
        try:
            self.execute_module_step(
                'output_should_contain_lines',
                context=context,
                kwargs={
                    'output': 'output',
                    'comparison': ' less than',
                    'count': '2'
                }
            )
        except AssertionError as e:
            assert_that(
                str(e),
                equal_to(
                    '\n'
                    'Expected: a value less than <2>\n'
                    '     but: was <2>\n'
                )
            )
        else:
            raise AssertionError("stdout contains exact 2 lines")
        # stdout contains more than lines
        self.execute_module_step(
            'output_should_contain_lines',
            context=context,
            kwargs={
                'output': 'output',
                'comparison': ' more than',
                'count': '1'
            }
        )
        # stdout does not contain more than lines
        try:
            self.execute_module_step(
                'output_should_contain_lines',
                context=context,
                kwargs={
                    'output': 'output',
                    'comparison': ' more than',
                    'count': '2'
                }
            )
        except AssertionError as e:
            assert_that(
                str(e),
                equal_to(
                    '\n'
                    'Expected: a value greater than <2>\n'
                    '     but: was <2>\n'
                )
            )
        else:
            raise AssertionError("stdout contains exact 2 lines")

    def test_exit_status_should_be(self):
        """Positive and negated exit-status assertions."""
        not_existing_file_path = os.path.join(
            tempfile.gettempdir(),
            'not_exists.txt'
        )
        # remove non existing file
        try:
            os.remove(not_existing_file_path)
        except OSError:
            pass
        error_context = self.execute_module_step(
            'run_command',
            kwargs={
                'command': 'rm %s' % not_existing_file_path,
            }
        )
        # should be
        self.execute_module_step(
            'exit_status_should_be',
            context=error_context,
            kwargs={
                'exit_status': '1'
            }
        )
        # shouldn't be
        try:
            self.execute_module_step(
                'exit_status_should_be',
                context=error_context,
                kwargs={
                    'should_not': 'not',
                    'exit_status': '1'
                }
            )
        except AssertionError:
            pass
        else:
            raise AssertionError("exit status equals 1")
class TestCommandStepsSentenceRegex(StepsSentenceRegexTestMixin, TestCase):
    """Checks that each command-step sentence regex extracts the expected kwargs.

    ``step_experiments`` maps a step name to a list of cases; each case pairs
    an input sentence (``value``) with the kwargs the regex should capture
    (``expected``). The matching itself is driven by the mixin.
    """
    steps = base_steps
    step_experiments = {
        'run_command': [
            {
                'value': 'I run `sosisa`',
                'expected': {
                    'kwargs': {
                        'command': 'sosisa'
                    }
                }
            }
        ],
        'successfully_run_command': [
            {
                'value': 'I successfully run `sosisa`',
                'expected': {
                    'kwargs': {
                        'command': 'sosisa'
                    }
                }
            }
        ],
        'run_command_interactively': [
            {
                'value': 'I run `sosisa` interactively',
                'expected': {
                    'kwargs': {
                        'command': 'sosisa'
                    }
                }
            }
        ],
        'type_into_command': [
            {
                'value': 'I type "sosisa"',
                'expected': {
                    'kwargs': {
                        'input_': 'sosisa'
                    }
                }
            }
        ],
        'got_interactive_dialog': [
            {
                'value': (
                    'I got "Password:" for interactive dialog in 1 second'
                ),
                'expected': {
                    'kwargs': {
                        'dialog_matcher': 'Password:',
                        'timeout': '1'
                    }
                }
            },
            {
                'value': (
                    'I got "Name .*: " for interactive dialog in 0.05 seconds'
                ),
                'expected': {
                    'kwargs': {
                        'dialog_matcher': 'Name .*: ',
                        'timeout': '0.05'
                    }
                }
            },
            {
                # The timeout clause is optional; it parses as None when absent.
                'value': 'I got "Login:" for interactive dialog',
                'expected': {
                    'kwargs': {
                        'dialog_matcher': 'Login:',
                        'timeout': None
                    }
                }
            },
        ],
        'output_should_contain_text': [
            {
                'value': 'the output should contain',
                'expected': {
                    'kwargs': {
                        'output': 'output',
                        'should_not': None,
                        'exactly': None
                    }
                }
            },
            {
                'value': 'the stderr should not contain exactly',
                'expected': {
                    'kwargs': {
                        'output': 'stderr',
                        'should_not': 'not',
                        'exactly': 'exactly'
                    }
                }
            },
        ],
        'output_should_contain_lines': [
            {
                'value': 'the output should contain 3 lines',
                'expected': {
                    'kwargs': {
                        'output': 'output',
                        'should_not': None,
                        'comparison': None,
                        'count': '3'
                    }
                }
            },
            {
                'value': 'the stderr should not contain at least 3 lines',
                'expected': {
                    'kwargs': {
                        'output': 'stderr',
                        'should_not': 'not',
                        'comparison': 'at least',
                        'count': '3'
                    }
                }
            },
            {
                # Singular phrasing: "1 line" (no trailing 's') must also match.
                'value': 'the stdout should contain 1 line',
                'expected': {
                    'kwargs': {
                        'output': 'stdout',
                        'should_not': None,
                        'comparison': None,
                        'count': '1'
                    }
                }
            },
        ],
        'exit_status_should_be': [
            {
                'value': 'the exit status should be 1',
                'expected': {
                    'kwargs': {
                        'should_not': None,
                        'exit_status': '1'
                    }
                }
            },
            {
                'value': 'the exit status should not be 2',
                'expected': {
                    'kwargs': {
                        'should_not': 'not',
                        'exit_status': '2'
                    }
                }
            }
        ]
    }
class TestCommandBehaveSteps(BehaveStepsTestMixin,
                             CommandStepsMixin,
                             TestCase):
    """Runs the shared command-step scenarios against the behave bindings."""
    module = behave_command
class TestCommandLettuceSteps(LettuceStepsTestMixin,
                              CommandStepsMixin,
                              TestCase):
    """Runs the shared command-step scenarios against the lettuce bindings."""
    module = lettuce_command
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2010 (ita)
"""
Utilities and platform-specific fixes
The portability fixes try to provide a consistent behavior of the Waf API
through Python versions 2.3 to 3.X and across different platforms (win32, linux, etc)
"""
import os, sys, errno, traceback, inspect, re, shutil, datetime, gc
import subprocess # <- leave this!
try:
	from collections import deque
except ImportError:
	class deque(list):
		"""A deque for Python 2.3 which does not have one"""
		def popleft(self):
			# O(n) on a list, but acceptable as a 2.3-only fallback.
			return self.pop(0)
		def appendleft(self, x):
			# O(n) for the same reason; see popleft.
			self.insert(0, x)
try:
import _winreg as winreg
except ImportError:
try:
import winreg
except ImportError:
winreg = None
from waflib import Errors
try:
from collections import UserDict
except ImportError:
from UserDict import UserDict
try:
from hashlib import md5
except ImportError:
try:
from md5 import md5
except ImportError:
# never fail to enable fixes from another module
pass
try:
	import threading
except ImportError:
	# Platform has no threading support: force single-job builds and install
	# stub objects so the rest of waf can use the threading API unconditionally.
	if not 'JOBS' in os.environ:
		# no threading :-(
		os.environ['JOBS'] = '1'
	class threading(object):
		"""
		A fake threading class for platforms lacking the threading module.
		Use ``waf -j1`` on those platforms
		"""
		pass
	class Lock(object):
		"""Fake Lock class"""
		def acquire(self):
			pass
		def release(self):
			pass
	threading.Lock = threading.Thread = Lock
else:
	# Wrap Thread.run so that uncaught exceptions in worker threads are
	# routed through sys.excepthook and reported like main-thread errors.
	run_old = threading.Thread.run
	def run(*args, **kwargs):
		try:
			run_old(*args, **kwargs)
		except (KeyboardInterrupt, SystemExit):
			raise
		except Exception:
			sys.excepthook(*sys.exc_info())
	threading.Thread.run = run
SIG_NIL = 'iluvcuteoverload'.encode()
"""Arbitrary null value for a md5 hash. This value must be changed when the hash value is replaced (size)"""
O644 = 420
"""Constant representing the permissions for regular files (0644 raises a syntax error on python 3)"""
O755 = 493
"""Constant representing the permissions for executable files (0755 raises a syntax error on python 3)"""
rot_chr = ['\\', '|', '/', '-']
"List of characters to use when displaying the throbber (progress bar)"
rot_idx = 0
"Index of the current throbber character (progress bar)"
try:
	from collections import defaultdict
except ImportError:
	class defaultdict(dict):
		"""
		defaultdict was introduced in python 2.5, so we leave it for python 2.4 and 2.3
		"""
		def __init__(self, default_factory):
			super(defaultdict, self).__init__()
			self.default_factory = default_factory
		def __getitem__(self, key):
			try:
				return super(defaultdict, self).__getitem__(key)
			except KeyError:
				# Missing key: create the default value, store it, return it.
				value = self.default_factory()
				self[key] = value
				return value
try:
	from collections import OrderedDict as ordered_iter_dict
except ImportError:
	class ordered_iter_dict(dict):
		# Minimal insertion-ordered dict for pythons without
		# collections.OrderedDict; 'lst' tracks key order.
		# NOTE(review): two deviations from OrderedDict to confirm callers
		# tolerate: (1) re-setting an existing key moves it to the end;
		# (2) keys passed to __init__ bypass __setitem__, so they never
		# enter 'lst' and are invisible to __iter__/keys().
		def __init__(self, *k, **kw):
			self.lst = []
			dict.__init__(self, *k, **kw)
		def clear(self):
			dict.clear(self)
			self.lst = []
		def __setitem__(self, key, value):
			dict.__setitem__(self, key, value)
			try:
				self.lst.remove(key)
			except ValueError:
				pass
			self.lst.append(key)
		def __delitem__(self, key):
			dict.__delitem__(self, key)
			try:
				self.lst.remove(key)
			except ValueError:
				pass
		def __iter__(self):
			for x in self.lst:
				yield x
		def keys(self):
			return self.lst
is_win32 = sys.platform in ('win32', 'cli')
def readf(fname, m='r', encoding='ISO8859-1'):
	"""
	Read an entire file into a string; prefer the wrapper node.read(..) where possible.

	On python 3, text-mode reads are performed in binary and decoded manually so
	the behavior matches python 2.

	:type fname: string
	:param fname: Path to file
	:type m: string
	:param m: Open mode
	:type encoding: string
	:param encoding: encoding value, only used for python 3
	:rtype: string
	:return: Content of the file
	"""
	# Decide once whether we must decode manually (python 3, text mode).
	manual_decode = sys.hexversion > 0x3000000 and not 'b' in m
	if manual_decode:
		m += 'b'
	fh = open(fname, m)
	try:
		txt = fh.read()
	finally:
		fh.close()
	if manual_decode:
		txt = txt.decode(encoding)
	return txt
def writef(fname, data, m='w', encoding='ISO8859-1'):
	"""
	Write an entire file from a string; prefer the wrapper node.write(..) where possible.

	On python 3, text data is encoded manually and written in binary mode so the
	behavior matches python 2.

	:type fname: string
	:param fname: Path to file
	:type data: string
	:param data: The contents to write to the file
	:type m: string
	:param m: Open mode
	:type encoding: string
	:param encoding: encoding value, only used for python 3
	"""
	if sys.hexversion > 0x3000000 and not 'b' in m:
		# python 3 text mode: encode ourselves and switch to binary.
		m += 'b'
		data = data.encode(encoding)
	out = open(fname, m)
	try:
		out.write(data)
	finally:
		out.close()
def h_file(fname):
    """
    Compute the md5 digest of a file's contents, reading in 200kB chunks.
    May be monkey-patched with a faster variant (e.g. size+mtime based).

    :type fname: string
    :param fname: path to the file to hash
    :return: hash of the file contents
    """
    digest = md5()
    with open(fname, 'rb') as handle:
        chunk = handle.read(200000)
        while chunk:
            digest.update(chunk)
            chunk = handle.read(200000)
    return digest.digest()
# On win32, file handles are inherited by child processes by default, which
# can prevent deleting or overwriting files still held open by spawned
# compilers. These variants open files with O_NOINHERIT and replace the
# default implementations. Python >= 3.4 makes descriptors non-inheritable
# itself, hence the version guard.
if hasattr(os, 'O_NOINHERIT') and sys.hexversion < 0x3040000:
    def readf_win32(f, m='r', encoding='ISO8859-1'):
        # low-level replacement for readf() using a non-inheritable handle
        flags = os.O_NOINHERIT | os.O_RDONLY
        if 'b' in m:
            flags |= os.O_BINARY
        if '+' in m:
            flags |= os.O_RDWR
        try:
            fd = os.open(f, flags)
        except OSError:
            raise IOError('Cannot read from %r' % f)
        if sys.hexversion > 0x3000000 and not 'b' in m:
            # python 3: read bytes, then decode
            m += 'b'
            f = os.fdopen(fd, m)
            try:
                txt = f.read()
            finally:
                f.close()
            txt = txt.decode(encoding)
        else:
            f = os.fdopen(fd, m)
            try:
                txt = f.read()
            finally:
                f.close()
        return txt
    def writef_win32(f, data, m='w', encoding='ISO8859-1'):
        # low-level replacement for writef() using a non-inheritable handle
        if sys.hexversion > 0x3000000 and not 'b' in m:
            data = data.encode(encoding)
            m += 'b'
        flags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY | os.O_NOINHERIT
        if 'b' in m:
            flags |= os.O_BINARY
        if '+' in m:
            flags |= os.O_RDWR
        try:
            fd = os.open(f, flags)
        except OSError:
            raise IOError('Cannot write to %r' % f)
        f = os.fdopen(fd, m)
        try:
            f.write(data)
        finally:
            f.close()
    def h_file_win32(fname):
        # low-level replacement for h_file() using a non-inheritable handle
        try:
            fd = os.open(fname, os.O_BINARY | os.O_RDONLY | os.O_NOINHERIT)
        except OSError:
            raise IOError('Cannot read from %r' % fname)
        f = os.fdopen(fd, 'rb')
        m = md5()
        try:
            # hash in 200kB chunks; update(b'') at EOF is a no-op
            while fname:
                fname = f.read(200000)
                m.update(fname)
        finally:
            f.close()
        return m.digest()
    # replace the default functions
    readf_old = readf
    writef_old = writef
    h_file_old = h_file
    readf = readf_win32
    writef = writef_win32
    h_file = h_file_win32
# 'hex' was a bytes<->str codec on python 2 only; on python 3 the encode()
# probe raises LookupError and binascii.hexlify is used instead
try:
    x = ''.encode('hex')
except LookupError:
    import binascii
    def to_hex(s):
        ret = binascii.hexlify(s)
        if not isinstance(ret, str):
            # python 3: hexlify returns bytes, convert back to str
            ret = ret.decode('utf-8')
        return ret
else:
    def to_hex(s):
        return s.encode('hex')
to_hex.__doc__ = """
Return the hexadecimal representation of a string
:param s: string to convert
:type s: string
"""
# portable directory listing; overridden on win32 to support drive letters
listdir = os.listdir
if is_win32:
    def listdir_win32(s):
        """
        List the contents of a folder in a portable manner.
        On Win32, return the list of drive letters: ['C:', 'X:', 'Z:']
        :type s: string
        :param s: a string, which can be empty on Windows
        """
        if not s:
            try:
                import ctypes
            except ImportError:
                # there is nothing much we can do
                return [x + ':\\' for x in list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')]
            else:
                dlen = 4 # length of "?:\\x00"
                maxdrives = 26
                buf = ctypes.create_string_buffer(maxdrives * dlen)
                # fills buf with NUL-separated "X:\" entries; returns length used
                ndrives = ctypes.windll.kernel32.GetLogicalDriveStringsA(maxdrives*dlen, ctypes.byref(buf))
                return [ str(buf.raw[4*i:4*i+2].decode('ascii')) for i in range(int(ndrives/dlen)) ]
        if len(s) == 2 and s[1] == ":":
            # bare drive letter: append the separator so listdir works
            s += os.sep
        if not os.path.isdir(s):
            e = OSError('%s is not a directory' % s)
            e.errno = errno.ENOENT
            raise e
        return os.listdir(s)
    listdir = listdir_win32
def num2ver(ver):
    """
    Convert a string or tuple version number (at most 4 components) into a
    single integer; integers are passed through unchanged::

        from waflib.Utils import num2ver
        num2ver('1.3.2') == num2ver((1,3,2)) == num2ver((1,3,2,0))

    :type ver: string or tuple of numbers
    :param ver: a version number
    """
    if isinstance(ver, str):
        ver = tuple(ver.split('.'))
    if not isinstance(ver, tuple):
        return ver
    total = 0
    for pos, part in enumerate(ver[:4]):
        # each component occupies one byte, most significant first
        total += 256 ** (3 - pos) * int(part)
    return total
def ex_stack():
    """
    Extract the current exception traceback for display.

    :return: a string representing the last exception
    """
    # join the formatted traceback lines of the active exception
    return ''.join(traceback.format_exception(*sys.exc_info()))
def to_list(sth):
    """
    Convert a string to a list by splitting on whitespace; any non-string
    argument is returned unchanged::

        from waflib.Utils import to_list
        lst = to_list("a b c d")

    :param sth: List or a string of items separated by spaces
    :rtype: list
    :return: Argument converted to list
    """
    return sth.split() if isinstance(sth, str) else sth
def split_path(path):
    """Split a '/'-separated path into its components (posix default)."""
    return path.split('/')
def split_path_cygwin(path):
    """Split a cygwin path on '/', folding the leading '//' of UNC paths
    into the first component."""
    parts = path.split('/')
    if path.startswith('//'):
        parts = parts[2:]
        parts[0] = '/' + parts[0]
    return parts
# matches either path separator accepted on win32
re_sp = re.compile('[/\\\\]')
def split_path_win32(path):
    """Split a win32 path on '/' or '\\', folding the leading '\\\\' of
    UNC paths into the first component."""
    parts = re.split(re_sp, path)
    if path.startswith('\\\\'):
        parts = parts[2:]
        parts[0] = '\\' + parts[0]
    return parts
# bind split_path to the platform-appropriate implementation
if sys.platform == 'cygwin':
    split_path = split_path_cygwin
elif is_win32:
    split_path = split_path_win32
split_path.__doc__ = """
Split a path by / or \\. This function is not like os.path.split
:type path: string
:param path: path to split
:return: list of strings
"""
def check_dir(path):
    """
    Ensure that a directory exists (similar to ``mkdir -p``).

    :type path: string
    :param path: Path to directory
    """
    if os.path.isdir(path):
        return
    try:
        os.makedirs(path)
    except OSError as e:
        # tolerate races: only fail if the directory still does not exist
        if not os.path.isdir(path):
            raise Errors.WafError('Cannot create the folder %r' % path, ex=e)
def check_exe(name):
    """
    Ensure that a program exists.

    :type name: string
    :param name: name or path to program
    :return: path of the program or None
    """
    if not name:
        raise ValueError('Cannot execute an empty string!')
    def is_exe(fpath):
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
    dirname = os.path.split(name)[0]
    if dirname and is_exe(name):
        # explicit path given and executable
        return os.path.abspath(name)
    # bare name: search the PATH
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry.strip('"'), name)
        if is_exe(candidate):
            return os.path.abspath(candidate)
    return None
def def_attrs(cls, **kw):
    """
    Set default attributes on a class instance, leaving existing
    attributes untouched.

    :type cls: class
    :param cls: the class to update the given attributes in.
    :type kw: dict
    :param kw: dictionary of attributes names and values.
    """
    for name, value in kw.items():
        if not hasattr(cls, name):
            setattr(cls, name, value)
def quote_define_name(s):
    """
    Convert a string to an identifier suitable for C defines.
    Runs of non-alphanumeric characters are collapsed into a single
    underscore (Issue 1412) before the result is upper-cased.

    :type s: string
    :param s: String to convert
    :rtype: string
    :return: Identifier suitable for C defines
    """
    fu = re.sub('[^a-zA-Z0-9]', '_', s)
    # Issue 1412 - reduce multiple underscores to one in computed define names
    fu = re.sub('_+', '_', fu)
    fu = fu.upper()
    return fu
def h_list(lst):
    """
    Hash a list via the md5 of its string representation. For tuples,
    hash(tup) is much more efficient (modulo hash randomization on
    python >= 3.3).

    :param lst: list to hash
    :type lst: list of strings
    :return: hash of the list
    """
    # md5 accepts initial data in its constructor
    return md5(str(lst).encode()).digest()
def h_fun(fun):
    """
    Hash a function by its source code, caching the result on the
    ``code`` attribute of the function when possible.

    :param fun: function to hash
    :type fun: function
    :return: hash of the function
    """
    try:
        # cached from a previous call
        return fun.code
    except AttributeError:
        pass
    try:
        h = inspect.getsource(fun)
    except IOError:
        # source unavailable (builtin, interactive, ...)
        h = "nocode"
    try:
        fun.code = h
    except AttributeError:
        # some callables do not accept attributes
        pass
    return h
# matches escaped backslashes, escaped dollars, and ${NAME} references
reg_subst = re.compile(r"(\\\\)|(\$\$)|\$\{([^}]+)\}")
def subst_vars(expr, params):
    """
    Replace ${VAR} with the value of VAR taken from a dict or a config set::

        from waflib import Utils
        s = Utils.subst_vars('${PREFIX}/bin', env)

    :type expr: string
    :param expr: String to perform substitution on
    :param params: Dictionary or config set to look up variable values.
    """
    def repl_var(m):
        if m.group(1):
            return '\\'
        if m.group(2):
            return '$'
        name = m.group(3)
        try:
            # ConfigSet instances may contain lists
            return params.get_flat(name)
        except AttributeError:
            return params[name]
    return reg_subst.sub(repl_var, expr)
def destos_to_binfmt(key):
    """
    Return the binary format ('mac-o', 'pe' or 'elf') based on the
    unversioned platform name.

    :param key: platform name
    :type key: string
    :return: string representing the binary format
    """
    if key == 'darwin':
        return 'mac-o'
    if key in ('win32', 'cygwin', 'uwin', 'msys'):
        return 'pe'
    # everything else is assumed to be ELF
    return 'elf'
def unversioned_sys_platform():
    """
    Return the unversioned platform name.
    Some Python platform names contain versions, that depend on
    the build environment, e.g. linux2, freebsd6, etc.
    This returns the name without the version number. Exceptions are
    os2 and win32, which are returned verbatim.
    :rtype: string
    :return: Unversioned platform name
    """
    s = sys.platform
    if s == 'java':
        # The real OS is hidden under the JVM.
        from java.lang import System
        s = System.getProperty('os.name')
        # see http://lopica.sourceforge.net/os.html for a list of possible values
        if s == 'Mac OS X':
            return 'darwin'
        elif s.startswith('Windows '):
            return 'win32'
        elif s == 'OS/2':
            return 'os2'
        elif s == 'HP-UX':
            return 'hpux'
        elif s in ('SunOS', 'Solaris'):
            return 'sunos'
        else: s = s.lower()
    # powerpc == darwin for our purposes
    if s == 'powerpc':
        return 'darwin'
    # keep 'win32' and '*os2' verbatim, except 'sunos2' which is stripped below
    if s == 'win32' or s.endswith('os2') and s != 'sunos2': return s
    # strip the trailing version digits: 'linux2' -> 'linux', 'freebsd6' -> 'freebsd'
    return re.split('\d+$', s)[0]
def nada(*k, **kw):
    """
    A no-op placeholder accepting any arguments.

    :return: None
    """
    return None
class Timer(object):
    """
    Simple object for timing the execution of commands.
    Its string representation is the elapsed time since creation,
    e.g. '3m12.345s'::

        from waflib.Utils import Timer
        timer = Timer()
        a_few_operations()
        s = str(timer)
    """
    def __init__(self):
        # record the creation time; __str__ reports the delta from it
        self.start_time = datetime.datetime.utcnow()
    def __str__(self):
        delta = datetime.datetime.utcnow() - self.start_time
        days = delta.days
        hours, rem = divmod(delta.seconds, 3600)
        minutes, seconds = divmod(rem, 60)
        seconds += delta.microseconds * 1e-6
        # larger units are included only when non-zero (cascading)
        parts = []
        if days:
            parts.append('%dd' % days)
        if days or hours:
            parts.append('%dh' % hours)
        if days or hours or minutes:
            parts.append('%dm' % minutes)
        return '%s%.3fs' % (''.join(parts), seconds)
if is_win32:
    # shutil.copy2 does not copy the file attributes on windows, so we
    # hack into the shutil module to fix the problem
    old = shutil.copy2
    def copy2(src, dst):
        """
        shutil.copy2 does not copy the file attributes on windows, so we
        hack into the shutil module to fix the problem
        """
        old(src, dst)
        shutil.copystat(src, dst)
    setattr(shutil, 'copy2', copy2)
if os.name == 'java':
    # Jython cannot disable the gc but they can enable it ... wtf?
    try:
        gc.disable()
        gc.enable()
    except NotImplementedError:
        # make disable() a harmless alias so nogc() keeps working
        gc.disable = gc.enable
def read_la_file(path):
    """
    Read a libtool-style property file (name='value' lines) into a dict;
    used by msvc.py.

    :param path: file to read
    :type path: string
    """
    matcher = re.compile(r'^([^=]+)=\'(.*)\'$')
    dc = {}
    for line in readf(path).splitlines():
        tokens = matcher.split(line.strip())
        # a matching line splits into [pre, name, value, post]
        if len(tokens) == 4:
            dc[tokens[1]] = tokens[2]
    return dc
def nogc(fun):
    """
    Decorator: disable the garbage collector while the wrapped function
    runs. Used in the build context when storing/loading the build cache
    file (pickle).

    :param fun: function to execute
    :type fun: function
    :return: the return value of the function executed
    """
    def wrapper(*k, **kw):
        try:
            gc.disable()
            ret = fun(*k, **kw)
        finally:
            # always re-enable, even if fun raised
            gc.enable()
        return ret
    wrapper.__doc__ = fun.__doc__
    return wrapper
def run_once(fun):
    """
    Decorator: memoize a single-argument function::

        @run_once
        def foo(k):
            return 345*2343

    :param fun: function to execute
    :type fun: function
    :return: the return value of the function executed
    """
    cache = {}
    def wrap(k):
        if k in cache:
            return cache[k]
        ret = cache[k] = fun(k)
        return ret
    # expose the cache for inspection/clearing
    wrap.__cache__ = cache
    return wrap
def get_registry_app_path(key, filename):
    # Look up an executable's path through the Windows registry "App Paths"
    # key; returns None when winreg is unavailable (non-Windows) or when
    # the registry entry is missing or does not point to an actual file.
    if not winreg:
        return None
    try:
        # only the first element of filename is used as the program name
        result = winreg.QueryValue(key, "Software\\Microsoft\\Windows\\CurrentVersion\\App Paths\\%s.exe" % filename[0])
    except WindowsError:
        pass
    else:
        if os.path.isfile(result):
            return result
# Issue 1412 - reduce multiple underscores to one in computed define names
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2010 (ita)
"""
Utilities and platform-specific fixes
The portability fixes try to provide a consistent behavior of the Waf API
through Python versions 2.3 to 3.X and across different platforms (win32, linux, etc)
"""
import os, sys, errno, traceback, inspect, re, shutil, datetime, gc
import subprocess # <- leave this!
try:
    from collections import deque
except ImportError:
    # Python 2.3 fallback implementing only the two methods waf uses
    class deque(list):
        """A deque for Python 2.3 which does not have one"""
        def popleft(self):
            return self.pop(0)
        def appendleft(self, x):
            self.insert(0, x)
try:
import _winreg as winreg
except ImportError:
try:
import winreg
except ImportError:
winreg = None
from waflib import Errors
try:
from collections import UserDict
except ImportError:
from UserDict import UserDict
try:
from hashlib import md5
except ImportError:
try:
from md5 import md5
except ImportError:
# never fail to enable fixes from another module
pass
try:
    import threading
except ImportError:
    if not 'JOBS' in os.environ:
        # no threading :-(
        os.environ['JOBS'] = '1'
    class threading(object):
        """
        A fake threading class for platforms lacking the threading module.
        Use ``waf -j1`` on those platforms
        """
        pass
    class Lock(object):
        """Fake Lock class"""
        def acquire(self):
            pass
        def release(self):
            pass
    threading.Lock = threading.Thread = Lock
else:
    # route uncaught thread exceptions through sys.excepthook instead of
    # the default "Exception in thread ..." dump on stderr
    run_old = threading.Thread.run
    def run(*args, **kwargs):
        try:
            run_old(*args, **kwargs)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            sys.excepthook(*sys.exc_info())
    threading.Thread.run = run
# module-level constants shared by the rest of waflib
SIG_NIL = 'iluvcuteoverload'.encode()
"""Arbitrary null value for a md5 hash. This value must be changed when the hash value is replaced (size)"""
O644 = 420
"""Constant representing the permissions for regular files (0644 raises a syntax error on python 3)"""
O755 = 493
"""Constant representing the permissions for executable files (0755 raises a syntax error on python 3)"""
rot_chr = ['\\', '|', '/', '-']
"List of characters to use when displaying the throbber (progress bar)"
rot_idx = 0
"Index of the current throbber character (progress bar)"
try:
    from collections import defaultdict
except ImportError:
    class defaultdict(dict):
        """
        defaultdict was introduced in python 2.5, so we leave it for python 2.4 and 2.3
        """
        def __init__(self, default_factory):
            super(defaultdict, self).__init__()
            self.default_factory = default_factory
        def __getitem__(self, key):
            try:
                return super(defaultdict, self).__getitem__(key)
            except KeyError:
                # missing key: create, store and return a default value
                value = self.default_factory()
                self[key] = value
                return value
# Insertion-ordered dict: use collections.OrderedDict when available,
# otherwise fall back to a minimal implementation covering only the
# subset of the API used by waf (clear/setitem/delitem/iter/keys).
try:
    from collections import OrderedDict as ordered_iter_dict
except ImportError:
    class ordered_iter_dict(dict):
        def __init__(self, *k, **kw):
            # keys in insertion order
            self.lst = []
            dict.__init__(self, *k, **kw)
        def clear(self):
            dict.clear(self)
            self.lst = []
        def __setitem__(self, key, value):
            dict.__setitem__(self, key, value)
            try:
                # re-setting an existing key moves it to the end
                self.lst.remove(key)
            except ValueError:
                pass
            self.lst.append(key)
        def __delitem__(self, key):
            dict.__delitem__(self, key)
            try:
                self.lst.remove(key)
            except ValueError:
                pass
        def __iter__(self):
            for x in self.lst:
                yield x
        def keys(self):
            return self.lst
# True on native win32 and IronPython ('cli'); selects the win32-specific
# helpers defined below
is_win32 = sys.platform in ('win32', 'cli')
def readf(fname, m='r', encoding='ISO8859-1'):
    """
    Read an entire file into a string; prefer node.read(..) when a node
    object is available.

    :type fname: string
    :param fname: Path to file
    :type m: string
    :param m: Open mode
    :type encoding: string
    :param encoding: encoding value, only used for python 3
    :rtype: string
    :return: Content of the file
    """
    if sys.hexversion > 0x3000000 and not 'b' in m:
        # python 3: read raw bytes, then decode explicitly
        m += 'b'
        with open(fname, m) as handle:
            contents = handle.read()
        contents = contents.decode(encoding)
    else:
        with open(fname, m) as handle:
            contents = handle.read()
    return contents
def writef(fname, data, m='w', encoding='ISO8859-1'):
    """
    Write an entire file from a string; prefer node.write(..) when a node
    object is available.

    :type fname: string
    :param fname: Path to file
    :type data: string
    :param data: The contents to write to the file
    :type m: string
    :param m: Open mode
    :type encoding: string
    :param encoding: encoding value, only used for python 3
    """
    if sys.hexversion > 0x3000000 and not 'b' in m:
        # python 3: encode explicitly and write raw bytes
        data = data.encode(encoding)
        m += 'b'
    with open(fname, m) as handle:
        handle.write(data)
def h_file(fname):
    """
    Compute the md5 digest of a file's contents, reading in 200kB chunks.
    May be monkey-patched with a faster variant (e.g. size+mtime based).

    :type fname: string
    :param fname: path to the file to hash
    :return: hash of the file contents
    """
    digest = md5()
    with open(fname, 'rb') as handle:
        chunk = handle.read(200000)
        while chunk:
            digest.update(chunk)
            chunk = handle.read(200000)
    return digest.digest()
# On win32, file handles are inherited by child processes by default, which
# can prevent deleting or overwriting files still held open by spawned
# compilers. These variants open files with O_NOINHERIT and replace the
# default implementations. Python >= 3.4 makes descriptors non-inheritable
# itself, hence the version guard.
if hasattr(os, 'O_NOINHERIT') and sys.hexversion < 0x3040000:
    def readf_win32(f, m='r', encoding='ISO8859-1'):
        # low-level replacement for readf() using a non-inheritable handle
        flags = os.O_NOINHERIT | os.O_RDONLY
        if 'b' in m:
            flags |= os.O_BINARY
        if '+' in m:
            flags |= os.O_RDWR
        try:
            fd = os.open(f, flags)
        except OSError:
            raise IOError('Cannot read from %r' % f)
        if sys.hexversion > 0x3000000 and not 'b' in m:
            # python 3: read bytes, then decode
            m += 'b'
            f = os.fdopen(fd, m)
            try:
                txt = f.read()
            finally:
                f.close()
            txt = txt.decode(encoding)
        else:
            f = os.fdopen(fd, m)
            try:
                txt = f.read()
            finally:
                f.close()
        return txt
    def writef_win32(f, data, m='w', encoding='ISO8859-1'):
        # low-level replacement for writef() using a non-inheritable handle
        if sys.hexversion > 0x3000000 and not 'b' in m:
            data = data.encode(encoding)
            m += 'b'
        flags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY | os.O_NOINHERIT
        if 'b' in m:
            flags |= os.O_BINARY
        if '+' in m:
            flags |= os.O_RDWR
        try:
            fd = os.open(f, flags)
        except OSError:
            raise IOError('Cannot write to %r' % f)
        f = os.fdopen(fd, m)
        try:
            f.write(data)
        finally:
            f.close()
    def h_file_win32(fname):
        # low-level replacement for h_file() using a non-inheritable handle
        try:
            fd = os.open(fname, os.O_BINARY | os.O_RDONLY | os.O_NOINHERIT)
        except OSError:
            raise IOError('Cannot read from %r' % fname)
        f = os.fdopen(fd, 'rb')
        m = md5()
        try:
            # hash in 200kB chunks; update(b'') at EOF is a no-op
            while fname:
                fname = f.read(200000)
                m.update(fname)
        finally:
            f.close()
        return m.digest()
    # replace the default functions
    readf_old = readf
    writef_old = writef
    h_file_old = h_file
    readf = readf_win32
    writef = writef_win32
    h_file = h_file_win32
# 'hex' was a bytes<->str codec on python 2 only; on python 3 the encode()
# probe raises LookupError and binascii.hexlify is used instead
try:
    x = ''.encode('hex')
except LookupError:
    import binascii
    def to_hex(s):
        ret = binascii.hexlify(s)
        if not isinstance(ret, str):
            # python 3: hexlify returns bytes, convert back to str
            ret = ret.decode('utf-8')
        return ret
else:
    def to_hex(s):
        return s.encode('hex')
to_hex.__doc__ = """
Return the hexadecimal representation of a string
:param s: string to convert
:type s: string
"""
# portable directory listing; overridden on win32 to support drive letters
listdir = os.listdir
if is_win32:
    def listdir_win32(s):
        """
        List the contents of a folder in a portable manner.
        On Win32, return the list of drive letters: ['C:', 'X:', 'Z:']
        :type s: string
        :param s: a string, which can be empty on Windows
        """
        if not s:
            try:
                import ctypes
            except ImportError:
                # there is nothing much we can do
                return [x + ':\\' for x in list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')]
            else:
                dlen = 4 # length of "?:\\x00"
                maxdrives = 26
                buf = ctypes.create_string_buffer(maxdrives * dlen)
                # fills buf with NUL-separated "X:\" entries; returns length used
                ndrives = ctypes.windll.kernel32.GetLogicalDriveStringsA(maxdrives*dlen, ctypes.byref(buf))
                return [ str(buf.raw[4*i:4*i+2].decode('ascii')) for i in range(int(ndrives/dlen)) ]
        if len(s) == 2 and s[1] == ":":
            # bare drive letter: append the separator so listdir works
            s += os.sep
        if not os.path.isdir(s):
            e = OSError('%s is not a directory' % s)
            e.errno = errno.ENOENT
            raise e
        return os.listdir(s)
    listdir = listdir_win32
def num2ver(ver):
    """
    Convert a string or tuple version number (at most 4 components) into a
    single integer; integers are passed through unchanged::

        from waflib.Utils import num2ver
        num2ver('1.3.2') == num2ver((1,3,2)) == num2ver((1,3,2,0))

    :type ver: string or tuple of numbers
    :param ver: a version number
    """
    if isinstance(ver, str):
        ver = tuple(ver.split('.'))
    if not isinstance(ver, tuple):
        return ver
    total = 0
    for pos, part in enumerate(ver[:4]):
        # each component occupies one byte, most significant first
        total += 256 ** (3 - pos) * int(part)
    return total
def ex_stack():
    """
    Extract the current exception traceback for display.

    :return: a string representing the last exception
    """
    # join the formatted traceback lines of the active exception
    return ''.join(traceback.format_exception(*sys.exc_info()))
def to_list(sth):
    """
    Convert a string to a list by splitting on whitespace; any non-string
    argument is returned unchanged::

        from waflib.Utils import to_list
        lst = to_list("a b c d")

    :param sth: List or a string of items separated by spaces
    :rtype: list
    :return: Argument converted to list
    """
    return sth.split() if isinstance(sth, str) else sth
def split_path(path):
    """Split a '/'-separated path into its components (posix default)."""
    return path.split('/')
def split_path_cygwin(path):
    """Split a cygwin path on '/', folding the leading '//' of UNC paths
    into the first component."""
    parts = path.split('/')
    if path.startswith('//'):
        parts = parts[2:]
        parts[0] = '/' + parts[0]
    return parts
# matches either path separator accepted on win32
re_sp = re.compile('[/\\\\]')
def split_path_win32(path):
    """Split a win32 path on '/' or '\\', folding the leading '\\\\' of
    UNC paths into the first component."""
    parts = re.split(re_sp, path)
    if path.startswith('\\\\'):
        parts = parts[2:]
        parts[0] = '\\' + parts[0]
    return parts
# bind split_path to the platform-appropriate implementation
if sys.platform == 'cygwin':
    split_path = split_path_cygwin
elif is_win32:
    split_path = split_path_win32
split_path.__doc__ = """
Split a path by / or \\. This function is not like os.path.split
:type path: string
:param path: path to split
:return: list of strings
"""
def check_dir(path):
    """
    Ensure that a directory exists (similar to ``mkdir -p``).

    :type path: string
    :param path: Path to directory
    """
    if os.path.isdir(path):
        return
    try:
        os.makedirs(path)
    except OSError as e:
        # tolerate races: only fail if the directory still does not exist
        if not os.path.isdir(path):
            raise Errors.WafError('Cannot create the folder %r' % path, ex=e)
def check_exe(name):
    """
    Ensure that a program exists.

    :type name: string
    :param name: name or path to program
    :return: path of the program or None
    """
    if not name:
        raise ValueError('Cannot execute an empty string!')
    def is_exe(fpath):
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
    dirname = os.path.split(name)[0]
    if dirname and is_exe(name):
        # explicit path given and executable
        return os.path.abspath(name)
    # bare name: search the PATH
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry.strip('"'), name)
        if is_exe(candidate):
            return os.path.abspath(candidate)
    return None
def def_attrs(cls, **kw):
    """
    Set default attributes on a class instance, leaving existing
    attributes untouched.

    :type cls: class
    :param cls: the class to update the given attributes in.
    :type kw: dict
    :param kw: dictionary of attributes names and values.
    """
    for name, value in kw.items():
        if not hasattr(cls, name):
            setattr(cls, name, value)
def quote_define_name(s):
    """
    Convert a string to an identifier suitable for C defines: runs of
    non-alphanumeric characters become a single underscore, then the
    result is upper-cased.

    :type s: string
    :param s: String to convert
    :rtype: string
    :return: Identifier suitable for C defines
    """
    ident = re.sub('_+', '_', re.sub('[^a-zA-Z0-9]', '_', s))
    return ident.upper()
def h_list(lst):
    """
    Hash a list via the md5 of its string representation. For tuples,
    hash(tup) is much more efficient (modulo hash randomization on
    python >= 3.3).

    :param lst: list to hash
    :type lst: list of strings
    :return: hash of the list
    """
    # md5 accepts initial data in its constructor
    return md5(str(lst).encode()).digest()
def h_fun(fun):
    """
    Hash a function by its source code, caching the result on the
    ``code`` attribute of the function when possible.

    :param fun: function to hash
    :type fun: function
    :return: hash of the function
    """
    try:
        # cached from a previous call
        return fun.code
    except AttributeError:
        pass
    try:
        h = inspect.getsource(fun)
    except IOError:
        # source unavailable (builtin, interactive, ...)
        h = "nocode"
    try:
        fun.code = h
    except AttributeError:
        # some callables do not accept attributes
        pass
    return h
# matches escaped backslashes, escaped dollars, and ${NAME} references
reg_subst = re.compile(r"(\\\\)|(\$\$)|\$\{([^}]+)\}")
def subst_vars(expr, params):
    """
    Replace ${VAR} with the value of VAR taken from a dict or a config set::

        from waflib import Utils
        s = Utils.subst_vars('${PREFIX}/bin', env)

    :type expr: string
    :param expr: String to perform substitution on
    :param params: Dictionary or config set to look up variable values.
    """
    def repl_var(m):
        if m.group(1):
            return '\\'
        if m.group(2):
            return '$'
        name = m.group(3)
        try:
            # ConfigSet instances may contain lists
            return params.get_flat(name)
        except AttributeError:
            return params[name]
    return reg_subst.sub(repl_var, expr)
def destos_to_binfmt(key):
    """
    Return the binary format ('mac-o', 'pe' or 'elf') based on the
    unversioned platform name.

    :param key: platform name
    :type key: string
    :return: string representing the binary format
    """
    if key == 'darwin':
        return 'mac-o'
    if key in ('win32', 'cygwin', 'uwin', 'msys'):
        return 'pe'
    # everything else is assumed to be ELF
    return 'elf'
def unversioned_sys_platform():
    """
    Return the unversioned platform name.
    Some Python platform names contain versions, that depend on
    the build environment, e.g. linux2, freebsd6, etc.
    This returns the name without the version number. Exceptions are
    os2 and win32, which are returned verbatim.
    :rtype: string
    :return: Unversioned platform name
    """
    s = sys.platform
    if s == 'java':
        # The real OS is hidden under the JVM.
        from java.lang import System
        s = System.getProperty('os.name')
        # see http://lopica.sourceforge.net/os.html for a list of possible values
        if s == 'Mac OS X':
            return 'darwin'
        elif s.startswith('Windows '):
            return 'win32'
        elif s == 'OS/2':
            return 'os2'
        elif s == 'HP-UX':
            return 'hpux'
        elif s in ('SunOS', 'Solaris'):
            return 'sunos'
        else: s = s.lower()
    # powerpc == darwin for our purposes
    if s == 'powerpc':
        return 'darwin'
    # keep 'win32' and '*os2' verbatim, except 'sunos2' which is stripped below
    if s == 'win32' or s.endswith('os2') and s != 'sunos2': return s
    # strip the trailing version digits: 'linux2' -> 'linux', 'freebsd6' -> 'freebsd'
    return re.split('\d+$', s)[0]
def nada(*k, **kw):
    """
    A no-op placeholder accepting any arguments.

    :return: None
    """
    return None
class Timer(object):
    """
    Simple object for timing the execution of commands.
    Its string representation is the elapsed time since creation,
    e.g. '3m12.345s'::

        from waflib.Utils import Timer
        timer = Timer()
        a_few_operations()
        s = str(timer)
    """
    def __init__(self):
        # record the creation time; __str__ reports the delta from it
        self.start_time = datetime.datetime.utcnow()
    def __str__(self):
        delta = datetime.datetime.utcnow() - self.start_time
        days = delta.days
        hours, rem = divmod(delta.seconds, 3600)
        minutes, seconds = divmod(rem, 60)
        seconds += delta.microseconds * 1e-6
        # larger units are included only when non-zero (cascading)
        parts = []
        if days:
            parts.append('%dd' % days)
        if days or hours:
            parts.append('%dh' % hours)
        if days or hours or minutes:
            parts.append('%dm' % minutes)
        return '%s%.3fs' % (''.join(parts), seconds)
if is_win32:
    # shutil.copy2 does not copy the file attributes on windows, so we
    # hack into the shutil module to fix the problem
    old = shutil.copy2
    def copy2(src, dst):
        """
        shutil.copy2 does not copy the file attributes on windows, so we
        hack into the shutil module to fix the problem
        """
        old(src, dst)
        shutil.copystat(src, dst)
    setattr(shutil, 'copy2', copy2)
if os.name == 'java':
    # Jython cannot disable the gc but they can enable it ... wtf?
    try:
        gc.disable()
        gc.enable()
    except NotImplementedError:
        # make disable() a harmless alias so nogc() keeps working
        gc.disable = gc.enable
def read_la_file(path):
    """
    Read a libtool-style property file (name='value' lines) into a dict;
    used by msvc.py.

    :param path: file to read
    :type path: string
    """
    matcher = re.compile(r'^([^=]+)=\'(.*)\'$')
    dc = {}
    for line in readf(path).splitlines():
        tokens = matcher.split(line.strip())
        # a matching line splits into [pre, name, value, post]
        if len(tokens) == 4:
            dc[tokens[1]] = tokens[2]
    return dc
def nogc(fun):
    """
    Decorator: disable the garbage collector while the wrapped function
    runs. Used in the build context when storing/loading the build cache
    file (pickle).

    :param fun: function to execute
    :type fun: function
    :return: the return value of the function executed
    """
    def wrapper(*k, **kw):
        try:
            gc.disable()
            ret = fun(*k, **kw)
        finally:
            # always re-enable, even if fun raised
            gc.enable()
        return ret
    wrapper.__doc__ = fun.__doc__
    return wrapper
def run_once(fun):
    """
    Decorator: memoize a single-argument function::

        @run_once
        def foo(k):
            return 345*2343

    :param fun: function to execute
    :type fun: function
    :return: the return value of the function executed
    """
    cache = {}
    def wrap(k):
        if k in cache:
            return cache[k]
        ret = cache[k] = fun(k)
        return ret
    # expose the cache for inspection/clearing
    wrap.__cache__ = cache
    return wrap
def get_registry_app_path(key, filename):
    # Look up an executable's path through the Windows registry "App Paths"
    # key; returns None when winreg is unavailable (non-Windows) or when
    # the registry entry is missing or does not point to an actual file.
    if not winreg:
        return None
    try:
        # only the first element of filename is used as the program name
        result = winreg.QueryValue(key, "Software\\Microsoft\\Windows\\CurrentVersion\\App Paths\\%s.exe" % filename[0])
    except WindowsError:
        pass
    else:
        if os.path.isfile(result):
            return result
|
#!/bin/env python
#
# analyse_solid_run.py: analyse and report on SOLiD sequencer runs
# Copyright (C) University of Manchester 2011 Peter Briggs
#
########################################################################
#
# analyse_solid_run.py
#
#########################################################################
"""analyse_solid_run.py
Provides functionality for analysing a SOLiD run, to verify and report data
about the run, and suggest a layout scheme for the analysis directories.
"""
#######################################################################
# Import modules that this module depends on
#######################################################################
import sys
import os
import string
import SolidDataExtractor
import build_analysis_dir
# Spreadsheet functionality
try:
import Spreadsheet
SPREADSHEET_IMPORTED = True
except ImportError,ex:
print "Failed to import Spreadsheet module: %s" % ex
SPREADSHEET_IMPORTED = False
#######################################################################
# Class definitions
#######################################################################
# No classes defined
#######################################################################
# Module Functions: SOLiD data utilities
#######################################################################
def pretty_print_libraries(libraries):
    """Given a list of libraries, format for pretty printing.
    Examples:
    ['DR1', 'DR2', 'DR3', DR4'] -> 'DR1-4'
    """
    # Split each library name into prefix and numeric suffix
    ##print "pretty_print: input = "+str(libraries)
    # Sort by (prefix, index) so runs of consecutive indices are adjacent
    # NOTE(review): assumes each library exposes prefix, index, name and
    # index_as_string attributes - confirm against SolidDataExtractor
    libs = sorted(libraries, key=lambda l: (l.prefix,l.index))
    ##print str(libs)
    # Go through and group
    groups = []
    group = []
    last_index = None
    for lib in libs:
        # Check if this is next in sequence
        try:
            if lib.index == last_index+1:
                # Next in sequence
                group.append(lib)
                last_index = lib.index
                continue
        except TypeError:
            # One or both of the indexes was None
            pass
        # Current lib is not next in previous sequence
        # Tidy up and start new group
        if group:
            groups.append(group)
        group = [lib]
        last_index = lib.index
    # Capture last group
    if group:
        groups.append(group)
    ##print str(groups)
    # Pretty print
    out = []
    for group in groups:
        if len(group) == 1:
            # "group" of one
            out.append(group[0].name)
        else:
            # Group with at least two members
            out.append(group[0].name+"-"+group[-1].index_as_string)
    # Concatenate and return
    return ', '.join(out)
#######################################################################
# Module Functions: program functions
#######################################################################
def report_run(solid_runs):
    """Print a brief report about SOLiD runs.
    This generates a brief screen report about the content of the
    supplied SOLiD runs e.g. flow cells, layout, number of samples
    etc.
    Arguments:
    solid_runs: a list or tuple of SolidRun objects to report.
    """
    # Report the data for each run
    for run in solid_runs:
        # Report overall slide layout
        slide_layout = run.slideLayout()
        print "FC%s (%s)" % (str(run.run_info.flow_cell),
                             str(slide_layout))
        print "Date: %s" % (run.run_info.date)
        print "I.D.: %s\n" % (run.run_info.name)
        #
        # Report projects for each sample
        for sample in run.samples:
            for project in sample.projects:
                libraries = pretty_print_libraries(project.libraries)
                print "Sample %s: (project %s): %s" % (sample,
                                                       project.name,
                                                       libraries)
                if run.run_info.is_barcoded_sample:
                    # number of barcoded libraries in this project
                    print "B/C samples: %d" % len(project.libraries)
                total_reads = 'not available'
                if sample.barcode_stats:
                    try:
                        # last "All Beads" entry is taken as the total
                        total_reads = sample.barcode_stats.\
                            getDataByName("All Beads")[-1]
                    except IndexError:
                        pass
                # FIXME need to check that this total read info is
                # actually correct
                print "Total reads: %s\n" % str(total_reads)
def write_spreadsheet(solid_runs,spreadsheet):
    """Generate or append run data to an XLS-format spreadsheet
    Creates a new spreadsheet or appends to an existing one, writing
    new rows to summarise the data about the solid runs supplied as
    input.
    Arguments:
      solid_runs: a list or tuple of SolidRun objects to report.
      spreadsheet: the name of the XLS-format spreadsheet to write
        the data
    """
    # Check whether spreadsheet file already exists:
    # only write the title row when creating a new file
    if os.path.exists(spreadsheet):
        write_header = False
    else:
        write_header = True
    # Only write date once
    write_date = True
    # Open spreadsheet
    wb = Spreadsheet.Spreadsheet(spreadsheet,'SOLiD Runs')
    # Header row
    if write_header:
        wb.addTitleRow(['Ref No',
                        'Project Description',
                        'P.I.',
                        'Date',
                        'Library type',
                        'Sample & Layout Description',
                        'B/C samples',
                        'Total reads',
                        'I.D.',
                        'Cost'])
    # Spacer row
    wb.addEmptyRow(color='gray25')
    # Report the data for each run
    for run in solid_runs:
        # First line: date, flow cell layout, and id
        slide_layout = run.slideLayout()
        description = "FC"+str(run.run_info.flow_cell)+" ("+slide_layout+")"
        # NOTE(review): the old "is_barcoded" lookup here was dead code
        # (assigned but never read) and has been removed
        # Run with only one sample
        total_reads = ''
        if len(run.samples) == 1:
            description += ": "+str(run.samples[0].name)
            try:
                if run.samples[0].projects[0].isBarcoded():
                    # Barcoded sample, get stats
                    try:
                        total_reads = run.samples[0].barcode_stats.\
                            getDataByName("All Beads")[-1]
                    except AttributeError:
                        # barcode_stats not set: flag for manual follow-up
                        total_reads = "NOT_FOUND"
                else:
                    # Not a barcoded sample
                    total_reads = "MANUAL_LOOKUP"
            except IndexError:
                # Some problem looking up barcode status
                total_reads = "NO_INFO"
        # Deal with date string
        if write_date:
            run_date = run.run_info.date
            write_date = False # Don't write date again
        else:
            run_date = ''
        run_id = run.run_info.name
        wb.addRow(['',
                   '',
                   '',
                   run_date,
                   '',
                   description,
                   '',
                   total_reads,
                   run_id])
        # Add one line per project in each sample
        index = 0
        for sample in run.samples:
            for project in sample.projects:
                libraries = pretty_print_libraries(project.libraries)
                experimenters_initials = project.libraries[0].initials
                # Get initial description and total reads
                if len(run.samples) > 1:
                    # Multiple samples in one project
                    description = sample.name+": "
                    # Total reads
                    # For barcoded samples we should be able to extract
                    # those from the barcode statistics data
                    if project.isBarcoded():
                        total_reads = ''
                        if sample.barcode_stats:
                            try:
                                total_reads = sample.barcode_stats.\
                                    getDataByName("All Beads")[-1]
                            except IndexError:
                                pass
                    else:
                        # Not a barcoded sample, manual lookup
                        total_reads = "MANUAL_LOOKUP"
                else:
                    # All libraries belong to the same sample
                    description = ''
                    # Total reads already written once
                    total_reads = ''
                # Library type
                if project.isBarcoded():
                    library_type = "bar-coding"
                else:
                    library_type = ''
                # Add samples to the libraries
                description += str(len(project.libraries))+" samples "+\
                    libraries
                # Project description field
                # Essentially a placeholder with experimenter's initials
                project_description = "%s) %s [project description]" % \
                    (string.lowercase[index],experimenters_initials)
                index += 1
                # FIXME need to check that this total read info is
                # actually correct
                wb.addRow(['',
                           project_description,
                           '[P.I.]',
                           '',
                           library_type,
                           description,
                           len(project.libraries),
                           total_reads])
                wb.addEmptyRow()
    # Write the spreadsheet
    wb.write()
def suggest_analysis_layout(solid_runs):
    """Print a suggested analysis directory scheme
    Given a set of SolidRuns, print a suggested layout scheme for the
    analysis directory including names and partitioning of primary data
    (i.e. which data files should be associated with which subdirectory).
    Output is in the form of proposed arguments to the build_analysis_dir.py
    program.
    Arguments:
      solid_runs: a list of SolidRun objects.
    """
    print "Analysis directory layout:"
    for run in solid_runs:
        # One analysis directory per run, named <run_name>_analysis
        print "\n%s_analysis" % run.run_name
        for sample in run.samples:
            for project in sample.projects:
                # Create one experiment per project
                expt = build_analysis_dir.Experiment()
                expt.name = project.getProjectName()
                expt.type = "expt"
                expt.sample = project.getSample().name
                expt.library = project.getLibraryNamePattern()
                # Print the arguments for the layout
                print "%s " % expt.describe()
def verify_runs(solid_runs):
    """Do basic verification checks on SOLiD run data
    For each run described by a SolidRun object, check that there is
    run_definition file, samples and libraries, and that primary data
    files (csfasta and qual) have been assigned and exist.
    Returns a UNIX-like status code: 0 indicates that the checks passed,
    1 indicates that they failed.
    Arguments:
      solid_runs: a list of SolidRun objects.
    """
    print "Performing verification"
    # Overall status: 0 = every run passed, 1 = at least one run failed
    status = 0
    for run in solid_runs:
        print "\nExamining %s:" % run.run_name
        # Per-run status flag (0 = pass, 1 = fail)
        run_status = 0
        # Check that run_definition file loaded
        if not run.run_definition:
            print "Error with run_definition"
            run_status = 1
        else:
            # Check basic parameters: should have non-zero numbers of
            # samples and libraries
            if len(run.samples) == 0:
                print "No sample data"
                run_status = 1
            for sample in run.samples:
                if len(sample.libraries) == 0:
                    print "No libraries for sample %s" % sample.name
                    run_status = 1
                for library in sample.libraries:
                    # Check csfasta was found
                    if not library.csfasta:
                        print "No csfasta for %s/%s" % \
                            (sample.name,library.name)
                        run_status = 1
                    else:
                        if not os.path.exists(library.csfasta):
                            print "Missing csfasta for %s/%s" % \
                                (sample.name,library.name)
                            run_status = 1
                    # Check qual was found
                    if not library.qual:
                        print "No qual for %s/%s" % \
                            (sample.name,library.name)
                        run_status = 1
                    else:
                        if not os.path.exists(library.qual):
                            print "Missing qual for %s/%s" % \
                                (sample.name,library.name)
                            run_status = 1
        # Completed checks for run
        print "%s:" % run.run_name,
        if run_status == 0:
            print " [PASSED]"
        else:
            print " [FAILED]"
            status = 1
    # Completed
    print "\nOverall status:",
    if status == 0:
        print " [PASSED]"
    else:
        print " [FAILED]"
    return status
#######################################################################
# Main program
#######################################################################
if __name__ == "__main__":
# Get solid directories
if len(sys.argv) < 2:
print "Usage: %s [OPTIONS] <solid_run_dir>" % \
os.path.basename(sys.argv[0])
print ""
print "Various operations on a SOLiD run directory. Note that if"
print "<solid_run_dir>_2 also exists then this is automatically"
print "detected and included in the processing."
print ""
print "Options:"
print " --report: print a report of the SOLiD run"
print " --verify: do verification checks on SOLiD run directories"
print " --layout: suggest layout for analysis directories"
print " --spreadsheet[=<file>.xls]: write report to Excel spreadsheet"
sys.exit()
# Solid run directories
solid_dir_fc1 = sys.argv[-1]
solid_dir_fc2 = sys.argv[-1]+"_2"
if os.path.isdir(solid_dir_fc2):
solid_dirs = (solid_dir_fc1,solid_dir_fc2)
else:
solid_dirs = (solid_dir_fc1,)
# Other options
do_report_run = False
if "--report" in sys.argv[1:-1]:
do_report_run = True
do_checks = False
if "--verify" in sys.argv[1:-1]:
do_checks = True
do_suggest_layout = False
if "--layout" in sys.argv[1:-1]:
do_suggest_layout = True
do_spreadsheet = False
for arg in sys.argv[1:-1]:
if arg.startswith("--spreadsheet"):
if not SPREADSHEET_IMPORTED:
print "Spreadsheet functionality not available"
else:
do_spreadsheet = True
try:
i = arg.index("=")
spreadsheet = arg[i+1:]
except IndexError:
spreadsheet = solid_dir_fc1+".xls"
print "Writing spreadsheet %s" % spreadsheet
# Check there's at least one thing to do
if not (do_report_run or
do_suggest_layout or
do_spreadsheet or
do_checks):
do_report_run = True
# Get the run information
solid_runs = []
for solid_dir in solid_dirs:
run = SolidDataExtractor.SolidRun(solid_dir)
if not run:
print "Error extracting run data for %s" % solid_dir
else:
solid_runs.append(run)
# Report the runs
if do_report_run:
report_run(solid_runs)
# Report the runs to a spreadsheet
if do_spreadsheet:
write_spreadsheet(solid_runs,spreadsheet)
# Suggest a layout for analysis
if do_suggest_layout:
suggest_analysis_layout(solid_runs)
# Do verification
# Nb this should always be the last step
# Use the verification return code as the exit status
if do_checks:
status = verify_runs(solid_runs)
sys.exit(status)
analyse_solid_run.py: removed unused code from write_spreadsheet (the dead "is_barcoded" lookup, which was assigned but never read).
#!/bin/env python
#
# analyse_solid_run.py: analyse and report on SOLiD sequencer runs
# Copyright (C) University of Manchester 2011 Peter Briggs
#
########################################################################
#
# analyse_solid_run.py
#
#########################################################################
"""analyse_solid_run.py
Provides functionality for analysing a SOLiD run, to verify and report data
about the run, and suggest a layout scheme for the analysis directories.
"""
#######################################################################
# Import modules that this module depends on
#######################################################################
import sys
import os
import string
import SolidDataExtractor
import build_analysis_dir
# Spreadsheet functionality
try:
import Spreadsheet
SPREADSHEET_IMPORTED = True
except ImportError,ex:
print "Failed to import Spreadsheet module: %s" % ex
SPREADSHEET_IMPORTED = False
#######################################################################
# Class definitions
#######################################################################
# No classes defined
#######################################################################
# Module Functions: program functions
#######################################################################
def report_run(solid_runs):
    """Print a brief report about SOLiD runs.
    This generates a brief screen report about the content of the
    supplied SOLiD runs e.g. flow cells, layout, number of samples
    etc.
    Arguments:
      solid_runs: a list or tuple of SolidRun objects to report.
    """
    # Report the data for each run
    for run in solid_runs:
        # Report overall slide layout
        slide_layout = run.slideLayout()
        print "FC%s (%s)" % (str(run.run_info.flow_cell),
                             str(slide_layout))
        print "Date: %s" % (run.run_info.date)
        print "I.D.: %s\n" % (run.run_info.name)
        #
        # Report projects for each sample
        for sample in run.samples:
            for project in sample.projects:
                # Summarise the library names via the project helper
                libraries = project.prettyPrintLibraries()
                print "Sample %s: (project %s): %s" % (sample,
                                                       project.name,
                                                       libraries)
                if run.run_info.is_barcoded_sample:
                    print "B/C samples: %d" % len(project.libraries)
                # Default unless the barcode stats supply an "All Beads" count
                total_reads = 'not available'
                if sample.barcode_stats:
                    try:
                        total_reads = sample.barcode_stats.\
                            getDataByName("All Beads")[-1]
                    except IndexError:
                        # "All Beads" lookup returned no data; keep default
                        pass
                # FIXME need to check that this total read info is
                # actually correct
                print "Total reads: %s\n" % str(total_reads)
def write_spreadsheet(solid_runs,spreadsheet):
    """Generate or append run data to an XLS-format spreadsheet
    Creates a new spreadsheet or appends to an existing one, writing
    new rows to summarise the data about the solid runs supplied as
    input.
    Arguments:
      solid_runs: a list or tuple of SolidRun objects to report.
      spreadsheet: the name of the XLS-format spreadsheet to write
        the data
    """
    # Check whether spreadsheet file already exists
    # (the title row is only written when creating a new file)
    if os.path.exists(spreadsheet):
        write_header = False
    else:
        write_header = True
    # Only write date once
    write_date = True
    # Open spreadsheet
    wb = Spreadsheet.Spreadsheet(spreadsheet,'SOLiD Runs')
    # Header row
    if write_header:
        wb.addTitleRow(['Ref No',
                        'Project Description',
                        'P.I.',
                        'Date',
                        'Library type',
                        'Sample & Layout Description',
                        'B/C samples',
                        'Total reads',
                        'I.D.',
                        'Cost'])
    # Spacer row
    wb.addEmptyRow(color='gray25')
    # Report the data for each run
    for run in solid_runs:
        # First line: date, flow cell layout, and id
        # NOTE(review): assumes run.slideLayout() returns a string
        # (used directly in "+" concatenation below) — confirm
        slide_layout = run.slideLayout()
        description = "FC"+str(run.run_info.flow_cell)+" ("+slide_layout+")"
        # Run with only one sample
        total_reads = ''
        if len(run.samples) == 1:
            description += ": "+str(run.samples[0].name)
            try:
                if run.samples[0].projects[0].isBarcoded():
                    # Barcoded sample, get stats
                    try:
                        total_reads = run.samples[0].barcode_stats.\
                            getDataByName("All Beads")[-1]
                    except AttributeError:
                        # barcode_stats not set: flag for follow-up
                        total_reads = "NOT_FOUND"
                else:
                    # Not a barcoded sample
                    total_reads = "MANUAL_LOOKUP"
            except IndexError:
                # Some problem looking up barcode status
                total_reads = "NO_INFO"
        # Deal with date string
        if write_date:
            run_date = run.run_info.date
            write_date = False # Don't write date again
        else:
            run_date = ''
        run_id = run.run_info.name
        wb.addRow(['',
                   '',
                   '',
                   run_date,
                   '',
                   description,
                   '',
                   total_reads,
                   run_id])
        # Add one line per project in each sample
        index = 0
        for sample in run.samples:
            for project in sample.projects:
                libraries = project.prettyPrintLibraries()
                experimenters_initials = project.libraries[0].initials
                # Get initial description and total reads
                if len(run.samples) > 1:
                    # Multiple samples in one project
                    description = sample.name+": "
                    # Total reads
                    # For barcoded samples we should be able to extract
                    # those from the barcode statistics data
                    if project.isBarcoded():
                        total_reads = ''
                        if sample.barcode_stats:
                            try:
                                total_reads = sample.barcode_stats.\
                                    getDataByName("All Beads")[-1]
                            except IndexError:
                                # No "All Beads" data; leave blank
                                pass
                    else:
                        # Not a barcoded sample, manual lookup
                        total_reads = "MANUAL_LOOKUP"
                else:
                    # All libraries belong to the same sample
                    description = ''
                    # Total reads already written once
                    total_reads = ''
                # Library type
                if project.isBarcoded():
                    library_type = "bar-coding"
                else:
                    library_type = ''
                # Add samples to the libraries
                description += str(len(project.libraries))+" samples "+\
                    libraries
                # Project description field
                # Essentially a placeholder with experimenter's initials
                project_description = "%s) %s [project description]" % \
                    (string.lowercase[index],experimenters_initials)
                index += 1
                # FIXME need to check that this total read info is
                # actually correct
                wb.addRow(['',
                           project_description,
                           '[P.I.]',
                           '',
                           library_type,
                           description,
                           len(project.libraries),
                           total_reads])
                wb.addEmptyRow()
    # Write the spreadsheet
    wb.write()
def suggest_analysis_layout(solid_runs):
    """Print a suggested analysis directory scheme
    Given a set of SolidRuns, print a suggested layout scheme for the
    analysis directory including names and partitioning of primary data
    (i.e. which data files should be associated with which subdirectory).
    Output is in the form of proposed arguments to the build_analysis_dir.py
    program.
    Arguments:
      solid_runs: a list of SolidRun objects.
    """
    print "Analysis directory layout:"
    for run in solid_runs:
        # One analysis directory per run, named <run_name>_analysis
        print "\n%s_analysis" % run.run_name
        for sample in run.samples:
            for project in sample.projects:
                # Create one experiment per project
                expt = build_analysis_dir.Experiment()
                expt.name = project.getProjectName()
                expt.type = "expt"
                expt.sample = project.getSample().name
                expt.library = project.getLibraryNamePattern()
                # Print the arguments for the layout
                print "%s " % expt.describe()
def verify_runs(solid_runs):
    """Do basic verification checks on SOLiD run data
    For each run described by a SolidRun object, check that there is
    run_definition file, samples and libraries, and that primary data
    files (csfasta and qual) have been assigned and exist.
    Returns a UNIX-like status code: 0 indicates that the checks passed,
    1 indicates that they failed.
    Arguments:
      solid_runs: a list of SolidRun objects.
    """
    print "Performing verification"
    # Overall status: 0 = every run passed, 1 = at least one run failed
    status = 0
    for run in solid_runs:
        print "\nExamining %s:" % run.run_name
        # Per-run status flag (0 = pass, 1 = fail)
        run_status = 0
        # Check that run_definition file loaded
        if not run.run_definition:
            print "Error with run_definition"
            run_status = 1
        else:
            # Check basic parameters: should have non-zero numbers of
            # samples and libraries
            if len(run.samples) == 0:
                print "No sample data"
                run_status = 1
            for sample in run.samples:
                if len(sample.libraries) == 0:
                    print "No libraries for sample %s" % sample.name
                    run_status = 1
                for library in sample.libraries:
                    # Check csfasta was found
                    if not library.csfasta:
                        print "No csfasta for %s/%s" % \
                            (sample.name,library.name)
                        run_status = 1
                    else:
                        if not os.path.exists(library.csfasta):
                            print "Missing csfasta for %s/%s" % \
                                (sample.name,library.name)
                            run_status = 1
                    # Check qual was found
                    if not library.qual:
                        print "No qual for %s/%s" % \
                            (sample.name,library.name)
                        run_status = 1
                    else:
                        if not os.path.exists(library.qual):
                            print "Missing qual for %s/%s" % \
                                (sample.name,library.name)
                            run_status = 1
        # Completed checks for run
        print "%s:" % run.run_name,
        if run_status == 0:
            print " [PASSED]"
        else:
            print " [FAILED]"
            status = 1
    # Completed
    print "\nOverall status:",
    if status == 0:
        print " [PASSED]"
    else:
        print " [FAILED]"
    return status
#######################################################################
# Main program
#######################################################################
if __name__ == "__main__":
# Get solid directories
if len(sys.argv) < 2:
print "Usage: %s [OPTIONS] <solid_run_dir>" % \
os.path.basename(sys.argv[0])
print ""
print "Various operations on a SOLiD run directory. Note that if"
print "<solid_run_dir>_2 also exists then this is automatically"
print "detected and included in the processing."
print ""
print "Options:"
print " --report: print a report of the SOLiD run"
print " --verify: do verification checks on SOLiD run directories"
print " --layout: suggest layout for analysis directories"
print " --spreadsheet[=<file>.xls]: write report to Excel spreadsheet"
sys.exit()
# Solid run directories
solid_dir_fc1 = sys.argv[-1]
solid_dir_fc2 = sys.argv[-1]+"_2"
if os.path.isdir(solid_dir_fc2):
solid_dirs = (solid_dir_fc1,solid_dir_fc2)
else:
solid_dirs = (solid_dir_fc1,)
# Other options
do_report_run = False
if "--report" in sys.argv[1:-1]:
do_report_run = True
do_checks = False
if "--verify" in sys.argv[1:-1]:
do_checks = True
do_suggest_layout = False
if "--layout" in sys.argv[1:-1]:
do_suggest_layout = True
do_spreadsheet = False
for arg in sys.argv[1:-1]:
if arg.startswith("--spreadsheet"):
if not SPREADSHEET_IMPORTED:
print "Spreadsheet functionality not available"
else:
do_spreadsheet = True
try:
i = arg.index("=")
spreadsheet = arg[i+1:]
except IndexError:
spreadsheet = solid_dir_fc1+".xls"
print "Writing spreadsheet %s" % spreadsheet
# Check there's at least one thing to do
if not (do_report_run or
do_suggest_layout or
do_spreadsheet or
do_checks):
do_report_run = True
# Get the run information
solid_runs = []
for solid_dir in solid_dirs:
run = SolidDataExtractor.SolidRun(solid_dir)
if not run:
print "Error extracting run data for %s" % solid_dir
else:
solid_runs.append(run)
# Report the runs
if do_report_run:
report_run(solid_runs)
# Report the runs to a spreadsheet
if do_spreadsheet:
write_spreadsheet(solid_runs,spreadsheet)
# Suggest a layout for analysis
if do_suggest_layout:
suggest_analysis_layout(solid_runs)
# Do verification
# Nb this should always be the last step
# Use the verification return code as the exit status
if do_checks:
status = verify_runs(solid_runs)
sys.exit(status)
|
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt
"""Tests for function call inference"""
from astroid import bases, builder, nodes
from astroid.util import Uninferable
def test_no_return() -> None:
    """Test function with no return statements"""
    call = builder.extract_node(
        """
    def f():
        pass
    f() #@
    """
    )
    assert isinstance(call, nodes.NodeNG)
    results = call.inferred()
    # A function that never returns has no inferable call value
    assert len(results) == 1
    assert results[0] is Uninferable
def test_one_return() -> None:
    """Test function with a single return that always executes"""
    call = builder.extract_node(
        """
    def f():
        return 1
    f() #@
    """
    )
    assert isinstance(call, nodes.NodeNG)
    results = call.inferred()
    assert len(results) == 1
    constant = results[0]
    # The single unconditional return yields the constant 1
    assert isinstance(constant, nodes.Const)
    assert constant.value == 1
def test_one_return_possible() -> None:
    """Test function with a single return that only sometimes executes
    Note: currently, inference doesn't handle this type of control flow
    """
    node = builder.extract_node(
        """
    def f(x):
        if x:
            return 1
    f(1) #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # Inference assumes the conditional return executes, so the only
    # candidate is the constant 1 (the implicit None path is ignored)
    assert len(inferred) == 1
    assert isinstance(inferred[0], nodes.Const)
    assert inferred[0].value == 1
def test_multiple_returns() -> None:
    """Test function with multiple returns"""
    call = builder.extract_node(
        """
    def f(x):
        if x > 10:
            return 1
        elif x > 20:
            return 2
        else:
            return 3
    f(100) #@
    """
    )
    assert isinstance(call, nodes.NodeNG)
    results = call.inferred()
    assert len(results) == 3
    # Each branch contributes one constant candidate
    # (also avoids shadowing the outer name inside the comprehensions)
    assert all(isinstance(result, nodes.Const) for result in results)
    assert {result.value for result in results} == {1, 2, 3}
def test_argument() -> None:
    """Test function whose return value uses its arguments"""
    call = builder.extract_node(
        """
    def f(x, y):
        return x + y
    f(1, 2) #@
    """
    )
    assert isinstance(call, nodes.NodeNG)
    results = call.inferred()
    assert len(results) == 1
    constant = results[0]
    # Constant arguments propagate through the addition: 1 + 2 == 3
    assert isinstance(constant, nodes.Const)
    assert constant.value == 3
def test_inner_call() -> None:
    """Test function where return value is the result of a separate function call"""
    node = builder.extract_node(
        """
    def f():
        return g()
    def g():
        return 1
    f() #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # Inference follows the nested call g() to its constant return
    assert len(inferred) == 1
    assert isinstance(inferred[0], nodes.Const)
    assert inferred[0].value == 1
def test_inner_call_with_const_argument() -> None:
    """Test function where return value is the result of a separate function call,
    with a constant value passed to the inner function.
    """
    node = builder.extract_node(
        """
    def f():
        return g(1)
    def g(y):
        return y + 2
    f() #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # The constant argument propagates into g: 1 + 2 == 3
    assert len(inferred) == 1
    assert isinstance(inferred[0], nodes.Const)
    assert inferred[0].value == 3
def test_inner_call_with_dynamic_argument() -> None:
    """Test function where return value is the result of a separate function call,
    with a dynamic value passed to the inner function.
    Currently, this is Uninferable.
    """
    node = builder.extract_node(
        """
    def f(x):
        return g(x)
    def g(y):
        return y + 2
    f(1) #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # The argument is forwarded through f's parameter, which the
    # engine does not track into the inner call
    assert len(inferred) == 1
    assert inferred[0] is Uninferable
def test_method_const_instance_attr() -> None:
    """Test method where the return value is based on an instance attribute with a
    constant value.
    """
    node = builder.extract_node(
        """
    class A:
        def __init__(self):
            self.x = 1
        def get_x(self):
            return self.x
    A().get_x() #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # The attribute assignment in __init__ provides the single candidate
    assert len(inferred) == 1
    assert isinstance(inferred[0], nodes.Const)
    assert inferred[0].value == 1
def test_method_const_instance_attr_multiple() -> None:
    """Test method where the return value is based on an instance attribute with
    multiple possible constant values, across different methods.
    """
    node = builder.extract_node(
        """
    class A:
        def __init__(self, x):
            if x:
                self.x = 1
            else:
                self.x = 2
        def set_x(self):
            self.x = 3
        def get_x(self):
            return self.x
    A().get_x() #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # Every assignment to self.x anywhere in the class is a candidate
    assert len(inferred) == 3
    assert all(isinstance(node, nodes.Const) for node in inferred)
    assert {node.value for node in inferred} == {1, 2, 3}
def test_method_const_instance_attr_same_method() -> None:
    """Test method where the return value is based on an instance attribute with
    multiple possible constant values, including in the method being called.
    Note that even with a simple control flow where the assignment in the method body
    is guaranteed to override any previous assignments, all possible constant values
    are returned.
    """
    node = builder.extract_node(
        """
    class A:
        def __init__(self, x):
            if x:
                self.x = 1
            else:
                self.x = 2
        def set_x(self):
            self.x = 3
        def get_x(self):
            self.x = 4
            return self.x
    A().get_x() #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # All four assignments to self.x are reported, even though the one
    # in get_x always wins at runtime
    assert len(inferred) == 4
    assert all(isinstance(node, nodes.Const) for node in inferred)
    assert {node.value for node in inferred} == {1, 2, 3, 4}
def test_method_dynamic_instance_attr_1() -> None:
    """Test method where the return value is based on an instance attribute with
    a dynamically-set value in a different method.
    In this case, the return value is Uninferable.
    """
    node = builder.extract_node(
        """
    class A:
        def __init__(self, x):
            self.x = x
        def get_x(self):
            return self.x
    A(1).get_x() #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # The constructor argument is not tracked into self.x
    assert len(inferred) == 1
    assert inferred[0] is Uninferable
def test_method_dynamic_instance_attr_2() -> None:
    """Test method where the return value is based on an instance attribute with
    a dynamically-set value in the same method.
    """
    node = builder.extract_node(
        """
    class A:
        # Note: no initializer, so the only assignment happens in get_x
        def get_x(self, x):
            self.x = x
            return self.x
    A().get_x(1) #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # Assignment within the called method itself is tracked
    assert len(inferred) == 1
    assert isinstance(inferred[0], nodes.Const)
    assert inferred[0].value == 1
def test_method_dynamic_instance_attr_3() -> None:
    """Test method where the return value is based on an instance attribute with
    a dynamically-set value in a different method.
    This is currently Uninferable.
    """
    node = builder.extract_node(
        """
    class A:
        def get_x(self, x):  # x is unused
            return self.x
    def set_x(self, x):
        self.x = x
    A().get_x(10) #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # The argument to get_x is unused, so nothing constrains self.x
    assert len(inferred) == 1
    assert inferred[0] is Uninferable  # not 10!
def test_method_dynamic_instance_attr_4() -> None:
    """Test method where the return value is based on an instance attribute with
    a dynamically-set value in a different method, and is passed a constant value.
    This is currently Uninferable.
    """
    node = builder.extract_node(
        """
    class A:
        # Note: no initializer, so the only assignment happens in get_x
        def get_x(self):
            self.set_x(10)
            return self.x
        def set_x(self, x):
            self.x = x
    A().get_x() #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # The assignment happens inside the helper set_x, which inference
    # does not follow for attribute values
    assert len(inferred) == 1
    assert inferred[0] is Uninferable
def test_method_dynamic_instance_attr_5() -> None:
    """Test method where the return value is based on an instance attribute with
    a dynamically-set value in a different method, and is passed a constant value.
    But, where the outer and inner functions have the same signature.
    Inspired by https://github.com/PyCQA/pylint/issues/400
    This is currently Uninferable.
    """
    node = builder.extract_node(
        """
    class A:
        # Note: no initializer, so the only assignment happens in get_x
        def get_x(self, x):
            self.set_x(10)
            return self.x
        def set_x(self, x):
            self.x = x
    A().get_x(1) #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # Matching signatures must not cause the outer argument to be
    # confused with the inner one; result stays Uninferable
    assert len(inferred) == 1
    assert inferred[0] is Uninferable
def test_method_dynamic_instance_attr_6() -> None:
    """Test method where the return value is based on an instance attribute with
    a dynamically-set value in a different method, and is passed a dynamic value.
    This is currently Uninferable.
    """
    node = builder.extract_node(
        """
    class A:
        # Note: no initializer, so the only assignment happens in get_x
        def get_x(self, x):
            self.set_x(x + 1)
            return self.x
        def set_x(self, x):
            self.x = x
    A().get_x(1) #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # A computed argument through a helper call is not tracked
    assert len(inferred) == 1
    assert inferred[0] is Uninferable
def test_dunder_getitem() -> None:
    """Test for the special method __getitem__ (used by Instance.getitem).
    This is currently Uninferable, until we can infer instance attribute values through
    constructor calls.
    """
    node = builder.extract_node(
        """
    class A:
        def __init__(self, x):
            self.x = x
        def __getitem__(self, i):
            return self.x + i
    A(1)[2] #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # self.x comes from the constructor argument, which is not tracked
    assert len(inferred) == 1
    assert inferred[0] is Uninferable
def test_instance_method() -> None:
    """Tests for instance method, both bound and unbound."""
    nodes_ = builder.extract_node(
        """
    class A:
        def method(self, x):
            return x
    A().method(42) #@
    # In this case, the 1 argument is bound to self, which is ignored in the method
    A.method(1, 42) #@
    """
    )
    # Both the bound and unbound call forms infer to the same constant
    for node in nodes_:
        assert isinstance(node, nodes.NodeNG)
        inferred = node.inferred()
        assert len(inferred) == 1
        assert isinstance(inferred[0], nodes.Const)
        assert inferred[0].value == 42
def test_class_method() -> None:
    """Tests for class method calls, both instance and with the class."""
    nodes_ = builder.extract_node(
        """
    class A:
        @classmethod
        def method(cls, x):
            return x
    A.method(42) #@
    A().method(42) #@
    """
    )
    # Calling via the class or an instance must give the same result
    for node in nodes_:
        assert isinstance(node, nodes.NodeNG)
        inferred = node.inferred()
        assert len(inferred) == 1
        assert isinstance(inferred[0], nodes.Const), node
        assert inferred[0].value == 42
def test_static_method() -> None:
    """Tests for static method calls, both instance and with the class."""
    nodes_ = builder.extract_node(
        """
    class A:
        @staticmethod
        def method(x):
            return x
    A.method(42) #@
    A().method(42) #@
    """
    )
    # Calling via the class or an instance must give the same result
    for node in nodes_:
        assert isinstance(node, nodes.NodeNG)
        inferred = node.inferred()
        assert len(inferred) == 1
        assert isinstance(inferred[0], nodes.Const), node
        assert inferred[0].value == 42
def test_instance_method_inherited() -> None:
    """Tests for instance methods that are inherited from a superclass.
    Based on https://github.com/PyCQA/astroid/issues/1008.
    """
    nodes_ = builder.extract_node(
        """
    class A:
        def method(self):
            return self
    class B(A):
        pass
    A().method() #@
    A.method(A()) #@
    B().method() #@
    B.method(B()) #@
    A.method(B()) #@
    """
    )
    expected = ["A", "A", "B", "B", "B"]
    # Fix: the loop target previously rebound `expected`, the very list
    # being zipped over (it only worked because zip() captures its
    # arguments eagerly); use a distinct name for the per-item value
    for node, expected_name in zip(nodes_, expected):
        assert isinstance(node, nodes.NodeNG)
        inferred = node.inferred()
        assert len(inferred) == 1
        assert isinstance(inferred[0], bases.Instance)
        assert inferred[0].name == expected_name
def test_class_method_inherited() -> None:
    """Tests for class methods that are inherited from a superclass.
    Based on https://github.com/PyCQA/astroid/issues/1008.
    """
    nodes_ = builder.extract_node(
        """
    class A:
        @classmethod
        def method(cls):
            return cls
    class B(A):
        pass
    A().method() #@
    A.method() #@
    B().method() #@
    B.method() #@
    """
    )
    expected = ["A", "A", "B", "B"]
    # Fix: the loop target previously rebound `expected`, the very list
    # being zipped over (it only worked because zip() captures its
    # arguments eagerly); use a distinct name for the per-item value
    for node, expected_name in zip(nodes_, expected):
        assert isinstance(node, nodes.NodeNG)
        inferred = node.inferred()
        assert len(inferred) == 1
        assert isinstance(inferred[0], nodes.ClassDef)
        assert inferred[0].name == expected_name
def test_chained_attribute_inherited() -> None:
    """Tests for class methods that are inherited from a superclass.
    Based on https://github.com/PyCQA/pylint/issues/4220.
    """
    node = builder.extract_node(
        """
    class A:
        def f(self):
            return 42
    class B(A):
        def __init__(self):
            self.a = A()
            result = self.a.f()
        def f(self):
            pass
    B().a.f() #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # The chained attribute resolves to A's f, not the override on B
    assert len(inferred) == 1
    assert isinstance(inferred[0], nodes.Const)
    assert inferred[0].value == 42
Fix for a loop that reassigns the iterable it is iterating over (the loop target rebound the `expected` list being passed to zip()).
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt
"""Tests for function call inference"""
from astroid import bases, builder, nodes
from astroid.util import Uninferable
def test_no_return() -> None:
    """Test function with no return statements"""
    node = builder.extract_node(
        """
    def f():
        pass
    f() #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # A function that never returns has no inferable call value
    assert len(inferred) == 1
    assert inferred[0] is Uninferable
def test_one_return() -> None:
    """Test function with a single return that always executes"""
    node = builder.extract_node(
        """
    def f():
        return 1
    f() #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    # The single unconditional return yields the constant 1
    assert len(inferred) == 1
    assert isinstance(inferred[0], nodes.Const)
    assert inferred[0].value == 1
def test_one_return_possible() -> None:
    """Test function with a single return that only sometimes executes
    Note: currently, inference doesn't handle this type of control flow
    """
    call_node = builder.extract_node(
        """
    def f(x):
        if x:
            return 1
    f(1) #@
    """
    )
    assert isinstance(call_node, nodes.NodeNG)
    results = call_node.inferred()
    # Inference does not model the implicit `return None` fall-through here.
    assert len(results) == 1
    const = results[0]
    assert isinstance(const, nodes.Const)
    assert const.value == 1
def test_multiple_returns() -> None:
    """Test function with multiple returns"""
    call_node = builder.extract_node(
        """
    def f(x):
        if x > 10:
            return 1
        elif x > 20:
            return 2
        else:
            return 3
    f(100) #@
    """
    )
    assert isinstance(call_node, nodes.NodeNG)
    results = call_node.inferred()
    # Every return branch contributes a candidate value.
    assert len(results) == 3
    assert all(isinstance(item, nodes.Const) for item in results)
    assert {item.value for item in results} == {1, 2, 3}
def test_argument() -> None:
    """Test function whose return value uses its arguments"""
    call_node = builder.extract_node(
        """
    def f(x, y):
        return x + y
    f(1, 2) #@
    """
    )
    assert isinstance(call_node, nodes.NodeNG)
    results = call_node.inferred()
    assert len(results) == 1
    const = results[0]
    assert isinstance(const, nodes.Const)
    assert const.value == 3
def test_inner_call() -> None:
    """Test function where return value is the result of a separate function call"""
    call_node = builder.extract_node(
        """
    def f():
        return g()
    def g():
        return 1
    f() #@
    """
    )
    assert isinstance(call_node, nodes.NodeNG)
    results = call_node.inferred()
    assert len(results) == 1
    const = results[0]
    assert isinstance(const, nodes.Const)
    assert const.value == 1
def test_inner_call_with_const_argument() -> None:
    """Test function where return value is the result of a separate function call,
    with a constant value passed to the inner function.
    """
    call_node = builder.extract_node(
        """
    def f():
        return g(1)
    def g(y):
        return y + 2
    f() #@
    """
    )
    assert isinstance(call_node, nodes.NodeNG)
    results = call_node.inferred()
    assert len(results) == 1
    const = results[0]
    assert isinstance(const, nodes.Const)
    assert const.value == 3
def test_inner_call_with_dynamic_argument() -> None:
    """Test function where return value is the result of a separate function call,
    with a dynamic value passed to the inner function.
    Currently, this is Uninferable.
    """
    call_node = builder.extract_node(
        """
    def f(x):
        return g(x)
    def g(y):
        return y + 2
    f(1) #@
    """
    )
    assert isinstance(call_node, nodes.NodeNG)
    results = call_node.inferred()
    assert len(results) == 1
    assert results[0] is Uninferable
def test_method_const_instance_attr() -> None:
    """Test method where the return value is based on an instance attribute with a
    constant value.
    """
    call_node = builder.extract_node(
        """
    class A:
        def __init__(self):
            self.x = 1
        def get_x(self):
            return self.x
    A().get_x() #@
    """
    )
    assert isinstance(call_node, nodes.NodeNG)
    results = call_node.inferred()
    assert len(results) == 1
    const = results[0]
    assert isinstance(const, nodes.Const)
    assert const.value == 1
def test_method_const_instance_attr_multiple() -> None:
    """Test method where the return value is based on an instance attribute with
    multiple possible constant values, across different methods.
    """
    call_node = builder.extract_node(
        """
    class A:
        def __init__(self, x):
            if x:
                self.x = 1
            else:
                self.x = 2
        def set_x(self):
            self.x = 3
        def get_x(self):
            return self.x
    A().get_x() #@
    """
    )
    assert isinstance(call_node, nodes.NodeNG)
    results = call_node.inferred()
    # All assignments to self.x, in any method, count as candidates.
    assert len(results) == 3
    assert all(isinstance(item, nodes.Const) for item in results)
    assert {item.value for item in results} == {1, 2, 3}
def test_method_const_instance_attr_same_method() -> None:
    """Test method where the return value is based on an instance attribute with
    multiple possible constant values, including in the method being called.
    Note that even with a simple control flow where the assignment in the method body
    is guaranteed to override any previous assignments, all possible constant values
    are returned.
    """
    call_node = builder.extract_node(
        """
    class A:
        def __init__(self, x):
            if x:
                self.x = 1
            else:
                self.x = 2
        def set_x(self):
            self.x = 3
        def get_x(self):
            self.x = 4
            return self.x
    A().get_x() #@
    """
    )
    assert isinstance(call_node, nodes.NodeNG)
    results = call_node.inferred()
    assert len(results) == 4
    assert all(isinstance(item, nodes.Const) for item in results)
    assert {item.value for item in results} == {1, 2, 3, 4}
def test_method_dynamic_instance_attr_1() -> None:
    """Test method where the return value is based on an instance attribute with
    a dynamically-set value in a different method.
    In this case, the return value is Uninferable.
    """
    call_node = builder.extract_node(
        """
    class A:
        def __init__(self, x):
            self.x = x
        def get_x(self):
            return self.x
    A(1).get_x() #@
    """
    )
    assert isinstance(call_node, nodes.NodeNG)
    results = call_node.inferred()
    assert len(results) == 1
    assert results[0] is Uninferable
def test_method_dynamic_instance_attr_2() -> None:
    """Test method where the return value is based on an instance attribute with
    a dynamically-set value in the same method.
    """
    call_node = builder.extract_node(
        """
    class A:
        # Note: no initializer, so the only assignment happens in get_x
        def get_x(self, x):
            self.x = x
            return self.x
    A().get_x(1) #@
    """
    )
    assert isinstance(call_node, nodes.NodeNG)
    results = call_node.inferred()
    assert len(results) == 1
    const = results[0]
    assert isinstance(const, nodes.Const)
    assert const.value == 1
def test_method_dynamic_instance_attr_3() -> None:
    """Test method where the return value is based on an instance attribute with
    a dynamically-set value in a different method.
    This is currently Uninferable.
    """
    call_node = builder.extract_node(
        """
    class A:
        def get_x(self, x): # x is unused
            return self.x
        def set_x(self, x):
            self.x = x
    A().get_x(10) #@
    """
    )
    assert isinstance(call_node, nodes.NodeNG)
    results = call_node.inferred()
    assert len(results) == 1
    assert results[0] is Uninferable # not 10!
def test_method_dynamic_instance_attr_4() -> None:
    """Test method where the return value is based on an instance attribute with
    a dynamically-set value in a different method, and is passed a constant value.
    This is currently Uninferable.
    """
    call_node = builder.extract_node(
        """
    class A:
        # Note: no initializer, so the only assignment happens in get_x
        def get_x(self):
            self.set_x(10)
            return self.x
        def set_x(self, x):
            self.x = x
    A().get_x() #@
    """
    )
    assert isinstance(call_node, nodes.NodeNG)
    results = call_node.inferred()
    assert len(results) == 1
    assert results[0] is Uninferable
def test_method_dynamic_instance_attr_5() -> None:
    """Test method where the return value is based on an instance attribute with
    a dynamically-set value in a different method, and is passed a constant value.
    But, where the outer and inner functions have the same signature.
    Inspired by https://github.com/PyCQA/pylint/issues/400
    This is currently Uninferable.
    """
    call_node = builder.extract_node(
        """
    class A:
        # Note: no initializer, so the only assignment happens in get_x
        def get_x(self, x):
            self.set_x(10)
            return self.x
        def set_x(self, x):
            self.x = x
    A().get_x(1) #@
    """
    )
    assert isinstance(call_node, nodes.NodeNG)
    results = call_node.inferred()
    assert len(results) == 1
    assert results[0] is Uninferable
def test_method_dynamic_instance_attr_6() -> None:
    """Test method where the return value is based on an instance attribute with
    a dynamically-set value in a different method, and is passed a dynamic value.
    This is currently Uninferable.
    """
    call_node = builder.extract_node(
        """
    class A:
        # Note: no initializer, so the only assignment happens in get_x
        def get_x(self, x):
            self.set_x(x + 1)
            return self.x
        def set_x(self, x):
            self.x = x
    A().get_x(1) #@
    """
    )
    assert isinstance(call_node, nodes.NodeNG)
    results = call_node.inferred()
    assert len(results) == 1
    assert results[0] is Uninferable
def test_dunder_getitem() -> None:
    """Test for the special method __getitem__ (used by Instance.getitem).
    This is currently Uninferable, until we can infer instance attribute values through
    constructor calls.
    """
    call_node = builder.extract_node(
        """
    class A:
        def __init__(self, x):
            self.x = x
        def __getitem__(self, i):
            return self.x + i
    A(1)[2] #@
    """
    )
    assert isinstance(call_node, nodes.NodeNG)
    results = call_node.inferred()
    assert len(results) == 1
    assert results[0] is Uninferable
def test_instance_method() -> None:
    """Tests for instance method, both bound and unbound."""
    call_nodes = builder.extract_node(
        """
    class A:
        def method(self, x):
            return x
    A().method(42) #@
    # In this case, the 1 argument is bound to self, which is ignored in the method
    A.method(1, 42) #@
    """
    )
    for call_node in call_nodes:
        assert isinstance(call_node, nodes.NodeNG)
        results = call_node.inferred()
        assert len(results) == 1
        assert isinstance(results[0], nodes.Const)
        assert results[0].value == 42
def test_class_method() -> None:
    """Tests for class method calls, both instance and with the class."""
    call_nodes = builder.extract_node(
        """
    class A:
        @classmethod
        def method(cls, x):
            return x
    A.method(42) #@
    A().method(42) #@
    """
    )
    for call_node in call_nodes:
        assert isinstance(call_node, nodes.NodeNG)
        results = call_node.inferred()
        assert len(results) == 1
        assert isinstance(results[0], nodes.Const), call_node
        assert results[0].value == 42
def test_static_method() -> None:
    """Tests for static method calls, both instance and with the class."""
    call_nodes = builder.extract_node(
        """
    class A:
        @staticmethod
        def method(x):
            return x
    A.method(42) #@
    A().method(42) #@
    """
    )
    for call_node in call_nodes:
        assert isinstance(call_node, nodes.NodeNG)
        results = call_node.inferred()
        assert len(results) == 1
        assert isinstance(results[0], nodes.Const), call_node
        assert results[0].value == 42
def test_instance_method_inherited() -> None:
    """Tests for instance methods that are inherited from a superclass.
    Based on https://github.com/PyCQA/astroid/issues/1008.
    """
    call_nodes = builder.extract_node(
        """
    class A:
        def method(self):
            return self
    class B(A):
        pass
    A().method() #@
    A.method(A()) #@
    B().method() #@
    B.method(B()) #@
    A.method(B()) #@
    """
    )
    wanted = ["A", "A", "B", "B", "B"]
    for call_node, class_name in zip(call_nodes, wanted):
        assert isinstance(call_node, nodes.NodeNG)
        results = call_node.inferred()
        assert len(results) == 1
        # `return self` infers to an instance of the *runtime* class.
        assert isinstance(results[0], bases.Instance)
        assert results[0].name == class_name
def test_class_method_inherited() -> None:
    """Tests for class methods that are inherited from a superclass.
    Based on https://github.com/PyCQA/astroid/issues/1008.
    """
    call_nodes = builder.extract_node(
        """
    class A:
        @classmethod
        def method(cls):
            return cls
    class B(A):
        pass
    A().method() #@
    A.method() #@
    B().method() #@
    B.method() #@
    """
    )
    wanted = ["A", "A", "B", "B"]
    for call_node, class_name in zip(call_nodes, wanted):
        assert isinstance(call_node, nodes.NodeNG)
        results = call_node.inferred()
        assert len(results) == 1
        # `return cls` infers to the class object itself.
        assert isinstance(results[0], nodes.ClassDef)
        assert results[0].name == class_name
def test_chained_attribute_inherited() -> None:
    """Tests for attribute access chained through another instance attribute.

    ``B().a`` is an ``A`` instance, so ``B().a.f()`` resolves to ``A.f``
    (returning 42), not the overriding ``B.f``.
    Based on https://github.com/PyCQA/pylint/issues/4220.
    """
    node = builder.extract_node(
        """
    class A:
        def f(self):
            return 42
    class B(A):
        def __init__(self):
            self.a = A()
            result = self.a.f()
        def f(self):
            pass
    B().a.f() #@
    """
    )
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    assert len(inferred) == 1
    assert isinstance(inferred[0], nodes.Const)
    assert inferred[0].value == 42
# |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import base64
import contextlib
import copy
import enum
import functools
import io
import json
import logging
import os
import re
import shutil
import subprocess
import threading
import time
from collections import defaultdict, namedtuple
from typing import Callable, Optional, Union
from urllib.parse import urlparse
import requests
import retry
import six
from deprecated import deprecated
from . import requests_usbmux, xcui_element_types
from ._proto import *
from .exceptions import *
from .usbmux import Usbmux
from .utils import inject_call, limit_call_depth
try:
from functools import cached_property # Python3.8+
except ImportError:
from cached_property import cached_property
try:
import sys
import logzero
if not (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()):
log_format = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d] %(message)s'
logzero.setup_default_logger(formatter=logzero.LogFormatter(
fmt=log_format))
logger = logzero.logger
except ImportError:
logger = logging.getLogger("facebook-wda") # default level: WARNING
DEBUG = False  # when True, print curl-equivalent commands and raw responses
HTTP_TIMEOUT = 180.0 # unit second
DEVICE_WAIT_TIMEOUT = 180.0 # wait ready
# Orientation constant strings used by the WDA orientation APIs.
LANDSCAPE = 'LANDSCAPE'
PORTRAIT = 'PORTRAIT'
LANDSCAPE_RIGHT = 'UIA_DEVICE_ORIENTATION_LANDSCAPERIGHT'
PORTRAIT_UPSIDEDOWN = 'UIA_DEVICE_ORIENTATION_PORTRAIT_UPSIDEDOWN'
class Status(enum.IntEnum):
    # Not very accurate; the status keeps changing on the MDS platform.
    # (translated from the original Chinese note)
    UNKNOWN = 100 # other status
    ERROR = 110
class Callback(str, enum.Enum):
    """Callback event names, plus the control values a callback may return."""
    # event names passed to register_callback()
    ERROR = "::error"
    HTTP_REQUEST_BEFORE = "::http-request-before"
    HTTP_REQUEST_AFTER = "::http-request-after"
    # values a callback returns to steer request handling (see _fetch)
    RET_RETRY = "::retry" # Callback return value
    RET_ABORT = "::abort"
    RET_CONTINUE = "::continue"
class AttrDict(dict):
    """A dict whose string keys are also readable as attributes."""

    def __getattr__(self, key):
        # Only string keys that are actually present resolve as attributes;
        # anything else keeps normal attribute-error semantics.
        if not isinstance(key, str) or key not in self:
            raise AttributeError("Attribute key not found", key)
        return self[key]
def convert(dictionary):
    """
    Convert dict to AttrDict for attribute-style access.
    """
    return AttrDict(dictionary)
    # Old implement
    # return namedtuple('GenericDict', list(dictionary.keys()))(**dictionary)
def urljoin(*urls):
    """
    Join URL fragments with single slashes.

    The standard urlparse.urljoin treats an absolute path as a replacement:
    urljoin('http://a.com/foo', '/bar') gives 'http://a.com/bar'.
    This helper appends instead, giving 'http://a.com/foo/bar'.
    """
    trimmed = (part.strip("/") for part in urls)
    return "/".join(trimmed)
def roundint(i):
    """Round `i` to the nearest whole number and return it as an int."""
    nearest = round(i, 0)
    return int(nearest)
def namedlock(name):
    """
    Return a process-wide lock shared by all callers using the same `name`.

    Returns:
        threading.Lock
    """
    # Lazily attach the registry to the function object on first use.
    registry = getattr(namedlock, 'locks', None)
    if registry is None:
        registry = namedlock.locks = defaultdict(threading.Lock)
    return registry[name]
def httpdo(url, method="GET", data=None, timeout=None) -> AttrDict:
    """
    thread safe http request

    Requests to the same "scheme://netloc" are serialized with a named
    lock, so calls to one WDA server never interleave while different
    devices proceed in parallel.

    Raises:
        WDAError, WDARequestError, WDAEmptyResponseError
    """
    p = urlparse(url)
    with namedlock(p.scheme + "://" + p.netloc):
        return _unsafe_httpdo(url, method, data, timeout)
@functools.lru_cache(1024)
def _requests_session_pool_get(scheme, netloc):
    # One Session per (scheme, netloc), memoized via lru_cache so TCP
    # connections are reused across requests to the same endpoint.
    return requests_usbmux.Session()
def _is_tmq_platform() -> bool:
return os.getenv("TMQ") == "true"
def _unsafe_httpdo(url, method='GET', data=None, timeout=None):
    """
    Do HTTP Request (not thread safe; callers go through httpdo()).

    Args:
        url: full request url
        method: HTTP verb, default 'GET'
        data: optional dict, sent as the JSON body
        timeout: seconds; defaults to HTTP_TIMEOUT when None

    Raises:
        WDABadGateway, WDARequestError (or a subclass),
        WDAEmptyResponseError, WDAError
    """
    start = time.time()
    if DEBUG:
        body = json.dumps(data) if data else ''
        print("Shell$ curl -X {method} -d '{body}' '{url}'".format(
            method=method.upper(), body=body or '', url=url))
    if timeout is None:
        timeout = HTTP_TIMEOUT
    try:
        u = urlparse(url)
        request_session = _requests_session_pool_get(u.scheme, u.netloc)
        response = request_session.request(method,
                                           url,
                                           json=data,
                                           timeout=timeout)
    except (requests.ConnectionError, requests.ReadTimeout) as e:
        # re-raised unchanged; error callbacks (e.g. _callback_wait_ready)
        # handle connection-level failures
        raise
    if response.status_code == 502: # Bad Gateway
        raise WDABadGateway(response.status_code, response.text)
    if DEBUG:
        ms = (time.time() - start) * 1000
        response_text = response.text
        if url.endswith("/screenshot"):
            response_text = response_text[:100] + "..." # limit length of screenshot response
        print('Return ({:.0f}ms): {}'.format(ms, response_text))
    try:
        retjson = response.json()
        retjson['status'] = retjson.get('status', 0)
        r = convert(retjson)
        # WDA signals failures inside value.error; map them to typed errors
        if isinstance(r.value, dict) and r.value.get("error"):
            status = Status.ERROR
            value = r.value.copy()
            value.pop("traceback", None)
            for errCls in (WDAInvalidSessionIdError, WDAPossiblyCrashedError, WDAKeyboardNotPresentError, WDAUnknownError, WDAStaleElementReferenceError):
                if errCls.check(value):
                    raise errCls(status, value)
            raise WDARequestError(status, value)
        return r
    except JSONDecodeError:
        if response.text == "":
            raise WDAEmptyResponseError(method, url, data)
        raise WDAError(method, url, response.text[:100] + "...") # should not too long
    except requests.ConnectionError as e:
        # NOTE(review): this clause looks unreachable — the request already
        # completed above — and the message has a doubled "to". Kept as-is.
        raise WDAError("Failed to establish connection to to WDA")
class Rect(list):
    """A rectangle that behaves as the list [x, y, width, height] while
    also exposing the four values as attributes plus edge/center helpers."""

    def __init__(self, x, y, width, height):
        super().__init__([x, y, width, height])
        # Mirror the four values as plain attributes for dotted access.
        self.x = x
        self.y = y
        self.width = width
        self.height = height

    def __str__(self):
        return 'Rect(x={x}, y={y}, width={w}, height={h})'.format(
            x=self.x, y=self.y, w=self.width, h=self.height)

    def __repr__(self):
        return str(self)

    @property
    def center(self):
        """Center point (integer division on width/height)."""
        point_cls = namedtuple('Point', ['x', 'y'])
        return point_cls(self.x + self.width // 2, self.y + self.height // 2)

    @property
    def origin(self):
        """Top-left corner as a Point."""
        point_cls = namedtuple('Point', ['x', 'y'])
        return point_cls(self.x, self.y)

    @property
    def left(self):
        return self.x

    @property
    def top(self):
        return self.y

    @property
    def right(self):
        return self.x + self.width

    @property
    def bottom(self):
        return self.y + self.height
def _start_wda_xctest(udid: str, wda_bundle_id=None) -> bool:
    """Try to (re)start WebDriverAgent via the tins2/tidevice xctest command.

    Args:
        udid: target device udid; empty string lets the tool pick a device
        wda_bundle_id: optional WDA bundle id passed with -B

    Returns:
        True when the xctest subprocess is still alive ~3s after launch,
        False when neither tool is installed or the launch died immediately.
    """
    xctool_path = shutil.which("tins2") or shutil.which("tidevice")
    if not xctool_path:
        return False
    logger.info("WDA is not running, exec: {} xctest".format(xctool_path))
    args = []
    if udid:
        args.extend(['-u', udid])
    args.append('xctest')
    if wda_bundle_id:
        args.extend(['-B', wda_bundle_id])
    p = subprocess.Popen([xctool_path] + args)
    # give the process a moment; an immediate exit means the launch failed
    time.sleep(3)
    if p.poll() is not None:
        logger.warning("xctest launch failed")
        return False
    return True
class BaseClient(object):
    def __init__(self, url=None, _session_id=None):
        """
        Args:
            url (string): the device url, e.g. "http://localhost:8100".
                If empty, falls back to env-var "DEVICE_URL" if defined,
                else "http://localhost:8100".
            _session_id: internal — preset session id; when given the
                session id is frozen to this value.
        """
        if not url:
            url = os.environ.get('DEVICE_URL', 'http://localhost:8100')
        assert re.match(r"^(http\+usbmux|https?)://", url), "Invalid URL: %r" % url
        # Session variable
        self.__wda_url = url
        self.__session_id = _session_id
        self.__is_app = bool(_session_id) # set to freeze session_id
        self.__timeout = 30.0
        self.__callbacks = defaultdict(list)
        self.__callback_depth = 0
        self.__callback_running = False
        # only the root (non-session) client installs the default callbacks
        if not _session_id:
            self._init_callback()
        # u = urllib.parse.urlparse(self.__wda_url)
        # if u.scheme == "http+usbmux" and not self.is_ready():
        #     udid = u.netloc.split(":")[0]
        #     if _start_wda_xctest(udid):
        #         self.wait_ready()
        #     raise RuntimeError("xctest start failed")
    def _callback_fix_invalid_session_id(self, err: WDAError):
        """On an invalid-session-id error, refresh the session id and retry.
        (translated from the original Chinese docstring)"""
        if isinstance(err, WDAInvalidSessionIdError): # and not self.__is_app:
            # drop the cached id; the next access creates/fetches a new one
            self.session_id = None
            return Callback.RET_RETRY
        if isinstance(err, WDAPossiblyCrashedError):
            self.session_id = self.session().session_id # generate new sessionId
            return Callback.RET_RETRY
""" 等待设备恢复上线 """
def _callback_wait_ready(self, err):
# logger.warning("Error: %s", err) # too noisy
if isinstance(err, (ConnectionError, requests.ConnectionError,
requests.ReadTimeout, WDABadGateway)):
if not self.wait_ready(DEVICE_WAIT_TIMEOUT): # 等待设备恢复在线
return Callback.RET_ABORT
return Callback.RET_RETRY
def _callback_tmq_before_send_keys(self, urlpath: str):
if urlpath.endswith("/wda/keys"):
if self.alert.exists:
self.alert.accept()
print("send_keys callback called")
    def _callback_tmq_print_error(self, method, url, data, err):
        """Log a curl-reproducible command for a failed request (TMQ only)."""
        if 'no such alert' in str(err): # too many this error
            return
        logger.warning(
            "HTTP Error happens, this message is printed for better debugging")
        body = json.dumps(data) if data else ''
        logger.warning("Shell$ curl -X {method} -d '{body}' '{url}'".format(
            method=method.upper(), body=body or '', url=url))
        logger.warning("Error: %s", err)
    def _init_callback(self):
        """Install the default error-recovery callbacks (plus TMQ extras)."""
        self.register_callback(Callback.ERROR,
                               self._callback_fix_invalid_session_id)
        if _is_tmq_platform():
            # handle popup alerts before sending keys;
            # print errors when they happen, for easier debugging
            # (translated from the original Chinese comments)
            logger.info("register callbacks for tmq")
            self.register_callback(Callback.ERROR, self._callback_wait_ready)
            self.register_callback(Callback.HTTP_REQUEST_BEFORE,
                                   self._callback_tmq_before_send_keys)
            self.register_callback(Callback.ERROR,
                                   self._callback_tmq_print_error)
    def _callback_json_report(self, method, urlpath):
        """ TODO: ssx — placeholder for a JSON request-report hook. """
        pass
    def _set_output_report(self, filename: str):
        """
        Args:
            filename: json log

        NOTE(review): filename is currently unused — the registered hook
        (_callback_json_report) is still a stub.
        """
        self.register_callback(
            Callback.HTTP_REQUEST_BEFORE, self._callback_json_report)
def is_ready(self) -> bool:
try:
self.http.get("status", timeout=3)
return True
except Exception as e:
return False
def wait_ready(self, timeout=120, noprint=False) -> bool:
"""
wait until WDA back to normal
Returns:
bool (if wda works)
"""
deadline = time.time() + timeout
def _dprint(message: str):
if noprint:
return
print("facebook-wda", time.ctime(), message)
_dprint("Wait ready (timeout={:.1f})".format(timeout))
while time.time() < deadline:
if self.is_ready():
_dprint("device back online")
return True
else:
_dprint("{!r} wait_ready left {:.1f} seconds".format(self.__wda_url, deadline - time.time()))
time.sleep(1.0)
_dprint("device still offline")
return False
    @retry.retry(exceptions=WDAEmptyResponseError, tries=3, delay=2)
    def status(self):
        """GET /status; return its value dict with the sessionId merged in."""
        res = self.http.get('status')
        res["value"]['sessionId'] = res.get("sessionId")
        # Can't use res.value['sessionId'] = ...
        return res.value
def register_callback(self, event_name: str, func: Callable, try_first: bool = False):
if try_first:
self.__callbacks[event_name].insert(0, func)
else:
self.__callbacks[event_name].append(func)
def unregister_callback(self,
event_name: Optional[str] = None,
func: Optional[Callable] = None):
""" 反注册 """
if event_name is None:
self.__callbacks.clear()
elif func is None:
self.__callbacks[event_name].clear()
else:
self.__callbacks[event_name].remove(func)
def _run_callback(self, event_name, callbacks,
**kwargs) -> Union[None, Callback]:
""" 运行回调函数 """
if not callbacks:
return
self.__callback_running = True
try:
for fn in callbacks[event_name]:
ret = inject_call(fn, **kwargs)
if ret in [
Callback.RET_RETRY, Callback.RET_ABORT,
Callback.RET_CONTINUE
]:
return ret
finally:
self.__callback_running = False
    @property
    def callbacks(self):
        # registered callback lists, keyed by event name
        return self.__callbacks
@limit_call_depth(4)
def _fetch(self,
method: str,
urlpath: str,
data: Optional[dict] = None,
with_session: bool = False,
timeout: Optional[float] = None) -> AttrDict:
""" do http request """
urlpath = "/" + urlpath.lstrip("/") # urlpath always startswith /
callbacks = self.__callbacks
if self.__callback_running:
callbacks = None
url = urljoin(self.__wda_url, urlpath)
run_callback = functools.partial(self._run_callback,
callbacks=callbacks,
method=method,
url=url,
urlpath=urlpath,
with_session=with_session,
data=data,
client=self)
try:
if with_session:
url = urljoin(self.__wda_url, "session", self.session_id,
urlpath)
run_callback(Callback.HTTP_REQUEST_BEFORE)
response = httpdo(url, method, data, timeout)
run_callback(Callback.HTTP_REQUEST_AFTER, response=response)
return response
except Exception as err:
ret = run_callback(Callback.ERROR, err=err)
if ret == Callback.RET_RETRY:
return self._fetch(method, urlpath, data, with_session)
elif ret == Callback.RET_CONTINUE:
return
else:
raise
    @property
    def http(self):
        """fetch/get/post helpers addressing the WDA server root."""
        return namedtuple("HTTPRequest", ['fetch', 'get', 'post'])(
            self._fetch,
            functools.partial(self._fetch, "GET"),
            functools.partial(self._fetch, "POST")) # yapf: disable
    @property
    def _session_http(self):
        """Like `http`, but every request is scoped to the current session."""
        return namedtuple("HTTPSessionRequest", ['fetch', 'get', 'post', 'delete'])(
            functools.partial(self._fetch, with_session=True),
            functools.partial(self._fetch, "GET", with_session=True),
            functools.partial(self._fetch, "POST", with_session=True),
            functools.partial(self._fetch, "DELETE", with_session=True)) # yapf: disable
def home(self):
"""Press home button"""
try:
self.http.post('/wda/homescreen')
except WDARequestError as e:
if "Timeout waiting until SpringBoard is visible" in str(e):
return
raise
    def healthcheck(self):
        """Hit healthcheck (GET /wda/healthcheck)."""
        return self.http.get('/wda/healthcheck')
    def locked(self) -> bool:
        """ returns screen-locked status, True or False """
        return self.http.get("/wda/locked").value
    def lock(self):
        """Lock the screen (POST /wda/lock)."""
        return self.http.post('/wda/lock')
    def unlock(self):
        """ unlock screen, double press home """
        return self.http.post('/wda/unlock')
    def sleep(self, secs: float):
        """ same as time.sleep """
        # convenience so scripts can stay fluent on the client object
        time.sleep(secs)
    @retry.retry(WDAUnknownError, tries=3, delay=.5, jitter=.2)
    def app_current(self) -> dict:
        """
        Returns:
            dict, eg:
            {"pid": 1281,
             "name": "",
             "bundleId": "com.netease.cloudmusic"}
        """
        # retried because WDA occasionally answers with an unknown error
        return self.http.get("/wda/activeAppInfo").value
def source(self, format='xml', accessible=False):
"""
Args:
format (str): only 'xml' and 'json' source types are supported
accessible (bool): when set to true, format is always 'json'
"""
if accessible:
return self.http.get('/wda/accessibleSource').value
return self.http.get('source?format=' + format).value
    def screenshot(self, png_filename=None, format='pillow'):
        """
        Screenshot with PNG format

        Args:
            png_filename(string): optional, save file name
            format(string): return format, "raw" or "pillow" (default)

        Returns:
            PIL.Image or raw png data

        Raises:
            WDARequestError
        """
        value = self.http.get('screenshot').value
        raw_value = base64.b64decode(value)
        png_header = b"\x89PNG\r\n\x1a\n"
        # NOTE(review): the PNG signature is only enforced when saving to a
        # file; in-memory returns pass unchecked data through
        if not raw_value.startswith(png_header) and png_filename:
            raise WDARequestError(-1, "screenshot png format error")
        if png_filename:
            with open(png_filename, 'wb') as f:
                f.write(raw_value)
        if format == 'raw':
            return raw_value
        elif format == 'pillow':
            from PIL import Image
            buff = io.BytesIO(raw_value)
            im = Image.open(buff)
            return im.convert("RGB") # convert to RGB to fix save jpeg error
        else:
            raise ValueError("unknown format")
    def session(self,
                bundle_id=None,
                arguments: Optional[list] = None,
                environment: Optional[dict] = None,
                alert_action: Optional[AlertAction] = None):
        """
        Launch app in a session

        Args:
            - bundle_id (str): the app bundle id
            - arguments (list): ['-u', 'https://www.google.com/ncr']
            - environment (dict): {"KEY": "VAL"}
            - alert_action (AlertAction): AlertAction.ACCEPT or AlertAction.DISMISS

        Returns:
            a new Client bound to the created session id.

        WDA Return json like
        {
            "value": {
                "sessionId": "69E6FDBA-8D59-4349-B7DE-A9CA41A97814",
                "capabilities": {
                    "device": "iphone",
                    "browserName": "部落冲突",
                    "sdkVersion": "9.3.2",
                    "CFBundleIdentifier": "com.supercell.magic"
                }
            },
            "sessionId": "69E6FDBA-8D59-4349-B7DE-A9CA41A97814",
            "status": 0
        }

        To create a new session, send json data like
        {
            "capabilities": {
                "alwaysMatch": {
                    "bundleId": "your-bundle-id",
                    "app": "your-app-path"
                    "shouldUseCompactResponses": (bool),
                    "shouldUseTestManagerForVisibilityDetection": (bool),
                    "maxTypingFrequency": (integer),
                    "arguments": (list(str)),
                    "environment": (dict: str->str)
                }
            },
        }

        Or {"capabilities": {}}
        """
        # Translated note: old WDA versions rejected an empty bundleId but
        # always exposed a sessionId; new WDA allows an empty bundleId but
        # starts without a sessionId.
        # if not bundle_id:
        #     session_id = self.status().get("sessionId")
        #     if session_id:
        #         return self
        capabilities = {}
        if bundle_id:
            always_match = {
                "bundleId": bundle_id,
                "arguments": arguments or [],
                "environment": environment or {},
                "shouldWaitForQuiescence": False,
            }
            if alert_action:
                assert alert_action in ["accept", "dismiss"]
                capabilities["defaultAlertAction"] = alert_action
            capabilities['alwaysMatch'] = always_match
        payload = {
            "capabilities": capabilities,
            "desiredCapabilities": capabilities.get('alwaysMatch',
                                                    {}), # compat with older wda
        }
        # when device is Locked, it is unable to start app
        if self.locked():
            self.unlock()
        try:
            res = self.http.post('session', payload)
        except WDAEmptyResponseError:
            """ when there is alert, might be got empty response
            use /wda/apps/state may still get sessionId
            """
            res = self.session().app_state(bundle_id)
            if res.value != 4:
                raise
        # Note: name mangling makes these assignments target the
        # _BaseClient__* attributes; Client subclasses BaseClient, so the
        # new client shares this client's timeout and callbacks.
        client = Client(self.__wda_url, _session_id=res.sessionId)
        client.__timeout = self.__timeout
        client.__callbacks = self.__callbacks
        return client
    def close(self): # close session
        """DELETE the current session; already-dead sessions are ignored."""
        try:
            return self._session_http.delete('/')
        except WDARequestError as e:
            # invalid/crashed sessions are already gone — nothing to close
            if not isinstance(e, (WDAInvalidSessionIdError, WDAPossiblyCrashedError)):
                raise
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
###### Session methods and properties ######
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
    def __enter__(self):
        """
        Usage example:
            with c.session("com.example.app") as app:
                # do something

        The session is closed automatically on exiting the block.
        """
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # always close the session, even when the block raised
        self.close()
@property
@deprecated(version="1.0.0", reason="Use session_id instread id")
def id(self):
return self._get_session_id()
    @property
    def session_id(self) -> str:
        """Current WDA session id; creates a new session when none exists."""
        if self.__session_id:
            return self.__session_id
        current_sid = self.status()['sessionId']
        if current_sid:
            self.__session_id = current_sid # store old session id to reduce request count
            return current_sid
        return self.session().session_id
    @session_id.setter
    def session_id(self, value):
        # setting None drops the cache; next access resolves a fresh id
        self.__session_id = value
    def _get_session_id(self) -> str:
        # kept for the deprecated `id` property
        return self.session_id
    @cached_property
    def scale(self) -> int:
        """
        UIKit scale factor (cached after the first lookup)

        Refs:
            https://developer.apple.com/library/archive/documentation/DeviceInformation/Reference/iOSDeviceCompatibility/Displays/Displays.html
        There is another way to get scale
            self._session_http.get("/wda/screen").value returns {"statusBarSize": {'width': 320, 'height': 20}, 'scale': 2}
        """
        try:
            return self._session_http.get("/wda/screen").value['scale']
        except (KeyError, WDARequestError):
            # fallback: ratio of screenshot pixels to logical window points
            v = max(self.screenshot().size) / max(self.window_size())
            return round(v)
    @cached_property
    def bundle_id(self):
        """ the session matched bundle id (from the session capabilities) """
        v = self._session_http.get("/").value
        return v['capabilities'].get('CFBundleIdentifier')
def implicitly_wait(self, seconds):
"""
set default element search timeout
"""
assert isinstance(seconds, (int, float))
self.__timeout = seconds
    def battery_info(self):
        """
        Returns dict: (exact field semantics undocumented upstream)
            eg: {"level": 1, "state": 2}
        """
        return self._session_http.get("/wda/batteryInfo").value
    def device_info(self):
        """
        Returns dict:
            eg: {'currentLocale': 'zh_CN', 'timeZone': 'Asia/Shanghai'}
        """
        return self._session_http.get("/wda/device/info").value
    @property
    def info(self):
        """
        Property alias for device_info().

        Returns:
            {'timeZone': 'Asia/Shanghai',
             'currentLocale': 'zh_CN',
             'model': 'iPhone',
             'uuid': '9DAC43B3-6887-428D-B5D5-4892D1F38BAA',
             'userInterfaceIdiom': 0,
             'userInterfaceStyle': 'unsupported',
             'name': 'iPhoneSE',
             'isSimulator': False}
        """
        return self.device_info()
def set_clipboard(self, content, content_type="plaintext"):
""" set clipboard """
self._session_http.post(
"/wda/setPasteboard", {
"content": base64.b64encode(content.encode()).decode(),
"contentType": content_type
})
    @deprecated(version="1.0.0", reason="This method is deprecated now.")
    def set_alert_callback(self, callback):
        """
        Args:
            callback (func): called when alert popup

        Example of callback:

            def callback(session):
                session.alert.accept()

        NOTE(review): deliberately a no-op — kept only for API compatibility.
        """
        pass
# Not working
# def get_clipboard(self):
# return self.http.post("/wda/getPasteboard").value
# Not working
# def siri_activate(self, text):
# self.http.post("/wda/siri/activate", {"text": text})
def app_launch(self,
bundle_id,
arguments=[],
environment={},
wait_for_quiescence=False):
"""
Args:
- bundle_id (str): the app bundle id
- arguments (list): ['-u', 'https://www.google.com/ncr']
- enviroment (dict): {"KEY": "VAL"}
- wait_for_quiescence (bool): default False
"""
# Deprecated, use app_start instead
assert isinstance(arguments, (tuple, list))
assert isinstance(environment, dict)
# When device is locked, it is unable to launch
if self.locked():
self.unlock()
return self._session_http.post(
"/wda/apps/launch", {
"bundleId": bundle_id,
"arguments": arguments,
"environment": environment,
"shouldWaitForQuiescence": wait_for_quiescence,
})
    def app_activate(self, bundle_id):
        """Bring an app to the foreground.

        NOTE(review): posts to /wda/apps/launch — the same endpoint as
        app_launch, just without the extra launch options.
        """
        return self._session_http.post("/wda/apps/launch", {
            "bundleId": bundle_id,
        })
    def app_terminate(self, bundle_id):
        """Kill the app identified by bundle_id."""
        # Deprecated, use app_stop instead
        return self._session_http.post("/wda/apps/terminate", {
            "bundleId": bundle_id,
        })
    def app_state(self, bundle_id):
        """
        Returns example:
            {
                "value": 4,
                "sessionId": "0363BDC5-4335-47ED-A54E-F7CCB65C6A65"
            }

        value 1(not running) 2(running in background) 3(running in foreground)
        NOTE(review): value 4 is also observed — session() treats 4 as
        "running" when recovering from an empty response; confirm upstream.
        """
        return self._session_http.post("/wda/apps/state", {
            "bundleId": bundle_id,
        })
def app_start(self,
bundle_id,
arguments=[],
environment={},
wait_for_quiescence=False):
""" alias for app_launch """
return self.app_launch(bundle_id, arguments, environment,
wait_for_quiescence)
    def app_stop(self, bundle_id: str):
        """ alias for app_terminate (but returns None) """
        self.app_terminate(bundle_id)
    def app_list(self):
        """
        Not working very well, only show springboard

        Returns:
            list of app

        Return example:
            [{'pid': 52, 'bundleId': 'com.apple.springboard'}]
        """
        return self._session_http.get("/wda/apps/list").value
def open_url(self, url):
    """
    Open a URL on the device.

    TODO: never succeeded in using this before; it looks like it uses Siri
    to search.
    https://github.com/facebook/WebDriverAgent/blob/master/WebDriverAgentLib/Commands/FBSessionCommands.m#L43

    Args:
        url (str): url

    Raises:
        WDARequestError
    """
    # On the MDS platform a dedicated endpoint is available.
    if os.getenv("TMQ_ORIGIN") == "civita":  # MDS platform
        return self.http.post("/mds/openurl", {"url": url})
    return self._session_http.post('url', {'url': url})
def deactivate(self, duration):
    """Put the app into the background and then bring it back.

    Args:
        - duration (float): deactivate time, seconds
    """
    return self._session_http.post('/wda/deactivateApp',
                                   dict(duration=duration))
def tap(self, x, y):
    """Tap absolute coordinates (x, y)."""
    # On TMQ/MDS a very short touchAndHold is used to emulate a tap.
    if _is_tmq_platform() and os.environ.get(
            "TMQ_ORIGIN") == "civita":  # in TMQ and belongs to MDS
        return self._session_http.post("/mds/touchAndHold",
                                       dict(x=x, y=y, duration=0.02))
    return self._session_http.post('/wda/tap/0', dict(x=x, y=y))
def _percent2pos(self, x, y, window_size=None):
if any(isinstance(v, float) for v in [x, y]):
w, h = window_size or self.window_size()
x = int(x * w) if isinstance(x, float) else x
y = int(y * h) if isinstance(y, float) else y
assert w >= x >= 0
assert h >= y >= 0
return (x, y)
def click(self, x, y, duration: Optional[float] = None):
    """Tap (x, y); with *duration* it becomes a touch-and-hold.

    Args:
        x, y: int pixels or float percentages of the window size
        duration (optional): hold time in seconds
    """
    x, y = self._percent2pos(x, y)
    if not duration:
        return self.tap(x, y)
    return self.tap_hold(x, y, duration)
def double_tap(self, x, y):
    """Double tap; x/y may be int pixels or float percentages."""
    x, y = self._percent2pos(x, y)
    return self._session_http.post('/wda/doubleTap', dict(x=x, y=y))
def tap_hold(self, x, y, duration=1.0):
    """
    Tap and hold for a moment

    Args:
        - x, y (int, float): float (percent) or int (absolute coordinate)
        - duration (float): seconds of hold time

    [[FBRoute POST:@"/wda/touchAndHold"] respondWithTarget:self action:@selector(handleTouchAndHoldCoordinate:)],
    """
    x, y = self._percent2pos(x, y)
    data = {'x': x, 'y': y, 'duration': duration}
    return self._session_http.post('/wda/touchAndHold', data=data)
def swipe(self, x1, y1, x2, y2, duration=0):
    """
    Swipe from (x1, y1) to (x2, y2).

    Args:
        x1, y1, x2, y2 (int, float): float (percent) or int (coordinate)
        duration (float): start coordinate press duration (seconds)

    [[FBRoute POST:@"/wda/dragfromtoforduration"] respondWithTarget:self action:@selector(handleDragCoordinate:)],
    """
    # Resolve the window size once so both points use the same reference.
    if any(isinstance(v, float) for v in [x1, y1, x2, y2]):
        size = self.window_size()
        x1, y1 = self._percent2pos(x1, y1, size)
        x2, y2 = self._percent2pos(x2, y2, size)

    data = dict(fromX=x1, fromY=y1, toX=x2, toY=y2, duration=duration)
    return self._session_http.post('/wda/dragfromtoforduration', data=data)
def _fast_swipe(self, x1, y1, x2, y2, velocity: int = 500):
    """Fast drag from (x1, y1) to (x2, y2).

    velocity: the larger the faster
    """
    data = dict(fromX=x1, fromY=y1, toX=x2, toY=y2, velocity=velocity)
    return self._session_http.post('/wda/drag', data=data)
def swipe_left(self):
    """ swipe right to left """
    w, h = self.window_size()
    # NOTE(review): start x equals the full width — confirm WDA accepts an
    # edge coordinate equal to the screen dimension.
    return self.swipe(w, h // 2, 1, h // 2)
def swipe_right(self):
    """ swipe left to right """
    w, h = self.window_size()
    return self.swipe(1, h // 2, w, h // 2)
def swipe_up(self):
    """ swipe from center to top """
    w, h = self.window_size()
    return self.swipe(w // 2, h // 2, w // 2, 1)
def swipe_down(self):
    """ swipe from center to bottom """
    w, h = self.window_size()
    return self.swipe(w // 2, h // 2, w // 2, h - 1)
def _fast_swipe_ext(self, direction: str):
    """Fast swipe from the screen center toward an edge.

    Args:
        direction: "up" or "down"

    Raises:
        RuntimeError: when direction is not supported
    """
    if direction == "up":
        w, h = self.window_size()
        # FIX: previously used the slow self.swipe here, inconsistent with
        # the "down" branch and with this method's name.
        return self._fast_swipe(w // 2, h // 2, w // 2, 1)
    elif direction == "down":
        w, h = self.window_size()
        return self._fast_swipe(w // 2, h // 2, w // 2, h - 1)
    else:
        raise RuntimeError("not supported direction:", direction)
@property
def orientation(self):
    """
    Return the device orientation string, one of
    <PORTRAIT | LANDSCAPE>.

    Retries up to 3 times (0.5s apart); returns None implicitly if WDA
    keeps answering an empty value.
    """
    for _ in range(3):
        result = self._session_http.get('orientation').value
        if result:
            return result
        time.sleep(.5)
@orientation.setter
def orientation(self, value):
    """
    Args:
        - orientation (string): LANDSCAPE | PORTRAIT | UIA_DEVICE_ORIENTATION_LANDSCAPERIGHT |
                UIA_DEVICE_ORIENTATION_PORTRAIT_UPSIDEDOWN
    """
    return self._session_http.post('orientation',
                                   data={'orientation': value})
def window_size(self):
    """
    Return the window size, working around known WDA failure modes.

    Returns:
        namedtuple: e.g. Size(width=320, height=568)
    """
    size = self._unsafe_window_size()
    if min(size) > 0:
        return size

    # A zero dimension usually means WDA is in a bad state; reading the
    # orientation and dismissing any alert often unblocks it.
    _ = self.orientation  # after this operation, it may be safe to get window_size
    if self.alert.exists:
        self.alert.accept()
        time.sleep(.1)
    size = self._unsafe_window_size()
    if min(size) > 0:
        return size

    # Last resort: open a fresh session against Preferences and retry there.
    logger.warning("unable to get window_size(), try to to create a new session")
    with self.session("com.apple.Preferences") as app:
        size = app._unsafe_window_size()
        assert min(size) > 0, "unable to get window_size"
        return size
def _unsafe_window_size(self):
    """
    Returns (width, height); might be (0, 0) when WDA is in a bad state.
    """
    value = self._session_http.get('/window/size').value
    w = roundint(value['width'])
    h = roundint(value['height'])
    return namedtuple('Size', ['width', 'height'])(w, h)
@retry.retry(WDAKeyboardNotPresentError, tries=3, delay=1.0)
def send_keys(self, value):
    """
    Type *value* through the on-screen keyboard.

    A plain string is split into a list of characters before POSTing.
    Retries up to 3 times when the keyboard is not present.
    """
    if isinstance(value, six.string_types):
        value = list(value)
    return self._session_http.post('/wda/keys', data={'value': value})
def press(self, name: str):
    """Press a physical button.

    Args:
        name: one of <home|volumeUp|volumeDown>

    Raises:
        ValueError: when name is not a supported button
    """
    valid_names = ("home", "volumeUp", "volumeDown")
    if name in valid_names:
        self._session_http.post("/wda/pressButton", {"name": name})
        return
    raise ValueError(
        f"Invalid name: {name}, should be one of {valid_names}")
def press_duration(self, name: str, duration: float):
    """Hold a (possibly virtual) button for *duration* seconds.

    Args:
        name: one of <home|volumeUp|volumeDown|power|snapshot>
            (case-insensitive); "snapshot"/"power+home" press power+home
        duration: seconds

    Raises:
        ValueError: for an unknown button name

    Refs:
        https://github.com/appium/WebDriverAgent/pull/494/files
    """
    # IOHID usage codes on the consumer page (0x0C).
    hid_usages = {
        "home": 0x40,
        "volumeup": 0xE9,
        "volumedown": 0xEA,
        "power": 0x30,
        "snapshot": 0x65,
        "power+home": 0x65
    }
    name = name.lower()
    hid_usage = hid_usages.get(name)
    if hid_usage is None:
        raise ValueError("Invalid name:", name)
    return self._session_http.post("/wda/performIoHidEvent", {"page": 0x0C, "usage": hid_usage, "duration": duration})
def keyboard_dismiss(self):
    """
    Dismiss the keyboard. Not working for now.

    Raises:
        RuntimeError: always; the endpoint did not pass tests.
    """
    raise RuntimeError("not pass tests, this method is not allowed to use")
    # Unreachable — kept for reference until the endpoint works.
    self._session_http.post('/wda/keyboard/dismiss')
def appium_settings(self, value: Optional[dict] = None) -> dict:
    """
    Get and set /session/$sessionId/appium/settings

    With value=None the current settings are returned; otherwise the given
    settings dict is applied and the resulting settings are returned.
    """
    if value is None:
        return self._session_http.get("/appium/settings").value
    return self._session_http.post("/appium/settings",
                                   data={
                                       "settings": value
                                   }).value
def xpath(self, value):
    """
    Build an xpath Selector; used by weditor as d.xpath(...).
    """
    return Selector(self, xpath=value)
def __call__(self, *args, **kwargs):
    """Shortcut for building a Selector: client(name=...)."""
    # NOTE(review): self.__timeout is name-mangled to the class this method
    # is defined in — confirm it matches the attribute set in __init__.
    if 'timeout' not in kwargs:
        kwargs['timeout'] = self.__timeout
    return Selector(self, *args, **kwargs)
@property
def alibaba(self):
    """ Only used in alibaba company """
    try:
        import wda_taobao
        return wda_taobao.Alibaba(self)
    except ImportError:
        raise RuntimeError(
            "@alibaba property requires wda_taobao library installed")
@property
def taobao(self):
    """Taobao-specific helper; requires the wda_taobao library."""
    try:
        import wda_taobao
        return wda_taobao.Taobao(self)
    except ImportError:
        raise RuntimeError(
            "@taobao property requires wda_taobao library installed")
class Alert(object):
    """Wrapper around WDA's /alert endpoints for system/app popups."""

    # Button labels (Chinese + English) considered safe to auto-accept.
    DEFAULT_ACCEPT_BUTTONS = [
        "使用App时允许", "无线局域网与蜂窝网络", "好", "稍后", "稍后提醒", "确定",
        "允许", "以后", "打开", "录屏", "Allow", "OK", "YES", "Yes", "Later", "Close"
    ]

    def __init__(self, client: BaseClient):
        self._c = client
        # Session-scoped HTTP helper of the owning client.
        self.http = client._session_http

    @property
    def exists(self):
        """True when an alert is currently showing (probed via .text)."""
        try:
            self.text
            return True
        except WDARequestError as e:
            # expect e.status != 27 in old version and e.value == 'no such alert' in new version
            return False

    @property
    def text(self):
        """The alert's text; raises WDARequestError when no alert exists."""
        return self.http.get('/alert/text').value

    def wait(self, timeout=20.0):
        """Poll until an alert appears; True if one showed within timeout."""
        start_time = time.time()
        while time.time() - start_time < timeout:
            if self.exists:
                return True
            time.sleep(0.2)
        return False

    def accept(self):
        """Trigger the alert's default accept action."""
        return self.http.post('/alert/accept')

    def dismiss(self):
        """Trigger the alert's default dismiss action."""
        return self.http.post('/alert/dismiss')

    def buttons(self):
        """Return the list of the alert's button labels."""
        return self.http.get('/wda/alert/buttons').value

    def click(self, button_name: Optional[Union[str, list]] = None):
        """
        Click an alert button by name.

        Args:
            - button_name: the name of the button, or a list of candidates

        Returns:
            button_name being clicked

        Raises:
            ValueError when button_name is not in avaliable button names
        """
        # Actually, It has no difference POST to accept or dismiss
        if isinstance(button_name, str):
            self.http.post('/alert/accept', data={"name": button_name})
            return button_name
        avaliable_names = self.buttons()
        # NOTE(review): when button_name is None the loop below iterates
        # None and raises TypeError — confirm callers always pass str/list.
        buttons: list = button_name
        for bname in buttons:
            if bname in avaliable_names:
                return self.click(bname)
        raise ValueError("Only these buttons can be clicked", avaliable_names)

    def click_exists(self, buttons: Optional[Union[str, list]] = None):
        """
        Click a button when possible, swallowing expected failures.

        Args:
            - buttons: the name of the button or list of names

        Returns:
            button_name clicked or None
        """
        try:
            return self.click(buttons)
        except (ValueError, WDARequestError):
            return None

    @contextlib.contextmanager
    def watch_and_click(self,
                        buttons: Optional[list] = None,
                        interval: float = 2.0):
        """ watch and click button

        Args:
            buttons: button names which need to be clicked
            interval: check interval in seconds
        """
        if not buttons:
            buttons = self.DEFAULT_ACCEPT_BUTTONS
        event = threading.Event()

        def _inner():
            # Background polling loop; exits when the context sets event.
            while not event.is_set():
                try:
                    alert_buttons = self.buttons()
                    logger.info("Alert detected, buttons: %s", alert_buttons)
                    for btn_name in buttons:
                        if btn_name in alert_buttons:
                            logger.info("Alert click: %s", btn_name)
                            self.click(btn_name)
                            break
                    else:
                        logger.warning("Alert not handled")
                except WDARequestError:
                    # No alert on screen — keep polling quietly.
                    pass
                time.sleep(interval)

        threading.Thread(name="alert", target=_inner, daemon=True).start()
        yield None
        event.set()
class Client(BaseClient):
    """Full-featured client; adds the alert helper on top of BaseClient."""

    @property
    def alert(self) -> Alert:
        """Alert accessor bound to this client's session."""
        return Alert(self)


Session = Client  # for compatibility: older code imported wda.Session
class Selector(object):
    """Lazy UI-element query: builds a WDA class-chain/xpath/predicate query
    and resolves it to Element objects on demand."""

    def __init__(self,
                 session: Session,
                 predicate=None,
                 id=None,
                 className=None,
                 type=None,
                 name=None,
                 nameContains=None,
                 nameMatches=None,
                 text=None,
                 textContains=None,
                 textMatches=None,
                 value=None,
                 valueContains=None,
                 label=None,
                 labelContains=None,
                 visible=None,
                 enabled=None,
                 classChain=None,
                 xpath=None,
                 parent_class_chains=None,
                 timeout=10.0,
                 index=0):
        '''
        Args:
            predicate (str): predicate string
            id (str): raw identifier
            className (str): attr of className
            type (str): alias of className
            name (str): attr for name
            nameContains (str): attr of name contains
            nameMatches (str): regex string
            text (str): alias of name
            textContains (str): alias of nameContains
            textMatches (str): alias of nameMatches
            value (str): attr of value, not used in most times
            valueContains (str): attr of value contains
            label (str): attr for label
            labelContains (str): attr for label contains
            visible (bool): is visible
            enabled (bool): is enabled
            classChain (str): string of ios chain query, eg: **/XCUIElementTypeOther[`value BEGINSWITH 'blabla'`]
            xpath (str): xpath string, a little slow, but works fine
            parent_class_chains (list): internal, accumulated parent chains
            timeout (float): maximum wait-element time, default 10.0s
            index (int): index of found elements

        WDA uses two keys to find elements: "using", "value"

        Examples:
            "using" can be one of
                "partial link text", "link text"
                "name", "id", "accessibility id"
                "class name", "class chain", "xpath", "predicate string"

            predicate string supports many keys
                UID, accessibilityContainer, accessible, enabled, frame,
                label, name, rect, type, value, visible,
                wdAccessibilityContainer, wdAccessible, wdEnabled, wdFrame,
                wdLabel, wdName, wdRect, wdType, wdUID, wdValue, wdVisible
        '''
        assert isinstance(session, Session)
        self._session = session
        self._predicate = predicate
        self._id = id
        self._class_name = className or type
        self._name = self._add_escape_character_for_quote_prime_character(
            name or text)
        self._name_part = nameContains or textContains
        self._name_regex = nameMatches or textMatches
        self._value = value
        self._value_part = valueContains
        self._label = label
        self._label_part = labelContains
        self._enabled = enabled
        self._visible = visible
        self._index = index

        self._xpath = self._fix_xcui_type(xpath)
        self._class_chain = self._fix_xcui_type(classChain)
        self._timeout = timeout
        # some fixtures
        if self._class_name and not self._class_name.startswith(
                'XCUIElementType'):
            self._class_name = 'XCUIElementType' + self._class_name
        if self._name_regex:
            # Anchor loosely on both sides so partial regexes behave like
            # "contains" matches.
            if not self._name_regex.startswith(
                    '^') and not self._name_regex.startswith('.*'):
                self._name_regex = '.*' + self._name_regex
            if not self._name_regex.endswith(
                    '$') and not self._name_regex.endswith('.*'):
                self._name_regex = self._name_regex + '.*'
        # FIX: default was a shared mutable list ([]); normalize None here.
        self._parent_class_chains = [] if parent_class_chains is None else parent_class_chains

    @property
    def http(self):
        """Session-scoped HTTP helper of the owning client."""
        return self._session._session_http

    def _fix_xcui_type(self, s):
        """Expand short element names (eg "/Button") to XCUIElementType names."""
        if s is None:
            return
        re_element = '|'.join(xcui_element_types.ELEMENTS)
        # FIX: raw string — '\g' in a normal string is an invalid escape.
        return re.sub(r'/(' + re_element + ')', r'/XCUIElementType\g<1>', s)

    def _add_escape_character_for_quote_prime_character(self, text):
        """
        Fix for https://github.com/openatx/facebook-wda/issues/33

        Returns:
            string with properly escaped quotes, or the unchanged text
        """
        if text is not None:
            if "'" in text:
                return text.replace("'", "\\'")
            elif '"' in text:
                return text.replace('"', '\\"')
            else:
                return text
        else:
            return text

    def _wdasearch(self, using, value):
        """
        POST /elements and collect the matched element ids.

        Returns:
            element_ids (list(string)): example ['id1', 'id2']

        HTTP example response:
            [
                {"ELEMENT": "E2FF5B2A-DBDF-4E67-9179-91609480D80A"},
                {"ELEMENT": "597B1A1E-70B9-4CBE-ACAD-40943B0A6034"}
            ]
        """
        element_ids = []
        for v in self.http.post('/elements', {
                'using': using,
                'value': value
        }).value:
            element_ids.append(v['ELEMENT'])
        return element_ids

    def _gen_class_chain(self):
        """Build the class-chain fragment for this selector's criteria."""
        # just return if a predicate already exists
        if self._predicate:
            return '/XCUIElementTypeAny[`' + self._predicate + '`]'
        qs = []
        if self._name:
            qs.append("name == '%s'" % self._name)
        if self._name_part:
            qs.append("name CONTAINS %r" % self._name_part)
        if self._name_regex:
            qs.append("name MATCHES %r" % self._name_regex)
        if self._label:
            qs.append("label == '%s'" % self._label)
        if self._label_part:
            qs.append("label CONTAINS '%s'" % self._label_part)
        if self._value:
            qs.append("value == '%s'" % self._value)
        if self._value_part:
            qs.append("value CONTAINS '%s'" % self._value_part)
        # FIX: the conditional must be parenthesized — previously the
        # expression parsed as ("visible == %s" % 'true') if self._visible
        # else 'false', appending the bare string 'false' when False.
        if self._visible is not None:
            qs.append("visible == %s" % ('true' if self._visible else 'false'))
        if self._enabled is not None:
            qs.append("enabled == %s" % ('true' if self._enabled else 'false'))
        predicate = ' AND '.join(qs)
        chain = '/' + (self._class_name or 'XCUIElementTypeAny')
        if predicate:
            chain = chain + '[`' + predicate + '`]'
        if self._index:
            chain = chain + '[%d]' % self._index
        return chain

    @retry.retry(WDAStaleElementReferenceError, tries=3, delay=.5, jitter=.2)
    def find_element_ids(self):
        """Resolve the selector to a list of WDA element ids."""
        # The most specific query wins: id > predicate > xpath > class chain.
        if self._id:
            return self._wdasearch('id', self._id)
        if self._predicate:
            return self._wdasearch('predicate string', self._predicate)
        if self._xpath:
            return self._wdasearch('xpath', self._xpath)
        if self._class_chain:
            return self._wdasearch('class chain', self._class_chain)

        chain = '**' + ''.join(
            self._parent_class_chains) + self._gen_class_chain()
        if DEBUG:
            print('CHAIN:', chain)
        return self._wdasearch('class chain', chain)

    def find_elements(self):
        """
        Returns:
            Element (list): all the matched elements
        """
        es = []
        for element_id in self.find_element_ids():
            e = Element(self._session, element_id)
            es.append(e)
        return es

    def count(self):
        """Number of elements currently matching."""
        return len(self.find_element_ids())

    def get(self, timeout=None, raise_error=True):
        """
        Args:
            timeout (float): timeout for query element, unit seconds
                Default 10s
            raise_error (bool): whether to raise error if element not found

        Returns:
            Element: UI Element (None when not found and raise_error=False)

        Raises:
            WDAElementNotFoundError if raise_error is True else None
        """
        start_time = time.time()
        if timeout is None:
            timeout = self._timeout
        while True:
            elems = self.find_elements()
            if len(elems) > 0:
                return elems[0]
            if start_time + timeout < time.time():
                break
            time.sleep(0.5)

        if raise_error:
            raise WDAElementNotFoundError("element not found",
                                          "timeout %.1f" % timeout)

    def __getattr__(self, oper):
        """Delegate unknown attributes to the first matched Element."""
        if oper.startswith("_"):
            raise AttributeError("invalid attr", oper)
        if not hasattr(Element, oper):
            raise AttributeError("'Element' object has no attribute %r" % oper)
        el = self.get()
        return getattr(el, oper)

    def set_timeout(self, s):
        """
        Set element wait timeout (seconds); returns self for chaining.
        """
        self._timeout = s
        return self

    def __getitem__(self, index):
        """Select the index-th match; returns self for chaining."""
        self._index = index
        return self

    def child(self, *args, **kwargs):
        """Build a child selector scoped under the current class chain."""
        chain = self._gen_class_chain()
        kwargs['parent_class_chains'] = self._parent_class_chains + [chain]
        return Selector(self._session, *args, **kwargs)

    @property
    def exists(self):
        """True when at least index+1 elements match right now."""
        return len(self.find_element_ids()) > self._index

    def click(self, timeout: Optional[float] = None):
        """
        Click element

        Args:
            timeout (float): max wait seconds
        """
        e = self.get(timeout=timeout)
        e.click()

    def click_exists(self, timeout=0):
        """
        Wait for the element and click it if present.

        Args:
            timeout (float): timeout for wait

        Returns:
            bool: if successfully clicked
        """
        e = self.get(timeout=timeout, raise_error=False)
        if e is None:
            return False
        e.click()
        return True

    def wait(self, timeout=None, raise_error=False):
        """ alias of get

        Args:
            timeout (float): timeout seconds
            raise_error (bool): default False, whether to raise error if element not found

        Returns:
            Element or None
        """
        return self.get(timeout=timeout, raise_error=raise_error)

    def wait_gone(self, timeout=None, raise_error=True):
        """
        Wait until no element matches.

        Args:
            timeout (float): default timeout
            raise_error (bool): return bool or raise error

        Returns:
            bool: works when raise_error is False

        Raises:
            WDAElementNotDisappearError
        """
        start_time = time.time()
        if timeout is None or timeout <= 0:
            timeout = self._timeout
        while start_time + timeout > time.time():
            if not self.exists:
                return True
        if not raise_error:
            return False
        raise WDAElementNotDisappearError("element not gone")

    # todo
    # pinch
    # touchAndHold
    # dragfromtoforduration
    # twoFingerTap

    # todo
    # handleGetIsAccessibilityContainer
    # [[FBRoute GET:@"/wda/element/:uuid/accessibilityContainer"] respondWithTarget:self action:@selector(handleGetIsAccessibilityContainer:)],
# todo
# pinch
# touchAndHold
# dragfromtoforduration
# twoFingerTap
# todo
# handleGetIsAccessibilityContainer
# [[FBRoute GET:@"/wda/element/:uuid/accessibilityContainer"] respondWithTarget:self action:@selector(handleGetIsAccessibilityContainer:)],
class Element(object):
    """A single UI element addressed by its WDA element id."""

    def __init__(self, session: Session, id: str):
        """
        Args:
            session: owning client; base_url eg: http://localhost:8100/session/$SESSION_ID
            id: WDA element identifier
        """
        self._session = session
        self._id = id

    def __repr__(self):
        return '<wda.Element(id="{}")>'.format(self._id)

    @property
    def http(self):
        """Session-scoped HTTP helper of the owning client."""
        return self._session._session_http

    def _req(self, method, url, data=None):
        """Request against /element/<id><url>."""
        return self.http.fetch(method, '/element/' + self._id + url, data)

    def _wda_req(self, method, url, data=None):
        """Request against the WDA-extension path /wda/element/<id><url>."""
        return self.http.fetch(method, '/wda/element/' + self._id + url, data)

    def _prop(self, key):
        """GET a standard element property and return its value."""
        return self._req('get', '/' + key.lstrip('/')).value

    def _wda_prop(self, key):
        """GET a WDA-extension element property and return its value."""
        ret = self.http.get('/wda/element/%s/%s' % (self._id, key)).value
        return ret

    @property
    def info(self):
        """Aggregate of common attributes (one HTTP call per field)."""
        return {
            "id": self._id,
            "label": self.label,
            "value": self.value,
            "text": self.text,
            "name": self.name,
            "className": self.className,
            "enabled": self.enabled,
            "displayed": self.displayed,
            "visible": self.visible,
            "accessible": self.accessible,
            "accessibilityContainer": self.accessibility_container
        }

    @property
    def id(self):
        return self._id

    @property
    def label(self):
        return self._prop('attribute/label')

    @property
    def className(self):
        return self._prop('attribute/type')

    @property
    def text(self):
        return self._prop('text')

    @property
    def name(self):
        return self._prop('name')

    @property
    def displayed(self):
        return self._prop("displayed")

    @property
    def enabled(self):
        return self._prop('enabled')

    @property
    def accessible(self):
        return self._wda_prop("accessible")

    @property
    def accessibility_container(self):
        return self._wda_prop('accessibilityContainer')

    @property
    def value(self):
        return self._prop('attribute/value')

    @property
    def visible(self):
        return self._prop('attribute/visible')

    @property
    def bounds(self) -> Rect:
        """The element's frame as a Rect(x, y, width, height)."""
        value = self._prop('rect')
        x, y = value['x'], value['y']
        w, h = value['width'], value['height']
        return Rect(x, y, w, h)

    # operations
    def tap(self):
        """Tap via WDA's element /click endpoint."""
        return self._req('post', '/click')

    def click(self):
        """
        Get element center position and do click, a little slower
        """
        # Some one reported, invisible element can not click
        # So here, get position and then do tap
        x, y = self.bounds.center
        self._session.click(x, y)
        # return self.tap()

    def tap_hold(self, duration=1.0):
        """
        Tap and hold for a moment

        Args:
            duration (float): seconds of hold time

        [[FBRoute POST:@"/wda/element/:uuid/touchAndHold"] respondWithTarget:self action:@selector(handleTouchAndHold:)],
        """
        return self._wda_req('post', '/touchAndHold', {'duration': duration})

    def scroll(self, direction='visible', distance=1.0):
        """
        Args:
            direction (str): one of "visible", "up", "down", "left", "right"
            distance (float): swipe distance, only works when direction is not "visible"

        Raises:
            ValueError

        distance=1.0 means, element (width or height) multiplied by 1.0
        """
        if direction == 'visible':
            self._wda_req('post', '/scroll', {'toVisible': True})
        elif direction in ['up', 'down', 'left', 'right']:
            self._wda_req('post', '/scroll', {
                'direction': direction,
                'distance': distance
            })
        else:
            raise ValueError("Invalid direction")
        return self

    # TvOS
    # @property
    # def focused(self):
    #
    # def focuse(self):

    def pickerwheel_select(self):
        """ Select by pickerwheel (not implemented) """
        # Ref: https://github.com/appium/WebDriverAgent/blob/e5d46a85fbdb22e401d396cedf0b5a9bbc995084/WebDriverAgentLib/Commands/FBElementCommands.m#L88
        raise NotImplementedError()

    def pinch(self, scale, velocity):
        """
        Args:
            scale (float): scale must > 0
            velocity (float): velocity must be less than zero when scale is less than 1

        Example:
            pinchIn  -> scale:0.5, velocity: -1
            pinchOut -> scale:2.0, velocity: 1
        """
        data = {'scale': scale, 'velocity': velocity}
        return self._wda_req('post', '/pinch', data)

    def set_text(self, value):
        """Type *value* into the element."""
        return self._req('post', '/value', {'value': value})

    def clear_text(self):
        """Clear the element's text content."""
        return self._req('post', '/clear')

    # def child(self, **kwargs):
    #     return Selector(self.__base_url, self._id, **kwargs)

    # todo lot of other operations
    # tap_hold
# def child(self, **kwargs):
# return Selector(self.__base_url, self._id, **kwargs)
# todo lot of other operations
# tap_hold
class USBClient(Client):
    """ connect device through unix:/var/run/usbmuxd """

    def __init__(self, udid: str = "", port: int = 8100, wda_bundle_id=None):
        """
        Args:
            udid: device serial; auto-detected when exactly one device is attached
            port: WDA port on the device (default 8100)
            wda_bundle_id: WDA runner bundle id passed to the xctest launcher
        """
        if not udid:
            usbmux = Usbmux()
            infos = usbmux.device_list()
            if len(infos) == 0:
                raise RuntimeError("no device connected")
            elif len(infos) >= 2:
                # NOTE(review): message typo — "more then" should be "more than".
                raise RuntimeError("more then one device connected")
            udid = infos[0]['SerialNumber']
        super().__init__(url=requests_usbmux.DEFAULT_SCHEME + "{}:{}".format(udid, port))
        if self.is_ready():
            return
        # WDA not responding yet: try to launch it via tidevice/tins2 xctest.
        _start_wda_xctest(udid, wda_bundle_id)
        if not self.wait_ready(timeout=20):
            raise RuntimeError("wda xctest launched but check failed")
add cached_property to alibaba
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import base64
import contextlib
import copy
import enum
import functools
import io
import json
import logging
import os
import re
import shutil
import subprocess
import threading
import time
from collections import defaultdict, namedtuple
from typing import Callable, Optional, Union
from urllib.parse import urlparse
import requests
import retry
import six
from deprecated import deprecated
from . import requests_usbmux, xcui_element_types
from ._proto import *
from .exceptions import *
from .usbmux import Usbmux
from .utils import inject_call, limit_call_depth
try:
from functools import cached_property # Python3.8+
except ImportError:
from cached_property import cached_property
try:
import sys
import logzero
if not (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()):
log_format = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d] %(message)s'
logzero.setup_default_logger(formatter=logzero.LogFormatter(
fmt=log_format))
logger = logzero.logger
except ImportError:
logger = logging.getLogger("facebook-wda") # default level: WARNING
DEBUG = False
HTTP_TIMEOUT = 180.0 # unit second
DEVICE_WAIT_TIMEOUT = 180.0 # wait ready
LANDSCAPE = 'LANDSCAPE'
PORTRAIT = 'PORTRAIT'
LANDSCAPE_RIGHT = 'UIA_DEVICE_ORIENTATION_LANDSCAPERIGHT'
PORTRAIT_UPSIDEDOWN = 'UIA_DEVICE_ORIENTATION_PORTRAIT_UPSIDEDOWN'
class Status(enum.IntEnum):
    # Not very accurate; the status keeps changing on the MDS platform.
    UNKNOWN = 100  # other status
    ERROR = 110
class Callback(str, enum.Enum):
    """Callback event names plus the control values callbacks may return."""
    ERROR = "::error"
    HTTP_REQUEST_BEFORE = "::http-request-before"
    HTTP_REQUEST_AFTER = "::http-request-after"

    RET_RETRY = "::retry"  # Callback return value
    RET_ABORT = "::abort"
    RET_CONTINUE = "::continue"
class AttrDict(dict):
    """A dict whose string keys can also be read as attributes."""

    def __getattr__(self, key):
        # __getattr__ is only consulted for names not found through normal
        # lookup, so plain dict behaviour is unaffected.
        if not isinstance(key, str) or key not in self:
            raise AttributeError("Attribute key not found", key)
        return self[key]
def convert(dictionary):
    """Wrap *dictionary* in an AttrDict so values are attribute-accessible.

    An earlier implementation returned a namedtuple built from the keys;
    AttrDict keeps full dict semantics while adding attribute access.
    """
    return AttrDict(dictionary)
def urljoin(*urls):
    """Join URL fragments with single slashes.

    Unlike the standard urlparse.urljoin, an absolute path fragment does
    not discard the path accumulated so far:
        urljoin('http://a.com/foo', '/bar') -> 'http://a.com/foo/bar'
    """
    trimmed = (fragment.strip("/") for fragment in urls)
    return "/".join(trimmed)
def roundint(i):
    """Round *i* to the nearest integer (ties-to-even) and return an int."""
    return int(round(i))
def namedlock(name):
    """Return a process-wide threading.Lock shared by all callers of *name*.

    Returns:
        threading.Lock
    """
    # The registry lives on the function object itself, created lazily on
    # first use, so there is no module-level global to maintain.
    registry = getattr(namedlock, 'locks', None)
    if registry is None:
        registry = defaultdict(threading.Lock)
        namedlock.locks = registry
    return registry[name]
def httpdo(url, method="GET", data=None, timeout=None) -> AttrDict:
    """
    Thread-safe HTTP request: serializes requests per scheme+host through a
    named lock so concurrent callers do not interleave WDA commands.

    Raises:
        WDAError, WDARequestError, WDAEmptyResponseError
    """
    p = urlparse(url)
    with namedlock(p.scheme + "://" + p.netloc):
        return _unsafe_httpdo(url, method, data, timeout)
@functools.lru_cache(1024)
def _requests_session_pool_get(scheme, netloc):
    """One requests session per (scheme, netloc), cached for connection reuse."""
    return requests_usbmux.Session()
def _is_tmq_platform() -> bool:
return os.getenv("TMQ") == "true"
def _unsafe_httpdo(url, method='GET', data=None, timeout=None):
    """
    Do an HTTP request without locking (see httpdo for the thread-safe
    wrapper) and map WDA error payloads onto exception classes.
    """
    start = time.time()
    if DEBUG:
        body = json.dumps(data) if data else ''
        print("Shell$ curl -X {method} -d '{body}' '{url}'".format(
            method=method.upper(), body=body or '', url=url))
    if timeout is None:
        timeout = HTTP_TIMEOUT
    try:
        u = urlparse(url)
        request_session = _requests_session_pool_get(u.scheme, u.netloc)
        response = request_session.request(method,
                                           url,
                                           json=data,
                                           timeout=timeout)
    except (requests.ConnectionError, requests.ReadTimeout) as e:
        # Re-raised unchanged; upstream callbacks decide whether to retry.
        raise

    if response.status_code == 502:  # Bad Gateway
        raise WDABadGateway(response.status_code, response.text)

    if DEBUG:
        ms = (time.time() - start) * 1000
        response_text = response.text
        if url.endswith("/screenshot"):
            response_text = response_text[:100] + "..."  # limit length of screenshot response
        print('Return ({:.0f}ms): {}'.format(ms, response_text))
    try:
        retjson = response.json()
        retjson['status'] = retjson.get('status', 0)
        r = convert(retjson)
        if isinstance(r.value, dict) and r.value.get("error"):
            status = Status.ERROR
            value = r.value.copy()
            value.pop("traceback", None)
            # Raise the most specific matching exception class.
            for errCls in (WDAInvalidSessionIdError, WDAPossiblyCrashedError, WDAKeyboardNotPresentError, WDAUnknownError, WDAStaleElementReferenceError):
                if errCls.check(value):
                    raise errCls(status, value)
            raise WDARequestError(status, value)
        return r
    except JSONDecodeError:
        if response.text == "":
            raise WDAEmptyResponseError(method, url, data)
        raise WDAError(method, url, response.text[:100] + "...")  # should not be too long
    except requests.ConnectionError as e:
        # NOTE(review): this clause guards the JSON-parsing block above, which
        # should not raise ConnectionError — likely dead code. Also note the
        # doubled "to to" in the message below.
        raise WDAError("Failed to establish connection to to WDA")
class Rect(list):
    """A rectangle that behaves as the list [x, y, width, height] while
    also exposing x/y/width/height attributes and edge helpers."""

    def __init__(self, x, y, width, height):
        super().__init__([x, y, width, height])
        # Mirror the list contents as attributes for ergonomic access.
        self.__dict__.update(x=x, y=y, width=width, height=height)

    def __str__(self):
        return f'Rect(x={self.x}, y={self.y}, width={self.width}, height={self.height})'

    __repr__ = __str__

    @property
    def center(self):
        """Center point (integer division) as a Point namedtuple."""
        Point = namedtuple('Point', ['x', 'y'])
        return Point(self.x + self.width // 2, self.y + self.height // 2)

    @property
    def origin(self):
        """Top-left corner as a Point namedtuple."""
        Point = namedtuple('Point', ['x', 'y'])
        return Point(self.x, self.y)

    @property
    def left(self):
        return self.x

    @property
    def top(self):
        return self.y

    @property
    def right(self):
        return self.x + self.width

    @property
    def bottom(self):
        return self.y + self.height
def _start_wda_xctest(udid: str, wda_bundle_id=None) -> bool:
    """Launch WDA on the device through the tins2/tidevice CLI.

    Returns:
        bool: True when the xctest process started and is still alive
        after 3 seconds; False when no tool is installed or launch failed.
    """
    xctool_path = shutil.which("tins2") or shutil.which("tidevice")
    if not xctool_path:
        return False
    logger.info("WDA is not running, exec: {} xctest".format(xctool_path))
    args = []
    if udid:
        args.extend(['-u', udid])
    args.append('xctest')
    if wda_bundle_id:
        args.extend(['-B', wda_bundle_id])
    p = subprocess.Popen([xctool_path] + args)
    time.sleep(3)
    # poll() is None while the process is still running.
    if p.poll() is not None:
        logger.warning("xctest launch failed")
        return False
    return True
class BaseClient(object):
def __init__(self, url=None, _session_id=None):
    """
    Args:
        url (string): the device url
            If url is empty, device url falls back to env-var "DEVICE_URL"
            if defined, else "http://localhost:8100"
        _session_id: when given, freezes the session id (app-scoped client)
    """
    if not url:
        url = os.environ.get('DEVICE_URL', 'http://localhost:8100')
    assert re.match(r"^(http\+usbmux|https?)://", url), "Invalid URL: %r" % url

    # Session variables (double-underscore names are class-mangled).
    self.__wda_url = url
    self.__session_id = _session_id
    self.__is_app = bool(_session_id)  # set to freeze session_id
    self.__timeout = 30.0
    self.__callbacks = defaultdict(list)
    self.__callback_depth = 0
    self.__callback_running = False

    if not _session_id:
        self._init_callback()
    # u = urllib.parse.urlparse(self.__wda_url)
    # if u.scheme == "http+usbmux" and not self.is_ready():
    #     udid = u.netloc.split(":")[0]
    #     if _start_wda_xctest(udid):
    #         self.wait_ready()
    #     raise RuntimeError("xctest start failed")
def _callback_fix_invalid_session_id(self, err: WDAError):
    """On an invalid-session-id error, refresh the session id and retry."""
    if isinstance(err, WDAInvalidSessionIdError):  # and not self.__is_app:
        self.session_id = None
        return Callback.RET_RETRY
    if isinstance(err, WDAPossiblyCrashedError):
        self.session_id = self.session().session_id  # generate new sessionId
        return Callback.RET_RETRY
# NOTE(review): the bare string below looks like a misplaced docstring for
# _callback_wait_ready ("wait for the device to come back online").
""" 等待设备恢复上线 """

def _callback_wait_ready(self, err):
    """On connection-level errors, wait for the device, then retry/abort."""
    # logger.warning("Error: %s", err)  # too noisy
    if isinstance(err, (ConnectionError, requests.ConnectionError,
                        requests.ReadTimeout, WDABadGateway)):
        if not self.wait_ready(DEVICE_WAIT_TIMEOUT):  # wait for the device to come back online
            return Callback.RET_ABORT
        return Callback.RET_RETRY
def _callback_tmq_before_send_keys(self, urlpath: str):
    """Before /wda/keys requests, accept any alert that would block typing."""
    if urlpath.endswith("/wda/keys"):
        if self.alert.exists:
            self.alert.accept()
            print("send_keys callback called")
def _callback_tmq_print_error(self, method, url, data, err):
    """Log failing requests as a reproducible curl command for debugging."""
    if 'no such alert' in str(err):  # this error is too frequent to log
        return
    logger.warning(
        "HTTP Error happens, this message is printed for better debugging")
    body = json.dumps(data) if data else ''
    logger.warning("Shell$ curl -X {method} -d '{body}' '{url}'".format(
        method=method.upper(), body=body or '', url=url))
    logger.warning("Error: %s", err)
def _init_callback(self):
    """Install the default error/retry callbacks (extras on TMQ platform)."""
    self.register_callback(Callback.ERROR,
                           self._callback_fix_invalid_session_id)
    if _is_tmq_platform():
        # Handle popups before typing.
        # Print errors when they occur, to ease debugging.
        logger.info("register callbacks for tmq")
        self.register_callback(Callback.ERROR, self._callback_wait_ready)
        self.register_callback(Callback.HTTP_REQUEST_BEFORE,
                               self._callback_tmq_before_send_keys)
        self.register_callback(Callback.ERROR,
                               self._callback_tmq_print_error)
def _callback_json_report(self, method, urlpath):
    """Placeholder for JSON request reporting. TODO: ssx"""
    pass
def _set_output_report(self, filename: str):
    """
    Register the (currently no-op) JSON report callback.

    Args:
        filename: json log
    """
    self.register_callback(
        Callback.HTTP_REQUEST_BEFORE, self._callback_json_report)
def is_ready(self) -> bool:
    """Quick probe of WDA's /status endpoint (3 second timeout)."""
    try:
        self.http.get("status", timeout=3)
    except Exception:
        return False
    return True
def wait_ready(self, timeout=120, noprint=False) -> bool:
    """
    Wait until WDA is back to normal.

    Args:
        timeout (float): max seconds to wait
        noprint (bool): suppress progress output

    Returns:
        bool (if wda works)
    """
    deadline = time.time() + timeout

    def _dprint(message: str):
        # Progress printer, silenced by noprint.
        if noprint:
            return
        print("facebook-wda", time.ctime(), message)

    _dprint("Wait ready (timeout={:.1f})".format(timeout))
    while time.time() < deadline:
        if self.is_ready():
            _dprint("device back online")
            return True
        else:
            _dprint("{!r} wait_ready left {:.1f} seconds".format(self.__wda_url, deadline - time.time()))
            time.sleep(1.0)
    _dprint("device still offline")
    return False
@retry.retry(exceptions=WDAEmptyResponseError, tries=3, delay=2)
def status(self):
    """GET /status; merges the top-level sessionId into the returned value."""
    res = self.http.get('status')
    res["value"]['sessionId'] = res.get("sessionId")
    # Can't use res.value['sessionId'] = ...
    return res.value
def register_callback(self, event_name: str, func: Callable, try_first: bool = False):
if try_first:
self.__callbacks[event_name].insert(0, func)
else:
self.__callbacks[event_name].append(func)
    def unregister_callback(self,
                            event_name: Optional[str] = None,
                            func: Optional[Callable] = None):
        """ Unregister callbacks: all of them, all for one event, or one specific func. """
        if event_name is None:
            self.__callbacks.clear()
        elif func is None:
            self.__callbacks[event_name].clear()
        else:
            # raises ValueError if func was never registered for event_name
            self.__callbacks[event_name].remove(func)
    def _run_callback(self, event_name, callbacks,
                      **kwargs) -> Union[None, Callback]:
        """ Run the callbacks registered for event_name.

        Returns the first control-flow result (RET_RETRY / RET_ABORT /
        RET_CONTINUE) produced by a callback, or None.
        """
        if not callbacks:
            return
        # flag guards against re-entrancy: requests made inside a callback
        # must not trigger callbacks again (see _fetch)
        self.__callback_running = True
        try:
            for fn in callbacks[event_name]:
                ret = inject_call(fn, **kwargs)
                if ret in [
                        Callback.RET_RETRY, Callback.RET_ABORT,
                        Callback.RET_CONTINUE
                ]:
                    return ret
        finally:
            self.__callback_running = False
    @property
    def callbacks(self):
        """Registered callbacks, keyed by event name."""
        return self.__callbacks
@limit_call_depth(4)
def _fetch(self,
method: str,
urlpath: str,
data: Optional[dict] = None,
with_session: bool = False,
timeout: Optional[float] = None) -> AttrDict:
""" do http request """
urlpath = "/" + urlpath.lstrip("/") # urlpath always startswith /
callbacks = self.__callbacks
if self.__callback_running:
callbacks = None
url = urljoin(self.__wda_url, urlpath)
run_callback = functools.partial(self._run_callback,
callbacks=callbacks,
method=method,
url=url,
urlpath=urlpath,
with_session=with_session,
data=data,
client=self)
try:
if with_session:
url = urljoin(self.__wda_url, "session", self.session_id,
urlpath)
run_callback(Callback.HTTP_REQUEST_BEFORE)
response = httpdo(url, method, data, timeout)
run_callback(Callback.HTTP_REQUEST_AFTER, response=response)
return response
except Exception as err:
ret = run_callback(Callback.ERROR, err=err)
if ret == Callback.RET_RETRY:
return self._fetch(method, urlpath, data, with_session)
elif ret == Callback.RET_CONTINUE:
return
else:
raise
    @property
    def http(self):
        """Session-less HTTP helper: .fetch(method, path), .get(path), .post(path)."""
        return namedtuple("HTTPRequest", ['fetch', 'get', 'post'])(
            self._fetch,
            functools.partial(self._fetch, "GET"),
            functools.partial(self._fetch, "POST"))  # yapf: disable
    @property
    def _session_http(self):
        """Like `http`, but requests are routed under /session/<session_id>/."""
        return namedtuple("HTTPSessionRequest", ['fetch', 'get', 'post', 'delete'])(
            functools.partial(self._fetch, with_session=True),
            functools.partial(self._fetch, "GET", with_session=True),
            functools.partial(self._fetch, "POST", with_session=True),
            functools.partial(self._fetch, "DELETE", with_session=True))  # yapf: disable
    def home(self):
        """Press home button"""
        try:
            self.http.post('/wda/homescreen')
        except WDARequestError as e:
            # WDA sometimes times out waiting for SpringBoard even though the
            # home press succeeded — treat that specific error as success
            if "Timeout waiting until SpringBoard is visible" in str(e):
                return
            raise
    def healthcheck(self):
        """Hit healthcheck (GET /wda/healthcheck)."""
        return self.http.get('/wda/healthcheck')
    def locked(self) -> bool:
        """ returns locked status, true or false """
        return self.http.get("/wda/locked").value
    def lock(self):
        """Lock the screen (POST /wda/lock)."""
        return self.http.post('/wda/lock')
    def unlock(self):
        """ unlock screen, double press home """
        return self.http.post('/wda/unlock')
    def sleep(self, secs: float):
        """ same as time.sleep """
        time.sleep(secs)
    @retry.retry(WDAUnknownError, tries=3, delay=.5, jitter=.2)
    def app_current(self) -> dict:
        """
        Returns:
            dict, eg:
            {"pid": 1281,
             "name": "",
             "bundleId": "com.netease.cloudmusic"}
        """
        return self.http.get("/wda/activeAppInfo").value
def source(self, format='xml', accessible=False):
"""
Args:
format (str): only 'xml' and 'json' source types are supported
accessible (bool): when set to true, format is always 'json'
"""
if accessible:
return self.http.get('/wda/accessibleSource').value
return self.http.get('source?format=' + format).value
    def screenshot(self, png_filename=None, format='pillow'):
        """
        Screenshot with PNG format
        Args:
            png_filename(string): optional, save file name
            format(string): return format, "raw" or "pillow" (default)
        Returns:
            PIL.Image or raw png data
        Raises:
            WDARequestError
        """
        value = self.http.get('screenshot').value
        raw_value = base64.b64decode(value)
        png_header = b"\x89PNG\r\n\x1a\n"
        # only validate the PNG magic when the caller wants a file written
        if not raw_value.startswith(png_header) and png_filename:
            raise WDARequestError(-1, "screenshot png format error")
        if png_filename:
            with open(png_filename, 'wb') as f:
                f.write(raw_value)
        if format == 'raw':
            return raw_value
        elif format == 'pillow':
            from PIL import Image
            buff = io.BytesIO(raw_value)
            im = Image.open(buff)
            return im.convert("RGB")  # convert to RGB to fix save jpeg error
        else:
            raise ValueError("unknown format")
    def session(self,
                bundle_id=None,
                arguments: Optional[list] = None,
                environment: Optional[dict] = None,
                alert_action: Optional[AlertAction] = None):
        """
        Launch app in a session
        Args:
            - bundle_id (str): the app bundle id
            - arguments (list): ['-u', 'https://www.google.com/ncr']
            - enviroment (dict): {"KEY": "VAL"}
            - alert_action (AlertAction): AlertAction.ACCEPT or AlertAction.DISMISS
        Returns:
            a new Client bound to the created session id
        WDA Return json like
        {
            "value": {
                "sessionId": "69E6FDBA-8D59-4349-B7DE-A9CA41A97814",
                "capabilities": {
                    "device": "iphone",
                    "browserName": "部落冲突",
                    "sdkVersion": "9.3.2",
                    "CFBundleIdentifier": "com.supercell.magic"
                }
            },
            "sessionId": "69E6FDBA-8D59-4349-B7DE-A9CA41A97814",
            "status": 0
        }
        To create a new session, send json data like
        {
            "capabilities": {
                "alwaysMatch": {
                    "bundleId": "your-bundle-id",
                    "app": "your-app-path"
                    "shouldUseCompactResponses": (bool),
                    "shouldUseTestManagerForVisibilityDetection": (bool),
                    "maxTypingFrequency": (integer),
                    "arguments": (list(str)),
                    "environment": (dict: str->str)
                }
            },
        }
        Or {"capabilities": {}}
        """
        # if not bundle_id:
        #     # Old WDA versions do not allow creating a session with an empty
        #     # bundleId, but a sessionId can always be obtained.
        #     # New WDA versions allow an empty bundleId, but there is no
        #     # sessionId initially.
        #     session_id = self.status().get("sessionId")
        #     if session_id:
        #         return self
        capabilities = {}
        if bundle_id:
            always_match = {
                "bundleId": bundle_id,
                "arguments": arguments or [],
                "environment": environment or {},
                "shouldWaitForQuiescence": False,
            }
            if alert_action:
                assert alert_action in ["accept", "dismiss"]
                capabilities["defaultAlertAction"] = alert_action
            capabilities['alwaysMatch'] = always_match
        payload = {
            "capabilities": capabilities,
            "desiredCapabilities": capabilities.get('alwaysMatch',
                                                    {}),  # compatibility with old wda
        }
        # when device is Locked, it is unable to start app
        if self.locked():
            self.unlock()
        try:
            res = self.http.post('session', payload)
        except WDAEmptyResponseError:
            """ when there is alert, might be got empty response
            use /wda/apps/state may still get sessionId
            """
            res = self.session().app_state(bundle_id)
            if res.value != 4:
                raise
        # hand the new session id (and shared config) to a fresh Client
        client = Client(self.__wda_url, _session_id=res.sessionId)
        client.__timeout = self.__timeout
        client.__callbacks = self.__callbacks
        return client
    def close(self):  # close session
        """Delete the current session; ignores already-dead-session errors."""
        try:
            return self._session_http.delete('/')
        except WDARequestError as e:
            # the session being gone already is an acceptable outcome
            if not isinstance(e, (WDAInvalidSessionIdError, WDAPossiblyCrashedError)):
                raise
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
###### Session methods and properties ######
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
    def __enter__(self):
        """
        Usage example:
            with c.session("com.example.app") as app:
                # do something
        """
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # close the session when leaving the with-block, even on error
        self.close()
@property
@deprecated(version="1.0.0", reason="Use session_id instread id")
def id(self):
return self._get_session_id()
    @property
    def session_id(self) -> str:
        """Current WDA session id; resolves (and caches) one if not yet known."""
        if self.__session_id:
            return self.__session_id
        current_sid = self.status()['sessionId']
        if current_sid:
            self.__session_id = current_sid  # store old session id to reduce request count
            return current_sid
        # no existing session on the device: create one
        return self.session().session_id
    @session_id.setter
    def session_id(self, value):
        # allows callers to pin a known session id (e.g. from Client(..., _session_id=...))
        self.__session_id = value
    def _get_session_id(self) -> str:
        """Back-compat shim used by the deprecated `id` property."""
        return self.session_id
    @cached_property
    def scale(self) -> int:
        """
        UIKit scale factor
        Refs:
            https://developer.apple.com/library/archive/documentation/DeviceInformation/Reference/iOSDeviceCompatibility/Displays/Displays.html
        There is another way to get scale
            self._session_http.get("/wda/screen").value returns {"statusBarSize": {'width': 320, 'height': 20}, 'scale': 2}
        """
        try:
            return self._session_http.get("/wda/screen").value['scale']
        except (KeyError, WDARequestError):
            # fallback: derive scale from pixel size / point size
            v = max(self.screenshot().size) / max(self.window_size())
            return round(v)
    @cached_property
    def bundle_id(self):
        """ the session matched bundle id """
        v = self._session_http.get("/").value
        return v['capabilities'].get('CFBundleIdentifier')
    def implicitly_wait(self, seconds):
        """
        set default element search timeout
        """
        assert isinstance(seconds, (int, float))
        self.__timeout = seconds
    def battery_info(self):
        """
        Returns dict: (I do not known what it means)
        eg: {"level": 1, "state": 2}
        """
        return self._session_http.get("/wda/batteryInfo").value
    def device_info(self):
        """
        Returns dict:
        eg: {'currentLocale': 'zh_CN', 'timeZone': 'Asia/Shanghai'}
        """
        return self._session_http.get("/wda/device/info").value
    @property
    def info(self):
        """
        Alias of device_info().

        Returns:
            {'timeZone': 'Asia/Shanghai',
            'currentLocale': 'zh_CN',
            'model': 'iPhone',
            'uuid': '9DAC43B3-6887-428D-B5D5-4892D1F38BAA',
            'userInterfaceIdiom': 0,
            'userInterfaceStyle': 'unsupported',
            'name': 'iPhoneSE',
            'isSimulator': False}
        """
        return self.device_info()
    def set_clipboard(self, content, content_type="plaintext"):
        """ set clipboard """
        # WDA expects base64-encoded content
        self._session_http.post(
            "/wda/setPasteboard", {
                "content": base64.b64encode(content.encode()).decode(),
                "contentType": content_type
            })
    @deprecated(version="1.0.0", reason="This method is deprecated now.")
    def set_alert_callback(self, callback):
        """
        Deprecated no-op; use register_callback / Alert.watch_and_click instead.

        Args:
            callback (func): called when alert popup
        Example of callback:
            def callback(session):
                session.alert.accept()
        """
        pass
# Not working
# def get_clipboard(self):
# return self.http.post("/wda/getPasteboard").value
# Not working
# def siri_activate(self, text):
# self.http.post("/wda/siri/activate", {"text": text})
def app_launch(self,
bundle_id,
arguments=[],
environment={},
wait_for_quiescence=False):
"""
Args:
- bundle_id (str): the app bundle id
- arguments (list): ['-u', 'https://www.google.com/ncr']
- enviroment (dict): {"KEY": "VAL"}
- wait_for_quiescence (bool): default False
"""
# Deprecated, use app_start instead
assert isinstance(arguments, (tuple, list))
assert isinstance(environment, dict)
# When device is locked, it is unable to launch
if self.locked():
self.unlock()
return self._session_http.post(
"/wda/apps/launch", {
"bundleId": bundle_id,
"arguments": arguments,
"environment": environment,
"shouldWaitForQuiescence": wait_for_quiescence,
})
    def app_activate(self, bundle_id):
        """Bring an app to the foreground."""
        # NOTE(review): this posts to /wda/apps/launch, the same route as
        # app_launch, not /wda/apps/activate — confirm this is intentional.
        return self._session_http.post("/wda/apps/launch", {
            "bundleId": bundle_id,
        })
    def app_terminate(self, bundle_id):
        """Kill an app by bundle id. Deprecated, use app_stop instead."""
        return self._session_http.post("/wda/apps/terminate", {
            "bundleId": bundle_id,
        })
    def app_state(self, bundle_id):
        """
        Returns example:
            {
                "value": 4,
                "sessionId": "0363BDC5-4335-47ED-A54E-F7CCB65C6A65"
            }

        value 1(not running) 2(running in background) 3(running in foreground)
        """
        # NOTE(review): session() treats value == 4 as "running"; the docstring
        # above only lists 1-3 — confirm the full value range against WDA.
        return self._session_http.post("/wda/apps/state", {
            "bundleId": bundle_id,
        })
def app_start(self,
bundle_id,
arguments=[],
environment={},
wait_for_quiescence=False):
""" alias for app_launch """
return self.app_launch(bundle_id, arguments, environment,
wait_for_quiescence)
    def app_stop(self, bundle_id: str):
        """ alias for app_terminate """
        self.app_terminate(bundle_id)
    def app_list(self):
        """
        Not working very well, only show springboard

        Returns:
            list of app

        Return example:
            [{'pid': 52, 'bundleId': 'com.apple.springboard'}]
        """
        return self._session_http.get("/wda/apps/list").value
    def open_url(self, url):
        """
        TODO: Never successed using before. Looks like use Siri to search.

        https://github.com/facebook/WebDriverAgent/blob/master/WebDriverAgentLib/Commands/FBSessionCommands.m#L43
        Args:
            url (str): url

        Raises:
            WDARequestError
        """
        # MDS platform uses a dedicated route
        if os.getenv("TMQ_ORIGIN") == "civita":  # MDS platform
            return self.http.post("/mds/openurl", {"url": url})
        return self._session_http.post('url', {'url': url})
    def deactivate(self, duration):
        """Put app into background and than put it back
        Args:
            - duration (float): deactivate time, seconds
        """
        return self._session_http.post('/wda/deactivateApp',
                                       dict(duration=duration))
    def tap(self, x, y):
        """Tap at absolute coordinates (x, y)."""
        # TMQ + MDS platform uses its own touch route
        if _is_tmq_platform() and os.environ.get(
                "TMQ_ORIGIN") == "civita":  # in TMQ and belong to MDS
            return self._session_http.post("/mds/touchAndHold",
                                           dict(x=x, y=y, duration=0.02))
        return self._session_http.post('/wda/tap/0', dict(x=x, y=y))
def _percent2pos(self, x, y, window_size=None):
if any(isinstance(v, float) for v in [x, y]):
w, h = window_size or self.window_size()
x = int(x * w) if isinstance(x, float) else x
y = int(y * h) if isinstance(y, float) else y
assert w >= x >= 0
assert h >= y >= 0
return (x, y)
    def click(self, x, y, duration: Optional[float] = None):
        """
        Combine tap and tap_hold

        Args:
            x, y: can be float(percent) or int
            duration (optional): tap_hold duration
        """
        x, y = self._percent2pos(x, y)
        if duration:
            return self.tap_hold(x, y, duration)
        return self.tap(x, y)
    def double_tap(self, x, y):
        """Double tap; x, y may be float(percent) or int coordinates."""
        x, y = self._percent2pos(x, y)
        return self._session_http.post('/wda/doubleTap', dict(x=x, y=y))
    def tap_hold(self, x, y, duration=1.0):
        """
        Tap and hold for a moment

        Args:
            - x, y(int, float): float(percent) or int(absolute coordicate)
            - duration(float): seconds of hold time

        [[FBRoute POST:@"/wda/touchAndHold"] respondWithTarget:self action:@selector(handleTouchAndHoldCoordinate:)],
        """
        x, y = self._percent2pos(x, y)
        data = {'x': x, 'y': y, 'duration': duration}
        return self._session_http.post('/wda/touchAndHold', data=data)
    def swipe(self, x1, y1, x2, y2, duration=0):
        """
        Args:
            x1, y1, x2, y2 (int, float): float(percent), int(coordicate)
            duration (float): start coordinate press duration (seconds)

        [[FBRoute POST:@"/wda/dragfromtoforduration"] respondWithTarget:self action:@selector(handleDragCoordinate:)],
        """
        # fetch window size once so both endpoints use the same reference
        if any(isinstance(v, float) for v in [x1, y1, x2, y2]):
            size = self.window_size()
            x1, y1 = self._percent2pos(x1, y1, size)
            x2, y2 = self._percent2pos(x2, y2, size)
        data = dict(fromX=x1, fromY=y1, toX=x2, toY=y2, duration=duration)
        return self._session_http.post('/wda/dragfromtoforduration', data=data)
    def _fast_swipe(self, x1, y1, x2, y2, velocity: int = 500):
        """
        Swipe using the /wda/drag route.

        velocity: the larger the faster
        """
        data = dict(fromX=x1, fromY=y1, toX=x2, toY=y2, velocity=velocity)
        return self._session_http.post('/wda/drag', data=data)
    def swipe_left(self):
        """ swipe right to left """
        w, h = self.window_size()
        return self.swipe(w, h // 2, 1, h // 2)
    def swipe_right(self):
        """ swipe left to right """
        w, h = self.window_size()
        return self.swipe(1, h // 2, w, h // 2)
    def swipe_up(self):
        """ swipe from center to top """
        w, h = self.window_size()
        return self.swipe(w // 2, h // 2, w // 2, 1)
    def swipe_down(self):
        """ swipe from center to bottom """
        w, h = self.window_size()
        return self.swipe(w // 2, h // 2, w // 2, h - 1)
    def _fast_swipe_ext(self, direction: str):
        """Directional swipe helper; only "up" and "down" are supported."""
        if direction == "up":
            # NOTE(review): the "up" branch uses the slow swipe() route while
            # "down" uses _fast_swipe() — confirm whether this is intentional.
            w, h = self.window_size()
            return self.swipe(w // 2, h // 2, w // 2, 1)
        elif direction == "down":
            w, h = self.window_size()
            return self._fast_swipe(w // 2, h // 2, w // 2, h - 1)
        else:
            raise RuntimeError("not supported direction:", direction)
    @property
    def orientation(self):
        """
        Return string
        One of <PORTRAIT | LANDSCAPE>
        """
        # WDA occasionally returns an empty value; retry a few times
        for _ in range(3):
            result = self._session_http.get('orientation').value
            if result:
                return result
            time.sleep(.5)
    @orientation.setter
    def orientation(self, value):
        """
        Args:
            - orientation(string): LANDSCAPE | PORTRAIT | UIA_DEVICE_ORIENTATION_LANDSCAPERIGHT |
                    UIA_DEVICE_ORIENTATION_PORTRAIT_UPSIDEDOWN
        """
        return self._session_http.post('orientation',
                                       data={'orientation': value})
    def window_size(self):
        """
        Returns:
            namedtuple: eg
                Size(width=320, height=568)
        """
        size = self._unsafe_window_size()
        if min(size) > 0:
            return size
        # get orientation, handle alert
        _ = self.orientation  # after this operation, may safe to get window_size
        if self.alert.exists:
            self.alert.accept()
            time.sleep(.1)
        size = self._unsafe_window_size()
        if min(size) > 0:
            return size
        # last resort: a fresh session against a known app usually unsticks WDA
        logger.warning("unable to get window_size(), try to to create a new session")
        with self.session("com.apple.Preferences") as app:
            size = app._unsafe_window_size()
            assert min(size) > 0, "unable to get window_size"
            return size
    def _unsafe_window_size(self):
        """
        returns (width, height) might be (0, 0)
        """
        value = self._session_http.get('/window/size').value
        w = roundint(value['width'])
        h = roundint(value['height'])
        return namedtuple('Size', ['width', 'height'])(w, h)
    @retry.retry(WDAKeyboardNotPresentError, tries=3, delay=1.0)
    def send_keys(self, value):
        """
        Type text into the focused element; retries while the keyboard is absent.
        """
        # WDA expects a list of characters
        if isinstance(value, six.string_types):
            value = list(value)
        return self._session_http.post('/wda/keys', data={'value': value})
def press(self, name: str):
"""
Args:
name: one of <home|volumeUp|volumeDown>
"""
valid_names = ("home", "volumeUp", "volumeDown")
if name not in valid_names:
raise ValueError(
f"Invalid name: {name}, should be one of {valid_names}")
self._session_http.post("/wda/pressButton", {"name": name})
def press_duration(self, name: str, duration: float):
"""
Args:
name: one of <home|volumeUp|volumeDown|power|snapshot>
duration: seconds
Notes:
snapshot equals power+home
Raises:
ValueError
Refs:
https://github.com/appium/WebDriverAgent/pull/494/files
"""
hid_usages = {
"home": 0x40,
"volumeup": 0xE9,
"volumedown": 0xEA,
"power": 0x30,
"snapshot": 0x65,
"power+home": 0x65
}
name = name.lower()
if name not in hid_usages:
raise ValueError("Invalid name:", name)
hid_usage = hid_usages[name]
return self._session_http.post("/wda/performIoHidEvent", {"page": 0x0C, "usage": hid_usage, "duration": duration})
    def keyboard_dismiss(self):
        """
        Not working for now
        """
        # deliberately disabled: the line below is unreachable until the
        # endpoint passes tests
        raise RuntimeError("not pass tests, this method is not allowed to use")
        self._session_http.post('/wda/keyboard/dismiss')
    def appium_settings(self, value: Optional[dict] = None) -> dict:
        """
        Get and set /session/$sessionId/appium/settings

        Getter when value is None, setter otherwise; both return the settings.
        """
        if value is None:
            return self._session_http.get("/appium/settings").value
        return self._session_http.post("/appium/settings",
                                       data={
                                           "settings": value
                                       }).value
    def xpath(self, value):
        """
        For weditor, d.xpath(...)
        """
        return Selector(self, xpath=value)
    def __call__(self, *args, **kwargs):
        """Shortcut for building a Selector, eg d(name="OK").click()."""
        # fall back to the client-wide implicit wait when no timeout given
        if 'timeout' not in kwargs:
            kwargs['timeout'] = self.__timeout
        return Selector(self, *args, **kwargs)
    @cached_property
    def alibaba(self):
        """ Only used in alibaba company """
        try:
            import wda_taobao
            return wda_taobao.Alibaba(self)
        except ImportError:
            raise RuntimeError(
                "@alibaba property requires wda_taobao library installed")
    @cached_property
    def taobao(self):
        """ Only used with the wda_taobao extension library """
        try:
            import wda_taobao
            return wda_taobao.Taobao(self)
        except ImportError:
            raise RuntimeError(
                "@taobao property requires wda_taobao library installed")
class Alert(object):
    """Wrapper around the WDA /alert endpoints (popup dialogs)."""
    # button labels (mostly Chinese iOS defaults) auto-accepted by watch_and_click
    DEFAULT_ACCEPT_BUTTONS = [
        "使用App时允许", "无线局域网与蜂窝网络", "好", "稍后", "稍后提醒", "确定",
        "允许", "以后", "打开", "录屏", "Allow", "OK", "YES", "Yes", "Later", "Close"
    ]
    def __init__(self, client: BaseClient):
        self._c = client
        # session-scoped HTTP helper
        self.http = client._session_http
    @property
    def exists(self):
        """True when an alert is currently displayed."""
        try:
            self.text
            return True
        except WDARequestError as e:
            # expect e.status != 27 in old version and e.value == 'no such alert' in new version
            return False
    @property
    def text(self):
        """Alert body text; raises WDARequestError when no alert is present."""
        return self.http.get('/alert/text').value
    def wait(self, timeout=20.0):
        """Poll until an alert appears; returns False on timeout."""
        start_time = time.time()
        while time.time() - start_time < timeout:
            if self.exists:
                return True
            time.sleep(0.2)
        return False
    def accept(self):
        """Accept the alert (POST /alert/accept)."""
        return self.http.post('/alert/accept')
    def dismiss(self):
        """Dismiss the alert (POST /alert/dismiss)."""
        return self.http.post('/alert/dismiss')
    def buttons(self):
        """List of button labels on the current alert."""
        return self.http.get('/wda/alert/buttons').value
    def click(self, button_name: Optional[Union[str, list]] = None):
        """
        Args:
            - button_name: the name of the button

        Returns:
            button_name being clicked

        Raises:
            ValueError when button_name is not in avaliable button names
        """
        # NOTE(review): calling click() with button_name=None iterates None
        # below and raises TypeError, not ValueError — confirm intended.
        # Actually, It has no difference POST to accept or dismiss
        if isinstance(button_name, str):
            self.http.post('/alert/accept', data={"name": button_name})
            return button_name
        avaliable_names = self.buttons()
        buttons: list = button_name
        for bname in buttons:
            if bname in avaliable_names:
                return self.click(bname)
        raise ValueError("Only these buttons can be clicked", avaliable_names)
    def click_exists(self, buttons: Optional[Union[str, list]] = None):
        """
        Args:
            - buttons: the name of the button of list of names

        Returns:
            button_name clicked or None
        """
        try:
            return self.click(buttons)
        except (ValueError, WDARequestError):
            return None
    @contextlib.contextmanager
    def watch_and_click(self,
                        buttons: Optional[list] = None,
                        interval: float = 2.0):
        """ watch and click button
        Args:
            buttons: buttons name which need to click
            interval: check interval
        """
        if not buttons:
            buttons = self.DEFAULT_ACCEPT_BUTTONS
        event = threading.Event()
        def _inner():
            # background poller: click the first matching button on any alert
            while not event.is_set():
                try:
                    alert_buttons = self.buttons()
                    logger.info("Alert detected, buttons: %s", alert_buttons)
                    for btn_name in buttons:
                        if btn_name in alert_buttons:
                            logger.info("Alert click: %s", btn_name)
                            self.click(btn_name)
                            break
                    else:
                        logger.warning("Alert not handled")
                except WDARequestError:
                    pass
                time.sleep(interval)
        threading.Thread(name="alert", target=_inner, daemon=True).start()
        yield None
        # stop the watcher when the with-block exits
        event.set()
class Client(BaseClient):
    """BaseClient plus alert handling."""
    @property
    def alert(self) -> Alert:
        # a fresh Alert wrapper per access; it holds no state of its own
        return Alert(self)
Session = Client  # for compatibility
class Selector(object):
    """Lazy element query builder; resolves to Element(s) via WDA /elements."""
    def __init__(self,
                 session: Session,
                 predicate=None,
                 id=None,
                 className=None,
                 type=None,
                 name=None,
                 nameContains=None,
                 nameMatches=None,
                 text=None,
                 textContains=None,
                 textMatches=None,
                 value=None,
                 valueContains=None,
                 label=None,
                 labelContains=None,
                 visible=None,
                 enabled=None,
                 classChain=None,
                 xpath=None,
                 parent_class_chains=None,
                 timeout=10.0,
                 index=0):
        '''
        Args:
            predicate (str): predicate string
            id (str): raw identifier
            className (str): attr of className
            type (str): alias of className
            name (str): attr for name
            nameContains (str): attr of name contains
            nameMatches (str): regex string
            text (str): alias of name
            textContains (str): alias of nameContains
            textMatches (str): alias of nameMatches
            value (str): attr of value, not used in most times
            valueContains (str): attr of value contains
            label (str): attr for label
            labelContains (str): attr for label contains
            visible (bool): is visible
            enabled (bool): is enabled
            classChain (str): string of ios chain query, eg: **/XCUIElementTypeOther[`value BEGINSWITH 'blabla'`]
            xpath (str): xpath string, a little slow, but works fine
            timeout (float): maximum wait element time, default 10.0s
            index (int): index of found elements

        WDA use two key to find elements "using", "value"
        Examples:
        "using" can be on of
            "partial link text", "link text"
            "name", "id", "accessibility id"
            "class name", "class chain", "xpath", "predicate string"

        predicate string support many keys
            UID,
            accessibilityContainer,
            accessible,
            enabled,
            frame,
            label,
            name,
            rect,
            type,
            value,
            visible,
            wdAccessibilityContainer,
            wdAccessible,
            wdEnabled,
            wdFrame,
            wdLabel,
            wdName,
            wdRect,
            wdType,
            wdUID,
            wdValue,
            wdVisible
        '''
        assert isinstance(session, Session)
        self._session = session
        self._predicate = predicate
        self._id = id
        self._class_name = className or type
        self._name = self._add_escape_character_for_quote_prime_character(
            name or text)
        self._name_part = nameContains or textContains
        self._name_regex = nameMatches or textMatches
        self._value = value
        self._value_part = valueContains
        self._label = label
        self._label_part = labelContains
        self._enabled = enabled
        self._visible = visible
        self._index = index
        self._xpath = self._fix_xcui_type(xpath)
        self._class_chain = self._fix_xcui_type(classChain)
        self._timeout = timeout
        # some fixtures
        if self._class_name and not self._class_name.startswith(
                'XCUIElementType'):
            self._class_name = 'XCUIElementType' + self._class_name
        if self._name_regex:
            if not self._name_regex.startswith(
                    '^') and not self._name_regex.startswith('.*'):
                self._name_regex = '.*' + self._name_regex
            if not self._name_regex.endswith(
                    '$') and not self._name_regex.endswith('.*'):
                self._name_regex = self._name_regex + '.*'
        # default changed from a shared mutable [] to None to avoid the
        # mutable-default-argument pitfall; behavior is unchanged
        self._parent_class_chains = parent_class_chains or []
    @property
    def http(self):
        """Session-scoped HTTP helper."""
        return self._session._session_http
    def _fix_xcui_type(self, s):
        """Prefix bare element type names with 'XCUIElementType' in a query."""
        if s is None:
            return
        re_element = '|'.join(xcui_element_types.ELEMENTS)
        # raw string for the replacement: '\g<1>' must reach re.sub verbatim
        return re.sub(r'/(' + re_element + ')', r'/XCUIElementType\g<1>', s)
    def _add_escape_character_for_quote_prime_character(self, text):
        """
        Fix for https://github.com/openatx/facebook-wda/issues/33

        Returns:
            string with properly formated quotes, or non changed text
        """
        if text is not None:
            if "'" in text:
                return text.replace("'", "\\'")
            elif '"' in text:
                return text.replace('"', '\\"')
            else:
                return text
        else:
            return text
    def _wdasearch(self, using, value):
        """
        Returns:
            element_ids (list(string)): example ['id1', 'id2']

        HTTP example response:
        [
            {"ELEMENT": "E2FF5B2A-DBDF-4E67-9179-91609480D80A"},
            {"ELEMENT": "597B1A1E-70B9-4CBE-ACAD-40943B0A6034"}
        ]
        """
        element_ids = []
        for v in self.http.post('/elements', {
                'using': using,
                'value': value
        }).value:
            element_ids.append(v['ELEMENT'])
        return element_ids
    def _gen_class_chain(self):
        """Build one class-chain segment from the stored attributes."""
        # just return if aleady exists predicate
        if self._predicate:
            return '/XCUIElementTypeAny[`' + self._predicate + '`]'
        qs = []
        if self._name:
            qs.append("name == '%s'" % self._name)
        if self._name_part:
            qs.append("name CONTAINS %r" % self._name_part)
        if self._name_regex:
            qs.append("name MATCHES %r" % self._name_regex)
        if self._label:
            qs.append("label == '%s'" % self._label)
        if self._label_part:
            qs.append("label CONTAINS '%s'" % self._label_part)
        if self._value:
            qs.append("value == '%s'" % self._value)
        if self._value_part:
            qs.append("value CONTAINS '%s'" % self._value_part)
        # parentheses matter: without them the old code's conditional bound to
        # the whole %-expression, appending a bare 'false' when visible=False
        if self._visible is not None:
            qs.append("visible == %s" % ('true' if self._visible else 'false'))
        if self._enabled is not None:
            qs.append("enabled == %s" % ('true' if self._enabled else 'false'))
        predicate = ' AND '.join(qs)
        chain = '/' + (self._class_name or 'XCUIElementTypeAny')
        if predicate:
            chain = chain + '[`' + predicate + '`]'
        if self._index:
            chain = chain + '[%d]' % self._index
        return chain
    @retry.retry(WDAStaleElementReferenceError, tries=3, delay=.5, jitter=.2)
    def find_element_ids(self):
        """Resolve the query to a list of WDA element ids."""
        if self._id:
            return self._wdasearch('id', self._id)
        if self._predicate:
            return self._wdasearch('predicate string', self._predicate)
        if self._xpath:
            return self._wdasearch('xpath', self._xpath)
        if self._class_chain:
            return self._wdasearch('class chain', self._class_chain)
        chain = '**' + ''.join(
            self._parent_class_chains) + self._gen_class_chain()
        if DEBUG:
            print('CHAIN:', chain)
        return self._wdasearch('class chain', chain)
    def find_elements(self):
        """
        Returns:
            Element (list): all the elements
        """
        es = []
        for element_id in self.find_element_ids():
            e = Element(self._session, element_id)
            es.append(e)
        return es
    def count(self):
        """Number of matching elements right now."""
        return len(self.find_element_ids())
    def get(self, timeout=None, raise_error=True):
        """
        Args:
            timeout (float): timeout for query element, unit seconds
                Default 10s
            raise_error (bool): whether to raise error if element not found

        Returns:
            Element: UI Element

        Raises:
            WDAElementNotFoundError if raise_error is True else None
        """
        start_time = time.time()
        if timeout is None:
            timeout = self._timeout
        while True:
            elems = self.find_elements()
            if len(elems) > 0:
                return elems[0]
            if start_time + timeout < time.time():
                break
            time.sleep(0.5)
        # element not found within timeout
        if raise_error:
            raise WDAElementNotFoundError("element not found",
                                          "timeout %.1f" % timeout)
    def __getattr__(self, oper):
        """Proxy unknown attributes to the first matching Element."""
        if oper.startswith("_"):
            raise AttributeError("invalid attr", oper)
        if not hasattr(Element, oper):
            raise AttributeError("'Element' object has no attribute %r" % oper)
        el = self.get()
        return getattr(el, oper)
    def set_timeout(self, s):
        """
        Set element wait timeout
        """
        self._timeout = s
        return self
    def __getitem__(self, index):
        self._index = index
        return self
    def child(self, *args, **kwargs):
        """Create a child Selector scoped under this selector's class chain."""
        chain = self._gen_class_chain()
        kwargs['parent_class_chains'] = self._parent_class_chains + [chain]
        return Selector(self._session, *args, **kwargs)
    @property
    def exists(self):
        """True when at least index+1 elements currently match."""
        return len(self.find_element_ids()) > self._index
    def click(self, timeout: Optional[float] = None):
        """
        Click element

        Args:
            timeout (float): max wait seconds
        """
        e = self.get(timeout=timeout)
        e.click()
    def click_exists(self, timeout=0):
        """
        Wait element and perform click

        Args:
            timeout (float): timeout for wait

        Returns:
            bool: if successfully clicked
        """
        e = self.get(timeout=timeout, raise_error=False)
        if e is None:
            return False
        e.click()
        return True
    def wait(self, timeout=None, raise_error=False):
        """ alias of get
        Args:
            timeout (float): timeout seconds
            raise_error (bool): default false, whether to raise error if element not found

        Returns:
            Element or None
        """
        return self.get(timeout=timeout, raise_error=raise_error)
    def wait_gone(self, timeout=None, raise_error=True):
        """
        Args:
            timeout (float): default timeout
            raise_error (bool): return bool or raise error

        Returns:
            bool: works when raise_error is False

        Raises:
            WDAElementNotDisappearError
        """
        start_time = time.time()
        if timeout is None or timeout <= 0:
            timeout = self._timeout
        while start_time + timeout > time.time():
            if not self.exists:
                return True
        if not raise_error:
            return False
        raise WDAElementNotDisappearError("element not gone")
    # todo
    # pinch
    # touchAndHold
    # dragfromtoforduration
    # twoFingerTap
    # todo
    # handleGetIsAccessibilityContainer
    # [[FBRoute GET:@"/wda/element/:uuid/accessibilityContainer"] respondWithTarget:self action:@selector(handleGetIsAccessibilityContainer:)],
class Element(object):
def __init__(self, session: Session, id: str):
"""
base_url eg: http://localhost:8100/session/$SESSION_ID
"""
self._session = session
self._id = id
def __repr__(self):
return '<wda.Element(id="{}")>'.format(self._id)
@property
def http(self):
return self._session._session_http
def _req(self, method, url, data=None):
return self.http.fetch(method, '/element/' + self._id + url, data)
def _wda_req(self, method, url, data=None):
return self.http.fetch(method, '/wda/element/' + self._id + url, data)
def _prop(self, key):
return self._req('get', '/' + key.lstrip('/')).value
def _wda_prop(self, key):
ret = self.http.get('/wda/element/%s/%s' % (self._id, key)).value
return ret
@property
def info(self):
return {
"id": self._id,
"label": self.label,
"value": self.value,
"text": self.text,
"name": self.name,
"className": self.className,
"enabled": self.enabled,
"displayed": self.displayed,
"visible": self.visible,
"accessible": self.accessible,
"accessibilityContainer": self.accessibility_container
}
@property
def id(self):
return self._id
@property
def label(self):
return self._prop('attribute/label')
@property
def className(self):
return self._prop('attribute/type')
@property
def text(self):
return self._prop('text')
@property
def name(self):
return self._prop('name')
@property
def displayed(self):
return self._prop("displayed")
@property
def enabled(self):
return self._prop('enabled')
@property
def accessible(self):
return self._wda_prop("accessible")
@property
def accessibility_container(self):
return self._wda_prop('accessibilityContainer')
@property
def value(self):
return self._prop('attribute/value')
@property
def visible(self):
return self._prop('attribute/visible')
@property
def bounds(self) -> Rect:
value = self._prop('rect')
x, y = value['x'], value['y']
w, h = value['width'], value['height']
return Rect(x, y, w, h)
# operations
def tap(self):
return self._req('post', '/click')
def click(self):
"""
Get element center position and do click, a little slower
"""
# Some one reported, invisible element can not click
# So here, git position and then do tap
x, y = self.bounds.center
self._session.click(x, y)
# return self.tap()
def tap_hold(self, duration=1.0):
"""
Tap and hold for a moment
Args:
duration (float): seconds of hold time
[[FBRoute POST:@"/wda/element/:uuid/touchAndHold"] respondWithTarget:self action:@selector(handleTouchAndHold:)],
"""
return self._wda_req('post', '/touchAndHold', {'duration': duration})
def scroll(self, direction='visible', distance=1.0):
"""
Args:
direction (str): one of "visible", "up", "down", "left", "right"
distance (float): swipe distance, only works when direction is not "visible"
Raises:
ValueError
distance=1.0 means, element (width or height) multiply 1.0
"""
if direction == 'visible':
self._wda_req('post', '/scroll', {'toVisible': True})
elif direction in ['up', 'down', 'left', 'right']:
self._wda_req('post', '/scroll', {
'direction': direction,
'distance': distance
})
else:
raise ValueError("Invalid direction")
return self
    # TvOS
    # @property
    # def focused(self):
    #
    # def focuse(self):
    def pickerwheel_select(self):
        """ Select by pickerwheel (not implemented yet). """
        # Ref: https://github.com/appium/WebDriverAgent/blob/e5d46a85fbdb22e401d396cedf0b5a9bbc995084/WebDriverAgentLib/Commands/FBElementCommands.m#L88
        raise NotImplementedError()
def pinch(self, scale, velocity):
"""
Args:
scale (float): scale must > 0
velocity (float): velocity must be less than zero when scale is less than 1
Example:
pinchIn -> scale:0.5, velocity: -1
pinchOut -> scale:2.0, velocity: 1
"""
data = {'scale': scale, 'velocity': velocity}
return self._wda_req('post', '/pinch', data)
    def set_text(self, value):
        """Type *value* into the element via WDA's /value endpoint."""
        return self._req('post', '/value', {'value': value})
    def clear_text(self):
        """Clear the element's text content (e.g. empty a text field)."""
        return self._req('post', '/clear')
    # def child(self, **kwargs):
    #     return Selector(self.__base_url, self._id, **kwargs)
    # todo lot of other operations
    # tap_hold
class USBClient(Client):
    """ connect device through unix:/var/run/usbmuxd """
    def __init__(self, udid: str = "", port: int = 8100, wda_bundle_id=None):
        """
        Args:
            udid: target device serial; when empty, auto-selects the single
                attached device (errors if zero or more than one attached).
            port: WDA port on the device (default 8100).
            wda_bundle_id: bundle id used to launch WDA via xctest when the
                agent is not already running.

        Raises:
            RuntimeError: no device connected, more than one device
                connected, or WDA failed its readiness check after launch.
        """
        if not udid:
            usbmux = Usbmux()
            infos = usbmux.device_list()
            if len(infos) == 0:
                raise RuntimeError("no device connected")
            elif len(infos) >= 2:
                raise RuntimeError("more then one device connected")
            udid = infos[0]['SerialNumber']
        # requests_usbmux routes HTTP over the usbmuxd unix socket.
        super().__init__(url=requests_usbmux.DEFAULT_SCHEME + "{}:{}".format(udid, port))
        if self.is_ready():
            return
        # WDA not running yet: launch it via xctest, then wait for readiness.
        _start_wda_xctest(udid, wda_bundle_id)
        if not self.wait_ready(timeout=20):
            raise RuntimeError("wda xctest launched but check failed")
|
"""
Renderer Module
This module defines the PlotlyRenderer class and a single function,
fig_to_plotly, which is intended to be the main way that users will interact
with the matplotlylib package.
"""
import warnings
from . mplexporter import Exporter, Renderer
from . import mpltools
from .. graph_objs import *
class PlotlyRenderer(Renderer):
"""A renderer class inheriting from base for rendering mpl plots in plotly.
A renderer class to be used with an exporter for rendering matplotlib
plots in Plotly. This module defines the PlotlyRenderer class which handles
the creation of the JSON structures that get sent to plotly.
All class attributes available are defined in __init__().
Basic Usage:
# (mpl code) #
fig = gcf()
renderer = PlotlyRenderer(fig)
exporter = Exporter(renderer)
exporter.run(fig) # ... et voila
"""
    def __init__(self):
        """Initialize PlotlyRenderer obj.
        PlotlyRenderer obj is called on by an Exporter object to draw
        matplotlib objects like figures, axes, text, etc.
        All class attributes are listed here in the __init__ method.
        """
        self.plotly_fig = Figure(data=Data(), layout=Layout())  # output figure
        self.mpl_fig = None  # source mpl figure, set in open_figure()
        self.current_ax_patches = []  # bar patches pending for current axes
        self.axis_ct = 0  # number of subplots opened so far
        self.mpl_x_bounds = (0, 1)  # horizontal extent of the mpl axes region
        self.mpl_y_bounds = (0, 1)  # vertical extent of the mpl axes region
        self.msg = "Initialized PlotlyRenderer\n"  # running debug log
    def open_figure(self, fig, props):
        """Creates a new figure by beginning to fill out layout dict.
        The 'autosize' key is set to false so that the figure will mirror
        sizes set by mpl. The 'hovermode' key controls what shows up when you
        mouse around a figure in plotly, it's set to show the 'closest' point.
        Positional arguments:
        fig -- a matplotlib.figure.Figure object.
        props.keys(): [
        'figwidth',
        'figheight',
        'dpi'
        ]
        """
        self.msg += "Opening figure\n"
        self.mpl_fig = fig
        # Pixel size = inches * dpi; autosize off so plotly keeps mpl's size.
        self.plotly_fig['layout'] = Layout(
            width=int(props['figwidth']*props['dpi']),
            height=int(props['figheight']*props['dpi']),
            autosize=False,
            hovermode='closest')
        self.mpl_x_bounds, self.mpl_y_bounds = mpltools.get_axes_bounds(fig)
        # Margins reproduce the whitespace mpl leaves around the axes region.
        margin = Margin(
            l=int(self.mpl_x_bounds[0]*self.plotly_fig['layout']['width']),
            r=int((1-self.mpl_x_bounds[1])*self.plotly_fig['layout']['width']),
            t=int((1-self.mpl_y_bounds[1])*self.plotly_fig['layout']['height']),
            b=int(self.mpl_y_bounds[0]*self.plotly_fig['layout']['height']),
            pad=0)
        self.plotly_fig['layout']['margin'] = margin
def close_figure(self, fig):
"""Closes figure by cleaning up data and layout dictionaries.
The PlotlyRenderer's job is to create an appropriate set of data and
layout dictionaries. When the figure is closed, some cleanup and
repair is necessary. This method removes inappropriate dictionary
entries, freeing up Plotly to use defaults and best judgements to
complete the entries. This method is called by an Exporter object.
Positional arguments:
fig -- a matplotlib.figure.Figure object.
"""
self.plotly_fig.force_clean()
self.plotly_fig['layout']['showlegend'] = False
self.msg += "Closing figure\n"
    def open_axes(self, ax, props):
        """Setup a new axes object (subplot in plotly).
        Plotly stores information about subplots in different 'xaxis' and
        'yaxis' objects which are numbered. These are just dictionaries
        included in the layout dictionary. This function takes information
        from the Exporter, fills in appropriate dictionary entries,
        and updates the layout dictionary. PlotlyRenderer keeps track of the
        number of plots by incrementing the axis_ct attribute.
        Setting the proper plot domain in plotly is a bit tricky. Refer to
        the documentation for mpltools.convert_x_domain and
        mpltools.convert_y_domain.
        Positional arguments:
        ax -- an mpl axes object. This will become a subplot in plotly.
        props.keys() -- [
        'axesbg', (background color for axes obj)
        'axesbgalpha', (alpha, or opacity for background)
        'bounds', ((x0, y0, width, height) for axes)
        'dynamic', (zoom/pan-able?)
        'axes', (list: [xaxis, yaxis])
        'xscale', (log, linear, or date)
        'yscale',
        'xlim', (range limits for x)
        'ylim',
        'xdomain' (xdomain=xlim, unless it's a date)
        'ydomain'
        ]
        """
        self.msg += " Opening axes\n"
        self.axis_ct += 1
        # set defaults in axes; x and y are cross-anchored so they frame
        # the same subplot.
        xaxis = XAxis(
            anchor='y{}'.format(self.axis_ct),
            zeroline=False,
            showline=True,
            mirror='ticks',
            ticks='inside')
        yaxis = YAxis(
            anchor='x{}'.format(self.axis_ct),
            zeroline=False,
            showline=True,
            mirror='ticks',
            ticks='inside')
        # update defaults with things set in mpl
        mpl_xaxis, mpl_yaxis = mpltools.prep_xy_axis(ax=ax,
                                                     props=props,
                                                     x_bounds=self.mpl_x_bounds,
                                                     y_bounds=self.mpl_y_bounds)
        xaxis.update(mpl_xaxis)
        yaxis.update(mpl_yaxis)
        # put axes in our figure, keyed by subplot number
        self.plotly_fig['layout']['xaxis{}'.format(self.axis_ct)] = xaxis
        self.plotly_fig['layout']['yaxis{}'.format(self.axis_ct)] = yaxis
def close_axes(self, ax):
"""Close the axes object and clean up.
Bars from bar charts are given to PlotlyRenderer one-by-one,
thus they need to be taken care of at the close of each axes object.
The self.current_ax_patches variable should be empty unless a bar
chart has been created or a rectangle object has been drawn that has
an edge exactly on the lines x=0 or y=0.
Positional arguments:
ax -- an mpl axes object, not required at this time.
"""
for patch_coll in self.current_ax_patches:
self.draw_bar(patch_coll)
self.current_ax_patches = [] # clear this for next axes obj
self.msg += " Closing axes\n"
    def draw_bar(self, patch_coll):
        """Draw a collection of similar patches as a bar chart.
        After bars are sorted, an appropriate data dictionary must be created
        to tell plotly about this data. Just like draw_line or draw_markers,
        draw_bar translates patch/path information into something plotly
        understands.
        Positional arguments:
        patch_coll -- a collection of patches to be drawn as a bar chart.
        """
        orientation = patch_coll[0]['orientation']
        if orientation == 'v':
            self.msg += " Attempting to draw a vertical bar chart\n"
            # x at each bar's horizontal center, y at its top edge
            patch_coll.sort(key=lambda b: b['x0'])
            x = [bar['x0']+(bar['x1']-bar['x0'])/2 for bar in patch_coll]
            y = [bar['y1'] for bar in patch_coll]
            bar_gap = mpltools.get_bar_gap([bar['x0'] for bar in patch_coll],
                                           [bar['x1'] for bar in patch_coll])
        else:
            self.msg += " Attempting to draw a horizontal bar chart\n"
            # x at each bar's right edge, y at its vertical center
            patch_coll.sort(key=lambda b: b['y0'])
            x = [bar['x1'] for bar in patch_coll]
            y = [bar['y0']+(bar['y1']-bar['y0'])/2 for bar in patch_coll]
            bar_gap = mpltools.get_bar_gap([bar['y0'] for bar in patch_coll],
                                           [bar['y1'] for bar in patch_coll])
        # Style is taken from the first patch only.
        bar = Bar(orientation=orientation,
                  x=x,
                  y=y,
                  xaxis='x{}'.format(self.axis_ct),
                  yaxis='y{}'.format(self.axis_ct),
                  opacity=patch_coll[0]['alpha'],
                  marker=Marker(
                      color=patch_coll[0]['facecolor'],
                      line=Line(width=patch_coll[0]['edgewidth'])))
        # Single-bar "charts" are assumed to be data redundancy and skipped.
        if len(bar['x']) > 1:
            self.msg += " Heck yeah, I drew that bar chart\n"
            self.plotly_fig['data'] += bar,
            if bar_gap is not None:
                self.plotly_fig['layout']['bargap'] = bar_gap
        else:
            self.msg += " Bar chart not drawn\n"
            warnings.warn('found box chart data with length <= 1, '
                          'assuming data redundancy, not plotting.')
def draw_marked_line(self, **props):
"""Create a data dict for a line obj.
This will draw 'lines', 'markers', or 'lines+markers'.
props.keys() -- [
'coordinates', ('data', 'axes', 'figure', or 'display')
'data', (a list of xy pairs)
'mplobj', (the matplotlib.lines.Line2D obj being rendered)
'label', (the name of the Line2D obj being rendered)
'linestyle', (linestyle dict, can be None, see below)
'markerstyle', (markerstyle dict, can be None, see below)
]
props['linestyle'].keys() -- [
'alpha', (opacity of Line2D obj)
'color', (color of the line if it exists, not the marker)
'linewidth',
'dasharray', (code for linestyle, see DASH_MAP in mpltools.py)
'zorder', (viewing precedence when stacked with other objects)
]
props['markerstyle'].keys() -- [
'alpha', (opacity of Line2D obj)
'marker', (the mpl marker symbol, see SYMBOL_MAP in mpltools.py)
'facecolor', (color of the marker face)
'edgecolor', (color of the marker edge)
'edgewidth', (width of marker edge)
'markerpath', (an SVG path for drawing the specified marker)
'zorder', (viewing precedence when stacked with other objects)
]
"""
self.msg += " Attempting to draw a line "
line, marker = {}, {}
if props['linestyle'] and props['markerstyle']:
self.msg += "... with both lines+markers\n"
mode = "lines+markers"
elif props['linestyle']:
self.msg += "... with just lines\n"
mode = "lines"
elif props['markerstyle']:
self.msg += "... with just markers\n"
mode = "markers"
if props['linestyle']:
line = Line(
opacity=props['linestyle']['alpha'],
color=props['linestyle']['color'],
width=props['linestyle']['linewidth'],
dash=mpltools.convert_dash(props['linestyle']['dasharray'])
)
if props['markerstyle']:
marker = Marker(
opacity=props['markerstyle']['alpha'],
color=props['markerstyle']['facecolor'],
symbol=mpltools.convert_symbol(props['markerstyle']['marker']),
size=props['markerstyle']['markersize'],
line=Line(
color=props['markerstyle']['edgecolor'],
width=props['markerstyle']['edgewidth']
)
)
if props['coordinates'] == 'data':
marked_line = Scatter(mode=mode,
name=props['label'],
x=[xy_pair[0] for xy_pair in props['data']],
y=[xy_pair[1] for xy_pair in props['data']],
xaxis='x{}'.format(self.axis_ct),
yaxis='y{}'.format(self.axis_ct),
line=line,
marker=marker)
self.plotly_fig['data'] += marked_line,
self.msg += " Heck yeah, I drew that line\n"
else:
self.msg += " Line didn't have 'data' coordinates, " \
"not drawing\n"
warnings.warn("Bummer! Plotly can currently only draw Line2D "
"objects from matplotlib that are in 'data' "
"coordinates!")
def draw_image(self, **props):
"""Draw image.
Not implemented yet!
"""
self.msg += " Attempting to draw image\n"
self.msg += " Not drawing image\n"
warnings.warn("Aw. Snap! You're gonna have to hold off on "
"the selfies for now. Plotly can't import "
"images from matplotlib yet!")
def draw_path_collection(self, **props):
"""Add a path collection to data list as a scatter plot.
Current implementation defaults such collections as scatter plots.
Matplotlib supports collections that have many of the same parameters
in common like color, size, path, etc. However, they needn't all be
the same. Plotly does not currently support such functionality and
therefore, the style for the first object is taken and used to define
the remaining paths in the collection.
props.keys() -- [
'paths', (structure: [vertices, path_code])
'path_coordinates', ('data', 'axes', 'figure', or 'display')
'path_transforms', (mpl transform, including Affine2D matrix)
'offsets', (offset from axes, helpful if in 'data')
'offset_coordinates', ('data', 'axes', 'figure', or 'display')
'offset_order',
'styles', (style dict, see below)
'mplobj' (the collection obj being drawn)
]
props['styles'].keys() -- [
'linewidth', (one or more linewidths)
'facecolor', (one or more facecolors for path)
'edgecolor', (one or more edgecolors for path)
'alpha', (one or more opacites for path)
'zorder', (precedence when stacked)
]
"""
self.msg += " Attempting to draw a path collection\n"
if props['offset_coordinates'] is 'data':
alpha_face = props['styles']['facecolor'][0][3]
rgb_face = [int(c*255)
for c in props['styles']['facecolor'][0][:3]]
alpha_edge = props['styles']['edgecolor'][0][3]
rgb_edge = [int(c*255)
for c in props['styles']['edgecolor'][0][:3]]
data = props['offsets']
marker = mpltools.convert_path(props['paths'][0])
style = {
'alpha': alpha_face,
'facecolor': 'rgb({},{},{})'.format(*rgb_face),
'marker': marker,
'edgecolor': 'rgb({},{},{})'.format(*rgb_edge),
'edgewidth': props['styles']['linewidth'][0],
'markersize': mpltools.convert_affine_trans(
dpi=self.mpl_fig.get_dpi(),
aff=props['path_transforms'][0])
}
scatter_props = {
'coordinates': 'data',
'data': data,
'label': None,
'markerstyle': style,
'linestyle': None
}
self.msg += " Drawing path collection as markers\n"
self.draw_marked_line(**scatter_props)
else:
self.msg += " Path collection not linked to 'data', " \
"not drawing\n"
warnings.warn("Dang! That path collection is out of this "
"world. I totally don't know what to do with "
"it yet! Plotly can only import path "
"collections linked to 'data' coordinates")
    def draw_path(self, **props):
        """Draw path, currently only attempts to draw bar charts.
        This function attempts to sort a given path into a collection of
        horizontal or vertical bar charts. Most of the actual code takes
        place in functions from mpltools.py.
        props.keys() -- [
        'data', (a list of verticies for the path)
        'coordinates', ('data', 'axes', 'figure', or 'display')
        'pathcodes', (code for the path, structure: ['M', 'L', 'Z', etc.])
        'style', (style dict, see below)
        'mplobj' (the mpl path object)
        ]
        props['style'].keys() -- [
        'alpha', (opacity of path obj)
        'edgecolor',
        'facecolor',
        'edgewidth',
        'dasharray', (style for path's enclosing line)
        'zorder' (precedence of obj when stacked)
        ]
        """
        self.msg += " Attempting to draw a path\n"
        is_bar = mpltools.is_bar(**props)
        is_barh = mpltools.is_barh(**props)
        # NOTE(review): the two branches below are independent ifs, so a
        # path classified as both bar and barh would be filed twice --
        # presumably mpltools makes the predicates exclusive; verify there.
        if is_bar:  # if we think it's a bar, add it!
            self.msg += " Assuming path is a vertical bar\n"
            bar = mpltools.make_bar(orientation='v', **props)
            self.file_bar(bar)
        if is_barh:  # perhaps a horizontal bar?
            self.msg += " Assuming path is a horizontal bar\n"
            bar = mpltools.make_bar(orientation='h', **props)
            self.file_bar(bar)
        if not (is_bar or is_barh):
            self.msg += " This path isn't a bar, not drawing\n"
            warnings.warn("I found a path object that I don't think is part "
                          "of a bar chart. Ignoring.")
def file_bar(self, bar):
"""Puts a given bar into an appropriate bar or barh collection.
Bars come from the mplexporter one-by-one. To try to put them into
appropriate data sets, we must compare them to existing data.
Positional arguments:
bar -- a bar dictionary created in mpltools.make_bar.py.
bar.keys() -- [
'bar', (mpl path object)
'orientation', (bar direction, 'v' or 'h' for horizontal or vertical)
'x0', ([x0, y0] = bottom-left corner of rectangle)
'y0',
'x1', ([x1, y1] = top-right corner of rectangle):
'y1',
'alpha', (opacity of rectangle)
'edgecolor', (boundary line color)
'facecolor', (rectangle color)
'edgewidth', (boundary line width)
'dasharray', (linestyle for boundary line)
'zorder', (precedence when stacked)
]
"""
self.msg += " Putting a bar into the proper bar collection\n"
if len(self.current_ax_patches) == 0:
self.msg += " Started a new bar collection with this " \
"bar\n"
self.current_ax_patches.append([])
self.current_ax_patches[-1] += bar,
else:
match = False
for patch_collection in self.current_ax_patches:
if mpltools.check_bar_match(patch_collection[0], bar):
match = True
patch_collection += bar,
self.msg += " Filed bar into existing bar " \
"collection\n"
if not match:
self.msg += " Started a new bar collection with " \
"this bar\n"
self.current_ax_patches.append([])
self.current_ax_patches[-1] += bar,
def draw_text(self, **props):
"""Create an annotation dict for a text obj.
Currently, plotly uses either 'page' or 'data' to reference
annotation locations. These refer to 'display' and 'data',
respectively for the 'coordinates' key used in the Exporter.
Appropriate measures are taken to transform text locations to
reference one of these two options.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Attempting to draw an mpl text object\n"
align = props['mplobj']._multialignment
if not align:
align = 'center' # mpl default
if 'annotations' not in self.plotly_fig['layout']:
self.plotly_fig['layout']['annotations'] = Annotations()
if props['text_type'] == 'xlabel':
self.msg += " Text object is an xlabel\n"
self.draw_xlabel(**props)
elif props['text_type'] == 'ylabel':
self.msg += " Text object is a ylabel\n"
self.draw_ylabel(**props)
elif props['text_type'] == 'title':
self.msg += " Text object is a title\n"
self.draw_title(**props)
else: # just a regular text annotation...
self.msg += " Text object is a normal annotation\n"
if props['coordinates'] is not 'data':
self.msg += " Text object isn't linked to 'data' " \
"coordinates\n"
x_px, y_px = props['mplobj'].get_transform().transform(
props['position'])
x, y = mpltools.display_to_paper(x_px, y_px,
self.plotly_fig['layout'])
xref = 'paper'
yref = 'paper'
xanchor = props['style']['halign'] # no difference here!
yanchor = mpltools.convert_va(props['style']['valign'])
else:
self.msg += " Text object is linked to 'data' " \
"coordinates\n"
x, y = props['position']
xref = 'x{}'.format(self.axis_ct)
yref = 'y{}'.format(self.axis_ct)
xanchor = 'center'
yanchor = 'middle'
annotation = Annotation(
text=props['text'],
opacity=props['style']['alpha'],
x=x,
y=y,
xref=xref,
yref=yref,
align=align,
xanchor=xanchor,
yanchor=yanchor,
showarrow=False, # change this later?
font=Font(
color=props['style']['color'],
size=props['style']['fontsize']
)
)
self.plotly_fig['layout']['annotations'] += annotation,
self.msg += " Heck, yeah I drew that annotation\n"
    def draw_title(self, **props):
        """Add a title to the current subplot in layout dictionary.
        If there exists more than a single plot in the figure, titles revert
        to 'page'-referenced annotations.
        props.keys() -- [
        'text', (actual content string, not the text obj)
        'position', (an x, y pair, not an mpl Bbox)
        'coordinates', ('data', 'axes', 'figure', 'display')
        'text_type', ('title', 'xlabel', or 'ylabel')
        'style', (style dict, see below)
        'mplobj' (actual mpl text object)
        ]
        props['style'].keys() -- [
        'alpha', (opacity of text)
        'fontsize', (size in points of text)
        'color', (hex color)
        'halign', (horizontal alignment, 'left', 'center', or 'right')
        'valign', (vertical alignment, 'baseline', 'center', or 'top')
        'rotation',
        'zorder', (precedence of text when stacked with other objs)
        ]
        """
        self.msg += " Attempting to draw a title\n"
        if len(self.mpl_fig.axes) > 1:
            # Plotly has a single figure-level title, so with multiple
            # subplots each subplot title becomes a paper-referenced
            # annotation at the mpl title's position.
            self.msg += " More than one subplot, adding title as " \
                        "annotation\n"
            x_px, y_px = props['mplobj'].get_transform().transform(props[
                'position'])
            x, y = mpltools.display_to_paper(x_px, y_px,
                                             self.plotly_fig['layout'])
            annotation = Annotation(
                text=props['text'],
                font=Font(color=props['style']['color'],
                          size=props['style']['fontsize']
                          ),
                xref='paper',
                yref='paper',
                x=x,
                y=y,
                xanchor='center',
                yanchor='bottom',
                showarrow=False  # no arrow for a title!
            )
            self.plotly_fig['layout']['annotations'] += annotation,
        else:
            self.msg += " Only one subplot found, adding as a " \
                        "plotly title\n"
            self.plotly_fig['layout']['title'] = props['text']
            titlefont = Font(size=props['style']['fontsize'],
                             color=props['style']['color']
                             )
            self.plotly_fig['layout']['titlefont'] = titlefont
def draw_xlabel(self, **props):
"""Add an xaxis label to the current subplot in layout dictionary.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Adding xlabel\n"
axis_key = 'xaxis{}'.format(self.axis_ct)
self.plotly_fig['layout'][axis_key]['title'] = props['text']
titlefont = Font(size=props['style']['fontsize'],
color=props['style']['color'])
self.plotly_fig['layout'][axis_key]['titlefont'] = titlefont
def draw_ylabel(self, **props):
"""Add a yaxis label to the current subplot in layout dictionary.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Adding ylabel\n"
axis_key = 'yaxis{}'.format(self.axis_ct)
self.plotly_fig['layout'][axis_key]['title'] = props['text']
titlefont = Font(size=props['style']['fontsize'],
color=props['style']['color'])
self.plotly_fig['layout'][axis_key]['titlefont'] = titlefont
def resize(self):
"""Revert figure layout to allow plotly to resize.
By default, PlotlyRenderer tries its hardest to precisely mimic an
mpl figure. However, plotly is pretty good with aesthetics. By
running PlotlyRenderer.resize(), layout parameters are deleted. This
lets plotly choose them instead of mpl.
"""
self.msg += "Resizing figure, deleting keys from layout\n"
for key in ['width', 'height', 'autosize', 'margin']:
try:
del self.plotly_fig['layout'][key]
except KeyError:
pass
    def strip_style(self):
        """Delegate to the figure's own style stripper so plotly defaults
        replace mpl-derived styling in data and layout."""
        self.msg += "Stripping mpl style, deleting keys from data and layout\n"
        self.plotly_fig.strip_style()
# NOTE: mpl defaults text alignment to horizontal alignment when unspecified.
"""
Renderer Module
This module defines the PlotlyRenderer class and a single function,
fig_to_plotly, which is intended to be the main way that users will interact
with the matplotlylib package.
"""
import warnings
from . mplexporter import Exporter, Renderer
from . import mpltools
from .. graph_objs import *
class PlotlyRenderer(Renderer):
"""A renderer class inheriting from base for rendering mpl plots in plotly.
A renderer class to be used with an exporter for rendering matplotlib
plots in Plotly. This module defines the PlotlyRenderer class which handles
the creation of the JSON structures that get sent to plotly.
All class attributes available are defined in __init__().
Basic Usage:
# (mpl code) #
fig = gcf()
renderer = PlotlyRenderer(fig)
exporter = Exporter(renderer)
exporter.run(fig) # ... et voila
"""
    def __init__(self):
        """Initialize PlotlyRenderer obj.
        PlotlyRenderer obj is called on by an Exporter object to draw
        matplotlib objects like figures, axes, text, etc.
        All class attributes are listed here in the __init__ method.
        """
        self.plotly_fig = Figure(data=Data(), layout=Layout())  # output figure
        self.mpl_fig = None  # source mpl figure, set in open_figure()
        self.current_ax_patches = []  # bar patches pending for current axes
        self.axis_ct = 0  # number of subplots opened so far
        self.mpl_x_bounds = (0, 1)  # horizontal extent of the mpl axes region
        self.mpl_y_bounds = (0, 1)  # vertical extent of the mpl axes region
        self.msg = "Initialized PlotlyRenderer\n"  # running debug log
    def open_figure(self, fig, props):
        """Creates a new figure by beginning to fill out layout dict.
        The 'autosize' key is set to false so that the figure will mirror
        sizes set by mpl. The 'hovermode' key controls what shows up when you
        mouse around a figure in plotly, it's set to show the 'closest' point.
        Positional arguments:
        fig -- a matplotlib.figure.Figure object.
        props.keys(): [
        'figwidth',
        'figheight',
        'dpi'
        ]
        """
        self.msg += "Opening figure\n"
        self.mpl_fig = fig
        # Pixel size = inches * dpi; autosize off so plotly keeps mpl's size.
        self.plotly_fig['layout'] = Layout(
            width=int(props['figwidth']*props['dpi']),
            height=int(props['figheight']*props['dpi']),
            autosize=False,
            hovermode='closest')
        self.mpl_x_bounds, self.mpl_y_bounds = mpltools.get_axes_bounds(fig)
        # Margins reproduce the whitespace mpl leaves around the axes region.
        margin = Margin(
            l=int(self.mpl_x_bounds[0]*self.plotly_fig['layout']['width']),
            r=int((1-self.mpl_x_bounds[1])*self.plotly_fig['layout']['width']),
            t=int((1-self.mpl_y_bounds[1])*self.plotly_fig['layout']['height']),
            b=int(self.mpl_y_bounds[0]*self.plotly_fig['layout']['height']),
            pad=0)
        self.plotly_fig['layout']['margin'] = margin
def close_figure(self, fig):
"""Closes figure by cleaning up data and layout dictionaries.
The PlotlyRenderer's job is to create an appropriate set of data and
layout dictionaries. When the figure is closed, some cleanup and
repair is necessary. This method removes inappropriate dictionary
entries, freeing up Plotly to use defaults and best judgements to
complete the entries. This method is called by an Exporter object.
Positional arguments:
fig -- a matplotlib.figure.Figure object.
"""
self.plotly_fig.force_clean()
self.plotly_fig['layout']['showlegend'] = False
self.msg += "Closing figure\n"
    def open_axes(self, ax, props):
        """Setup a new axes object (subplot in plotly).
        Plotly stores information about subplots in different 'xaxis' and
        'yaxis' objects which are numbered. These are just dictionaries
        included in the layout dictionary. This function takes information
        from the Exporter, fills in appropriate dictionary entries,
        and updates the layout dictionary. PlotlyRenderer keeps track of the
        number of plots by incrementing the axis_ct attribute.
        Setting the proper plot domain in plotly is a bit tricky. Refer to
        the documentation for mpltools.convert_x_domain and
        mpltools.convert_y_domain.
        Positional arguments:
        ax -- an mpl axes object. This will become a subplot in plotly.
        props.keys() -- [
        'axesbg', (background color for axes obj)
        'axesbgalpha', (alpha, or opacity for background)
        'bounds', ((x0, y0, width, height) for axes)
        'dynamic', (zoom/pan-able?)
        'axes', (list: [xaxis, yaxis])
        'xscale', (log, linear, or date)
        'yscale',
        'xlim', (range limits for x)
        'ylim',
        'xdomain' (xdomain=xlim, unless it's a date)
        'ydomain'
        ]
        """
        self.msg += " Opening axes\n"
        self.axis_ct += 1
        # set defaults in axes; x and y are cross-anchored so they frame
        # the same subplot.
        xaxis = XAxis(
            anchor='y{}'.format(self.axis_ct),
            zeroline=False,
            showline=True,
            mirror='ticks',
            ticks='inside')
        yaxis = YAxis(
            anchor='x{}'.format(self.axis_ct),
            zeroline=False,
            showline=True,
            mirror='ticks',
            ticks='inside')
        # update defaults with things set in mpl
        mpl_xaxis, mpl_yaxis = mpltools.prep_xy_axis(ax=ax,
                                                     props=props,
                                                     x_bounds=self.mpl_x_bounds,
                                                     y_bounds=self.mpl_y_bounds)
        xaxis.update(mpl_xaxis)
        yaxis.update(mpl_yaxis)
        # put axes in our figure, keyed by subplot number
        self.plotly_fig['layout']['xaxis{}'.format(self.axis_ct)] = xaxis
        self.plotly_fig['layout']['yaxis{}'.format(self.axis_ct)] = yaxis
def close_axes(self, ax):
"""Close the axes object and clean up.
Bars from bar charts are given to PlotlyRenderer one-by-one,
thus they need to be taken care of at the close of each axes object.
The self.current_ax_patches variable should be empty unless a bar
chart has been created or a rectangle object has been drawn that has
an edge exactly on the lines x=0 or y=0.
Positional arguments:
ax -- an mpl axes object, not required at this time.
"""
for patch_coll in self.current_ax_patches:
self.draw_bar(patch_coll)
self.current_ax_patches = [] # clear this for next axes obj
self.msg += " Closing axes\n"
    def draw_bar(self, patch_coll):
        """Draw a collection of similar patches as a bar chart.
        After bars are sorted, an appropriate data dictionary must be created
        to tell plotly about this data. Just like draw_line or draw_markers,
        draw_bar translates patch/path information into something plotly
        understands.
        Positional arguments:
        patch_coll -- a collection of patches to be drawn as a bar chart.
        """
        orientation = patch_coll[0]['orientation']
        if orientation == 'v':
            self.msg += " Attempting to draw a vertical bar chart\n"
            # x at each bar's horizontal center, y at its top edge
            patch_coll.sort(key=lambda b: b['x0'])
            x = [bar['x0']+(bar['x1']-bar['x0'])/2 for bar in patch_coll]
            y = [bar['y1'] for bar in patch_coll]
            bar_gap = mpltools.get_bar_gap([bar['x0'] for bar in patch_coll],
                                           [bar['x1'] for bar in patch_coll])
        else:
            self.msg += " Attempting to draw a horizontal bar chart\n"
            # x at each bar's right edge, y at its vertical center
            patch_coll.sort(key=lambda b: b['y0'])
            x = [bar['x1'] for bar in patch_coll]
            y = [bar['y0']+(bar['y1']-bar['y0'])/2 for bar in patch_coll]
            bar_gap = mpltools.get_bar_gap([bar['y0'] for bar in patch_coll],
                                           [bar['y1'] for bar in patch_coll])
        # Style is taken from the first patch only.
        bar = Bar(orientation=orientation,
                  x=x,
                  y=y,
                  xaxis='x{}'.format(self.axis_ct),
                  yaxis='y{}'.format(self.axis_ct),
                  opacity=patch_coll[0]['alpha'],
                  marker=Marker(
                      color=patch_coll[0]['facecolor'],
                      line=Line(width=patch_coll[0]['edgewidth'])))
        # Single-bar "charts" are assumed to be data redundancy and skipped.
        if len(bar['x']) > 1:
            self.msg += " Heck yeah, I drew that bar chart\n"
            self.plotly_fig['data'] += bar,
            if bar_gap is not None:
                self.plotly_fig['layout']['bargap'] = bar_gap
        else:
            self.msg += " Bar chart not drawn\n"
            warnings.warn('found box chart data with length <= 1, '
                          'assuming data redundancy, not plotting.')
def draw_marked_line(self, **props):
"""Create a data dict for a line obj.
This will draw 'lines', 'markers', or 'lines+markers'.
props.keys() -- [
'coordinates', ('data', 'axes', 'figure', or 'display')
'data', (a list of xy pairs)
'mplobj', (the matplotlib.lines.Line2D obj being rendered)
'label', (the name of the Line2D obj being rendered)
'linestyle', (linestyle dict, can be None, see below)
'markerstyle', (markerstyle dict, can be None, see below)
]
props['linestyle'].keys() -- [
'alpha', (opacity of Line2D obj)
'color', (color of the line if it exists, not the marker)
'linewidth',
'dasharray', (code for linestyle, see DASH_MAP in mpltools.py)
'zorder', (viewing precedence when stacked with other objects)
]
props['markerstyle'].keys() -- [
'alpha', (opacity of Line2D obj)
'marker', (the mpl marker symbol, see SYMBOL_MAP in mpltools.py)
'facecolor', (color of the marker face)
'edgecolor', (color of the marker edge)
'edgewidth', (width of marker edge)
'markerpath', (an SVG path for drawing the specified marker)
'zorder', (viewing precedence when stacked with other objects)
]
"""
self.msg += " Attempting to draw a line "
line, marker = {}, {}
if props['linestyle'] and props['markerstyle']:
self.msg += "... with both lines+markers\n"
mode = "lines+markers"
elif props['linestyle']:
self.msg += "... with just lines\n"
mode = "lines"
elif props['markerstyle']:
self.msg += "... with just markers\n"
mode = "markers"
if props['linestyle']:
line = Line(
opacity=props['linestyle']['alpha'],
color=props['linestyle']['color'],
width=props['linestyle']['linewidth'],
dash=mpltools.convert_dash(props['linestyle']['dasharray'])
)
if props['markerstyle']:
marker = Marker(
opacity=props['markerstyle']['alpha'],
color=props['markerstyle']['facecolor'],
symbol=mpltools.convert_symbol(props['markerstyle']['marker']),
size=props['markerstyle']['markersize'],
line=Line(
color=props['markerstyle']['edgecolor'],
width=props['markerstyle']['edgewidth']
)
)
if props['coordinates'] == 'data':
marked_line = Scatter(mode=mode,
name=props['label'],
x=[xy_pair[0] for xy_pair in props['data']],
y=[xy_pair[1] for xy_pair in props['data']],
xaxis='x{}'.format(self.axis_ct),
yaxis='y{}'.format(self.axis_ct),
line=line,
marker=marker)
self.plotly_fig['data'] += marked_line,
self.msg += " Heck yeah, I drew that line\n"
else:
self.msg += " Line didn't have 'data' coordinates, " \
"not drawing\n"
warnings.warn("Bummer! Plotly can currently only draw Line2D "
"objects from matplotlib that are in 'data' "
"coordinates!")
def draw_image(self, **props):
"""Draw image.
Not implemented yet!
"""
self.msg += " Attempting to draw image\n"
self.msg += " Not drawing image\n"
warnings.warn("Aw. Snap! You're gonna have to hold off on "
"the selfies for now. Plotly can't import "
"images from matplotlib yet!")
def draw_path_collection(self, **props):
"""Add a path collection to data list as a scatter plot.
Current implementation defaults such collections as scatter plots.
Matplotlib supports collections that have many of the same parameters
in common like color, size, path, etc. However, they needn't all be
the same. Plotly does not currently support such functionality and
therefore, the style for the first object is taken and used to define
the remaining paths in the collection.
props.keys() -- [
'paths', (structure: [vertices, path_code])
'path_coordinates', ('data', 'axes', 'figure', or 'display')
'path_transforms', (mpl transform, including Affine2D matrix)
'offsets', (offset from axes, helpful if in 'data')
'offset_coordinates', ('data', 'axes', 'figure', or 'display')
'offset_order',
'styles', (style dict, see below)
'mplobj' (the collection obj being drawn)
]
props['styles'].keys() -- [
'linewidth', (one or more linewidths)
'facecolor', (one or more facecolors for path)
'edgecolor', (one or more edgecolors for path)
'alpha', (one or more opacites for path)
'zorder', (precedence when stacked)
]
"""
self.msg += " Attempting to draw a path collection\n"
if props['offset_coordinates'] is 'data':
alpha_face = props['styles']['facecolor'][0][3]
rgb_face = [int(c*255)
for c in props['styles']['facecolor'][0][:3]]
alpha_edge = props['styles']['edgecolor'][0][3]
rgb_edge = [int(c*255)
for c in props['styles']['edgecolor'][0][:3]]
data = props['offsets']
marker = mpltools.convert_path(props['paths'][0])
style = {
'alpha': alpha_face,
'facecolor': 'rgb({},{},{})'.format(*rgb_face),
'marker': marker,
'edgecolor': 'rgb({},{},{})'.format(*rgb_edge),
'edgewidth': props['styles']['linewidth'][0],
'markersize': mpltools.convert_affine_trans(
dpi=self.mpl_fig.get_dpi(),
aff=props['path_transforms'][0])
}
scatter_props = {
'coordinates': 'data',
'data': data,
'label': None,
'markerstyle': style,
'linestyle': None
}
self.msg += " Drawing path collection as markers\n"
self.draw_marked_line(**scatter_props)
else:
self.msg += " Path collection not linked to 'data', " \
"not drawing\n"
warnings.warn("Dang! That path collection is out of this "
"world. I totally don't know what to do with "
"it yet! Plotly can only import path "
"collections linked to 'data' coordinates")
def draw_path(self, **props):
"""Draw path, currently only attempts to draw bar charts.
This function attempts to sort a given path into a collection of
horizontal or vertical bar charts. Most of the actual code takes
place in functions from mpltools.py.
props.keys() -- [
'data', (a list of verticies for the path)
'coordinates', ('data', 'axes', 'figure', or 'display')
'pathcodes', (code for the path, structure: ['M', 'L', 'Z', etc.])
'style', (style dict, see below)
'mplobj' (the mpl path object)
]
props['style'].keys() -- [
'alpha', (opacity of path obj)
'edgecolor',
'facecolor',
'edgewidth',
'dasharray', (style for path's enclosing line)
'zorder' (precedence of obj when stacked)
]
"""
self.msg += " Attempting to draw a path\n"
is_bar = mpltools.is_bar(**props)
is_barh = mpltools.is_barh(**props)
if is_bar: # if we think it's a bar, add it!
self.msg += " Assuming path is a vertical bar\n"
bar = mpltools.make_bar(orientation='v', **props)
self.file_bar(bar)
if is_barh: # perhaps a horizontal bar?
self.msg += " Assuming path is a horizontal bar\n"
bar = mpltools.make_bar(orientation='h', **props)
self.file_bar(bar)
if not (is_bar or is_barh):
self.msg += " This path isn't a bar, not drawing\n"
warnings.warn("I found a path object that I don't think is part "
"of a bar chart. Ignoring.")
def file_bar(self, bar):
"""Puts a given bar into an appropriate bar or barh collection.
Bars come from the mplexporter one-by-one. To try to put them into
appropriate data sets, we must compare them to existing data.
Positional arguments:
bar -- a bar dictionary created in mpltools.make_bar.py.
bar.keys() -- [
'bar', (mpl path object)
'orientation', (bar direction, 'v' or 'h' for horizontal or vertical)
'x0', ([x0, y0] = bottom-left corner of rectangle)
'y0',
'x1', ([x1, y1] = top-right corner of rectangle):
'y1',
'alpha', (opacity of rectangle)
'edgecolor', (boundary line color)
'facecolor', (rectangle color)
'edgewidth', (boundary line width)
'dasharray', (linestyle for boundary line)
'zorder', (precedence when stacked)
]
"""
self.msg += " Putting a bar into the proper bar collection\n"
if len(self.current_ax_patches) == 0:
self.msg += " Started a new bar collection with this " \
"bar\n"
self.current_ax_patches.append([])
self.current_ax_patches[-1] += bar,
else:
match = False
for patch_collection in self.current_ax_patches:
if mpltools.check_bar_match(patch_collection[0], bar):
match = True
patch_collection += bar,
self.msg += " Filed bar into existing bar " \
"collection\n"
if not match:
self.msg += " Started a new bar collection with " \
"this bar\n"
self.current_ax_patches.append([])
self.current_ax_patches[-1] += bar,
def draw_text(self, **props):
"""Create an annotation dict for a text obj.
Currently, plotly uses either 'page' or 'data' to reference
annotation locations. These refer to 'display' and 'data',
respectively for the 'coordinates' key used in the Exporter.
Appropriate measures are taken to transform text locations to
reference one of these two options.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Attempting to draw an mpl text object\n"
align = props['mplobj']._multialignment
if not align:
align = props['style']['halign'] # mpl default
if 'annotations' not in self.plotly_fig['layout']:
self.plotly_fig['layout']['annotations'] = Annotations()
if props['text_type'] == 'xlabel':
self.msg += " Text object is an xlabel\n"
self.draw_xlabel(**props)
elif props['text_type'] == 'ylabel':
self.msg += " Text object is a ylabel\n"
self.draw_ylabel(**props)
elif props['text_type'] == 'title':
self.msg += " Text object is a title\n"
self.draw_title(**props)
else: # just a regular text annotation...
self.msg += " Text object is a normal annotation\n"
if props['coordinates'] is not 'data':
self.msg += " Text object isn't linked to 'data' " \
"coordinates\n"
x_px, y_px = props['mplobj'].get_transform().transform(
props['position'])
x, y = mpltools.display_to_paper(x_px, y_px,
self.plotly_fig['layout'])
xref = 'paper'
yref = 'paper'
xanchor = props['style']['halign'] # no difference here!
yanchor = mpltools.convert_va(props['style']['valign'])
else:
self.msg += " Text object is linked to 'data' " \
"coordinates\n"
x, y = props['position']
xref = 'x{}'.format(self.axis_ct)
yref = 'y{}'.format(self.axis_ct)
xanchor = 'center'
yanchor = 'middle'
annotation = Annotation(
text=props['text'],
opacity=props['style']['alpha'],
x=x,
y=y,
xref=xref,
yref=yref,
align=align,
xanchor=xanchor,
yanchor=yanchor,
showarrow=False, # change this later?
font=Font(
color=props['style']['color'],
size=props['style']['fontsize']
)
)
self.plotly_fig['layout']['annotations'] += annotation,
self.msg += " Heck, yeah I drew that annotation\n"
def draw_title(self, **props):
"""Add a title to the current subplot in layout dictionary.
If there exists more than a single plot in the figure, titles revert
to 'page'-referenced annotations.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Attempting to draw a title\n"
if len(self.mpl_fig.axes) > 1:
self.msg += " More than one subplot, adding title as " \
"annotation\n"
x_px, y_px = props['mplobj'].get_transform().transform(props[
'position'])
x, y = mpltools.display_to_paper(x_px, y_px,
self.plotly_fig['layout'])
annotation = Annotation(
text=props['text'],
font=Font(color=props['style']['color'],
size=props['style']['fontsize']
),
xref='paper',
yref='paper',
x=x,
y=y,
xanchor='center',
yanchor='bottom',
showarrow=False # no arrow for a title!
)
self.plotly_fig['layout']['annotations'] += annotation,
else:
self.msg += " Only one subplot found, adding as a " \
"plotly title\n"
self.plotly_fig['layout']['title'] = props['text']
titlefont = Font(size=props['style']['fontsize'],
color=props['style']['color']
)
self.plotly_fig['layout']['titlefont'] = titlefont
def draw_xlabel(self, **props):
"""Add an xaxis label to the current subplot in layout dictionary.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Adding xlabel\n"
axis_key = 'xaxis{}'.format(self.axis_ct)
self.plotly_fig['layout'][axis_key]['title'] = props['text']
titlefont = Font(size=props['style']['fontsize'],
color=props['style']['color'])
self.plotly_fig['layout'][axis_key]['titlefont'] = titlefont
def draw_ylabel(self, **props):
"""Add a yaxis label to the current subplot in layout dictionary.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Adding ylabel\n"
axis_key = 'yaxis{}'.format(self.axis_ct)
self.plotly_fig['layout'][axis_key]['title'] = props['text']
titlefont = Font(size=props['style']['fontsize'],
color=props['style']['color'])
self.plotly_fig['layout'][axis_key]['titlefont'] = titlefont
def resize(self):
"""Revert figure layout to allow plotly to resize.
By default, PlotlyRenderer tries its hardest to precisely mimic an
mpl figure. However, plotly is pretty good with aesthetics. By
running PlotlyRenderer.resize(), layout parameters are deleted. This
lets plotly choose them instead of mpl.
"""
self.msg += "Resizing figure, deleting keys from layout\n"
for key in ['width', 'height', 'autosize', 'margin']:
try:
del self.plotly_fig['layout'][key]
except KeyError:
pass
    def strip_style(self):
        """Strip mpl-derived styling from the figure.

        Delegates to the plotly figure object's own strip_style(), which
        removes style keys from both data and layout so that plotly's
        defaults apply instead.
        """
        self.msg += "Stripping mpl style, deleting keys from data and layout\n"
        self.plotly_fig.strip_style()
|
remove one minimum_bounding_box call
|
Fix for Python 3
In Python 3, without universal_newlines set to True,
the output from Popen was byte-encoded rather than str.
|
from database_models import db
from database_models import Team
from database_models import *
import argparse

# Command line: by default the database is dropped and refilled from the
# static data below; --update only rewrites the score values in place.
parser = argparse.ArgumentParser()
parser.add_argument("--update", help="only update existing scores", action="store_true")
args = parser.parse_args()
if args.update:
    # print() with a single argument behaves identically in Python 2 and
    # Python 3, unlike the bare print statement (a py3 syntax error).
    print("...only doing an update")
else:
    print("...clearing and re-filling DB")
    db.drop_all()
    db.create_all()
session = getSession()
class SPI:
    """Container for ESPN's Soccer-Power-Index ratings.

    Holds the overall rating plus the offensive and defensive
    sub-ratings of a single team.
    """
    # Class-level defaults; each field is overwritten per instance.
    off_rating = None
    def_rating = None
    rating = None

    def __init__(self, rating, off_rating, def_rating):
        self.rating = rating
        self.off_rating = off_rating
        self.def_rating = def_rating
# the contents of this class will be split into different database tables
class CompactTeamData:
    """Static per-team record used to seed the database.

    Bundles the team's identity (name, country code, group) with its
    various rating scores; the fields are later written to different
    database tables.
    """
    # Class-level defaults; each field is set per instance in __init__.
    name = None
    country_code = None
    ELO = None
    group = None
    FIFA = None
    Value = None  # avg. player market value in euro
    Age = None  # avg. age
    HA = 0.0  # home advantage
    spi = None

    def __init__(self, name, country_code, group, ELO, FIFA, Value, Age, spi, homeadvantage=0.0):
        self.name = name
        self.country_code = country_code
        self.group = group
        self.ELO = ELO
        self.FIFA = FIFA
        self.Value = Value
        self.Age = Age
        self.HA = homeadvantage
        self.spi = spi
# Static tournament seed data, grouped by World Cup group. Constructor order:
# (name, country_code, group, ELO, FIFA, Value, Age, spi[, homeadvantage]).
# Value is total squad market value in euro divided by squad size, i.e. the
# average player value. NOTE(review): figures look like pre-2014-World-Cup
# snapshots -- confirm the source before reuse.
all_teams = [
    # group A
    CompactTeamData("Brazil", "BR", "A", 2110, 1125, 440500000 / 19.0, 27.60, SPI(91.7, 3.4, 0.5), homeadvantage=1.0),
    CompactTeamData("Croatia", "HR", "A", 1779, 966, 193200000 / 22.0, 27.60, SPI(75.0, 1.7, 0.9)),
    CompactTeamData("Mexico", "MX", "A", 1784, 887, 39250000 / 18.0, 27.80, SPI(76.7, 1.6, 0.7)),
    CompactTeamData("Cameroon", "CM", "A", 1593, 626, 114300000 / 23.0, 26.80, SPI(71.0, 1.5, 1.0)),
    # group B
    CompactTeamData("Spain", "ES", "B", 2082, 1506, 577000000 / 21.0, 28.0, SPI(89.3, 2.8, 0.5)),
    CompactTeamData("Netherlands", "NL", "B", 1979, 1122, 197000000 / 24.0, 26.10, SPI(82.4, 2.4, 0.9)),
    CompactTeamData("Chile", "CL", "B", 1896, 1038, 142850000 / 24.0, 28.20, SPI(87.1, 2.7, 0.7)),
    CompactTeamData("Australia", "AU", "B", 1711, 576, 25550000 / 23.0, 26.40, SPI(69.9, 1.7, 1.3)),
    # group C
    CompactTeamData("Colombia", "CO", "C", 1912, 1211, 194550000 / 26.0, 27.70, SPI(86.0, 2.2, 0.4)),
    CompactTeamData("Greece", "GR", "C", 1813, 1084, 80400000 / 22.0, 28.70, SPI(76.1, 1.4, 0.6)),
    CompactTeamData("Ivory Coast", "CI", "C", 1786, 841, 142900000 / 26.0, 27.30, SPI(79.2, 2.3, 1.0)),
    CompactTeamData("Japan", "JP", "C", 1747, 601, 91700000 / 22.0, 26.90, SPI(72.9, 1.9, 1.2)),
    # group D
    CompactTeamData("Uruguay", "UY", "D", 1898, 1157, 154925000 / 21.0, 28.30, SPI(84.0, 2.4, 0.7)),
    CompactTeamData("Costa Rica", "CR", "D", 1700, 734, 30200000 / 22.0, 26.70, SPI(76.7, 1.4, 0.6)),
    CompactTeamData("England", "EN", "D", 1906, 1032, 387500000 / 30.0, 26.60, SPI(83.1, 2.2, 0.7)),
    CompactTeamData("Italy", "IT", "D", 1887, 1135, 339000000 / 26.0, 27.90, SPI(80.9, 2.1, 0.8)),
    # group E
    CompactTeamData("Switzerland", "CH", "E", 1822, 1159, 171000000 / 21.0, 26.10, SPI(77.3, 2.0, 1.0)),
    CompactTeamData("Ecuador", "EC", "E", 1816, 831, 54550000 / 20.0, 27.90, SPI(81.7, 2.0, 0.7)),
    CompactTeamData("France", "FR", "E", 1855, 917, 447500000 / 25.0, 26.60, SPI(85.2, 2.4, 0.6)),
    CompactTeamData("Honduras", "HN", "E", 1664, 716, 18400000 / 23.0, 27.50, SPI(73.3, 1.7, 1.0)),
    # group F
    CompactTeamData("Argentina", "AR", "F", 1994, 1255, 423500000 / 22.0, 27.50, SPI(90.3, 2.9, 0.4), homeadvantage=0.5),
    CompactTeamData("Bosnia and Herzegovina", "BA", "F", 1758, 919, 112000000 / 22.0, 26.80, SPI(79.4, 2.3, 1.0)),
    CompactTeamData("Iran", "IR", "F", 1719, 729, 30700000 / 34.0, 27.70, SPI(70.7, 1.4, 1.0)),
    CompactTeamData("Nigeria", "NG", "F", 1718, 616, 88650000 / 23.0, 24.80, SPI(75.9, 1.7, 0.9)),
    # group G
    CompactTeamData("Germany", "DE", "G", 2060, 1314, 400000000 / 19.0, 26.80, SPI(88.7, 3.1, 0.7)),
    CompactTeamData("Portugal", "PT", "G", 1905, 1219, 258250000 / 22.0, 27.80, SPI(79.5, 2.1, 0.9)),
    CompactTeamData("Ghana", "GH", "G", 1700, 733, 97200000 / 21.0, 25.50, SPI(76.1, 1.9, 1.0)),
    CompactTeamData("USA", "US", "G", 1841, 1044, 28750000 / 23.0, 28.00, SPI(77.5, 2.1, 1.0)),
    # group H
    CompactTeamData("Belgium", "BE", "H", 1807, 1117, 341450000 / 22.0, 26.50, SPI(80.7, 2.1, 0.8)),
    CompactTeamData("Algeria", "DZ", "H", 1582, 819, 54000000 / 28.0, 27.10, SPI(63.1, 1.1, 1.2)),
    CompactTeamData("Russia", "RU", "H", 1819, 862, 193600000 / 25.0, 27.60, SPI(79.0, 1.7, 0.7)),
    CompactTeamData("Korea Republic", "KR", "H", 1683, 556, 52500000 / 22.0, 25.50, SPI(73.4, 1.7, 1.0))
]
if not args.update:
    # first step: insert teams into DB to get the IDs
    for team_data in all_teams:
        team = Team(team_data.name, team_data.country_code)
        session.add(team)
    # add new rating score types
    session.add(ScoreType("ELO", "The ELO rating known from chess.", long_name="ELO rating"))
    session.add(ScoreType("FIFA", "FIFA ranking points", long_name="FIFA rating"))
    session.add(ScoreType("Value", "Average value of the players in Euro", long_name="Ø value in €"))
    session.add(ScoreType("Age", "Average age of the team", long_name="Ø age"))
    session.add(ScoreType("HA", "Home-advantage of the team", long_name="Home-advantage"))
    session.add(ScoreType("Custom", "User-defined custom rating", long_name="Custom rating", hidden=True))
    session.add(ScoreType("SPI Off", "ESPN SPI Offensive Rating", long_name="SPI Offensive Rating"))
    session.add(ScoreType("SPI Def", "ESPN SPI Defensive Rating", long_name="SPI Defensive Rating"))
    # add certain custom parameters
    # (kept in a local so it can be linked to the custom rule further below)
    custom_rule_parameter = RuleParameterType("normalization_constant", 10.0)
    session.add(custom_rule_parameter)
    session.add(RuleParameterType("simulation_run_count", 1))
    # add default ELO calculation rule
    # (each rule gets a default weight via makeDefaultRule)
    elo_rule = RuleType("ELO", "Calculation using the ELO score", "elo_binary")
    elo_rule.makeDefaultRule(1.0)
    session.add(elo_rule)
    spi_rule = RuleType("SPI", "Calculation based on ESPN's Soccer-Power-Index", "spi_binary")
    spi_rule.makeDefaultRule(1.0)
    session.add(spi_rule)
    fifa_rule = RuleType("FIFA", "Calculation using the FIFA ranking", "fifa_binary")
    fifa_rule.makeDefaultRule(0.5)
    session.add(fifa_rule)
    value_rule = RuleType("Value", "Calculation based on average player market value", "value_binary")
    value_rule.makeDefaultRule(0.25)
    session.add(value_rule)
    ha_rule = RuleType("HA", "Adjust the win expectancy based on the home-advantage", "homeadvantage_binary", long_name="Home-advantage", is_backref_rule=True)
    ha_rule.makeDefaultRule(1.0)
    session.add(ha_rule)
    age_rule = RuleType("Age", "Calculation based on average age", "age_binary")
    age_rule.makeDefaultRule(0.0)
    session.add(age_rule)
    luck_rule = RuleType("Luck", "Each team has the same probability of winning", "luck_binary")
    luck_rule.makeDefaultRule(0.0)
    session.add(luck_rule)
    custom_rule = RuleType("Custom", "Define custom ratings and an own win expectancy function.", "custom_binary", long_name="Custom Rating", needs_custom_ratings=True)
    custom_rule.makeDefaultRule(0.0)
    session.add(custom_rule)
    # add default tournament types
    session.add(TournamentType("1 vs 1", "A simple 1 vs 1 test tournament.", 2, "TwoHandsIcon.png", "1v1"))
    session.add(TournamentType("World Cup", "The standard FIFA World Cup.", 32, "StdLeagueIcon.png", "worldcup", "worldcup_view"))
    # only after committing will the objects have valid IDs assigned!
    session.commit()
# get the objects we just added (now with correct ID)
elo = session.query(ScoreType).filter_by(name="ELO").first()
fifa = session.query(ScoreType).filter_by(name="FIFA").first()
value = session.query(ScoreType).filter_by(name="Value").first()
age = session.query(ScoreType).filter_by(name="Age").first()
ha = session.query(ScoreType).filter_by(name="HA").first()
custom = session.query(ScoreType).filter_by(name="Custom").first()
spi_off = session.query(ScoreType).filter_by(name="SPI Off").first()
spi_def = session.query(ScoreType).filter_by(name="SPI Def").first()
# Fail fast if any expected score type is missing. Idiomatic None test is
# `is not None` (identity), not `!= None`.
assert elo is not None
assert fifa is not None
assert value is not None
assert age is not None
assert ha is not None
assert custom is not None
assert spi_off is not None
assert spi_def is not None
if not args.update:
    # Link each rule to the score type(s) it consumes. The *_rule objects
    # exist only on a fresh fill: they were created in the matching
    # `if not args.update:` branch above.
    elo_rule.addScoreType(elo, session)
    fifa_rule.addScoreType(fifa, session)
    value_rule.addScoreType(value, session)
    age_rule.addScoreType(age, session)
    ha_rule.addScoreType(ha, session)
    custom_rule.addScoreType(custom, session)
    custom_rule.addParameterType(custom_rule_parameter, session)
    spi_rule.addScoreType(spi_off, session)
    spi_rule.addScoreType(spi_def, session)
# and finish the team setup
for team_data in all_teams:
    team = session.query(Team).filter_by(country_code=team_data.country_code).first()
    assert team is not None
    def add(type, score):
        # Fresh fill: insert a new score row for this team.
        session.add(Score(type.id, team.id, score))
    def update(type, score):
        # Update mode: overwrite the existing tournament-independent row.
        session.query(Score).filter_by(type_id=type.id, tournament_id=None, team_id=team.id).first().value = score
    fun = update if args.update else add
    fun(elo, team_data.ELO)
    fun(fifa, team_data.FIFA)
    fun(value, team_data.Value)
    fun(age, team_data.Age)
    fun(ha, team_data.HA)
    fun(spi_off, team_data.spi.off_rating)
    fun(spi_def, team_data.spi.def_rating)
session.commit()
print("..done")
cleanupSession()
backend: updated scores
from database_models import db
from database_models import Team
from database_models import *
import argparse

# Command line: by default the database is dropped and refilled from the
# static data below; --update only rewrites the score values in place.
parser = argparse.ArgumentParser()
parser.add_argument("--update", help="only update existing scores", action="store_true")
args = parser.parse_args()
if args.update:
    # print() with a single argument behaves identically in Python 2 and
    # Python 3, unlike the bare print statement (a py3 syntax error).
    print("...only doing an update")
else:
    print("...clearing and re-filling DB")
    db.drop_all()
    db.create_all()
session = getSession()
class SPI:
    """Container for ESPN's Soccer-Power-Index ratings.

    Holds the overall rating plus the offensive and defensive
    sub-ratings of a single team.
    """
    # Class-level defaults; each field is overwritten per instance.
    off_rating = None
    def_rating = None
    rating = None

    def __init__(self, rating, off_rating, def_rating):
        self.rating = rating
        self.off_rating = off_rating
        self.def_rating = def_rating
# the contents of this class will be split into different database tables
class CompactTeamData:
    """Static per-team record used to seed the database.

    Bundles the team's identity (name, country code, group) with its
    various rating scores; the fields are later written to different
    database tables.
    """
    # Class-level defaults; each field is set per instance in __init__.
    name = None
    country_code = None
    ELO = None
    group = None
    FIFA = None
    Value = None  # avg. player market value in euro
    Age = None  # avg. age
    HA = 0.0  # home advantage
    spi = None

    def __init__(self, name, country_code, group, ELO, FIFA, Value, Age, spi, homeadvantage=0.0):
        self.name = name
        self.country_code = country_code
        self.group = group
        self.ELO = ELO
        self.FIFA = FIFA
        self.Value = Value
        self.Age = Age
        self.HA = homeadvantage
        self.spi = spi
# Static tournament seed data (updated snapshot), grouped by World Cup group.
# Constructor order:
# (name, country_code, group, ELO, FIFA, Value, Age, spi[, homeadvantage]).
# Value is total squad market value in euro divided by squad size, i.e. the
# average player value.
all_teams = [
    # group A
    CompactTeamData("Brazil", "BR", "A", 2113, 1210, 467500000 / 23.0, 28.30, SPI(91.7, 3.4, 0.5), homeadvantage=1.0),
    CompactTeamData("Croatia", "HR", "A", 1784, 871, 193200000 / 22.0, 27.70, SPI(75.1, 1.7, 0.9)),
    CompactTeamData("Mexico", "MX", "A", 1799, 877, 39250000 / 18.0, 27.90, SPI(76.7, 1.6, 0.7)),
    CompactTeamData("Cameroon", "CM", "A", 1590, 583, 114300000 / 24.0, 26.70, SPI(71.0, 1.5, 1.0)),
    # group B
    CompactTeamData("Spain", "ES", "B", 2085, 1460, 577000000 / 21.0, 28.0, SPI(89.4, 2.8, 0.5)),
    CompactTeamData("Netherlands", "NL", "B", 1963, 967, 97500000 / 20.0, 22.40, SPI(82.3, 2.4, 0.9)),
    CompactTeamData("Chile", "CL", "B", 1892, 1037, 142850000 / 24.0, 28.30, SPI(87.3, 2.8, 0.7)),
    CompactTeamData("Australia", "AU", "B", 1705, 545, 25550000 / 23.0, 26.50, SPI(70.1, 1.7, 1.3)),
    # group C
    CompactTeamData("Colombia", "CO", "C", 1904, 1186, 194550000 / 26.0, 27.80, SPI(86.2, 2.2, 0.4)),
    CompactTeamData("Greece", "GR", "C", 1790, 1082, 80400000 / 22.0, 28.80, SPI(76.0, 1.4, 0.6)),
    CompactTeamData("Ivory Coast", "CI", "C", 1789, 830, 142900000 / 26.0, 27.30, SPI(79.2, 2.3, 1.0)),
    CompactTeamData("Japan", "JP", "C", 1752, 613, 91700000 / 22.0, 27.00, SPI(72.7, 1.9, 1.2)),
    # group D
    CompactTeamData("Uruguay", "UY", "D", 1894, 1181, 154925000 / 21.0, 28.40, SPI(84.1, 2.4, 0.7)),
    CompactTeamData("Costa Rica", "CR", "D", 1707, 748, 31900000 / 27.0, 27.20, SPI(76.8, 1.4, 0.6)),
    CompactTeamData("England", "EN", "D", 1909, 1043, 373000000 / 28.0, 26.80, SPI(83.1, 2.2, 0.7)),
    CompactTeamData("Italy", "IT", "D", 1885, 1115, 339000000 / 26.0, 28.00, SPI(81.0, 2.1, 0.8)),
    # group E
    CompactTeamData("Switzerland", "CH", "E", 1818, 1161, 171000000 / 21.0, 26.20, SPI(77.3, 2.0, 1.0)),
    CompactTeamData("Ecuador", "EC", "E", 1823, 794, 63250000 / 24.0, 27.20, SPI(82.1, 2.0, 0.7)),
    CompactTeamData("France", "FR", "E", 1872, 935, 447500000 / 25.0, 26.60, SPI(85.2, 2.4, 0.6)),
    CompactTeamData("Honduras", "HN", "E", 1673, 759, 21150000 / 23.0, 28.40, SPI(73.4, 1.7, 1.0)),
    # group F
    CompactTeamData("Argentina", "AR", "F", 1989, 1178, 423500000 / 22.0, 27.60, SPI(90.3, 2.9, 0.4), homeadvantage=0.0),
    CompactTeamData("Bosnia and Herzegovina", "BA", "F", 1740, 795, 116750000 / 24.0, 27.00, SPI(79.5, 2.3, 1.0)),
    CompactTeamData("Iran", "IR", "F", 1709, 715, 27450000 / 28.0, 27.70, SPI(70.7, 1.4, 1.0)),
    CompactTeamData("Nigeria", "NG", "F", 1720, 631, 96100000 / 29.0, 25.50, SPI(75.9, 1.7, 0.9)),
    # group G
    CompactTeamData("Germany", "DE", "G", 2064, 1340, 691000000 / 38.0, 24.90, SPI(88.7, 3.1, 0.7)),
    CompactTeamData("Portugal", "PT", "G", 1908, 1245, 258250000 / 22.0, 27.90, SPI(79.6, 2.1, 0.9)),
    CompactTeamData("Ghana", "GH", "G", 1689, 713, 97200000 / 21.0, 25.50, SPI(76.1, 1.9, 1.0)),
    CompactTeamData("USA", "US", "G", 1825, 1015, 28750000 / 23.0, 28.10, SPI(77.7, 2.1, 1.0)),
    # group H
    CompactTeamData("Belgium", "BE", "H", 1805, 1039, 347450000 / 23.0, 26.30, SPI(80.8, 2.1, 0.8)),
    CompactTeamData("Algeria", "DZ", "H", 1595, 795, 54000000 / 28.0, 27.20, SPI(62.9, 1.1, 1.2)),
    CompactTeamData("Russia", "RU", "H", 1822, 903, 193600000 / 25.0, 27.70, SPI(78.9, 1.7, 0.7)),
    CompactTeamData("Korea Republic", "KR", "H", 1690, 551, 52125000 / 23.0, 25.90, SPI(73.5, 1.7, 1.0))
]
if not args.update:
    # first step: insert teams into DB to get the IDs
    for team_data in all_teams:
        team = Team(team_data.name, team_data.country_code)
        session.add(team)
    # add new rating score types
    session.add(ScoreType("ELO", "The ELO rating known from chess.", long_name="ELO rating"))
    session.add(ScoreType("FIFA", "FIFA ranking points", long_name="FIFA rating"))
    session.add(ScoreType("Value", "Average value of the players in Euro", long_name="Ø value in €"))
    session.add(ScoreType("Age", "Average age of the team", long_name="Ø age"))
    session.add(ScoreType("HA", "Home-advantage of the team", long_name="Home-advantage"))
    session.add(ScoreType("Custom", "User-defined custom rating", long_name="Custom rating", hidden=True))
    session.add(ScoreType("SPI Off", "ESPN SPI Offensive Rating", long_name="SPI Offensive Rating"))
    session.add(ScoreType("SPI Def", "ESPN SPI Defensive Rating", long_name="SPI Defensive Rating"))
    # add certain custom parameters
    # (kept in a local so it can be linked to the custom rule further below)
    custom_rule_parameter = RuleParameterType("normalization_constant", 10.0)
    session.add(custom_rule_parameter)
    session.add(RuleParameterType("simulation_run_count", 1))
    # add default ELO calculation rule
    # (each rule gets a default weight via makeDefaultRule)
    elo_rule = RuleType("ELO", "Calculation using the ELO score", "elo_binary")
    elo_rule.makeDefaultRule(1.0)
    session.add(elo_rule)
    spi_rule = RuleType("SPI", "Calculation based on ESPN's Soccer-Power-Index", "spi_binary")
    spi_rule.makeDefaultRule(1.0)
    session.add(spi_rule)
    fifa_rule = RuleType("FIFA", "Calculation using the FIFA ranking", "fifa_binary")
    fifa_rule.makeDefaultRule(0.5)
    session.add(fifa_rule)
    value_rule = RuleType("Value", "Calculation based on average player market value", "value_binary")
    value_rule.makeDefaultRule(0.25)
    session.add(value_rule)
    ha_rule = RuleType("HA", "Adjust the win expectancy based on the home-advantage", "homeadvantage_binary", long_name="Home-advantage", is_backref_rule=True)
    ha_rule.makeDefaultRule(1.0)
    session.add(ha_rule)
    age_rule = RuleType("Age", "Calculation based on average age", "age_binary")
    age_rule.makeDefaultRule(0.0)
    session.add(age_rule)
    luck_rule = RuleType("Luck", "Each team has the same probability of winning", "luck_binary")
    luck_rule.makeDefaultRule(0.0)
    session.add(luck_rule)
    custom_rule = RuleType("Custom", "Define custom ratings and an own win expectancy function.", "custom_binary", long_name="Custom Rating", needs_custom_ratings=True)
    custom_rule.makeDefaultRule(0.0)
    session.add(custom_rule)
    # add default tournament types
    session.add(TournamentType("1 vs 1", "A simple 1 vs 1 test tournament.", 2, "TwoHandsIcon.png", "1v1"))
    session.add(TournamentType("World Cup", "The standard FIFA World Cup.", 32, "StdLeagueIcon.png", "worldcup", "worldcup_view"))
    # only after committing will the objects have valid IDs assigned!
    session.commit()
# get the objects we just added (now with correct ID)
elo = session.query(ScoreType).filter_by(name="ELO").first()
fifa = session.query(ScoreType).filter_by(name="FIFA").first()
value = session.query(ScoreType).filter_by(name="Value").first()
age = session.query(ScoreType).filter_by(name="Age").first()
ha = session.query(ScoreType).filter_by(name="HA").first()
custom = session.query(ScoreType).filter_by(name="Custom").first()
spi_off = session.query(ScoreType).filter_by(name="SPI Off").first()
spi_def = session.query(ScoreType).filter_by(name="SPI Def").first()
# Fail fast if any expected score type is missing. Idiomatic None test is
# `is not None` (identity), not `!= None`.
assert elo is not None
assert fifa is not None
assert value is not None
assert age is not None
assert ha is not None
assert custom is not None
assert spi_off is not None
assert spi_def is not None
if not args.update:
    # Link each rule to the score type(s) it consumes. The *_rule objects
    # exist only on a fresh fill: they were created in the matching
    # `if not args.update:` branch above.
    elo_rule.addScoreType(elo, session)
    fifa_rule.addScoreType(fifa, session)
    value_rule.addScoreType(value, session)
    age_rule.addScoreType(age, session)
    ha_rule.addScoreType(ha, session)
    custom_rule.addScoreType(custom, session)
    custom_rule.addParameterType(custom_rule_parameter, session)
    spi_rule.addScoreType(spi_off, session)
    spi_rule.addScoreType(spi_def, session)
# and finish the team setup
for team_data in all_teams:
    team = session.query(Team).filter_by(country_code=team_data.country_code).first()
    assert team is not None
    def add(type, score):
        # Fresh fill: insert a new score row for this team.
        session.add(Score(type.id, team.id, score))
    def update(type, score):
        # Update mode: overwrite the existing tournament-independent row.
        session.query(Score).filter_by(type_id=type.id, tournament_id=None, team_id=team.id).first().value = score
    fun = update if args.update else add
    fun(elo, team_data.ELO)
    fun(fifa, team_data.FIFA)
    fun(value, team_data.Value)
    fun(age, team_data.Age)
    fun(ha, team_data.HA)
    fun(spi_off, team_data.spi.off_rating)
    fun(spi_def, team_data.spi.def_rating)
session.commit()
print("..done")
cleanupSession()
|
from __future__ import print_function
from importlib import import_module
from dataclasses import dataclass
import inspect
from inspect import Signature
import sys
import textwrap
import types
import difflib
import shelve
import os
import clr.config
# Sentinel used by get_command_spec for arguments that have no default.
# Because command specs are pickled for the clr cache, a fixed random int
# compared by equality is used rather than an object compared by identity
# (object identity does not survive pickling).
NO_DEFAULT = 4194921784511160246
# Every known namespace key: the configured command modules plus the
# built-in 'system' namespace.
NAMESPACE_KEYS = sorted(clr.config.commands().keys() | {'system'})
# Load lazily namespace modules as needed. Some have expensive/occasionally
# failing initialization. Maps namespace key -> Namespace (or
# ErrorLoadingNamespace when the import failed); see get_namespace().
__namespaces = {}
def _load_namespace(key):
    """Import and wrap the command module registered under `key`.

    Returns a Namespace on success, or an ErrorLoadingNamespace when the
    module import (or its COMMANDS attribute lookup) raises.
    """
    if key == 'system':
        instance = System()
    else:
        # An unknown key raises KeyError here, deliberately outside the try.
        module_path = clr.config.commands()[key]
        try:
            instance = import_module(module_path).COMMANDS
        except Exception as e:
            return ErrorLoadingNamespace(key, e)
    descr = instance.descr
    longdescr = getattr(instance, 'longdescr', descr)
    command_callables = {}
    for attr in dir(instance):
        if attr.startswith('cmd_'):
            # Strip the 'cmd_' prefix to get the public command name.
            command_callables[attr[4:]] = getattr(instance, attr)
    command_specs = {name: get_command_spec(func)
                     for name, func in command_callables.items()}
    return Namespace(descr, longdescr, command_specs, command_callables)
def get_namespace(namespace_key):
    """Lazily load the namespace for `namespace_key`, memoizing the result."""
    cached = __namespaces.get(namespace_key)
    if cached is None:
        # _load_namespace never returns None, so None reliably means "miss".
        cached = _load_namespace(namespace_key)
        __namespaces[namespace_key] = cached
    return cached
def _get_close_matches(query, options):
    """Fuzzy matches for `query`, followed by remaining prefix matches.

    difflib supplies the similarity-ranked candidates; any options that
    start with `query` but were not already found are appended in sorted
    order (skipped entirely for an empty query).
    """
    results = difflib.get_close_matches(query, options, cutoff=0.4)
    if query:
        prefixed = sorted(opt for opt in options
                          if opt.startswith(query) and opt not in results)
        results += prefixed
    return results
def resolve_command(query, cache=None):
    """Resolve the string `query' into a (namespace_key, command_name) tuple."""
    # Fully-qualified queries look like 'namespace:command'.
    if ':' in query:
        namespace_key, command_name = query.split(':', 1)
    elif query in get_namespace('system').commands:
        # Bare system commands, e.g. `clr help`, resolve without a namespace.
        namespace_key, command_name = 'system', query
    else:
        # Treat the bare word as a namespace; the checks below will then
        # produce a sensible error message.
        namespace_key, command_name = query, ''
    if namespace_key not in NAMESPACE_KEYS:
        close_matches = _get_close_matches(namespace_key, NAMESPACE_KEYS)
        print(f"Error! Command namespace '{namespace_key}' does not exist.\nClosest matches: {close_matches}\n\nAvailable namespaces: {sorted(NAMESPACE_KEYS)}", file=sys.stderr)
        sys.exit(1)
    if cache:
        namespace = cache.get(namespace_key)
    else:
        namespace = get_namespace(namespace_key)
    if command_name not in namespace.commands:
        close_matches = _get_close_matches(command_name, namespace.commands)
        print(f"Error! Command '{command_name}' does not exist in namespace '{namespace_key}' - {namespace.descr}.\nClosest matches: {close_matches}\n\nAvailable commands: {namespace.commands}", file=sys.stderr)
        sys.exit(1)
    return namespace_key, command_name
def get_command_spec(command_callable):
    """Get a command spec from the given (resolved) command, and
    distinguish default args vs. non-default args.

    Returns (args, vararg, docstr) where `args` is a list of
    (name, default) pairs using NO_DEFAULT for required arguments.
    """
    args, vararg, varkwarg, defvals = inspect.getargspec(command_callable)
    signature = Signature.from_callable(command_callable)
    # Return values are ignored by the clr driver; warn so authors know.
    if signature.return_annotation != Signature.empty:
        print(f'WARNING: {command_callable} returns a {signature.return_annotation} which is ignored.')
    if varkwarg:
        print(f'WARNING: Ignoring kwargs found for clr command {command_callable}: {varkwarg}')
    if args is None:
        args = tuple()
    if defvals is None:
        defvals = tuple()
    # Avoid the self argument.
    if isinstance(command_callable, types.MethodType):
        args = args[1:]
    # BUG FIX: removed leftover debug prints of the signature and every
    # parameter, and a second, duplicated VAR_KEYWORD warning loop — they
    # spammed stdout on every command-spec load.
    nargs = len(args) - len(defvals)
    args = list(zip(args[:nargs], [NO_DEFAULT]*nargs)) + list(zip(args[nargs:], defvals))
    return args, vararg, inspect.getdoc(command_callable)
@dataclass
class Namespace:
    """A loaded command namespace: help metadata plus its command tables."""
    descr: str
    longdescr: str
    command_specs: dict
    command_callables: dict

    @property
    def commands(self):
        """Command names, alphabetically sorted."""
        return sorted(self.command_specs)
@dataclass
class ErrorLoadingNamespace:
    """Pseudo namespace shown when a real one fails to import."""
    key: str
    error: Exception

    # An unloadable namespace exposes no commands.
    commands = {}
    command_specs = {}

    @property
    def descr(self):
        return f"ERROR Could not load. See `clr help {self.key}`"

    @property
    def longdescr(self):
        return f"Error importing module '{clr.config.commands()[self.key]}' for namespace '{self.key}':\n\n{self.error}"
@dataclass(frozen=True)
class NamespaceCacheEntry:
    """Immutable, picklable snapshot of a namespace's help metadata."""
    descr: str
    longdescr: str
    command_specs: dict

    @staticmethod
    def create(namespace):
        """Snapshot the cacheable parts of a full Namespace."""
        return NamespaceCacheEntry(namespace.descr, namespace.longdescr,
                                   namespace.command_specs)

    @property
    def commands(self):
        """Command names, alphabetically sorted."""
        return sorted(self.command_specs)
class NamespaceCache:
    """Cache introspection on command names and signatures to disk.
    This allows subsequent calls to `clr help` or `clr completion` to be fast.
    Necessary to work around the fact that many clr command namespace modules
    import the world and initialize state on import.
    """
    def __init__(self):
        # Fixed shared location; cmd_clear_cache deletes this file.
        self.CACHE_FN = '/tmp/clr_command_cache'
        self.cache = shelve.open(self.CACHE_FN)
    def get(self, namespace_key):
        """Return the cached entry for `namespace_key`, loading it on a miss."""
        # Don't cache the system namespace. It is already loaded.
        if namespace_key == 'system': return get_namespace('system')
        if namespace_key not in self.cache:
            namespace = get_namespace(namespace_key)
            # Never persist load failures; the next run should retry the import.
            if isinstance(namespace, ErrorLoadingNamespace): return namespace
            self.cache[namespace_key] = NamespaceCacheEntry.create(namespace)
            self.cache.sync()
        return self.cache[namespace_key]
class System(object):
    """System namespace for the clr tool.
    Commands defined here will be available directly without specifying a
    namespace. For example `clr help` instead of `clr system:help`. Be careful
    not to define commands here that have the same name as a defined namespace
    or it may be obscured."""
    descr = 'clr built-in commands'
    # Shared, disk-backed cache of command specs (see NamespaceCache).
    cache = NamespaceCache()

    def cmd_clear_cache(self):
        """Clear clr's cache.
        clr caches command specs to disk to speed up help and completions.
        Run this to clear the cache if your results are stale."""
        # Remove file. Process exits after this, will get recreated on next run.
        os.remove(self.cache.CACHE_FN)

    def cmd_completion(self, query=''):
        """Completion results for first arg to clr."""
        results = []
        if ':' not in query:
            # Suffix system commands with a space.
            results.extend(f'{c} ' for c in self.cache.get('system').commands)
            # Suffix namespaces with a :.
            results.extend(f'{k}:' for k in NAMESPACE_KEYS)
        else:
            namespace_key, _ = query.split(':', 1)
            results.extend(f'{namespace_key}:{c} ' for c in self.cache.get(namespace_key).commands)
        # Emit only candidates that extend the query; no trailing newline so
        # the shell completion machinery receives a clean word list.
        print('\n'.join(r for r in results if r.startswith(query)), end='')

    def cmd_profile_imports(self, *namespaces):
        """Prints some debugging information about how long it takes to import clr namespaces."""
        import time
        if not namespaces: namespaces = NAMESPACE_KEYS
        results = {}
        for index, key in enumerate(namespaces):
            t1 = time.time()
            get_namespace(key)
            results[f'#{index + 1}-{key}'] = time.time() - t1
        # Report load times in milliseconds, slowest last.
        print('\n'.join(f'{k}: {int(1000*v)}' for k, v in sorted(results.items(), key=lambda i:i[1])))

    def cmd_help(self, query=None, query2=None):
        """
        Provides help for commands. When specified, `query' can be
        either a namespace or a namespace:command tuple.
        """
        if not query:
            print('Available namespaces')
            for namespace_key in NAMESPACE_KEYS:
                print(' ', namespace_key.ljust(20), '-', self.cache.get(namespace_key).descr)
            return
        # If they passed just one arg and it is a namespace key, print help for the full namespace.
        if query.endswith(':'): query = query[:-1]
        if query in NAMESPACE_KEYS and not query2:
            for command in self.cache.get(query).commands:
                self.print_help_for_command(query, command, prefix='  ')
            return
        # `clr help ns cmd` is sugar for `clr help ns:cmd`.
        if query2: query = f'{query}:{query2}'
        namespace_key, command_name = resolve_command(query, cache=self.cache)
        self.print_help_for_command(namespace_key, command_name)

    def print_help_for_command(self, namespace_key, command_name, prefix=''):
        """Print the usage line (required args, option flags, varargs) and the
        command's docstring, wrapped and indented by `prefix'."""
        w = textwrap.TextWrapper(
            initial_indent=prefix, subsequent_indent=prefix,
            width=70)
        spec, vararg, docstr = self.cache.get(namespace_key).command_specs[command_name]
        def is_default(spec):
            # NO_DEFAULT compares by value because specs round-trip through
            # the pickle-backed cache (object identity is not preserved).
            return spec[1] == NO_DEFAULT
        req = [spec_item for spec_item in spec if is_default(spec_item)]
        notreq = [spec_item for spec_item in spec if not is_default(spec_item)]
        args = []
        if len(req) > 0:
            args.append(' '.join(['<%s>' % a for a, _ in req]))
        if notreq:
            def atxt(a, v):
                # Booleans render as toggle flags; others as --name=value.
                if isinstance(v, bool):
                    if v:
                        return '--no%s' % a
                    else:
                        return '--%s' % a
                else:
                    return '--%s=%s' % (a, v)
            args.append('[%s]' % ' '.join([atxt(a, v) for a, v in notreq]))
        if vararg is not None:
            args.append('[%s...]' % vararg)
        print(w.fill('%s %s' % (command_name, ' '.join(args))))
        w.initial_indent += '  '
        w.subsequent_indent += '  '
        if docstr:
            for l in docstr.split('\n'):
                print(w.fill(l))
Cleanup: remove leftover debug prints from get_command_spec, fold the duplicated kwargs warning into the Signature loop, and return command specs as a CommandSpec namedtuple.
from __future__ import print_function
from importlib import import_module
from dataclasses import dataclass
import inspect
from inspect import Signature
import sys
import textwrap
import types
import difflib
import shelve
import os
from collections import namedtuple
import clr.config
# Sentinal for args get in get_command_spec to indicate there is no default.
# Because we are pickling command specs for clr cache use a random int
# and check for equality rather than object identity.
NO_DEFAULT = 4194921784511160246
NAMESPACE_KEYS = sorted(clr.config.commands().keys() | {'system'})
# Load lazily namespace modules as needed. Some have expensive/occasionally
# failing initialization.
__namespaces = {}
def _load_namespace(key):
    """Imports the module specified by the given key."""
    if key == 'system':
        # The built-in namespace is constructed directly, never imported.
        instance = System()
    else:
        module_path = clr.config.commands()[key]
        try:
            instance = import_module(module_path).COMMANDS
        except Exception as e:
            # Surface import failures as a namespace-shaped error object.
            return ErrorLoadingNamespace(key, e)
    descr = instance.descr
    longdescr = getattr(instance, 'longdescr', descr)
    # Commands are any attributes named cmd_*; strip the prefix for the name.
    command_callables = {name[4:]: getattr(instance, name)
                         for name in dir(instance)
                         if name.startswith('cmd_')}
    command_specs = {name: get_command_spec(fn)
                     for name, fn in command_callables.items()}
    return Namespace(descr, longdescr, command_specs, command_callables)
def get_namespace(namespace_key):
    """Lazily load and return the namespace"""
    if namespace_key in __namespaces:
        return __namespaces[namespace_key]
    # First request for this namespace: load and memoize it.
    namespace = _load_namespace(namespace_key)
    __namespaces[namespace_key] = namespace
    return namespace
def _get_close_matches(query, options):
    """Return fuzzy matches for `query', then any remaining prefix matches."""
    matches = difflib.get_close_matches(query, options, cutoff=.4)
    if not query:
        return matches
    seen = set(matches)
    matches.extend(sorted(opt for opt in options
                          if opt.startswith(query) and opt not in seen))
    return matches
def resolve_command(query, cache=None):
    """Resolve the string `query' into a (namespace_key, command_name) tuple."""
    # Fully-qualified queries look like 'namespace:command'.
    if ':' in query:
        namespace_key, command_name = query.split(':', 1)
    elif query in get_namespace('system').commands:
        # Bare system commands, e.g. `clr help`, resolve without a namespace.
        namespace_key, command_name = 'system', query
    else:
        # Treat the bare word as a namespace; the checks below will then
        # produce a sensible error message.
        namespace_key, command_name = query, ''
    if namespace_key not in NAMESPACE_KEYS:
        close_matches = _get_close_matches(namespace_key, NAMESPACE_KEYS)
        print(f"Error! Command namespace '{namespace_key}' does not exist.\nClosest matches: {close_matches}\n\nAvailable namespaces: {sorted(NAMESPACE_KEYS)}", file=sys.stderr)
        sys.exit(1)
    if cache:
        namespace = cache.get(namespace_key)
    else:
        namespace = get_namespace(namespace_key)
    if command_name not in namespace.commands:
        close_matches = _get_close_matches(command_name, namespace.commands)
        print(f"Error! Command '{command_name}' does not exist in namespace '{namespace_key}' - {namespace.descr}.\nClosest matches: {close_matches}\n\nAvailable commands: {namespace.commands}", file=sys.stderr)
        sys.exit(1)
    return namespace_key, command_name
# Lightweight, picklable record describing a command's calling convention.
CommandSpec = namedtuple('CommandSpec', 'args varargs docstr')
def get_command_spec(command_callable):
    """Get a command spec from the given (resolved) command, and
    distinguish default args vs. non-default args.

    Returns a CommandSpec whose `args` is a list of (name, default) pairs,
    with NO_DEFAULT marking required arguments.
    """
    # TODO(michael.cusack): Move to using Signature and remove deprecated
    # getargspec.
    args, vararg, varkwarg, defvals = inspect.getargspec(command_callable)
    signature = Signature.from_callable(command_callable)
    # Return values are ignored by the clr driver; warn so authors know.
    if signature.return_annotation != Signature.empty:
        print(f'WARNING: {command_callable} returns a {signature.return_annotation} which is ignored.')
    for param in signature.parameters.values():
        if param.kind == param.VAR_KEYWORD:
            print(f'WARNING: Ignoring kwargs found for clr command {param} {command_callable}: {varkwarg}')
    if args is None:
        args = tuple()
    if defvals is None:
        defvals = tuple()
    # Avoid the self argument.
    if isinstance(command_callable, types.MethodType):
        # print(f'WARNING: {command_callable} is a method.')
        args = args[1:]
    # The last len(defvals) positional args carry defaults; the rest are
    # marked with NO_DEFAULT.
    nargs = len(args) - len(defvals)
    args = list(zip(args[:nargs], [NO_DEFAULT]*nargs)) + list(zip(args[nargs:], defvals))
    return CommandSpec(args, vararg, inspect.getdoc(command_callable))
@dataclass
class Namespace:
    """A loaded command namespace: help metadata plus its command tables."""
    descr: str
    longdescr: str
    command_specs: dict
    command_callables: dict

    @property
    def commands(self):
        """Command names, alphabetically sorted."""
        return sorted(self.command_specs)
@dataclass
class ErrorLoadingNamespace:
    """Pseudo namespace shown when a real one fails to import."""
    key: str
    error: Exception

    # An unloadable namespace exposes no commands.
    commands = {}
    command_specs = {}

    @property
    def descr(self):
        return f"ERROR Could not load. See `clr help {self.key}`"

    @property
    def longdescr(self):
        return f"Error importing module '{clr.config.commands()[self.key]}' for namespace '{self.key}':\n\n{self.error}"
@dataclass(frozen=True)
class NamespaceCacheEntry:
    """Immutable, picklable snapshot of a namespace's help metadata."""
    descr: str
    longdescr: str
    command_specs: dict

    @staticmethod
    def create(namespace):
        """Snapshot the cacheable parts of a full Namespace."""
        return NamespaceCacheEntry(namespace.descr, namespace.longdescr,
                                   namespace.command_specs)

    @property
    def commands(self):
        """Command names, alphabetically sorted."""
        return sorted(self.command_specs)
class NamespaceCache:
    """Cache introspection on command names and signatures to disk.
    This allows subsequent calls to `clr help` or `clr completion` to be fast.
    Necessary to work around the fact that many clr command namespace modules
    import the world and initialize state on import.
    """
    def __init__(self):
        # Fixed shared location; cmd_clear_cache deletes this file.
        self.CACHE_FN = '/tmp/clr_command_cache'
        self.cache = shelve.open(self.CACHE_FN)
    def get(self, namespace_key):
        """Return the cached entry for `namespace_key`, loading it on a miss."""
        # Don't cache the system namespace. It is already loaded.
        if namespace_key == 'system': return get_namespace('system')
        if namespace_key not in self.cache:
            namespace = get_namespace(namespace_key)
            # Never persist load failures; the next run should retry the import.
            if isinstance(namespace, ErrorLoadingNamespace): return namespace
            self.cache[namespace_key] = NamespaceCacheEntry.create(namespace)
            self.cache.sync()
        return self.cache[namespace_key]
class System(object):
    """System namespace for the clr tool.
    Commands defined here will be available directly without specifying a
    namespace. For example `clr help` instead of `clr system:help`. Be careful
    not to define commands here that have the same name as a defined namespace
    or it may be obscured."""
    descr = 'clr built-in commands'
    # Shared, disk-backed cache of command specs (see NamespaceCache).
    cache = NamespaceCache()

    def cmd_clear_cache(self):
        """Clear clr's cache.
        clr caches command specs to disk to speed up help and completions.
        Run this to clear the cache if your results are stale."""
        # Remove file. Process exits after this, will get recreated on next run.
        os.remove(self.cache.CACHE_FN)

    def cmd_completion(self, query=''):
        """Completion results for first arg to clr."""
        results = []
        if ':' not in query:
            # Suffix system commands with a space.
            results.extend(f'{c} ' for c in self.cache.get('system').commands)
            # Suffix namespaces with a :.
            results.extend(f'{k}:' for k in NAMESPACE_KEYS)
        else:
            namespace_key, _ = query.split(':', 1)
            results.extend(f'{namespace_key}:{c} ' for c in self.cache.get(namespace_key).commands)
        # Emit only candidates that extend the query; no trailing newline so
        # the shell completion machinery receives a clean word list.
        print('\n'.join(r for r in results if r.startswith(query)), end='')

    def cmd_profile_imports(self, *namespaces):
        """Prints some debugging information about how long it takes to import clr namespaces."""
        import time
        if not namespaces: namespaces = NAMESPACE_KEYS
        results = {}
        for index, key in enumerate(namespaces):
            t1 = time.time()
            get_namespace(key)
            results[f'#{index + 1}-{key}'] = time.time() - t1
        # Report load times in milliseconds, slowest last.
        print('\n'.join(f'{k}: {int(1000*v)}' for k, v in sorted(results.items(), key=lambda i:i[1])))

    def cmd_help(self, query=None, query2=None):
        """
        Provides help for commands. When specified, `query' can be
        either a namespace or a namespace:command tuple.
        """
        if not query:
            print('Available namespaces')
            for namespace_key in NAMESPACE_KEYS:
                print(' ', namespace_key.ljust(20), '-', self.cache.get(namespace_key).descr)
            return
        # If they passed just one arg and it is a namespace key, print help for the full namespace.
        if query.endswith(':'): query = query[:-1]
        if query in NAMESPACE_KEYS and not query2:
            for command in self.cache.get(query).commands:
                self.print_help_for_command(query, command, prefix='  ')
            return
        # `clr help ns cmd` is sugar for `clr help ns:cmd`.
        if query2: query = f'{query}:{query2}'
        namespace_key, command_name = resolve_command(query, cache=self.cache)
        self.print_help_for_command(namespace_key, command_name)

    def print_help_for_command(self, namespace_key, command_name, prefix=''):
        """Print the usage line (required args, option flags, varargs) and the
        command's docstring, wrapped and indented by `prefix'."""
        w = textwrap.TextWrapper(
            initial_indent=prefix, subsequent_indent=prefix,
            width=70)
        spec, vararg, docstr = self.cache.get(namespace_key).command_specs[command_name]
        def is_default(spec):
            # NO_DEFAULT compares by value because specs round-trip through
            # the pickle-backed cache (object identity is not preserved).
            return spec[1] == NO_DEFAULT
        req = [spec_item for spec_item in spec if is_default(spec_item)]
        notreq = [spec_item for spec_item in spec if not is_default(spec_item)]
        args = []
        if len(req) > 0:
            args.append(' '.join(['<%s>' % a for a, _ in req]))
        if notreq:
            def atxt(a, v):
                # Booleans render as toggle flags; others as --name=value.
                if isinstance(v, bool):
                    if v:
                        return '--no%s' % a
                    else:
                        return '--%s' % a
                else:
                    return '--%s=%s' % (a, v)
            args.append('[%s]' % ' '.join([atxt(a, v) for a, v in notreq]))
        if vararg is not None:
            args.append('[%s...]' % vararg)
        print(w.fill('%s %s' % (command_name, ' '.join(args))))
        w.initial_indent += '  '
        w.subsequent_indent += '  '
        if docstr:
            for l in docstr.split('\n'):
                print(w.fill(l))
|
from django import forms
from .fields import UploadField
from .models import Blob
class BlobForm(forms.ModelForm):
    """ModelForm for Blob that swaps the `local` field for an UploadField."""
    class Meta:
        model = Blob
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super(BlobForm, self).__init__(*args, **kwargs)
        # Replace the auto-generated field with the richer UploadField,
        # keeping the model's help text. BUG FIX: pass null/blank through so
        # the field may be left empty — the bare replacement made `local`
        # effectively required even when the model allows a blank value.
        self.fields['local'] = UploadField(null=True, blank=True,
                                           help_text=self.fields['local'].help_text)
Forgot to allow blank local fields
from django import forms
from .fields import UploadField
from .models import Blob
class BlobForm(forms.ModelForm):
    """ModelForm for Blob that swaps the auto-generated `local` field for an
    UploadField while preserving the model's help text."""
    class Meta:
        # Expose every Blob model field on the form.
        model = Blob
        fields = '__all__'
    def __init__(self, *args, **kwargs):
        super(BlobForm, self).__init__(*args, **kwargs)
        # null/blank allow the upload to be omitted; keep the original
        # help text from the auto-generated field.
        self.fields['local'] = UploadField(null=True, blank=True,
            help_text=self.fields['local'].help_text)
|
import uos
from flashbdev import bdev
def check_bootsec():
    """Return True when the boot sector is blank (all 0xFF bytes);
    otherwise hand off to fs_corrupted(), which never returns."""
    buf = bytearray(bdev.SEC_SIZE)
    bdev.readblocks(0, buf)
    if all(byte == 0xff for byte in buf):
        return True
    fs_corrupted()
def fs_corrupted():
    """Loop forever, periodically printing a filesystem-corruption warning."""
    import time
    msg = """\
FAT filesystem appears to be corrupted. If you had important data there, you
may want to make a flash snapshot to try to recover it. Otherwise, perform
factory reprogramming of MicroPython firmware (completely erase flash, followed
by firmware programming).
"""
    while True:
        print(msg)
        time.sleep(3)
def setup():
    """First-boot initialization: format the flash, mount it, and write a
    default boot.py. Returns the mounted VFS object."""
    check_bootsec()
    print("Performing initial setup")
    uos.VfsFat.mkfs(bdev)
    vfs = uos.VfsFat(bdev)
    # BUG FIX: mount the internal flash at the filesystem root, exactly as
    # the normal boot sequence does. Mounting at '/flash' on first setup
    # wrote boot.py to a path later boots never read.
    uos.mount(vfs, '/')
    with open("boot.py", "w") as f:
        f.write("""\
# This file is executed on every boot (including wake-boot from deepsleep)
#import esp
#esp.osdebug(None)
#import webrepl
#webrepl.start()
""")
    return vfs
esp32/modules: On initial setup mount internal flash at root.
Like it's done on normal boot up. Fixes issue #5004.
import uos
from flashbdev import bdev
def check_bootsec():
    """Return True when the boot sector is blank (all 0xFF bytes);
    otherwise hand off to fs_corrupted(), which never returns."""
    buf = bytearray(bdev.SEC_SIZE)
    bdev.readblocks(0, buf)
    if all(byte == 0xff for byte in buf):
        return True
    fs_corrupted()
def fs_corrupted():
    """Loop forever, periodically printing a filesystem-corruption warning."""
    import time
    msg = """\
FAT filesystem appears to be corrupted. If you had important data there, you
may want to make a flash snapshot to try to recover it. Otherwise, perform
factory reprogramming of MicroPython firmware (completely erase flash, followed
by firmware programming).
"""
    while True:
        print(msg)
        time.sleep(3)
def setup():
    """First-boot initialization: format the flash, mount it at the
    filesystem root (as every subsequent boot does) and write a default
    boot.py. Returns the mounted VFS object."""
    check_bootsec()
    print("Performing initial setup")
    uos.VfsFat.mkfs(bdev)
    vfs = uos.VfsFat(bdev)
    # Mount at root so boot.py lands where the normal boot sequence reads it.
    uos.mount(vfs, '/')
    with open("boot.py", "w") as f:
        f.write("""\
# This file is executed on every boot (including wake-boot from deepsleep)
#import esp
#esp.osdebug(None)
#import webrepl
#webrepl.start()
""")
    return vfs
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from __future__ import print_function
from erppeek import *
from base import *
import argparse
import getpass
def clv_document_unlink(client, args):
    """Delete the clv_document records matching the `args' domain, first
    removing each document's history records; documents the server refuses
    to delete are counted and reported instead of aborting the run."""
    clv_document = client.model('clv_document')
    document_browse = clv_document.browse(args)
    i = 0
    deleted = 0
    not_deleted = 0
    for document in document_browse:
        i += 1
        print(i, document.name.encode("utf-8"))
        # Remove the document's history entries first so the document
        # itself can be unlinked.
        history = client.model('clv_document.history')
        history_browse = history.browse([('document_id', '=', document.id), ])
        history_ids = history_browse.id
        print('>>>>>', history_ids)
        history.unlink(history_ids)
        try:
            clv_document.unlink(document.id)
            deleted += 1
        except:
            # Deliberate best-effort: server-side constraints may block the
            # delete; record it and continue with the next document.
            print('>>>>>', 'Not deleted!')
            not_deleted += 1
    print('--> i: ', i)
    print('--> deleted: ', deleted)
    print('--> not_deleted: ', not_deleted)
def _survey_id_by_title(survey_survey, title):
    """Return the id of the survey.survey record with exactly this title."""
    return survey_survey.browse([('title', '=', title), ])[0].id


def _family_id_of(patient):
    """Best-effort lookup of the patient's family id; False when unknown."""
    try:
        return patient.person.family_member_ids[0].family_id.id
    except:
        # Deliberate best-effort: patients without family links yield False.
        return False


def _collected_survey_ids(documents):
    """Return the survey ids already referenced by the given documents."""
    survey_ids = []
    for document in documents:
        print('>>>>>', survey_ids, [document.survey_id.id])
        survey_ids = survey_ids + [document.survey_id.id]
    return survey_ids


def _create_document_if_missing(clv_document, survey_ids, survey_id, values):
    """Create a clv_document from `values' unless `survey_id' is present."""
    if survey_id not in survey_ids:
        document_id = clv_document.create(values).id
        print('>>>>>', document_id)


def clv_document_create(client, args):
    """Create the JCAFB 2016 questionnaire documents each selected patient
    still lacks, according to the patient's categories (Idoso, Criança,
    DHC, Anemia).

    BUG FIX: the original family-survey loop concatenated the bare int
    `document.survey_id.id` to a list (a TypeError); the collection now
    wraps the id in a list, as every other survey-id loop already did.
    """
    clv_document = client.model('clv_document')
    clv_patient = client.model('clv_patient')
    patient_browse = clv_patient.browse(args)
    clv_patient_category = client.model('clv_patient.category')
    cat_idoso_2016_id = clv_patient_category.browse([('name', '=', 'Idoso 2016'), ])[0].id
    cat_crianca_2016_id = clv_patient_category.browse([('name', '=', 'Criança 2016'), ])[0].id
    cat_dhc_2016_id = clv_patient_category.browse([('name', '=', 'DHC 2016'), ])[0].id
    cat_anemia_2016_id = clv_patient_category.browse([('name', '=', 'Anemia 2016'), ])[0].id
    survey_survey = client.model('survey.survey')
    survey_FSE16_id = _survey_id_by_title(
        survey_survey,
        '[FSE16] JCAFB 2016 - Questionário Socioeconômico Familiar (Crianças e Idosos)')
    survey_ISE16_id = _survey_id_by_title(
        survey_survey,
        '[ISE16] JCAFB 2016 - Questionário Socioeconômico Individual (Idosos)')
    survey_CSE16_id = _survey_id_by_title(
        survey_survey,
        '[CSE16] JCAFB 2016 - Questionário Socioeconômico Individual (Crianças)')
    survey_QMD16_id = _survey_id_by_title(
        survey_survey,
        '[QMD16] JCAFB 2016 - Questionário Medicamento')
    survey_ITM16_id = _survey_id_by_title(
        survey_survey,
        '[ITM16] JCAFB 2016 - Interpretação das Tabelas de Medicamentos')
    survey_QAN16_id = _survey_id_by_title(
        survey_survey,
        '[QAN16] JCAFB 2016 - Questionário para detecção de Anemia')
    survey_QDH16_id = _survey_id_by_title(
        survey_survey,
        '[QDH16] JCAFB 2016 - Questionário - Diabetes, Hipertensão Arterial e Hipercolesterolemia')
    survey_TCP16_id = _survey_id_by_title(
        survey_survey,
        '[TCP16] JCAFB 2016 - ' +
        'TERMO DE CONSENTIMENTO PARA A CAMPANHA DE DETECÇÃO DE DIABETES, ' +
        'HIPERTENSÃO ARTERIAL E HIPERCOLESTEROLEMIA')
    survey_TCR16_id = _survey_id_by_title(
        survey_survey,
        '[TCR16] JCAFB 2016 - ' +
        'TERMO DE CONSENTIMENTO LIVRE E ESCLARECIDO PARA REALIZAÇÃO DE EXAMES COPROPARASITOLÓGICOS, ' +
        'DETECÇÃO DE ANEMIA E QUESTIONÁRIO SOCIOECONÔMICO')
    survey_TID16_id = _survey_id_by_title(
        survey_survey,
        '[TID16] JCAFB 2016 - ' +
        'TERMO DE CONSENTIMENTO LIVRE E ESCLARECIDO PARA REALIZAÇÃO DE EXAME DE URINA, ' +
        'COPROPARASITOLÓGICO, DETECÇÃO DE ANEMIA E QUESTIONÁRIO SOCIOECONÔMICO')
    i = 0
    idoso_2016 = 0
    crianca_2016 = 0
    dhc_2016 = 0
    anemia_2016 = 0
    for patient in patient_browse:
        i += 1
        print(i, patient.name.encode('utf-8'), patient.category_ids.id)
        if (cat_idoso_2016_id in patient.category_ids.id) or \
           (cat_crianca_2016_id in patient.category_ids.id):
            family_id = _family_id_of(patient)
            survey_ids = _collected_survey_ids(
                patient.person.family_member_ids.family_id.document_ids)
            # The family questionnaire intentionally carries no patient_id.
            _create_document_if_missing(clv_document, survey_ids, survey_FSE16_id, {
                'survey_id': survey_FSE16_id,
                'family_id': family_id,
            })
        if cat_idoso_2016_id in patient.category_ids.id:
            idoso_2016 += 1
            family_id = _family_id_of(patient)
            survey_ids = _collected_survey_ids(patient.document_ids)
            for survey_id in (survey_ISE16_id, survey_QMD16_id, survey_ITM16_id,
                              survey_QAN16_id, survey_QDH16_id, survey_TID16_id,
                              survey_TCP16_id):
                _create_document_if_missing(clv_document, survey_ids, survey_id, {
                    'survey_id': survey_id,
                    'patient_id': patient.id,
                    'family_id': family_id,
                })
        if cat_crianca_2016_id in patient.category_ids.id:
            crianca_2016 += 1
            family_id = _family_id_of(patient)
            survey_ids = _collected_survey_ids(patient.document_ids)
            for survey_id in (survey_CSE16_id, survey_QAN16_id, survey_TCR16_id):
                _create_document_if_missing(clv_document, survey_ids, survey_id, {
                    'survey_id': survey_id,
                    'patient_id': patient.id,
                    'family_id': family_id,
                })
        if cat_dhc_2016_id in patient.category_ids.id:
            dhc_2016 += 1
            family_id = _family_id_of(patient)
            survey_ids = _collected_survey_ids(patient.document_ids)
            for survey_id in (survey_QDH16_id, survey_TCP16_id):
                _create_document_if_missing(clv_document, survey_ids, survey_id, {
                    'survey_id': survey_id,
                    'patient_id': patient.id,
                    'family_id': family_id,
                })
        if cat_anemia_2016_id in patient.category_ids.id:
            anemia_2016 += 1
            family_id = _family_id_of(patient)
            survey_ids = _collected_survey_ids(patient.document_ids)
            _create_document_if_missing(clv_document, survey_ids, survey_QAN16_id, {
                'survey_id': survey_QAN16_id,
                'patient_id': patient.id,
                'family_id': family_id,
            })
    print('--> i: ', i)
    print('--> idoso_2016: ', idoso_2016)
    print('--> crianca_2016: ', crianca_2016)
    print('--> dhc_2016: ', dhc_2016)
    print('--> anemia_2016: ', anemia_2016)
def clv_document_clear_survey_user_input_id(client, args):
    """Clear the survey_user_input_id field on every clv_document matching
    the `args' domain."""
    clv_document = client.model('clv_document')
    document_browse = clv_document.browse(args)
    i = 0
    for document in document_browse:
        i += 1
        print(i, document.name, document.survey_id.title.encode("utf-8"))
        # False is how erppeek/Odoo represents an unset many2one value.
        values = {
            "survey_user_input_id": False,
        }
        clv_document.write(document.id, values)
    print('--> i: ', i)
def clv_document_get_survey_user_input_id(client, args):
    """For each selected document of one of the three socioeconomic
    questionnaires (FSE16, ISE16, CSE16), look for a survey.user_input_line
    whose value_text equals the document name and report whether a match
    exists. The actual linking write is kept disabled below.

    Refactor: the original repeated one identical block per survey and
    looked up six additional survey ids it never used; the three target
    ids are now checked with a single membership test and the unused
    lookups are gone.
    """
    survey_survey = client.model('survey.survey')
    target_survey_ids = [
        survey_survey.browse([(
            'title', '=',
            '[FSE16] JCAFB 2016 - Questionário Socioeconômico Familiar (Crianças e Idosos)'), ])[0].id,
        survey_survey.browse([(
            'title', '=',
            '[ISE16] JCAFB 2016 - Questionário Socioeconômico Individual (Idosos)'), ])[0].id,
        survey_survey.browse([(
            'title', '=',
            '[CSE16] JCAFB 2016 - Questionário Socioeconômico Individual (Crianças)'), ])[0].id,
    ]
    clv_document = client.model('clv_document')
    document_browse = clv_document.browse(args)
    survey_user_input_line = client.model('survey.user_input_line')
    i = 0
    found = 0
    not_found = 0
    for document in document_browse:
        if document.survey_id.id not in target_survey_ids:
            continue
        i += 1
        survey_user_input_line_browse = survey_user_input_line.browse(
            [('value_text', '=', document.name), ])
        survey_user_input_line_ids = survey_user_input_line_browse.id
        if survey_user_input_line_ids != []:
            found += 1
            print(i, document.name, document.survey_id.title.encode("utf-8"),
                  survey_user_input_line_browse[0].user_input_id.state)
            # Disabled: link the document to the matched user input.
            # values = {
            #     "survey_user_input_id": survey_user_input_line_browse[0].user_input_id.id,
            # }
            # clv_document.write(document.id, values)
        else:
            not_found += 1
    print('--> i: ', i)
    print('--> found: ', found)
    print('--> not_found: ', not_found)
def get_arguments():
    """Populate the global connection credentials from the command line.

    Flags (--user/--pw/--db) take precedence.  When a flag is absent and
    the corresponding global still holds the '*' placeholder, the value is
    prompted for interactively (getpass for the password).
    """
    global username
    global password
    global dbname
    parser = argparse.ArgumentParser()
    parser.add_argument('--user', action="store", dest="username")
    parser.add_argument('--pw', action="store", dest="password")
    parser.add_argument('--db', action="store", dest="dbname")
    args = parser.parse_args()
    print('%s%s' % ('--> ', args))

    def resolve(cli_value, current, prompt, ask):
        # CLI value wins; only the '*' placeholder triggers a prompt.
        if cli_value is not None:
            return cli_value
        if current == '*':
            return ask(prompt)
        return current

    dbname = resolve(args.dbname, dbname, 'dbname: ', raw_input)
    username = resolve(args.username, username, 'username: ', raw_input)
    password = resolve(args.password, password, 'password: ', getpass.getpass)
if __name__ == '__main__':
    # Connection defaults; '*' placeholders are resolved by get_arguments().
    server = 'http://localhost:8069'
    # username = 'username'
    username = '*'
    # password = 'password'
    # BUG FIX: this assignment was misspelled "paswword", which left the
    # global "password" undefined; get_arguments() then raised NameError
    # whenever --pw was not supplied on the command line.
    password = '*'
    dbname = 'odoo'
    # dbname = '*'
    get_arguments()
    from time import time
    start = time()
    print('--> clv_document.py...')
    client = erppeek.Client(server, dbname, username, password)
    # patient_args = [('category_ids', '!=', False), ]
    # print('-->', client, patient_args)
    # print('--> Executing clv_document_create()...')
    # clv_document_create(client, patient_args)
    # document_args = [('state', '=', 'waiting'),
    #                  ('survey_user_input_id', '!=', False),
    #                  ]
    # print('-->', client, document_args)
    # print('--> Executing clv_document_clear_survey_user_input_id()...')
    # clv_document_clear_survey_user_input_id(client, document_args)
    document_args = [('state', '=', 'draft'),
                     ('survey_user_input_id', '=', False),
                     ]
    print('-->', client, document_args)
    print('--> Executing clv_document_get_survey_user_input_id()...')
    clv_document_get_survey_user_input_id(client, document_args)
    print('--> clv_document.py')
    print('--> Execution time:', secondsToStr(time() - start))
Updated "clv_document.py".
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from __future__ import print_function
from erppeek import *
from base import *
import argparse
import getpass
def clv_document_unlink(client, args):
    """Delete the clv_document records selected by the domain *args*.

    Dependent clv_document.history rows are removed first so the document
    unlink can succeed.  Documents that still cannot be deleted are counted
    and reported instead of aborting the run.
    """
    clv_document = client.model('clv_document')
    document_browse = clv_document.browse(args)
    i = 0
    deleted = 0
    not_deleted = 0
    for document in document_browse:
        i += 1
        print(i, document.name.encode("utf-8"))
        # Remove dependent history entries first.
        history = client.model('clv_document.history')
        history_browse = history.browse([('document_id', '=', document.id), ])
        history_ids = history_browse.id
        print('>>>>>', history_ids)
        history.unlink(history_ids)
        try:
            clv_document.unlink(document.id)
            deleted += 1
        except Exception:
            # BUG FIX: was a bare "except:" which also swallowed
            # KeyboardInterrupt/SystemExit.  Best effort: report and go on.
            print('>>>>>', 'Not deleted!')
            not_deleted += 1
    print('--> i: ', i)
    print('--> deleted: ', deleted)
    print('--> not_deleted: ', not_deleted)
def _jcafb_survey_id(survey_survey, title):
    """Return the id of the survey whose title matches *title* exactly."""
    return survey_survey.browse([('title', '=', title), ])[0].id


def _patient_family_id(patient):
    """Best-effort lookup of the patient's family id (False when the patient
    has no usable family member record)."""
    try:
        return patient.person.family_member_ids[0].family_id.id
    except Exception:
        return False


def _document_survey_ids(documents):
    """Collect the ids of the surveys already covered by *documents*."""
    survey_ids = []
    for document in documents:
        print('>>>>>', survey_ids, [document.survey_id.id])
        survey_ids = survey_ids + [document.survey_id.id]
    return survey_ids


def _create_document(clv_document, values):
    """Create one clv_document record and log its new id."""
    document_id = clv_document.create(values).id
    print('>>>>>', document_id)


def clv_document_create(client, args):
    """Create the JCAFB 2016 campaign documents still missing for each
    patient selected by the domain *args*.

    Depending on the patient's categories (Idoso/Criança/DHC/Anemia 2016),
    one document per required questionnaire is created unless the patient
    (or, for FSE16, the patient's family) already has a document for that
    survey.
    """
    clv_document = client.model('clv_document')
    clv_patient = client.model('clv_patient')
    patient_browse = clv_patient.browse(args)
    clv_patient_category = client.model('clv_patient.category')
    cat_idoso_2016_id = clv_patient_category.browse([('name', '=', 'Idoso 2016'), ])[0].id
    cat_crianca_2016_id = clv_patient_category.browse([('name', '=', 'Criança 2016'), ])[0].id
    cat_dhc_2016_id = clv_patient_category.browse([('name', '=', 'DHC 2016'), ])[0].id
    cat_anemia_2016_id = clv_patient_category.browse([('name', '=', 'Anemia 2016'), ])[0].id
    survey_survey = client.model('survey.survey')
    survey_FSE16_id = _jcafb_survey_id(
        survey_survey,
        '[FSE16] JCAFB 2016 - Questionário Socioeconômico Familiar (Crianças e Idosos)')
    survey_ISE16_id = _jcafb_survey_id(
        survey_survey,
        '[ISE16] JCAFB 2016 - Questionário Socioeconômico Individual (Idosos)')
    survey_CSE16_id = _jcafb_survey_id(
        survey_survey,
        '[CSE16] JCAFB 2016 - Questionário Socioeconômico Individual (Crianças)')
    survey_QMD16_id = _jcafb_survey_id(
        survey_survey,
        '[QMD16] JCAFB 2016 - Questionário Medicamento')
    survey_ITM16_id = _jcafb_survey_id(
        survey_survey,
        '[ITM16] JCAFB 2016 - Interpretação das Tabelas de Medicamentos')
    survey_QAN16_id = _jcafb_survey_id(
        survey_survey,
        '[QAN16] JCAFB 2016 - Questionário para detecção de Anemia')
    survey_QDH16_id = _jcafb_survey_id(
        survey_survey,
        '[QDH16] JCAFB 2016 - Questionário - Diabetes, Hipertensão Arterial e Hipercolesterolemia')
    survey_TCP16_id = _jcafb_survey_id(
        survey_survey,
        '[TCP16] JCAFB 2016 - ' +
        'TERMO DE CONSENTIMENTO PARA A CAMPANHA DE DETECÇÃO DE DIABETES, ' +
        'HIPERTENSÃO ARTERIAL E HIPERCOLESTEROLEMIA')
    survey_TCR16_id = _jcafb_survey_id(
        survey_survey,
        '[TCR16] JCAFB 2016 - ' +
        'TERMO DE CONSENTIMENTO LIVRE E ESCLARECIDO PARA REALIZAÇÃO DE EXAMES COPROPARASITOLÓGICOS, ' +
        'DETECÇÃO DE ANEMIA E QUESTIONÁRIO SOCIOECONÔMICO')
    survey_TID16_id = _jcafb_survey_id(
        survey_survey,
        '[TID16] JCAFB 2016 - ' +
        'TERMO DE CONSENTIMENTO LIVRE E ESCLARECIDO PARA REALIZAÇÃO DE EXAME DE URINA, ' +
        'COPROPARASITOLÓGICO, DETECÇÃO DE ANEMIA E QUESTIONÁRIO SOCIOECONÔMICO')
    i = 0
    idoso_2016 = 0
    crianca_2016 = 0
    dhc_2016 = 0
    anemia_2016 = 0
    for patient in patient_browse:
        i += 1
        print(i, patient.name.encode('utf-8'), patient.category_ids.id)
        if (cat_idoso_2016_id in patient.category_ids.id) or \
           (cat_crianca_2016_id in patient.category_ids.id):
            # FSE16 is a family-level questionnaire: look at the documents of
            # the whole family, not only of this patient.
            family_id = _patient_family_id(patient)
            # BUG FIX: the original concatenated a bare id onto the list
            # ("survey_ids + document.survey_id.id"), a TypeError; the id is
            # now wrapped in a list as in every other branch.
            survey_ids = _document_survey_ids(
                patient.person.family_member_ids.family_id.document_ids)
            if survey_FSE16_id not in survey_ids:
                _create_document(clv_document, {
                    'survey_id': survey_FSE16_id,
                    'family_id': family_id,
                })
        if cat_idoso_2016_id in patient.category_ids.id:
            idoso_2016 += 1
            family_id = _patient_family_id(patient)
            survey_ids = _document_survey_ids(patient.document_ids)
            # One individual document per questionnaire of the "Idoso" set,
            # in the original creation order.
            for survey_id in (survey_ISE16_id, survey_QMD16_id, survey_ITM16_id,
                              survey_QAN16_id, survey_QDH16_id, survey_TID16_id,
                              survey_TCP16_id):
                if survey_id not in survey_ids:
                    _create_document(clv_document, {
                        'survey_id': survey_id,
                        'patient_id': patient.id,
                        'family_id': family_id,
                    })
        if cat_crianca_2016_id in patient.category_ids.id:
            crianca_2016 += 1
            family_id = _patient_family_id(patient)
            survey_ids = _document_survey_ids(patient.document_ids)
            for survey_id in (survey_CSE16_id, survey_QAN16_id, survey_TCR16_id):
                if survey_id not in survey_ids:
                    _create_document(clv_document, {
                        'survey_id': survey_id,
                        'patient_id': patient.id,
                        'family_id': family_id,
                    })
        if cat_dhc_2016_id in patient.category_ids.id:
            dhc_2016 += 1
            survey_ids = _document_survey_ids(patient.document_ids)
            family_id = _patient_family_id(patient)
            for survey_id in (survey_QDH16_id, survey_TCP16_id):
                if survey_id not in survey_ids:
                    _create_document(clv_document, {
                        'survey_id': survey_id,
                        'patient_id': patient.id,
                        'family_id': family_id,
                    })
        if cat_anemia_2016_id in patient.category_ids.id:
            anemia_2016 += 1
            family_id = _patient_family_id(patient)
            survey_ids = _document_survey_ids(patient.document_ids)
            if survey_QAN16_id not in survey_ids:
                _create_document(clv_document, {
                    'survey_id': survey_QAN16_id,
                    'patient_id': patient.id,
                    'family_id': family_id,
                })
    print('--> i: ', i)
    print('--> idoso_2016: ', idoso_2016)
    print('--> crianca_2016: ', crianca_2016)
    print('--> dhc_2016: ', dhc_2016)
    print('--> anemia_2016: ', anemia_2016)
def clv_document_clear_survey_user_input_id(client, args):
    """Reset survey_user_input_id to False on every clv_document record
    selected by the domain *args*, logging each document as it goes."""
    clv_document = client.model('clv_document')
    documents = clv_document.browse(args)
    count = 0
    for count, document in enumerate(documents, start=1):
        print(count, document.name, document.survey_id.title.encode("utf-8"))
        clv_document.write(document.id, {
            "survey_user_input_id": False,
        })
    print('--> i: ', count)
def clv_document_get_survey_user_input_id(client, args):
    """For each selected document whose survey is one of the 2016
    socioeconomic questionnaires (FSE16, ISE16 or CSE16), look up the
    survey user-input line whose value_text equals the document name and
    report whether a matching user input exists.

    The write of survey_user_input_id is intentionally left disabled, as in
    the original revision: this function only reports matches.
    """
    survey_survey = client.model('survey.survey')

    def _survey_id(title):
        # Surveys are identified by their exact (unique) title.
        return survey_survey.browse([('title', '=', title), ])[0].id

    survey_FSE16_id = _survey_id(
        '[FSE16] JCAFB 2016 - Questionário Socioeconômico Familiar (Crianças e Idosos)')
    survey_ISE16_id = _survey_id(
        '[ISE16] JCAFB 2016 - Questionário Socioeconômico Individual (Idosos)')
    survey_CSE16_id = _survey_id(
        '[CSE16] JCAFB 2016 - Questionário Socioeconômico Individual (Crianças)')
    # NOTE: the original also fetched the QMD16/ITM16/QAN16/QDH16/TCP16/
    # TCR16/TID16 ids but never used them below; those dead server
    # roundtrips were removed.  The three branches that followed were
    # byte-identical except for the survey id (ids are distinct, so at most
    # one branch ever fired per document) and are collapsed into one.
    tracked_survey_ids = (survey_FSE16_id, survey_ISE16_id, survey_CSE16_id)
    clv_document = client.model('clv_document')
    document_browse = clv_document.browse(args)
    survey_user_input_line = client.model('survey.user_input_line')
    i = 0
    found = 0
    not_found = 0
    for document in document_browse:
        if document.survey_id.id not in tracked_survey_ids:
            continue
        i += 1
        line_browse = survey_user_input_line.browse(
            [('value_text', '=', document.name), ])
        if line_browse.id != []:
            found += 1
            print(i, document.name, document.survey_id.title.encode("utf-8"),
                  line_browse[0].user_input_id.state)
            # values = {
            #     "survey_user_input_id": line_browse[0].user_input_id.id,
            # }
            # clv_document.write(document.id, values)
        else:
            not_found += 1
    print('--> i: ', i)
    print('--> found: ', found)
    print('--> not_found: ', not_found)
def get_arguments():
    """Populate the global connection credentials.

    Command-line flags (--user/--pw/--db) take precedence; when a flag is
    absent and the corresponding global still holds the '*' placeholder,
    the value is prompted for interactively (getpass for the password).
    """
    global username
    global password
    global dbname
    parser = argparse.ArgumentParser()
    parser.add_argument('--user', action="store", dest="username")
    parser.add_argument('--pw', action="store", dest="password")
    parser.add_argument('--db', action="store", dest="dbname")
    args = parser.parse_args()
    print('%s%s' % ('--> ', args))
    # For each credential: CLI value wins; otherwise only the '*'
    # placeholder triggers an interactive prompt.
    if args.dbname is not None:
        dbname = args.dbname
    elif dbname == '*':
        dbname = raw_input('dbname: ')
    if args.username is not None:
        username = args.username
    elif username == '*':
        username = raw_input('username: ')
    if args.password is not None:
        password = args.password
    elif password == '*':
        password = getpass.getpass('password: ')
if __name__ == '__main__':
    # Connection defaults; '*' placeholders are resolved by get_arguments().
    server = 'http://localhost:8069'
    # username = 'username'
    username = '*'
    # password = 'password'
    # BUG FIX: this assignment was misspelled "paswword", which left the
    # global "password" undefined; get_arguments() then raised NameError
    # whenever --pw was not supplied on the command line.
    password = '*'
    dbname = 'odoo'
    # dbname = '*'
    get_arguments()
    from time import time
    start = time()
    print('--> clv_document.py...')
    client = erppeek.Client(server, dbname, username, password)
    # patient_args = [('category_ids', '!=', False), ]
    # print('-->', client, patient_args)
    # print('--> Executing clv_document_create()...')
    # clv_document_create(client, patient_args)
    # # document_args = [('state', '=', 'waiting'),
    # #                  ('survey_user_input_id', '!=', False),
    # #                  ]
    # document_args = [('survey_user_input_id', '!=', False),
    #                  ]
    # print('-->', client, document_args)
    # print('--> Executing clv_document_clear_survey_user_input_id()...')
    # clv_document_clear_survey_user_input_id(client, document_args)
    # document_args = [('state', '=', 'draft'),
    #                  ('survey_user_input_id', '=', False),
    #                  ]
    # print('-->', client, document_args)
    # print('--> Executing clv_document_get_survey_user_input_id()...')
    # clv_document_get_survey_user_input_id(client, document_args)
    print('--> clv_document.py')
    print('--> Execution time:', secondsToStr(time() - start))
|
# -*- coding: utf-8 -*-
import os.path, re, yaml
pjoin = os.path.join
from collections import defaultdict, Counter, OrderedDict
import xml.etree.cElementTree as etree
import urllib.request
from multiprocessing import Process, Manager
from subprocess import Popen, PIPE
from io import StringIO
from apertium import whereis, destxt, retxt, checksum
from apertium.quality import Statistics
ARROW = "\u2192"
class RegressionTest(object):
    """Run Apertium regression tests defined on a MediaWiki page.

    The wiki page (ideally fetched via Special:Export) contains
    {{test|lang|input|expected}} templates.  Every input is piped through
    the `apertium` pipeline in the configured mode and the output is
    compared against the expected translation.
    """

    # Matches {{test|<lang>|<input>|<expected>}} wiki templates.
    wrg = re.compile(r"{{test\|(.*)\|(.*)\|(.*)}}")
    # MediaWiki XML export namespace prefix.
    ns = "{http://www.mediawiki.org/xml/export-0.3/}"
    program = "apertium"

    def __init__(self, url=None, mode=None, directory=".", **kwargs):
        """Fetch and parse the test page.

        url: wiki page location (HTTP URL or local XML dump path).
        mode: apertium translation mode (e.g. "br-fr").
        directory: directory of the compiled language pair.
        Raises TypeError when url or mode is missing.
        """
        url = kwargs.get('url', url)
        mode = kwargs.get('mode', mode)
        directory = kwargs.get('directory', directory)
        if None in (url, mode):
            raise TypeError("Url or mode parameter missing.")
        whereis([self.program])
        if not "Special:Export" in url:
            print("Warning: URL did not contain Special:Export.")
        self.mode = mode
        self.directory = directory
        # Accept both remote URLs and local XML dumps.
        if url.startswith('http'):
            self.tree = etree.parse(urllib.request.urlopen(url))
        else:
            self.tree = etree.parse(open(url))
        self.passes = 0
        self.total = 0
        text = None
        for e in self.tree.getroot().getiterator():
            if e.tag == self.ns + "title":
                self.title = e.text
            if e.tag == self.ns + "revision":
                self.revision = e[0].text # should be <id>
            if e.tag == self.ns + "text":
                text = e.text
        if not text:
            raise AttributeError("No text element?")
        # tests maps language section -> OrderedDict of input -> expected.
        self.tests = defaultdict(OrderedDict)
        rtests = text.split('\n')
        rtests = [self.wrg.search(j) for j in rtests if self.wrg.search(j)]
        for i in rtests:
            lang, left, right = i.group(1), i.group(2), i.group(3)
            # Append '[_].' to unterminated inputs; run() later strips at
            # '[_]' again — presumably a deformatting marker, TODO confirm.
            if not left.endswith('.'):
                left += '[_].'
            self.tests[lang.strip()][left.strip()] = right.strip()
        self.out = StringIO()

    def run(self):
        """Pipe every test input through apertium and record pass/fail.

        Returns 0 on success, or the subprocess return code on failure.
        """
        for side in self.tests:
            self.out.write("Now testing: %s\n" % side)
            args = '\n'.join(self.tests[side].keys())
            app = Popen([self.program, '-d', self.directory, self.mode], stdin=PIPE, stdout=PIPE, stderr=PIPE)
            app.stdin.write(args.encode('utf-8'))
            res = app.communicate()[0]
            self.results = str(res.decode('utf-8')).split('\n')
            if app.returncode > 0:
                return app.returncode
            for n, test in enumerate(self.tests[side].items()):
                # Guard: apertium may emit fewer lines than there are tests.
                if n >= len(self.results):
                    #raise AttributeError("More tests than results.")
                    continue
                res = self.results[n].split("[_]")[0].strip()
                orig = test[0].split("[_]")[0].strip()
                targ = test[1].strip()
                self.out.write("%s\t %s\n" % (self.mode, orig))
                if res == targ:
                    self.out.write("WORKS\t %s\n" % res)
                    self.passes += 1
                else:
                    # Diff-style report: expected (-) vs actual (+).
                    self.out.write("\t- %s\n" % targ)
                    self.out.write("\t+ %s\n" % res)
                self.total += 1
            self.out.write('\n')
        return 0

    def get_passes(self):
        """Number of tests whose output matched the expected text."""
        return self.passes

    def get_fails(self):
        """Number of tests whose output did not match."""
        return self.total - self.passes

    def get_total(self):
        """Number of tests that produced a comparable result."""
        return self.total

    def get_total_percent(self):
        """Pass rate formatted as a percentage string.

        NOTE(review): divides by self.total — raises ZeroDivisionError when
        no tests were run; get_output() guards, callers of this must too.
        """
        return "%.2f" % (float(self.passes)/float(self.total)*100)

    def save_statistics(self, f):
        """Record this run (page title, revision, counts) in stats file *f*."""
        stats = Statistics(f)
        ns = "{http://www.mediawiki.org/xml/export-0.3/}"
        page = self.tree.getroot().find(ns + 'page')
        rev = page.find(ns + 'revision').find(ns + 'id').text
        title = page.find(ns + 'title').text
        stats.add_regression(title, rev, self.passes, self.total, self.get_total_percent())
        stats.write()

    def get_output(self):
        """Print the accumulated report followed by a pass-rate summary."""
        print(self.out.getvalue())
        percent = 0
        if self.total > 0:
            percent = float(self.passes) / float(self.total) * 100
        print("Passes: %d/%d, Success rate: %.2f%%" % (self.passes, self.total, percent))
class CoverageTest(object):
    """Measure analyser coverage of a corpus against a compiled dictionary.

    The corpus is fed through `lt-proc` with the given dictionary binary;
    tokens whose analysis contains '*' are counted as unknown.
    """

    def __init__(self, f=None, dct=None, **kwargs):
        """f: corpus text file path; dct: compiled dictionary (.bin) path.

        Raises TypeError when either parameter is missing.
        """
        f = kwargs.get('f', f)
        dct = kwargs.get('dct', dct)
        if None in (f, dct):
            raise TypeError("f or dct parameter missing.")
        whereis(["lt-proc"])#, "apertium-destxt", "apertium-retxt"):
        self.fn = f #TODO: make sure file exists
        self.f = open(f, 'r')
        self.dct = dct
        # Cached per-token analysis lines; populated lazily by run().
        self.result = None

    def run(self):
        """Analyse the corpus once and cache per-token results; returns 0."""
        if not self.result:
            # Re-splits the stream so each "^...$" token lands on its own line.
            delim = re.compile(r"\$[^^]*\^")
            f = self.f.read()
            self.f.seek(0)
            output = destxt(f).encode('utf-8')
            proc = Popen(['lt-proc', self.dct], stdin=PIPE, stdout=PIPE)
            output = str(proc.communicate(output)[0].decode('utf-8'))
            output = retxt(output)
            output = delim.sub("$\n^", output)
            self.result = output.split('\n')
        return 0

    def get_words(self):
        """All analysed tokens (running the analysis on first use)."""
        if not self.result:
            self.run()
        return [ i.strip() for i in self.result ]

    def get_known_words(self):
        """Tokens whose analysis contains no '*' (recognised forms)."""
        if not self.result:
            self.run()
        return [ i.strip() for i in self.result if not '*' in i ]

    def get_unknown_words(self):
        """Tokens flagged with '*' (unrecognised by the dictionary)."""
        if not self.result:
            self.run()
        return [ i.strip() for i in self.result if '*' in i ]

    def get_top_unknown_words(self, c=20):
        """The *c* most frequent unknown tokens as (word, count) pairs."""
        return Counter(self.get_unknown_words()).most_common(c)

    def get_top_unknown_words_string(self, c=20):
        """Tab-separated "count word" listing of the top unknown tokens."""
        out = StringIO()
        for word, count in self.get_top_unknown_words(c):
            out.write("%d\t %s\n" % (count, word))
        return out.getvalue()

    def get_coverage(self):
        """Percentage of tokens recognised by the dictionary.

        NOTE(review): an empty corpus makes this raise ZeroDivisionError.
        """
        a = float(len(self.get_known_words()))
        b = float(len(self.get_words()))
        return a / b * 100

    def save_statistics(self, f):
        """Record corpus/dictionary checksums and coverage figures in *f*."""
        stats = Statistics(f)
        # Extracts the surface form between '^' and '/' of an analysis.
        wrx = re.compile(r"\^(.*)/")
        cfn = os.path.basename(self.fn)
        dfn = os.path.basename(self.dct)
        cck = checksum(self.f.read())
        dck = checksum(open(self.dct).read())
        cov = "%.2f" % self.get_coverage()
        words = len(self.get_words())
        kwords = len(self.get_known_words())
        ukwords = len(self.get_unknown_words())
        topukwtmp = self.get_top_unknown_words()
        topukw = []
        for word, count in topukwtmp:
            topukw.append((wrx.search(word).group(1), count))
        stats.add_coverage(cfn, dfn, cck, dck, cov, words, kwords, ukwords, topukw)
        stats.write()

    def get_output(self):
        """Print a human-readable coverage summary."""
        print("Number of tokenised words in the corpus:",len(self.get_words()))
        print("Number of known words in the corpus:",len(self.get_known_words()))
        print("Coverage: %.2f%%" % self.get_coverage())
        print("Top unknown words in the corpus:")
        print(self.get_top_unknown_words_string())
'''class VocabularyTest(object):
class DIXHandler(ContentHandler):
def __init__(self):
self.alph = None
def startElement(self, tag, attrs):
if tag == "alphabet":
self.tag == "alphabet"
def characters(self, ch):
if self.tag == "alphabet":
self.alph = ch.strip()
def get_alphabet(self, f):
parser = make_parser()
handler = self.DIXHandler()
parser.setContentHandler(handler)
parser.parse(f)
self.alph = hander.alph
def __init__(self, lang1, lang2, transfer, fdir="."):
self.out = StringIO()
self.fdir = fdir
self.lang1 = lang1
self.lang2 = lang2
self.transfer = transfer
self.prefix = prefix = "%s-%s" % (lang1, lang2)
self.basename = basename = "apertium-%s" % self.prefix
self.anadix = pjoin(fdir, "%s.%s.dix" % (basename, lang1))
self.genbin = pjoin(fdir, "%s.autogen.bin" % prefix)
self.get_alphabet(anadix)
self.delim = re.compile("[%s]:(>:)?[%s]" % (self.alph, self.alph))
#TODO whereis binaries
def run(self):
p = Popen(['lt-expand', self.anadix], stdout=PIPE)
dixout = p.communicate()[0]
def save_statistics(self, f):
return NotImplemented
def get_output(self):
return NotImplemented
'''
class AmbiguityTest(object):
    """Measure the average analyser ambiguity of an Apertium .dix file.

    The dictionary is expanded with `lt-expand`; the number of analyses per
    surface form is then tallied to derive an average ambiguity figure.
    """

    # Strips direction markers (":<:" / ":>:") from lt-expand output.
    delim = re.compile(":[<>]:")

    def __init__(self, f, **kwargs):
        self.f = kwargs.get('f', f)
        self.program = "lt-expand"
        whereis([self.program])

    def get_results(self):
        """Run lt-expand on the dictionary; keep its cleaned output lines."""
        proc = Popen([self.program, self.f], stdin=PIPE, stdout=PIPE)
        raw = str(proc.communicate()[0].decode('utf-8'))
        self.results = self.delim.sub(":", raw).split('\n')

    def get_ambiguity(self):
        """Tally analyses per surface form from the expanded output."""
        counts = defaultdict(lambda: 0)
        surface_forms = 0
        total = 0
        for line in self.results:
            surface = line.split(":")[0]
            # First sighting of a surface form bumps the distinct-form count.
            if surface not in counts:
                surface_forms += 1
            counts[surface] += 1
            total += 1
        self.h = counts
        self.surface_forms = surface_forms
        self.total = total
        self.average = float(total) / float(surface_forms)

    def run(self):
        self.get_results()
        self.get_ambiguity()
        return 0

    def save_statistics(self, f):
        """Append this run's figures to the statistics file *f*."""
        stats = Statistics(f)
        fck = checksum(open(self.f, 'rb').read())
        stats.add_ambiguity(self.f, fck, self.surface_forms, self.total, self.average)
        stats.write()

    def get_output(self):
        print("Total surface forms: %d" % self.surface_forms)
        print("Total analyses: %d" % self.total)
        print("Average ambiguity: %.2f" % self.average)
class HfstTest(object):
    """Run yes/no generation and analysis tests against HFST transducers.

    Configuration comes from a YAML file: a "Config" section (App, Gen,
    Morph per language section) and a "Tests" section mapping lexical forms
    to surface forms.  Generation and analysis lookups run in parallel
    subprocesses.
    """

    class AllOutput(StringIO):
        """Base reporter: buffers the report text in memory."""

        def get_output(self):
            return self.getvalue()

        def final_result(self, hfst):
            text = "Total passes: %d, Total fails: %d, Total: %d\n"
            self.write(colourise(text % (hfst.passes, hfst.fails, hfst.fails+hfst.passes), 2))

    class NormalOutput(AllOutput):
        """Verbose reporter: a line per pass/fail plus per-test summaries."""

        def title(self, text):
            self.write(colourise("-"*len(text)+'\n', 1))
            self.write(colourise(text+'\n', 1))
            self.write(colourise("-"*len(text)+'\n', 1))

        def success(self, l, r):
            self.write(colourise("[PASS] %s => %s\n" % (l, r)))

        def failure(self, form, err, errlist):
            self.write(colourise("[FAIL] %s => %s: %s\n" % (form, err, ", ".join(errlist))))

        def result(self, title, test, counts):
            p = counts["Pass"]
            f = counts["Fail"]
            text = "Test %d - Passes: %d, Fails: %d, Total: %d\n\n"
            self.write(colourise(text % (test, p, f, p+f), 2))

    class CompactOutput(AllOutput):
        """Quiet reporter: one [PASS]/[FAIL] summary line per test."""

        def title(self, *args):
            pass

        def success(self, *args):
            pass

        def failure(self, *args):
            pass

        def result(self, title, test, counts):
            p = counts["Pass"]
            f = counts["Fail"]
            out = "%s %d/%d/%d" % (title, p, f, p+f)
            if counts["Fail"] > 0:
                self.write(colourise("[FAIL] %s\n" % out))
            else:
                self.write(colourise("[PASS] %s\n" % out))

    def __init__(self, **kwargs):
        """kwargs mirror the CLI options: test_file, section, test, compact,
        verbose, colour, surface, lexical, hide_pass, hide_fail,
        ignore_analyses."""
        self.args = dict(kwargs)
        self.fails = 0
        self.passes = 0
        # Per-test {"Pass": n, "Fail": n} counters, in execution order.
        self.count = OrderedDict()
        self.load_config()

    def run(self):
        self.run_tests(self.args['test'])
        return 0

    def load_config(self):
        """Read the YAML test file; set program, FST paths, reporter, tests."""
        global colourise
        f = yaml.load(open(self.args['test_file'][0]), _OrderedDictYAMLLoader)
        section = self.args['section'][0]
        if not section in f["Config"]:
            raise AttributeError("'%s' not found in Config of test file." % section)
        self.program = f["Config"][section].get("App", "hfst-lookup")
        whereis([self.program])
        self.gen = f["Config"][section].get("Gen", None)
        self.morph = f["Config"][section].get("Morph", None)
        if self.gen == self.morph == None:
            raise AttributeError("One of Gen or Morph must be configured.")
        for i in (self.gen, self.morph):
            if i and not os.path.isfile(i):
                raise IOError("File %s does not exist." % i)
        if self.args.get('compact'):
            self.out = HfstTest.CompactOutput()
        else:
            self.out = HfstTest.NormalOutput()
        if self.args.get('verbose'):
            self.out.write("`%s` will be used for parsing dictionaries.\n" % self.program)
        self.tests = f["Tests"]
        # Normalise every expected value to a list of forms.
        for test in self.tests:
            for key, val in self.tests[test].items():
                self.tests[test][key] = string_to_list(val)
        # When colour is disabled, rebind the module-level colouriser to a
        # pass-through (affects the whole module).
        if not self.args.get('colour'):
            colourise = lambda x, y=None: x

    def run_tests(self, data=None):
        """Run one named test (data == [name]) or all tests (data is None)."""
        # Neither direction requested means both.
        if self.args.get('surface') == self.args.get('lexical') == False:
            self.args['surface'] = self.args['lexical'] = True
        if(data != None):
            self.parse_fsts(self.tests[data[0]])
            if self.args.get('lexical'): self.run_test(data[0], True)
            if self.args.get('surface'): self.run_test(data[0], False)
        else:
            tests = {}
            for t in self.tests:
                tests.update(self.tests[t])
            self.parse_fsts(tests)
            for t in self.tests:
                if self.args.get('lexical'): self.run_test(t, True)
                if self.args.get('surface'): self.run_test(t, False)
        if self.args.get('verbose'):
            self.out.final_result(self)

    def parse_fsts(self, tests):
        """Look up all test items in both FSTs, in parallel subprocesses.

        self.results["gen"|"morph"] receives either the parsed output dict
        or, on subprocess failure, the raw error string.
        """
        invtests = invert_dict(tests)
        manager = Manager()
        self.results = manager.dict({"gen": {}, "morph": {}})

        def parser(self, d, f, tests):
            # Worker: feed all keys to the lookup program at once.
            keys = tests.keys()
            app = Popen([self.program, f], stdin=PIPE, stdout=PIPE, stderr=PIPE)
            args = '\n'.join(keys) + '\n'
            res = str(app.communicate(args.encode('utf-8'))[0].decode('utf-8')).split('\n\n')
            if app.returncode > 0:
                self.results[d] = res[0]
            else:
                self.results[d] = self.parse_fst_output(res)

        gen = Process(target=parser, args=(self, "gen", self.gen, tests))
        gen.daemon = True
        gen.start()
        if self.args.get('verbose'):
            self.out.write("Generating...\n")
        morph = Process(target=parser, args=(self, "morph", self.morph, invtests))
        morph.daemon = True
        morph.start()
        if self.args.get('verbose'):
            self.out.write("Morphing...\n")
        gen.join()
        morph.join()
        if self.args.get('verbose'):
            self.out.write("Done!\n")

    def run_test(self, data, is_lexical):
        """Compare expected vs. actual lookups for one named test, in one
        direction: lexical->surface (generation) or surface->lexical
        (analysis)."""
        if is_lexical:
            desc = "Lexical/Generation"
            f = "gen"
            tests = self.tests[data]
        else: #surface
            desc = "Surface/Analysis"
            f = "morph"
            tests = invert_dict(self.tests[data])
        # A plain string in results means the lookup subprocess failed.
        if isinstance(self.results[f], str):
            raise LookupError('%s had an error:\n%s' % (self.program, self.results[f]))
        c = len(self.count)
        d = "%s (%s)" % (data, desc)
        title = "Test %d: %s" % (c, d)
        self.out.title(title)
        self.count[d] = {"Pass": 0, "Fail": 0}
        for test, forms in tests.items():
            expected_results = set(forms)
            actual_results = set(self.results[f][test])
            invalid = set()
            missing = set()
            success = set()
            passed = False
            # Expected forms the FST never produced.
            for form in expected_results:
                if not form in actual_results:
                    invalid.add(form)
            # FST output absent from the expected forms.
            for form in actual_results:
                if not form in expected_results:
                    missing.add(form)
            for form in actual_results:
                if not form in (invalid | missing):
                    passed = True
                    success.add(form)
                    self.count[d]["Pass"] += 1
                    if not self.args.get('hide_pass'):
                        self.out.success(test, form)
            if not self.args.get('hide_fail'):
                if len(invalid) > 0:
                    self.out.failure(test, "Invalid test item", invalid)
                    self.count[d]["Fail"] += len(invalid)
                # ignore_analyses suppresses extra-output failures when at
                # least one expected form did match.
                if len(missing) > 0 and \
                   (not self.args.get('ignore_analyses') or not passed):
                    self.out.failure(test, "Unexpected output", missing)
                    self.count[d]["Fail"] += len(missing)
        self.out.result(title, c, self.count[d])
        self.passes += self.count[d]["Pass"]
        self.fails += self.count[d]["Fail"]

    def parse_fst_output(self, fst):
        """Turn raw tab-separated lookup output into {input: set(results)}."""
        parsed = {}
        for item in fst:
            res = item.replace('\r\n','\n').replace('\r','\n').split('\n')
            for i in res:
                if i.strip() != '':
                    results = i.split('\t')
                    key = results[0].strip()
                    if not key in parsed:
                        parsed[key] = set()
                    # This test is needed because xfst's lookup
                    # sometimes output strings like
                    # bearkoe\tbearkoe\t+N+Sg+Nom, instead of the expected
                    # bearkoe\tbearkoe+N+Sg+Nom
                    if len(results) > 2 and results[2][0] == '+':
                        parsed[key].add(results[1].strip() + results[2].strip())
                    else:
                        parsed[key].add(results[1].strip())
        return parsed

    def save_statistics(self, f):
        """Record this run's counters in the statistics file *f*."""
        stats = Statistics(f)
        stats.add_hfst(self.args['test_file'][0], self.gen, self.morph, self.count, self.passes, self.fails)
        stats.write()

    def get_output(self):
        print(self.out.get_output())
# SUPPORT FUNCTIONS
def string_to_list(data):
    """Normalise *data* to list form: wrap a single string in a list,
    reject bytes, and pass any other value through unchanged."""
    if isinstance(data, bytes):
        raise TypeError("Function does not accept bytes as input.")
    if isinstance(data, str):
        return [data]
    return data
def invert_dict(data):
    """Invert a mapping: each value becomes a key whose value is the set of
    original keys that contained it.  A plain-string value counts as a
    single item; bytes values are rejected.  Insertion order of the first
    sighting of each value is preserved."""
    inverted = OrderedDict()
    for key, val in data.items():
        if isinstance(val, bytes):
            raise TypeError("Function does not accept bytes as input.")
        items = [val] if isinstance(val, str) else val
        for item in items:
            inverted.setdefault(item, set()).add(key)
    return inverted
def colourise(string, opt=None):
    """Return *string* decorated with ANSI colour escapes.

    opt falsy: highlight markers in place ([PASS] green, [FAIL] red,
    arrows and colons blue).
    opt == 1: whole string in light blue (section titles).
    opt == 2: colour the counters of a "Passes/Fails/Total" summary line.
    Any other opt returns None, as before.
    """
    #TODO per class, make into a class too
    RESET = "\033[m"
    RED = "\033[1;31m"
    GREEN = "\033[0;32m"
    BLUE = "\033[0;34m"
    LIGHT_BLUE = "\033[0;36m"

    def tint(code, s="", r=RESET):
        return "%s%s%s" % (code, s, r)

    if not opt:
        out = string
        out = out.replace("=>", tint(BLUE, "=>"))
        out = out.replace("<=", tint(BLUE, "<="))
        out = out.replace(":", tint(BLUE, ":"))
        out = out.replace("[PASS]", tint(GREEN, "[PASS]"))
        out = out.replace("[FAIL]", tint(RED, "[FAIL]"))
        return out
    if opt == 1:
        return tint(LIGHT_BLUE, string)
    if opt == 2:
        # Open a colour right after each label and close it before the
        # following ", " separator; close once more at the very end.
        out = string.replace('asses: ', 'asses: ' + GREEN)
        out = out.replace('ails: ', 'ails: ' + RED)
        out = out.replace(', ', RESET + ', ')
        out = out.replace('otal: ', 'otal: ' + LIGHT_BLUE)
        return out + RESET
# SUPPORT CLASSES
# NOTE(review): this shadows the builtin LookupError within the module.
# HfstTest.run_test raises it, so renaming would change the module's public
# surface; documented here instead.
class LookupError(Exception):
    """Raised when an FST lookup subprocess reports an error."""
    pass
# Courtesy of https://gist.github.com/844388. Thanks!
class _OrderedDictYAMLLoader(yaml.Loader):
    """A YAML loader that loads mappings into ordered dictionaries."""

    def __init__(self, *args, **kwargs):
        yaml.Loader.__init__(self, *args, **kwargs)
        # Route both plain maps and ordered maps through our constructor.
        self.add_constructor('tag:yaml.org,2002:map', type(self).construct_yaml_map)
        self.add_constructor('tag:yaml.org,2002:omap', type(self).construct_yaml_map)

    def construct_yaml_map(self, node):
        """Yield the (initially empty) OrderedDict first so recursive and
        anchored nodes can refer to it, then fill it in."""
        data = OrderedDict()
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_mapping(self, node, deep=False):
        """Build an OrderedDict from a mapping node, keeping document order.

        Raises yaml.constructor.ConstructorError for non-mapping nodes and
        for unhashable keys.
        """
        if isinstance(node, yaml.MappingNode):
            self.flatten_mapping(node)
        else:
            raise yaml.constructor.ConstructorError(None, None,
                'expected a mapping node, but found %s' % node.id, node.start_mark)
        mapping = OrderedDict()
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            try:
                hash(key)
            except TypeError as exc:
                raise yaml.constructor.ConstructorError('while constructing a mapping',
                    node.start_mark, 'found unacceptable key (%s)' % exc, key_node.start_mark)
            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping
Error checking for aq-hfsttest
# -*- coding: utf-8 -*-
import os.path, re, yaml
pjoin = os.path.join
from collections import defaultdict, Counter, OrderedDict
import xml.etree.cElementTree as etree
import urllib.request
from multiprocessing import Process, Manager
from subprocess import Popen, PIPE
from io import StringIO
from apertium import whereis, destxt, retxt, checksum
from apertium.quality import Statistics
ARROW = "\u2192"
class RegressionTest(object):
    """Runs the {{test|lang|input|expected}} cases found in a MediaWiki
    Special:Export XML dump through an apertium mode and counts passes."""

    # Matches {{test|language|source text|expected translation}} templates.
    wrg = re.compile(r"{{test\|(.*)\|(.*)\|(.*)}}")
    ns = "{http://www.mediawiki.org/xml/export-0.3/}"
    program = "apertium"

    def __init__(self, url=None, mode=None, directory=".", **kwargs):
        """Parse the wiki dump at *url* and collect the test templates.

        url -- http(s) URL or local path of a Special:Export page
        mode -- apertium translation mode to test
        directory -- directory holding the compiled language pair
        """
        url = kwargs.get('url', url)
        mode = kwargs.get('mode', mode)
        directory = kwargs.get('directory', directory)
        if None in (url, mode):
            raise TypeError("Url or mode parameter missing.")
        whereis([self.program])
        if not "Special:Export" in url:
            print("Warning: URL did not contain Special:Export.")
        self.mode = mode
        self.directory = directory
        if url.startswith('http'):
            self.tree = etree.parse(urllib.request.urlopen(url))
        else:
            self.tree = etree.parse(open(url))
        self.passes = 0
        self.total = 0
        text = None
        # getiterator() was removed in Python 3.9; iter() is the supported API.
        for e in self.tree.getroot().iter():
            if e.tag == self.ns + "title":
                self.title = e.text
            if e.tag == self.ns + "revision":
                self.revision = e[0].text  # should be <id>
            if e.tag == self.ns + "text":
                text = e.text
        if not text:
            raise AttributeError("No text element?")
        self.tests = defaultdict(OrderedDict)
        # Search each line once (previous code ran the regex twice per line).
        matches = (self.wrg.search(j) for j in text.split('\n'))
        for i in (m for m in matches if m):
            lang, left, right = i.group(1), i.group(2), i.group(3)
            if not left.endswith('.'):
                left += '[_].'
            self.tests[lang.strip()][left.strip()] = right.strip()
        self.out = StringIO()

    def run(self):
        """Translate every collected test; returns 0 or apertium's error code."""
        for side in self.tests:
            self.out.write("Now testing: %s\n" % side)
            args = '\n'.join(self.tests[side].keys())
            app = Popen([self.program, '-d', self.directory, self.mode], stdin=PIPE, stdout=PIPE, stderr=PIPE)
            # Feed input through communicate() so stdin is written and closed
            # in one step; a separate stdin.write() can deadlock on large input.
            res = app.communicate(args.encode('utf-8'))[0]
            self.results = str(res.decode('utf-8')).split('\n')
            if app.returncode > 0:
                return app.returncode
            for n, test in enumerate(self.tests[side].items()):
                if n >= len(self.results):
                    # More tests than results; skip the unanswered remainder.
                    continue
                res = self.results[n].split("[_]")[0].strip()
                orig = test[0].split("[_]")[0].strip()
                targ = test[1].strip()
                self.out.write("%s\t %s\n" % (self.mode, orig))
                if res == targ:
                    self.out.write("WORKS\t %s\n" % res)
                    self.passes += 1
                else:
                    self.out.write("\t- %s\n" % targ)
                    self.out.write("\t+ %s\n" % res)
                self.total += 1
                self.out.write('\n')
        return 0

    def get_passes(self):
        """Number of tests whose output matched the expected translation."""
        return self.passes

    def get_fails(self):
        """Number of tests that did not match."""
        return self.total - self.passes

    def get_total(self):
        """Total number of tests executed."""
        return self.total

    def get_total_percent(self):
        """Pass rate as a percentage string; '0.00' when nothing was run."""
        if self.total == 0:
            # Guard against ZeroDivisionError before run() / empty test sets.
            return "0.00"
        return "%.2f" % (float(self.passes) / float(self.total) * 100)

    def save_statistics(self, f):
        """Append this run's results (page title + revision) to stats file *f*."""
        stats = Statistics(f)
        ns = "{http://www.mediawiki.org/xml/export-0.3/}"
        page = self.tree.getroot().find(ns + 'page')
        rev = page.find(ns + 'revision').find(ns + 'id').text
        title = page.find(ns + 'title').text
        stats.add_regression(title, rev, self.passes, self.total, self.get_total_percent())
        stats.write()

    def get_output(self):
        """Print the accumulated per-test report plus a summary line."""
        print(self.out.getvalue())
        percent = 0
        if self.total > 0:
            percent = float(self.passes) / float(self.total) * 100
        print("Passes: %d/%d, Success rate: %.2f%%" % (self.passes, self.total, percent))
class CoverageTest(object):
    """Measures analyser coverage of a corpus: the share of tokens that
    receive at least one analysis from an lt-proc dictionary."""

    def __init__(self, f=None, dct=None, **kwargs):
        """f -- path of the corpus text file; dct -- compiled lt-proc binary."""
        f = kwargs.get('f', f)
        dct = kwargs.get('dct', dct)
        if None in (f, dct):
            raise TypeError("f or dct parameter missing.")
        whereis(["lt-proc"])  # , "apertium-destxt", "apertium-retxt"):
        self.fn = f  # TODO: make sure file exists
        self.f = open(f, 'r')
        self.dct = dct
        self.result = None

    def run(self):
        """Tokenise the corpus through lt-proc; caches the result."""
        # Compare against None so a legitimately empty result is not re-run.
        if self.result is None:
            delim = re.compile(r"\$[^^]*\^")
            f = self.f.read()
            self.f.seek(0)
            output = destxt(f).encode('utf-8')
            proc = Popen(['lt-proc', self.dct], stdin=PIPE, stdout=PIPE)
            output = str(proc.communicate(output)[0].decode('utf-8'))
            output = retxt(output)
            # Put every ^...$ token on its own line.
            output = delim.sub("$\n^", output)
            self.result = output.split('\n')
        return 0

    def get_words(self):
        """All tokenised words in the corpus."""
        if self.result is None:
            self.run()
        return [ i.strip() for i in self.result ]

    def get_known_words(self):
        """Words that received an analysis (no '*' marker)."""
        if self.result is None:
            self.run()
        return [ i.strip() for i in self.result if not '*' in i ]

    def get_unknown_words(self):
        """Words lt-proc could not analyse ('*' marker)."""
        if self.result is None:
            self.run()
        return [ i.strip() for i in self.result if '*' in i ]

    def get_top_unknown_words(self, c=20):
        """The *c* most frequent unknown words as (word, count) pairs."""
        return Counter(self.get_unknown_words()).most_common(c)

    def get_top_unknown_words_string(self, c=20):
        """The top unknown words formatted one per line."""
        out = StringIO()
        for word, count in self.get_top_unknown_words(c):
            out.write("%d\t %s\n" % (count, word))
        return out.getvalue()

    def get_coverage(self):
        """Percentage of tokens with a known analysis; 0.0 for an empty corpus."""
        known = float(len(self.get_known_words()))
        total = float(len(self.get_words()))
        if total == 0:
            # Avoid ZeroDivisionError on an empty corpus.
            return 0.0
        return known / total * 100

    def save_statistics(self, f):
        """Append coverage figures and checksums to the statistics file *f*."""
        stats = Statistics(f)
        wrx = re.compile(r"\^(.*)/")
        cfn = os.path.basename(self.fn)
        dfn = os.path.basename(self.dct)
        cck = checksum(self.f.read())
        # Close the dictionary handle instead of leaking it.
        with open(self.dct) as dct_file:
            dck = checksum(dct_file.read())
        cov = "%.2f" % self.get_coverage()
        words = len(self.get_words())
        kwords = len(self.get_known_words())
        ukwords = len(self.get_unknown_words())
        topukwtmp = self.get_top_unknown_words()
        topukw = []
        for word, count in topukwtmp:
            topukw.append((wrx.search(word).group(1), count))
        stats.add_coverage(cfn, dfn, cck, dck, cov, words, kwords, ukwords, topukw)
        stats.write()

    def get_output(self):
        """Print a human-readable coverage summary."""
        print("Number of tokenised words in the corpus:",len(self.get_words()))
        print("Number of known words in the corpus:",len(self.get_known_words()))
        print("Coverage: %.2f%%" % self.get_coverage())
        print("Top unknown words in the corpus:")
        print(self.get_top_unknown_words_string())
# The VocabularyTest below is parked inside a module-level string (dead code):
# it references undefined names (ContentHandler, make_parser, anadix, hander)
# and was never finished.  Kept verbatim until it can be resurrected.
'''class VocabularyTest(object):
class DIXHandler(ContentHandler):
def __init__(self):
self.alph = None
def startElement(self, tag, attrs):
if tag == "alphabet":
self.tag == "alphabet"
def characters(self, ch):
if self.tag == "alphabet":
self.alph = ch.strip()
def get_alphabet(self, f):
parser = make_parser()
handler = self.DIXHandler()
parser.setContentHandler(handler)
parser.parse(f)
self.alph = hander.alph
def __init__(self, lang1, lang2, transfer, fdir="."):
self.out = StringIO()
self.fdir = fdir
self.lang1 = lang1
self.lang2 = lang2
self.transfer = transfer
self.prefix = prefix = "%s-%s" % (lang1, lang2)
self.basename = basename = "apertium-%s" % self.prefix
self.anadix = pjoin(fdir, "%s.%s.dix" % (basename, lang1))
self.genbin = pjoin(fdir, "%s.autogen.bin" % prefix)
self.get_alphabet(anadix)
self.delim = re.compile("[%s]:(>:)?[%s]" % (self.alph, self.alph))
#TODO whereis binaries
def run(self):
p = Popen(['lt-expand', self.anadix], stdout=PIPE)
dixout = p.communicate()[0]
def save_statistics(self, f):
return NotImplemented
def get_output(self):
return NotImplemented
'''
class AmbiguityTest(object):
    """Measures the average ambiguity (analyses per surface form) of a
    dictionary expanded with lt-expand."""

    # Collapses the :<: / :>: direction markers into a plain ':' separator.
    delim = re.compile(":[<>]:")

    def __init__(self, f, **kwargs):
        """f -- path of the dictionary to expand."""
        self.f = kwargs.get('f', f)
        self.program = "lt-expand"
        whereis([self.program])

    def get_results(self):
        """Run lt-expand over the dictionary and store its output lines."""
        app = Popen([self.program, self.f], stdin=PIPE, stdout=PIPE)
        res = str(app.communicate()[0].decode('utf-8'))
        self.results = self.delim.sub(":", res).split('\n')

    def get_ambiguity(self):
        """Tally surface forms and analyses from self.results."""
        self.h = defaultdict(int)
        self.surface_forms = 0
        self.total = 0
        for line in self.results:
            if not line.strip():
                # Skip blank lines: split('\n') leaves a trailing empty entry
                # that used to be counted as a surface form of its own.
                continue
            row = line.split(":")
            if not row[0] in self.h:
                self.surface_forms += 1
            self.h[row[0]] += 1
            self.total += 1
        # Guard against ZeroDivisionError on empty output.
        if self.surface_forms == 0:
            self.average = 0.0
        else:
            self.average = float(self.total) / float(self.surface_forms)

    def run(self):
        """Expand the dictionary and compute the ambiguity figures."""
        self.get_results()
        self.get_ambiguity()
        return 0

    def save_statistics(self, f):
        """Append the ambiguity figures to the statistics file *f*."""
        stats = Statistics(f)
        fck = checksum(open(self.f, 'rb').read())
        stats.add_ambiguity(self.f, fck, self.surface_forms, self.total, self.average)
        stats.write()

    def get_output(self):
        """Print a human-readable ambiguity summary."""
        print("Total surface forms: %d" % self.surface_forms)
        print("Total analyses: %d" % self.total)
        print("Average ambiguity: %.2f" % self.average)
class HfstTest(object):
    """Runs yaml-defined generation/analysis test cases against hfst
    transducers and reports per-test and total pass/fail counts."""

    class AllOutput(StringIO):
        """Base reporter: accumulates the report text in memory."""
        def get_output(self):
            return self.getvalue()
        def final_result(self, hfst):
            text = "Total passes: %d, Total fails: %d, Total: %d\n"
            self.write(colourise(text % (hfst.passes, hfst.fails, hfst.fails+hfst.passes), 2))

    class NormalOutput(AllOutput):
        """Verbose reporter: one line per pass/fail plus per-test summaries."""
        def title(self, text):
            self.write(colourise("-"*len(text)+'\n', 1))
            self.write(colourise(text+'\n', 1))
            self.write(colourise("-"*len(text)+'\n', 1))
        def success(self, l, r):
            self.write(colourise("[PASS] %s => %s\n" % (l, r)))
        def failure(self, form, err, errlist):
            self.write(colourise("[FAIL] %s => %s: %s\n" % (form, err, ", ".join(errlist))))
        def result(self, title, test, counts):
            p = counts["Pass"]
            f = counts["Fail"]
            text = "Test %d - Passes: %d, Fails: %d, Total: %d\n\n"
            self.write(colourise(text % (test, p, f, p+f), 2))

    class CompactOutput(AllOutput):
        """Quiet reporter: a single [PASS]/[FAIL] line per test set."""
        def title(self, *args):
            pass
        def success(self, *args):
            pass
        def failure(self, *args):
            pass
        def result(self, title, test, counts):
            p = counts["Pass"]
            f = counts["Fail"]
            out = "%s %d/%d/%d" % (title, p, f, p+f)
            if counts["Fail"] > 0:
                self.write(colourise("[FAIL] %s\n" % out))
            else:
                self.write(colourise("[PASS] %s\n" % out))

    def __init__(self, **kwargs):
        """kwargs carries the parsed CLI options (test_file, section, ...)."""
        self.args = dict(kwargs)
        self.fails = 0
        self.passes = 0
        self.count = OrderedDict()
        self.load_config()

    def run(self):
        """Run the configured tests; returns 0."""
        self.run_tests(self.args['test'])
        return 0

    def load_config(self):
        """Read the yaml test file; configure transducers and the reporter."""
        global colourise
        f = yaml.load(open(self.args['test_file'][0]), _OrderedDictYAMLLoader)
        section = self.args['section'][0]
        if not section in f["Config"]:
            raise AttributeError("'%s' not found in Config of test file." % section)
        self.program = f["Config"][section].get("App", "hfst-lookup")
        whereis([self.program])
        self.gen = f["Config"][section].get("Gen", None)
        self.morph = f["Config"][section].get("Morph", None)
        if self.gen == self.morph == None:
            raise AttributeError("One of Gen or Morph must be configured.")
        for i in (self.gen, self.morph):
            if i and not os.path.isfile(i):
                raise IOError("File %s does not exist." % i)
        if self.args.get('compact'):
            self.out = HfstTest.CompactOutput()
        else:
            self.out = HfstTest.NormalOutput()
        if self.args.get('verbose'):
            self.out.write("`%s` will be used for parsing dictionaries.\n" % self.program)
        self.tests = f["Tests"]
        for test in self.tests:
            for key, val in self.tests[test].items():
                self.tests[test][key] = string_to_list(val)
        if not self.args.get('colour'):
            # Disable colouring globally by swapping in an identity function.
            colourise = lambda x, y=None: x

    def run_tests(self, data=None):
        """Run one named test set (*data*) or, when None, every set."""
        if self.args.get('surface') == self.args.get('lexical') == False:
            self.args['surface'] = self.args['lexical'] = True
        if data is not None:
            self.parse_fsts(self.tests[data[0]])
            if self.args.get('lexical'): self.run_test(data[0], True)
            if self.args.get('surface'): self.run_test(data[0], False)
        else:
            tests = {}
            for t in self.tests:
                tests.update(self.tests[t])
            self.parse_fsts(tests)
            for t in self.tests:
                if self.args.get('lexical'): self.run_test(t, True)
                if self.args.get('surface'): self.run_test(t, False)
        if self.args.get('verbose'):
            self.out.final_result(self)

    def parse_fsts(self, tests):
        """Look up all test items in both transducers, in parallel processes."""
        invtests = invert_dict(tests)
        manager = Manager()
        self.results = manager.dict({"gen": {}, "morph": {}})
        def parser(self, d, f, tests):
            keys = tests.keys()
            app = Popen([self.program, f], stdin=PIPE, stdout=PIPE, stderr=PIPE)
            args = '\n'.join(keys) + '\n'
            res = str(app.communicate(args.encode('utf-8'))[0].decode('utf-8')).split('\n\n')
            if app.returncode > 0:
                # Store the raw error text; run_test() raises on it later.
                self.results[d] = res[0]
            else:
                self.results[d] = self.parse_fst_output(res)
        gen = Process(target=parser, args=(self, "gen", self.gen, tests))
        gen.daemon = True
        gen.start()
        if self.args.get('verbose'):
            self.out.write("Generating...\n")
        morph = Process(target=parser, args=(self, "morph", self.morph, invtests))
        morph.daemon = True
        morph.start()
        if self.args.get('verbose'):
            self.out.write("Morphing...\n")
        gen.join()
        morph.join()
        if self.args.get('verbose'):
            self.out.write("Done!\n")

    def run_test(self, data, is_lexical):
        """Compare expected vs actual lookups for one test set and direction."""
        if is_lexical:
            desc = "Lexical/Generation"
            f = "gen"
            tests = self.tests[data]
        else:  # surface
            desc = "Surface/Analysis"
            f = "morph"
            tests = invert_dict(self.tests[data])
        if isinstance(self.results[f], str):
            raise LookupError('%s had an error:\n%s' % (self.program, self.results[f]))
        c = len(self.count)
        d = "%s (%s)" % (data, desc)
        title = "Test %d: %s" % (c, d)
        self.out.title(title)
        self.count[d] = {"Pass": 0, "Fail": 0}
        # (Removed debug leftover that printed self.results and sys.exit()ed
        # on the first iteration, aborting the whole test run.)
        for test, forms in tests.items():
            expected_results = set(forms)
            actual_results = set(self.results[f][test])
            invalid = set()
            missing = set()
            success = set()
            passed = False
            for form in expected_results:
                if not form in actual_results:
                    invalid.add(form)
            for form in actual_results:
                if not form in expected_results:
                    missing.add(form)
            for form in actual_results:
                if not form in (invalid | missing):
                    passed = True
                    success.add(form)
                    self.count[d]["Pass"] += 1
                    if not self.args.get('hide_pass'):
                        self.out.success(test, form)
            if not self.args.get('hide_fail'):
                if len(invalid) > 0:
                    self.out.failure(test, "Invalid test item", invalid)
                    self.count[d]["Fail"] += len(invalid)
                if len(missing) > 0 and \
                   (not self.args.get('ignore_analyses') or not passed):
                    self.out.failure(test, "Unexpected output", missing)
                    self.count[d]["Fail"] += len(missing)
        self.out.result(title, c, self.count[d])
        self.passes += self.count[d]["Pass"]
        self.fails += self.count[d]["Fail"]

    def parse_fst_output(self, fst):
        """Turn raw lookup output into {input: set(analyses)}."""
        parsed = {}
        for item in fst:
            res = item.replace('\r\n','\n').replace('\r','\n').split('\n')
            for i in res:
                if i.strip() != '':
                    results = i.split('\t')
                    key = results[0].strip()
                    if not key in parsed:
                        parsed[key] = set()
                    # This test is needed because xfst's lookup
                    # sometimes output strings like
                    # bearkoe\tbearkoe\t+N+Sg+Nom, instead of the expected
                    # bearkoe\tbearkoe+N+Sg+Nom
                    # startswith() also tolerates an empty third field, which
                    # previously raised IndexError on results[2][0].
                    if len(results) > 2 and results[2].startswith('+'):
                        parsed[key].add(results[1].strip() + results[2].strip())
                    else:
                        parsed[key].add(results[1].strip())
        return parsed

    def save_statistics(self, f):
        """Append this run's HFST test results to the statistics file *f*."""
        stats = Statistics(f)
        stats.add_hfst(self.args['test_file'][0], self.gen, self.morph, self.count, self.passes, self.fails)
        stats.write()

    def get_output(self):
        """Print the report accumulated by the output writer."""
        print(self.out.get_output())
# SUPPORT FUNCTIONS
def string_to_list(data):
    """Normalise *data* to a list: a str becomes a one-element list,
    bytes are rejected, anything else is returned unchanged."""
    if isinstance(data, bytes):
        raise TypeError("Function does not accept bytes as input.")
    return [data] if isinstance(data, str) else data
def invert_dict(data):
    """Swap a mapping around: every value maps back to the set of keys
    that listed it.  Values may be single strings or lists of strings."""
    inverted = OrderedDict()
    for original_key, values in data.items():
        for item in string_to_list(values):
            if item not in inverted:
                inverted[item] = set()
            inverted[item].add(original_key)
    return inverted
def colourise(string, opt=None):
    """Decorate *string* with ANSI colour escapes.

    opt=None colours the test markers inside the string, opt=1 paints the
    whole string light blue, opt=2 colours the Passes/Fails/Total counters.
    """
    # TODO per class, make into a class too
    def make(code):
        def colour(s="", r="\033[m"):
            return "%s%s%s" % (code, s, r)
        return colour
    red, green, orange = make("\033[1;31m"), make("\033[0;32m"), make("\033[0;33m")
    yellow, blue, light_blue = make("\033[1;33m"), make("\033[0;34m"), make("\033[0;36m")
    def reset(s=""):
        return "\033[m%s" % s
    if not opt:
        result = string
        # Same order as before: arrows, colon, then the PASS/FAIL markers.
        result = result.replace("=>", blue("=>"))
        result = result.replace("<=", blue("<="))
        result = result.replace(":", blue(":"))
        result = result.replace("[PASS]", green("[PASS]"))
        result = result.replace("[FAIL]", red("[FAIL]"))
        return result
    if opt == 1:
        return light_blue(string)
    if opt == 2:
        result = string.replace('asses: ', 'asses: %s' % green(r=""))
        result = result.replace('ails: ', 'ails: %s' % red(r=""))
        result = result.replace(', ', reset(', '))
        result = result.replace('otal: ', 'otal: %s' % light_blue(r=""))
        return "%s%s" % (result, reset())
# SUPPORT CLASSES
# NOTE(review): this shadows the builtin LookupError.  Kept as-is because
# callers raise/catch this name; renaming would change the exception type.
class LookupError(Exception):
    """Raised when an hfst lookup subprocess reports an error."""
    pass
# Courtesy of https://gist.github.com/844388. Thanks!
class _OrderedDictYAMLLoader(yaml.Loader):
    """A YAML loader that loads mappings into ordered dictionaries."""
    def __init__(self, *args, **kwargs):
        yaml.Loader.__init__(self, *args, **kwargs)
        # Route both plain maps and !!omap nodes through the ordered constructor.
        self.add_constructor('tag:yaml.org,2002:map', type(self).construct_yaml_map)
        self.add_constructor('tag:yaml.org,2002:omap', type(self).construct_yaml_map)
    def construct_yaml_map(self, node):
        # Two-phase construction: yield the still-empty dict first so PyYAML
        # can resolve aliases/recursive references, then fill it in.
        data = OrderedDict()
        yield data
        value = self.construct_mapping(node)
        data.update(value)
    def construct_mapping(self, node, deep=False):
        """Build an OrderedDict from a mapping node, preserving key order."""
        if isinstance(node, yaml.MappingNode):
            self.flatten_mapping(node)
        else:
            raise yaml.constructor.ConstructorError(None, None,
                'expected a mapping node, but found %s' % node.id, node.start_mark)
        mapping = OrderedDict()
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            try:
                # Keys must be hashable to be usable as dict keys.
                hash(key)
            except TypeError as exc:
                raise yaml.constructor.ConstructorError('while constructing a mapping',
                    node.start_mark, 'found unacceptable key (%s)' % exc, key_node.start_mark)
            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping
|
import urllib2
import json
import sys
def main():
    """Entry point: read the selftest URL from argv and run the check."""
    if len(sys.argv) <= 1:
        # Fixed usage text: the original dropped the colon and closing quote.
        raise Exception("No URL to selftest provided. Run script like this: 'python check_selftest.py <url to selftest>'")
    selftest_url = sys.argv[1]
    perform_selftest(selftest_url)
def perform_selftest(url):
    """Fetch the selftest JSON at *url*; exit 0 if all checks pass,
    otherwise print the failing checks and exit -1."""
    selftest = json.loads(urllib2.urlopen(url).read())
    if selftest['aggregate_result'] == 0:
        sys.exit(0)
    print("Failed checks on selftest (%s):\n" % url)
    print(gatherErrorMessages(selftest))
    sys.exit(-1)
def gatherErrorMessages(selftest):
    """Build a readable report describing every failing check (result == 1)."""
    template = "Failed check: %s\n Message: %s\n Endpoint: %s"
    failing = [c for c in selftest["checks"] if c["result"] == 1]
    report = ""
    for check in failing:
        report += template % (check["description"], check["errorMessage"], check["endpoint"]) + "\n\n"
    return report
main()
initial commit
import urllib2
import json
import sys
def main():
    """Entry point: expects the selftest URL as the first CLI argument."""
    if len(sys.argv) <= 1:
        raise Exception("No URL to selftest provided. Run script like this: 'python check_selftest.py <url to selftest>'")
    selftest_url = sys.argv[1]
    perform_selftest(selftest_url)
def perform_selftest(url):
    """Download the selftest JSON at *url*; exit 0 when every check passes,
    otherwise print the failing checks and exit -1."""
    payload = urllib2.urlopen(url).read()
    selftest = json.loads(payload)
    # Overall verdict: 0 means all individual checks passed.
    aggregate_result = selftest['aggregate_result']
    if aggregate_result == 0:
        sys.exit(0)
    else:
        print("Failed checks on selftest (%s):\n" % url)
        print(gatherErrorMessages(selftest))
        sys.exit(-1)
def gatherErrorMessages(selftest):
    """Return a human-readable block describing each failing check."""
    all_checks = selftest["checks"]
    # A check result of 1 marks a failure.
    failing_checks = filter(lambda x: x["result"] == 1, all_checks)
    error_msgs = ''
    for check in failing_checks:
        error_msg = "Failed check: %s\n Message: %s\n Endpoint: %s" % (check["description"], check["errorMessage"], check["endpoint"])
        error_msgs += error_msg + "\n\n"
    return error_msgs
main()
|
#
# Anna Dehof 2010-03-22
# get a system and add hydrogens
#
# NOTE: Python 2 script (print statements); requires the BALL Python bindings.
import sys
from BALL import *
#### for use in BALLView
#system = getSystems()[0]
#### for command line use
# issue a usage hint if called without parameters
if (len(sys.argv) != 3 ):
    print"Usage: ", sys.argv[0] , " <PDB infile> <PDB outfile>"
    exit()
# open a PDB file with the name of the first argument
file = PDBFile(sys.argv[1])
if (not file):
    # if file does not exist: complain and abort
    print "error opening ", sys.argv[1], " for input."
    exit ()
# create a system and read the contents of the PDB file
system = System()
file.read(system)
file.close()
# print the number of atoms read from the file
print "read ", system.countAtoms(), " atoms."
# now we open a fragment database
print "reading fragment DB..."
fragment_db = FragmentDB("")
# and normalize the atom names, i.e. we convert different
# naming standards to the PDB naming scheme - just in case!
print "normalizing names..."
system.apply(fragment_db.normalize_names)
# now we add any missing hydrogens to the residues
# the data on the hydrogen positions stems from the
# fragment database. However the hydrogen positions
# created in this way are only good estimates
print "creating missing atoms..."
system.apply(fragment_db.add_hydrogens)
print "added ", fragment_db.add_hydrogens.getNumberOfInsertedAtoms(), " atoms"
# now we create the bonds between the atoms (PDB files hardly
# ever contain a complete set of CONECT records)
print "building bonds..."
system.apply(fragment_db.build_bonds)
# now we check whether the model we built is consistent
# The ResidueChecker checks for charges, bond lengths,
# and missing atoms
print "checking the built model..."
checker = ResidueChecker(fragment_db)
system.apply(checker)
# now we create an AMBER force field
print "setting up force field..."
FF= AmberFF()
# we then select all hydrogens (element(H))
# using a specialized processor (Selector)
system.deselect()
FF.setup(system)
selector = Selector("element(H)")
system.apply(selector)
# just for curiosity: check how many atoms we are going
# to optimize
print "optimizing ", FF.getNumberOfMovableAtoms(), " out of ", system.countAtoms(), " atoms"
# now we create a minimizer object that uses a conjugate
# gradient algorithm to optimize the atom positions
minimizer = ConjugateGradientMinimizer()
initial_energy = FF.updateEnergy()
print "initial energy: ", initial_energy , " kJ/mol"
# initialize the minimizer and perform (up to)
# 50 optimization steps
minimizer.setup(FF)
minimizer.setEnergyOutputFrequency(1)
minimizer.minimize(50)
# calculate the terminal energy and print it
terminal_energy = FF.getEnergy()
print "energy before/after minimization: ", initial_energy , "/", terminal_energy, " kJ/mol"
#### for command line use
# write the optimized structure to a file whose
# name is given as the second command line argument
print "writing PBD file ", sys.argv[2]
outfile = PDBFile(sys.argv[2], File.MODE_OUT)
#outfile = PDBFile("/home/HPL/anne/tmp/bpti_out.pdb", File.MODE_OUT)
outfile.write(system)
outfile.close()
# done
Formatting issue
#
# Anna Dehof 2010-03-22
# get a system and add hydrogens
#
import sys
from BALL import *
#### for use in BALLView
#system = getSystems()[0]
#### for command line use
# issue a usage hint if called without parameters
if (len(sys.argv) != 3 ):
print"Usage: ", sys.argv[0] , " <PDB infile> <PDB outfile>"
exit()
# open a PDB file with the name of the first argument
file = PDBFile(sys.argv[1])
if (not file):
# if file does not exist: complain and abort
print "error opening ", sys.argv[1], " for input."
exit ()
# create a system and read the contents of the PDB file
system = System()
file.read(system)
file.close()
# print the number of atoms read from the file
print "read ", system.countAtoms(), " atoms."
# now we open a fragment database
print "reading fragment DB..."
fragment_db = FragmentDB("")
# and normalize the atom names, i.e. we convert different
# naming standards to the PDB naming scheme - just in case!
print "normalizing names..."
system.apply(fragment_db.normalize_names)
# now we add any missing hydrogens to the residues
# the data on the hydrogen positions stems from the
# fragment database. However the hydrogen positions
# created in this way are only good estimates
print "creating missing atoms..."
system.apply(fragment_db.add_hydrogens)
print "added ", fragment_db.add_hydrogens.getNumberOfInsertedAtoms(), " atoms"
# now we create the bonds between the atoms (PDB files hardly
# ever contain a complete set of CONECT records)
print "building bonds..."
system.apply(fragment_db.build_bonds)
# now we check whether the model we built is consistent
# The ResidueChecker checks for charges, bond lengths,
# and missing atoms
print "checking the built model..."
checker = ResidueChecker(fragment_db)
system.apply(checker)
# now we create an AMBER force field
print "setting up force field..."
FF= AmberFF()
# we then select all hydrogens (element(H))
# using a specialized processor (Selector)
system.deselect()
FF.setup(system)
selector = Selector("element(H)")
system.apply(selector)
# just for curiosity: check how many atoms we are going
# to optimize
print "optimizing ", FF.getNumberOfMovableAtoms(), " out of ", system.countAtoms(), " atoms"
# now we create a minimizer object that uses a conjugate
# gradient algorithm to optimize the atom positions
minimizer = ConjugateGradientMinimizer()
initial_energy = FF.updateEnergy()
print "initial energy: ", initial_energy , " kJ/mol"
# initialize the minimizer and perform (up to)
# 50 optimization steps
minimizer.setup(FF)
minimizer.setEnergyOutputFrequency(1)
minimizer.minimize(50)
# calculate the terminal energy and print it
terminal_energy = FF.getEnergy()
print "energy before/after minimization: ", initial_energy , "/", terminal_energy, " kJ/mol"
#### for command line use
# write the optimized structure to a file whose
# name is given as the second command line argument
print "writing PBD file ", sys.argv[2]
outfile = PDBFile(sys.argv[2], File.MODE_OUT)
#outfile = PDBFile("/home/HPL/anne/tmp/bpti_out.pdb", File.MODE_OUT)
outfile.write(system)
outfile.close()
# done
|
"""aperturesynth - a tool for registering and combining series of photographs.
Usage:
aperturesynth [--no-transform] [--out FILE] <images>...
Options:
-h --help Show this help screen.
--out FILE Optional output file. If not specified the output will
be written to a tiff file with same name as the
baseline image with 'transformed_' prepended.
--no-transform Combine images without transforming first. Useful for
visualising the impact of registration.
The first image passed in will be the baseline image to which all following
images will be matched.
"""
import multiprocessing as mp
from skimage import io, img_as_ubyte, img_as_float
from docopt import docopt
import os.path
from .register import Registrator
from .gui import get_windows
def save_image(image, filename):
    """Write *image* to *filename*, converting to uint8 first."""
    converted = img_as_ubyte(image)
    io.imsave(filename, converted)
def load_image(image):
    """Read *image* from disk and return it as a float32 array."""
    raw = io.imread(image)
    return img_as_float(raw).astype('float32')
def process_images(image_list, windows, no_transform=False):
    """Apply the given transformation to each listed image and find the mean.

    Parameters
    ----------
    image_list: list of filepaths
        Locations of images to be loaded and transformed.
    windows:
        Feature windows handed to Registrator for aligning each image to the
        baseline (see gui.get_windows); ignored when no_transform is set.
    no_transform: bool (default False)
        If true, combine images without registering them first. The windows
        variable will be ignored. Useful for visualising the impact
        of the registration process.

    Returns
    -------
    image: MxNx[3]
        The combined image as an ndarray.
    """
    if no_transform:
        baseline = load_image(image_list[0])
        for image in image_list[1:]:
            baseline += load_image(image)
    else:
        # Set up the object to perform the image registration
        baseline = load_image(image_list[0])
        registrator = Registrator(windows, baseline, pad=400)
        for image in image_list[1:]:
            image = load_image(image)
            # registrator returns (registered_image, transform); keep the image.
            baseline += registrator(image)[0]
    baseline /= len(image_list)
    return baseline
def main():
    """Registers and transforms each input image and saves the result."""
    args = docopt(__doc__)
    images = args['<images>']
    output_file = args['--out']
    if output_file is None:
        # Default: transformed_<baseline-stem>.tiff next to the baseline.
        base, _ext = os.path.splitext(images[0])
        directory, stem = os.path.split(base)
        output_file = os.path.join(directory, 'transformed_' + stem + '.tiff')
    if args['--no-transform']:
        output = process_images(images, [], no_transform=True)
    else:
        windows = get_windows(load_image(images[0]))
        output = process_images(images, windows)
    save_image(output, output_file)
Simplify synthesise interface
In preparation for upcoming work, simplify the process for fusing
images. This better decouples the choice of transformation, fusion
and where/how windows are selected.
"""aperturesynth - a tool for registering and combining series of photographs.
Usage:
aperturesynth [--no-transform] [--out FILE] <images>...
Options:
-h --help Show this help screen.
--out FILE Optional output file. If not specified the output will
be written to a tiff file with same name as the
baseline image with 'transformed_' prepended.
--no-transform Combine images without transforming first. Useful for
visualising the impact of registration.
The first image passed in will be the baseline image to which all following
images will be matched.
"""
import multiprocessing as mp
from skimage import io, img_as_ubyte, img_as_float
from docopt import docopt
import os.path
from .register import Registrator
from .gui import get_windows
def save_image(image, filename):
    """Persist *image* at *filename* after forcing uint8 pixel depth."""
    io.imsave(filename, img_as_ubyte(image))
def load_image(image):
    """Load *image* from disk as a float32 ndarray."""
    data = img_as_float(io.imread(image))
    return data.astype('float32')
def register_images(image_list, registrator):
    """Yield the baseline image, then each remaining image registered to it.

    The first file is loaded untransformed; every later file is passed
    through *registrator* before being yielded.
    """
    yield load_image(image_list[0])
    for path in image_list[1:]:
        registered, _transform = registrator(load_image(path))
        # _transform is kept for future operations that examine the transform.
        yield registered
def no_transform(image):
    """Pass the original image through without transformation.

    Returns a (image, None) tuple so the return shape stays compatible
    with callables that also report the transform they applied.
    """
    return image, None
def process_images(image_list, registrator, fusion=None):
    """Combine the listed images after registering them to the first one.

    Parameters
    ----------
    image_list: list of filepaths
        Image files to be loaded and transformed.
    registrator: callable
        Returns the desired transformation of a given image.
    fusion: callable (optional, default=None)
        Returns the fusion of the given images. If not specified the images
        are combined by averaging.

    Returns
    -------
    image: MxNx[3]
        The combined image as an ndarray.
    """
    registered = register_images(image_list, registrator)
    if fusion is not None:
        # Stub for future alternative fusion methods
        return fusion(registered)
    average = sum(registered) / len(image_list)
    return average
def main():
    """Registers and transforms each input image and saves the result."""
    args = docopt(__doc__)
    images = args['<images>']
    output_file = args['--out']
    if output_file is None:
        # Default: transformed_<baseline-stem>.tiff next to the baseline.
        base, _ext = os.path.splitext(images[0])
        directory, stem = os.path.split(base)
        output_file = os.path.join(directory, 'transformed_' + stem + '.tiff')
    if args['--no-transform']:
        registrator = no_transform
    else:
        baseline = load_image(images[0])
        registrator = Registrator(get_windows(baseline), baseline)
    save_image(process_images(images, registrator), output_file)
|
import commands
from config import YAMLConfig
import json
import os
import packetFactory
from packetFactory import SystemMessagePacket
import plugins
try:
import geoip2
except ImportError:
print("[GeoIP] geoip2 library not installed")
try:
import GeoIP
except ImportError:
print("[GeoIP] GeoIP library not installed")
cidr = False
try:
from netaddr import IPAddress
from netaddr import IPNetwork
cidr = True
except ImportError:
print("[GeoIP] netaddr library not installed")
cidrlist = []
geoiplist = []
GeoSettings = YAMLConfig("cfg/pso2proxy.geoip.config.yml",
{'enabled': True, 'geoip1': "/usr/share/GeoIP/GeoIP.dat", 'geoip2': "/var/lib/GeoIP/GeoLite2-Country.mmdb"}, True)
geoipmode = GeoSettings.get_key('enabled')
geoip1db = GeoSettings.get_key('geoip1')
geoip2db = GeoSettings.get_key('geoip2')
geoip2c = None
geoip1c = None
@plugins.on_start_hook
def load_geoiplist():
    """Load the GeoIP whitelist from disk and open the GeoIP databases.

    Creates a blank whitelist file on first run.  When the netaddr library
    is available, also rebuilds the parsed CIDR range list.
    """
    global geoiplist
    # `cidrlist` must be declared global, otherwise the parsed ranges were
    # assigned to a function-local list and silently discarded.
    global cidrlist
    global geoip2c, geoip1c
    if not os.path.exists("cfg/pso2proxy.geoip.json"):
        f = open("cfg/pso2proxy.geoip.json", "w")
        f.write(json.dumps(geoiplist))
        f.close()
        print('[GeoIP] Blank whitelist made.')
    else:
        f = open("cfg/pso2proxy.geoip.json", "r")
        geoiplist = json.loads(f.read())
        f.close()
        print("[GeoIP] Loaded %i geoip entries." % len(geoiplist))
    try:
        # The geoip2 reader takes the GeoLite2 .mmdb database (the legacy
        # .dat path was passed here before, and vice versa below).
        geoip2c = geoip2.database.Reader(geoip2db)
    except (AttributeError, NameError):
        # geoip2 library not installed; the import already printed a warning.
        pass
    except Exception as e:
        print("[GeoIP] GeoIP2 error: {}".format(e))
    # NOTE(review): the legacy database is only opened when GeoIP2 loaded;
    # `is None` (a fallback) may have been intended -- confirm before changing.
    if geoip2c is not None:
        try:
            # The legacy GeoIP reader takes the v1 .dat database.
            geoip1c = GeoIP.open(geoip1db, GeoIP.GEOIP_MMAP_CACHE | GeoIP.GEOIP_CHECK_CACHE)
            geoip1c.set_charset(GeoIP.GEOIP_CHARSET_UTF8)
        except (AttributeError, NameError):
            pass
        except Exception as e:
            print("[GeoIP] GeoIP1 Error: {}".format(e))
    if cidr:
        cidrlist = []
        for x in geoiplist:
            try:
                cidrlist.append(IPNetwork(x))
            except ValueError:
                pass
            except Exception as e:
                print("[GeoIP] Error adding CIDR range {} during loading: {}".format(x, e))
def save_geoiplist():
    """Persist the whitelist to disk and rebuild the CIDR cache."""
    # BUG FIX: without this declaration, cidrlist was rebound as a local and
    # the module-level cache used by geoip_check() never saw new entries.
    global cidrlist
    f = open("cfg/pso2proxy.geoip.json", "w")
    f.write(json.dumps(geoiplist))
    f.close()
    print('[GeoIP] Saved whitelist')
    if cidr:
        cidrlist = []
        for x in geoiplist:
            try:
                cidrlist.append(IPNetwork(x))
            except ValueError:
                pass  # entry is a country code or bare IP, not a CIDR range
            except Exception as e:
                print("[GeoIP] Error adding CIDR range {} during saving: {}".format(x, e))
@plugins.CommandHook("geoipmode", "[Admin Only] Toggle geoip mode", True)
class geoipmode(commands.Command):
    """Admin command that flips the global GeoIP whitelist toggle."""

    def call_from_client(self, client):
        global geoipmode
        geoipmode = not geoipmode
        message = "[GeoIP] Whitelist turn on." if geoipmode else "[GeoIP] Whitelist turn off."
        client.send_crypto_packet(packetFactory.SystemMessagePacket(message, 0x3).build())
        return

    def call_from_console(self):
        global geoipmode
        geoipmode = not geoipmode
        if geoipmode:
            return "[GeoIP] Whitelist turn on."
        return "[GeoIP] Whitelist turn off."
@plugins.CommandHook("geoip", "[Admin Only] Adds or removes places to the geoip whitelist.", True)
class geoip(commands.Command):
    """Admin command that adds or removes entries (country codes, IPs or
    CIDR ranges) to/from the GeoIP whitelist and persists the change."""

    def call_from_console(self):
        global geoiplist
        params = self.args.split(" ")
        if len(params) < 3:
            return "[geoip] Invalid usage. (Usage: geoip <add/del> <place>)"
        action = params[1].lower()  # accept any capitalization, not just add/ADD
        if action == "add":
            if params[2] not in geoiplist:
                geoiplist.append(params[2])
                save_geoiplist()
                return "[GeoIP] Added %s to the whitelist." % params[2]
            else:
                return "[GeoIP] %s is already in the whitelist." % params[2]
        elif action == "del":
            if params[2] in geoiplist:
                geoiplist.remove(params[2])
                save_geoiplist()
                return "[GeoIP] Removed %s from whitelist." % params[2]
            else:
                return "[GeoIP] %s is not in the whitelist, can not delete!" % params[2]
        else:
            # BUG FIX: message previously read "whitelist <add/del> <palce>" —
            # wrong command name and a typo.
            return "[GeoIP] Invalid usage. (Usage: geoip <add/del> <place>)"

    def call_from_client(self, client):
        """
        :param client: ShipProxy.ShipProxy
        """
        global geoiplist
        params = self.args.split(" ")
        if len(params) < 3:
            client.send_crypto_packet(packetFactory.SystemMessagePacket("[Command] {red}Invalid usage. (Usage: geoip <add/del> <SegaID>)", 0x3).build())
            return
        action = params[1].lower()
        if action == "add":
            if params[2] not in geoiplist:
                geoiplist.append(params[2])
                save_geoiplist()
                client.send_crypto_packet(packetFactory.SystemMessagePacket("[Command] {gre}Added %s to the whitelist." % params[2], 0x3).build())
                return
            else:
                client.send_crypto_packet(packetFactory.SystemMessagePacket("[Command] {red}%s is already in the whitelist." % params[2], 0x3).build())
                return
        elif action == "del":
            if params[2] in geoiplist:
                geoiplist.remove(params[2])
                save_geoiplist()
                client.send_crypto_packet(packetFactory.SystemMessagePacket("[Command] {gre}Removed %s from whitelist." % params[2], 0x3).build())
                return
            else:
                client.send_crypto_packet(packetFactory.SystemMessagePacket("[Command] {red}%s is not in the whitelist, can not delete!" % params[2], 0x3).build())
                return
        else:
            # BUG FIX: usage message named the wrong command ("whitelist").
            client.send_crypto_packet(packetFactory.SystemMessagePacket("[Command] {red}Invalid usage. (Usage: geoip <add/del> <SegaID>)", 0x3).build())
            return
@plugins.PacketHook(0x11, 0x0)
def geoip_check(context, data):
    """Disconnect clients whose IP or GeoIP country is not whitelisted.

    :type context: ShipProxy.ShipProxy
    """
    global geoip2c, geoip1c
    global geoiplist
    global geoipmode
    # BUG FIX: honour the admin toggle; previously the whitelist was enforced
    # even after "geoipmode" turned it off (the global was declared but never
    # read).  NOTE(review): at import time this name is shadowed by the
    # geoipmode command class (truthy), so the check defaults to enabled.
    if not geoipmode:
        return data
    place = "IPv4"
    ip = context.transport.getPeer().host
    badip = True
    if cidr:
        ipa = IPAddress(ip)
        for network in cidrlist:
            # BUG FIX: range membership uses `in`; `ipa == network` compared an
            # IPAddress to an IPNetwork and never matched.
            if ipa in network:
                badip = False
    if ip in geoiplist:
        badip = False
    elif geoip2c:
        try:
            response = geoip2c.country(ip)
            place = response.country.iso_code
            if place in geoiplist:
                badip = False
        # BUG FIX: AddressNotFoundError is defined in geoip2.errors, not on
        # the geoip2 package itself.
        except geoip2.errors.AddressNotFoundError:
            print("[GeoIP] Could not find {} in GeoIP database)".format(ip))
            place = "NULL"
        except Exception as e:
            print("[GeoIP] Error: {}".format(e))
            place = "ERROR"
    elif geoip1c:
        try:
            place = geoip1c.country_code_by_addr(ip)
            if place is None:
                place = "NULL"
            elif place in geoiplist:
                badip = False
        except Exception as e:
            print("[GeoIP] Error: {}".format(e))
            place = "ERROR"
    if badip:
        print("[GeoIP] {} (IP: {}) is not in the GeoIP whitelist, disconnecting client.".format(place, ip))
        context.send_crypto_packet(SystemMessagePacket("You are not on the Geoip whitelist for this proxy, please contact the owner of this proxy.\nDetails:\nCountry Code: {}\nIPv4: {}".format(place, ip), 0x1).build())
        context.transport.loseConnection()
    else:
        # BUG FIX: was `elif not geoip:`, which tested the geoip command class
        # (always truthy) — successful connections were never logged.
        print("Connection from {}|{}".format(place, ip))
    return data
# GeoIP: handle AddrFormatError
import commands
from config import YAMLConfig
import json
import os
import packetFactory
from packetFactory import SystemMessagePacket
import plugins
try:
import geoip2
except ImportError:
print("[GeoIP] geoip2 library not installed")
try:
import GeoIP
except ImportError:
print("[GeoIP] GeoIP library not installed")
cidr = False
try:
from netaddr import AddrFormatError
from netaddr import IPAddress
from netaddr import IPNetwork
cidr = True
except ImportError:
print("[GeoIP] netaddr library not installed")
cidrlist = []
geoiplist = []
GeoSettings = YAMLConfig("cfg/pso2proxy.geoip.config.yml",
{'enabled': True, 'geoip1': "/usr/share/GeoIP/GeoIP.dat", 'geoip2': "/var/lib/GeoIP/GeoLite2-Country.mmdb"}, True)
geoipmode = GeoSettings.get_key('enabled')
geoip1db = GeoSettings.get_key('geoip1')
geoip2db = GeoSettings.get_key('geoip2')
geoip2c = None
geoip1c = None
@plugins.on_start_hook
def load_geoiplist():
    """Load the GeoIP whitelist from disk and open the GeoIP databases.

    Creates a blank whitelist file on first run. Also rebuilds the
    module-level ``cidrlist`` cache of parsed CIDR networks when the
    netaddr library is available.
    """
    global geoiplist
    global geoip2c, geoip1c
    # BUG FIX: cidrlist was assigned as a function local, so the module-level
    # cache consulted by geoip_check() was never populated.
    global cidrlist
    if not os.path.exists("cfg/pso2proxy.geoip.json"):
        f = open("cfg/pso2proxy.geoip.json", "w")
        f.write(json.dumps(geoiplist))
        f.close()
        print('[GeoIP] Blank whitelist made.')
    else:
        f = open("cfg/pso2proxy.geoip.json", "r")
        geoiplist = json.loads(f.read())
        f.close()
        print("[GeoIP] Loaded %i geoip entries." % len(geoiplist))
    try:
        # BUG FIX: the GeoIP2 Reader expects the .mmdb database (geoip2db),
        # not the legacy .dat file; the two config paths were swapped.
        geoip2c = geoip2.database.Reader(geoip2db)
    except AttributeError:
        pass  # geoip2 library missing (import failed at module load)
    except NameError:
        pass
    except Exception as e:
        print("[GeoIP] GeoIP2 error: {}".format(e))
    # BUG FIX: open the legacy database only as a *fallback* when GeoIP2 is
    # unavailable (condition was inverted: `is not None`), and pass it the
    # legacy .dat path (geoip1db) rather than the .mmdb path.
    if geoip2c is None:
        try:
            geoip1c = GeoIP.open(geoip1db, GeoIP.GEOIP_MMAP_CACHE | GeoIP.GEOIP_CHECK_CACHE)
            geoip1c.set_charset(GeoIP.GEOIP_CHARSET_UTF8)
        except AttributeError:
            pass  # legacy GeoIP library missing
        except NameError:
            pass
        except Exception as e:
            print("[GeoIP] GeoIP1 Error: {}".format(e))
    if cidr:
        cidrlist = []
        for x in geoiplist:
            try:
                cidrlist.append(IPNetwork(x))
            except ValueError:
                pass  # not a CIDR entry (country code / bare IP)
            except AddrFormatError:
                pass  # malformed address string
            except Exception as e:
                print("[GeoIP] Error adding CIDR range {} during loading: {}".format(x, e))
def save_geoiplist():
    """Persist the whitelist to disk and rebuild the CIDR cache."""
    # BUG FIX: without this declaration, cidrlist was rebound as a local and
    # the module-level cache used by geoip_check() never saw new entries.
    global cidrlist
    f = open("cfg/pso2proxy.geoip.json", "w")
    f.write(json.dumps(geoiplist))
    f.close()
    print('[GeoIP] Saved whitelist')
    if cidr:
        cidrlist = []
        for x in geoiplist:
            try:
                cidrlist.append(IPNetwork(x))
            except ValueError:
                pass  # not a CIDR entry (country code / bare IP)
            # CONSISTENCY FIX: the AddrFormatError handling added to
            # load_geoiplist() was missing from the save path.
            except AddrFormatError:
                pass
            except Exception as e:
                print("[GeoIP] Error adding CIDR range {} during saving: {}".format(x, e))
@plugins.CommandHook("geoipmode", "[Admin Only] Toggle geoip mode", True)
class geoipmode(commands.Command):
    """Admin command that flips the global GeoIP whitelist toggle."""

    def _toggle(self):
        # Flip the module-level flag and report the new state.
        global geoipmode
        geoipmode = not geoipmode
        if geoipmode:
            return "[GeoIP] Whitelist turn on."
        return "[GeoIP] Whitelist turn off."

    def call_from_client(self, client):
        message = self._toggle()
        client.send_crypto_packet(packetFactory.SystemMessagePacket(message, 0x3).build())
        return

    def call_from_console(self):
        return self._toggle()
@plugins.CommandHook("geoip", "[Admin Only] Adds or removes places to the geoip whitelist.", True)
class geoip(commands.Command):
    """Admin command that adds or removes entries (country codes, IPs or
    CIDR ranges) to/from the GeoIP whitelist and persists the change."""

    def call_from_console(self):
        global geoiplist
        params = self.args.split(" ")
        if len(params) < 3:
            return "[geoip] Invalid usage. (Usage: geoip <add/del> <place>)"
        action = params[1].lower()  # accept any capitalization, not just add/ADD
        if action == "add":
            if params[2] not in geoiplist:
                geoiplist.append(params[2])
                save_geoiplist()
                return "[GeoIP] Added %s to the whitelist." % params[2]
            else:
                return "[GeoIP] %s is already in the whitelist." % params[2]
        elif action == "del":
            if params[2] in geoiplist:
                geoiplist.remove(params[2])
                save_geoiplist()
                return "[GeoIP] Removed %s from whitelist." % params[2]
            else:
                return "[GeoIP] %s is not in the whitelist, can not delete!" % params[2]
        else:
            # BUG FIX: message previously read "whitelist <add/del> <palce>" —
            # wrong command name and a typo.
            return "[GeoIP] Invalid usage. (Usage: geoip <add/del> <place>)"

    def call_from_client(self, client):
        """
        :param client: ShipProxy.ShipProxy
        """
        global geoiplist
        params = self.args.split(" ")
        if len(params) < 3:
            client.send_crypto_packet(packetFactory.SystemMessagePacket("[Command] {red}Invalid usage. (Usage: geoip <add/del> <SegaID>)", 0x3).build())
            return
        action = params[1].lower()
        if action == "add":
            if params[2] not in geoiplist:
                geoiplist.append(params[2])
                save_geoiplist()
                client.send_crypto_packet(packetFactory.SystemMessagePacket("[Command] {gre}Added %s to the whitelist." % params[2], 0x3).build())
                return
            else:
                client.send_crypto_packet(packetFactory.SystemMessagePacket("[Command] {red}%s is already in the whitelist." % params[2], 0x3).build())
                return
        elif action == "del":
            if params[2] in geoiplist:
                geoiplist.remove(params[2])
                save_geoiplist()
                client.send_crypto_packet(packetFactory.SystemMessagePacket("[Command] {gre}Removed %s from whitelist." % params[2], 0x3).build())
                return
            else:
                client.send_crypto_packet(packetFactory.SystemMessagePacket("[Command] {red}%s is not in the whitelist, can not delete!" % params[2], 0x3).build())
                return
        else:
            # BUG FIX: usage message named the wrong command ("whitelist").
            client.send_crypto_packet(packetFactory.SystemMessagePacket("[Command] {red}Invalid usage. (Usage: geoip <add/del> <SegaID>)", 0x3).build())
            return
@plugins.PacketHook(0x11, 0x0)
def geoip_check(context, data):
    """Disconnect clients whose IP or GeoIP country is not whitelisted.

    :type context: ShipProxy.ShipProxy
    """
    global geoip2c, geoip1c
    global geoiplist
    global geoipmode
    # BUG FIX: honour the admin toggle; previously the whitelist was enforced
    # even after "geoipmode" turned it off (the global was declared but never
    # read).  NOTE(review): at import time this name is shadowed by the
    # geoipmode command class (truthy), so the check defaults to enabled.
    if not geoipmode:
        return data
    place = "IPv4"
    ip = context.transport.getPeer().host
    badip = True
    if cidr:
        ipa = IPAddress(ip)
        for network in cidrlist:
            # BUG FIX: range membership uses `in`; `ipa == network` compared an
            # IPAddress to an IPNetwork and never matched.
            if ipa in network:
                badip = False
    if ip in geoiplist:
        badip = False
    elif geoip2c:
        try:
            response = geoip2c.country(ip)
            place = response.country.iso_code
            if place in geoiplist:
                badip = False
        # BUG FIX: AddressNotFoundError is defined in geoip2.errors, not on
        # the geoip2 package itself.
        except geoip2.errors.AddressNotFoundError:
            print("[GeoIP] Could not find {} in GeoIP database)".format(ip))
            place = "NULL"
        except Exception as e:
            print("[GeoIP] Error: {}".format(e))
            place = "ERROR"
    elif geoip1c:
        try:
            place = geoip1c.country_code_by_addr(ip)
            if place is None:
                place = "NULL"
            elif place in geoiplist:
                badip = False
        except Exception as e:
            print("[GeoIP] Error: {}".format(e))
            place = "ERROR"
    if badip:
        print("[GeoIP] {} (IP: {}) is not in the GeoIP whitelist, disconnecting client.".format(place, ip))
        context.send_crypto_packet(SystemMessagePacket("You are not on the Geoip whitelist for this proxy, please contact the owner of this proxy.\nDetails:\nCountry Code: {}\nIPv4: {}".format(place, ip), 0x1).build())
        context.transport.loseConnection()
    else:
        # BUG FIX: was `elif not geoip:`, which tested the geoip command class
        # (always truthy) — successful connections were never logged.
        print("Connection from {}|{}".format(place, ip))
    return data
# ---
import os
import bson
import copy
import datetime
import dateutil
import json
import uuid
import zipfile
from .. import base
from .. import config
from ..jobs.jobs import Job
from .. import upload
from .. import download
from .. import util
from .. import validators
from ..auth import listauth, always_ok
from ..dao import noop
from ..dao import liststorage
from ..dao import APIStorageException
from ..dao import hierarchy
from ..dao.containerutil import create_filereference_from_dictionary, create_containerreference_from_dictionary, create_containerreference_from_filereference
log = config.log
def initialize_list_configurations():
    """
    This configurations are used by the ListHandler class to load the storage, the permissions checker
    and the json schema validators used to handle a request.
    "use_object_id" implies that the container ids are converted to ObjectId
    "get_full_container" allows the handler to load the full content of the container and not only the sublist element (this is used for permissions for example)
    """
    def _sublist(storage_class, permchecker, schema, use_object_id=True, full=False):
        # One sublist entry; the same schema file is used both for the
        # storage-side and the input-side validation.
        entry = {
            'storage': storage_class,
            'permchecker': permchecker,
            'use_object_id': use_object_id,
            'storage_schema_file': schema,
            'input_schema_file': schema,
        }
        if full:
            entry['get_full_container'] = True
        return entry

    def _container_defaults():
        # Fresh dicts per container so per-container mutation stays isolated
        # (replaces the deepcopy of a shared template).
        return {
            'tags': _sublist(liststorage.StringListStorage, listauth.default_sublist, 'tag.json'),
            'files': _sublist(liststorage.ListStorage, listauth.default_sublist, 'file.json'),
            'permissions': _sublist(liststorage.ListStorage, listauth.permissions_sublist, 'permission.json', full=True),
            'notes': _sublist(liststorage.ListStorage, listauth.notes_sublist, 'note.json'),
            'analyses': _sublist(liststorage.AnalysesStorage, listauth.default_sublist, 'analysis.json'),
        }

    configurations = {
        'groups': {
            'roles': _sublist(liststorage.ListStorage, listauth.group_roles_sublist, 'permission.json', use_object_id=False, full=True),
            'tags': _sublist(liststorage.StringListStorage, listauth.group_tags_sublist, 'tag.json', use_object_id=False),
        },
        'projects': _container_defaults(),
        'sessions': _container_defaults(),
        'acquisitions': _container_defaults(),
        'collections': _container_defaults(),
    }
    # preload the Storage instances for all configurations
    for cont_name, cont_config in configurations.iteritems():
        for list_name, list_config in cont_config.iteritems():
            storage_class = list_config['storage']
            list_config['storage'] = storage_class(
                cont_name,
                list_name,
                use_object_id=list_config.get('use_object_id', False)
            )
    return configurations
list_handler_configurations = initialize_list_configurations()
class ListHandler(base.RequestHandler):
    """
    This class handle operations on a generic sublist of a container like tags, group roles, user permissions, etc.
    The pattern used is:
    1) initialize request
    2) exec request
    3) check and return result
    Specific behaviors (permissions checking logic for authenticated and not superuser users, storage interaction)
    are specified in the routes defined in api.py
    """

    def __init__(self, request=None, response=None):
        super(ListHandler, self).__init__(request, response)

    def get(self, cont_name, list_name, **kwargs):
        # After popping the container id, the remaining kwargs identify the
        # element inside the sublist.
        _id = kwargs.pop('cid')
        permchecker, storage, _, _, keycheck = self._initialize_request(cont_name, list_name, _id, query_params=kwargs)
        try:
            # Wrap the storage operation: key check -> permission check -> exec.
            result = keycheck(permchecker(storage.exec_op))('GET', _id, query_params=kwargs)
        except APIStorageException as e:
            self.abort(400, e.message)
        if result is None:
            self.abort(404, 'Element not found in list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id))
        return result

    def post(self, cont_name, list_name, **kwargs):
        # Validate the JSON body, then append a new element to the sublist.
        _id = kwargs.pop('cid')
        permchecker, storage, mongo_validator, payload_validator, keycheck = self._initialize_request(cont_name, list_name, _id)
        payload = self.request.json_body
        payload_validator(payload, 'POST')
        result = keycheck(mongo_validator(permchecker(storage.exec_op)))('POST', _id=_id, payload=payload)
        if result.modified_count == 1:
            return {'modified':result.modified_count}
        else:
            self.abort(404, 'Element not added in list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id))

    def put(self, cont_name, list_name, **kwargs):
        # Validate the JSON body, then update an existing sublist element.
        _id = kwargs.pop('cid')
        permchecker, storage, mongo_validator, payload_validator, keycheck = self._initialize_request(cont_name, list_name, _id, query_params=kwargs)
        payload = self.request.json_body
        payload_validator(payload, 'PUT')
        try:
            result = keycheck(mongo_validator(permchecker(storage.exec_op)))('PUT', _id=_id, query_params=kwargs, payload=payload)
        except APIStorageException as e:
            self.abort(400, e.message)
        # abort if the query of the update wasn't able to find any matching documents
        if result.matched_count == 0:
            self.abort(404, 'Element not updated in list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id))
        else:
            return {'modified':result.modified_count}

    def delete(self, cont_name, list_name, **kwargs):
        # Remove one element from the sublist; 404 when nothing was removed.
        _id = kwargs.pop('cid')
        permchecker, storage, _, _, keycheck = self._initialize_request(cont_name, list_name, _id, query_params=kwargs)
        try:
            result = keycheck(permchecker(storage.exec_op))('DELETE', _id, query_params=kwargs)
        except APIStorageException as e:
            self.abort(400, e.message)
        if result.modified_count == 1:
            return {'modified': result.modified_count}
        else:
            self.abort(404, 'Element not removed from list {} in container {} {}'.format(storage.list_name, storage.cont_name, _id))

    def _initialize_request(self, cont_name, list_name, _id, query_params=None):
        """
        This method loads:
        1) the container that will be modified
        2) the storage class that will handle the database actions
        3) the permission checker decorator that will be used
        4) the payload_validator checking the payload sent by the client against a json schema
        5) the mongo_validator that will check what will be sent to mongo against a json schema
        6) the keycheck decorator validating the request key
        """
        conf = list_handler_configurations[cont_name][list_name]
        storage = conf['storage']
        permchecker = conf['permchecker']
        if conf.get('get_full_container'):
            # Permission checks need the whole container document, not just
            # the single sublist element.
            query_params = None
        container = storage.get_container(_id, query_params)
        if container is not None:
            if self.superuser_request:
                permchecker = always_ok
            elif self.public_request:
                permchecker = listauth.public_request(self, container)
            else:
                permchecker = permchecker(self, container)
        else:
            self.abort(404, 'Element {} not found in container {}'.format(_id, storage.cont_name))
        mongo_schema_uri = validators.schema_uri('mongo', conf.get('storage_schema_file'))
        mongo_validator = validators.decorator_from_schema_path(mongo_schema_uri)
        keycheck = validators.key_check(mongo_schema_uri)
        input_schema_uri = validators.schema_uri('input', conf.get('input_schema_file'))
        input_validator = validators.from_schema_path(input_schema_uri)
        return permchecker, storage, mongo_validator, input_validator, keycheck
class PermissionsListHandler(ListHandler):
    """
    PermissionsListHandler overrides post, put and delete methods of ListHandler to propagate permissions
    """

    def post(self, cont_name, list_name, **kwargs):
        return self._exec_and_propagate('post', cont_name, list_name, kwargs)

    def put(self, cont_name, list_name, **kwargs):
        return self._exec_and_propagate('put', cont_name, list_name, kwargs)

    def delete(self, cont_name, list_name, **kwargs):
        return self._exec_and_propagate('delete', cont_name, list_name, kwargs)

    def _exec_and_propagate(self, verb, cont_name, list_name, kwargs):
        # Run the base handler operation, then push the (possibly changed)
        # project permissions down to child containers.
        container_id = kwargs.get('cid')
        base_method = getattr(super(PermissionsListHandler, self), verb)
        result = base_method(cont_name, list_name, **kwargs)
        self._propagate_project_permissions(cont_name, container_id)
        return result

    def _propagate_project_permissions(self, cont_name, _id):
        """
        method to propagate permissions from a project to its sessions and acquisitions
        """
        if cont_name != 'projects':
            return
        try:
            oid = bson.ObjectId(_id)
            project = config.db.projects.find_one({'_id': oid}, {'permissions': 1})
            update = {'$set': {'permissions': project['permissions']}}
            hierarchy.propagate_changes(cont_name, oid, {}, update)
        except APIStorageException:
            self.abort(500, 'permissions not propagated from project {} to sessions'.format(_id))
class NotesListHandler(ListHandler):
    """
    NotesListHandler overrides post, put methods of ListHandler to add custom fields to the payload.
    e.g. _id, user, created, etc.
    """

    @staticmethod
    def _parse_timestamp(payload):
        # Replace a client-supplied timestamp string with a datetime, in place.
        if payload.get('timestamp'):
            payload['timestamp'] = dateutil.parser.parse(payload['timestamp'])

    def post(self, cont_name, list_name, **kwargs):
        """Create a note, stamping id, author and creation/modification times."""
        _id = kwargs.pop('cid')
        permchecker, storage, mongo_validator, input_validator, keycheck = self._initialize_request(cont_name, list_name, _id)
        payload = self.request.json_body
        input_validator(payload, 'POST')
        payload['_id'] = payload.get('_id') or str(bson.objectid.ObjectId())
        payload['user'] = payload.get('user', self.uid)
        payload['created'] = payload['modified'] = datetime.datetime.utcnow()
        self._parse_timestamp(payload)
        result = keycheck(mongo_validator(permchecker(storage.exec_op)))('POST', _id=_id, payload=payload)
        if result.modified_count == 1:
            return {'modified': result.modified_count}
        else:
            self.abort(404, 'Element not added in list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id))

    def put(self, cont_name, list_name, **kwargs):
        """Update a note, refreshing its modification time."""
        _id = kwargs.pop('cid')
        permchecker, storage, mongo_validator, input_validator, keycheck = self._initialize_request(cont_name, list_name, _id, query_params=kwargs)
        payload = self.request.json_body
        input_validator(payload, 'PUT')
        payload['modified'] = datetime.datetime.utcnow()
        self._parse_timestamp(payload)
        result = keycheck(mongo_validator(permchecker(storage.exec_op)))('PUT', _id=_id, query_params=kwargs, payload=payload)
        # abort if the query of the update wasn't able to find any matching documents
        if result.matched_count == 0:
            self.abort(404, 'Element not updated in list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id))
        else:
            return {'modified': result.modified_count}
class TagsListHandler(ListHandler):
    """
    TagsListHandler overrides put, delete methods of ListHandler to propagate changes to group tags
    If a tag is renamed or deleted at the group level, project, session and acquisition tags will also be renamed/deleted
    """

    def put(self, cont_name, list_name, **kwargs):
        group_id = kwargs.get('cid')
        result = super(TagsListHandler, self).put(cont_name, list_name, **kwargs)
        if cont_name == 'groups':
            old_tag = kwargs.get('value')
            new_tag = self.request.json_body.get('value')
            # Match children that still carry the old tag but not the new one,
            # then rename in place via the positional operator.
            query = {'$and': [{'tags': old_tag}, {'tags': {'$ne': new_tag}}]}
            update = {'$set': {'tags.$': new_tag}}
            self._propagate_group_tags(cont_name, group_id, query, update)
        return result

    def delete(self, cont_name, list_name, **kwargs):
        group_id = kwargs.get('cid')
        result = super(TagsListHandler, self).delete(cont_name, list_name, **kwargs)
        if cont_name == 'groups':
            removed_tag = kwargs.get('value')
            self._propagate_group_tags(cont_name, group_id, {}, {'$pull': {'tags': removed_tag}})
        return result

    def _propagate_group_tags(self, cont_name, _id, query, update):
        """
        method to propagate tag changes from a group to its projects, sessions and acquisitions
        """
        try:
            hierarchy.propagate_changes(cont_name, _id, query, update)
        except APIStorageException:
            self.abort(500, 'tag change not propagated from group {}'.format(_id))
class FileListHandler(ListHandler):
    """
    This class implements a more specific logic for list of files as the api needs to interact with the filesystem.
    """

    def __init__(self, request=None, response=None):
        super(FileListHandler, self).__init__(request, response)

    def _check_ticket(self, ticket_id, _id, filename):
        # A download ticket is only valid for the exact file, container and
        # client IP it was issued for.
        ticket = config.db.downloads.find_one({'_id': ticket_id})
        if not ticket:
            self.abort(404, 'no such ticket')
        if ticket['target'] != _id or ticket['filename'] != filename or ticket['ip'] != self.request.client_addr:
            self.abort(400, 'ticket not for this resource or source IP')
        return ticket

    def _build_zip_info(self, filepath):
        """
        Builds a json response containing member and comment info for a zipfile
        """
        with zipfile.ZipFile(filepath) as zf:
            info = {}
            info['comment'] = zf.comment
            info['members'] = []
            for zi in zf.infolist():
                m = {}
                m['path'] = zi.filename
                m['size'] = zi.file_size
                m['timestamp'] = datetime.datetime(*zi.date_time)
                m['comment'] = zi.comment
                info['members'].append(m)
            return info

    def get(self, cont_name, list_name, **kwargs):
        """
        .. http:get:: /api/(cont_name)/(cid)/files/(file_name)
        Gets the ticket used to download the file when the ticket is not provided.
        Downloads the file when the ticket is provided.
        :query ticket: should be empty
        :param cont_name: one of ``projects``, ``sessions``, ``acquisitions``, ``collections``
        :type cont_name: string
        :param cid: Container ID
        :type cid: string
        :statuscode 200: no error
        :statuscode 400: explain...
        :statuscode 409: explain...
        **Example request**:
        .. sourcecode:: http
        GET /api/acquisitions/57081d06b386a6dc79ca383c/files/fMRI%20Loc%20Word%20Face%20Obj.zip?ticket= HTTP/1.1
        Host: demo.flywheel.io
        Accept: */*
        **Example response**:
        .. sourcecode:: http
        HTTP/1.1 200 OK
        Vary: Accept-Encoding
        Content-Type: application/json; charset=utf-8
        {"ticket": "1e975e3d-21e9-41f4-bb97-261f03d35ba1"}
        """
        _id = kwargs.pop('cid')
        permchecker, storage, _, _, keycheck = self._initialize_request(cont_name, list_name, _id)
        list_name = storage.list_name
        filename = kwargs.get('name')
        # Check ticket id and skip permissions check if it clears
        ticket_id = self.get_param('ticket')
        if ticket_id:
            ticket = self._check_ticket(ticket_id, _id, filename)
            permchecker = always_ok
        # Grab fileinfo from db
        try:
            fileinfo = keycheck(permchecker(storage.exec_op))('GET', _id, query_params=kwargs)
        except APIStorageException as e:
            self.abort(400, e.message)
        if not fileinfo:
            self.abort(404, 'no such file')
        hash_ = self.get_param('hash')
        if hash_ and hash_ != fileinfo['hash']:
            self.abort(409, 'file exists, hash mismatch')
        # Files are stored content-addressed: path is derived from the hash.
        filepath = os.path.join(config.get_item('persistent', 'data_path'), util.path_from_hash(fileinfo['hash']))
        # Request for download ticket
        if self.get_param('ticket') == '':
            ticket = util.download_ticket(self.request.client_addr, 'file', _id, filename, fileinfo['size'])
            return {'ticket': config.db.downloads.insert_one(ticket).inserted_id}
        # Request for info about zipfile
        elif self.is_true('info'):
            try:
                info = self._build_zip_info(filepath)
            except zipfile.BadZipfile:
                self.abort(400, 'not a zip file')
            return info
        # Request to download zipfile member
        elif self.get_param('member') is not None:
            zip_member = self.get_param('member')
            try:
                with zipfile.ZipFile(filepath) as zf:
                    self.response.headers['Content-Type'] = util.guess_mimetype(zip_member)
                    self.response.write(zf.open(zip_member).read())
            except zipfile.BadZipfile:
                self.abort(400, 'not a zip file')
            except KeyError:
                self.abort(400, 'zip file contains no such member')
        # Authenticated or ticketed download request
        else:
            self.response.app_iter = open(filepath, 'rb')
            self.response.headers['Content-Length'] = str(fileinfo['size'])  # must be set after setting app_iter
            if self.is_true('view'):
                # Inline display with the stored mimetype.
                self.response.headers['Content-Type'] = str(fileinfo.get('mimetype', 'application/octet-stream'))
            else:
                # Force a download with the original filename.
                self.response.headers['Content-Type'] = 'application/octet-stream'
                self.response.headers['Content-Disposition'] = 'attachment; filename="' + filename + '"'

    def post(self, cont_name, list_name, **kwargs):
        # Upload one or more files to the target container.
        _id = kwargs.pop('cid')
        # Ugly hack: ensure cont_name is singular. Pass singular or plural to code that expects it.
        if cont_name.endswith('s'):
            cont_name_plural = cont_name
            cont_name = cont_name[:-1]
        else:
            cont_name_plural = cont_name + 's'
        # Authorize
        permchecker, _, _, _, _ = self._initialize_request(cont_name_plural, list_name, _id)
        permchecker(noop)('POST', _id=_id)
        return upload.process_upload(self.request, upload.Strategy.targeted, container_type=cont_name, id_=_id, origin=self.origin)

    def _check_packfile_token(self, project_id, token_id, check_user=True):
        """
        Check and update a packfile token assertion.
        """
        if token_id is None:
            raise Exception('Upload token is required')
        query = {
            'type': 'packfile',
            'project': project_id,
            '_id': token_id,
        }
        # Server-Sent Events are fired in the browser in such a way that one cannot dictate their headers.
        # For these endpoints, authentication must be disabled because the normal Authorization header will not be present.
        # In this case, the document id will serve instead.
        if check_user:
            query['user'] = self.uid
        # Check for correct token
        result = config.db['tokens'].find_one(query)
        if result is None:
            raise Exception('Invalid or expired upload token')
        # Update token timestamp
        config.db['tokens'].update_one({
            '_id': token_id,
        }, {
            '$set': {
                'modified': datetime.datetime.utcnow()
            }
        })

    def packfile_start(self, cont_name, **kwargs):
        """
        Declare intent to upload a packfile to a project, and recieve an upload token identifier.
        """
        _id = kwargs.pop('cid')
        if cont_name != 'projects':
            raise Exception('Packfiles can only be targeted at projects')
        # Authorize: confirm project exists
        project = config.db['projects'].find_one({ '_id': bson.ObjectId(_id)})
        if project is None:
            raise Exception('Project ' + _id + ' does not exist')
        # Authorize: confirm user has admin/write perms
        if not self.superuser_request:
            perms = project.get('permissions', [])
            for p in perms:
                if p['_id'] == self.uid and p['access'] in ('rw', 'admin'):
                    break
            else:
                # for/else: no permission entry matched the requesting user
                raise Exception('Not authorized')
        timestamp = datetime.datetime.utcnow()
        # Save token for stateful uploads
        result = config.db['tokens'].insert_one({
            '_id': str(uuid.uuid4()),
            'type': 'packfile',
            'user': self.uid,
            'project': _id,
            'created': timestamp,
            'modified': timestamp,
        })
        return {
            'token': str(result.inserted_id)
        }

    def packfile(self, **kwargs):
        """
        Add files to an in-progress packfile.
        """
        project_id = kwargs.pop('cid')
        token_id = self.request.GET.get('token')
        self._check_packfile_token(project_id, token_id)
        return upload.process_upload(self.request, upload.Strategy.token, origin=self.origin, context={'token': token_id})

    def packfile_end(self, **kwargs):
        """
        Complete and save an uploaded packfile.
        """
        project_id = kwargs.pop('cid')
        token_id = self.request.GET.get('token')
        # SSE clients cannot send the auth header, so skip the user check here.
        self._check_packfile_token(project_id, token_id, check_user=False)
        # Because this is an SSE endpoint, there is no form-post. Instead, read JSON data from request param
        metadata = json.loads(self.request.GET.get('metadata'))
        return upload.process_upload(self.request, upload.Strategy.packfile, origin=self.origin, context={'token': token_id}, response=self.response, metadata=metadata)
class AnalysesHandler(ListHandler):
    """Handle the ``analyses`` sublist of a container: creation (optionally
    job-backed), file download (single file or batch tar) and notes."""

    def _check_ticket(self, ticket_id, _id, filename):
        """Validate a download ticket against the requesting IP and target.

        Without a filename the ticket must be a batch (tar) ticket; with a
        filename it must match both the filename and the target container.
        """
        ticket = config.db.downloads.find_one({'_id': ticket_id})
        if not ticket:
            self.abort(404, 'no such ticket')
        if ticket['ip'] != self.request.client_addr:
            self.abort(400, 'ticket not for this source IP')
        if not filename:
            return self._check_ticket_for_batch(ticket)
        if ticket.get('filename') != filename or ticket['target'] != _id:
            self.abort(400, 'ticket not for this resource')
        return ticket

    def _check_ticket_for_batch(self, ticket):
        """Abort unless the ticket was issued for a batch (tar) download."""
        if ticket.get('type') != 'batch':
            self.abort(400, 'ticket not for this resource')
        return ticket

    def put(self, *args, **kwargs):
        # Analyses are immutable once created.
        raise NotImplementedError("an analysis can't be modified")

    def _default_analysis(self):
        """Return a new analysis skeleton (_id, timestamps, owner).

        A single utcnow() call is used so 'created' and 'modified' start
        identical (two separate calls could differ by microseconds).
        """
        now = datetime.datetime.utcnow()
        analysis_obj = {}
        analysis_obj['_id'] = str(bson.objectid.ObjectId())
        analysis_obj['created'] = now
        analysis_obj['modified'] = now
        analysis_obj['user'] = self.uid
        return analysis_obj

    def post(self, cont_name, list_name, **kwargs):
        """
        .. http:post:: /api/(cont_name)/(cid)/analyses

            Default behavior:
                Creates an analysis object and uploads supplied input
                and output files.
            When param ``job`` is true:
                Creates an analysis object and job object that reference
                each other via ``job`` and ``destination`` fields. Job based
                analyses are only allowed at the session level.

            :param cont_name: one of ``projects``, ``sessions``, ``collections``
            :type cont_name: string

            :param cid: Container ID
            :type cid: string

            :query boolean job: a flag specifying the type of analysis

            :statuscode 200: no error
            :statuscode 400: Job-based analyses must be at the session level
            :statuscode 400: Job-based analyses must have ``job`` and ``analysis`` maps in JSON body

            **Example request**:

            .. sourcecode:: http

                POST /api/sessions/57081d06b386a6dc79ca383c/analyses HTTP/1.1
                {
                    "analysis": {
                        "label": "Test Analysis 1"
                    },
                    "job" : {
                        "gear": "dcm_convert",
                        "inputs": {
                            "dicom": {
                                "type": "acquisition",
                                "id": "57081d06b386a6dc79ca386b",
                                "name" : "test_acquisition_dicom.zip"
                            }
                        },
                        "tags": ["example"]
                    }
                }

            **Example response**:

            .. sourcecode:: http

                HTTP/1.1 200 OK
                Vary: Accept-Encoding
                Content-Type: application/json; charset=utf-8
                {
                    "_id": "573cb66b135d87002660597c"
                }
        """
        _id = kwargs.pop('cid')
        permchecker, storage, _, payload_validator, _ = self._initialize_request(cont_name, list_name, _id)
        permchecker(noop)('POST', _id=_id)

        if self.is_true('job'):
            if cont_name == 'sessions':
                payload = self.request.json_body
                payload_validator(payload.get('analysis', {}), 'POST')
                return self._create_job_and_analysis(cont_name, _id, storage, payload)
            else:
                self.abort(400, 'Analysis created via a job must be at the session level')

        # Default behavior: build the analysis from the uploaded files.
        payload = upload.process_upload(self.request, upload.Strategy.analysis, origin=self.origin)
        analysis = self._default_analysis()
        analysis.update(payload)
        result = storage.exec_op('POST', _id=_id, payload=analysis)
        if result.modified_count == 1:
            return {'_id': analysis['_id']}
        else:
            self.abort(500, 'Element not added in list analyses of container {} {}'.format(cont_name, _id))

    def _create_job_and_analysis(self, cont_name, cid, storage, payload):
        """Create an analysis and a job that reference each other.

        The job's inputs are copied onto the analysis as its ``files`` list
        (flagged with ``input: True``), the analysis is saved, the job is
        inserted, and finally the job id is linked back onto the analysis.
        """
        analysis = payload.get('analysis')
        job = payload.get('job')
        if job is None or analysis is None:
            self.abort(400, 'JSON body must contain map for "analysis" and "job"')

        default = self._default_analysis()
        default.update(analysis)
        analysis = default

        # Save inputs to analysis and job
        inputs = {}  # For Job object (map of FileReferences)
        files = []   # For Analysis object (list of file objects)
        for x in job['inputs'].keys():
            input_map = job['inputs'][x]
            fileref = create_filereference_from_dictionary(input_map)
            inputs[x] = fileref
            contref = create_containerreference_from_filereference(fileref)
            file_ = contref.find_file(fileref.name)
            if file_:
                file_['input'] = True
                files.append(file_)
        analysis['files'] = files

        result = storage.exec_op('POST', _id=cid, payload=analysis)
        if result.modified_count != 1:
            self.abort(500, 'Element not added in list analyses of container {} {}'.format(cont_name, cid))

        # Prepare job
        tags = job.get('tags', [])
        if 'analysis' not in tags:
            tags.append('analysis')
        gear_name = job['gear']
        destination = create_containerreference_from_dictionary({'type': 'analysis', 'id': analysis['_id']})
        job = Job(gear_name, inputs, destination=destination, tags=tags)
        job_id = job.insert()
        if not job_id:
            self.abort(500, 'Job not created for analysis {} of container {} {}'.format(analysis['_id'], cont_name, cid))

        # Link the created job back onto the analysis document.
        result = storage.exec_op('PUT', _id=cid, query_params={'_id': analysis['_id']}, payload={'job': job_id})
        return {'_id': analysis['_id']}

    def download(self, cont_name, list_name, **kwargs):
        """
        .. http:get:: /api/(cont_name)/(cid)/analyses/(analysis_id)/files/(file_name)

            Download a file from an analysis or download a tar of all files

            When no filename is provided, a tar of all input and output files is created.
            The first request to this endpoint without a ticket ID generates a download ticket.
            A request to this endpoint with a ticket ID downloads the file(s).
            If the analysis object is tied to a job, the input file(s) are inflated from
            the job's ``input`` array.

            :param cont_name: one of ``projects``, ``sessions``, ``collections``
            :type cont_name: string

            :param cid: Container ID
            :type cid: string

            :param analysis_id: Analysis ID
            :type analysis_id: string

            :param filename: (Optional) Filename of specific file to download
            :type filename: string

            :query string ticket: Download ticket ID

            :statuscode 200: no error
            :statuscode 404: No files on analysis ``analysis_id``
            :statuscode 404: Could not find file ``filename`` on analysis ``analysis_id``

            **Example request without ticket ID**:

            .. sourcecode:: http

                GET /api/sessions/57081d06b386a6dc79ca383c/analyses/5751cd3781460100a66405c8/files HTTP/1.1
                Host: demo.flywheel.io
                Accept: */*

            **Response**:

            .. sourcecode:: http

                HTTP/1.1 200 OK
                Vary: Accept-Encoding
                Content-Type: application/json; charset=utf-8
                {
                    "ticket": "57f2af23-a94c-426d-8521-11b2e8782020",
                    "filename": "analysis_5751cd3781460100a66405c8.tar",
                    "file_cnt": 3,
                    "size": 4525137
                }

            **Example request with ticket ID**:

            .. sourcecode:: http

                GET /api/sessions/57081d06b386a6dc79ca383c/analyses/5751cd3781460100a66405c8/files?ticket=57f2af23-a94c-426d-8521-11b2e8782020 HTTP/1.1
                Host: demo.flywheel.io
                Accept: */*

            **Response**:

            .. sourcecode:: http

                HTTP/1.1 200 OK
                Vary: Accept-Encoding
                Content-Type: application/octet-stream
                Content-Disposition: attachment; filename=analysis_5751cd3781460100a66405c8.tar;

            **Example Request with filename**:

            .. sourcecode:: http

                GET /api/sessions/57081d06b386a6dc79ca383c/analyses/5751cd3781460100a66405c8/files/exampledicom.zip?ticket= HTTP/1.1
                Host: demo.flywheel.io
                Accept: */*

            **Response**:

            .. sourcecode:: http

                HTTP/1.1 200 OK
                Vary: Accept-Encoding
                Content-Type: application/json; charset=utf-8
                {
                    "ticket": "57f2af23-a94c-426d-8521-11b2e8782020",
                    "filename": "exampledicom.zip",
                    "file_cnt": 1,
                    "size": 4525137
                }
        """
        _id = kwargs.pop('cid')
        permchecker, storage, _, _, _ = self._initialize_request(cont_name, list_name, _id)
        filename = kwargs.get('name')
        ticket_id = self.get_param('ticket')
        if not ticket_id:
            # A valid ticket (checked below) already proves authorization,
            # so only ticketless requests are permission-checked here.
            permchecker(noop)('GET', _id=_id)
        analysis_id = kwargs.get('_id')
        fileinfo = storage.get_fileinfo(_id, analysis_id, filename)
        if fileinfo is None:
            error_msg = 'No files on analysis {}'.format(analysis_id)
            if filename:
                error_msg = 'Could not find file {} on analysis {}'.format(filename, analysis_id)
            self.abort(404, error_msg)
        if not ticket_id:
            # First request: issue a download ticket (single file or batch tar).
            if filename:
                total_size = fileinfo[0]['size']
                file_cnt = 1
                ticket = util.download_ticket(self.request.client_addr, 'file', _id, filename, total_size)
            else:
                targets, total_size, file_cnt = self._prepare_batch(fileinfo)
                filename = 'analysis_' + analysis_id + '.tar'
                ticket = util.download_ticket(self.request.client_addr, 'batch', targets, filename, total_size)
            return {
                'ticket': config.db.downloads.insert_one(ticket).inserted_id,
                'size': total_size,
                'file_cnt': file_cnt,
                'filename': filename
            }
        else:
            ticket = self._check_ticket(ticket_id, _id, filename)
            if not filename:
                self._send_batch(ticket)
                return
            if not fileinfo:
                # Fixed message: the previous literal used implicit string
                # concatenation ('' inside it) and rendered "doesnt exist".
                self.abort(404, "{} doesn't exist".format(filename))
            fileinfo = fileinfo[0]
            filepath = os.path.join(
                config.get_item('persistent', 'data_path'),
                util.path_from_hash(fileinfo['hash'])
            )
            filename = fileinfo['name']
            self.response.app_iter = open(filepath, 'rb')
            self.response.headers['Content-Length'] = str(fileinfo['size'])  # must be set after setting app_iter
            if self.is_true('view'):
                self.response.headers['Content-Type'] = str(fileinfo.get('mimetype', 'application/octet-stream'))
            else:
                self.response.headers['Content-Type'] = 'application/octet-stream'
                self.response.headers['Content-Disposition'] = 'attachment; filename=' + str(filename)

    def _prepare_batch(self, fileinfo):
        """Resolve on-disk paths for a batch download.

        Returns (targets, total_size, total_cnt); missing files are skipped
        silently.
        """
        ## duplicated code from download.py
        ## we need a way to avoid this
        targets = []
        total_size = total_cnt = 0
        data_path = config.get_item('persistent', 'data_path')
        for f in fileinfo:
            filepath = os.path.join(data_path, util.path_from_hash(f['hash']))
            if os.path.exists(filepath):  # silently skip missing files
                targets.append((filepath, 'analyses/' + f['name'], f['size']))
                total_size += f['size']
                total_cnt += 1
        return targets, total_size, total_cnt

    def _send_batch(self, ticket):
        """Stream a tar archive of the ticket's targets to the client."""
        self.response.app_iter = download.archivestream(ticket)
        self.response.headers['Content-Type'] = 'application/octet-stream'
        self.response.headers['Content-Disposition'] = 'attachment; filename=' + str(ticket['filename'])

    def delete_note(self, cont_name, list_name, **kwargs):
        """Delete a single note from an analysis."""
        _id = kwargs.pop('cid')
        analysis_id = kwargs.pop('_id')
        permchecker, storage, _, _, _ = self._initialize_request(cont_name, list_name, _id)
        note_id = kwargs.get('note_id')
        permchecker(noop)('DELETE', _id=_id)
        result = storage.delete_note(_id=_id, analysis_id=analysis_id, note_id=note_id)
        if result.modified_count == 1:
            return {'modified': result.modified_count}
        else:
            self.abort(404, 'Element not removed from list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id))

    def add_note(self, cont_name, list_name, **kwargs):
        """Add a note (stamped with _id, user and created) to an analysis."""
        _id = kwargs.pop('cid')
        analysis_id = kwargs.get('_id')
        permchecker, storage, _, input_validator, _ = self._initialize_request(cont_name, list_name, _id)
        payload = self.request.json_body
        input_validator(payload, 'POST')
        payload['_id'] = str(bson.objectid.ObjectId())
        payload['user'] = payload.get('user', self.uid)
        payload['created'] = datetime.datetime.utcnow()
        permchecker(noop)('POST', _id=_id)
        result = storage.add_note(_id=_id, analysis_id=analysis_id, payload=payload)
        if result.modified_count == 1:
            return {'modified': result.modified_count}
        else:
            self.abort(404, 'Element not added in list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id))
# Don't use analysis validator for notes
import os
import bson
import copy
import datetime
import dateutil
import json
import uuid
import zipfile
from .. import base
from .. import config
from ..jobs.jobs import Job
from .. import upload
from .. import download
from .. import util
from .. import validators
from ..auth import listauth, always_ok
from ..dao import noop
from ..dao import liststorage
from ..dao import APIStorageException
from ..dao import hierarchy
from ..dao.containerutil import create_filereference_from_dictionary, create_containerreference_from_dictionary, create_containerreference_from_filereference
log = config.log
def initialize_list_configurations():
    """Build the per-container sublist configurations used by ListHandler.

    Each entry maps a sublist name to its storage (instantiated below), its
    permission checker and the json schema files used for validation.

    "use_object_id" implies that the container ids are converted to ObjectId.
    "get_full_container" allows the handler to load the full content of the
    container and not only the sublist element (this is used for permissions
    for example).
    """
    def sublist_config(storage_class, permchecker, schema, use_object_id, full=False):
        # One sublist configuration entry; 'get_full_container' is only
        # present when requested, matching the handler's conf.get() lookup.
        cfg = {
            'storage': storage_class,
            'permchecker': permchecker,
            'use_object_id': use_object_id,
            'storage_schema_file': schema,
            'input_schema_file': schema,
        }
        if full:
            cfg['get_full_container'] = True
        return cfg

    def default_sublists():
        # Fresh dicts per container so later storage instantiation does not
        # share state between containers.
        return {
            'tags': sublist_config(liststorage.StringListStorage, listauth.default_sublist, 'tag.json', True),
            'files': sublist_config(liststorage.ListStorage, listauth.default_sublist, 'file.json', True),
            'permissions': sublist_config(liststorage.ListStorage, listauth.permissions_sublist, 'permission.json', True, full=True),
            'notes': sublist_config(liststorage.ListStorage, listauth.notes_sublist, 'note.json', True),
            'analyses': sublist_config(liststorage.AnalysesStorage, listauth.default_sublist, 'analysis.json', True),
        }

    configurations = {
        'groups': {
            'roles': sublist_config(liststorage.ListStorage, listauth.group_roles_sublist, 'permission.json', False, full=True),
            'tags': sublist_config(liststorage.StringListStorage, listauth.group_tags_sublist, 'tag.json', False),
        },
    }
    for container in ('projects', 'sessions', 'acquisitions', 'collections'):
        configurations[container] = default_sublists()

    # Preload the Storage instances for all configurations.
    for cont_name, cont_config in configurations.iteritems():
        for list_name, list_config in cont_config.iteritems():
            list_config['storage'] = list_config['storage'](
                cont_name,
                list_name,
                use_object_id=list_config.get('use_object_id', False)
            )
    return configurations
# Built once at import time; handlers index this as [cont_name][list_name].
list_handler_configurations = initialize_list_configurations()
class ListHandler(base.RequestHandler):
    """
    This class handle operations on a generic sublist of a container like tags, group roles, user permissions, etc.

    The pattern used is:
    1) initialize request
    2) exec request
    3) check and return result

    Specific behaviors (permissions checking logic for authenticated and not superuser users, storage interaction)
    are specified in the routes defined in api.py
    """

    def __init__(self, request=None, response=None):
        super(ListHandler, self).__init__(request, response)

    def get(self, cont_name, list_name, **kwargs):
        # Fetch a single sublist element; 404 when not found.
        _id = kwargs.pop('cid')
        permchecker, storage, _, _, keycheck = self._initialize_request(cont_name, list_name, _id, query_params=kwargs)
        try:
            # Decorator order matters: keycheck validates the request key,
            # permchecker enforces access, then storage.exec_op hits the db.
            result = keycheck(permchecker(storage.exec_op))('GET', _id, query_params=kwargs)
        except APIStorageException as e:
            self.abort(400, e.message)
        if result is None:
            self.abort(404, 'Element not found in list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id))
        return result

    def post(self, cont_name, list_name, **kwargs):
        # Add an element to the sublist after validating the JSON payload.
        _id = kwargs.pop('cid')
        permchecker, storage, mongo_validator, payload_validator, keycheck = self._initialize_request(cont_name, list_name, _id)
        payload = self.request.json_body
        payload_validator(payload, 'POST')
        result = keycheck(mongo_validator(permchecker(storage.exec_op)))('POST', _id=_id, payload=payload)
        if result.modified_count == 1:
            return {'modified':result.modified_count}
        else:
            self.abort(404, 'Element not added in list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id))

    def put(self, cont_name, list_name, **kwargs):
        # Update an existing sublist element identified by the query params.
        _id = kwargs.pop('cid')
        permchecker, storage, mongo_validator, payload_validator, keycheck = self._initialize_request(cont_name, list_name, _id, query_params=kwargs)
        payload = self.request.json_body
        payload_validator(payload, 'PUT')
        try:
            result = keycheck(mongo_validator(permchecker(storage.exec_op)))('PUT', _id=_id, query_params=kwargs, payload=payload)
        except APIStorageException as e:
            self.abort(400, e.message)
        # abort if the query of the update wasn't able to find any matching documents
        if result.matched_count == 0:
            self.abort(404, 'Element not updated in list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id))
        else:
            return {'modified':result.modified_count}

    def delete(self, cont_name, list_name, **kwargs):
        # Remove a sublist element.
        _id = kwargs.pop('cid')
        permchecker, storage, _, _, keycheck = self._initialize_request(cont_name, list_name, _id, query_params=kwargs)
        try:
            result = keycheck(permchecker(storage.exec_op))('DELETE', _id, query_params=kwargs)
        except APIStorageException as e:
            self.abort(400, e.message)
        if result.modified_count == 1:
            return {'modified': result.modified_count}
        else:
            self.abort(404, 'Element not removed from list {} in container {} {}'.format(storage.list_name, storage.cont_name, _id))

    def _initialize_request(self, cont_name, list_name, _id, query_params=None):
        """
        This method loads:
        1) the container that will be modified
        2) the storage class that will handle the database actions
        3) the permission checker decorator that will be used
        4) the payload_validator checking the payload sent by the client against a json schema
        5) the mongo_validator that will check what will be sent to mongo against a json schema
        6) the keycheck decorator validating the request key
        """
        conf = list_handler_configurations[cont_name][list_name]
        storage = conf['storage']
        permchecker = conf['permchecker']
        if conf.get('get_full_container'):
            # Load the whole container document (needed e.g. for permissions).
            query_params = None
        container = storage.get_container(_id, query_params)
        if container is not None:
            # Superusers bypass checks entirely; public requests get the
            # dedicated public checker; otherwise bind the configured checker.
            if self.superuser_request:
                permchecker = always_ok
            elif self.public_request:
                permchecker = listauth.public_request(self, container)
            else:
                permchecker = permchecker(self, container)
        else:
            self.abort(404, 'Element {} not found in container {}'.format(_id, storage.cont_name))
        mongo_schema_uri = validators.schema_uri('mongo', conf.get('storage_schema_file'))
        mongo_validator = validators.decorator_from_schema_path(mongo_schema_uri)
        keycheck = validators.key_check(mongo_schema_uri)
        input_schema_uri = validators.schema_uri('input', conf.get('input_schema_file'))
        input_validator = validators.from_schema_path(input_schema_uri)
        return permchecker, storage, mongo_validator, input_validator, keycheck
class PermissionsListHandler(ListHandler):
    """
    PermissionsListHandler overrides post, put and delete methods of ListHandler to propagate permissions
    """

    def post(self, cont_name, list_name, **kwargs):
        """Add a permission, then propagate project permissions downward."""
        cid = kwargs.get('cid')
        outcome = super(PermissionsListHandler, self).post(cont_name, list_name, **kwargs)
        self._propagate_project_permissions(cont_name, cid)
        return outcome

    def put(self, cont_name, list_name, **kwargs):
        """Update a permission, then propagate project permissions downward."""
        cid = kwargs.get('cid')
        outcome = super(PermissionsListHandler, self).put(cont_name, list_name, **kwargs)
        self._propagate_project_permissions(cont_name, cid)
        return outcome

    def delete(self, cont_name, list_name, **kwargs):
        """Delete a permission, then propagate project permissions downward."""
        cid = kwargs.get('cid')
        outcome = super(PermissionsListHandler, self).delete(cont_name, list_name, **kwargs)
        self._propagate_project_permissions(cont_name, cid)
        return outcome

    def _propagate_project_permissions(self, cont_name, _id):
        """
        method to propagate permissions from a project to its sessions and acquisitions
        """
        if cont_name != 'projects':
            return
        try:
            oid = bson.ObjectId(_id)
            perms = config.db.projects.find_one({'_id': oid}, {'permissions': 1})['permissions']
            hierarchy.propagate_changes(cont_name, oid, {}, {'$set': {'permissions': perms}})
        except APIStorageException:
            self.abort(500, 'permissions not propagated from project {} to sessions'.format(_id))
class NotesListHandler(ListHandler):
    """
    NotesListHandler overrides post, put methods of ListHandler to add custom fields to the payload.
    e.g. _id, user, created, etc.
    """

    @staticmethod
    def _parse_timestamp(payload):
        # Convert a client-supplied timestamp string into a datetime in place.
        if payload.get('timestamp'):
            payload['timestamp'] = dateutil.parser.parse(payload['timestamp'])

    def post(self, cont_name, list_name, **kwargs):
        """Create a note, stamping _id, user and created/modified first."""
        _id = kwargs.pop('cid')
        permchecker, storage, mongo_validator, input_validator, keycheck = self._initialize_request(cont_name, list_name, _id)
        payload = self.request.json_body
        input_validator(payload, 'POST')
        now = datetime.datetime.utcnow()
        payload['_id'] = payload.get('_id') or str(bson.objectid.ObjectId())
        payload['user'] = payload.get('user', self.uid)
        payload['created'] = now
        payload['modified'] = now
        self._parse_timestamp(payload)
        result = keycheck(mongo_validator(permchecker(storage.exec_op)))('POST', _id=_id, payload=payload)
        if result.modified_count != 1:
            self.abort(404, 'Element not added in list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id))
        return {'modified': result.modified_count}

    def put(self, cont_name, list_name, **kwargs):
        """Update a note, refreshing its 'modified' timestamp."""
        _id = kwargs.pop('cid')
        permchecker, storage, mongo_validator, input_validator, keycheck = self._initialize_request(cont_name, list_name, _id, query_params=kwargs)
        payload = self.request.json_body
        input_validator(payload, 'PUT')
        payload['modified'] = datetime.datetime.utcnow()
        self._parse_timestamp(payload)
        result = keycheck(mongo_validator(permchecker(storage.exec_op)))('PUT', _id=_id, query_params=kwargs, payload=payload)
        # No matched document means the note was not found.
        if result.matched_count == 0:
            self.abort(404, 'Element not updated in list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id))
        return {'modified': result.modified_count}
class TagsListHandler(ListHandler):
    """
    TagsListHandler overrides put, delete methods of ListHandler to propagate changes to group tags
    If a tag is renamed or deleted at the group level, project, session and acquisition tags will also be renamed/deleted
    """

    def put(self, cont_name, list_name, **kwargs):
        """Rename a tag; on groups, propagate the rename to child containers."""
        outcome = super(TagsListHandler, self).put(cont_name, list_name, **kwargs)
        if cont_name == 'groups':
            old_value = kwargs.get('value')
            new_value = self.request.json_body.get('value')
            # Match containers still carrying the old tag that don't already
            # have the new one, then rewrite the matched array element.
            query = {'$and': [{'tags': old_value}, {'tags': {'$ne': new_value}}]}
            update = {'$set': {'tags.$': new_value}}
            self._propagate_group_tags(cont_name, kwargs.get('cid'), query, update)
        return outcome

    def delete(self, cont_name, list_name, **kwargs):
        """Delete a tag; on groups, pull it from all child containers."""
        outcome = super(TagsListHandler, self).delete(cont_name, list_name, **kwargs)
        if cont_name == 'groups':
            update = {'$pull': {'tags': kwargs.get('value')}}
            self._propagate_group_tags(cont_name, kwargs.get('cid'), {}, update)
        return outcome

    def _propagate_group_tags(self, cont_name, _id, query, update):
        """
        method to propagate tag changes from a group to its projects, sessions and acquisitions
        """
        try:
            hierarchy.propagate_changes(cont_name, _id, query, update)
        except APIStorageException:
            self.abort(500, 'tag change not propagated from group {}'.format(_id))
class FileListHandler(ListHandler):
    """
    This class implements a more specific logic for list of files as the api needs to interact with the filesystem.
    """

    def __init__(self, request=None, response=None):
        super(FileListHandler, self).__init__(request, response)

    def _check_ticket(self, ticket_id, _id, filename):
        # Validate a download ticket: it must exist and match the target
        # container, filename and requesting IP.
        ticket = config.db.downloads.find_one({'_id': ticket_id})
        if not ticket:
            self.abort(404, 'no such ticket')
        if ticket['target'] != _id or ticket['filename'] != filename or ticket['ip'] != self.request.client_addr:
            self.abort(400, 'ticket not for this resource or source IP')
        return ticket

    def _build_zip_info(self, filepath):
        """
        Builds a json response containing member and comment info for a zipfile
        """
        with zipfile.ZipFile(filepath) as zf:
            info = {}
            info['comment'] = zf.comment
            info['members'] = []
            for zi in zf.infolist():
                m = {}
                m['path'] = zi.filename
                m['size'] = zi.file_size
                m['timestamp'] = datetime.datetime(*zi.date_time)
                m['comment'] = zi.comment
                info['members'].append(m)
            return info

    def get(self, cont_name, list_name, **kwargs):
        """
        .. http:get:: /api/(cont_name)/(cid)/files/(file_name)

            Gets the ticket used to download the file when the ticket is not provided.
            Downloads the file when the ticket is provided.

            :query ticket: should be empty

            :param cont_name: one of ``projects``, ``sessions``, ``acquisitions``, ``collections``
            :type cont_name: string

            :param cid: Container ID
            :type cid: string

            :statuscode 200: no error
            :statuscode 400: file is not a zipfile (when zip info or a zip member was requested)
            :statuscode 409: file exists but the supplied hash does not match

            **Example request**:

            .. sourcecode:: http

                GET /api/acquisitions/57081d06b386a6dc79ca383c/files/fMRI%20Loc%20Word%20Face%20Obj.zip?ticket= HTTP/1.1
                Host: demo.flywheel.io
                Accept: */*

            **Example response**:

            .. sourcecode:: http

                HTTP/1.1 200 OK
                Vary: Accept-Encoding
                Content-Type: application/json; charset=utf-8
                {"ticket": "1e975e3d-21e9-41f4-bb97-261f03d35ba1"}
        """
        _id = kwargs.pop('cid')
        permchecker, storage, _, _, keycheck = self._initialize_request(cont_name, list_name, _id)
        list_name = storage.list_name
        filename = kwargs.get('name')

        # Check ticket id and skip permissions check if it clears
        ticket_id = self.get_param('ticket')
        if ticket_id:
            ticket = self._check_ticket(ticket_id, _id, filename)
            permchecker = always_ok

        # Grab fileinfo from db
        try:
            fileinfo = keycheck(permchecker(storage.exec_op))('GET', _id, query_params=kwargs)
        except APIStorageException as e:
            self.abort(400, e.message)
        if not fileinfo:
            self.abort(404, 'no such file')
        hash_ = self.get_param('hash')
        if hash_ and hash_ != fileinfo['hash']:
            self.abort(409, 'file exists, hash mismatch')
        filepath = os.path.join(config.get_item('persistent', 'data_path'), util.path_from_hash(fileinfo['hash']))

        # Request for download ticket
        if self.get_param('ticket') == '':
            ticket = util.download_ticket(self.request.client_addr, 'file', _id, filename, fileinfo['size'])
            return {'ticket': config.db.downloads.insert_one(ticket).inserted_id}

        # Request for info about zipfile
        elif self.is_true('info'):
            try:
                info = self._build_zip_info(filepath)
            except zipfile.BadZipfile:
                self.abort(400, 'not a zip file')
            return info

        # Request to download zipfile member
        elif self.get_param('member') is not None:
            zip_member = self.get_param('member')
            try:
                with zipfile.ZipFile(filepath) as zf:
                    self.response.headers['Content-Type'] = util.guess_mimetype(zip_member)
                    self.response.write(zf.open(zip_member).read())
            except zipfile.BadZipfile:
                self.abort(400, 'not a zip file')
            except KeyError:
                self.abort(400, 'zip file contains no such member')

        # Authenticated or ticketed download request
        else:
            self.response.app_iter = open(filepath, 'rb')
            self.response.headers['Content-Length'] = str(fileinfo['size'])  # must be set after setting app_iter
            if self.is_true('view'):
                self.response.headers['Content-Type'] = str(fileinfo.get('mimetype', 'application/octet-stream'))
            else:
                self.response.headers['Content-Type'] = 'application/octet-stream'
                self.response.headers['Content-Disposition'] = 'attachment; filename="' + filename + '"'

    def post(self, cont_name, list_name, **kwargs):
        # Upload one or more files to the container (targeted strategy).
        _id = kwargs.pop('cid')

        # Ugly hack: ensure cont_name is singular. Pass singular or plural to code that expects it.
        if cont_name.endswith('s'):
            cont_name_plural = cont_name
            cont_name = cont_name[:-1]
        else:
            cont_name_plural = cont_name + 's'

        # Authorize
        permchecker, _, _, _, _ = self._initialize_request(cont_name_plural, list_name, _id)
        permchecker(noop)('POST', _id=_id)

        return upload.process_upload(self.request, upload.Strategy.targeted, container_type=cont_name, id_=_id, origin=self.origin)

    def _check_packfile_token(self, project_id, token_id, check_user=True):
        """
        Check and update a packfile token assertion.
        """
        if token_id is None:
            raise Exception('Upload token is required')

        query = {
            'type': 'packfile',
            'project': project_id,
            '_id': token_id,
        }

        # Server-Sent Events are fired in the browser in such a way that one cannot dictate their headers.
        # For these endpoints, authentication must be disabled because the normal Authorization header will not be present.
        # In this case, the document id will serve instead.
        if check_user:
            query['user'] = self.uid

        # Check for correct token
        result = config.db['tokens'].find_one(query)
        if result is None:
            raise Exception('Invalid or expired upload token')

        # Update token timestamp
        config.db['tokens'].update_one({
            '_id': token_id,
        }, {
            '$set': {
                'modified': datetime.datetime.utcnow()
            }
        })

    def packfile_start(self, cont_name, **kwargs):
        """
        Declare intent to upload a packfile to a project, and receive an upload token identifier.
        """
        _id = kwargs.pop('cid')

        if cont_name != 'projects':
            raise Exception('Packfiles can only be targeted at projects')

        # Authorize: confirm project exists
        project = config.db['projects'].find_one({ '_id': bson.ObjectId(_id)})
        if project is None:
            raise Exception('Project ' + _id + ' does not exist')

        # Authorize: confirm user has admin/write perms
        if not self.superuser_request:
            perms = project.get('permissions', [])
            # for/else: 'else' runs only when no permission entry matched.
            for p in perms:
                if p['_id'] == self.uid and p['access'] in ('rw', 'admin'):
                    break
            else:
                raise Exception('Not authorized')

        timestamp = datetime.datetime.utcnow()

        # Save token for stateful uploads
        result = config.db['tokens'].insert_one({
            '_id': str(uuid.uuid4()),
            'type': 'packfile',
            'user': self.uid,
            'project': _id,
            'created': timestamp,
            'modified': timestamp,
        })

        return {
            'token': str(result.inserted_id)
        }

    def packfile(self, **kwargs):
        """
        Add files to an in-progress packfile.
        """
        project_id = kwargs.pop('cid')
        token_id = self.request.GET.get('token')
        self._check_packfile_token(project_id, token_id)
        return upload.process_upload(self.request, upload.Strategy.token, origin=self.origin, context={'token': token_id})

    def packfile_end(self, **kwargs):
        """
        Complete and save an uploaded packfile.
        """
        project_id = kwargs.pop('cid')
        token_id = self.request.GET.get('token')
        self._check_packfile_token(project_id, token_id, check_user=False)

        # Because this is an SSE endpoint, there is no form-post. Instead, read JSON data from request param
        metadata = json.loads(self.request.GET.get('metadata'))

        return upload.process_upload(self.request, upload.Strategy.packfile, origin=self.origin, context={'token': token_id}, response=self.response, metadata=metadata)
class AnalysesHandler(ListHandler):
def _check_ticket(self, ticket_id, _id, filename):
    """Look up and validate a download ticket for this analysis request."""
    ticket = config.db.downloads.find_one({'_id': ticket_id})
    if not ticket:
        self.abort(404, 'no such ticket')
    if ticket['ip'] != self.request.client_addr:
        self.abort(400, 'ticket not for this source IP')
    if not filename:
        # A ticket without a filename must be a batch (tar) ticket.
        return self._check_ticket_for_batch(ticket)
    # Single-file ticket: must match the requested file and container.
    mismatch = ticket.get('filename') != filename or ticket['target'] != _id
    if mismatch:
        self.abort(400, 'ticket not for this resource')
    return ticket
def _check_ticket_for_batch(self, ticket):
    """Abort unless the ticket was issued for a batch download."""
    ticket_type = ticket.get('type')
    if ticket_type != 'batch':
        self.abort(400, 'ticket not for this resource')
    return ticket
def put(self, *args, **kwargs):
    """Disallowed: analyses are immutable once created."""
    raise NotImplementedError("an analysis can't be modified")
def _default_analysis(self):
    """Return a skeleton analysis document (_id, timestamps, owner).

    A single utcnow() call is used so 'created' and 'modified' start
    identical; previously two separate calls could differ by microseconds.
    """
    now = datetime.datetime.utcnow()
    analysis_obj = {}
    analysis_obj['_id'] = str(bson.objectid.ObjectId())
    analysis_obj['created'] = now
    analysis_obj['modified'] = now
    analysis_obj['user'] = self.uid
    return analysis_obj
def post(self, cont_name, list_name, **kwargs):
    """
    .. http:post:: /api/(cont_name)/(cid)/analyses

        Default behavior:
            Creates an analysis object and uploads supplied input
            and output files.
        When param ``job`` is true:
            Creates an analysis object and job object that reference
            each other via ``job`` and ``destination`` fields. Job based
            analyses are only allowed at the session level.

        :param cont_name: one of ``projects``, ``sessions``, ``collections``
        :type cont_name: string

        :param cid: Container ID
        :type cid: string

        :query boolean job: a flag specifying the type of analysis

        :statuscode 200: no error
        :statuscode 400: Job-based analyses must be at the session level
        :statuscode 400: Job-based analyses must have ``job`` and ``analysis`` maps in JSON body

        **Example request**:

        .. sourcecode:: http

            POST /api/sessions/57081d06b386a6dc79ca383c/analyses HTTP/1.1
            {
                "analysis": {
                    "label": "Test Analysis 1"
                },
                "job" : {
                    "gear": "dcm_convert",
                    "inputs": {
                        "dicom": {
                            "type": "acquisition",
                            "id": "57081d06b386a6dc79ca386b",
                            "name" : "test_acquisition_dicom.zip"
                        }
                    },
                    "tags": ["example"]
                }
            }

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Vary: Accept-Encoding
            Content-Type: application/json; charset=utf-8
            {
                "_id": "573cb66b135d87002660597c"
            }
    """
    _id = kwargs.pop('cid')
    permchecker, storage, _, payload_validator, _ = self._initialize_request(cont_name, list_name, _id)
    permchecker(noop)('POST', _id=_id)

    if self.is_true('job'):
        # Job-backed analyses are only supported on sessions.
        if cont_name == 'sessions':
            payload = self.request.json_body
            payload_validator(payload.get('analysis',{}), 'POST')
            return self._create_job_and_analysis(cont_name, _id, storage, payload)
        else:
            self.abort(400, 'Analysis created via a job must be at the session level')

    # Default behavior: build the analysis from the uploaded files.
    payload = upload.process_upload(self.request, upload.Strategy.analysis, origin=self.origin)
    analysis = self._default_analysis()
    analysis.update(payload)
    result = storage.exec_op('POST', _id=_id, payload=analysis)
    if result.modified_count == 1:
        return {'_id': analysis['_id']}
    else:
        self.abort(500, 'Element not added in list analyses of container {} {}'.format(cont_name, _id))
def _create_job_and_analysis(self, cont_name, cid, storage, payload):
    """Insert an analysis document plus a job that targets it.

    The request payload must contain both an ``analysis`` map and a
    ``job`` map; the created documents reference each other via the
    job's ``destination`` and the analysis' ``job`` fields.
    """
    analysis_payload = payload.get('analysis')
    job_payload = payload.get('job')
    if job_payload is None or analysis_payload is None:
        self.abort(400, 'JSON body must contain map for "analysis" and "job"')

    # Start from the default analysis skeleton and overlay user fields.
    analysis = self._default_analysis()
    analysis.update(analysis_payload)

    # Mirror the job inputs onto both objects:
    #   inputs -- FileReferences keyed by input name (for the Job)
    #   files  -- plain file documents flagged input=True (for the analysis)
    inputs = {}
    files = []
    for input_name, input_map in job_payload['inputs'].items():
        fileref = create_filereference_from_dictionary(input_map)
        inputs[input_name] = fileref
        contref = create_containerreference_from_filereference(fileref)
        file_ = contref.find_file(fileref.name)
        if file_:
            file_['input'] = True
            files.append(file_)
    analysis['files'] = files

    result = storage.exec_op('POST', _id=cid, payload=analysis)
    if result.modified_count != 1:
        self.abort(500, 'Element not added in list analyses of container {} {}'.format(cont_name, cid))

    # Queue the job, making sure it carries the 'analysis' tag.
    tags = job_payload.get('tags', [])
    if 'analysis' not in tags:
        tags.append('analysis')
    gear_name = job_payload['gear']
    destination = create_containerreference_from_dictionary({'type': 'analysis', 'id': analysis['_id']})
    queued_job = Job(gear_name, inputs, destination=destination, tags=tags)
    job_id = queued_job.insert()
    if not job_id:
        self.abort(500, 'Job not created for analysis {} of container {} {}'.format(analysis['_id'], cont_name, cid))

    # Link the queued job back onto the stored analysis document.
    result = storage.exec_op('PUT', _id=cid, query_params={'_id': analysis['_id']}, payload={'job': job_id})
    return {'_id': analysis['_id']}
def download(self, cont_name, list_name, **kwargs):
    """
    .. http:get:: /api/(cont_name)/(cid)/analyses/(analysis_id)/files/(file_name)

        Download a file from an analysis or download a tar of all files.

        When no filename is provided, a tar of all input and output files is
        created. The first request to this endpoint without a ticket ID
        generates a download ticket. A request to this endpoint with a
        ticket ID downloads the file(s). If the analysis object is tied to
        a job, the input file(s) are inflated from the job's ``input`` array.

        :param cont_name: one of ``projects``, ``sessions``, ``collections``
        :type cont_name: string
        :param cid: Container ID
        :type cid: string
        :param analysis_id: Analysis ID
        :type analysis_id: string
        :param filename: (Optional) Filename of specific file to download
        :type filename: string
        :query string ticket: Download ticket ID
        :statuscode 200: no error
        :statuscode 404: No files on analysis ``analysis_id``
        :statuscode 404: Could not find file ``filename`` on analysis ``analysis_id``
    """
    _id = kwargs.pop('cid')
    permchecker, storage, _, _, _ = self._initialize_request(cont_name, list_name, _id)
    filename = kwargs.get('name')
    ticket_id = self.get_param('ticket')
    # Permission is checked on ticket creation only; the ticket itself
    # authorizes the subsequent download request.
    if not ticket_id:
        permchecker(noop)('GET', _id=_id)
    analysis_id = kwargs.get('_id')
    fileinfo = storage.get_fileinfo(_id, analysis_id, filename)
    if fileinfo is None:
        error_msg = 'No files on analysis {}'.format(analysis_id)
        if filename:
            error_msg = 'Could not find file {} on analysis {}'.format(filename, analysis_id)
        self.abort(404, error_msg)
    if not ticket_id:
        # First request: build a download ticket for either a single file
        # or a tar batch of every file on the analysis.
        if filename:
            total_size = fileinfo[0]['size']
            file_cnt = 1
            ticket = util.download_ticket(self.request.client_addr, 'file', _id, filename, total_size)
        else:
            targets, total_size, file_cnt = self._prepare_batch(fileinfo)
            filename = 'analysis_' + analysis_id + '.tar'
            ticket = util.download_ticket(self.request.client_addr, 'batch', targets, filename, total_size)
        return {
            'ticket': config.db.downloads.insert_one(ticket).inserted_id,
            'size': total_size,
            'file_cnt': file_cnt,
            'filename': filename
        }
    else:
        # Second request: validate the ticket and stream the payload.
        ticket = self._check_ticket(ticket_id, _id, filename)
        if not filename:
            self._send_batch(ticket)
            return
        if not fileinfo:
            # BUG FIX: the original used SQL-style '' escaping inside a
            # single-quoted Python literal ('doesn''t'), which Python parses
            # as two adjacent strings and concatenates to "doesnt".
            self.abort(404, "{} doesn't exist".format(filename))
        fileinfo = fileinfo[0]
        filepath = os.path.join(
            config.get_item('persistent', 'data_path'),
            util.path_from_hash(fileinfo['hash'])
        )
        filename = fileinfo['name']
        self.response.app_iter = open(filepath, 'rb')
        self.response.headers['Content-Length'] = str(fileinfo['size'])  # must be set after setting app_iter
        if self.is_true('view'):
            self.response.headers['Content-Type'] = str(fileinfo.get('mimetype', 'application/octet-stream'))
        else:
            self.response.headers['Content-Type'] = 'application/octet-stream'
        self.response.headers['Content-Disposition'] = 'attachment; filename=' + str(filename)
def _prepare_batch(self, fileinfo):
    """Build ``(filepath, archive_name, size)`` targets for a batch download.

    Returns the target list together with the aggregate byte size and
    file count. Files whose backing blob is missing on disk are
    silently skipped.

    NOTE: duplicated code from download.py -- we need a way to avoid this.
    """
    data_path = config.get_item('persistent', 'data_path')
    targets = []
    total_size = 0
    total_cnt = 0
    for info in fileinfo:
        filepath = os.path.join(data_path, util.path_from_hash(info['hash']))
        if not os.path.exists(filepath):  # silently skip missing files
            continue
        targets.append((filepath, 'analyses/' + info['name'], info['size']))
        total_size += info['size']
        total_cnt += 1
    return targets, total_size, total_cnt
def _send_batch(self, ticket):
    """Stream the ticket's files to the client as one tar archive."""
    headers = self.response.headers
    self.response.app_iter = download.archivestream(ticket)
    headers['Content-Type'] = 'application/octet-stream'
    headers['Content-Disposition'] = 'attachment; filename=' + str(ticket['filename'])
def delete_note(self, cont_name, list_name, **kwargs):
    """Remove one note from an analysis on the given container.

    Returns ``{'modified': 1}`` on success; 404 when nothing was removed.
    """
    container_id = kwargs.pop('cid')
    analysis_id = kwargs.pop('_id')
    permchecker, storage, _, _, _ = self._initialize_request(cont_name, list_name, container_id)
    note_id = kwargs.get('note_id')
    permchecker(noop)('DELETE', _id=container_id)
    result = storage.delete_note(_id=container_id, analysis_id=analysis_id, note_id=note_id)
    if result.modified_count != 1:
        self.abort(404, 'Element not removed from list {} of container {} {}'.format(storage.list_name, storage.cont_name, container_id))
    return {'modified': result.modified_count}
def add_note(self, cont_name, list_name, **kwargs):
    """Validate and append a note to an analysis on the given container.

    The note body is schema-validated, then stamped with a fresh ObjectId,
    the requesting user (unless supplied), and a UTC creation time.
    """
    container_id = kwargs.pop('cid')
    analysis_id = kwargs.get('_id')
    permchecker, storage, _, _, _ = self._initialize_request(cont_name, list_name, container_id)
    note = self.request.json_body

    # Validate against the container-specific notes input schema.
    notes_schema_file = list_handler_configurations[cont_name]['notes']['storage_schema_file']
    input_schema_uri = validators.schema_uri('input', notes_schema_file)
    input_validator = validators.from_schema_path(input_schema_uri)
    input_validator(note, 'POST')

    # Server-side stamps: id, author (defaults to requester), creation time.
    note['_id'] = str(bson.objectid.ObjectId())
    note['user'] = note.get('user', self.uid)
    note['created'] = datetime.datetime.utcnow()

    permchecker(noop)('POST', _id=container_id)
    result = storage.add_note(_id=container_id, analysis_id=analysis_id, payload=note)
    if result.modified_count != 1:
        self.abort(404, 'Element not added in list {} of container {} {}'.format(storage.list_name, storage.cont_name, container_id))
    return {'modified': result.modified_count}
|
# coding=utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow.compat.v1.losses as losses
from tf_slim.layers import layers
from tf_slim.ops import variables as variables_lib
from tf_slim.training import training
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as variables_lib2
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
# pylint: enable=g-import-not-at-top
def logistic_classifier(inputs):
  """One fully connected sigmoid unit: logistic regression over `inputs`."""
  predictions = layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
  return predictions
def batchnorm_classifier(inputs):
  """Batch-normalize `inputs`, then apply a single sigmoid output unit."""
  normalized = layers.batch_norm(inputs, decay=0.1, fused=False)
  return layers.fully_connected(normalized, 1, activation_fn=math_ops.sigmoid)
class ClipGradsTest(test.TestCase):
  """Tests for the gradient-norm clipping helpers in tf_slim.training."""

  def _grads_and_vars(self):
    # d(4x)/dx == 4, so the single gradient has norm 4 before clipping.
    var = variables_lib2.Variable(0.0)
    out = var * 4.0
    grads = gradients_impl.gradients([out], [var])
    return list(zip(grads, [var]))

  def testClipGrads(self):
    grads_and_vars = self._grads_and_vars()
    clipped = training.clip_gradient_norms(grads_and_vars, 3.0)
    with self.cached_session() as sess:
      sess.run(variables_lib2.global_variables_initializer())
      # Unclipped gradient keeps its norm; clipped one is capped at 3.
      self.assertAlmostEqual(4.0, grads_and_vars[0][0].eval())
      self.assertAlmostEqual(3.0, clipped[0][0].eval())

  def testClipGradsFn(self):
    grads_and_vars = self._grads_and_vars()
    clip_fn = training.clip_gradient_norms_fn(3.0)
    clipped = clip_fn(grads_and_vars)
    with self.cached_session() as sess:
      sess.run(variables_lib2.global_variables_initializer())
      # The fn-returning variant must clip exactly like the direct call.
      self.assertAlmostEqual(4.0, grads_and_vars[0][0].eval())
      self.assertAlmostEqual(3.0, clipped[0][0].eval())
class CreateTrainOpTest(test.TestCase):
  """Tests for training.create_train_op: collection registration,
  update-op handling for batch norm, and global-step behavior."""

  def setUp(self):
    np.random.seed(0)
    # Create an easy training set:
    self._inputs = np.random.rand(16, 4).astype(np.float32)
    self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)

  def testTrainOpInCollection(self):
    """create_train_op must register the op under GraphKeys.TRAIN_OP."""
    with ops.Graph().as_default():
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer)
      # Make sure the training op was recorded in the proper collection
      self.assertTrue(train_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))

  def testUseUpdateOps(self):
    """By default create_train_op runs UPDATE_OPS, so batch-norm moving
    statistics converge toward the data's mean/variance."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      expected_mean = np.mean(self._inputs, axis=(0))
      expected_var = np.var(self._inputs, axis=(0))
      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer)
      moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
      moving_variance = variables_lib.get_variables_by_name('moving_variance')[
          0]
      with self.cached_session() as session:
        # Initialize all variables
        session.run(variables_lib2.global_variables_initializer())
        mean, variance = session.run([moving_mean, moving_variance])
        # After initialization moving_mean == 0 and moving_variance == 1.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)
        for _ in range(10):
          session.run(train_op)
        mean = moving_mean.eval()
        variance = moving_variance.eval()
        # After 10 updates with decay 0.1 moving_mean == expected_mean and
        # moving_variance == expected_var.
        self.assertAllClose(mean, expected_mean)
        self.assertAllClose(variance, expected_var)

  def testEmptyUpdateOps(self):
    """Passing update_ops=[] suppresses the batch-norm moving updates."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer, update_ops=[])
      moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
      moving_variance = variables_lib.get_variables_by_name('moving_variance')[
          0]
      with self.cached_session() as session:
        # Initialize all variables
        session.run(variables_lib2.global_variables_initializer())
        mean, variance = session.run([moving_mean, moving_variance])
        # After initialization moving_mean == 0 and moving_variance == 1.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)
        for _ in range(10):
          session.run(train_op)
        mean = moving_mean.eval()
        variance = moving_variance.eval()
        # Since we skip update_ops the moving_vars are not updated.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)

  def testGlobalStepIsIncrementedByDefault(self):
    """Each run of the default train_op bumps the global step by one."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer)
      global_step = variables_lib.get_or_create_global_step()
      with self.cached_session() as session:
        # Initialize all variables
        session.run(variables_lib2.global_variables_initializer())
        for _ in range(10):
          session.run(train_op)
        # After 10 updates global_step should be 10.
        self.assertAllClose(global_step.eval(), 10)

  def testGlobalStepNotIncrementedWhenSetToNone(self):
    """With global_step=None the train_op leaves the step untouched."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer, global_step=None)
      global_step = variables_lib.get_or_create_global_step()
      with self.cached_session() as session:
        # Initialize all variables
        session.run(variables_lib2.global_variables_initializer())
        for _ in range(10):
          session.run(train_op)
        # Since train_op don't use global_step it shouldn't change.
        self.assertAllClose(global_step.eval(), 0)
class TrainBatchNormClassifierTest(test.TestCase):
  """End-to-end training of the batch-norm classifier on a separable set."""

  def setUp(self):
    # Build a linearly separable one-hot training set: the index of the
    # hot column encodes the label (plus noise in the low bit).
    np.random.seed(0)
    self._inputs = np.zeros((16, 4))
    self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
    for row in range(16):
      hot_col = int(2 * self._labels[row] + np.random.randint(0, 2))
      self._inputs[row, hot_col] = 1

  def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      input_tensor = constant_op.constant(self._inputs, dtype=dtypes.float32)
      label_tensor = constant_op.constant(self._labels, dtype=dtypes.float32)
      predictions = batchnorm_classifier(input_tensor)
      losses.log_loss(label_tensor, predictions)
      total_loss = losses.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(total_loss, optimizer)
      # Train for 300 steps without checkpoints or summaries; the toy
      # problem is separable, so the final loss should be near zero.
      loss = training.train(
          train_op,
          None,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
          save_summaries_steps=None,
          save_checkpoint_secs=None)
      self.assertLess(loss, .1)
class TrainTest(test.TestCase):
  """Tests for training.train: convergence, checkpoint resume/restore,
  training variable subsets, and gradient transformations."""

  def setUp(self):
    # Create an easy training set:
    # one-hot rows where the hot column index encodes the label
    # (plus noise in the low bit), so the problem is linearly separable.
    np.random.seed(0)
    self._inputs = np.zeros((16, 4))
    self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
    for i in range(16):
      j = int(2 * self._labels[i] + np.random.randint(0, 2))
      self._inputs[i, j] = 1

  def testCanAchieveZeroLoss(self):
    """300 SGD steps drive the logistic classifier's loss below .015."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      tf_predictions = logistic_classifier(tf_inputs)
      losses.log_loss(tf_labels, tf_predictions)
      total_loss = losses.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(total_loss, optimizer)
      loss = training.train(
          train_op,
          None,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
          save_summaries_steps=None,
          save_checkpoint_secs=None)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)

  def testTrainWithLocalVariable(self):
    """Training still converges when the model uses a local variable."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      # Multiplying by a local (non-checkpointed) variable must not break
      # initialization inside training.train.
      local_multiplier = variables_lib.local_variable(1.0)
      tf_predictions = logistic_classifier(tf_inputs) * local_multiplier
      losses.log_loss(tf_labels, tf_predictions)
      total_loss = losses.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(total_loss, optimizer)
      loss = training.train(
          train_op,
          None,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
          save_summaries_steps=None,
          save_checkpoint_secs=None)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)

  def testResumeTrainAchievesRoughlyTheSameLoss(self):
    """Stopping and resuming from checkpoints in the same logdir ends at
    roughly the same low loss as uninterrupted training."""
    number_of_steps = [300, 1, 5]
    logdir = os.path.join(self.get_temp_dir(), 'resume_train_same_loss')
    # Each iteration resumes from the checkpoint the previous one saved.
    for i in range(len(number_of_steps)):
      with ops.Graph().as_default():
        random_seed.set_random_seed(i)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        tf_predictions = logistic_classifier(tf_inputs)
        losses.log_loss(tf_labels, tf_predictions)
        total_loss = losses.get_total_loss()
        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
        train_op = training.create_train_op(total_loss, optimizer)
        saver = saver_lib.Saver()
        loss = training.train(
            train_op,
            logdir,
            hooks=[
                basic_session_run_hooks.StopAtStepHook(
                    num_steps=number_of_steps[i]),
                basic_session_run_hooks.CheckpointSaverHook(
                    logdir, save_steps=50, saver=saver),
            ],
            save_checkpoint_secs=None,
            save_summaries_steps=None)
        self.assertIsNotNone(loss)
        self.assertLess(loss, .015)

  def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
    """Build a train op for the logistic model, optionally scaling all
    gradients by `gradient_multiplier` via a transform_grads_fn."""
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_predictions = logistic_classifier(tf_inputs)
    losses.log_loss(tf_labels, tf_predictions)
    total_loss = losses.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(
        learning_rate=learning_rate)

    def transform_grads_fn(grads):
      # Only rescale when a non-identity multiplier was requested.
      if gradient_multiplier != 1.0:
        variables = variables_lib2.trainable_variables()
        gradient_multipliers = {var: gradient_multiplier for var in variables}
        with ops.name_scope('multiply_grads'):
          return training.multiply_gradients(grads, gradient_multipliers)
      else:
        return grads

    return training.create_train_op(
        total_loss, optimizer, transform_grads_fn=transform_grads_fn)

  def testTrainWithInitFromCheckpoint(self):
    """A model restored from a converged checkpoint stays converged after
    one more training step."""
    logdir1 = os.path.join(self.get_temp_dir(), 'tmp_logs1/')
    logdir2 = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
    if gfile.Exists(logdir1):  # For running on jenkins.
      gfile.DeleteRecursively(logdir1)
    if gfile.Exists(logdir2):  # For running on jenkins.
      gfile.DeleteRecursively(logdir2)

    # First, train the model one step (make sure the error is high).
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      train_op = self.create_train_op()
      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir1,
          hooks=[
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir1, save_steps=1, saver=saver),
              basic_session_run_hooks.StopAtStepHook(num_steps=1),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertGreater(loss, .5)

    # Next, train the model to convergence.
    with ops.Graph().as_default():
      random_seed.set_random_seed(1)
      train_op = self.create_train_op()
      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir1,
          hooks=[
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir1, save_steps=300, saver=saver),
              basic_session_run_hooks.StopAtStepHook(num_steps=300),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .02)

    # Finally, advance the model a single step and validate that the loss is
    # still low.
    with ops.Graph().as_default():
      random_seed.set_random_seed(2)
      train_op = self.create_train_op()

      model_variables = variables_lib2.global_variables()
      model_path = checkpoint_management.latest_checkpoint(logdir1)

      assign_fn = variables_lib.assign_from_checkpoint_fn(
          model_path, model_variables)

      def init_fn(_, session):
        # Restore the converged weights instead of random initialization.
        assign_fn(session)

      loss = training.train(
          train_op,
          None,
          scaffold=monitored_session.Scaffold(init_fn=init_fn),
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=1)],
          save_checkpoint_secs=None,
          save_summaries_steps=None)

      self.assertIsNotNone(loss)
      self.assertLess(loss, .02)

  def ModelLoss(self):
    """Build the logistic model on the fixture data and return its total
    loss tensor (helper for the variable-subset tests below)."""
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_predictions = logistic_classifier(tf_inputs)
    losses.log_loss(tf_labels, tf_predictions)
    return losses.get_total_loss()

  def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
    """Training weights-only, then biases-only, then everything: only the
    full-variable run should reach the lowest loss."""
    logdir = os.path.join(self.get_temp_dir(), 'tmp_logs3/')
    if gfile.Exists(logdir):  # For running on jenkins.
      gfile.DeleteRecursively(logdir)

    # First, train only the weights of the model.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      weights = variables_lib.get_variables_by_name('weights')

      train_op = training.create_train_op(
          total_loss, optimizer, variables_to_train=weights)

      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir,
          hooks=[
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir, save_steps=200, saver=saver),
              basic_session_run_hooks.StopAtStepHook(num_steps=200),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertGreater(loss, .015)
      self.assertLess(loss, .05)

    # Next, train the biases of the model.
    with ops.Graph().as_default():
      random_seed.set_random_seed(1)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      biases = variables_lib.get_variables_by_name('biases')

      train_op = training.create_train_op(
          total_loss, optimizer, variables_to_train=biases)

      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir,
          hooks=[
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir, save_steps=300, saver=saver),
              basic_session_run_hooks.StopAtStepHook(num_steps=300),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertGreater(loss, .015)
      self.assertLess(loss, .05)

    # Finally, train both weights and bias to get lower loss.
    with ops.Graph().as_default():
      random_seed.set_random_seed(2)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = training.create_train_op(total_loss, optimizer)
      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir,
          hooks=[
              basic_session_run_hooks.StopAtStepHook(num_steps=400),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)

  def testTrainingSubsetsOfVariablesOnlyUpdatesThoseVariables(self):
    """variables_to_train must restrict updates to exactly that subset."""
    # First, train only the weights of the model.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      weights, biases = variables_lib.get_variables()

      train_op = training.create_train_op(total_loss, optimizer)
      train_weights = training.create_train_op(
          total_loss, optimizer, variables_to_train=[weights])
      train_biases = training.create_train_op(
          total_loss, optimizer, variables_to_train=[biases])

      with self.cached_session() as session:
        # Initialize the variables.
        session.run(variables_lib2.global_variables_initializer())

        # Get the initial weights and biases values.
        weights_values, biases_values = session.run([weights, biases])
        self.assertGreater(np.linalg.norm(weights_values), 0)
        self.assertAlmostEqual(np.linalg.norm(biases_values), 0)

        # Update weights and biases.
        loss = session.run(train_op)
        self.assertGreater(loss, .5)
        new_weights, new_biases = session.run([weights, biases])

        # Check that the weights and biases have been updated.
        self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
        self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)

        weights_values, biases_values = new_weights, new_biases

        # Update only weights.
        loss = session.run(train_weights)
        self.assertGreater(loss, .5)
        new_weights, new_biases = session.run([weights, biases])

        # Check that the weights have been updated, but biases have not.
        self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
        self.assertAlmostEqual(np.linalg.norm(biases_values - new_biases), 0)
        weights_values = new_weights

        # Update only biases.
        loss = session.run(train_biases)
        self.assertGreater(loss, .5)
        new_weights, new_biases = session.run([weights, biases])

        # Check that the biases have been updated, but weights have not.
        self.assertAlmostEqual(np.linalg.norm(weights_values - new_weights), 0)
        self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)

  def testTrainWithAlteredGradients(self):
    # Use the same learning rate but different gradient multipliers
    # to train two models. Model with equivalently larger learning
    # rate (i.e., learning_rate * gradient_multiplier) has smaller
    # training loss.
    multipliers = [1., 1000.]
    number_of_steps = 10
    learning_rate = 0.001

    # First, train the model with equivalently smaller learning rate.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      train_op = self.create_train_op(
          learning_rate=learning_rate, gradient_multiplier=multipliers[0])

      loss0 = training.train(
          train_op,
          None,
          hooks=[
              basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertIsNotNone(loss0)
      self.assertGreater(loss0, .5)

    # Second, train the model with equivalently larger learning rate.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      train_op = self.create_train_op(
          learning_rate=learning_rate, gradient_multiplier=multipliers[1])
      loss1 = training.train(
          train_op,
          None,
          hooks=[
              basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertIsNotNone(loss1)
      self.assertLess(loss1, .5)

    # The loss of the model trained with larger learning rate should
    # be smaller.
    self.assertGreater(loss0, loss1)
if __name__ == '__main__':
  # Run every test case in this module via the TensorFlow test runner.
  test.main()
Internal change
PiperOrigin-RevId: 293015371
Change-Id: Ie92ff23aef46d43e95f50595477666d1beb776d9
# coding=utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow.compat.v1.losses as losses
from tf_slim.layers import layers
from tf_slim.ops import variables as variables_lib
from tf_slim.training import training
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as variables_lib2
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
# pylint: enable=g-import-not-at-top
def logistic_classifier(inputs):
  """Logistic regression: a lone fully connected unit with sigmoid output."""
  output = layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
  return output
def batchnorm_classifier(inputs):
  """Apply batch normalization, then a single fully connected sigmoid unit."""
  bn_out = layers.batch_norm(inputs, decay=0.1, fused=False)
  return layers.fully_connected(bn_out, 1, activation_fn=math_ops.sigmoid)
class ClipGradsTest(test.TestCase):
  """Tests for training.clip_gradient_norms and clip_gradient_norms_fn."""

  def testClipGrads(self):
    """clip_gradient_norms caps a gradient of norm 4 at the 3.0 limit."""
    xs = variables_lib2.Variable(0.0)
    ys = xs * 4.0
    # d(4x)/dx == 4, so the single gradient has norm 4 before clipping.
    grads = gradients_impl.gradients([ys], [xs])
    gradients_to_variables = list(zip(grads, [xs]))
    clipped_gradients_to_variables = training.clip_gradient_norms(
        gradients_to_variables, 3.0)
    with self.cached_session() as session:
      session.run(variables_lib2.global_variables_initializer())
      # Original gradient keeps its norm; clipped one is capped at 3.
      self.assertAlmostEqual(4.0, gradients_to_variables[0][0].eval())
      self.assertAlmostEqual(3.0, clipped_gradients_to_variables[0][0].eval())

  def testClipGradsFn(self):
    """The fn-returning variant must clip exactly like the direct call."""
    xs = variables_lib2.Variable(0.0)
    ys = xs * 4.0
    grads = gradients_impl.gradients([ys], [xs])
    gradients_to_variables = list(zip(grads, [xs]))
    clipped_gradients_to_variables = training.clip_gradient_norms_fn(3.0)(
        gradients_to_variables)
    with self.cached_session() as session:
      session.run(variables_lib2.global_variables_initializer())
      self.assertAlmostEqual(4.0, gradients_to_variables[0][0].eval())
      self.assertAlmostEqual(3.0, clipped_gradients_to_variables[0][0].eval())
class CreateTrainOpTest(test.TestCase):
  """Tests for training.create_train_op (update ops, global step handling)."""

  def setUp(self):
    # Create an easy training set:
    np.random.seed(0)
    self._inputs = np.random.rand(16, 4).astype(np.float32)
    self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)

  def testTrainOpInCollection(self):
    """The created train op must be registered in GraphKeys.TRAIN_OP."""
    with ops.Graph().as_default():
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer)
      # Make sure the training op was recorded in the proper collection
      self.assertTrue(train_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))

  def testUseUpdateOps(self):
    """By default the train op runs batch-norm moving-average update ops."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      expected_mean = np.mean(self._inputs, axis=(0))
      expected_var = np.var(self._inputs, axis=(0))
      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer)
      moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
      moving_variance = variables_lib.get_variables_by_name('moving_variance')[0]
      with self.cached_session() as session:
        # Initialize all variables
        session.run(variables_lib2.global_variables_initializer())
        mean, variance = session.run([moving_mean, moving_variance])
        # After initialization moving_mean == 0 and moving_variance == 1.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)
        for _ in range(10):
          session.run(train_op)
        mean = moving_mean.eval()
        variance = moving_variance.eval()
        # After 10 updates with decay 0.1 moving_mean == expected_mean and
        # moving_variance == expected_var.
        self.assertAllClose(mean, expected_mean)
        self.assertAllClose(variance, expected_var)

  def testEmptyUpdateOps(self):
    """Passing update_ops=[] suppresses the moving-average updates."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer, update_ops=[])
      moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
      moving_variance = variables_lib.get_variables_by_name('moving_variance')[0]
      with self.cached_session() as session:
        # Initialize all variables
        session.run(variables_lib2.global_variables_initializer())
        mean, variance = session.run([moving_mean, moving_variance])
        # After initialization moving_mean == 0 and moving_variance == 1.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)
        for _ in range(10):
          session.run(train_op)
        mean = moving_mean.eval()
        variance = moving_variance.eval()
        # Since we skip update_ops the moving_vars are not updated.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)

  def testGlobalStepIsIncrementedByDefault(self):
    """Each run of the train op increments the global step by one."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer)
      global_step = variables_lib.get_or_create_global_step()
      with self.cached_session() as session:
        # Initialize all variables
        session.run(variables_lib2.global_variables_initializer())
        for _ in range(10):
          session.run(train_op)
        # After 10 updates global_step should be 10.
        self.assertAllClose(global_step.eval(), 10)

  def testGlobalStepNotIncrementedWhenSetToNone(self):
    """With global_step=None the train op leaves the global step untouched."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer, global_step=None)
      global_step = variables_lib.get_or_create_global_step()
      with self.cached_session() as session:
        # Initialize all variables
        session.run(variables_lib2.global_variables_initializer())
        for _ in range(10):
          session.run(train_op)
        # Since train_op don't use global_step it shouldn't change.
        self.assertAllClose(global_step.eval(), 0)
class TrainBatchNormClassifierTest(test.TestCase):
  """End-to-end training of the batch-norm classifier to near-zero loss."""

  def setUp(self):
    # Create an easy training set:
    np.random.seed(0)
    self._inputs = np.zeros((16, 4))
    self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
    # One-hot-ish inputs so the problem is linearly separable.
    for i in range(16):
      j = int(2 * self._labels[i] + np.random.randint(0, 2))
      self._inputs[i, j] = 1

  def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    """300 SGD steps should drive log loss below 0.1."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      tf_predictions = batchnorm_classifier(tf_inputs)
      losses.log_loss(tf_labels, tf_predictions)
      total_loss = losses.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(total_loss, optimizer)
      loss = training.train(
          train_op,
          None,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
          save_summaries_steps=None,
          save_checkpoint_secs=None)
      self.assertLess(loss, .1)
class TrainTest(test.TestCase):
  """Tests for training.train: convergence, checkpoints, variable subsets."""

  def setUp(self):
    # Create an easy training set:
    np.random.seed(0)
    self._inputs = np.zeros((16, 4))
    self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
    # One-hot-ish inputs so the problem is linearly separable.
    for i in range(16):
      j = int(2 * self._labels[i] + np.random.randint(0, 2))
      self._inputs[i, j] = 1

  def testCanAchieveZeroLoss(self):
    """Plain logistic classifier converges to loss < 0.015 in 300 steps."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      tf_predictions = logistic_classifier(tf_inputs)
      losses.log_loss(tf_labels, tf_predictions)
      total_loss = losses.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(total_loss, optimizer)
      loss = training.train(
          train_op,
          None,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
          save_summaries_steps=None,
          save_checkpoint_secs=None)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)

  def testTrainWithLocalVariable(self):
    """Training still works when the graph contains a local variable."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      local_multiplier = variables_lib.local_variable(1.0)
      tf_predictions = logistic_classifier(tf_inputs) * local_multiplier
      losses.log_loss(tf_labels, tf_predictions)
      total_loss = losses.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(total_loss, optimizer)
      loss = training.train(
          train_op,
          None,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
          save_summaries_steps=None,
          save_checkpoint_secs=None)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)

  def testResumeTrainAchievesRoughlyTheSameLoss(self):
    """Resuming from checkpoints yields roughly the converged loss."""
    number_of_steps = [300, 1, 5]
    logdir = os.path.join(self.get_temp_dir(), 'resume_train_same_loss')
    # Each iteration restarts training from the latest checkpoint in logdir.
    for i in range(len(number_of_steps)):
      with ops.Graph().as_default():
        random_seed.set_random_seed(i)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        tf_predictions = logistic_classifier(tf_inputs)
        losses.log_loss(tf_labels, tf_predictions)
        total_loss = losses.get_total_loss()
        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
        train_op = training.create_train_op(total_loss, optimizer)
        saver = saver_lib.Saver()
        loss = training.train(
            train_op,
            logdir,
            hooks=[
                basic_session_run_hooks.StopAtStepHook(
                    num_steps=number_of_steps[i]),
                basic_session_run_hooks.CheckpointSaverHook(
                    logdir, save_steps=50, saver=saver),
            ],
            save_checkpoint_secs=None,
            save_summaries_steps=None)
        self.assertIsNotNone(loss)
        self.assertLess(loss, .015)

  def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
    """Builds a train op, optionally scaling gradients by a multiplier."""
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_predictions = logistic_classifier(tf_inputs)
    losses.log_loss(tf_labels, tf_predictions)
    total_loss = losses.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(
        learning_rate=learning_rate)

    def transform_grads_fn(grads):
      # Only rescale when a non-trivial multiplier was requested.
      if gradient_multiplier != 1.0:
        variables = variables_lib2.trainable_variables()
        gradient_multipliers = {var: gradient_multiplier for var in variables}
        with ops.name_scope('multiply_grads'):
          return training.multiply_gradients(grads, gradient_multipliers)
      else:
        return grads

    return training.create_train_op(
        total_loss, optimizer, transform_grads_fn=transform_grads_fn)

  def testTrainWithInitFromCheckpoint(self):
    """A model restored via init_fn keeps its low loss after one more step."""
    logdir1 = os.path.join(self.get_temp_dir(), 'tmp_logs1/')
    logdir2 = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
    if gfile.Exists(logdir1):  # For running on jenkins.
      gfile.DeleteRecursively(logdir1)
    if gfile.Exists(logdir2):  # For running on jenkins.
      gfile.DeleteRecursively(logdir2)
    # First, train the model one step (make sure the error is high).
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      train_op = self.create_train_op()
      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir1,
          hooks=[
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir1, save_steps=1, saver=saver),
              basic_session_run_hooks.StopAtStepHook(num_steps=1),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertGreater(loss, .5)
    # Next, train the model to convergence.
    with ops.Graph().as_default():
      random_seed.set_random_seed(1)
      train_op = self.create_train_op()
      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir1,
          hooks=[
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir1, save_steps=300, saver=saver),
              basic_session_run_hooks.StopAtStepHook(num_steps=300),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .02)
    # Finally, advance the model a single step and validate that the loss is
    # still low.
    with ops.Graph().as_default():
      random_seed.set_random_seed(2)
      train_op = self.create_train_op()
      model_variables = variables_lib2.global_variables()
      model_path = checkpoint_management.latest_checkpoint(logdir1)
      assign_fn = variables_lib.assign_from_checkpoint_fn(
          model_path, model_variables)

      def init_fn(_, session):
        # Scaffold init_fn callback: restore variables from the checkpoint.
        assign_fn(session)

      loss = training.train(
          train_op,
          None,
          scaffold=monitored_session.Scaffold(init_fn=init_fn),
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=1)],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .02)

  def ModelLoss(self):
    """Builds the logistic model and returns its total loss tensor."""
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_predictions = logistic_classifier(tf_inputs)
    losses.log_loss(tf_labels, tf_predictions)
    return losses.get_total_loss()

  def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
    """Training all variables beats training weights or biases alone."""
    logdir = os.path.join(self.get_temp_dir(), 'tmp_logs3/')
    if gfile.Exists(logdir):  # For running on jenkins.
      gfile.DeleteRecursively(logdir)
    # First, train only the weights of the model.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      weights = variables_lib.get_variables_by_name('weights')
      train_op = training.create_train_op(
          total_loss, optimizer, variables_to_train=weights)
      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir,
          hooks=[
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir, save_steps=200, saver=saver),
              basic_session_run_hooks.StopAtStepHook(num_steps=200),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertGreater(loss, .015)
      self.assertLess(loss, .05)
    # Next, train the biases of the model.
    with ops.Graph().as_default():
      random_seed.set_random_seed(1)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      biases = variables_lib.get_variables_by_name('biases')
      train_op = training.create_train_op(
          total_loss, optimizer, variables_to_train=biases)
      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir,
          hooks=[
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir, save_steps=300, saver=saver),
              basic_session_run_hooks.StopAtStepHook(num_steps=300),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertGreater(loss, .015)
      self.assertLess(loss, .05)
    # Finally, train both weights and bias to get lower loss.
    with ops.Graph().as_default():
      random_seed.set_random_seed(2)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(total_loss, optimizer)
      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir,
          hooks=[
              basic_session_run_hooks.StopAtStepHook(num_steps=400),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)

  def testTrainingSubsetsOfVariablesOnlyUpdatesThoseVariables(self):
    """variables_to_train restricts updates to exactly those variables."""
    # First, train only the weights of the model.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      weights, biases = variables_lib.get_variables()
      train_op = training.create_train_op(total_loss, optimizer)
      train_weights = training.create_train_op(
          total_loss, optimizer, variables_to_train=[weights])
      train_biases = training.create_train_op(
          total_loss, optimizer, variables_to_train=[biases])
      with self.cached_session() as session:
        # Initialize the variables.
        session.run(variables_lib2.global_variables_initializer())
        # Get the initial weights and biases values.
        weights_values, biases_values = session.run([weights, biases])
        self.assertGreater(np.linalg.norm(weights_values), 0)
        self.assertAlmostEqual(np.linalg.norm(biases_values), 0)
        # Update weights and biases.
        loss = session.run(train_op)
        self.assertGreater(loss, .45)
        new_weights, new_biases = session.run([weights, biases])
        # Check that the weights and biases have been updated.
        self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
        self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
        weights_values, biases_values = new_weights, new_biases
        # Update only weights.
        loss = session.run(train_weights)
        self.assertGreater(loss, .45)
        new_weights, new_biases = session.run([weights, biases])
        # Check that the weights have been updated, but biases have not.
        self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
        self.assertAlmostEqual(np.linalg.norm(biases_values - new_biases), 0)
        weights_values = new_weights
        # Update only biases.
        loss = session.run(train_biases)
        self.assertGreater(loss, .45)
        new_weights, new_biases = session.run([weights, biases])
        # Check that the biases have been updated, but weights have not.
        self.assertAlmostEqual(np.linalg.norm(weights_values - new_weights), 0)
        self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)

  def testTrainWithAlteredGradients(self):
    # Use the same learning rate but different gradient multipliers
    # to train two models. Model with equivalently larger learning
    # rate (i.e., learning_rate * gradient_multiplier) has smaller
    # training loss.
    multipliers = [1., 1000.]
    number_of_steps = 10
    learning_rate = 0.001
    # First, train the model with equivalently smaller learning rate.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      train_op = self.create_train_op(
          learning_rate=learning_rate, gradient_multiplier=multipliers[0])
      loss0 = training.train(
          train_op,
          None,
          hooks=[
              basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertIsNotNone(loss0)
      self.assertGreater(loss0, .5)
    # Second, train the model with equivalently larger learning rate.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      train_op = self.create_train_op(
          learning_rate=learning_rate, gradient_multiplier=multipliers[1])
      loss1 = training.train(
          train_op,
          None,
          hooks=[
              basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertIsNotNone(loss1)
      self.assertLess(loss1, .5)
    # The loss of the model trained with larger learning rate should
    # be smaller.
    self.assertGreater(loss0, loss1)
# Standard TensorFlow test entry point: run all test cases in this module.
if __name__ == '__main__':
  test.main()
|
import asyncio
from contextlib import asynccontextmanager
import pytest
from mitmproxy import dns, exceptions
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.connection import Address
from mitmproxy.net import udp
from mitmproxy.proxy import layers, server_hooks
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.test import taddons, tflow
from mitmproxy.test.tflow import tclient_conn, tdnsflow, tserver_conn
from mitmproxy.test.tutils import tdnsreq
class HelperAddon:
    """Test addon that records flows and supplies a fixed next-layer sequence.

    The method names (request/tcp_start/next_layer) are mitmproxy addon hooks
    invoked by name, so they must not be renamed.
    """

    def __init__(self):
        # Flows observed via the request/tcp_start hooks, in arrival order.
        self.flows = []
        # Layer factories handed out one per next_layer() call, in order.
        self.layers = [
            lambda ctx: layers.modes.HttpProxy(ctx),
            lambda ctx: layers.HttpLayer(ctx, HTTPMode.regular),
            lambda ctx: layers.TCPLayer(ctx),
        ]

    def request(self, f):
        # HTTP request hook: remember the flow.
        self.flows.append(f)

    def tcp_start(self, f):
        # TCP flow hook: remember the flow.
        self.flows.append(f)

    def next_layer(self, nl):
        # Pop the next predetermined layer factory and instantiate it.
        nl.layer = self.layers.pop(0)(nl.context)
@asynccontextmanager
async def tcp_server(handle_conn) -> "AsyncIterator[Address]":
    """Run a loopback TCP echo-style server for the duration of the context.

    Args:
        handle_conn: asyncio connection callback ``(reader, writer)``.

    Yields:
        The ``(host, port)`` address the server is listening on.

    The original annotation was ``-> Address``, which is incorrect for an
    ``@asynccontextmanager`` generator (it *yields* an Address); it is also
    evaluated eagerly at definition time. Use the accurate annotation as a
    string so it is purely documentary.
    """
    server = await asyncio.start_server(handle_conn, '127.0.0.1', 0)
    await server.start_serving()
    try:
        yield server.sockets[0].getsockname()
    finally:
        server.close()
async def test_start_stop():
    """Start the proxy, pass one HTTP request through it, then stop it.

    The tail of the test deliberately tears the connection down step by step
    (close writer, await handler, drain scheduled coroutines) — the order of
    these statements matters.
    """
    async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
        # Upstream server: expect exactly one GET and answer 204.
        assert await reader.readuntil(b"\r\n\r\n") == b"GET /hello HTTP/1.1\r\n\r\n"
        writer.write(b"HTTP/1.1 204 No Content\r\n\r\n")
        await writer.drain()
        writer.close()
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        state = HelperAddon()
        tctx.master.addons.add(state)
        async with tcp_server(server_handler) as addr:
            tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
            assert not ps.tcp_server
            await ps.running()
            await tctx.master.await_log("Proxy server listening", level="info")
            assert ps.tcp_server
            proxy_addr = ps.tcp_server.sockets[0].getsockname()[:2]
            reader, writer = await asyncio.open_connection(*proxy_addr)
            req = f"GET http://{addr[0]}:{addr[1]}/hello HTTP/1.1\r\n\r\n"
            writer.write(req.encode())
            assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 204 No Content\r\n\r\n"
            assert repr(ps) == "ProxyServer(running, 1 active conns)"
            tctx.configure(ps, server=False)
            await tctx.master.await_log("Stopping Proxy server", level="info")
            assert not ps.tcp_server
            assert state.flows
            assert state.flows[0].request.path == "/hello"
            assert state.flows[0].response.status_code == 204
            # Waiting here until everything is really torn down... takes some effort.
            conn_handler = list(ps._connections.values())[0]
            client_handler = conn_handler.transports[conn_handler.client].handler
            writer.close()
            await writer.wait_closed()
            try:
                await client_handler
            except asyncio.CancelledError:
                pass
            for _ in range(5):
                # Get all other scheduled coroutines to run.
                await asyncio.sleep(0)
            assert repr(ps) == "ProxyServer(stopped, 0 active conns)"
async def test_inject() -> None:
    """Inject TCP data into a live flow in both directions.

    Injecting toward the server (to_client=False) comes back upper-cased by
    the echo server; injecting toward the client (to_client=True) arrives
    verbatim.
    """
    async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
        # Upstream server: echo each byte upper-cased.
        while s := await reader.read(1):
            writer.write(s.upper())
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        state = HelperAddon()
        tctx.master.addons.add(state)
        async with tcp_server(server_handler) as addr:
            tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
            await ps.running()
            await tctx.master.await_log("Proxy server listening", level="info")
            proxy_addr = ps.tcp_server.sockets[0].getsockname()[:2]
            reader, writer = await asyncio.open_connection(*proxy_addr)
            req = f"CONNECT {addr[0]}:{addr[1]} HTTP/1.1\r\n\r\n"
            writer.write(req.encode())
            assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 200 Connection established\r\n\r\n"
            writer.write(b"a")
            assert await reader.read(1) == b"A"
            ps.inject_tcp(state.flows[0], False, b"b")
            assert await reader.read(1) == b"B"
            ps.inject_tcp(state.flows[0], True, b"c")
            assert await reader.read(1) == b"c"
async def test_inject_fail() -> None:
    """Injection into unsuitable or dead flows must only log a warning."""
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        # WebSocket injection into a plain HTTP flow.
        ps.inject_websocket(
            tflow.tflow(),
            True,
            b"test"
        )
        await tctx.master.await_log("Cannot inject WebSocket messages into non-WebSocket flows.", level="warn")
        # TCP injection into a plain HTTP flow.
        ps.inject_tcp(
            tflow.tflow(),
            True,
            b"test"
        )
        await tctx.master.await_log("Cannot inject TCP messages into non-TCP flows.", level="warn")
        # Correct flow type, but not backed by a live connection.
        ps.inject_websocket(
            tflow.twebsocketflow(),
            True,
            b"test"
        )
        await tctx.master.await_log("Flow is not from a live connection.", level="warn")
        ps.inject_websocket(
            tflow.ttcpflow(),
            True,
            b"test"
        )
        await tctx.master.await_log("Flow is not from a live connection.", level="warn")
async def test_warn_no_nextlayer():
    """
    Test that we log an error if the proxy server is started without NextLayer addon.
    That is a mean trap to fall into when writing end-to-end tests.
    """
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
        await ps.running()
        await tctx.master.await_log("Proxy server listening at", level="info")
        assert tctx.master.has_log("Warning: Running proxyserver without nextlayer addon!", level="warn")
        await ps.shutdown_server()
async def test_self_connect():
    """Connecting the proxy to its own listening address must be rejected."""
    server = tserver_conn()
    client = tclient_conn()
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
        await ps.running()
        await tctx.master.await_log("Proxy server listening at", level="info")
        # Point the upstream address at the proxy itself.
        server.address = ps.tcp_server.sockets[0].getsockname()[:2]
        await ps.server_connect(
            server_hooks.ServerConnectionHookData(server, client)
        )
        assert "Request destination unknown" in server.error
def test_options():
    """Option validation: invalid values raise, valid values are accepted."""
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        with pytest.raises(exceptions.OptionsError):
            tctx.configure(ps, body_size_limit="invalid")
        tctx.configure(ps, body_size_limit="1m")
        with pytest.raises(exceptions.OptionsError):
            tctx.configure(ps, stream_large_bodies="invalid")
        tctx.configure(ps, stream_large_bodies="1m")
        with pytest.raises(exceptions.OptionsError):
            tctx.configure(ps, dns_mode="invalid")
        tctx.configure(ps, dns_mode="simple")
        tctx.configure(ps, dns_mode="custom")
        # "forward" requires an upstream address; port defaults to 53.
        with pytest.raises(exceptions.OptionsError):
            tctx.configure(ps, dns_mode="forward")
        tctx.configure(ps, dns_mode="forward:8.8.8.8")
        assert ps.dns_forward_addr == ("8.8.8.8", 53)
        with pytest.raises(exceptions.OptionsError):
            tctx.configure(ps, dns_mode="forward:invalid:53")
        tctx.configure(ps, dns_mode="forward:8.8.8.8:53")
        assert ps.dns_forward_addr == ("8.8.8.8", 53)
async def test_startup_err(monkeypatch) -> None:
    """A bind failure at startup is logged as an error, not raised."""
    async def _raise(*_):
        raise OSError("cannot bind")
    monkeypatch.setattr(asyncio, "start_server", _raise)
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        await ps.running()
        await tctx.master.await_log("cannot bind", level="error")
async def test_shutdown_err() -> None:
    """A close failure at shutdown is logged; servers stay registered."""
    def _raise(*_):
        raise OSError("cannot close")
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        await ps.running()
        assert ps.running_servers
        for server in ps.running_servers:
            setattr(server, "close", _raise)
        await ps.shutdown_server()
        await tctx.master.await_log("cannot close", level="error")
        assert ps.running_servers
async def test_dns_simple() -> None:
    """In "simple" dns_mode the proxy answers DNS requests itself."""
    flow = tdnsflow(resp=False)
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        tctx.configure(ps, server=False, dns_server=True, dns_listen_port=5353, dns_mode="simple")
        await ps.running()
        await tctx.master.await_log("DNS server listening at", level="info")
        await ps.dns_request(flow)
        assert flow.response
        await ps.shutdown_server()
async def test_dns_not_simple() -> None:
    """In "custom" dns_mode the proxy must not answer DNS requests itself."""
    flow = tdnsflow(resp=False)
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        # Use a UDP port distinct from test_dns_simple (5353): reusing the
        # same port makes the tests collide when run in the same session.
        tctx.configure(ps, server=False, dns_server=True, dns_listen_port=5354, dns_mode="custom")
        await ps.running()
        await tctx.master.await_log("DNS server listening at", level="info")
        await ps.dns_request(flow)
        assert not flow.response
        await ps.shutdown_server()
async def test_dns() -> None:
    """End-to-end UDP round-trip against the proxy's simple DNS server."""
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        # Use a UDP port distinct from the other DNS tests (5353/5354) so
        # the sockets cannot collide when run in the same session.
        tctx.configure(ps, server=False, dns_server=True, dns_listen_port=5355, dns_mode="simple")
        await ps.running()
        await tctx.master.await_log("DNS server listening at", level="info")
        assert ps.dns_server
        r, w = await udp.open_connection(*ps.dns_server.sockets[0].getsockname()[:2])
        req = tdnsreq()
        w.write(req.packed)
        resp = dns.Message.unpack(await r.read(udp.MAX_DATAGRAM_SIZE))
        assert req.id == resp.id and "8.8.8.8" in str(resp)
        assert len(ps._connections) == 1
        # A second request from the same source reuses the same connection.
        req.id = req.id + 1
        w.write(req.packed)
        resp = dns.Message.unpack(await r.read(udp.MAX_DATAGRAM_SIZE))
        assert req.id == resp.id and "8.8.8.8" in str(resp)
        assert len(ps._connections) == 1
        await ps.shutdown_server()
[dns] alternate DNS test port
import asyncio
from contextlib import asynccontextmanager
import pytest
from mitmproxy import dns, exceptions
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.connection import Address
from mitmproxy.net import udp
from mitmproxy.proxy import layers, server_hooks
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.test import taddons, tflow
from mitmproxy.test.tflow import tclient_conn, tdnsflow, tserver_conn
from mitmproxy.test.tutils import tdnsreq
class HelperAddon:
    """Test addon that records flows and supplies a fixed next-layer sequence.

    The method names (request/tcp_start/next_layer) are mitmproxy addon hooks
    invoked by name, so they must not be renamed.
    """

    def __init__(self):
        # Flows observed via the request/tcp_start hooks, in arrival order.
        self.flows = []
        # Layer factories handed out one per next_layer() call, in order.
        self.layers = [
            lambda ctx: layers.modes.HttpProxy(ctx),
            lambda ctx: layers.HttpLayer(ctx, HTTPMode.regular),
            lambda ctx: layers.TCPLayer(ctx),
        ]

    def request(self, f):
        # HTTP request hook: remember the flow.
        self.flows.append(f)

    def tcp_start(self, f):
        # TCP flow hook: remember the flow.
        self.flows.append(f)

    def next_layer(self, nl):
        # Pop the next predetermined layer factory and instantiate it.
        nl.layer = self.layers.pop(0)(nl.context)
@asynccontextmanager
async def tcp_server(handle_conn) -> "AsyncIterator[Address]":
    """Run a loopback TCP server for the duration of the context.

    Args:
        handle_conn: asyncio connection callback ``(reader, writer)``.

    Yields:
        The ``(host, port)`` address the server is listening on.

    The original annotation was ``-> Address``, which is incorrect for an
    ``@asynccontextmanager`` generator (it *yields* an Address); it is also
    evaluated eagerly at definition time. Use the accurate annotation as a
    string so it is purely documentary.
    """
    server = await asyncio.start_server(handle_conn, '127.0.0.1', 0)
    await server.start_serving()
    try:
        yield server.sockets[0].getsockname()
    finally:
        server.close()
async def test_start_stop():
    """Start the proxy, pass one HTTP request through it, then stop it.

    The tail of the test deliberately tears the connection down step by step
    (close writer, await handler, drain scheduled coroutines) — the order of
    these statements matters.
    """
    async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
        # Upstream server: expect exactly one GET and answer 204.
        assert await reader.readuntil(b"\r\n\r\n") == b"GET /hello HTTP/1.1\r\n\r\n"
        writer.write(b"HTTP/1.1 204 No Content\r\n\r\n")
        await writer.drain()
        writer.close()
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        state = HelperAddon()
        tctx.master.addons.add(state)
        async with tcp_server(server_handler) as addr:
            tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
            assert not ps.tcp_server
            await ps.running()
            await tctx.master.await_log("Proxy server listening", level="info")
            assert ps.tcp_server
            proxy_addr = ps.tcp_server.sockets[0].getsockname()[:2]
            reader, writer = await asyncio.open_connection(*proxy_addr)
            req = f"GET http://{addr[0]}:{addr[1]}/hello HTTP/1.1\r\n\r\n"
            writer.write(req.encode())
            assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 204 No Content\r\n\r\n"
            assert repr(ps) == "ProxyServer(running, 1 active conns)"
            tctx.configure(ps, server=False)
            await tctx.master.await_log("Stopping Proxy server", level="info")
            assert not ps.tcp_server
            assert state.flows
            assert state.flows[0].request.path == "/hello"
            assert state.flows[0].response.status_code == 204
            # Waiting here until everything is really torn down... takes some effort.
            conn_handler = list(ps._connections.values())[0]
            client_handler = conn_handler.transports[conn_handler.client].handler
            writer.close()
            await writer.wait_closed()
            try:
                await client_handler
            except asyncio.CancelledError:
                pass
            for _ in range(5):
                # Get all other scheduled coroutines to run.
                await asyncio.sleep(0)
            assert repr(ps) == "ProxyServer(stopped, 0 active conns)"
async def test_inject() -> None:
    """Inject TCP data into a live flow in both directions.

    Injecting toward the server (to_client=False) comes back upper-cased by
    the echo server; injecting toward the client (to_client=True) arrives
    verbatim.
    """
    async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
        # Upstream server: echo each byte upper-cased.
        while s := await reader.read(1):
            writer.write(s.upper())
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        state = HelperAddon()
        tctx.master.addons.add(state)
        async with tcp_server(server_handler) as addr:
            tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
            await ps.running()
            await tctx.master.await_log("Proxy server listening", level="info")
            proxy_addr = ps.tcp_server.sockets[0].getsockname()[:2]
            reader, writer = await asyncio.open_connection(*proxy_addr)
            req = f"CONNECT {addr[0]}:{addr[1]} HTTP/1.1\r\n\r\n"
            writer.write(req.encode())
            assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 200 Connection established\r\n\r\n"
            writer.write(b"a")
            assert await reader.read(1) == b"A"
            ps.inject_tcp(state.flows[0], False, b"b")
            assert await reader.read(1) == b"B"
            ps.inject_tcp(state.flows[0], True, b"c")
            assert await reader.read(1) == b"c"
async def test_inject_fail() -> None:
    """Injection into unsuitable or dead flows must only log a warning."""
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        # WebSocket injection into a plain HTTP flow.
        ps.inject_websocket(
            tflow.tflow(),
            True,
            b"test"
        )
        await tctx.master.await_log("Cannot inject WebSocket messages into non-WebSocket flows.", level="warn")
        # TCP injection into a plain HTTP flow.
        ps.inject_tcp(
            tflow.tflow(),
            True,
            b"test"
        )
        await tctx.master.await_log("Cannot inject TCP messages into non-TCP flows.", level="warn")
        # Correct flow type, but not backed by a live connection.
        ps.inject_websocket(
            tflow.twebsocketflow(),
            True,
            b"test"
        )
        await tctx.master.await_log("Flow is not from a live connection.", level="warn")
        ps.inject_websocket(
            tflow.ttcpflow(),
            True,
            b"test"
        )
        await tctx.master.await_log("Flow is not from a live connection.", level="warn")
async def test_warn_no_nextlayer():
    """
    Test that we log an error if the proxy server is started without NextLayer addon.
    That is a mean trap to fall into when writing end-to-end tests.
    """
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
        await ps.running()
        await tctx.master.await_log("Proxy server listening at", level="info")
        assert tctx.master.has_log("Warning: Running proxyserver without nextlayer addon!", level="warn")
        await ps.shutdown_server()
async def test_self_connect():
    """Connecting the proxy to its own listening address must be rejected."""
    server = tserver_conn()
    client = tclient_conn()
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
        await ps.running()
        await tctx.master.await_log("Proxy server listening at", level="info")
        # Point the upstream address at the proxy itself.
        server.address = ps.tcp_server.sockets[0].getsockname()[:2]
        await ps.server_connect(
            server_hooks.ServerConnectionHookData(server, client)
        )
        assert "Request destination unknown" in server.error
def test_options():
    """Option validation: invalid values raise, valid values are accepted."""
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        with pytest.raises(exceptions.OptionsError):
            tctx.configure(ps, body_size_limit="invalid")
        tctx.configure(ps, body_size_limit="1m")
        with pytest.raises(exceptions.OptionsError):
            tctx.configure(ps, stream_large_bodies="invalid")
        tctx.configure(ps, stream_large_bodies="1m")
        with pytest.raises(exceptions.OptionsError):
            tctx.configure(ps, dns_mode="invalid")
        tctx.configure(ps, dns_mode="simple")
        tctx.configure(ps, dns_mode="custom")
        # "forward" requires an upstream address; port defaults to 53.
        with pytest.raises(exceptions.OptionsError):
            tctx.configure(ps, dns_mode="forward")
        tctx.configure(ps, dns_mode="forward:8.8.8.8")
        assert ps.dns_forward_addr == ("8.8.8.8", 53)
        with pytest.raises(exceptions.OptionsError):
            tctx.configure(ps, dns_mode="forward:invalid:53")
        tctx.configure(ps, dns_mode="forward:8.8.8.8:53")
        assert ps.dns_forward_addr == ("8.8.8.8", 53)
async def test_startup_err(monkeypatch) -> None:
    """A bind failure at startup is logged as an error, not raised."""
    async def _raise(*_):
        raise OSError("cannot bind")
    monkeypatch.setattr(asyncio, "start_server", _raise)
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        await ps.running()
        await tctx.master.await_log("cannot bind", level="error")
async def test_shutdown_err() -> None:
    """A close failure at shutdown is logged; servers stay registered."""
    def _raise(*_):
        raise OSError("cannot close")
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        await ps.running()
        assert ps.running_servers
        for server in ps.running_servers:
            setattr(server, "close", _raise)
        await ps.shutdown_server()
        await tctx.master.await_log("cannot close", level="error")
        assert ps.running_servers
async def test_dns_simple() -> None:
    """In "simple" dns_mode the proxy answers DNS requests itself."""
    flow = tdnsflow(resp=False)
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        tctx.configure(ps, server=False, dns_server=True, dns_listen_port=5353, dns_mode="simple")
        await ps.running()
        await tctx.master.await_log("DNS server listening at", level="info")
        await ps.dns_request(flow)
        assert flow.response
        await ps.shutdown_server()
async def test_dns_not_simple() -> None:
    """In "custom" mode the proxy does not resolve queries on its own."""
    query_flow = tdnsflow(resp=False)
    proxy = Proxyserver()
    with taddons.context(proxy) as tctx:
        tctx.configure(proxy, server=False, dns_server=True, dns_listen_port=5354, dns_mode="custom")
        await proxy.running()
        await tctx.master.await_log("DNS server listening at", level="info")
        await proxy.dns_request(query_flow)
        # Custom mode leaves resolution to addons, so no response is attached.
        assert not query_flow.response
        await proxy.shutdown_server()
async def test_dns() -> None:
    """End-to-end UDP round-trip against the in-process DNS server."""
    proxy = Proxyserver()
    with taddons.context(proxy) as tctx:
        tctx.configure(proxy, server=False, dns_server=True, dns_listen_port=5355, dns_mode="simple")
        await proxy.running()
        await tctx.master.await_log("DNS server listening at", level="info")
        assert proxy.dns_server
        reader, writer = await udp.open_connection(*proxy.dns_server.sockets[0].getsockname()[:2])
        query = tdnsreq()
        # Send two queries with distinct ids over the same client socket.
        for _ in range(2):
            writer.write(query.packed)
            reply = dns.Message.unpack(await reader.read(udp.MAX_DATAGRAM_SIZE))
            # The reply matches the query id and resolved to 8.8.8.8.
            assert query.id == reply.id and "8.8.8.8" in str(reply)
            # Both queries reuse a single proxy-side connection.
            assert len(proxy._connections) == 1
            query.id = query.id + 1
        await proxy.shutdown_server()
|
"""
This config file runs the simplest dev environment using sqlite, and db-based
sessions. Assumes structure:
/envroot/
/db # This is where it'll write the database file
/mitx # The location of this repo
/log # Where we're going to write log files
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .common import *
from logsettings import get_logger_config
# Dev-only flags: full tracebacks and template debugging. Never enable in
# a production deployment.
DEBUG = True
TEMPLATE_DEBUG = True
MITX_FEATURES['DISABLE_START_DATES'] = True
MITX_FEATURES['ENABLE_SQL_TRACKING_LOGS'] = True
MITX_FEATURES['SUBDOMAIN_COURSE_LISTINGS'] = False # Enable to test subdomains--otherwise, want all courses to show up
MITX_FEATURES['SUBDOMAIN_BRANDING'] = True
MITX_FEATURES['FORCE_UNIVERSITY_DOMAIN'] = None # show all university courses if in dev (ie don't use HTTP_HOST)
MITX_FEATURES['ENABLE_MANUAL_GIT_RELOAD'] = True
MITX_FEATURES['ENABLE_PSYCHOMETRICS'] = False # real-time psychometrics (eg item response theory analysis in instructor dashboard)
MITX_FEATURES['ENABLE_INSTRUCTOR_ANALYTICS'] = True
MITX_FEATURES['ENABLE_SERVICE_STATUS'] = True
MITX_FEATURES['ENABLE_HINTER_INSTRUCTOR_VIEW'] = True
MITX_FEATURES['ENABLE_INSTRUCTOR_BETA_DASHBOARD'] = True
MITX_FEATURES['MULTIPLE_ENROLLMENT_ROLES'] = True
MITX_FEATURES['ENABLE_SHOPPING_CART'] = True
FEEDBACK_SUBMISSION_EMAIL = "dummy@example.com"
WIKI_ENABLED = True
# Verbose console-friendly logging into /envroot/log.
LOGGING = get_logger_config(ENV_ROOT / "log",
                            logging_env="dev",
                            local_loglevel="DEBUG",
                            dev_env=True,
                            debug=True)
# Single sqlite file database under /envroot/db.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ENV_ROOT / "db" / "mitx.db",
    }
}
CACHES = {
    # This is the cache used for most things.
    # In staging/prod envs, the sessions also live here.
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'mitx_loc_mem_cache',
        'KEY_FUNCTION': 'util.memcache.safe_key',
    },
    # The general cache is what you get if you use our util.cache. It's used for
    # things like caching the course.xml file for different A/B test groups.
    # We set it to be a DummyCache to force reloading of course.xml in dev.
    # In staging environments, we would grab VERSION from data uploaded by the
    # push process.
    'general': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        'KEY_PREFIX': 'general',
        'VERSION': 4,
        'KEY_FUNCTION': 'util.memcache.safe_key',
    },
    'mongo_metadata_inheritance': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': '/var/tmp/mongo_metadata_inheritance',
        'TIMEOUT': 300,
        'KEY_FUNCTION': 'util.memcache.safe_key',
    }
}
XQUEUE_INTERFACE = {
    "url": "https://sandbox-xqueue.edx.org",
    "django_auth": {
        "username": "lms",
        "password": "***REMOVED***"
    },
    # NOTE(review): hard-coded sandbox credentials -- dev settings only; do
    # not reuse in any shared environment.
    "basic_auth": ('anant', 'agarwal'),
}
# Make the keyedcache startup warnings go away
CACHE_TIMEOUT = 0
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
# Static course listings per subdomain; 'default' is used when no subdomain
# matches.
COURSE_LISTINGS = {
    'default': ['BerkeleyX/CS169.1x/2012_Fall',
                'BerkeleyX/CS188.1x/2012_Fall',
                'HarvardX/CS50x/2012',
                'HarvardX/PH207x/2012_Fall',
                'MITx/3.091x/2012_Fall',
                'MITx/6.002x/2012_Fall',
                'MITx/6.00x/2012_Fall'],
    'berkeley': ['BerkeleyX/CS169/fa12',
                 'BerkeleyX/CS188/fa12'],
    'harvard': ['HarvardX/CS50x/2012H'],
    'mit': ['MITx/3.091/MIT_2012_Fall'],
    'sjsu': ['MITx/6.002x-EE98/2012_Fall_SJSU'],
}
# Maps a subdomain to the university brand shown on its pages.
SUBDOMAIN_BRANDING = {
    'sjsu': 'MITx',
    'mit': 'MITx',
    'berkeley': 'BerkeleyX',
    'harvard': 'HarvardX',
}
# List of `university` landing pages to display, even though they may not
# have an actual course with that org set
VIRTUAL_UNIVERSITIES = []
# Organizations that contain other organizations
META_UNIVERSITIES = {'UTx': ['UTAustinX']}
COMMENTS_SERVICE_KEY = "PUT_YOUR_API_KEY_HERE"
############################## Course static files ##########################
if os.path.isdir(DATA_DIR):
    # Add the full course repo if there is no static directory
    STATICFILES_DIRS += [
        # TODO (cpennington): When courses are stored in a database, this
        # should no longer be added to STATICFILES
        (course_dir, DATA_DIR / course_dir)
        for course_dir in os.listdir(DATA_DIR)
        if (os.path.isdir(DATA_DIR / course_dir) and
            not os.path.isdir(DATA_DIR / course_dir / 'static'))
    ]
    # Otherwise, add only the static directory from the course dir
    STATICFILES_DIRS += [
        # TODO (cpennington): When courses are stored in a database, this
        # should no longer be added to STATICFILES
        (course_dir, DATA_DIR / course_dir / 'static')
        for course_dir in os.listdir(DATA_DIR)
        if (os.path.isdir(DATA_DIR / course_dir / 'static'))
    ]
################################# mitx revision string #####################
# Shells out to git at import time; yields an empty string (no error) if
# git or the repo is unavailable -- acceptable only as a dev convenience.
MITX_VERSION_STRING = os.popen('cd %s; git describe' % REPO_ROOT).read().strip()
############################ Open ended grading config #####################
OPEN_ENDED_GRADING_INTERFACE = {
    'url' : 'http://127.0.0.1:3033/',
    'username' : 'lms',
    'password' : 'abcd',
    'staff_grading' : 'staff_grading',
    'peer_grading' : 'peer_grading',
    'grading_controller' : 'grading_controller'
}
############################## LMS Migration ##################################
MITX_FEATURES['ENABLE_LMS_MIGRATION'] = True
MITX_FEATURES['ACCESS_REQUIRE_STAFF_FOR_COURSE'] = False # require that user be in the staff_* group to be able to enroll
MITX_FEATURES['USE_XQA_SERVER'] = 'http://xqa:server@content-qa.mitx.mit.edu/xqa'
INSTALLED_APPS += ('lms_migration',)
LMS_MIGRATION_ALLOWED_IPS = ['127.0.0.1']
################################ OpenID Auth #################################
MITX_FEATURES['AUTH_USE_OPENID'] = True
MITX_FEATURES['AUTH_USE_OPENID_PROVIDER'] = True
MITX_FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'] = True
INSTALLED_APPS += ('external_auth',)
INSTALLED_APPS += ('django_openid_auth',)
OPENID_CREATE_USERS = False
OPENID_UPDATE_DETAILS_FROM_SREG = True
OPENID_SSO_SERVER_URL = 'https://www.google.com/accounts/o8/id' # TODO: accept more endpoints
OPENID_USE_AS_ADMIN_LOGIN = False
OPENID_PROVIDER_TRUSTED_ROOTS = ['*']
######################## MIT Certificates SSL Auth ############################
MITX_FEATURES['AUTH_USE_MIT_CERTIFICATES'] = True
################################# CELERY ######################################
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ DEBUG TOOLBAR ################################
INSTALLED_APPS += ('debug_toolbar',)
MIDDLEWARE_CLASSES += ('django_comment_client.utils.QueryCountDebugMiddleware',
                       'debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
    'debug_toolbar.panels.version.VersionDebugPanel',
    'debug_toolbar.panels.timer.TimerDebugPanel',
    'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
    'debug_toolbar.panels.headers.HeaderDebugPanel',
    'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
    'debug_toolbar.panels.sql.SQLDebugPanel',
    'debug_toolbar.panels.signals.SignalDebugPanel',
    'debug_toolbar.panels.logger.LoggingPanel',
    # Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and
    # Django=1.3.1/1.4 where requests to views get duplicated (your method gets
    # hit twice). So you can uncomment when you need to diagnose performance
    # problems, but you shouldn't leave it on.
    # 'debug_toolbar.panels.profiling.ProfilingDebugPanel',
)
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False
}
#################### FILE UPLOADS (for discussion forums) #####################
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = ENV_ROOT / "uploads"
MEDIA_URL = "/static/uploads/"
STATICFILES_DIRS.append(("uploads", MEDIA_ROOT))
FILE_UPLOAD_TEMP_DIR = ENV_ROOT / "uploads"
FILE_UPLOAD_HANDLERS = (
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
MITX_FEATURES['AUTH_USE_SHIB'] = True
MITX_FEATURES['RESTRICT_ENROLL_BY_REG_METHOD'] = True
########################### PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
########################## PEARSON TESTING ###########################
MITX_FEATURES['ENABLE_PEARSON_LOGIN'] = False
########################## ANALYTICS TESTING ########################
ANALYTICS_SERVER_URL = "http://127.0.0.1:9000/"
ANALYTICS_API_KEY = ""
##### segment-io ######
# If there's an environment variable set, grab it and turn on segment io
SEGMENT_IO_LMS_KEY = os.environ.get('SEGMENT_IO_LMS_KEY')
if SEGMENT_IO_LMS_KEY:
    MITX_FEATURES['SEGMENT_IO_LMS'] = True
###################### Payment ##############################
# CyberSource credentials come from the environment; default to empty strings.
CC_PROCESSOR['CyberSource']['SHARED_SECRET'] = os.environ.get('CYBERSOURCE_SHARED_SECRET', '')
CC_PROCESSOR['CyberSource']['MERCHANT_ID'] = os.environ.get('CYBERSOURCE_MERCHANT_ID', '')
CC_PROCESSOR['CyberSource']['SERIAL_NUMBER'] = os.environ.get('CYBERSOURCE_SERIAL_NUMBER', '')
CC_PROCESSOR['CyberSource']['PURCHASE_ENDPOINT'] = os.environ.get('CYBERSOURCE_PURCHASE_ENDPOINT', '')
########################## USER API ########################
EDX_API_KEY = None
####################### Shoppingcart ###########################
MITX_FEATURES['ENABLE_SHOPPING_CART'] = True
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
    from .private import * # pylint: disable=F0401
except ImportError:
    pass
Change DISABLE_START_DATES to False in lms.dev to more closely mirror a production environment.
"""
This config file runs the simplest dev environment using sqlite, and db-based
sessions. Assumes structure:
/envroot/
/db # This is where it'll write the database file
/mitx # The location of this repo
/log # Where we're going to write log files
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .common import *
from logsettings import get_logger_config
# Dev-only flags: full tracebacks and template debugging. Never enable in
# a production deployment.
DEBUG = True
TEMPLATE_DEBUG = True
# Kept False here so dev mirrors production start-date enforcement.
MITX_FEATURES['DISABLE_START_DATES'] = False
MITX_FEATURES['ENABLE_SQL_TRACKING_LOGS'] = True
MITX_FEATURES['SUBDOMAIN_COURSE_LISTINGS'] = False # Enable to test subdomains--otherwise, want all courses to show up
MITX_FEATURES['SUBDOMAIN_BRANDING'] = True
MITX_FEATURES['FORCE_UNIVERSITY_DOMAIN'] = None # show all university courses if in dev (ie don't use HTTP_HOST)
MITX_FEATURES['ENABLE_MANUAL_GIT_RELOAD'] = True
MITX_FEATURES['ENABLE_PSYCHOMETRICS'] = False # real-time psychometrics (eg item response theory analysis in instructor dashboard)
MITX_FEATURES['ENABLE_INSTRUCTOR_ANALYTICS'] = True
MITX_FEATURES['ENABLE_SERVICE_STATUS'] = True
MITX_FEATURES['ENABLE_HINTER_INSTRUCTOR_VIEW'] = True
MITX_FEATURES['ENABLE_INSTRUCTOR_BETA_DASHBOARD'] = True
MITX_FEATURES['MULTIPLE_ENROLLMENT_ROLES'] = True
MITX_FEATURES['ENABLE_SHOPPING_CART'] = True
FEEDBACK_SUBMISSION_EMAIL = "dummy@example.com"
WIKI_ENABLED = True
# Verbose console-friendly logging into /envroot/log.
LOGGING = get_logger_config(ENV_ROOT / "log",
                            logging_env="dev",
                            local_loglevel="DEBUG",
                            dev_env=True,
                            debug=True)
# Single sqlite file database under /envroot/db.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ENV_ROOT / "db" / "mitx.db",
    }
}
CACHES = {
    # This is the cache used for most things.
    # In staging/prod envs, the sessions also live here.
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'mitx_loc_mem_cache',
        'KEY_FUNCTION': 'util.memcache.safe_key',
    },
    # The general cache is what you get if you use our util.cache. It's used for
    # things like caching the course.xml file for different A/B test groups.
    # We set it to be a DummyCache to force reloading of course.xml in dev.
    # In staging environments, we would grab VERSION from data uploaded by the
    # push process.
    'general': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        'KEY_PREFIX': 'general',
        'VERSION': 4,
        'KEY_FUNCTION': 'util.memcache.safe_key',
    },
    'mongo_metadata_inheritance': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': '/var/tmp/mongo_metadata_inheritance',
        'TIMEOUT': 300,
        'KEY_FUNCTION': 'util.memcache.safe_key',
    }
}
XQUEUE_INTERFACE = {
    "url": "https://sandbox-xqueue.edx.org",
    "django_auth": {
        "username": "lms",
        "password": "***REMOVED***"
    },
    # NOTE(review): hard-coded sandbox credentials -- dev settings only; do
    # not reuse in any shared environment.
    "basic_auth": ('anant', 'agarwal'),
}
# Make the keyedcache startup warnings go away
CACHE_TIMEOUT = 0
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
# Static course listings per subdomain; 'default' is used when no subdomain
# matches.
COURSE_LISTINGS = {
    'default': ['BerkeleyX/CS169.1x/2012_Fall',
                'BerkeleyX/CS188.1x/2012_Fall',
                'HarvardX/CS50x/2012',
                'HarvardX/PH207x/2012_Fall',
                'MITx/3.091x/2012_Fall',
                'MITx/6.002x/2012_Fall',
                'MITx/6.00x/2012_Fall'],
    'berkeley': ['BerkeleyX/CS169/fa12',
                 'BerkeleyX/CS188/fa12'],
    'harvard': ['HarvardX/CS50x/2012H'],
    'mit': ['MITx/3.091/MIT_2012_Fall'],
    'sjsu': ['MITx/6.002x-EE98/2012_Fall_SJSU'],
}
# Maps a subdomain to the university brand shown on its pages.
SUBDOMAIN_BRANDING = {
    'sjsu': 'MITx',
    'mit': 'MITx',
    'berkeley': 'BerkeleyX',
    'harvard': 'HarvardX',
}
# List of `university` landing pages to display, even though they may not
# have an actual course with that org set
VIRTUAL_UNIVERSITIES = []
# Organizations that contain other organizations
META_UNIVERSITIES = {'UTx': ['UTAustinX']}
COMMENTS_SERVICE_KEY = "PUT_YOUR_API_KEY_HERE"
############################## Course static files ##########################
if os.path.isdir(DATA_DIR):
    # Add the full course repo if there is no static directory
    STATICFILES_DIRS += [
        # TODO (cpennington): When courses are stored in a database, this
        # should no longer be added to STATICFILES
        (course_dir, DATA_DIR / course_dir)
        for course_dir in os.listdir(DATA_DIR)
        if (os.path.isdir(DATA_DIR / course_dir) and
            not os.path.isdir(DATA_DIR / course_dir / 'static'))
    ]
    # Otherwise, add only the static directory from the course dir
    STATICFILES_DIRS += [
        # TODO (cpennington): When courses are stored in a database, this
        # should no longer be added to STATICFILES
        (course_dir, DATA_DIR / course_dir / 'static')
        for course_dir in os.listdir(DATA_DIR)
        if (os.path.isdir(DATA_DIR / course_dir / 'static'))
    ]
################################# mitx revision string #####################
# Shells out to git at import time; yields an empty string (no error) if
# git or the repo is unavailable -- acceptable only as a dev convenience.
MITX_VERSION_STRING = os.popen('cd %s; git describe' % REPO_ROOT).read().strip()
############################ Open ended grading config #####################
OPEN_ENDED_GRADING_INTERFACE = {
    'url' : 'http://127.0.0.1:3033/',
    'username' : 'lms',
    'password' : 'abcd',
    'staff_grading' : 'staff_grading',
    'peer_grading' : 'peer_grading',
    'grading_controller' : 'grading_controller'
}
############################## LMS Migration ##################################
MITX_FEATURES['ENABLE_LMS_MIGRATION'] = True
MITX_FEATURES['ACCESS_REQUIRE_STAFF_FOR_COURSE'] = False # require that user be in the staff_* group to be able to enroll
MITX_FEATURES['USE_XQA_SERVER'] = 'http://xqa:server@content-qa.mitx.mit.edu/xqa'
INSTALLED_APPS += ('lms_migration',)
LMS_MIGRATION_ALLOWED_IPS = ['127.0.0.1']
################################ OpenID Auth #################################
MITX_FEATURES['AUTH_USE_OPENID'] = True
MITX_FEATURES['AUTH_USE_OPENID_PROVIDER'] = True
MITX_FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'] = True
INSTALLED_APPS += ('external_auth',)
INSTALLED_APPS += ('django_openid_auth',)
OPENID_CREATE_USERS = False
OPENID_UPDATE_DETAILS_FROM_SREG = True
OPENID_SSO_SERVER_URL = 'https://www.google.com/accounts/o8/id' # TODO: accept more endpoints
OPENID_USE_AS_ADMIN_LOGIN = False
OPENID_PROVIDER_TRUSTED_ROOTS = ['*']
######################## MIT Certificates SSL Auth ############################
MITX_FEATURES['AUTH_USE_MIT_CERTIFICATES'] = True
################################# CELERY ######################################
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ DEBUG TOOLBAR ################################
INSTALLED_APPS += ('debug_toolbar',)
MIDDLEWARE_CLASSES += ('django_comment_client.utils.QueryCountDebugMiddleware',
                       'debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
    'debug_toolbar.panels.version.VersionDebugPanel',
    'debug_toolbar.panels.timer.TimerDebugPanel',
    'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
    'debug_toolbar.panels.headers.HeaderDebugPanel',
    'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
    'debug_toolbar.panels.sql.SQLDebugPanel',
    'debug_toolbar.panels.signals.SignalDebugPanel',
    'debug_toolbar.panels.logger.LoggingPanel',
    # Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and
    # Django=1.3.1/1.4 where requests to views get duplicated (your method gets
    # hit twice). So you can uncomment when you need to diagnose performance
    # problems, but you shouldn't leave it on.
    # 'debug_toolbar.panels.profiling.ProfilingDebugPanel',
)
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False
}
#################### FILE UPLOADS (for discussion forums) #####################
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = ENV_ROOT / "uploads"
MEDIA_URL = "/static/uploads/"
STATICFILES_DIRS.append(("uploads", MEDIA_ROOT))
FILE_UPLOAD_TEMP_DIR = ENV_ROOT / "uploads"
FILE_UPLOAD_HANDLERS = (
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
MITX_FEATURES['AUTH_USE_SHIB'] = True
MITX_FEATURES['RESTRICT_ENROLL_BY_REG_METHOD'] = True
########################### PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
########################## PEARSON TESTING ###########################
MITX_FEATURES['ENABLE_PEARSON_LOGIN'] = False
########################## ANALYTICS TESTING ########################
ANALYTICS_SERVER_URL = "http://127.0.0.1:9000/"
ANALYTICS_API_KEY = ""
##### segment-io ######
# If there's an environment variable set, grab it and turn on segment io
SEGMENT_IO_LMS_KEY = os.environ.get('SEGMENT_IO_LMS_KEY')
if SEGMENT_IO_LMS_KEY:
    MITX_FEATURES['SEGMENT_IO_LMS'] = True
###################### Payment ##############################
# CyberSource credentials come from the environment; default to empty strings.
CC_PROCESSOR['CyberSource']['SHARED_SECRET'] = os.environ.get('CYBERSOURCE_SHARED_SECRET', '')
CC_PROCESSOR['CyberSource']['MERCHANT_ID'] = os.environ.get('CYBERSOURCE_MERCHANT_ID', '')
CC_PROCESSOR['CyberSource']['SERIAL_NUMBER'] = os.environ.get('CYBERSOURCE_SERIAL_NUMBER', '')
CC_PROCESSOR['CyberSource']['PURCHASE_ENDPOINT'] = os.environ.get('CYBERSOURCE_PURCHASE_ENDPOINT', '')
########################## USER API ########################
EDX_API_KEY = None
####################### Shoppingcart ###########################
MITX_FEATURES['ENABLE_SHOPPING_CART'] = True
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
    from .private import * # pylint: disable=F0401
except ImportError:
    pass
|
"""
This config file runs the simplest dev environment using sqlite, and db-based
sessions. Assumes structure:
/envroot/
/db # This is where it'll write the database file
/mitx # The location of this repo
/log # Where we're going to write log files
"""
from .common import *
from .logsettings import get_logger_config
# Dev-only flags: full tracebacks and template debugging. Never enable in
# a production deployment.
DEBUG = True
TEMPLATE_DEBUG = True
MITX_FEATURES['DISABLE_START_DATES'] = True
MITX_FEATURES['ENABLE_SQL_TRACKING_LOGS'] = True
MITX_FEATURES['SUBDOMAIN_COURSE_LISTINGS'] = False # Enable to test subdomains--otherwise, want all courses to show up
MITX_FEATURES['SUBDOMAIN_BRANDING'] = True
MITX_FEATURES['FORCE_UNIVERSITY_DOMAIN'] = None # show all university courses if in dev (ie don't use HTTP_HOST)
MITX_FEATURES['ENABLE_MANUAL_GIT_RELOAD'] = True
MITX_FEATURES['ENABLE_PSYCHOMETRICS'] = False # real-time psychometrics (eg item response theory analysis in instructor dashboard)
WIKI_ENABLED = True
# Verbose console-friendly logging into /envroot/log.
LOGGING = get_logger_config(ENV_ROOT / "log",
                            logging_env="dev",
                            local_loglevel="DEBUG",
                            dev_env=True,
                            debug=True)
# Single sqlite file database under /envroot/db.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ENV_ROOT / "db" / "mitx.db",
    }
}
CACHES = {
    # This is the cache used for most things. Askbot will not work without a
    # functioning cache -- it relies on caching to load its settings in places.
    # In staging/prod envs, the sessions also live here.
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'mitx_loc_mem_cache',
        'KEY_FUNCTION': 'util.memcache.safe_key',
    },
    # The general cache is what you get if you use our util.cache. It's used for
    # things like caching the course.xml file for different A/B test groups.
    # We set it to be a DummyCache to force reloading of course.xml in dev.
    # In staging environments, we would grab VERSION from data uploaded by the
    # push process.
    'general': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        'KEY_PREFIX': 'general',
        'VERSION': 4,
        'KEY_FUNCTION': 'util.memcache.safe_key',
    }
}
XQUEUE_INTERFACE = {
    "url": "https://sandbox-xqueue.edx.org",
    "django_auth": {
        "username": "lms",
        "password": "***REMOVED***"
    },
    # NOTE(review): hard-coded sandbox credentials -- dev settings only; do
    # not reuse in any shared environment.
    "basic_auth": ('anant', 'agarwal'),
}
# Make the keyedcache startup warnings go away
CACHE_TIMEOUT = 0
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
# Static course listings per subdomain; 'default' is used when no subdomain
# matches.
COURSE_LISTINGS = {
    'default': ['BerkeleyX/CS169.1x/2012_Fall',
                'BerkeleyX/CS188.1x/2012_Fall',
                'HarvardX/CS50x/2012',
                'HarvardX/PH207x/2012_Fall',
                'MITx/3.091x/2012_Fall',
                'MITx/6.002x/2012_Fall',
                'MITx/6.00x/2012_Fall'],
    'berkeley': ['BerkeleyX/CS169/fa12',
                 'BerkeleyX/CS188/fa12'],
    'harvard': ['HarvardX/CS50x/2012H'],
    'mit': ['MITx/3.091/MIT_2012_Fall'],
    'sjsu': ['MITx/6.002x-EE98/2012_Fall_SJSU'],
}
# Maps a subdomain to the university brand shown on its pages.
SUBDOMAIN_BRANDING = {
    'sjsu': 'MITx',
    'mit': 'MITx',
    'berkeley': 'BerkeleyX',
    'harvard': 'HarvardX',
}
COMMENTS_SERVICE_KEY = "PUT_YOUR_API_KEY_HERE"
################################ LMS Migration #################################
MITX_FEATURES['ENABLE_LMS_MIGRATION'] = True
MITX_FEATURES['ACCESS_REQUIRE_STAFF_FOR_COURSE'] = False # require that user be in the staff_* group to be able to enroll
MITX_FEATURES['USE_XQA_SERVER'] = 'http://xqa:server@content-qa.mitx.mit.edu/xqa'
INSTALLED_APPS += ('lms_migration',)
LMS_MIGRATION_ALLOWED_IPS = ['127.0.0.1']
################################ OpenID Auth #################################
MITX_FEATURES['AUTH_USE_OPENID'] = True
MITX_FEATURES['AUTH_USE_OPENID_PROVIDER'] = True
MITX_FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'] = True
INSTALLED_APPS += ('external_auth',)
INSTALLED_APPS += ('django_openid_auth',)
OPENID_CREATE_USERS = False
OPENID_UPDATE_DETAILS_FROM_SREG = True
OPENID_SSO_SERVER_URL = 'https://www.google.com/accounts/o8/id' # TODO: accept more endpoints
OPENID_USE_AS_ADMIN_LOGIN = False
OPENID_PROVIDER_TRUSTED_ROOTS = ['*']
################################ MIT Certificates SSL Auth #################################
MITX_FEATURES['AUTH_USE_MIT_CERTIFICATES'] = True
################################ DEBUG TOOLBAR #################################
INSTALLED_APPS += ('debug_toolbar',)
MIDDLEWARE_CLASSES += ('django_comment_client.utils.QueryCountDebugMiddleware',
                       'debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
    'debug_toolbar.panels.version.VersionDebugPanel',
    'debug_toolbar.panels.timer.TimerDebugPanel',
    'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
    'debug_toolbar.panels.headers.HeaderDebugPanel',
    'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
    'debug_toolbar.panels.sql.SQLDebugPanel',
    'debug_toolbar.panels.signals.SignalDebugPanel',
    'debug_toolbar.panels.logger.LoggingPanel',
    # Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and
    # Django=1.3.1/1.4 where requests to views get duplicated (your method gets
    # hit twice). So you can uncomment when you need to diagnose performance
    # problems, but you shouldn't leave it on.
    # 'debug_toolbar.panels.profiling.ProfilingDebugPanel',
)
############################ FILE UPLOADS (ASKBOT) #############################
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = ENV_ROOT / "uploads"
MEDIA_URL = "/static/uploads/"
STATICFILES_DIRS.append(("uploads", MEDIA_ROOT))
FILE_UPLOAD_TEMP_DIR = ENV_ROOT / "uploads"
FILE_UPLOAD_HANDLERS = (
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
########################### PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '-r {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
Disable the Django Debug Toolbar (DjDT) redirect intercept, since it is annoying, especially for tests.
"""
This config file runs the simplest dev environment using sqlite, and db-based
sessions. Assumes structure:
/envroot/
/db # This is where it'll write the database file
/mitx # The location of this repo
/log # Where we're going to write log files
"""
from .common import *
from .logsettings import get_logger_config
# Dev-only flags: full tracebacks and template debugging. Never enable in
# a production deployment.
DEBUG = True
TEMPLATE_DEBUG = True
MITX_FEATURES['DISABLE_START_DATES'] = True
MITX_FEATURES['ENABLE_SQL_TRACKING_LOGS'] = True
MITX_FEATURES['SUBDOMAIN_COURSE_LISTINGS'] = False # Enable to test subdomains--otherwise, want all courses to show up
MITX_FEATURES['SUBDOMAIN_BRANDING'] = True
MITX_FEATURES['FORCE_UNIVERSITY_DOMAIN'] = None # show all university courses if in dev (ie don't use HTTP_HOST)
MITX_FEATURES['ENABLE_MANUAL_GIT_RELOAD'] = True
MITX_FEATURES['ENABLE_PSYCHOMETRICS'] = False # real-time psychometrics (eg item response theory analysis in instructor dashboard)
WIKI_ENABLED = True
# Verbose console-friendly logging into /envroot/log.
LOGGING = get_logger_config(ENV_ROOT / "log",
                            logging_env="dev",
                            local_loglevel="DEBUG",
                            dev_env=True,
                            debug=True)
# Single sqlite file database under /envroot/db.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ENV_ROOT / "db" / "mitx.db",
    }
}
CACHES = {
    # This is the cache used for most things. Askbot will not work without a
    # functioning cache -- it relies on caching to load its settings in places.
    # In staging/prod envs, the sessions also live here.
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'mitx_loc_mem_cache',
        'KEY_FUNCTION': 'util.memcache.safe_key',
    },
    # The general cache is what you get if you use our util.cache. It's used for
    # things like caching the course.xml file for different A/B test groups.
    # We set it to be a DummyCache to force reloading of course.xml in dev.
    # In staging environments, we would grab VERSION from data uploaded by the
    # push process.
    'general': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        'KEY_PREFIX': 'general',
        'VERSION': 4,
        'KEY_FUNCTION': 'util.memcache.safe_key',
    }
}
XQUEUE_INTERFACE = {
    "url": "https://sandbox-xqueue.edx.org",
    "django_auth": {
        "username": "lms",
        "password": "***REMOVED***"
    },
    # NOTE(review): hard-coded sandbox credentials -- dev settings only; do
    # not reuse in any shared environment.
    "basic_auth": ('anant', 'agarwal'),
}
# Make the keyedcache startup warnings go away
CACHE_TIMEOUT = 0
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
# Static course listings per subdomain; 'default' is used when no subdomain
# matches.
COURSE_LISTINGS = {
    'default': ['BerkeleyX/CS169.1x/2012_Fall',
                'BerkeleyX/CS188.1x/2012_Fall',
                'HarvardX/CS50x/2012',
                'HarvardX/PH207x/2012_Fall',
                'MITx/3.091x/2012_Fall',
                'MITx/6.002x/2012_Fall',
                'MITx/6.00x/2012_Fall'],
    'berkeley': ['BerkeleyX/CS169/fa12',
                 'BerkeleyX/CS188/fa12'],
    'harvard': ['HarvardX/CS50x/2012H'],
    'mit': ['MITx/3.091/MIT_2012_Fall'],
    'sjsu': ['MITx/6.002x-EE98/2012_Fall_SJSU'],
}
# Maps a subdomain to the university brand shown on its pages.
SUBDOMAIN_BRANDING = {
    'sjsu': 'MITx',
    'mit': 'MITx',
    'berkeley': 'BerkeleyX',
    'harvard': 'HarvardX',
}
COMMENTS_SERVICE_KEY = "PUT_YOUR_API_KEY_HERE"
################################ LMS Migration #################################
MITX_FEATURES['ENABLE_LMS_MIGRATION'] = True
MITX_FEATURES['ACCESS_REQUIRE_STAFF_FOR_COURSE'] = False # require that user be in the staff_* group to be able to enroll
MITX_FEATURES['USE_XQA_SERVER'] = 'http://xqa:server@content-qa.mitx.mit.edu/xqa'
INSTALLED_APPS += ('lms_migration',)
LMS_MIGRATION_ALLOWED_IPS = ['127.0.0.1']
################################ OpenID Auth #################################
MITX_FEATURES['AUTH_USE_OPENID'] = True
MITX_FEATURES['AUTH_USE_OPENID_PROVIDER'] = True
MITX_FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'] = True
INSTALLED_APPS += ('external_auth',)
INSTALLED_APPS += ('django_openid_auth',)
OPENID_CREATE_USERS = False
OPENID_UPDATE_DETAILS_FROM_SREG = True
OPENID_SSO_SERVER_URL = 'https://www.google.com/accounts/o8/id' # TODO: accept more endpoints
OPENID_USE_AS_ADMIN_LOGIN = False
OPENID_PROVIDER_TRUSTED_ROOTS = ['*']
################################ MIT Certificates SSL Auth #################################
MITX_FEATURES['AUTH_USE_MIT_CERTIFICATES'] = True
################################ DEBUG TOOLBAR #################################
INSTALLED_APPS += ('debug_toolbar',)
MIDDLEWARE_CLASSES += ('django_comment_client.utils.QueryCountDebugMiddleware',
                       'debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
    'debug_toolbar.panels.version.VersionDebugPanel',
    'debug_toolbar.panels.timer.TimerDebugPanel',
    'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
    'debug_toolbar.panels.headers.HeaderDebugPanel',
    'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
    'debug_toolbar.panels.sql.SQLDebugPanel',
    'debug_toolbar.panels.signals.SignalDebugPanel',
    'debug_toolbar.panels.logger.LoggingPanel',
    # Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and
    # Django=1.3.1/1.4 where requests to views get duplicated (your method gets
    # hit twice). So you can uncomment when you need to diagnose performance
    # problems, but you shouldn't leave it on.
    # 'debug_toolbar.panels.profiling.ProfilingDebugPanel',
)
# The redirect intercept page is annoying, especially for tests -- skip it.
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False
}
############################ FILE UPLOADS (ASKBOT) #############################
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = ENV_ROOT / "uploads"
MEDIA_URL = "/static/uploads/"
STATICFILES_DIRS.append(("uploads", MEDIA_ROOT))
FILE_UPLOAD_TEMP_DIR = ENV_ROOT / "uploads"
FILE_UPLOAD_HANDLERS = (
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
########################### PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '-r {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
|
"""
This part of code is the Q learning brain, which is a brain of the agent.
All decisions are made in here.
Using Tensorflow to build the neural network.
View more on 莫烦Python: https://morvanzhou.github.io/tutorials/
Using:
Tensorflow: 1.0
"""
import numpy as np
import pandas as pd
import tensorflow as tf
# Fix the NumPy and TensorFlow RNG seeds so training runs are reproducible.
np.random.seed(1)
tf.set_random_seed(1)
# Deep Q Network off-policy
class DeepQNetwork:
    """Deep Q-Network agent (off-policy TD learning) built on TensorFlow 1.x.

    Two networks share one architecture:
      * eval_net   -- trained on every learn() call.
      * target_net -- frozen snapshot used for the TD target; its variables
        are overwritten with eval_net's every `replace_target_iter` steps
        (including the very first learn() call, since the counter starts at 0).

    Transitions [s, a, r, s_] are kept in a fixed-size replay memory (a pandas
    DataFrame of shape (memory_size, n_features*2+2)) and sampled uniformly.
    """

    def __init__(
            self,
            n_actions,
            n_features,
            learning_rate=0.01,
            reward_decay=0.9,
            e_greedy=0.9,
            replace_target_iter=300,
            memory_size=500,
            batch_size=32,
            e_greedy_increment=None,
            output_graph=False,
    ):
        """Store hyper-parameters, build the graph and start a session.

        n_actions / n_features: action-space size / state-vector length.
        learning_rate: RMSProp step size.
        reward_decay: discount factor gamma.
        e_greedy: final greedy-action probability (epsilon ceiling).
        replace_target_iter: learn() steps between target-net syncs.
        memory_size / batch_size: replay capacity and minibatch size.
        e_greedy_increment: per-learn() epsilon increase; when None, epsilon
            starts (and stays) at its maximum.
        output_graph: if True, dump the graph for TensorBoard under logs/.
        """
        self.n_actions = n_actions
        self.n_features = n_features
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon_max = e_greedy
        self.replace_target_iter = replace_target_iter
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.epsilon_increment = e_greedy_increment
        # Anneal epsilon upward only when an increment is configured.
        self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max

        # total learning step
        self.learn_step_counter = 0

        # initialize zero memory [s, a, r, s_]
        self.memory = pd.DataFrame(np.zeros((self.memory_size, n_features*2+2)))

        # consist of [target_net, evaluate_net]
        self._build_net()
        self.sess = tf.Session()

        if output_graph:
            # $ tensorboard --logdir=logs
            # tf.train.SummaryWriter soon be deprecated, use following
            tf.summary.FileWriter("logs/", self.sess.graph)

        self.sess.run(tf.global_variables_initializer())
        self.cost_his = []  # per-step training loss, consumed by plot_cost()

    def _build_net(self):
        """Create eval_net and target_net (two-layer MLPs, 10 hidden units).

        Variables are tagged via `variables_collections` so that
        _replace_target_params() can pair them up for the periodic copy.
        """
        # ------------------ build evaluate_net ------------------
        self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s')  # input
        self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target')  # for calculating loss
        with tf.variable_scope('eval_net'):
            # first layer
            layer = tf.contrib.layers.fully_connected(
                inputs=self.s,
                num_outputs=10,
                activation_fn=tf.nn.relu,
                weights_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
                biases_initializer=tf.constant_initializer(0.1),
                variables_collections=['eval_net_params'],  # use it later when assign to target net
            )
            # second layer
            self.q_eval = tf.contrib.layers.fully_connected(
                inputs=layer,
                num_outputs=self.n_actions,
                activation_fn=None,
                weights_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
                biases_initializer=tf.constant_initializer(0.1),
                variables_collections=['eval_net_params'],  # use it later when assign to target net
            )

        with tf.name_scope('loss'):
            # Squared TD error, summed over the batch.
            self.loss = tf.reduce_sum(tf.squared_difference(self.q_target, self.q_eval))
        with tf.name_scope('train'):
            self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)

        # ------------------ build target_net ------------------
        self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_')  # input
        with tf.variable_scope('target_net'):
            # first layer
            layer = tf.contrib.layers.fully_connected(
                inputs=self.s_,
                num_outputs=10,
                activation_fn=tf.nn.relu,
                weights_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
                # Fix: was 0.3, inconsistent with eval_net's 0.1. Harmless in
                # practice (target params are overwritten on the first learn()
                # call) but aligned for consistency with eval_net.
                biases_initializer=tf.constant_initializer(0.1),
                trainable=False,
                variables_collections=['target_net_params'],  # use it later when assign to target net
            )
            # second layer
            self.q_next = tf.contrib.layers.fully_connected(
                inputs=layer,
                num_outputs=self.n_actions,
                activation_fn=None,
                # NOTE(review): stddev=0.1 here vs 0.3 in eval_net -- also
                # irrelevant after the first target sync; left as-is.
                weights_initializer=tf.random_normal_initializer(mean=0, stddev=0.1),
                biases_initializer=tf.constant_initializer(0.1),
                trainable=False,
                variables_collections=['target_net_params'],  # use it later when assign to target net
            )

    def store_transition(self, s, a, r, s_):
        """Append one transition, overwriting the oldest slot when full.

        s / s_ must flatten to n_features values each so the stacked row
        fits the (n_features*2+2)-wide memory -- TODO confirm at call sites.
        """
        if not hasattr(self, 'memory_counter'):
            self.memory_counter = 0

        transition = np.hstack((s, [a, r], s_))

        # replace the old memory with new memory
        index = self.memory_counter % self.memory_size
        self.memory.iloc[index, :] = transition

        self.memory_counter += 1

    def choose_action(self, observation):
        """Epsilon-greedy action selection for a single 1-D observation."""
        # to have batch dimension when feed into tf placeholder
        observation = observation[np.newaxis, :]

        if np.random.uniform() < self.epsilon:
            # forward feed the observation and get q value for every actions
            actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
            action = np.argmax(actions_value)
        else:
            # Explore: uniform random action.
            action = np.random.randint(0, self.n_actions)
        return action

    def _replace_target_params(self):
        """Copy every eval_net variable into its target_net counterpart."""
        t_params = tf.get_collection('target_net_params')
        e_params = tf.get_collection('eval_net_params')
        self.sess.run([tf.assign(t, e) for t, e in zip(t_params, e_params)])

    def learn(self):
        """Run one training step on a uniformly sampled minibatch.

        Requires store_transition() to have run at least once (it creates
        self.memory_counter); syncs the target net every
        `replace_target_iter` calls, then minimizes the TD error and anneals
        epsilon toward epsilon_max.
        """
        # check to replace target parameters
        if self.learn_step_counter % self.replace_target_iter == 0:
            self._replace_target_params()
            print('\ntarget_params_replaced\n')

        # sample batch memory from all memory; before the memory fills up we
        # sample with replacement, so batches may contain duplicates.
        batch_memory = self.memory.sample(self.batch_size) \
            if self.memory_counter > self.memory_size \
            else self.memory.iloc[:self.memory_counter].sample(self.batch_size, replace=True)

        q_next, q_eval = self.sess.run(
            [self.q_next, self.q_eval],
            feed_dict={
                self.s_: batch_memory.iloc[:, -self.n_features:],
                self.s: batch_memory.iloc[:, :self.n_features]
            })

        # change q_target w.r.t q_eval's action
        q_target = q_eval.copy()
        q_target[np.arange(self.batch_size, dtype=np.int32), batch_memory.iloc[:, self.n_features].astype(int)] = \
            batch_memory.iloc[:, self.n_features+1] + self.gamma * np.max(q_next, axis=1)

        """
        For example in this batch I have 2 samples and 3 actions:
        q_eval =
        [[1, 2, 3],
         [4, 5, 6]]

        q_target = q_eval =
        [[1, 2, 3],
         [4, 5, 6]]

        Then change q_target with the real q_target value w.r.t the q_eval's action.
        For example in:
            sample 0, I took action 0, and the max q_target value is -1;
            sample 1, I took action 2, and the max q_target value is -2:
        q_target =
        [[-1, 2, 3],
         [4, 5, -2]]

        So the (q_target - q_eval) becomes:
        [[(-1)-(1), 0, 0],
         [0, 0, (-2)-(6)]]

        We then backpropagate this error w.r.t the corresponded action to network,
        leave other action as error=0 cause we didn't choose it.
        """

        # train eval network
        _, self.cost = self.sess.run([self._train_op, self.loss],
                                     feed_dict={self.s: batch_memory.iloc[:, :self.n_features],
                                                self.q_target: q_target})
        self.cost_his.append(self.cost)

        # increasing epsilon
        self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
        self.learn_step_counter += 1

    def plot_cost(self):
        """Plot the training-loss history (one point per learn() call)."""
        import matplotlib.pyplot as plt
        plt.plot(np.arange(len(self.cost_his)), self.cost_his)
        plt.show()
Edited: revised copy of the DQN brain follows (target-net first-layer bias initializer aligned with the eval net, 0.3 → 0.1).
"""
This module implements the Q-learning brain of the agent; all of the agent's decisions are made here.
Using Tensorflow to build the neural network.
View more on 莫烦Python: https://morvanzhou.github.io/tutorials/
Using:
Tensorflow: 1.0
"""
import numpy as np
import pandas as pd
import tensorflow as tf
# Fix both RNG seeds so runs are reproducible.
np.random.seed(1)
tf.set_random_seed(1)
# Deep Q Network off-policy
class DeepQNetwork:
    """Deep Q-Network agent (off-policy TD learning) built on TensorFlow 1.x.

    Two networks share one architecture:
      * eval_net   -- trained on every learn() call.
      * target_net -- frozen snapshot used for the TD target; its variables
        are overwritten with eval_net's every `replace_target_iter` steps
        (including the very first learn() call, since the counter starts at 0).

    Transitions [s, a, r, s_] are kept in a fixed-size replay memory (a pandas
    DataFrame of shape (memory_size, n_features*2+2)) and sampled uniformly.
    """

    def __init__(
            self,
            n_actions,
            n_features,
            learning_rate=0.01,
            reward_decay=0.9,
            e_greedy=0.9,
            replace_target_iter=300,
            memory_size=500,
            batch_size=32,
            e_greedy_increment=None,
            output_graph=False,
    ):
        """Store hyper-parameters, build the graph and start a session.

        n_actions / n_features: action-space size / state-vector length.
        learning_rate: RMSProp step size.
        reward_decay: discount factor gamma.
        e_greedy: final greedy-action probability (epsilon ceiling).
        replace_target_iter: learn() steps between target-net syncs.
        memory_size / batch_size: replay capacity and minibatch size.
        e_greedy_increment: per-learn() epsilon increase; when None, epsilon
            starts (and stays) at its maximum.
        output_graph: if True, dump the graph for TensorBoard under logs/.
        """
        self.n_actions = n_actions
        self.n_features = n_features
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon_max = e_greedy
        self.replace_target_iter = replace_target_iter
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.epsilon_increment = e_greedy_increment
        # Anneal epsilon upward only when an increment is configured.
        self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max

        # total learning step
        self.learn_step_counter = 0

        # initialize zero memory [s, a, r, s_]
        self.memory = pd.DataFrame(np.zeros((self.memory_size, n_features*2+2)))

        # consist of [target_net, evaluate_net]
        self._build_net()
        self.sess = tf.Session()

        if output_graph:
            # $ tensorboard --logdir=logs
            # tf.train.SummaryWriter soon be deprecated, use following
            tf.summary.FileWriter("logs/", self.sess.graph)

        self.sess.run(tf.global_variables_initializer())
        # Per-step training loss, consumed by plot_cost().
        self.cost_his = []

    def _build_net(self):
        """Create eval_net and target_net (two-layer MLPs, 10 hidden units).

        Variables are tagged via `variables_collections` so that
        _replace_target_params() can pair them up for the periodic copy.
        """
        # ------------------ build evaluate_net ------------------
        self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s')  # input
        self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target')  # for calculating loss
        with tf.variable_scope('eval_net'):
            # first layer
            layer = tf.contrib.layers.fully_connected(
                inputs=self.s,
                num_outputs=10,
                activation_fn=tf.nn.relu,
                weights_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
                biases_initializer=tf.constant_initializer(0.1),
                variables_collections=['eval_net_params'],  # use it later when assign to target net
            )
            # second layer
            self.q_eval = tf.contrib.layers.fully_connected(
                inputs=layer,
                num_outputs=self.n_actions,
                activation_fn=None,
                weights_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
                biases_initializer=tf.constant_initializer(0.1),
                variables_collections=['eval_net_params'],  # use it later when assign to target net
            )

        with tf.name_scope('loss'):
            # Squared TD error, summed over the batch.
            self.loss = tf.reduce_sum(tf.squared_difference(self.q_target, self.q_eval))
        with tf.name_scope('train'):
            self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)

        # ------------------ build target_net ------------------
        self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_')  # input
        with tf.variable_scope('target_net'):
            # first layer
            layer = tf.contrib.layers.fully_connected(
                inputs=self.s_,
                num_outputs=10,
                activation_fn=tf.nn.relu,
                weights_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
                biases_initializer=tf.constant_initializer(0.1),
                trainable=False,
                variables_collections=['target_net_params'],  # use it later when assign to target net
            )
            # second layer
            self.q_next = tf.contrib.layers.fully_connected(
                inputs=layer,
                num_outputs=self.n_actions,
                activation_fn=None,
                # NOTE(review): stddev=0.1 here vs 0.3 in eval_net -- looks
                # inconsistent, but is irrelevant after the first target sync.
                weights_initializer=tf.random_normal_initializer(mean=0, stddev=0.1),
                biases_initializer=tf.constant_initializer(0.1),
                trainable=False,
                variables_collections=['target_net_params'],  # use it later when assign to target net
            )

    def store_transition(self, s, a, r, s_):
        """Append one transition, overwriting the oldest slot when full.

        s / s_ must flatten to n_features values each so the stacked row
        fits the (n_features*2+2)-wide memory -- TODO confirm at call sites.
        """
        if not hasattr(self, 'memory_counter'):
            self.memory_counter = 0

        transition = np.hstack((s, [a, r], s_))

        # replace the old memory with new memory
        index = self.memory_counter % self.memory_size
        self.memory.iloc[index, :] = transition

        self.memory_counter += 1

    def choose_action(self, observation):
        """Epsilon-greedy action selection for a single 1-D observation."""
        # to have batch dimension when feed into tf placeholder
        observation = observation[np.newaxis, :]

        if np.random.uniform() < self.epsilon:
            # forward feed the observation and get q value for every actions
            actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
            action = np.argmax(actions_value)
        else:
            # Explore: uniform random action.
            action = np.random.randint(0, self.n_actions)
        return action

    def _replace_target_params(self):
        """Copy every eval_net variable into its target_net counterpart."""
        t_params = tf.get_collection('target_net_params')
        e_params = tf.get_collection('eval_net_params')
        self.sess.run([tf.assign(t, e) for t, e in zip(t_params, e_params)])

    def learn(self):
        """Run one training step on a uniformly sampled minibatch.

        NOTE(review): assumes store_transition() ran at least once -- it is
        what creates self.memory_counter; calling learn() earlier raises
        AttributeError.
        """
        # check to replace target parameters
        if self.learn_step_counter % self.replace_target_iter == 0:
            self._replace_target_params()
            print('\ntarget_params_replaced\n')

        # sample batch memory from all memory; before the memory fills up we
        # sample with replacement, so batches may contain duplicates.
        batch_memory = self.memory.sample(self.batch_size) \
            if self.memory_counter > self.memory_size \
            else self.memory.iloc[:self.memory_counter].sample(self.batch_size, replace=True)

        q_next, q_eval = self.sess.run(
            [self.q_next, self.q_eval],
            feed_dict={
                self.s_: batch_memory.iloc[:, -self.n_features:],
                self.s: batch_memory.iloc[:, :self.n_features]
            })

        # change q_target w.r.t q_eval's action
        q_target = q_eval.copy()
        q_target[np.arange(self.batch_size, dtype=np.int32), batch_memory.iloc[:, self.n_features].astype(int)] = \
            batch_memory.iloc[:, self.n_features+1] + self.gamma * np.max(q_next, axis=1)

        """
        For example in this batch I have 2 samples and 3 actions:
        q_eval =
        [[1, 2, 3],
         [4, 5, 6]]

        q_target = q_eval =
        [[1, 2, 3],
         [4, 5, 6]]

        Then change q_target with the real q_target value w.r.t the q_eval's action.
        For example in:
            sample 0, I took action 0, and the max q_target value is -1;
            sample 1, I took action 2, and the max q_target value is -2:
        q_target =
        [[-1, 2, 3],
         [4, 5, -2]]

        So the (q_target - q_eval) becomes:
        [[(-1)-(1), 0, 0],
         [0, 0, (-2)-(6)]]

        We then backpropagate this error w.r.t the corresponded action to network,
        leave other action as error=0 cause we didn't choose it.
        """

        # train eval network
        _, self.cost = self.sess.run([self._train_op, self.loss],
                                     feed_dict={self.s: batch_memory.iloc[:, :self.n_features],
                                                self.q_target: q_target})
        self.cost_his.append(self.cost)

        # increasing epsilon
        self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
        self.learn_step_counter += 1

    def plot_cost(self):
        """Plot the training-loss history (one point per learn() call)."""
        import matplotlib.pyplot as plt
        plt.plot(np.arange(len(self.cost_his)), self.cost_his)
        plt.show()
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import json
import logging
from os.path import join as pjoin
import six
from st2client.commands import resource
from st2client.commands.noop import NoopCommand
from st2client.formatters import table
from st2client.models.keyvalue import KeyValuePair
from st2client.utils.date import format_isodate_for_user_timezone
LOG = logging.getLogger(__name__)

# Default --scope values per operation: listing spans all scopes, while
# get and create/update/delete default to the system scope.
DEFAULT_LIST_SCOPE = 'all'
DEFAULT_GET_SCOPE = 'system'
DEFAULT_CUD_SCOPE = 'system'
class KeyValuePairBranch(resource.ResourceBranch):
    """CLI branch for the datastore ('key') resource.

    Wires up the standard list/get/delete commands plus the key-specific
    set/load/delete_by_prefix commands; create and update are not supported.
    """

    def __init__(self, description, app, subparsers, parent_parser=None):
        super(KeyValuePairBranch, self).__init__(
            KeyValuePair, description, app, subparsers,
            parent_parser=parent_parser,
            commands={
                'list': KeyValuePairListCommand,
                'get': KeyValuePairGetCommand,
                'delete': KeyValuePairDeleteCommand,
                # NoopCommand placeholders satisfy the parent constructor;
                # they are deleted again just below.
                'create': NoopCommand,
                'update': NoopCommand
            })

        # Registers extended commands
        self.commands['set'] = KeyValuePairSetCommand(self.resource, self.app,
                                                      self.subparsers)
        self.commands['load'] = KeyValuePairLoadCommand(
            self.resource, self.app, self.subparsers)
        self.commands['delete_by_prefix'] = KeyValuePairDeleteByPrefixCommand(
            self.resource, self.app, self.subparsers)

        # Remove unsupported commands
        # TODO: Refactor parent class and make it nicer
        del self.commands['create']
        del self.commands['update']
class KeyValuePairListCommand(resource.ResourceTableCommand):
    """CLI 'key list' command: tabular listing of key-value pairs."""

    # Columns rendered by the table formatter.
    display_attributes = ['name', 'value', 'secret', 'encrypted', 'scope', 'user',
                          'expire_timestamp']
    # Per-attribute render hooks applied before display.
    attribute_transform_functions = {
        'expire_timestamp': format_isodate_for_user_timezone,
    }

    def __init__(self, resource, *args, **kwargs):
        """Register the 'list' sub-command and its filter options."""
        self.default_limit = 50

        super(KeyValuePairListCommand, self).__init__(resource, 'list',
                                                      'Get the list of the %s most recent %s.' %
                                                      (self.default_limit,
                                                       resource.get_plural_display_name().lower()),
                                                      *args, **kwargs)
        self.resource_name = resource.get_plural_display_name().lower()

        # Filter options
        self.parser.add_argument('--prefix', help=('Only return values with names starting with '
                                                   'the provided prefix.'))
        self.parser.add_argument('--decrypt', action='store_true',
                                 help='Decrypt secrets and displays plain text.')
        self.parser.add_argument('-s', '--scope', default=DEFAULT_LIST_SCOPE, dest='scope',
                                 help='Scope item is under. Example: "user".')
        self.parser.add_argument('-u', '--user', dest='user', default=None,
                                 help='User for user scoped items (admin only).')
        self.parser.add_argument('-n', '--last', type=int, dest='last',
                                 default=self.default_limit,
                                 help=('List N most recent %s. Use -n -1 to fetch the full result \
set.' % self.resource_name))

    @resource.add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Query the API with the CLI filters; returns (instances, count)."""
        # Filtering options
        if args.prefix:
            kwargs['prefix'] = args.prefix

        decrypt = getattr(args, 'decrypt', False)
        kwargs['params'] = {'decrypt': str(decrypt).lower()}
        scope = getattr(args, 'scope', DEFAULT_LIST_SCOPE)
        kwargs['params']['scope'] = scope
        if args.user:
            kwargs['params']['user'] = args.user
        kwargs['params']['limit'] = args.last

        return self.manager.query_with_count(**kwargs)

    @resource.add_auth_token_to_kwargs_from_cli
    def run_and_print(self, args, **kwargs):
        """Run the query and print the results (JSON/YAML aware)."""
        instances, count = self.run(args, **kwargs)
        if args.json or args.yaml:
            # NOTE(review): JSON/YAML output is reversed() relative to the
            # table output -- presumably newest-last is wanted there; confirm.
            self.print_output(reversed(instances), table.MultiColumnTable,
                              attributes=args.attr, widths=args.width,
                              json=args.json, yaml=args.yaml,
                              attribute_transform_functions=self.attribute_transform_functions)
        else:
            self.print_output(instances, table.MultiColumnTable,
                              attributes=args.attr, widths=args.width,
                              attribute_transform_functions=self.attribute_transform_functions)

            if args.last and count and count > args.last:
                # Tell the user the listing was truncated by -n/--last.
                table.SingleRowTable.note_box(self.resource_name, args.last)
class KeyValuePairGetCommand(resource.ResourceGetCommand):
    """CLI 'key get' command: fetch a single key-value pair by name."""

    pk_argument_name = 'name'
    display_attributes = ['name', 'value', 'secret', 'encrypted', 'scope', 'expire_timestamp']

    def __init__(self, kv_resource, *args, **kwargs):
        super(KeyValuePairGetCommand, self).__init__(kv_resource, *args, **kwargs)
        # Optional flags layered on top of the base "get" command.
        self.parser.add_argument('-d', '--decrypt', action='store_true',
                                 help='Decrypt secret if encrypted and show plain text.')
        self.parser.add_argument('-s', '--scope', default=DEFAULT_GET_SCOPE, dest='scope',
                                 help='Scope item is under. Example: "user".')

    @resource.add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Fetch one pair, forwarding decrypt/scope as query parameters."""
        key_name = getattr(args, self.pk_argument_name, None)
        wants_decrypt = getattr(args, 'decrypt', False)
        kwargs['params'] = {
            'decrypt': str(wants_decrypt).lower(),
            'scope': getattr(args, 'scope', DEFAULT_GET_SCOPE),
        }
        return self.get_resource_by_id(id=key_name, **kwargs)
class KeyValuePairSetCommand(resource.ResourceCommand):
    """CLI 'key set' command: create or overwrite one key-value pair."""

    display_attributes = ['name', 'value', 'scope', 'expire_timestamp']

    def __init__(self, resource, *args, **kwargs):
        super(KeyValuePairSetCommand, self).__init__(
            resource, 'set',
            'Set an existing %s.' % resource.get_display_name().lower(),
            *args, **kwargs
        )

        self.parser.add_argument('name',
                                 metavar='name',
                                 help='Name of the key value pair.')
        self.parser.add_argument('value', help='Value paired with the key.')
        self.parser.add_argument('-l', '--ttl', dest='ttl', type=int, default=None,
                                 help='TTL (in seconds) for this value.')
        self.parser.add_argument('-e', '--encrypt', dest='secret',
                                 action='store_true',
                                 help='Encrypt value before saving.')
        self.parser.add_argument('-s', '--scope', dest='scope', default=DEFAULT_CUD_SCOPE,
                                 help='Specify the scope under which you want ' +
                                      'to place the item.')
        self.parser.add_argument('-u', '--user', dest='user', default=None,
                                 help='User for user scoped items (admin only).')

    @resource.add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Build a KeyValuePair from the CLI args and push it to the API."""
        instance = KeyValuePair()
        instance.id = args.name  # TODO: refactor and get rid of id
        instance.name = args.name
        instance.value = args.value
        instance.scope = args.scope
        instance.user = args.user

        # Optional attributes are only set when supplied on the CLI.
        if args.secret:
            instance.secret = args.secret

        if args.ttl:
            instance.ttl = args.ttl

        return self.manager.update(instance, **kwargs)

    def run_and_print(self, args, **kwargs):
        """Run the command and print the saved pair as a property table."""
        instance = self.run(args, **kwargs)
        self.print_output(instance, table.PropertyValueTable,
                          attributes=self.display_attributes, json=args.json,
                          yaml=args.yaml)
class KeyValuePairDeleteCommand(resource.ResourceDeleteCommand):
    """CLI 'key delete' command: delete one key-value pair by name."""

    pk_argument_name = 'name'

    def __init__(self, resource, *args, **kwargs):
        super(KeyValuePairDeleteCommand, self).__init__(resource, *args, **kwargs)

        self.parser.add_argument('-s', '--scope', dest='scope', default=DEFAULT_CUD_SCOPE,
                                 help='Specify the scope under which you want ' +
                                      'to place the item.')
        self.parser.add_argument('-u', '--user', dest='user', default=None,
                                 help='User for user scoped items (admin only).')

    @resource.add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Look up the pair (scope/user aware) and delete it server-side.

        Raises ResourceNotFoundError when the key does not exist.
        """
        resource_id = getattr(args, self.pk_argument_name, None)
        scope = getattr(args, 'scope', DEFAULT_CUD_SCOPE)
        kwargs['params'] = {}
        kwargs['params']['scope'] = scope
        kwargs['params']['user'] = args.user
        instance = self.get_resource(resource_id, **kwargs)

        if not instance:
            raise resource.ResourceNotFoundError('KeyValuePair with id "%s" not found',
                                                 resource_id)

        instance.id = resource_id  # TODO: refactor and get rid of id
        self.manager.delete(instance, **kwargs)
class KeyValuePairDeleteByPrefixCommand(resource.ResourceCommand):
    """Delete every key-value pair whose name starts with the given prefix."""

    def __init__(self, resource, *args, **kwargs):
        super(KeyValuePairDeleteByPrefixCommand, self).__init__(resource, 'delete_by_prefix',
                                                                'Delete KeyValue pairs which \
match the provided prefix',
                                                                *args, **kwargs)
        self.parser.add_argument('-p', '--prefix', required=True,
                                 help='Name prefix (e.g. twitter.TwitterSensor:)')

    @resource.add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Fetch all pairs matching the prefix, then delete them one by one."""
        matches = self.manager.get_all(prefix=args.prefix)
        # The delete API keys off .id; mirror each pair's name into it first.
        for pair in matches:
            pair.id = pair.name

        removed = []
        for pair in matches:
            self.manager.delete(instance=pair, **kwargs)
            removed.append(pair)
        return removed

    def run_and_print(self, args, **kwargs):
        # TODO: Need to use args, instead of kwargs (args=) because of bad API
        # FIX ME
        removed = self.run(args, **kwargs)
        print('Deleted %s keys' % (len(removed)))
        print('Deleted key ids: %s' % (', '.join(pair.id for pair in removed)))
class KeyValuePairLoadCommand(resource.ResourceCommand):
    """CLI 'key load' command: bulk-load key-value pairs from a JSON/YAML file.

    Each entry must provide 'name' and 'value'; 'scope', 'user', 'secret' and
    'ttl' are optional. Non-string values are rejected unless -c/--convert is
    given, in which case they are serialized to JSON strings first.
    """

    pk_argument_name = 'name'
    display_attributes = ['name', 'value']

    def __init__(self, resource, *args, **kwargs):
        help_text = ('Load a list of %s from file.' %
                     resource.get_plural_display_name().lower())
        super(KeyValuePairLoadCommand, self).__init__(resource, 'load',
                                                      help_text, *args, **kwargs)

        self.parser.add_argument('-c', '--convert', action='store_true',
                                 help=('Convert non-string types (hash, array, boolean,'
                                       ' int, float) to a JSON string before loading it'
                                       ' into the datastore.'))
        self.parser.add_argument(
            'file', help=('JSON/YAML file containing the %s(s) to load'
                          % resource.get_plural_display_name().lower()))

    @resource.add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Parse the file and create/update one KeyValuePair per entry.

        Returns the list of KeyValuePair instances sent to the API; an empty
        input file yields an empty list. Raises ValueError for non-string
        values when --convert is not given.
        """
        # normalize the file path to allow for relative files to be specified
        file_path = os.path.normpath(pjoin(os.getcwd(), args.file))

        # load the data (JSON/YAML) from the file
        kvps = resource.load_meta_file(file_path)

        # Bug fix: an empty file parses to None (or an empty container); the
        # previous code wrapped None into [None] and crashed on item['name'].
        # Bail out gracefully instead of failing on empty YAML/JSON files.
        if not kvps:
            return []

        # if the data is not a list (ie. it's a single entry)
        # then make it a list so our process loop is generic
        if not isinstance(kvps, list):
            kvps = [kvps]

        instances = []
        for item in kvps:
            # parse required KeyValuePair properties
            name = item['name']
            value = item['value']

            # parse optional KeyValuePair properties
            scope = item.get('scope', DEFAULT_CUD_SCOPE)
            user = item.get('user', None)
            secret = item.get('secret', False)
            ttl = item.get('ttl', None)

            # if the value is not a string, convert it to JSON
            # all keys in the datastore must strings
            if not isinstance(value, six.string_types):
                if args.convert:
                    value = json.dumps(value)
                else:
                    raise ValueError(("Item '%s' has a value that is not a string."
                                      " Either pass in the -c/--convert option to convert"
                                      " non-string types to JSON strings automatically, or"
                                      " convert the data to a string in the file") % name)

            # create the KeyValuePair instance
            instance = KeyValuePair()
            instance.id = name  # TODO: refactor and get rid of id
            instance.name = name
            instance.value = value
            instance.scope = scope

            # Optional attributes are only set when present in the file.
            if user:
                instance.user = user
            if secret:
                instance.secret = secret
            if ttl:
                instance.ttl = ttl

            # call the API to create/update the KeyValuePair
            self.manager.update(instance, **kwargs)
            instances.append(instance)
        return instances

    def run_and_print(self, args, **kwargs):
        """Run the load and print the loaded pairs as a table."""
        instances = self.run(args, **kwargs)
        self.print_output(instances, table.MultiColumnTable,
                          attributes=['name', 'value', 'secret', 'scope', 'user', 'ttl'],
                          json=args.json,
                          yaml=args.yaml)
Fix `st2 key load` to not fail on empty YAML/JSON files
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import json
import logging
from os.path import join as pjoin
import six
from st2client.commands import resource
from st2client.commands.noop import NoopCommand
from st2client.formatters import table
from st2client.models.keyvalue import KeyValuePair
from st2client.utils.date import format_isodate_for_user_timezone
LOG = logging.getLogger(__name__)

# Default --scope values per operation: listing spans all scopes, while
# get and create/update/delete default to the system scope.
DEFAULT_LIST_SCOPE = 'all'
DEFAULT_GET_SCOPE = 'system'
DEFAULT_CUD_SCOPE = 'system'
class KeyValuePairBranch(resource.ResourceBranch):
    """CLI branch for the datastore ('key') resource.

    Wires up the standard list/get/delete commands plus the key-specific
    set/load/delete_by_prefix commands; create and update are not supported.
    """

    def __init__(self, description, app, subparsers, parent_parser=None):
        super(KeyValuePairBranch, self).__init__(
            KeyValuePair, description, app, subparsers,
            parent_parser=parent_parser,
            commands={
                'list': KeyValuePairListCommand,
                'get': KeyValuePairGetCommand,
                'delete': KeyValuePairDeleteCommand,
                # NoopCommand placeholders satisfy the parent constructor;
                # they are deleted again just below.
                'create': NoopCommand,
                'update': NoopCommand
            })

        # Registers extended commands
        self.commands['set'] = KeyValuePairSetCommand(self.resource, self.app,
                                                      self.subparsers)
        self.commands['load'] = KeyValuePairLoadCommand(
            self.resource, self.app, self.subparsers)
        self.commands['delete_by_prefix'] = KeyValuePairDeleteByPrefixCommand(
            self.resource, self.app, self.subparsers)

        # Remove unsupported commands
        # TODO: Refactor parent class and make it nicer
        del self.commands['create']
        del self.commands['update']
class KeyValuePairListCommand(resource.ResourceTableCommand):
    """CLI 'key list' command: tabular listing of key-value pairs."""

    # Columns rendered by the table formatter.
    display_attributes = ['name', 'value', 'secret', 'encrypted', 'scope', 'user',
                          'expire_timestamp']
    # Per-attribute render hooks applied before display.
    attribute_transform_functions = {
        'expire_timestamp': format_isodate_for_user_timezone,
    }

    def __init__(self, resource, *args, **kwargs):
        """Register the 'list' sub-command and its filter options."""
        self.default_limit = 50

        super(KeyValuePairListCommand, self).__init__(resource, 'list',
                                                      'Get the list of the %s most recent %s.' %
                                                      (self.default_limit,
                                                       resource.get_plural_display_name().lower()),
                                                      *args, **kwargs)
        self.resource_name = resource.get_plural_display_name().lower()

        # Filter options
        self.parser.add_argument('--prefix', help=('Only return values with names starting with '
                                                   'the provided prefix.'))
        self.parser.add_argument('--decrypt', action='store_true',
                                 help='Decrypt secrets and displays plain text.')
        self.parser.add_argument('-s', '--scope', default=DEFAULT_LIST_SCOPE, dest='scope',
                                 help='Scope item is under. Example: "user".')
        self.parser.add_argument('-u', '--user', dest='user', default=None,
                                 help='User for user scoped items (admin only).')
        self.parser.add_argument('-n', '--last', type=int, dest='last',
                                 default=self.default_limit,
                                 help=('List N most recent %s. Use -n -1 to fetch the full result \
set.' % self.resource_name))

    @resource.add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Query the API with the CLI filters; returns (instances, count)."""
        # Filtering options
        if args.prefix:
            kwargs['prefix'] = args.prefix

        decrypt = getattr(args, 'decrypt', False)
        kwargs['params'] = {'decrypt': str(decrypt).lower()}
        scope = getattr(args, 'scope', DEFAULT_LIST_SCOPE)
        kwargs['params']['scope'] = scope
        if args.user:
            kwargs['params']['user'] = args.user
        kwargs['params']['limit'] = args.last

        return self.manager.query_with_count(**kwargs)

    @resource.add_auth_token_to_kwargs_from_cli
    def run_and_print(self, args, **kwargs):
        """Run the query and print the results (JSON/YAML aware)."""
        instances, count = self.run(args, **kwargs)
        if args.json or args.yaml:
            # NOTE(review): JSON/YAML output is reversed() relative to the
            # table output -- presumably newest-last is wanted there; confirm.
            self.print_output(reversed(instances), table.MultiColumnTable,
                              attributes=args.attr, widths=args.width,
                              json=args.json, yaml=args.yaml,
                              attribute_transform_functions=self.attribute_transform_functions)
        else:
            self.print_output(instances, table.MultiColumnTable,
                              attributes=args.attr, widths=args.width,
                              attribute_transform_functions=self.attribute_transform_functions)

            if args.last and count and count > args.last:
                # Tell the user the listing was truncated by -n/--last.
                table.SingleRowTable.note_box(self.resource_name, args.last)
class KeyValuePairGetCommand(resource.ResourceGetCommand):
    """CLI 'key get' command: fetch a single key-value pair by name."""

    pk_argument_name = 'name'
    display_attributes = ['name', 'value', 'secret', 'encrypted', 'scope', 'expire_timestamp']

    def __init__(self, kv_resource, *args, **kwargs):
        super(KeyValuePairGetCommand, self).__init__(kv_resource, *args, **kwargs)
        # Optional flags layered on top of the base "get" command.
        self.parser.add_argument('-d', '--decrypt', action='store_true',
                                 help='Decrypt secret if encrypted and show plain text.')
        self.parser.add_argument('-s', '--scope', default=DEFAULT_GET_SCOPE, dest='scope',
                                 help='Scope item is under. Example: "user".')

    @resource.add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Fetch one pair, forwarding decrypt/scope as query parameters."""
        resource_name = getattr(args, self.pk_argument_name, None)
        decrypt = getattr(args, 'decrypt', False)
        scope = getattr(args, 'scope', DEFAULT_GET_SCOPE)
        kwargs['params'] = {'decrypt': str(decrypt).lower()}
        kwargs['params']['scope'] = scope
        return self.get_resource_by_id(id=resource_name, **kwargs)
class KeyValuePairSetCommand(resource.ResourceCommand):
    """CLI 'key set' command: create or overwrite one key-value pair."""

    display_attributes = ['name', 'value', 'scope', 'expire_timestamp']

    def __init__(self, resource, *args, **kwargs):
        super(KeyValuePairSetCommand, self).__init__(
            resource, 'set',
            'Set an existing %s.' % resource.get_display_name().lower(),
            *args, **kwargs
        )
        # Positional arguments, then optional tuning flags.
        self.parser.add_argument('name',
                                 metavar='name',
                                 help='Name of the key value pair.')
        self.parser.add_argument('value', help='Value paired with the key.')
        self.parser.add_argument('-l', '--ttl', dest='ttl', type=int, default=None,
                                 help='TTL (in seconds) for this value.')
        self.parser.add_argument('-e', '--encrypt', dest='secret',
                                 action='store_true',
                                 help='Encrypt value before saving.')
        self.parser.add_argument('-s', '--scope', dest='scope', default=DEFAULT_CUD_SCOPE,
                                 help='Specify the scope under which you want ' +
                                      'to place the item.')
        self.parser.add_argument('-u', '--user', dest='user', default=None,
                                 help='User for user scoped items (admin only).')

    @resource.add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Build a KeyValuePair from the CLI args and push it to the API."""
        pair = KeyValuePair()
        pair.id = args.name  # TODO: refactor and get rid of id
        for attr in ('name', 'value', 'scope', 'user'):
            setattr(pair, attr, getattr(args, attr))

        # Optional attributes are only set when supplied on the CLI.
        if args.secret:
            pair.secret = args.secret
        if args.ttl:
            pair.ttl = args.ttl

        return self.manager.update(pair, **kwargs)

    def run_and_print(self, args, **kwargs):
        """Run the command and print the saved pair as a property table."""
        saved = self.run(args, **kwargs)
        self.print_output(saved, table.PropertyValueTable,
                          attributes=self.display_attributes, json=args.json,
                          yaml=args.yaml)
class KeyValuePairDeleteCommand(resource.ResourceDeleteCommand):
    """CLI 'key delete' command: delete one key-value pair by name."""

    pk_argument_name = 'name'

    def __init__(self, resource, *args, **kwargs):
        super(KeyValuePairDeleteCommand, self).__init__(resource, *args, **kwargs)
        # Scope/user flags mirror those of the other CUD commands.
        self.parser.add_argument('-s', '--scope', dest='scope', default=DEFAULT_CUD_SCOPE,
                                 help='Specify the scope under which you want ' +
                                      'to place the item.')
        self.parser.add_argument('-u', '--user', dest='user', default=None,
                                 help='User for user scoped items (admin only).')

    @resource.add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Look up the pair (scope/user aware) and delete it server-side.

        Raises ResourceNotFoundError when the key does not exist.
        """
        key_name = getattr(args, self.pk_argument_name, None)
        kwargs['params'] = {
            'scope': getattr(args, 'scope', DEFAULT_CUD_SCOPE),
            'user': args.user,
        }

        target = self.get_resource(key_name, **kwargs)
        if not target:
            raise resource.ResourceNotFoundError('KeyValuePair with id "%s" not found',
                                                 key_name)

        target.id = key_name  # TODO: refactor and get rid of id
        self.manager.delete(target, **kwargs)
class KeyValuePairDeleteByPrefixCommand(resource.ResourceCommand):
    """
    Commands which delete all the key value pairs which match the provided
    prefix.
    """

    def __init__(self, resource, *args, **kwargs):
        super(KeyValuePairDeleteByPrefixCommand, self).__init__(resource, 'delete_by_prefix',
                                                                'Delete KeyValue pairs which \
match the provided prefix',
                                                                *args, **kwargs)
        self.parser.add_argument('-p', '--prefix', required=True,
                                 help='Name prefix (e.g. twitter.TwitterSensor:)')

    @resource.add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Fetch all pairs matching the prefix, then delete them one by one.

        Returns the list of deleted KeyValuePair instances.
        """
        prefix = args.prefix
        key_pairs = self.manager.get_all(prefix=prefix)

        to_delete = []
        # The delete API keys off .id; mirror each pair's name into it first.
        for key_pair in key_pairs:
            key_pair.id = key_pair.name
            to_delete.append(key_pair)

        deleted = []
        for key_pair in to_delete:
            self.manager.delete(instance=key_pair, **kwargs)
            deleted.append(key_pair)

        return deleted

    def run_and_print(self, args, **kwargs):
        """Run the deletion and print a count plus the deleted key ids."""
        # TODO: Need to use args, instead of kwargs (args=) because of bad API
        # FIX ME
        deleted = self.run(args, **kwargs)
        key_ids = [key_pair.id for key_pair in deleted]

        print('Deleted %s keys' % (len(deleted)))
        print('Deleted key ids: %s' % (', '.join(key_ids)))
class KeyValuePairLoadCommand(resource.ResourceCommand):
    # CLI argument used as the primary key and the default output columns.
    pk_argument_name = 'name'
    display_attributes = ['name', 'value']

    def __init__(self, resource, *args, **kwargs):
        help_text = ('Load a list of %s from file.' %
                     resource.get_plural_display_name().lower())
        super(KeyValuePairLoadCommand, self).__init__(resource, 'load',
                                                      help_text, *args, **kwargs)

        self.parser.add_argument('-c', '--convert', action='store_true',
                                 help=('Convert non-string types (hash, array, boolean,'
                                       ' int, float) to a JSON string before loading it'
                                       ' into the datastore.'))
        self.parser.add_argument(
            'file', help=('JSON/YAML file containing the %s(s) to load'
                          % resource.get_plural_display_name().lower()))

    @resource.add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """
        Load key/value pairs from a JSON or YAML file and upsert each one
        via the API.

        Returns the list of KeyValuePair instances that were sent.
        Raises ValueError for non-string values unless -c/--convert is given.
        """
        # normalize the file path to allow for relative files to be specified
        file_path = os.path.normpath(pjoin(os.getcwd(), args.file))

        # load the data (JSON/YAML) from the file
        kvps = resource.load_meta_file(file_path)
        instances = []

        # bail out if file was empty
        if not kvps:
            return instances

        # if the data is not a list (ie. it's a single entry)
        # then make it a list so our process loop is generic
        if not isinstance(kvps, list):
            kvps = [kvps]

        for item in kvps:
            # parse required KeyValuePair properties
            name = item['name']
            value = item['value']

            # parse optional KeyValuePair properties
            scope = item.get('scope', DEFAULT_CUD_SCOPE)
            user = item.get('user', None)
            secret = item.get('secret', False)
            ttl = item.get('ttl', None)

            # if the value is not a string, convert it to JSON
            # (all keys in the datastore must be strings)
            if not isinstance(value, six.string_types):
                if args.convert:
                    value = json.dumps(value)
                else:
                    raise ValueError(("Item '%s' has a value that is not a string."
                                      " Either pass in the -c/--convert option to convert"
                                      " non-string types to JSON strings automatically, or"
                                      " convert the data to a string in the file") % name)

            # create the KeyValuePair instance
            instance = KeyValuePair()
            instance.id = name  # TODO: refactor and get rid of id
            instance.name = name
            instance.value = value
            instance.scope = scope

            # optional attributes are only attached when explicitly provided
            if user:
                instance.user = user
            if secret:
                instance.secret = secret
            if ttl:
                instance.ttl = ttl

            # call the API to create/update the KeyValuePair
            self.manager.update(instance, **kwargs)
            instances.append(instance)
        return instances

    def run_and_print(self, args, **kwargs):
        instances = self.run(args, **kwargs)
        self.print_output(instances, table.MultiColumnTable,
                          attributes=['name', 'value', 'secret', 'scope', 'user', 'ttl'],
                          json=args.json,
                          yaml=args.yaml)
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from typing import List
from typing import Any
from typing import Dict
import os
import abc
import six
import json
import logging
import traceback
from functools import wraps
import yaml
from six.moves import http_client
from st2client import commands
from st2client.exceptions.operations import OperationFailureException
from st2client.formatters import table
from st2client.utils.types import OrderedSet
import st2client
ALLOWED_EXTS = [".json", ".yaml", ".yml"]
PARSER_FUNCS = {".json": json.load, ".yml": yaml.safe_load, ".yaml": yaml.safe_load}
LOG = logging.getLogger(__name__)
def add_auth_token_to_kwargs_from_cli(func):
    """Decorator: copy ``token`` / ``api_key`` from the parsed CLI namespace
    (the wrapped function's second positional argument) into its kwargs."""

    @wraps(func)
    def decorate(*args, **kwargs):
        ns = args[1]
        token = getattr(ns, "token", None)
        if token:
            kwargs["token"] = token
        api_key = getattr(ns, "api_key", None)
        if api_key:
            kwargs["api_key"] = api_key
        return func(*args, **kwargs)

    return decorate
class ResourceCommandError(Exception):
    """Raised when a resource command fails to execute."""
class ResourceNotFoundError(Exception):
    """Raised when a requested resource cannot be located."""
class ResourceBranch(commands.Branch):
    """
    CLI branch which registers the standard CRUD subcommands for one
    resource type: list/get always; create/update/delete unless read-only;
    enable/disable when requested.
    """

    def __init__(
        self,
        resource,
        description,
        app,
        subparsers,
        parent_parser=None,
        read_only=False,
        commands=None,
        has_disable=False,
    ):
        """
        :param resource: Resource model class this branch manages.
        :param read_only: When True, only "list" and "get" are registered.
        :param commands: Optional mapping overriding individual command classes.
        :param has_disable: When True, also register "enable"/"disable".
        """
        self.resource = resource
        super(ResourceBranch, self).__init__(
            self.resource.get_alias().lower(),
            description,
            app,
            subparsers,
            parent_parser=parent_parser,
        )

        # Registers subcommands for managing the resource type.
        self.subparsers = self.parser.add_subparsers(
            help=(
                "List of commands for managing %s."
                % self.resource.get_plural_display_name().lower()
            )
        )

        # Resolves if commands need to be overridden.
        # Bug fix: work on a copy so a caller-supplied "commands" dict is not
        # mutated in place when the defaults are filled in below.
        commands = dict(commands) if commands else {}
        cmd_map = {
            "list": ResourceListCommand,
            "get": ResourceGetCommand,
            "create": ResourceCreateCommand,
            "update": ResourceUpdateCommand,
            "delete": ResourceDeleteCommand,
            "enable": ResourceEnableCommand,
            "disable": ResourceDisableCommand,
        }
        for cmd, cmd_class in cmd_map.items():
            commands.setdefault(cmd, cmd_class)

        # Instantiate commands.
        args = [self.resource, self.app, self.subparsers]
        self.commands["list"] = commands["list"](*args)
        self.commands["get"] = commands["get"](*args)

        if not read_only:
            self.commands["create"] = commands["create"](*args)
            self.commands["update"] = commands["update"](*args)
            self.commands["delete"] = commands["delete"](*args)

        if has_disable:
            self.commands["enable"] = commands["enable"](*args)
            self.commands["disable"] = commands["disable"](*args)
@six.add_metaclass(abc.ABCMeta)
class ResourceCommand(commands.Command):
    """
    Abstract base class for CLI commands operating on a single resource
    type. Wires up the shared auth (-t/--token, --api-key) and output
    formatting (-j/--json, -y/--yaml) options.
    """

    # Name of the parsed-args attribute holding the resource primary key
    # (e.g. "name_or_id" / "ref_or_id"); set by concrete subclasses.
    pk_argument_name = None

    def __init__(self, resource, *args, **kwargs):
        # has_token_opt is consumed here and not forwarded to the base class.
        has_token_opt = kwargs.pop("has_token_opt", True)

        super(ResourceCommand, self).__init__(*args, **kwargs)
        self.resource = resource

        if has_token_opt:
            self.parser.add_argument(
                "-t",
                "--token",
                dest="token",
                help="Access token for user authentication. "
                "Get ST2_AUTH_TOKEN from the environment "
                "variables by default.",
            )
            self.parser.add_argument(
                "--api-key",
                dest="api_key",
                help="Api Key for user authentication. "
                "Get ST2_API_KEY from the environment "
                "variables by default.",
            )

        # Formatter flags
        self.parser.add_argument(
            "-j",
            "--json",
            action="store_true",
            dest="json",
            help="Print output in JSON format.",
        )
        self.parser.add_argument(
            "-y",
            "--yaml",
            action="store_true",
            dest="yaml",
            help="Print output in YAML format.",
        )

    @property
    def manager(self):
        # API manager for this resource type, looked up by class name.
        return self.app.client.managers[self.resource.__name__]

    @property
    def arg_name_for_resource_id(self):
        # e.g. "key value pair" -> "key-value-pair-id"
        resource_name = self.resource.get_display_name().lower()
        return "%s-id" % resource_name.replace(" ", "-")

    def print_not_found(self, name):
        print('%s "%s" is not found.\n' % (self.resource.get_display_name(), name))

    def get_resource(self, name_or_id, **kwargs):
        # Dispatch on the subclass's pk_argument_name to pick the lookup.
        pk_argument_name = self.pk_argument_name

        if pk_argument_name == "name_or_id":
            instance = self.get_resource_by_name_or_id(name_or_id=name_or_id, **kwargs)
        elif pk_argument_name == "ref_or_id":
            instance = self.get_resource_by_ref_or_id(ref_or_id=name_or_id, **kwargs)
        else:
            instance = self.get_resource_by_pk(pk=name_or_id, **kwargs)

        return instance

    def get_resource_by_pk(self, pk, **kwargs):
        """
        Retrieve resource by a primary key.

        Returns None when the lookup fails for any reason other than an
        Unauthorized response, which is re-raised.
        """
        try:
            instance = self.manager.get_by_id(pk, **kwargs)
        except Exception as e:
            traceback.print_exc()
            # Hack for "Unauthorized" exceptions, we do want to propagate those
            response = getattr(e, "response", None)
            status_code = getattr(response, "status_code", None)
            if status_code and status_code == http_client.UNAUTHORIZED:
                raise e
            instance = None
        return instance

    def get_resource_by_id(self, id, **kwargs):
        # Like get_resource_by_pk, but raises ResourceNotFoundError on a miss.
        instance = self.get_resource_by_pk(pk=id, **kwargs)
        if not instance:
            message = 'Resource with id "%s" doesn\'t exist.' % (id)
            raise ResourceNotFoundError(message)
        return instance

    def get_resource_by_name(self, name, **kwargs):
        """
        Retrieve resource by name.
        """
        instance = self.manager.get_by_name(name, **kwargs)
        return instance

    def get_resource_by_name_or_id(self, name_or_id, **kwargs):
        # Try a name lookup first, then fall back to a primary-key lookup.
        instance = self.get_resource_by_name(name=name_or_id, **kwargs)
        if not instance:
            instance = self.get_resource_by_pk(pk=name_or_id, **kwargs)
        if not instance:
            message = 'Resource with id or name "%s" doesn\'t exist.' % (name_or_id)
            raise ResourceNotFoundError(message)
        return instance

    def get_resource_by_ref_or_id(self, ref_or_id, **kwargs):
        instance = self.manager.get_by_ref_or_id(ref_or_id=ref_or_id, **kwargs)
        if not instance:
            message = 'Resource with id or reference "%s" doesn\'t exist.' % (ref_or_id)
            raise ResourceNotFoundError(message)
        return instance

    def _get_multiple_resources(
        self, resource_ids: List[str], kwargs: Dict[str, Any]
    ) -> List[Any]:
        """
        Return multiple resource instances for the provided resource ids.

        If a resource is not found, an error is printed. This method only throws when operating on
        a single resource.

        :param resource_ids: A list of resources to retrieve instances for.
        :param kwargs: Dictionary with keyword arguments which are passed to get_resource_by_id.
        """
        more_than_one_resource = len(resource_ids) > 1
        resources = []
        for resource_id in resource_ids:
            try:
                resource = self.get_resource_by_id(resource_id, **kwargs)
            except ResourceNotFoundError:
                self.print_not_found(resource_id)
                if not more_than_one_resource:
                    # For backward compatibility reasons and to comply with common "get one"
                    # behavior, we only fail if a single source is requested
                    raise ResourceNotFoundError("Resource %s not found." % resource_id)
                continue
            resources.append(resource)
        return resources

    @abc.abstractmethod
    def run(self, args, **kwargs):
        """Execute the command and return its result (subclass hook)."""
        raise NotImplementedError

    @abc.abstractmethod
    def run_and_print(self, args, **kwargs):
        """Execute the command and render its output (subclass hook)."""
        raise NotImplementedError

    def _get_metavar_for_argument(self, argument):
        # "name_or_id" -> "name-or-id" for nicer help output.
        return argument.replace("_", "-")

    def _get_help_for_argument(self, resource, argument):
        # Build a human-friendly help string for the PK positional argument.
        argument_display_name = argument.title()
        resource_display_name = resource.get_display_name().lower()
        if "ref" in argument:
            result = "Reference or ID of the %s." % (resource_display_name)
        elif "name_or_id" in argument:
            result = "Name or ID of the %s." % (resource_display_name)
        else:
            result = "%s of the %s." % (argument_display_name, resource_display_name)
        return result
class ResourceViewCommand(ResourceCommand):
    """
    Base class for read / view commands (list and get).
    """

    @classmethod
    def _get_include_attributes(cls, args, extra_attributes=None):
        """
        Return a list of attributes to send to the API using ?include_attributes filter.

        If None / empty list is returned it's assumed no filtering is to be performed and all
        attributes are to be retrieved.

        :param extra_attributes: Additional include attributes which should always be included.
        :type extra_attributes: ``list`` of ``str``
        """
        include_attributes = list(extra_attributes or [])

        # Special case for "all" - disables server-side filtering entirely
        if "all" in args.attr:
            return None

        # User-specified --attr / -a values (idiom fix: extend instead of a
        # manual append loop)
        include_attributes.extend(args.attr)

        if include_attributes:
            return include_attributes

        # Only reached when args.attr is empty and no extra attributes were
        # provided: fall back to the class's default display attributes.
        display_attributes = getattr(cls, "display_attributes", [])
        if display_attributes:
            include_attributes += display_attributes

        # De-duplicate while preserving order.
        include_attributes = list(OrderedSet(include_attributes))
        return include_attributes
class ResourceTableCommand(ResourceViewCommand):
    # Default columns rendered by the table formatter.
    display_attributes = ["id", "name", "description"]

    def __init__(self, resource, name, description, *args, **kwargs):
        super(ResourceTableCommand, self).__init__(
            resource, name, description, *args, **kwargs
        )

        self.parser.add_argument(
            "-a",
            "--attr",
            nargs="+",
            default=self.display_attributes,
            help=(
                "List of attributes to include in the "
                'output. "all" will return all '
                "attributes."
            ),
        )
        self.parser.add_argument(
            "-w",
            "--width",
            nargs="+",
            type=int,
            default=None,
            help=("Set the width of columns in output."),
        )

    @add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Fetch all resources, asking the API only for the attributes
        that will actually be displayed."""
        include_attributes = self._get_include_attributes(args=args)
        if include_attributes:
            include_attributes = ",".join(include_attributes)
            kwargs["params"] = {"include_attributes": include_attributes}
        return self.manager.get_all(**kwargs)

    def run_and_print(self, args, **kwargs):
        """Render the fetched resources as a multi-column table."""
        instances = self.run(args, **kwargs)
        self.print_output(
            instances,
            table.MultiColumnTable,
            attributes=args.attr,
            widths=args.width,
            json=args.json,
            yaml=args.yaml,
        )
class ResourceListCommand(ResourceTableCommand):
    """"list" command: table view over all resources of the managed type."""

    def __init__(self, resource, *args, **kwargs):
        description = "Get the list of %s." % (
            resource.get_plural_display_name().lower()
        )
        super(ResourceListCommand, self).__init__(
            resource, "list", description, *args, **kwargs
        )
class ContentPackResourceListCommand(ResourceListCommand):
    """
    Base command class for use with resources which belong to a content pack.
    """

    def __init__(self, resource, *args, **kwargs):
        super(ContentPackResourceListCommand, self).__init__(resource, *args, **kwargs)

        self.parser.add_argument(
            "-p",
            "--pack",
            type=str,
            help=("Only return resources belonging to the" " provided pack"),
        )

    @add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """List resources, restricted to one pack when -p/--pack is given."""
        filters = {"pack": args.pack}
        filters.update(kwargs)

        include_attributes = self._get_include_attributes(args=args)
        if include_attributes:
            filters["params"] = {
                "include_attributes": ",".join(include_attributes)
            }

        return self.manager.get_all(**filters)
class ResourceGetCommand(ResourceViewCommand):
    # Show every attribute by default; "all" disables server-side filtering.
    display_attributes = ["all"]
    attribute_display_order = ["id", "name", "description"]

    pk_argument_name = "name_or_id"  # name of the attribute which stores resource PK

    help_string = None

    def __init__(self, resource, *args, **kwargs):
        super(ResourceGetCommand, self).__init__(
            resource,
            "get",
            self.help_string
            or "Get individual %s." % resource.get_display_name().lower(),
            *args,
            **kwargs,
        )

        argument = self.pk_argument_name
        metavar = self._get_metavar_for_argument(argument=self.pk_argument_name)
        help = self._get_help_for_argument(
            resource=resource, argument=self.pk_argument_name
        )

        # nargs="+" allows fetching several resources in one invocation.
        self.parser.add_argument(argument, metavar=metavar, nargs="+", help=help)
        self.parser.add_argument(
            "-a",
            "--attr",
            nargs="+",
            default=self.display_attributes,
            help=(
                "List of attributes to include in the "
                'output. "all" or unspecified will '
                "return all attributes."
            ),
        )

    @add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Fetch all requested resources; see _get_multiple_resources for
        the not-found semantics."""
        resource_ids = getattr(args, self.pk_argument_name, None)
        resources = self._get_multiple_resources(
            resource_ids=resource_ids, kwargs=kwargs
        )
        return resources

    def run_and_print(self, args, **kwargs):
        """Print one property/value table per fetched resource."""
        instances = self.run(args, **kwargs)
        for instance in instances:
            self.print_output(
                instance,
                table.PropertyValueTable,
                attributes=args.attr,
                json=args.json,
                yaml=args.yaml,
                attribute_display_order=self.attribute_display_order,
            )
class ContentPackResourceGetCommand(ResourceGetCommand):
    """
    Command for retrieving a single resource which belongs to a content pack.

    Note: All the resources which belong to the content pack can either be
    retrieved by a reference or by an id.
    """

    attribute_display_order = ["id", "pack", "name", "description"]

    pk_argument_name = "ref_or_id"

    def get_resource(self, ref_or_id, **kwargs):
        # Pack resources are addressed by reference or id, never by name alone.
        return self.get_resource_by_ref_or_id(ref_or_id=ref_or_id, **kwargs)
class ResourceCreateCommand(ResourceCommand):
    """"create" command: deserialize a resource from a JSON/YAML file and POST it."""

    def __init__(self, resource, *args, **kwargs):
        super(ResourceCreateCommand, self).__init__(
            resource,
            "create",
            "Create a new %s." % resource.get_display_name().lower(),
            *args,
            **kwargs,
        )

        self.parser.add_argument(
            "file",
            help=(
                "JSON/YAML file containing the %s to create."
                % resource.get_display_name().lower()
            ),
        )

    @add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Parse the given file and ask the API to create the resource."""
        payload = load_meta_file(args.file)
        instance = self.resource.deserialize(payload)
        return self.manager.create(instance, **kwargs)

    def run_and_print(self, args, **kwargs):
        """Create the resource and print it; wrap any failure in
        OperationFailureException after reporting it."""
        try:
            instance = self.run(args, **kwargs)
            if not instance:
                raise Exception("Server did not create instance.")
            self.print_output(
                instance,
                table.PropertyValueTable,
                attributes=["all"],
                json=args.json,
                yaml=args.yaml,
            )
        except Exception as e:
            message = six.text_type(e)
            print("ERROR: %s" % (message))
            raise OperationFailureException(message)
class ResourceUpdateCommand(ResourceCommand):
    pk_argument_name = "name_or_id"

    def __init__(self, resource, *args, **kwargs):
        super(ResourceUpdateCommand, self).__init__(
            resource,
            "update",
            "Updating an existing %s." % resource.get_display_name().lower(),
            *args,
            **kwargs,
        )

        argument = self.pk_argument_name
        metavar = self._get_metavar_for_argument(argument=self.pk_argument_name)
        help = self._get_help_for_argument(
            resource=resource, argument=self.pk_argument_name
        )

        self.parser.add_argument(argument, metavar=metavar, help=help)
        self.parser.add_argument(
            "file",
            help=(
                "JSON/YAML file containing the %s to update."
                % resource.get_display_name().lower()
            ),
        )

    @add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Load the new definition from file and submit it over the existing
        resource. The id from the file (when present) must match the
        resource resolved from the CLI argument."""
        resource_id = getattr(args, self.pk_argument_name, None)
        instance = self.get_resource(resource_id, **kwargs)

        data = load_meta_file(args.file)
        modified_instance = self.resource.deserialize(data)
        if not getattr(modified_instance, "id", None):
            # File carries no id; inherit it from the fetched instance.
            modified_instance.id = instance.id
        else:
            if modified_instance.id != instance.id:
                raise Exception(
                    "The value for the %s id in the JSON/YAML file "
                    "does not match the ID provided in the "
                    "command line arguments." % self.resource.get_display_name().lower()
                )
        return self.manager.update(modified_instance, **kwargs)

    def run_and_print(self, args, **kwargs):
        """Update the resource and print the result as a property table."""
        instance = self.run(args, **kwargs)
        try:
            self.print_output(
                instance,
                table.PropertyValueTable,
                attributes=["all"],
                json=args.json,
                yaml=args.yaml,
            )
        except Exception as e:
            print("ERROR: %s" % (six.text_type(e)))
            raise OperationFailureException(six.text_type(e))
class ContentPackResourceUpdateCommand(ResourceUpdateCommand):
    """Update command for pack-scoped resources (addressed by ref or id)."""

    pk_argument_name = "ref_or_id"
class ResourceEnableCommand(ResourceCommand):
    """"enable" command: set ``enabled = True`` on an existing resource."""

    pk_argument_name = "name_or_id"

    def __init__(self, resource, *args, **kwargs):
        super(ResourceEnableCommand, self).__init__(
            resource,
            "enable",
            "Enable an existing %s." % resource.get_display_name().lower(),
            *args,
            **kwargs,
        )

        pk_arg = self.pk_argument_name
        self.parser.add_argument(
            pk_arg,
            metavar=self._get_metavar_for_argument(argument=pk_arg),
            help=self._get_help_for_argument(resource=resource, argument=pk_arg),
        )

    @add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Fetch the resource, flip its enabled flag on, and re-submit it."""
        pk = getattr(args, self.pk_argument_name, None)
        instance = self.get_resource(pk, **kwargs)

        data = instance.serialize()
        # Drop the derived "ref" field before re-submitting the document.
        data.pop("ref", None)
        data["enabled"] = True

        modified_instance = self.resource.deserialize(data)
        return self.manager.update(modified_instance, **kwargs)

    def run_and_print(self, args, **kwargs):
        """Enable the resource and print it as a property table."""
        self.print_output(
            self.run(args, **kwargs),
            table.PropertyValueTable,
            attributes=["all"],
            json=args.json,
            yaml=args.yaml,
        )
class ContentPackResourceEnableCommand(ResourceEnableCommand):
    """Enable command for pack-scoped resources (addressed by ref or id)."""

    pk_argument_name = "ref_or_id"
class ResourceDisableCommand(ResourceCommand):
    """"disable" command: set ``enabled = False`` on an existing resource."""

    pk_argument_name = "name_or_id"

    def __init__(self, resource, *args, **kwargs):
        super(ResourceDisableCommand, self).__init__(
            resource,
            "disable",
            "Disable an existing %s." % resource.get_display_name().lower(),
            *args,
            **kwargs,
        )

        pk_arg = self.pk_argument_name
        self.parser.add_argument(
            pk_arg,
            metavar=self._get_metavar_for_argument(argument=pk_arg),
            help=self._get_help_for_argument(resource=resource, argument=pk_arg),
        )

    @add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Fetch the resource, flip its enabled flag off, and re-submit it."""
        pk = getattr(args, self.pk_argument_name, None)
        instance = self.get_resource(pk, **kwargs)

        data = instance.serialize()
        # Drop the derived "ref" field before re-submitting the document.
        data.pop("ref", None)
        data["enabled"] = False

        modified_instance = self.resource.deserialize(data)
        return self.manager.update(modified_instance, **kwargs)

    def run_and_print(self, args, **kwargs):
        """Disable the resource and print it as a property table."""
        self.print_output(
            self.run(args, **kwargs),
            table.PropertyValueTable,
            attributes=["all"],
            json=args.json,
            yaml=args.yaml,
        )
class ContentPackResourceDisableCommand(ResourceDisableCommand):
    """Disable command for pack-scoped resources (addressed by ref or id)."""

    pk_argument_name = "ref_or_id"
class ResourceDeleteCommand(ResourceCommand):
    """"delete" command: remove a resource, prompting before deleting action files."""

    pk_argument_name = "name_or_id"

    def __init__(self, resource, *args, **kwargs):
        super(ResourceDeleteCommand, self).__init__(
            resource,
            "delete",
            "Delete an existing %s." % resource.get_display_name().lower(),
            *args,
            **kwargs,
        )

        argument = self.pk_argument_name
        metavar = self._get_metavar_for_argument(argument=self.pk_argument_name)
        help = self._get_help_for_argument(
            resource=resource, argument=self.pk_argument_name
        )

        self.parser.add_argument(argument, metavar=metavar, help=help)
        self.parser.add_argument(
            "--yes",
            action="store_true",
            help="Auto yes flag to delete action files from disk.",
        )

    def _delete_and_report(self, instance, resource_id, from_disk, **kwargs):
        # Shared delete + success message, previously duplicated in three
        # branches of run().
        self.manager.delete(instance, **kwargs)
        if from_disk:
            print(
                'Resource with id "%s" has been successfully deleted from database and disk.'
                % (resource_id)
            )
        else:
            print(
                'Resource with id "%s" has been successfully deleted.'
                % (resource_id)
            )

    @add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Delete the resource; Action resources prompt for confirmation
        (unless --yes) because their files are removed from disk too."""
        resource_id = getattr(args, self.pk_argument_name, None)
        instance = self.get_resource(resource_id, **kwargs)

        if args.yes:
            self._delete_and_report(instance, resource_id, from_disk=True, **kwargs)
        elif isinstance(instance, st2client.models.action.Action):
            user_input = input(
                "It will delete action files on disk as well. Do you want to continue? (y/n): "
            )
            if user_input.lower() in ("y", "yes"):
                self._delete_and_report(instance, resource_id, from_disk=True, **kwargs)
            else:
                print("Action is not deleted.")
        else:
            self._delete_and_report(instance, resource_id, from_disk=False, **kwargs)

    def run_and_print(self, args, **kwargs):
        """Delete the resource, converting a miss into OperationFailureException."""
        resource_id = getattr(args, self.pk_argument_name, None)

        try:
            self.run(args, **kwargs)
        except ResourceNotFoundError:
            self.print_not_found(resource_id)
            raise OperationFailureException("Resource %s not found." % resource_id)
class ContentPackResourceDeleteCommand(ResourceDeleteCommand):
    """Delete command for resources which live inside a content pack;
    the primary key is a reference or an id."""

    pk_argument_name = "ref_or_id"
def load_meta_file(file_path):
    """Parse a JSON or YAML metadata file and return its contents.

    Raises a generic Exception when the file is missing or has an
    unsupported extension.
    """
    if not os.path.isfile(file_path):
        raise Exception('File "%s" does not exist.' % file_path)

    file_ext = os.path.splitext(file_path)[1]
    if file_ext not in ALLOWED_EXTS:
        raise Exception(
            "Unsupported meta type %s, file %s. Allowed: %s"
            % (file_ext, file_path, ALLOWED_EXTS)
        )

    with open(file_path, "r") as f:
        return PARSER_FUNCS[file_ext](f)
Updating /st2client/commands/resource.py
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from typing import List
from typing import Any
from typing import Dict
import os
import abc
import six
import json
import logging
import traceback
from functools import wraps
import yaml
from six.moves import http_client
from st2client import commands
from st2client.exceptions.operations import OperationFailureException
from st2client.formatters import table
from st2client.utils.types import OrderedSet
import st2client
ALLOWED_EXTS = [".json", ".yaml", ".yml"]
PARSER_FUNCS = {".json": json.load, ".yml": yaml.safe_load, ".yaml": yaml.safe_load}
LOG = logging.getLogger(__name__)
def add_auth_token_to_kwargs_from_cli(func):
    """Decorator: copy ``token`` / ``api_key`` from the parsed CLI namespace
    (the wrapped function's second positional argument) into its kwargs."""

    @wraps(func)
    def decorate(*args, **kwargs):
        ns = args[1]
        token = getattr(ns, "token", None)
        if token:
            kwargs["token"] = token
        api_key = getattr(ns, "api_key", None)
        if api_key:
            kwargs["api_key"] = api_key
        return func(*args, **kwargs)

    return decorate
class ResourceCommandError(Exception):
    """Raised when a resource command fails to execute."""
class ResourceNotFoundError(Exception):
    """Raised when a requested resource cannot be located."""
class ResourceBranch(commands.Branch):
    """
    CLI branch which registers the standard CRUD subcommands for one
    resource type: list/get always; create/update/delete unless read-only;
    enable/disable when requested.
    """

    def __init__(
        self,
        resource,
        description,
        app,
        subparsers,
        parent_parser=None,
        read_only=False,
        commands=None,
        has_disable=False,
    ):
        """
        :param resource: Resource model class this branch manages.
        :param read_only: When True, only "list" and "get" are registered.
        :param commands: Optional mapping overriding individual command classes.
        :param has_disable: When True, also register "enable"/"disable".
        """
        self.resource = resource
        super(ResourceBranch, self).__init__(
            self.resource.get_alias().lower(),
            description,
            app,
            subparsers,
            parent_parser=parent_parser,
        )

        # Registers subcommands for managing the resource type.
        self.subparsers = self.parser.add_subparsers(
            help=(
                "List of commands for managing %s."
                % self.resource.get_plural_display_name().lower()
            )
        )

        # Resolves if commands need to be overridden.
        # Bug fix: work on a copy so a caller-supplied "commands" dict is not
        # mutated in place when the defaults are filled in below.
        commands = dict(commands) if commands else {}
        cmd_map = {
            "list": ResourceListCommand,
            "get": ResourceGetCommand,
            "create": ResourceCreateCommand,
            "update": ResourceUpdateCommand,
            "delete": ResourceDeleteCommand,
            "enable": ResourceEnableCommand,
            "disable": ResourceDisableCommand,
        }
        for cmd, cmd_class in cmd_map.items():
            commands.setdefault(cmd, cmd_class)

        # Instantiate commands.
        args = [self.resource, self.app, self.subparsers]
        self.commands["list"] = commands["list"](*args)
        self.commands["get"] = commands["get"](*args)

        if not read_only:
            self.commands["create"] = commands["create"](*args)
            self.commands["update"] = commands["update"](*args)
            self.commands["delete"] = commands["delete"](*args)

        if has_disable:
            self.commands["enable"] = commands["enable"](*args)
            self.commands["disable"] = commands["disable"](*args)
@six.add_metaclass(abc.ABCMeta)
class ResourceCommand(commands.Command):
    """
    Abstract base class for CLI commands operating on a single resource
    type. Wires up the shared auth (-t/--token, --api-key) and output
    formatting (-j/--json, -y/--yaml) options.
    """

    # Name of the parsed-args attribute holding the resource primary key
    # (e.g. "name_or_id" / "ref_or_id"); set by concrete subclasses.
    pk_argument_name = None

    def __init__(self, resource, *args, **kwargs):
        # has_token_opt is consumed here and not forwarded to the base class.
        has_token_opt = kwargs.pop("has_token_opt", True)

        super(ResourceCommand, self).__init__(*args, **kwargs)
        self.resource = resource

        if has_token_opt:
            self.parser.add_argument(
                "-t",
                "--token",
                dest="token",
                help="Access token for user authentication. "
                "Get ST2_AUTH_TOKEN from the environment "
                "variables by default.",
            )
            self.parser.add_argument(
                "--api-key",
                dest="api_key",
                help="Api Key for user authentication. "
                "Get ST2_API_KEY from the environment "
                "variables by default.",
            )

        # Formatter flags
        self.parser.add_argument(
            "-j",
            "--json",
            action="store_true",
            dest="json",
            help="Print output in JSON format.",
        )
        self.parser.add_argument(
            "-y",
            "--yaml",
            action="store_true",
            dest="yaml",
            help="Print output in YAML format.",
        )

    @property
    def manager(self):
        # API manager for this resource type, looked up by class name.
        return self.app.client.managers[self.resource.__name__]

    @property
    def arg_name_for_resource_id(self):
        # e.g. "key value pair" -> "key-value-pair-id"
        resource_name = self.resource.get_display_name().lower()
        return "%s-id" % resource_name.replace(" ", "-")

    def print_not_found(self, name):
        print('%s "%s" is not found.\n' % (self.resource.get_display_name(), name))

    def get_resource(self, name_or_id, **kwargs):
        # Dispatch on the subclass's pk_argument_name to pick the lookup.
        pk_argument_name = self.pk_argument_name

        if pk_argument_name == "name_or_id":
            instance = self.get_resource_by_name_or_id(name_or_id=name_or_id, **kwargs)
        elif pk_argument_name == "ref_or_id":
            instance = self.get_resource_by_ref_or_id(ref_or_id=name_or_id, **kwargs)
        else:
            instance = self.get_resource_by_pk(pk=name_or_id, **kwargs)

        return instance

    def get_resource_by_pk(self, pk, **kwargs):
        """
        Retrieve resource by a primary key.

        Returns None when the lookup fails for any reason other than an
        Unauthorized response, which is re-raised.
        """
        try:
            instance = self.manager.get_by_id(pk, **kwargs)
        except Exception as e:
            traceback.print_exc()
            # Hack for "Unauthorized" exceptions, we do want to propagate those
            response = getattr(e, "response", None)
            status_code = getattr(response, "status_code", None)
            if status_code and status_code == http_client.UNAUTHORIZED:
                raise e
            instance = None
        return instance

    def get_resource_by_id(self, id, **kwargs):
        # Like get_resource_by_pk, but raises ResourceNotFoundError on a miss.
        instance = self.get_resource_by_pk(pk=id, **kwargs)
        if not instance:
            message = 'Resource with id "%s" doesn\'t exist.' % (id)
            raise ResourceNotFoundError(message)
        return instance

    def get_resource_by_name(self, name, **kwargs):
        """
        Retrieve resource by name.
        """
        instance = self.manager.get_by_name(name, **kwargs)
        return instance

    def get_resource_by_name_or_id(self, name_or_id, **kwargs):
        # Try a name lookup first, then fall back to a primary-key lookup.
        instance = self.get_resource_by_name(name=name_or_id, **kwargs)
        if not instance:
            instance = self.get_resource_by_pk(pk=name_or_id, **kwargs)
        if not instance:
            message = 'Resource with id or name "%s" doesn\'t exist.' % (name_or_id)
            raise ResourceNotFoundError(message)
        return instance

    def get_resource_by_ref_or_id(self, ref_or_id, **kwargs):
        instance = self.manager.get_by_ref_or_id(ref_or_id=ref_or_id, **kwargs)
        if not instance:
            message = 'Resource with id or reference "%s" doesn\'t exist.' % (ref_or_id)
            raise ResourceNotFoundError(message)
        return instance

    def _get_multiple_resources(
        self, resource_ids: List[str], kwargs: Dict[str, Any]
    ) -> List[Any]:
        """
        Return multiple resource instances for the provided resource ids.

        If a resource is not found, an error is printed. This method only throws when operating on
        a single resource.

        :param resource_ids: A list of resources to retrieve instances for.
        :param kwargs: Dictionary with keyword arguments which are passed to get_resource_by_id.
        """
        more_than_one_resource = len(resource_ids) > 1
        resources = []
        for resource_id in resource_ids:
            try:
                resource = self.get_resource_by_id(resource_id, **kwargs)
            except ResourceNotFoundError:
                self.print_not_found(resource_id)
                if not more_than_one_resource:
                    # For backward compatibility reasons and to comply with common "get one"
                    # behavior, we only fail if a single source is requested
                    raise ResourceNotFoundError("Resource %s not found." % resource_id)
                continue
            resources.append(resource)
        return resources

    @abc.abstractmethod
    def run(self, args, **kwargs):
        """Execute the command and return its result (subclass hook)."""
        raise NotImplementedError

    @abc.abstractmethod
    def run_and_print(self, args, **kwargs):
        """Execute the command and render its output (subclass hook)."""
        raise NotImplementedError

    def _get_metavar_for_argument(self, argument):
        # "name_or_id" -> "name-or-id" for nicer help output.
        return argument.replace("_", "-")

    def _get_help_for_argument(self, resource, argument):
        # Build a human-friendly help string for the PK positional argument.
        argument_display_name = argument.title()
        resource_display_name = resource.get_display_name().lower()
        if "ref" in argument:
            result = "Reference or ID of the %s." % (resource_display_name)
        elif "name_or_id" in argument:
            result = "Name or ID of the %s." % (resource_display_name)
        else:
            result = "%s of the %s." % (argument_display_name, resource_display_name)
        return result
class ResourceViewCommand(ResourceCommand):
    """
    Base class for read / view commands (list and get).
    """

    @classmethod
    def _get_include_attributes(cls, args, extra_attributes=None):
        """
        Return a list of attributes to send to the API using ?include_attributes filter.

        If None / empty list is returned it's assumed no filtering is to be performed and all
        attributes are to be retrieved.

        :param extra_attributes: Additional include attributes which should always be included.
        :type extra_attributes: ``list`` of ``str``
        """
        include_attributes = list(extra_attributes or [])

        # Special case for "all" - disables server-side filtering entirely
        if "all" in args.attr:
            return None

        # User-specified --attr / -a values (idiom fix: extend instead of a
        # manual append loop)
        include_attributes.extend(args.attr)

        if include_attributes:
            return include_attributes

        # Only reached when args.attr is empty and no extra attributes were
        # provided: fall back to the class's default display attributes.
        display_attributes = getattr(cls, "display_attributes", [])
        if display_attributes:
            include_attributes += display_attributes

        # De-duplicate while preserving order.
        include_attributes = list(OrderedSet(include_attributes))
        return include_attributes
class ResourceTableCommand(ResourceViewCommand):
    """View command which renders a collection of resources as a table."""

    display_attributes = ["id", "name", "description"]

    def __init__(self, resource, name, description, *args, **kwargs):
        super(ResourceTableCommand, self).__init__(
            resource, name, description, *args, **kwargs
        )

        # Column / attribute selection ("all" disables filtering).
        self.parser.add_argument(
            "-a",
            "--attr",
            nargs="+",
            default=self.display_attributes,
            help='List of attributes to include in the output. "all" will return all attributes.',
        )
        # Optional per-column width overrides.
        self.parser.add_argument(
            "-w",
            "--width",
            nargs="+",
            type=int,
            default=None,
            help="Set the width of columns in output.",
        )

    @add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Fetch all resources, asking the API for only the needed attributes."""
        attributes = self._get_include_attributes(args=args)
        if attributes:
            kwargs["params"] = {"include_attributes": ",".join(attributes)}
        return self.manager.get_all(**kwargs)

    def run_and_print(self, args, **kwargs):
        """Fetch the resources and print them as a multi-column table."""
        self.print_output(
            self.run(args, **kwargs),
            table.MultiColumnTable,
            attributes=args.attr,
            widths=args.width,
            json=args.json,
            yaml=args.yaml,
        )
class ResourceListCommand(ResourceTableCommand):
    """Table command pre-configured as the "list" subcommand for a resource."""

    def __init__(self, resource, *args, **kwargs):
        # Fixed command name "list" and a pluralized description; everything
        # else (arguments, run, printing) comes from ResourceTableCommand.
        super(ResourceListCommand, self).__init__(
            resource,
            "list",
            "Get the list of %s." % resource.get_plural_display_name().lower(),
            *args,
            **kwargs,
        )
class ContentPackResourceListCommand(ResourceListCommand):
    """
    Base command class for use with resources which belong to a content pack.
    """

    def __init__(self, resource, *args, **kwargs):
        super(ContentPackResourceListCommand, self).__init__(resource, *args, **kwargs)

        # Optional filter restricting results to a single pack.
        self.parser.add_argument(
            "-p",
            "--pack",
            type=str,
            help="Only return resources belonging to the provided pack",
        )

    @add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """List the resources, optionally restricted to one pack."""
        filters = {"pack": args.pack}
        filters.update(**kwargs)

        attributes = self._get_include_attributes(args=args)
        if attributes:
            filters["params"] = {"include_attributes": ",".join(attributes)}

        return self.manager.get_all(**filters)
class ResourceGetCommand(ResourceViewCommand):
    """View command which retrieves one or more individual resources by PK."""

    display_attributes = ["all"]
    attribute_display_order = ["id", "name", "description"]
    pk_argument_name = "name_or_id"  # name of the attribute which stores resource PK
    help_string = None

    def __init__(self, resource, *args, **kwargs):
        super(ResourceGetCommand, self).__init__(
            resource,
            "get",
            self.help_string
            or "Get individual %s." % resource.get_display_name().lower(),
            *args,
            **kwargs,
        )

        # Positional PK argument accepts one or more values.
        pk_argument = self.pk_argument_name
        self.parser.add_argument(
            pk_argument,
            metavar=self._get_metavar_for_argument(argument=pk_argument),
            nargs="+",
            help=self._get_help_for_argument(resource=resource, argument=pk_argument),
        )
        self.parser.add_argument(
            "-a",
            "--attr",
            nargs="+",
            default=self.display_attributes,
            help='List of attributes to include in the output. "all" or unspecified will return all attributes.',
        )

    @add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Resolve every requested PK into a resource instance."""
        resource_ids = getattr(args, self.pk_argument_name, None)
        return self._get_multiple_resources(resource_ids=resource_ids, kwargs=kwargs)

    def run_and_print(self, args, **kwargs):
        """Print each retrieved resource as a property/value table."""
        for instance in self.run(args, **kwargs):
            self.print_output(
                instance,
                table.PropertyValueTable,
                attributes=args.attr,
                json=args.json,
                yaml=args.yaml,
                attribute_display_order=self.attribute_display_order,
            )
class ContentPackResourceGetCommand(ResourceGetCommand):
    """
    Command for retrieving a single resource which belongs to a content pack.

    Note: All the resources which belong to the content pack can either be
    retrieved by a reference or by an id.
    """

    # Show "pack" right after the id when printing attributes.
    attribute_display_order = ["id", "pack", "name", "description"]

    # The positional argument accepts either a "pack.name" reference or an ID.
    pk_argument_name = "ref_or_id"

    def get_resource(self, ref_or_id, **kwargs):
        # Delegates lookup to get_resource_by_ref_or_id, which handles both
        # forms of the PK.
        return self.get_resource_by_ref_or_id(ref_or_id=ref_or_id, **kwargs)
class ResourceCreateCommand(ResourceCommand):
    """Command which creates a new resource from a JSON/YAML definition file."""

    def __init__(self, resource, *args, **kwargs):
        super(ResourceCreateCommand, self).__init__(
            resource,
            "create",
            "Create a new %s." % resource.get_display_name().lower(),
            *args,
            **kwargs,
        )

        self.parser.add_argument(
            "file",
            help=(
                "JSON/YAML file containing the %s to create."
                % resource.get_display_name().lower()
            ),
        )

    @add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Deserialize the file contents and create the resource via the API."""
        instance = self.resource.deserialize(load_meta_file(args.file))
        return self.manager.create(instance, **kwargs)

    def run_and_print(self, args, **kwargs):
        """Create the resource and print it; any failure is re-raised as
        OperationFailureException after being reported on stdout."""
        try:
            instance = self.run(args, **kwargs)
            if not instance:
                raise Exception("Server did not create instance.")
            self.print_output(
                instance,
                table.PropertyValueTable,
                attributes=["all"],
                json=args.json,
                yaml=args.yaml,
            )
        except Exception as e:
            message = six.text_type(e)
            print("ERROR: %s" % (message))
            raise OperationFailureException(message)
class ResourceUpdateCommand(ResourceCommand):
    """Command which updates an existing resource from a JSON/YAML file."""

    pk_argument_name = "name_or_id"

    def __init__(self, resource, *args, **kwargs):
        super(ResourceUpdateCommand, self).__init__(
            resource,
            "update",
            # Imperative mood for consistency with the sibling commands
            # ("Create a new ...", "Enable an existing ...", etc.).
            "Update an existing %s." % resource.get_display_name().lower(),
            *args,
            **kwargs,
        )

        argument = self.pk_argument_name
        metavar = self._get_metavar_for_argument(argument=self.pk_argument_name)
        help = self._get_help_for_argument(
            resource=resource, argument=self.pk_argument_name
        )

        self.parser.add_argument(argument, metavar=metavar, help=help)
        self.parser.add_argument(
            "file",
            help=(
                "JSON/YAML file containing the %s to update."
                % resource.get_display_name().lower()
            ),
        )

    @add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """
        Load the new definition from the file and send the update to the API.

        If the file carries no id, the id of the existing resource is reused;
        if it carries a different id, the update is rejected.
        """
        resource_id = getattr(args, self.pk_argument_name, None)
        instance = self.get_resource(resource_id, **kwargs)

        data = load_meta_file(args.file)
        modified_instance = self.resource.deserialize(data)

        if not getattr(modified_instance, "id", None):
            modified_instance.id = instance.id
        elif modified_instance.id != instance.id:
            raise Exception(
                "The value for the %s id in the JSON/YAML file "
                "does not match the ID provided in the "
                "command line arguments." % self.resource.get_display_name().lower()
            )

        return self.manager.update(modified_instance, **kwargs)

    def run_and_print(self, args, **kwargs):
        """Update the resource and print it; printing failures are wrapped in
        OperationFailureException (errors from ``run`` itself propagate)."""
        instance = self.run(args, **kwargs)
        try:
            self.print_output(
                instance,
                table.PropertyValueTable,
                attributes=["all"],
                json=args.json,
                yaml=args.yaml,
            )
        except Exception as e:
            print("ERROR: %s" % (six.text_type(e)))
            raise OperationFailureException(six.text_type(e))
class ContentPackResourceUpdateCommand(ResourceUpdateCommand):
    """Update command for pack resources; the PK is a reference or an ID."""

    pk_argument_name = "ref_or_id"
class ResourceEnableCommand(ResourceCommand):
    """Command which sets ``enabled = True`` on an existing resource."""

    pk_argument_name = "name_or_id"

    def __init__(self, resource, *args, **kwargs):
        super(ResourceEnableCommand, self).__init__(
            resource,
            "enable",
            "Enable an existing %s." % resource.get_display_name().lower(),
            *args,
            **kwargs,
        )

        pk_argument = self.pk_argument_name
        self.parser.add_argument(
            pk_argument,
            metavar=self._get_metavar_for_argument(argument=pk_argument),
            help=self._get_help_for_argument(resource=resource, argument=pk_argument),
        )

    @add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Fetch the resource, flip its enabled flag on and send the update."""
        resource_id = getattr(args, self.pk_argument_name, None)
        instance = self.get_resource(resource_id, **kwargs)

        data = instance.serialize()
        # "ref" must not be round-tripped back to the API.
        data.pop("ref", None)
        data["enabled"] = True

        return self.manager.update(self.resource.deserialize(data), **kwargs)

    def run_and_print(self, args, **kwargs):
        """Enable the resource and print the updated instance."""
        self.print_output(
            self.run(args, **kwargs),
            table.PropertyValueTable,
            attributes=["all"],
            json=args.json,
            yaml=args.yaml,
        )
class ContentPackResourceEnableCommand(ResourceEnableCommand):
    """Enable command for pack resources; the PK is a reference or an ID."""

    pk_argument_name = "ref_or_id"
class ResourceDisableCommand(ResourceCommand):
    """Command which sets ``enabled = False`` on an existing resource."""

    pk_argument_name = "name_or_id"

    def __init__(self, resource, *args, **kwargs):
        super(ResourceDisableCommand, self).__init__(
            resource,
            "disable",
            "Disable an existing %s." % resource.get_display_name().lower(),
            *args,
            **kwargs,
        )

        pk_argument = self.pk_argument_name
        self.parser.add_argument(
            pk_argument,
            metavar=self._get_metavar_for_argument(argument=pk_argument),
            help=self._get_help_for_argument(resource=resource, argument=pk_argument),
        )

    @add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """Fetch the resource, flip its enabled flag off and send the update."""
        resource_id = getattr(args, self.pk_argument_name, None)
        instance = self.get_resource(resource_id, **kwargs)

        data = instance.serialize()
        # "ref" must not be round-tripped back to the API.
        data.pop("ref", None)
        data["enabled"] = False

        return self.manager.update(self.resource.deserialize(data), **kwargs)

    def run_and_print(self, args, **kwargs):
        """Disable the resource and print the updated instance."""
        self.print_output(
            self.run(args, **kwargs),
            table.PropertyValueTable,
            attributes=["all"],
            json=args.json,
            yaml=args.yaml,
        )
class ContentPackResourceDisableCommand(ResourceDisableCommand):
    """Disable command for pack resources; the PK is a reference or an ID."""

    pk_argument_name = "ref_or_id"
class ResourceDeleteCommand(ResourceCommand):
    """Command which deletes an existing resource, prompting before deleting
    Action files from disk unless --yes was given."""

    pk_argument_name = "name_or_id"

    def __init__(self, resource, *args, **kwargs):
        super(ResourceDeleteCommand, self).__init__(
            resource,
            "delete",
            "Delete an existing %s." % resource.get_display_name().lower(),
            *args,
            **kwargs,
        )

        pk_argument = self.pk_argument_name
        self.parser.add_argument(
            pk_argument,
            metavar=self._get_metavar_for_argument(argument=pk_argument),
            help=self._get_help_for_argument(resource=resource, argument=pk_argument),
        )
        self.parser.add_argument(
            "--yes",
            action="store_true",
            help="Auto yes flag to delete action files from disk.",
        )

    @add_auth_token_to_kwargs_from_cli
    def run(self, args, **kwargs):
        """
        Delete the resource identified by the PK argument.

        Deleting an Action also removes its files from disk, so without
        ``--yes`` a confirmation prompt is shown for Action resources.
        """
        resource_id = getattr(args, self.pk_argument_name, None)
        instance = self.get_resource(resource_id, **kwargs)

        if args.yes:
            self.manager.delete(instance, **kwargs)
            print(
                'Resource with id "%s" has been successfully deleted from database and disk.'
                % (resource_id)
            )
            return

        if not isinstance(instance, st2client.models.action.Action):
            # Non-action resources have no files on disk; delete right away.
            self.manager.delete(instance, **kwargs)
            print(
                'Resource with id "%s" has been successfully deleted.'
                % (resource_id)
            )
            return

        user_input = input(
            "It will delete action files on disk as well. Do you want to continue? (y/n): "
        )
        if user_input.lower() in ("y", "yes"):
            self.manager.delete(instance, **kwargs)
            print(
                'Resource with id "%s" has been successfully deleted from db and disk.'
                % (resource_id)
            )
        else:
            print("Action is not deleted.")

    def run_and_print(self, args, **kwargs):
        """Delete the resource, translating not-found into OperationFailureException."""
        resource_id = getattr(args, self.pk_argument_name, None)

        try:
            self.run(args, **kwargs)
        except ResourceNotFoundError:
            self.print_not_found(resource_id)
            raise OperationFailureException("Resource %s not found." % resource_id)
class ContentPackResourceDeleteCommand(ResourceDeleteCommand):
    """
    Base command class for deleting a resource which belongs to a content pack.
    """

    # The positional argument accepts either a "pack.name" reference or an ID.
    pk_argument_name = "ref_or_id"
def load_meta_file(file_path):
    """
    Parse a JSON/YAML metadata file and return its contents.

    :raises Exception: If the file does not exist or has an unsupported
                       extension (must be one of ALLOWED_EXTS).
    """
    if not os.path.isfile(file_path):
        raise Exception('File "%s" does not exist.' % file_path)

    file_ext = os.path.splitext(file_path)[1]
    if file_ext not in ALLOWED_EXTS:
        raise Exception(
            "Unsupported meta type %s, file %s. Allowed: %s"
            % (file_ext, file_path, ALLOWED_EXTS)
        )

    # Dispatch to the parser registered for this extension.
    with open(file_path, "r") as f:
        return PARSER_FUNCS[file_ext](f)
|
# -*- coding: utf-8 -*-
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from st2common.exceptions.param import ParamException
from st2common.models.system.common import ResourceReference
from st2common.models.db.liveaction import LiveActionDB
from st2common.models.db.keyvalue import KeyValuePairDB
from st2common.models.utils import action_param_utils
from st2common.persistence.keyvalue import KeyValuePair
from st2common.transport.publishers import PoolPublisher
from st2common.util import date as date_utils
from st2common.util import param as param_utils
from st2common.util.config_loader import get_config
from st2tests import DbTestCase
from st2tests.fixturesloader import FixturesLoader
# Fixture pack and model files shared by every test in this module.
FIXTURES_PACK = 'generic'
TEST_MODELS = {
    'actions': ['action_4_action_context_param.yaml', 'action_system_default.yaml'],
    'runners': ['testrunner1.yaml']
}
# Load all fixture models once at import time; tests index into this dict.
FIXTURES = FixturesLoader().load_models(fixtures_pack=FIXTURES_PACK,
                                        fixtures_dict=TEST_MODELS)
# Publishing is mocked out so DB writes in tests do not hit the message bus.
@mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
class ParamsUtilsTest(DbTestCase):
    # Fixture-backed models shared by all test methods in this class.
    action_db = FIXTURES['actions']['action_4_action_context_param.yaml']
    action_system_default_db = FIXTURES['actions']['action_system_default.yaml']
    runnertype_db = FIXTURES['runners']['testrunner1.yaml']
def test_get_finalized_params(self):
    """Happy-path resolution: defaults, overrides, immutability and context params."""
    params = {
        'actionstr': 'foo',
        'some_key_that_aint_exist_in_action_or_runner': 'bar',
        'runnerint': 555,
        'runnerimmutable': 'failed_override',
        'actionimmutable': 'failed_override'
    }
    liveaction_db = self._get_liveaction_model(params)

    runner_params, action_params = param_utils.get_finalized_params(
        ParamsUtilsTest.runnertype_db.runner_parameters,
        ParamsUtilsTest.action_db.parameters,
        liveaction_db.parameters,
        liveaction_db.context)

    # Asserts for runner params.
    # Assert that default values for runner params are resolved.
    self.assertEqual(runner_params.get('runnerstr'), 'defaultfoo')
    # Assert that a runner param from action exec is picked up.
    self.assertEqual(runner_params.get('runnerint'), 555)
    # Assert that a runner param can be overridden by action param default.
    self.assertEqual(runner_params.get('runnerdummy'), 'actiondummy')
    # Assert that a runner param default can be overridden by 'falsey' action param default,
    # (timeout: 0 case).
    self.assertEqual(runner_params.get('runnerdefaultint'), 0)
    # Assert that an immutable param cannot be overridden by action param or execution param.
    self.assertEqual(runner_params.get('runnerimmutable'), 'runnerimmutable')

    # Asserts for action params.
    self.assertEqual(action_params.get('actionstr'), 'foo')
    # Assert that a param that is provided in action exec that isn't in action or runner params
    # isn't in resolved params.
    self.assertEqual(action_params.get('some_key_that_aint_exist_in_action_or_runner'), None)
    # Assert that an immutable param cannot be overridden by execution param.
    self.assertEqual(action_params.get('actionimmutable'), 'actionimmutable')
    # Assert that an action context param is set correctly.
    self.assertEqual(action_params.get('action_api_user'), 'noob')
    # Assert that none of runner params are present in action_params.
    for k in action_params:
        self.assertTrue(k not in runner_params, 'Param ' + k + ' is a runner param.')
def test_get_finalized_params_system_values(self):
    """Param values can resolve from datastore (system) key-value pairs."""
    KeyValuePair.add_or_update(KeyValuePairDB(name='actionstr', value='foo'))
    KeyValuePair.add_or_update(KeyValuePairDB(name='actionnumber', value='1.0'))
    params = {
        'runnerint': 555
    }
    liveaction_db = self._get_liveaction_model(params)

    runner_params, action_params = param_utils.get_finalized_params(
        ParamsUtilsTest.runnertype_db.runner_parameters,
        ParamsUtilsTest.action_system_default_db.parameters,
        liveaction_db.parameters,
        liveaction_db.context)

    # Asserts for runner params.
    # Assert that default values for runner params are resolved.
    self.assertEqual(runner_params.get('runnerstr'), 'defaultfoo')
    # Assert that a runner param from action exec is picked up.
    self.assertEqual(runner_params.get('runnerint'), 555)
    # Assert that an immutable param cannot be overridden by action param or execution param.
    self.assertEqual(runner_params.get('runnerimmutable'), 'runnerimmutable')

    # Asserts for action params.
    self.assertEqual(action_params.get('actionstr'), 'foo')
    # Note: the string datastore value '1.0' arrives as the float 1.0 (cast).
    self.assertEqual(action_params.get('actionnumber'), 1.0)
def test_get_finalized_params_action_immutable(self):
    """Immutable action params ignore override attempts from execution params."""
    params = {
        'actionstr': 'foo',
        'some_key_that_aint_exist_in_action_or_runner': 'bar',
        'runnerint': 555,
        'actionimmutable': 'failed_override'
    }
    liveaction_db = self._get_liveaction_model(params)
    action_context = {'api_user': None}

    runner_params, action_params = param_utils.get_finalized_params(
        ParamsUtilsTest.runnertype_db.runner_parameters,
        ParamsUtilsTest.action_db.parameters,
        liveaction_db.parameters,
        action_context)

    # Asserts for runner params.
    # Assert that default values for runner params are resolved.
    self.assertEqual(runner_params.get('runnerstr'), 'defaultfoo')
    # Assert that a runner param from action exec is picked up.
    self.assertEqual(runner_params.get('runnerint'), 555)
    # Assert that a runner param can be overridden by action param default.
    self.assertEqual(runner_params.get('runnerdummy'), 'actiondummy')

    # Asserts for action params.
    self.assertEqual(action_params.get('actionstr'), 'foo')
    # Assert that a param that is provided in action exec that isn't in action or runner params
    # isn't in resolved params.
    self.assertEqual(action_params.get('some_key_that_aint_exist_in_action_or_runner'), None)
def test_get_finalized_params_empty(self):
    """All-empty inputs yield empty runner and action params."""
    live_params = {}
    runner_info = {}
    action_info = {}
    context = {}
    runner_out, action_out = param_utils.get_finalized_params(
        runner_info, action_info, live_params, context)
    self.assertEqual(runner_out, live_params)
    self.assertEqual(action_out, live_params)
def test_get_finalized_params_none(self):
    """Explicit None values survive resolution unchanged."""
    live_params = {
        'r1': None,
        'a1': None
    }
    runner_info = {'r1': {}}
    action_info = {'a1': {}}
    context = {'api_user': None}
    runner_out, action_out = param_utils.get_finalized_params(
        runner_info, action_info, live_params, context)
    self.assertEqual(runner_out, {'r1': None})
    self.assertEqual(action_out, {'a1': None})
def test_get_finalized_params_no_cast(self):
    """Without type info in the schemas, rendered values stay strings."""
    params = {
        'r1': '{{r2}}',
        'r2': 1,
        'a1': True,
        'a2': '{{r1}} {{a1}}',
        'a3': '{{action_context.api_user}}'
    }
    runner_param_info = {'r1': {}, 'r2': {}}
    action_param_info = {'a1': {}, 'a2': {}, 'a3': {}}
    action_context = {'api_user': 'noob'}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    # Rendered references come back as unicode strings, not as their
    # original types (r1 is u'1', not 1).
    self.assertEqual(r_runner_params, {'r1': u'1', 'r2': 1})
    self.assertEqual(r_action_params, {'a1': True, 'a2': u'1 True', 'a3': 'noob'})
def test_get_finalized_params_with_cast(self):
    """Rendered values are cast to the types declared in the param schemas."""
    # Note : In this test runner_params.r1 has a string value. However per runner_param_info the
    # type is an integer. The expected type is considered and cast is performed accordingly.
    params = {
        'r1': '{{r2}}',
        'r2': 1,
        'a1': True,
        'a2': '{{a1}}',
        'a3': '{{action_context.api_user}}'
    }
    runner_param_info = {'r1': {'type': 'integer'}, 'r2': {'type': 'integer'}}
    action_param_info = {'a1': {'type': 'boolean'}, 'a2': {'type': 'boolean'}, 'a3': {}}
    action_context = {'api_user': 'noob'}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': 1, 'r2': 1})
    self.assertEqual(r_action_params, {'a1': True, 'a2': True, 'a3': 'noob'})
def test_get_finalized_params_with_cast_overriden(self):
    """Cast behavior when the action schema redefines a runner param's type."""
    params = {
        'r1': '{{r2}}',
        'r2': 1,
        'a1': '{{r1}}',
        'a2': '{{r1}}',
        'a3': '{{r1}}'
    }
    runner_param_info = {'r1': {'type': 'integer'}, 'r2': {'type': 'integer'}}
    # 'r1' is declared again here with type string - exercised below.
    action_param_info = {'a1': {'type': 'boolean'}, 'a2': {'type': 'string'},
                         'a3': {'type': 'integer'}, 'r1': {'type': 'string'}}
    action_context = {'api_user': 'noob'}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': 1, 'r2': 1})
    self.assertEqual(r_action_params, {'a1': 1, 'a2': u'1', 'a3': 1})
def test_get_finalized_params_cross_talk_no_cast(self):
    """Runner params may reference action params (and vice versa), uncast."""
    params = {
        'r1': '{{a1}}',
        'r2': 1,
        'a1': True,
        'a2': '{{r1}} {{a1}}',
        'a3': '{{action_context.api_user}}'
    }
    runner_param_info = {'r1': {}, 'r2': {}}
    action_param_info = {'a1': {}, 'a2': {}, 'a3': {}}
    action_context = {'api_user': 'noob'}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': u'True', 'r2': 1})
    self.assertEqual(r_action_params, {'a1': True, 'a2': u'True True', 'a3': 'noob'})
def test_get_finalized_params_cross_talk_with_cast(self):
    """Cross-referencing params combined with schema-driven casting."""
    params = {
        'r1': '{{a1}}',
        'r2': 1,
        'r3': 1,
        'a1': True,
        'a2': '{{r1}},{{a1}},{{a3}},{{r3}}',
        'a3': '{{a1}}'
    }
    runner_param_info = {'r1': {'type': 'boolean'}, 'r2': {'type': 'integer'}, 'r3': {}}
    action_param_info = {'a1': {'type': 'boolean'}, 'a2': {'type': 'array'}, 'a3': {}}
    action_context = {}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': True, 'r2': 1, 'r3': 1})
    # 'a2' is cast to type array, producing a tuple of rendered values.
    self.assertEqual(r_action_params, {'a1': True, 'a2': (True, True, True, 1), 'a3': u'True'})
def test_get_finalized_params_order(self):
    """Precedence: live params beat action defaults, which beat runner defaults."""
    # Scenario 1: explicit live params win over all defaults.
    params = {
        'r1': 'p1',
        'r2': 'p2',
        'r3': 'p3',
        'a1': 'p4',
        'a2': 'p5'
    }
    runner_param_info = {'r1': {}, 'r2': {'default': 'r2'}, 'r3': {'default': 'r3'}}
    action_param_info = {'a1': {}, 'a2': {'default': 'a2'}, 'r3': {'default': 'a3'}}
    action_context = {'api_user': 'noob'}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': u'p1', 'r2': u'p2', 'r3': u'p3'})
    self.assertEqual(r_action_params, {'a1': u'p4', 'a2': u'p5'})

    # Scenario 2: no live params - action default for r3 overrides runner default.
    params = {}
    runner_param_info = {'r1': {}, 'r2': {'default': 'r2'}, 'r3': {'default': 'r3'}}
    action_param_info = {'a1': {}, 'a2': {'default': 'a2'}, 'r3': {'default': 'a3'}}
    action_context = {'api_user': 'noob'}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': None, 'r2': u'r2', 'r3': u'a3'})
    self.assertEqual(r_action_params, {'a1': None, 'a2': u'a2'})

    # Scenario 3: only the action side supplies a default for r3.
    params = {}
    runner_param_info = {'r1': {}, 'r2': {'default': 'r2'}, 'r3': {}}
    action_param_info = {'r1': {}, 'r2': {}, 'r3': {'default': 'a3'}}
    action_context = {'api_user': 'noob'}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': None, 'r2': u'r2', 'r3': u'a3'})
def test_get_finalized_params_non_existent_template_key_in_action_context(self):
    """
    Referencing a key absent from the action context must raise a
    ParamException which names the offending parameter.
    """
    params = {
        'r1': 'foo',
        'r2': 2,
        'a1': 'i love tests',
        'a2': '{{action_context.lorem_ipsum}}'
    }
    runner_param_info = {'r1': {'type': 'string'}, 'r2': {'type': 'integer'}}
    action_param_info = {'a1': {'type': 'string'}, 'a2': {'type': 'string'}}
    action_context = {'api_user': 'noob', 'source_channel': 'reddit'}
    try:
        r_runner_params, r_action_params = param_utils.get_finalized_params(
            runner_param_info, action_param_info, params, action_context)
        self.fail('This should have thrown because we are trying to deref a key in ' +
                  'action context that ain\'t exist.')
    except ParamException as e:
        error_msg = 'Failed to render parameter "a2": \'dict object\' ' + \
                    'has no attribute \'lorem_ipsum\''
        # assertIn produces a more useful failure message than
        # assertTrue(x in y); the redundant trailing `pass` was removed.
        self.assertIn(error_msg, e.message)
def test_unicode_value_casting(self):
    """String-casting of non-ASCII (unicode) values must not raise."""
    rendered = {'a1': 'unicode1 ٩(̾●̮̮̃̾•̃̾)۶ unicode2'}
    parameter_schemas = {'a1': {'type': 'string'}}

    result = param_utils._cast_params(rendered=rendered,
                                      parameter_schemas=parameter_schemas)
    # Expected value spells out the UTF-8 bytes as escapes (Python 2 era).
    expected = {
        'a1': (u'unicode1 \xd9\xa9(\xcc\xbe\xe2\x97\x8f\xcc\xae\xcc\xae\xcc'
               u'\x83\xcc\xbe\xe2\x80\xa2\xcc\x83\xcc\xbe)\xdb\xb6 unicode2')
    }
    self.assertEqual(result, expected)
def test_get_finalized_params_with_casting_unicode_values(self):
    """End-to-end finalization preserves non-ASCII (unicode) string values."""
    params = {'a1': 'unicode1 ٩(̾●̮̮̃̾•̃̾)۶ unicode2'}

    runner_param_info = {}
    action_param_info = {'a1': {'type': 'string'}}

    action_context = {}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)

    # Expected value spells out the UTF-8 bytes as escapes (Python 2 era).
    expected_action_params = {
        'a1': (u'unicode1 \xd9\xa9(\xcc\xbe\xe2\x97\x8f\xcc\xae\xcc\xae\xcc'
               u'\x83\xcc\xbe\xe2\x80\xa2\xcc\x83\xcc\xbe)\xdb\xb6 unicode2')
    }
    self.assertEqual(r_runner_params, {})
    self.assertEqual(r_action_params, expected_action_params)
def test_get_finalized_params_with_dict(self):
    """Templates nested inside dict-typed params are rendered recursively."""
    # Note : In this test runner_params.r1 has a string value. However per runner_param_info the
    # type is an integer. The expected type is considered and cast is performed accordingly.
    params = {
        'r1': '{{r2}}',
        'r2': {'r2.1': 1},
        'a1': True,
        'a2': '{{a1}}',
        'a3': {
            'test': '{{a1}}',
            'test1': '{{a4}}',
            'test2': '{{a5}}',
        },
        'a4': 3,
        'a5': ['1', '{{a1}}']
    }
    runner_param_info = {'r1': {'type': 'object'}, 'r2': {'type': 'object'}}
    action_param_info = {
        'a1': {
            'type': 'boolean',
        },
        'a2': {
            'type': 'boolean',
        },
        'a3': {
            'type': 'object',
        },
        'a4': {
            'type': 'integer',
        },
        'a5': {
            'type': 'array',
        },
    }
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, {})
    self.assertEqual(
        r_runner_params, {'r1': {'r2.1': 1}, 'r2': {'r2.1': 1}})
    # Values nested inside a3 keep the types of the params they reference.
    self.assertEqual(
        r_action_params,
        {
            'a1': True,
            'a2': True,
            'a3': {
                'test': True,
                'test1': 3,
                'test2': [
                    '1',
                    True
                ],
            },
            'a4': 3,
            'a5': [
                '1',
                True
            ],
        }
    )
def test_get_finalized_params_with_list(self):
    """Templates nested inside list-typed params are rendered recursively."""
    # Note : In this test runner_params.r1 has a string value. However per runner_param_info the
    # type is an integer. The expected type is considered and cast is performed accordingly.
    self.maxDiff = None
    params = {
        'r1': '{{r2}}',
        'r2': ['1', '2'],
        'a1': True,
        'a2': 'Test',
        'a3': 'Test2',
        'a4': '{{a1}}',
        'a5': ['{{a2}}', '{{a3}}'],
        'a6': [
            ['{{r2}}', '{{a2}}'],
            ['{{a3}}', '{{a1}}'],
            [
                '{{a7}}',
                'This should be rendered as a string {{a1}}',
                '{{a1}} This, too, should be rendered as a string {{a1}}',
            ]
        ],
        'a7': 5,
    }
    runner_param_info = {'r1': {'type': 'array'}, 'r2': {'type': 'array'}}
    action_param_info = {
        'a1': {'type': 'boolean'},
        'a2': {'type': 'string'},
        'a3': {'type': 'string'},
        'a4': {'type': 'boolean'},
        'a5': {'type': 'array'},
        'a6': {'type': 'array'},
        'a7': {'type': 'integer'},
    }
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, {})
    self.assertEqual(r_runner_params, {'r1': ['1', '2'], 'r2': ['1', '2']})
    # Pure-template items keep referenced types; mixed text stays a string.
    self.assertEqual(
        r_action_params,
        {
            'a1': True,
            'a2': 'Test',
            'a3': 'Test2',
            'a4': True,
            'a5': ['Test', 'Test2'],
            'a6': [
                [['1', '2'], 'Test'],
                ['Test2', True],
                [
                    5,
                    u'This should be rendered as a string True',
                    u'True This, too, should be rendered as a string True'
                ]
            ],
            'a7': 5,
        }
    )
def test_get_finalized_params_with_cyclic_dependency(self):
    """Mutually-referencing params raise a ParamException starting with 'Cyclic'."""
    params = {'r1': '{{r2}}', 'r2': '{{r1}}'}
    runner_param_info = {'r1': {}, 'r2': {}}
    action_param_info = {}
    test_pass = True
    try:
        param_utils.get_finalized_params(runner_param_info, action_param_info, params, {})
        # Reaching here means the cycle went undetected.
        test_pass = False
    except ParamException as e:
        test_pass = e.message.startswith('Cyclic')
    self.assertTrue(test_pass)
def test_get_finalized_params_with_missing_dependency(self):
    """References to undefined variables raise a 'Dependency' ParamException."""
    # Two scenarios: the dangling reference in the live params, and in the
    # runner param defaults.
    scenarios = [
        ({'r1': '{{r3}}', 'r2': '{{r3}}'},
         {'r1': {}, 'r2': {}}),
        ({},
         {'r1': {'default': '{{r3}}'}, 'r2': {'default': '{{r3}}'}}),
    ]
    for params, runner_param_info in scenarios:
        action_param_info = {}
        test_pass = True
        try:
            param_utils.get_finalized_params(runner_param_info, action_param_info, params, {})
            # Reaching here means the missing dependency went undetected.
            test_pass = False
        except ParamException as e:
            test_pass = e.message.startswith('Dependency')
        self.assertTrue(test_pass)
def test_get_finalized_params_no_double_rendering(self):
    """Output of one render pass is not fed through the renderer again."""
    params = {
        'r1': '{{ action_context.h1 }}{{ action_context.h2 }}'
    }
    runner_param_info = {'r1': {}}
    action_param_info = {}
    # h1 + h2 concatenate into '{{ missing }}' - which must survive as-is.
    action_context = {
        'h1': '{',
        'h2': '{ missing }}'
    }
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': '{{ missing }}'})
    self.assertEqual(r_action_params, {})
def test_get_finalized_params_jinja_filters(self):
    """Custom Jinja filters (version_bump_minor) are usable inside templates."""
    params = {'cmd': 'echo {{"1.6.0" | version_bump_minor}}'}
    runner_param_info = {'r1': {}}
    action_param_info = {'cmd': {}}
    _, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, {})
    self.assertEqual(r_action_params['cmd'], "echo 1.7.0")
def test_get_finalized_params_param_rendering_failure(self):
    """A template that fails to render surfaces as a ParamException naming the param."""
    params = {'cmd': '{{a2.foo}}', 'a2': 'test'}
    action_param_info = {'cmd': {}, 'a2': {}}

    expected_msg = 'Failed to render parameter "cmd": .*'
    self.assertRaisesRegexp(ParamException,
                            expected_msg,
                            param_utils.get_finalized_params,
                            runnertype_parameter_info={},
                            action_parameter_info=action_param_info,
                            liveaction_parameters=params,
                            action_context={})
def test_get_finalized_param_object_contains_template_notation_in_the_value(self):
    """Templates inside an object-typed default are resolved from live params."""
    runner_param_info = {'r1': {}}
    action_param_info = {
        'params': {
            'type': 'object',
            'default': {
                'host': '{{host}}',
                'port': '{{port}}',
                'path': '/bar'}
        }
    }
    params = {
        'host': 'lolcathost',
        'port': 5555
    }
    action_context = {}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)

    # port keeps its integer type; path passes through untouched.
    expected_params = {
        'host': 'lolcathost',
        'port': 5555,
        'path': '/bar'
    }
    self.assertEqual(r_action_params['params'], expected_params)
def test_cast_param_referenced_action_doesnt_exist(self):
    """cast_params raises ValueError for an unknown action reference."""
    # Make sure the function throws if the action doesnt exist
    expected_msg = 'Action with ref "foo.doesntexist" doesn\'t exist'
    self.assertRaisesRegexp(ValueError, expected_msg, action_param_utils.cast_params,
                            action_ref='foo.doesntexist', params={})
def test_get_finalized_params_with_config(self):
    """config_context values resolve from the (mocked) pack config loader."""
    with mock.patch('st2common.util.config_loader.ContentPackConfigLoader') as config_loader:
        config_loader().get_config.return_value = {
            'generic_config_param': 'So generic'
        }

        params = {
            'config_param': '{{config_context.generic_config_param}}',
        }

        # with_config_context=True adds the pack/user keys the loader needs.
        liveaction_db = self._get_liveaction_model(params, True)

        _, action_params = param_utils.get_finalized_params(
            ParamsUtilsTest.runnertype_db.runner_parameters,
            ParamsUtilsTest.action_db.parameters,
            liveaction_db.parameters,
            liveaction_db.context)

        self.assertEqual(
            action_params.get('config_param'),
            'So generic'
        )
def test_get_config(self):
    """get_config returns {} unless both pack and user are provided."""
    with mock.patch('st2common.util.config_loader.ContentPackConfigLoader') as config_loader:
        mock_config_return = {
            'generic_config_param': 'So generic'
        }
        config_loader().get_config.return_value = mock_config_return

        # Missing pack and/or user short-circuits to an empty config.
        self.assertEqual(get_config(None, None), {})
        self.assertEqual(get_config('pack', None), {})
        self.assertEqual(get_config(None, 'user'), {})

        self.assertEqual(
            get_config('pack', 'user'), mock_config_return
        )
        config_loader.assert_called_with(pack_name='pack', user='user')
        config_loader().get_config.assert_called_once()
def _get_liveaction_model(self, params, with_config_context=False):
    """
    Build an (unsaved) LiveActionDB for the fixture action with the given
    parameters and a canned api_user/source_channel context.

    :param with_config_context: When True, also add the pack/user context
                                keys used for config_context rendering.
    """
    status = 'initializing'
    start_timestamp = date_utils.get_datetime_utc_now()
    action_ref = ResourceReference(name=ParamsUtilsTest.action_db.name,
                                   pack=ParamsUtilsTest.action_db.pack).ref
    liveaction_db = LiveActionDB(status=status, start_timestamp=start_timestamp,
                                 action=action_ref, parameters=params)
    liveaction_db.context = {
        'api_user': 'noob',
        'source_channel': 'reddit',
    }

    if with_config_context:
        liveaction_db.context.update(
            {
                'pack': 'generic',
                'user': 'st2admin'
            }
        )

    return liveaction_db
def test_get_live_params_with_additional_context(self):
    """render_live_params resolves templates against extra supplied contexts."""
    runner_schema = {
        'r1': {
            'default': 'some'
        }
    }
    action_schema = {
        'r2': {
            'default': '{{ r1 }}'
        }
    }
    requested = {
        'r3': 'lolcathost',
        'r1': '{{ additional.stuff }}'
    }
    extra_contexts = {
        'additional': {
            'stuff': 'generic'
        }
    }
    rendered = param_utils.render_live_params(
        runner_schema, action_schema, requested, {}, extra_contexts)
    # r1 resolves from the additional context and cascades into r2's default.
    self.assertEqual(rendered, {
        'r1': 'generic',
        'r2': 'generic',
        'r3': 'lolcathost'
    })
def test_cyclic_dependency_friendly_error_message(self):
    """Cyclic references produce an error naming the offending variables."""
    runner_param_info = {
        'r1': {
            'default': 'some',
            'cyclic': 'cyclic value',
            'morecyclic': 'cyclic value'
        }
    }
    action_param_info = {
        'r2': {
            'default': '{{ r1 }}'
        }
    }
    # Each of these variables references itself -> a dependency cycle.
    params = {
        'r3': 'lolcathost',
        'cyclic': '{{ cyclic }}',
        'morecyclic': '{{ morecyclic }}'
    }
    action_context = {}
    expected_msg = 'Cyclic dependency found in the following variables: cyclic, morecyclic'
    self.assertRaisesRegexp(ParamException, expected_msg, param_utils.render_live_params,
                            runner_param_info, action_param_info, params, action_context)

def test_unsatisfied_dependency_friendly_error_message(self):
    """A reference to an undefined variable names that variable in the error."""
    runner_param_info = {
        'r1': {
            'default': 'some',
        }
    }
    action_param_info = {
        'r2': {
            'default': '{{ r1 }}'
        }
    }
    params = {
        'r3': 'lolcathost',
        'r4': '{{ variable_not_defined }}',
    }
    action_context = {}
    expected_msg = 'Dependency unsatisfied in variable "variable_not_defined"'
    self.assertRaisesRegexp(ParamException, expected_msg, param_utils.render_live_params,
                            runner_param_info, action_param_info, params, action_context)
def test_add_default_templates_to_live_params(self):
    """Test addition of template values in defaults to live params.

    Uses assertEqual throughout; assertEquals is a deprecated alias
    removed in newer Python versions.
    """
    # Test with no live params, and two parameters - one should make it through because
    # it was a template, and the other shouldn't because its default wasn't a template
    schemas = [
        {
            'templateparam': {
                'default': '{{ 3 | int }}',
                'type': 'integer'
            }
        }
    ]
    context = {
        'templateparam': '3'
    }
    result = param_utils._cast_params_from({}, context, schemas)
    self.assertEqual(result, {'templateparam': 3})
    # Ensure parameter is skipped if the value in context is identical to default
    schemas = [
        {
            'nottemplateparam': {
                'default': '4',
                'type': 'integer'
            }
        }
    ]
    context = {
        'nottemplateparam': '4',
    }
    result = param_utils._cast_params_from({}, context, schemas)
    self.assertEqual(result, {})
    # Ensure parameter is skipped if the parameter doesn't have a default
    schemas = [
        {
            'nottemplateparam': {
                'type': 'integer'
            }
        }
    ]
    context = {
        'nottemplateparam': '4',
    }
    result = param_utils._cast_params_from({}, context, schemas)
    self.assertEqual(result, {})
    # Skip if the default value isn't a Jinja expression
    schemas = [
        {
            'nottemplateparam': {
                'default': '5',
                'type': 'integer'
            }
        }
    ]
    context = {
        'nottemplateparam': '4',
    }
    result = param_utils._cast_params_from({}, context, schemas)
    self.assertEqual(result, {})
    # Ensure parameter is skipped if the parameter is being overridden
    schemas = [
        {
            'templateparam': {
                'default': '{{ 3 | int }}',
                'type': 'integer'
            }
        }
    ]
    context = {
        'templateparam': '4',
    }
    result = param_utils._cast_params_from({'templateparam': '4'}, context, schemas)
    self.assertEqual(result, {'templateparam': 4})
Add unit test covering immutable parameters in _cast_params_from
# -*- coding: utf-8 -*-
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from st2common.exceptions.param import ParamException
from st2common.models.system.common import ResourceReference
from st2common.models.db.liveaction import LiveActionDB
from st2common.models.db.keyvalue import KeyValuePairDB
from st2common.models.utils import action_param_utils
from st2common.persistence.keyvalue import KeyValuePair
from st2common.transport.publishers import PoolPublisher
from st2common.util import date as date_utils
from st2common.util import param as param_utils
from st2common.util.config_loader import get_config
from st2tests import DbTestCase
from st2tests.fixturesloader import FixturesLoader
# Pack whose fixture files are loaded as DB models for these tests.
FIXTURES_PACK = 'generic'
# Fixture files to materialize, keyed by model type.
TEST_MODELS = {
    'actions': ['action_4_action_context_param.yaml', 'action_system_default.yaml'],
    'runners': ['testrunner1.yaml']
}
FIXTURES = FixturesLoader().load_models(fixtures_pack=FIXTURES_PACK,
                                        fixtures_dict=TEST_MODELS)
@mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
class ParamsUtilsTest(DbTestCase):
action_db = FIXTURES['actions']['action_4_action_context_param.yaml']
action_system_default_db = FIXTURES['actions']['action_system_default.yaml']
runnertype_db = FIXTURES['runners']['testrunner1.yaml']
def test_get_finalized_params(self):
    """End-to-end resolution of runner/action params: defaults, overrides,
    immutability and action_context references."""
    params = {
        'actionstr': 'foo',
        'some_key_that_aint_exist_in_action_or_runner': 'bar',
        'runnerint': 555,
        'runnerimmutable': 'failed_override',
        'actionimmutable': 'failed_override'
    }
    liveaction_db = self._get_liveaction_model(params)
    runner_params, action_params = param_utils.get_finalized_params(
        ParamsUtilsTest.runnertype_db.runner_parameters,
        ParamsUtilsTest.action_db.parameters,
        liveaction_db.parameters,
        liveaction_db.context)
    # Asserts for runner params.
    # Assert that default values for runner params are resolved.
    self.assertEqual(runner_params.get('runnerstr'), 'defaultfoo')
    # Assert that a runner param from action exec is picked up.
    self.assertEqual(runner_params.get('runnerint'), 555)
    # Assert that a runner param can be overridden by action param default.
    self.assertEqual(runner_params.get('runnerdummy'), 'actiondummy')
    # Assert that a runner param default can be overridden by 'falsey' action param default,
    # (timeout: 0 case).
    self.assertEqual(runner_params.get('runnerdefaultint'), 0)
    # Assert that an immutable param cannot be overridden by action param or execution param.
    self.assertEqual(runner_params.get('runnerimmutable'), 'runnerimmutable')
    # Asserts for action params.
    self.assertEqual(action_params.get('actionstr'), 'foo')
    # Assert that a param that is provided in action exec that isn't in action or runner params
    # isn't in resolved params.
    self.assertEqual(action_params.get('some_key_that_aint_exist_in_action_or_runner'), None)
    # Assert that an immutable param cannot be overridden by execution param.
    self.assertEqual(action_params.get('actionimmutable'), 'actionimmutable')
    # Assert that an action context param is set correctly.
    self.assertEqual(action_params.get('action_api_user'), 'noob')
    # Assert that none of runner params are present in action_params.
    for k in action_params:
        self.assertTrue(k not in runner_params, 'Param ' + k + ' is a runner param.')

def test_get_finalized_params_system_values(self):
    """Datastore (system) values are rendered into param defaults."""
    KeyValuePair.add_or_update(KeyValuePairDB(name='actionstr', value='foo'))
    KeyValuePair.add_or_update(KeyValuePairDB(name='actionnumber', value='1.0'))
    params = {
        'runnerint': 555
    }
    liveaction_db = self._get_liveaction_model(params)
    runner_params, action_params = param_utils.get_finalized_params(
        ParamsUtilsTest.runnertype_db.runner_parameters,
        ParamsUtilsTest.action_system_default_db.parameters,
        liveaction_db.parameters,
        liveaction_db.context)
    # Asserts for runner params.
    # Assert that default values for runner params are resolved.
    self.assertEqual(runner_params.get('runnerstr'), 'defaultfoo')
    # Assert that a runner param from action exec is picked up.
    self.assertEqual(runner_params.get('runnerint'), 555)
    # Assert that an immutable param cannot be overridden by action param or execution param.
    self.assertEqual(runner_params.get('runnerimmutable'), 'runnerimmutable')
    # Asserts for action params.
    self.assertEqual(action_params.get('actionstr'), 'foo')
    self.assertEqual(action_params.get('actionnumber'), 1.0)

def test_get_finalized_params_action_immutable(self):
    """Immutable action params cannot be overridden by execution params."""
    params = {
        'actionstr': 'foo',
        'some_key_that_aint_exist_in_action_or_runner': 'bar',
        'runnerint': 555,
        'actionimmutable': 'failed_override'
    }
    liveaction_db = self._get_liveaction_model(params)
    action_context = {'api_user': None}
    runner_params, action_params = param_utils.get_finalized_params(
        ParamsUtilsTest.runnertype_db.runner_parameters,
        ParamsUtilsTest.action_db.parameters,
        liveaction_db.parameters,
        action_context)
    # Asserts for runner params.
    # Assert that default values for runner params are resolved.
    self.assertEqual(runner_params.get('runnerstr'), 'defaultfoo')
    # Assert that a runner param from action exec is picked up.
    self.assertEqual(runner_params.get('runnerint'), 555)
    # Assert that a runner param can be overridden by action param default.
    self.assertEqual(runner_params.get('runnerdummy'), 'actiondummy')
    # Asserts for action params.
    self.assertEqual(action_params.get('actionstr'), 'foo')
    # Assert that a param that is provided in action exec that isn't in action or runner params
    # isn't in resolved params.
    self.assertEqual(action_params.get('some_key_that_aint_exist_in_action_or_runner'), None)
def test_get_finalized_params_empty(self):
    """Empty params and empty schemas resolve to empty dicts."""
    params = {}
    runner_param_info = {}
    action_param_info = {}
    action_context = {}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, params)
    self.assertEqual(r_action_params, params)

def test_get_finalized_params_none(self):
    """Explicit None values are preserved, not dropped or replaced."""
    params = {
        'r1': None,
        'a1': None
    }
    runner_param_info = {'r1': {}}
    action_param_info = {'a1': {}}
    action_context = {'api_user': None}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': None})
    self.assertEqual(r_action_params, {'a1': None})

def test_get_finalized_params_no_cast(self):
    """Without type info in the schema, rendered values stay as strings."""
    params = {
        'r1': '{{r2}}',
        'r2': 1,
        'a1': True,
        'a2': '{{r1}} {{a1}}',
        'a3': '{{action_context.api_user}}'
    }
    runner_param_info = {'r1': {}, 'r2': {}}
    action_param_info = {'a1': {}, 'a2': {}, 'a3': {}}
    action_context = {'api_user': 'noob'}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    # Templated r1 renders to the string u'1'; non-templated values keep type.
    self.assertEqual(r_runner_params, {'r1': u'1', 'r2': 1})
    self.assertEqual(r_action_params, {'a1': True, 'a2': u'1 True', 'a3': 'noob'})
def test_get_finalized_params_with_cast(self):
    """Rendered string values are cast to the schema-declared types."""
    # Note : In this test runner_params.r1 has a string value. However per runner_param_info the
    # type is an integer. The expected type is considered and cast is performed accordingly.
    params = {
        'r1': '{{r2}}',
        'r2': 1,
        'a1': True,
        'a2': '{{a1}}',
        'a3': '{{action_context.api_user}}'
    }
    runner_param_info = {'r1': {'type': 'integer'}, 'r2': {'type': 'integer'}}
    action_param_info = {'a1': {'type': 'boolean'}, 'a2': {'type': 'boolean'}, 'a3': {}}
    action_context = {'api_user': 'noob'}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': 1, 'r2': 1})
    self.assertEqual(r_action_params, {'a1': True, 'a2': True, 'a3': 'noob'})

def test_get_finalized_params_with_cast_overriden(self):
    """When a param appears in both schemas, each side casts to its own type."""
    params = {
        'r1': '{{r2}}',
        'r2': 1,
        'a1': '{{r1}}',
        'a2': '{{r1}}',
        'a3': '{{r1}}'
    }
    runner_param_info = {'r1': {'type': 'integer'}, 'r2': {'type': 'integer'}}
    action_param_info = {'a1': {'type': 'boolean'}, 'a2': {'type': 'string'},
                         'a3': {'type': 'integer'}, 'r1': {'type': 'string'}}
    action_context = {'api_user': 'noob'}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': 1, 'r2': 1})
    self.assertEqual(r_action_params, {'a1': 1, 'a2': u'1', 'a3': 1})

def test_get_finalized_params_cross_talk_no_cast(self):
    """Runner params may reference action params (and vice versa) untyped."""
    params = {
        'r1': '{{a1}}',
        'r2': 1,
        'a1': True,
        'a2': '{{r1}} {{a1}}',
        'a3': '{{action_context.api_user}}'
    }
    runner_param_info = {'r1': {}, 'r2': {}}
    action_param_info = {'a1': {}, 'a2': {}, 'a3': {}}
    action_context = {'api_user': 'noob'}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': u'True', 'r2': 1})
    self.assertEqual(r_action_params, {'a1': True, 'a2': u'True True', 'a3': 'noob'})

def test_get_finalized_params_cross_talk_with_cast(self):
    """Cross-referencing params are rendered first, then cast per schema."""
    params = {
        'r1': '{{a1}}',
        'r2': 1,
        'r3': 1,
        'a1': True,
        'a2': '{{r1}},{{a1}},{{a3}},{{r3}}',
        'a3': '{{a1}}'
    }
    runner_param_info = {'r1': {'type': 'boolean'}, 'r2': {'type': 'integer'}, 'r3': {}}
    action_param_info = {'a1': {'type': 'boolean'}, 'a2': {'type': 'array'}, 'a3': {}}
    action_context = {}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': True, 'r2': 1, 'r3': 1})
    # The 'array' cast turns the comma-separated render into a tuple.
    self.assertEqual(r_action_params, {'a1': True, 'a2': (True, True, True, 1), 'a3': u'True'})

def test_get_finalized_params_order(self):
    """Precedence: execution params > action defaults > runner defaults."""
    params = {
        'r1': 'p1',
        'r2': 'p2',
        'r3': 'p3',
        'a1': 'p4',
        'a2': 'p5'
    }
    runner_param_info = {'r1': {}, 'r2': {'default': 'r2'}, 'r3': {'default': 'r3'}}
    action_param_info = {'a1': {}, 'a2': {'default': 'a2'}, 'r3': {'default': 'a3'}}
    action_context = {'api_user': 'noob'}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': u'p1', 'r2': u'p2', 'r3': u'p3'})
    self.assertEqual(r_action_params, {'a1': u'p4', 'a2': u'p5'})
    # With no execution params, action defaults beat runner defaults.
    params = {}
    runner_param_info = {'r1': {}, 'r2': {'default': 'r2'}, 'r3': {'default': 'r3'}}
    action_param_info = {'a1': {}, 'a2': {'default': 'a2'}, 'r3': {'default': 'a3'}}
    action_context = {'api_user': 'noob'}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': None, 'r2': u'r2', 'r3': u'a3'})
    self.assertEqual(r_action_params, {'a1': None, 'a2': u'a2'})
    # An action-side default applies even when only the runner declares a default elsewhere.
    params = {}
    runner_param_info = {'r1': {}, 'r2': {'default': 'r2'}, 'r3': {}}
    action_param_info = {'r1': {}, 'r2': {}, 'r3': {'default': 'a3'}}
    action_context = {'api_user': 'noob'}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': None, 'r2': u'r2', 'r3': u'a3'})
def test_get_finalized_params_non_existent_template_key_in_action_context(self):
    """Referencing a missing action_context key must raise ParamException."""
    params = {
        'r1': 'foo',
        'r2': 2,
        'a1': 'i love tests',
        'a2': '{{action_context.lorem_ipsum}}'
    }
    runner_param_info = {'r1': {'type': 'string'}, 'r2': {'type': 'integer'}}
    action_param_info = {'a1': {'type': 'string'}, 'a2': {'type': 'string'}}
    action_context = {'api_user': 'noob', 'source_channel': 'reddit'}
    try:
        r_runner_params, r_action_params = param_utils.get_finalized_params(
            runner_param_info, action_param_info, params, action_context)
        self.fail('This should have thrown because we are trying to deref a key in ' +
                  'action context that ain\'t exist.')
    except ParamException as e:
        error_msg = 'Failed to render parameter "a2": \'dict object\' ' + \
                    'has no attribute \'lorem_ipsum\''
        # str(e) is portable (e.message is Python 2 only) and assertIn gives
        # a clearer failure message than assertTrue(x in y).  The trailing
        # dead `pass` was removed.
        self.assertIn(error_msg, str(e))
def test_unicode_value_casting(self):
    """Casting a non-ascii value to 'string' must not corrupt or raise."""
    rendered = {'a1': 'unicode1 ٩(̾●̮̮̃̾•̃̾)۶ unicode2'}
    parameter_schemas = {'a1': {'type': 'string'}}
    result = param_utils._cast_params(rendered=rendered,
                                      parameter_schemas=parameter_schemas)
    # Expected value is the byte sequence decoded into a unicode string.
    expected = {
        'a1': (u'unicode1 \xd9\xa9(\xcc\xbe\xe2\x97\x8f\xcc\xae\xcc\xae\xcc'
               u'\x83\xcc\xbe\xe2\x80\xa2\xcc\x83\xcc\xbe)\xdb\xb6 unicode2')
    }
    self.assertEqual(result, expected)

def test_get_finalized_params_with_casting_unicode_values(self):
    """End-to-end finalization preserves non-ascii action param values."""
    params = {'a1': 'unicode1 ٩(̾●̮̮̃̾•̃̾)۶ unicode2'}
    runner_param_info = {}
    action_param_info = {'a1': {'type': 'string'}}
    action_context = {}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    expected_action_params = {
        'a1': (u'unicode1 \xd9\xa9(\xcc\xbe\xe2\x97\x8f\xcc\xae\xcc\xae\xcc'
               u'\x83\xcc\xbe\xe2\x80\xa2\xcc\x83\xcc\xbe)\xdb\xb6 unicode2')
    }
    self.assertEqual(r_runner_params, {})
    self.assertEqual(r_action_params, expected_action_params)
def test_get_finalized_params_with_dict(self):
    """Templates nested inside dict-typed params are rendered recursively."""
    # Note : In this test runner_params.r1 has a string value. However per runner_param_info the
    # type is an integer. The expected type is considered and cast is performed accordingly.
    params = {
        'r1': '{{r2}}',
        'r2': {'r2.1': 1},
        'a1': True,
        'a2': '{{a1}}',
        'a3': {
            'test': '{{a1}}',
            'test1': '{{a4}}',
            'test2': '{{a5}}',
        },
        'a4': 3,
        'a5': ['1', '{{a1}}']
    }
    runner_param_info = {'r1': {'type': 'object'}, 'r2': {'type': 'object'}}
    action_param_info = {
        'a1': {
            'type': 'boolean',
        },
        'a2': {
            'type': 'boolean',
        },
        'a3': {
            'type': 'object',
        },
        'a4': {
            'type': 'integer',
        },
        'a5': {
            'type': 'array',
        },
    }
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, {})
    self.assertEqual(
        r_runner_params, {'r1': {'r2.1': 1}, 'r2': {'r2.1': 1}})
    # Nested template references resolve to the referenced params' cast values.
    self.assertEqual(
        r_action_params,
        {
            'a1': True,
            'a2': True,
            'a3': {
                'test': True,
                'test1': 3,
                'test2': [
                    '1',
                    True
                ],
            },
            'a4': 3,
            'a5': [
                '1',
                True
            ],
        }
    )

def test_get_finalized_params_with_list(self):
    """Templates nested inside list-typed params are rendered element-wise."""
    # Note : In this test runner_params.r1 has a string value. However per runner_param_info the
    # type is an integer. The expected type is considered and cast is performed accordingly.
    self.maxDiff = None
    params = {
        'r1': '{{r2}}',
        'r2': ['1', '2'],
        'a1': True,
        'a2': 'Test',
        'a3': 'Test2',
        'a4': '{{a1}}',
        'a5': ['{{a2}}', '{{a3}}'],
        'a6': [
            ['{{r2}}', '{{a2}}'],
            ['{{a3}}', '{{a1}}'],
            [
                '{{a7}}',
                'This should be rendered as a string {{a1}}',
                '{{a1}} This, too, should be rendered as a string {{a1}}',
            ]
        ],
        'a7': 5,
    }
    runner_param_info = {'r1': {'type': 'array'}, 'r2': {'type': 'array'}}
    action_param_info = {
        'a1': {'type': 'boolean'},
        'a2': {'type': 'string'},
        'a3': {'type': 'string'},
        'a4': {'type': 'boolean'},
        'a5': {'type': 'array'},
        'a6': {'type': 'array'},
        'a7': {'type': 'integer'},
    }
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, {})
    self.assertEqual(r_runner_params, {'r1': ['1', '2'], 'r2': ['1', '2']})
    # A template that is the whole element keeps the referenced value's type;
    # templates embedded in longer strings render as strings.
    self.assertEqual(
        r_action_params,
        {
            'a1': True,
            'a2': 'Test',
            'a3': 'Test2',
            'a4': True,
            'a5': ['Test', 'Test2'],
            'a6': [
                [['1', '2'], 'Test'],
                ['Test2', True],
                [
                    5,
                    u'This should be rendered as a string True',
                    u'True This, too, should be rendered as a string True'
                ]
            ],
            'a7': 5,
        }
    )
def test_get_finalized_params_with_cyclic_dependency(self):
    """Mutually-referencing params must be rejected with a 'Cyclic' error."""
    params = {'r1': '{{r2}}', 'r2': '{{r1}}'}
    runner_param_info = {'r1': {}, 'r2': {}}
    action_param_info = {}
    test_pass = True
    try:
        param_utils.get_finalized_params(runner_param_info, action_param_info, params, {})
        test_pass = False
    except ParamException as e:
        # str(e) works on Python 2 and 3 (e.message is Python 2 only), and
        # startswith() states the intent more clearly than find() == 0.
        test_pass = str(e).startswith('Cyclic')
    self.assertTrue(test_pass)

def test_get_finalized_params_with_missing_dependency(self):
    """References to undefined variables must raise a 'Dependency' error."""
    params = {'r1': '{{r3}}', 'r2': '{{r3}}'}
    runner_param_info = {'r1': {}, 'r2': {}}
    action_param_info = {}
    test_pass = True
    try:
        param_utils.get_finalized_params(runner_param_info, action_param_info, params, {})
        test_pass = False
    except ParamException as e:
        test_pass = str(e).startswith('Dependency')
    self.assertTrue(test_pass)
    # Same failure when the dangling reference comes from a default value.
    params = {}
    runner_param_info = {'r1': {'default': '{{r3}}'}, 'r2': {'default': '{{r3}}'}}
    action_param_info = {}
    test_pass = True
    try:
        param_utils.get_finalized_params(runner_param_info, action_param_info, params, {})
        test_pass = False
    except ParamException as e:
        test_pass = str(e).startswith('Dependency')
    self.assertTrue(test_pass)
def test_get_finalized_params_no_double_rendering(self):
    """A rendered value that looks like a template must not be rendered again."""
    params = {
        'r1': '{{ action_context.h1 }}{{ action_context.h2 }}'
    }
    runner_param_info = {'r1': {}}
    action_param_info = {}
    # Concatenating h1 + h2 produces the literal text '{{ missing }}'.
    action_context = {
        'h1': '{',
        'h2': '{ missing }}'
    }
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_runner_params, {'r1': '{{ missing }}'})
    self.assertEqual(r_action_params, {})

def test_get_finalized_params_jinja_filters(self):
    """Custom Jinja filters (version_bump_minor) are available in templates."""
    params = {'cmd': 'echo {{"1.6.0" | version_bump_minor}}'}
    runner_param_info = {'r1': {}}
    action_param_info = {'cmd': {}}
    action_context = {}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    self.assertEqual(r_action_params['cmd'], "echo 1.7.0")

def test_get_finalized_params_param_rendering_failure(self):
    """A template that fails to render names the failing parameter."""
    params = {'cmd': '{{a2.foo}}', 'a2': 'test'}
    action_param_info = {'cmd': {}, 'a2': {}}
    expected_msg = 'Failed to render parameter "cmd": .*'
    self.assertRaisesRegexp(ParamException,
                            expected_msg,
                            param_utils.get_finalized_params,
                            runnertype_parameter_info={},
                            action_parameter_info=action_param_info,
                            liveaction_parameters=params,
                            action_context={})

def test_get_finalized_param_object_contains_template_notation_in_the_value(self):
    """Templates inside an object-typed default are rendered from params."""
    runner_param_info = {'r1': {}}
    action_param_info = {
        'params': {
            'type': 'object',
            'default': {
                'host': '{{host}}',
                'port': '{{port}}',
                'path': '/bar'}
        }
    }
    params = {
        'host': 'lolcathost',
        'port': 5555
    }
    action_context = {}
    r_runner_params, r_action_params = param_utils.get_finalized_params(
        runner_param_info, action_param_info, params, action_context)
    expected_params = {
        'host': 'lolcathost',
        'port': 5555,
        'path': '/bar'
    }
    self.assertEqual(r_action_params['params'], expected_params)
def test_cast_param_referenced_action_doesnt_exist(self):
    """cast_params must raise ValueError for an unknown action reference."""
    # Make sure the function throws if the action doesnt exist
    expected_msg = 'Action with ref "foo.doesntexist" doesn\'t exist'
    self.assertRaisesRegexp(ValueError, expected_msg, action_param_utils.cast_params,
                            action_ref='foo.doesntexist', params={})

def test_get_finalized_params_with_config(self):
    """Pack config values are rendered into params via {{config_context.*}}."""
    with mock.patch('st2common.util.config_loader.ContentPackConfigLoader') as config_loader:
        config_loader().get_config.return_value = {
            'generic_config_param': 'So generic'
        }
        params = {
            'config_param': '{{config_context.generic_config_param}}',
        }
        # True -> liveaction context carries pack/user keys for config loading.
        liveaction_db = self._get_liveaction_model(params, True)
        _, action_params = param_utils.get_finalized_params(
            ParamsUtilsTest.runnertype_db.runner_parameters,
            ParamsUtilsTest.action_db.parameters,
            liveaction_db.parameters,
            liveaction_db.context)
        self.assertEqual(
            action_params.get('config_param'),
            'So generic'
        )

def test_get_config(self):
    """get_config returns {} unless both pack and user are provided."""
    with mock.patch('st2common.util.config_loader.ContentPackConfigLoader') as config_loader:
        mock_config_return = {
            'generic_config_param': 'So generic'
        }
        config_loader().get_config.return_value = mock_config_return
        self.assertEqual(get_config(None, None), {})
        self.assertEqual(get_config('pack', None), {})
        self.assertEqual(get_config(None, 'user'), {})
        self.assertEqual(
            get_config('pack', 'user'), mock_config_return
        )
        config_loader.assert_called_with(pack_name='pack', user='user')
        config_loader().get_config.assert_called_once()

def _get_liveaction_model(self, params, with_config_context=False):
    """Build an in-memory LiveActionDB for the fixture action.

    :param params: Parameters to attach to the liveaction.
    :param with_config_context: When True, add pack/user keys to the
        context so pack-config rendering can be exercised.
    """
    status = 'initializing'
    start_timestamp = date_utils.get_datetime_utc_now()
    action_ref = ResourceReference(name=ParamsUtilsTest.action_db.name,
                                   pack=ParamsUtilsTest.action_db.pack).ref
    liveaction_db = LiveActionDB(status=status, start_timestamp=start_timestamp,
                                 action=action_ref, parameters=params)
    liveaction_db.context = {
        'api_user': 'noob',
        'source_channel': 'reddit',
    }
    if with_config_context:
        liveaction_db.context.update(
            {
                'pack': 'generic',
                'user': 'st2admin'
            }
        )
    return liveaction_db

def test_get_live_params_with_additional_context(self):
    """render_live_params resolves templates against extra supplied contexts."""
    runner_param_info = {
        'r1': {
            'default': 'some'
        }
    }
    action_param_info = {
        'r2': {
            'default': '{{ r1 }}'
        }
    }
    params = {
        'r3': 'lolcathost',
        'r1': '{{ additional.stuff }}'
    }
    action_context = {}
    additional_contexts = {
        'additional': {
            'stuff': 'generic'
        }
    }
    live_params = param_utils.render_live_params(
        runner_param_info, action_param_info, params, action_context, additional_contexts)
    # r1 resolves from the additional context and cascades into r2's default.
    expected_params = {
        'r1': 'generic',
        'r2': 'generic',
        'r3': 'lolcathost'
    }
    self.assertEqual(live_params, expected_params)

def test_cyclic_dependency_friendly_error_message(self):
    """Cyclic references produce an error naming the offending variables."""
    runner_param_info = {
        'r1': {
            'default': 'some',
            'cyclic': 'cyclic value',
            'morecyclic': 'cyclic value'
        }
    }
    action_param_info = {
        'r2': {
            'default': '{{ r1 }}'
        }
    }
    params = {
        'r3': 'lolcathost',
        'cyclic': '{{ cyclic }}',
        'morecyclic': '{{ morecyclic }}'
    }
    action_context = {}
    expected_msg = 'Cyclic dependency found in the following variables: cyclic, morecyclic'
    self.assertRaisesRegexp(ParamException, expected_msg, param_utils.render_live_params,
                            runner_param_info, action_param_info, params, action_context)

def test_unsatisfied_dependency_friendly_error_message(self):
    """A reference to an undefined variable names that variable in the error."""
    runner_param_info = {
        'r1': {
            'default': 'some',
        }
    }
    action_param_info = {
        'r2': {
            'default': '{{ r1 }}'
        }
    }
    params = {
        'r3': 'lolcathost',
        'r4': '{{ variable_not_defined }}',
    }
    action_context = {}
    expected_msg = 'Dependency unsatisfied in variable "variable_not_defined"'
    self.assertRaisesRegexp(ParamException, expected_msg, param_utils.render_live_params,
                            runner_param_info, action_param_info, params, action_context)
def test_add_default_templates_to_live_params(self):
    """Test addition of template values in defaults to live params.

    Uses assertEqual throughout; assertEquals is a deprecated alias
    removed in newer Python versions.
    """
    # Ensure parameter is skipped if the parameter has immutable set to true in schema
    schemas = [
        {
            'templateparam': {
                'default': '{{ 3 | int }}',
                'type': 'integer',
                'immutable': True
            }
        }
    ]
    context = {
        'templateparam': '3'
    }
    result = param_utils._cast_params_from({}, context, schemas)
    self.assertEqual(result, {})
    # Test with no live params, and two parameters - one should make it through because
    # it was a template, and the other shouldn't because its default wasn't a template
    schemas = [
        {
            'templateparam': {
                'default': '{{ 3 | int }}',
                'type': 'integer'
            }
        }
    ]
    context = {
        'templateparam': '3'
    }
    result = param_utils._cast_params_from({}, context, schemas)
    self.assertEqual(result, {'templateparam': 3})
    # Ensure parameter is skipped if the value in context is identical to default
    schemas = [
        {
            'nottemplateparam': {
                'default': '4',
                'type': 'integer'
            }
        }
    ]
    context = {
        'nottemplateparam': '4',
    }
    result = param_utils._cast_params_from({}, context, schemas)
    self.assertEqual(result, {})
    # Ensure parameter is skipped if the parameter doesn't have a default
    schemas = [
        {
            'nottemplateparam': {
                'type': 'integer'
            }
        }
    ]
    context = {
        'nottemplateparam': '4',
    }
    result = param_utils._cast_params_from({}, context, schemas)
    self.assertEqual(result, {})
    # Skip if the default value isn't a Jinja expression
    schemas = [
        {
            'nottemplateparam': {
                'default': '5',
                'type': 'integer'
            }
        }
    ]
    context = {
        'nottemplateparam': '4',
    }
    result = param_utils._cast_params_from({}, context, schemas)
    self.assertEqual(result, {})
    # Ensure parameter is skipped if the parameter is being overridden
    schemas = [
        {
            'templateparam': {
                'default': '{{ 3 | int }}',
                'type': 'integer'
            }
        }
    ]
    context = {
        'templateparam': '4',
    }
    result = param_utils._cast_params_from({'templateparam': '4'}, context, schemas)
    self.assertEqual(result, {'templateparam': 4})
|
import os
import sys
from oslo.config import cfg
from st2common import log as logging
from st2common.models.db import db_setup
from st2common.models.db import db_teardown
from st2common.constants.logging import DEFAULT_LOGGING_CONF_PATH
from st2reactor.rules import config
from st2reactor.rules import worker
from st2reactor.timer.base import St2Timer
LOG = logging.getLogger('st2reactor.bin.rulesengine')
def _setup():
    """Parse config, configure logging, and connect to the database."""
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH)
    # 1. parse config args
    config.parse_args()
    # 2. setup logging.
    logging.setup(cfg.CONF.rulesengine.logging)
    # 3. all other setup which requires config to be parsed and logging to
    # be correctly setup.
    # Credentials are optional in the config schema, hence the hasattr guards.
    username = cfg.CONF.database.username if hasattr(cfg.CONF.database, 'username') else None
    password = cfg.CONF.database.password if hasattr(cfg.CONF.database, 'password') else None
    db_setup(cfg.CONF.database.db_name, cfg.CONF.database.host, cfg.CONF.database.port,
             username=username, password=password)

def _teardown():
    """Disconnect from the database."""
    db_teardown()
def main():
    """Run the rules engine: start the timer, then process rules until exit.

    :return: The worker's exit code, or 1 on unexpected failure.
    """
    timer = St2Timer(local_timezone=cfg.CONF.timer.local_timezone)
    try:
        _setup()
        timer.start()
        return worker.work()
    except SystemExit as exit_code:
        # Re-exit with the original numeric status; passing the exception
        # object itself to sys.exit() would wrap it and always exit with 1.
        sys.exit(exit_code.code)
    except Exception:
        # Exception (not a bare except) so KeyboardInterrupt and other
        # BaseExceptions still terminate the process normally.
        LOG.exception('(PID:%s) RulesEngine quit due to exception.', os.getpid())
        return 1
    finally:
        timer.cleanup()
        _teardown()
Spawn the timer and the rules worker in eventlet green threads, since both of their run loops are blocking.
import os
import sys
import eventlet
from oslo.config import cfg
from st2common import log as logging
from st2common.models.db import db_setup
from st2common.models.db import db_teardown
from st2common.constants.logging import DEFAULT_LOGGING_CONF_PATH
from st2reactor.rules import config
from st2reactor.rules import worker
from st2reactor.timer.base import St2Timer
LOG = logging.getLogger('st2reactor.bin.rulesengine')
def _setup():
    """Parse config, configure logging, and connect to the database."""
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH)
    # 1. parse config args
    config.parse_args()
    # 2. setup logging.
    logging.setup(cfg.CONF.rulesengine.logging)
    # 3. all other setup which requires config to be parsed and logging to
    # be correctly setup.
    # Credentials are optional in the config schema, hence the hasattr guards.
    username = cfg.CONF.database.username if hasattr(cfg.CONF.database, 'username') else None
    password = cfg.CONF.database.password if hasattr(cfg.CONF.database, 'password') else None
    db_setup(cfg.CONF.database.db_name, cfg.CONF.database.host, cfg.CONF.database.port,
             username=username, password=password)

def _teardown():
    """Disconnect from the database."""
    db_teardown()

def _kickoff_rules_worker(worker):
    """Run the rules worker's (blocking) consume loop in a green thread."""
    worker.work()

def _kickoff_timer(timer):
    """Run the timer's (blocking) loop in a green thread."""
    timer.start()
def main():
    """Run the rules engine: timer and rules worker in eventlet green threads.

    Both loops are blocking, hence the green threads.
    :return: Combined wait() result of the two threads, or 1 on failure.
    """
    timer = St2Timer(local_timezone=cfg.CONF.timer.local_timezone)
    try:
        _setup()
        timer_thread = eventlet.spawn(_kickoff_timer, timer)
        worker_thread = eventlet.spawn(_kickoff_rules_worker, worker)
        return (timer_thread.wait() and worker_thread.wait())
    except SystemExit as exit_code:
        # Re-exit with the original numeric status; passing the exception
        # object itself to sys.exit() would wrap it and always exit with 1.
        sys.exit(exit_code.code)
    except Exception:
        # Exception (not a bare except) so KeyboardInterrupt and other
        # BaseExceptions still terminate the process normally.
        LOG.exception('(PID:%s) RulesEngine quit due to exception.', os.getpid())
        return 1
    finally:
        timer.cleanup()
        _teardown()
|
from ibugMM.mesh.face import Face
from ibugMM.mesh.cppmesh import CppMesh
from ibugMM.importer.model import OBJImporter
import numpy as np
print 'Imports done.'
# Machine-specific input mesh path; TODO(review): make this configurable.
objPath = '/home/jab08/testData/ioannis_001/exports/ioannis_001_022.obj'
oimporter = OBJImporter(objPath)
print 'Importer ready'
# Build the Face object from the parsed OBJ data.
oFace = oimporter.generateFace()
print 'Face generated'
# Commented-out experiments (coordinate-index remapping, Laplacian and
# sparse-matrix construction) kept for reference.
#cI = oFace.coordsIndex[np.where(oFace.coordsIndex==0)[0]]
#c = oFace.coords[np.unique(cI)]
#cI[cI==32179] = 4
#cI[cI==35717] = 5
#cI[cI==32839] = 6
#print cI
#print c
#testMesh = Face(coords=np.zeros_like(c),coordsIndex=cI)
#oFace.laplacian_operator()
#sparse = oFace.construct_sparse_vertex_matrix()
Also compute the Laplacian operator, gradient, and divergence on the imported face (previously omitted).
from ibugMM.mesh.face import Face
from ibugMM.mesh.cppmesh import CppMesh
from ibugMM.importer.model import OBJImporter
import numpy as np
print 'Imports done.'
# Machine-specific input mesh path; TODO(review): make this configurable.
objPath = '/home/jab08/testData/ioannis_001/exports/ioannis_001_022.obj'
oimporter = OBJImporter(objPath)
print 'Importer ready'
oFace = oimporter.generateFace()
# Differential-geometry operators on the mesh.  NOTE(review): the exact
# meaning of A and B is defined by Face.laplacian_operator -- verify in
# ibugMM.mesh.face before relying on them.
A,B = oFace.laplacian_operator()
C = oFace.gradient()
div = oFace.divergence()
print 'Face generated'
# Commented-out experiments (coordinate-index remapping, Laplacian and
# sparse-matrix construction) kept for reference.
#cI = oFace.coordsIndex[np.where(oFace.coordsIndex==0)[0]]
#c = oFace.coords[np.unique(cI)]
#cI[cI==32179] = 4
#cI[cI==35717] = 5
#cI[cI==32839] = 6
#print cI
#print c
#testMesh = Face(coords=np.zeros_like(c),coordsIndex=cI)
#oFace.laplacian_operator()
#sparse = oFace.construct_sparse_vertex_matrix()
|
"""Provide the Query class.
"""
from warnings import warn
import icat.entity
from icat.exception import *
__all__ = ['Query']
# Preferred short aliases for related-object paths; consulted by
# _makesubst() before it falls back to generated "s<n>" names.
substnames = {
    "datafileFormat":"dff",
    "dataset":"ds",
    "dataset.investigation":"i",
    "facility":"f",
    "grouping":"g",
    "instrumentScientists":"isc",
    "investigation":"i",
    "investigationGroups":"ig",
    "investigationInstruments":"ii",
    "investigationUsers":"iu",
    "parameters":"p",
    "parameters.type":"pt",
    "type":"t",
    "user":"u",
    "userGroups":"ug",
}
"""Symbolic names for the representation of related objects in
JOIN ... AS and INCLUDE ... AS. Prescribing sensible names makes the
search expressions somewhat better readable. There is no need for
completeness here.
"""
# ======================== Internal helper ===========================
def _parents(obj):
"""Iterate over the parents of obj as dot separated components.
>>> list(_parents("a.bb.c.ddd.e.ff"))
['a', 'a.bb', 'a.bb.c', 'a.bb.c.ddd', 'a.bb.c.ddd.e']
>>> list(_parents("abc"))
[]
"""
s = 0
while True:
i = obj.find('.', s)
if i < 0:
break
yield obj[:i]
s = i+1
def _attrpath(client, entity, attrname):
    """Walk the dot separated components of attrname along related
    objects, yielding a (attrInfo, relatedClass) tuple per component.

    relatedClass is None when the component resolved to a plain
    attribute rather than a relation.
    """
    current = entity
    for component in attrname.split('.'):
        if current is None:
            # The previous component was already a plain attribute, so
            # nothing may follow it in the path.
            raise ValueError("Invalid attrname '%s' for %s."
                             % (attrname, entity.BeanName))
        info = current.getAttrInfo(client, component)
        rel = info.relType
        if rel == "ATTRIBUTE":
            current = None
        elif rel == "ONE" or rel == "MANY":
            current = client.getEntityClass(info.type)
        else:
            raise InternalError("Invalid relType: '%s'" % rel)
        yield (info, current)
def _makesubst(objs):
    """Build an alias map for every parent path occurring in objs.

    Symbolic names from substnames are preferred; otherwise a generated
    "s<n>" name is used.  A symbolic name is skipped when it is already
    taken by another path.
    """
    subst = {}
    generated = 0
    for obj in sorted(objs):
        for parent in _parents(obj):
            if parent in subst:
                continue
            preferred = substnames.get(parent)
            if preferred is not None and preferred not in subst.values():
                subst[parent] = preferred
            else:
                generated += 1
                subst[parent] = "s%d" % generated
    return subst
def _dosubst(obj, subst, addas=True):
i = obj.rfind('.')
if i < 0:
n = "o.%s" % (obj)
else:
n = "%s.%s" % (subst[obj[:i]], obj[i+1:])
if addas and obj in subst:
n += " AS %s" % (subst[obj])
return n
# ========================== class Query =============================
class Query(object):
    """Build a query to search an ICAT server.

    The query uses the JPQL inspired syntax introduced with ICAT
    4.3.0. It won't work with older ICAT servers.
    """
    def __init__(self, client, entity,
                 order=None, conditions=None, includes=None, limit=None):
        """Initialize the query.

        :param client: the ICAT client.
        :type client: :class:`icat.client.Client`
        :param entity: the type of objects to search for.  This may
            either be an :class:`icat.entity.Entity` subclass or the
            name of an entity type.
        :param order: the sorting attributes to build the ORDER BY
            clause from.  See the :meth:`icat.query.Query.setOrder`
            method for details.
        :param conditions: the conditions to build the WHERE clause
            from.  See the :meth:`icat.query.Query.addConditions`
            method for details.
        :param includes: list of related objects to add to the INCLUDE
            clause.  See the :meth:`icat.query.Query.addIncludes`
            method for details.
        :param limit: a tuple (skip, count) to be used in the LIMIT
            clause.  See the :meth:`icat.query.Query.setLimit` method
            for details.
        """
        # NOTE(review): lexicographic string comparison of versions;
        # e.g. '4.10' < '4.3' would compare wrong -- confirm the type
        # and format of client.apiversion.
        if client.apiversion < '4.3':
            raise VersionMethodError("Query", client.apiversion)
        super(Query, self).__init__()
        # _init marks that we are inside the constructor, so warnings
        # emitted from setOrder() point at the caller of __init__
        # rather than at __init__ itself (see stacklevel in setOrder).
        self._init = True
        self.client = client
        if isinstance(entity, basestring):
            self.entity = self.client.getEntityClass(entity)
        elif issubclass(entity, icat.entity.Entity):
            # Only entity classes registered with this client and
            # having a BeanName are searchable.
            if (entity in self.client.typemap.values() and
                entity.BeanName is not None):
                self.entity = entity
            else:
                raise TypeError("Invalid entity type '%s'." % entity.__name__)
        else:
            raise TypeError("Invalid entity type '%s'." % type(entity))
        self.conditions = dict()
        self.addConditions(conditions)
        self.includes = set()
        self.addIncludes(includes)
        self.setOrder(order)
        self.setLimit(limit)
        self._init = None
    def setOrder(self, order):
        """Set the order to build the ORDER BY clause from.

        :param order: the list of the attributes used for sorting.  A
            special value of :const:`True` may be used to indicate the
            natural order of the entity type.  Any false value means
            no ORDER BY clause.
        :type order: :class:`list` of :class:`str` or :class:`bool`
        :raise ValueError: if the order contains invalid attributes
            that either do not exist or contain one to many
            relationships.
        """
        if order is True:
            self.order = self.entity.getNaturalOrder(self.client)
        elif order:
            self.order = []
            for obj in order:
                # pattr accumulates the dotted path walked so far, so
                # warnings can name the exact offending component.
                pattr = ""
                attrpath = _attrpath(self.client, self.entity, obj)
                for (attrInfo, rclass) in attrpath:
                    if pattr:
                        pattr += ".%s" % attrInfo.name
                    else:
                        pattr = attrInfo.name
                    if attrInfo.relType == "ONE":
                        # Ordering by a nullable many-to-one relation
                        # silently drops objects with a NULL relation,
                        # unless a condition on it is present; warn.
                        if (not attrInfo.notNullable and
                            pattr not in self.conditions):
                            # Point the warning at the user's call site,
                            # one frame further out when called from
                            # __init__.
                            sl = 3 if self._init else 2
                            warn(QueryNullableOrderWarning(pattr),
                                 stacklevel=sl)
                    elif attrInfo.relType == "MANY":
                        raise ValueError("Cannot use one to many relationship "
                                         "in '%s' to order %s."
                                         % (obj, self.entity.BeanName))
                if rclass is None:
                    # obj is an attribute, use it right away.
                    self.order.append(obj)
                else:
                    # obj is a related object, use the natural order
                    # of its class.
                    rorder = rclass.getNaturalOrder(self.client)
                    self.order.extend(["%s.%s" % (obj, ra) for ra in rorder])
        else:
            self.order = []
    def addConditions(self, conditions):
        """Add conditions to the constraints to build the WHERE clause from.

        :param conditions: the conditions to restrict the search
            result.  This must be a mapping of attribute names to
            conditions on that attribute.  The latter may either be a
            string with a single condition or a list of strings to add
            more then one condition on a single attribute.  If the
            query already has a condition on a given attribute, it
            will be turned into a list with the new condition(s)
            appended.
        :type conditions: :class:`dict`
        :raise ValueError: if any key in conditions is not valid.
        """
        if conditions:
            for a in conditions.keys():
                # Exhaust the generator purely for its side effect: it
                # raises ValueError if the attribute path is invalid.
                attrpath = _attrpath(self.client, self.entity, a)
                for (attrInfo, rclass) in attrpath:
                    pass
                if a in self.conditions:
                    # Merge old and new conditions into a flat list,
                    # accepting either a single string or a list on
                    # both sides.
                    conds = []
                    if isinstance(self.conditions[a], basestring):
                        conds.append(self.conditions[a])
                    else:
                        conds.extend(self.conditions[a])
                    if isinstance(conditions[a], basestring):
                        conds.append(conditions[a])
                    else:
                        conds.extend(conditions[a])
                    self.conditions[a] = conds
                else:
                    self.conditions[a] = conditions[a]
    def addIncludes(self, includes):
        """Add related objects to build the INCLUDE clause from.

        :param includes: list of related objects to add to the INCLUDE
            clause.  A special value of "1" may be used to set (the
            equivalent of) an "INCLUDE 1" clause.
        :type includes: iterable of :class:`str`
        :raise ValueError: if any item in includes is not a related object.
        """
        if includes == "1":
            includes = list(self.entity.InstRel)
        if includes:
            for iobj in includes:
                # Walk the path for validation; after the loop, rclass
                # is the class of the final component.
                attrpath = _attrpath(self.client, self.entity, iobj)
                for (attrInfo, rclass) in attrpath:
                    pass
                if rclass is None:
                    # Final component is a plain attribute; only
                    # related objects may be INCLUDEd.
                    raise ValueError("%s.%s is not a related object."
                                     % (self.entity.BeanName, iobj))
            self.includes.update(includes)
    def setLimit(self, limit):
        """Set the limits to build the LIMIT clause from.

        :param limit: a tuple (skip, count).
        :type limit: :class:`tuple`
        """
        if limit:
            self.limit = limit
        else:
            self.limit = None
    def __repr__(self):
        """Return a formal representation of the query.
        """
        return ("%s(%s, %s, order=%s, conditions=%s, includes=%s, limit=%s)"
                % (self.__class__.__name__,
                   repr(self.client), repr(self.entity.BeanName),
                   repr(self.order), repr(self.conditions),
                   repr(self.includes), repr(self.limit)))
    def __str__(self):
        """Return a string representation of the query.

        Note for Python 2: the result will be an Unicode object, if
        any of the conditions in the query contains unicode.  This
        violates the specification of the string representation
        operator that requires the return value to be a string object.
        But it is the *right thing* to do to get queries with
        non-ascii characters working.  For Python 3, there is no
        distinction between Unicode and string objects anyway.
        """
        base = "SELECT o FROM %s o" % self.entity.BeanName
        # All attributes referenced in ORDER BY or WHERE need their
        # parent paths JOINed under an alias.
        joinattrs = set(self.order) | set(self.conditions.keys())
        subst = _makesubst(joinattrs)
        joins = ""
        for obj in sorted(subst.keys()):
            joins += " JOIN %s" % _dosubst(obj, subst)
        if self.conditions:
            conds = []
            for a in sorted(self.conditions.keys()):
                attr = _dosubst(a, subst, False)
                cond = self.conditions[a]
                # A condition value is either one string or a list of
                # strings; each yields one "attr condition" term.
                if isinstance(cond, basestring):
                    conds.append("%s %s" % (attr, cond))
                else:
                    for c in cond:
                        conds.append("%s %s" % (attr, c))
            where = " WHERE " + " AND ".join(conds)
        else:
            where = ""
        if self.order:
            orders = [ _dosubst(a, subst, False) for a in self.order ]
            order = " ORDER BY " + ", ".join(orders)
        else:
            order = ""
        if self.includes:
            # INCLUDE uses its own alias map; intermediate parent
            # paths are included as well.
            subst = _makesubst(self.includes)
            includes = set(self.includes)
            includes.update(subst.keys())
            incl = [ _dosubst(obj, subst) for obj in sorted(includes) ]
            include = " INCLUDE " + ", ".join(incl)
        else:
            include = ""
        if self.limit:
            limit = " LIMIT %s, %s" % self.limit
        else:
            limit = ""
        return base + joins + where + order + include + limit
    def copy(self):
        """Return an independent clone of this query.
        """
        q = Query(self.client, self.entity)
        # Shallow-copy the mutable clause state so later edits to the
        # clone do not leak back into this query.
        q.order = list(self.order)
        q.conditions = self.conditions.copy()
        q.includes = self.includes.copy()
        q.limit = self.limit
        return q
Implement support for searching for attributes in class Query.
"""Provide the Query class.
"""
from warnings import warn
import icat.entity
from icat.exception import *
__all__ = ['Query']
# Preferred short aliases for related-object paths; consulted by
# _makesubst() before it falls back to generated "s<n>" names.
substnames = {
    "datafileFormat":"dff",
    "dataset":"ds",
    "dataset.investigation":"i",
    "facility":"f",
    "grouping":"g",
    "instrumentScientists":"isc",
    "investigation":"i",
    "investigationGroups":"ig",
    "investigationInstruments":"ii",
    "investigationUsers":"iu",
    "parameters":"p",
    "parameters.type":"pt",
    "type":"t",
    "user":"u",
    "userGroups":"ug",
}
"""Symbolic names for the representation of related objects in
JOIN ... AS and INCLUDE ... AS. Prescribing sensible names makes the
search expressions somewhat better readable. There is no need for
completeness here.
"""
# ======================== Internal helper ===========================
def _parents(obj):
"""Iterate over the parents of obj as dot separated components.
>>> list(_parents("a.bb.c.ddd.e.ff"))
['a', 'a.bb', 'a.bb.c', 'a.bb.c.ddd', 'a.bb.c.ddd.e']
>>> list(_parents("abc"))
[]
"""
s = 0
while True:
i = obj.find('.', s)
if i < 0:
break
yield obj[:i]
s = i+1
def _attrpath(client, entity, attrname):
    """Resolve attrname component by component along related objects,
    yielding (attrInfo, relatedClass) for every step.

    relatedClass is None once a component resolved to a plain
    attribute rather than a relation.
    """
    cls = entity
    for name in attrname.split('.'):
        if cls is None:
            # A plain attribute cannot have further path components.
            raise ValueError("Invalid attrname '%s' for %s."
                             % (attrname, entity.BeanName))
        info = cls.getAttrInfo(client, name)
        if info.relType == "ATTRIBUTE":
            cls = None
        elif info.relType in ("ONE", "MANY"):
            cls = client.getEntityClass(info.type)
        else:
            raise InternalError("Invalid relType: '%s'" % info.relType)
        yield (info, cls)
def _makesubst(objs):
    """Assign a short alias to every parent path occurring in objs.

    A symbolic name from substnames is preferred unless already taken;
    otherwise a generated "s<n>" name is used.
    """
    subst = {}
    nextid = 0
    for obj in sorted(objs):
        for parent in _parents(obj):
            if parent in subst:
                continue
            alias = substnames.get(parent)
            if alias is not None and alias not in subst.values():
                subst[parent] = alias
            else:
                nextid += 1
                subst[parent] = "s%d" % nextid
    return subst
def _dosubst(obj, subst, addas=True):
i = obj.rfind('.')
if i < 0:
n = "o.%s" % (obj)
else:
n = "%s.%s" % (subst[obj[:i]], obj[i+1:])
if addas and obj in subst:
n += " AS %s" % (subst[obj])
return n
# ========================== class Query =============================
class Query(object):
    """Build a query to search an ICAT server.

    The query uses the JPQL inspired syntax introduced with ICAT
    4.3.0. It won't work with older ICAT servers.
    """
    def __init__(self, client, entity,
                 attribute=None, order=None,
                 conditions=None, includes=None, limit=None):
        """Initialize the query.

        :param client: the ICAT client.
        :type client: :class:`icat.client.Client`
        :param entity: the type of objects to search for.  This may
            either be an :class:`icat.entity.Entity` subclass or the
            name of an entity type.
        :param attribute: the attribute that the query shall return.
            See the :meth:`icat.query.Query.setAttribute` method for
            details.
        :param order: the sorting attributes to build the ORDER BY
            clause from.  See the :meth:`icat.query.Query.setOrder`
            method for details.
        :param conditions: the conditions to build the WHERE clause
            from.  See the :meth:`icat.query.Query.addConditions`
            method for details.
        :param includes: list of related objects to add to the INCLUDE
            clause.  See the :meth:`icat.query.Query.addIncludes`
            method for details.
        :param limit: a tuple (skip, count) to be used in the LIMIT
            clause.  See the :meth:`icat.query.Query.setLimit` method
            for details.
        """
        # NOTE(review): lexicographic string comparison of versions;
        # e.g. '4.10' < '4.3' would compare wrong -- confirm the type
        # and format of client.apiversion.
        if client.apiversion < '4.3':
            raise VersionMethodError("Query", client.apiversion)
        super(Query, self).__init__()
        # _init marks that we are inside the constructor, so warnings
        # emitted from setOrder() point at the caller of __init__
        # rather than at __init__ itself (see stacklevel in setOrder).
        self._init = True
        self.client = client
        if isinstance(entity, basestring):
            self.entity = self.client.getEntityClass(entity)
        elif issubclass(entity, icat.entity.Entity):
            # Only entity classes registered with this client and
            # having a BeanName are searchable.
            if (entity in self.client.typemap.values() and
                entity.BeanName is not None):
                self.entity = entity
            else:
                raise TypeError("Invalid entity type '%s'." % entity.__name__)
        else:
            raise TypeError("Invalid entity type '%s'." % type(entity))
        self.setAttribute(attribute)
        self.conditions = dict()
        self.addConditions(conditions)
        self.includes = set()
        self.addIncludes(includes)
        self.setOrder(order)
        self.setLimit(limit)
        self._init = None
    def setAttribute(self, attribute):
        """Set the attribute that the query shall return.

        :param attribute: the name of the attribute.  The result of
            the query will be a list of attribute values for the
            matching entity objects.  If attribute is :const:`None`,
            the result will be the list of matching objects instead.
        :type attribute: :class:`str`
        :raise ValueError: if the attribute is not valid for the
            entity type.
        """
        # Get the attribute path only to verify that the attribute is valid;
        # list() forces the generator so that invalid paths raise here.
        if attribute is not None:
            attrpath = list(_attrpath(self.client, self.entity, attribute))
        self.attribute = attribute
    def setOrder(self, order):
        """Set the order to build the ORDER BY clause from.

        :param order: the list of the attributes used for sorting.  A
            special value of :const:`True` may be used to indicate the
            natural order of the entity type.  Any false value means
            no ORDER BY clause.
        :type order: :class:`list` of :class:`str` or :class:`bool`
        :raise ValueError: if the order contains invalid attributes
            that either do not exist or contain one to many
            relationships.
        """
        if order is True:
            self.order = self.entity.getNaturalOrder(self.client)
        elif order:
            self.order = []
            for obj in order:
                # pattr accumulates the dotted path walked so far, so
                # warnings can name the exact offending component.
                pattr = ""
                attrpath = _attrpath(self.client, self.entity, obj)
                for (attrInfo, rclass) in attrpath:
                    if pattr:
                        pattr += ".%s" % attrInfo.name
                    else:
                        pattr = attrInfo.name
                    if attrInfo.relType == "ONE":
                        # Ordering by a nullable many-to-one relation
                        # silently drops objects with a NULL relation,
                        # unless a condition on it is present; warn.
                        if (not attrInfo.notNullable and
                            pattr not in self.conditions):
                            # Point the warning at the user's call site,
                            # one frame further out when called from
                            # __init__.
                            sl = 3 if self._init else 2
                            warn(QueryNullableOrderWarning(pattr),
                                 stacklevel=sl)
                    elif attrInfo.relType == "MANY":
                        raise ValueError("Cannot use one to many relationship "
                                         "in '%s' to order %s."
                                         % (obj, self.entity.BeanName))
                if rclass is None:
                    # obj is an attribute, use it right away.
                    self.order.append(obj)
                else:
                    # obj is a related object, use the natural order
                    # of its class.
                    rorder = rclass.getNaturalOrder(self.client)
                    self.order.extend(["%s.%s" % (obj, ra) for ra in rorder])
        else:
            self.order = []
    def addConditions(self, conditions):
        """Add conditions to the constraints to build the WHERE clause from.

        :param conditions: the conditions to restrict the search
            result.  This must be a mapping of attribute names to
            conditions on that attribute.  The latter may either be a
            string with a single condition or a list of strings to add
            more then one condition on a single attribute.  If the
            query already has a condition on a given attribute, it
            will be turned into a list with the new condition(s)
            appended.
        :type conditions: :class:`dict`
        :raise ValueError: if any key in conditions is not valid.
        """
        if conditions:
            for a in conditions.keys():
                # Exhaust the generator purely for its side effect: it
                # raises ValueError if the attribute path is invalid.
                attrpath = _attrpath(self.client, self.entity, a)
                for (attrInfo, rclass) in attrpath:
                    pass
                if a in self.conditions:
                    # Merge old and new conditions into a flat list,
                    # accepting either a single string or a list on
                    # both sides.
                    conds = []
                    if isinstance(self.conditions[a], basestring):
                        conds.append(self.conditions[a])
                    else:
                        conds.extend(self.conditions[a])
                    if isinstance(conditions[a], basestring):
                        conds.append(conditions[a])
                    else:
                        conds.extend(conditions[a])
                    self.conditions[a] = conds
                else:
                    self.conditions[a] = conditions[a]
    def addIncludes(self, includes):
        """Add related objects to build the INCLUDE clause from.

        :param includes: list of related objects to add to the INCLUDE
            clause.  A special value of "1" may be used to set (the
            equivalent of) an "INCLUDE 1" clause.
        :type includes: iterable of :class:`str`
        :raise ValueError: if any item in includes is not a related object.
        """
        if includes == "1":
            includes = list(self.entity.InstRel)
        if includes:
            for iobj in includes:
                # Walk the path for validation; after the loop, rclass
                # is the class of the final component.
                attrpath = _attrpath(self.client, self.entity, iobj)
                for (attrInfo, rclass) in attrpath:
                    pass
                if rclass is None:
                    # Final component is a plain attribute; only
                    # related objects may be INCLUDEd.
                    raise ValueError("%s.%s is not a related object."
                                     % (self.entity.BeanName, iobj))
            self.includes.update(includes)
    def setLimit(self, limit):
        """Set the limits to build the LIMIT clause from.

        :param limit: a tuple (skip, count).
        :type limit: :class:`tuple`
        """
        if limit:
            self.limit = limit
        else:
            self.limit = None
    def __repr__(self):
        """Return a formal representation of the query.
        """
        # Fix: include the attribute argument, which was omitted when
        # attribute support was added.  Without it, the representation
        # silently dropped part of the query state (copy() and __str__
        # both use self.attribute).
        return ("%s(%s, %s, attribute=%s, order=%s, conditions=%s, "
                "includes=%s, limit=%s)"
                % (self.__class__.__name__,
                   repr(self.client), repr(self.entity.BeanName),
                   repr(self.attribute), repr(self.order),
                   repr(self.conditions), repr(self.includes),
                   repr(self.limit)))
    def __str__(self):
        """Return a string representation of the query.

        Note for Python 2: the result will be an Unicode object, if
        any of the conditions in the query contains unicode.  This
        violates the specification of the string representation
        operator that requires the return value to be a string object.
        But it is the *right thing* to do to get queries with
        non-ascii characters working.  For Python 3, there is no
        distinction between Unicode and string objects anyway.
        """
        # SELECT either the whole object or the requested attribute.
        if self.attribute is None:
            res = "o"
        else:
            res = "o.%s" % self.attribute
        base = "SELECT %s FROM %s o" % (res, self.entity.BeanName)
        # All attributes referenced in ORDER BY or WHERE need their
        # parent paths JOINed under an alias.
        joinattrs = set(self.order) | set(self.conditions.keys())
        subst = _makesubst(joinattrs)
        joins = ""
        for obj in sorted(subst.keys()):
            joins += " JOIN %s" % _dosubst(obj, subst)
        if self.conditions:
            conds = []
            for a in sorted(self.conditions.keys()):
                attr = _dosubst(a, subst, False)
                cond = self.conditions[a]
                # A condition value is either one string or a list of
                # strings; each yields one "attr condition" term.
                if isinstance(cond, basestring):
                    conds.append("%s %s" % (attr, cond))
                else:
                    for c in cond:
                        conds.append("%s %s" % (attr, c))
            where = " WHERE " + " AND ".join(conds)
        else:
            where = ""
        if self.order:
            orders = [ _dosubst(a, subst, False) for a in self.order ]
            order = " ORDER BY " + ", ".join(orders)
        else:
            order = ""
        if self.includes:
            # INCLUDE uses its own alias map; intermediate parent
            # paths are included as well.
            subst = _makesubst(self.includes)
            includes = set(self.includes)
            includes.update(subst.keys())
            incl = [ _dosubst(obj, subst) for obj in sorted(includes) ]
            include = " INCLUDE " + ", ".join(incl)
        else:
            include = ""
        if self.limit:
            limit = " LIMIT %s, %s" % self.limit
        else:
            limit = ""
        return base + joins + where + order + include + limit
    def copy(self):
        """Return an independent clone of this query.
        """
        q = Query(self.client, self.entity)
        q.attribute = self.attribute
        # Shallow-copy the mutable clause state so later edits to the
        # clone do not leak back into this query.
        q.order = list(self.order)
        q.conditions = self.conditions.copy()
        q.includes = self.includes.copy()
        q.limit = self.limit
        return q
|
import logging
import re
from datetime import datetime
from praw.helpers import submission_stream
from praw.errors import AlreadySubmitted, APIException
from images_of import settings
from images_of.subreddit import Subreddit
class Bot:
    """Crosspost image submissions from r/all into themed slave subreddits.

    Global user/subreddit blacklists are loaded from the master
    subreddit's wiki; per-subreddit matching is delegated to the
    Subreddit helper objects.
    """
    def __init__(self, r, should_post=True):
        """Set up blacklists, slave subreddits and URL matchers.

        :param r: an authenticated praw Reddit session.
        :param should_post: when False, run in dry-run mode -- log what
            would be posted but never submit or comment.
        """
        self.r = r
        self.should_post = should_post
        logging.info('Loading global user blacklist from wiki')
        self.blacklist_users = self._read_blacklist('userblacklist')
        logging.info('Loading global subreddit blacklist from wiki')
        self.blacklist_subs = self._read_blacklist('subredditblacklist')
        self.subreddits = []
        for sub_settings in settings.SLAVE_SUBS:
            sub = Subreddit(**sub_settings)
            sub.load_wiki_blacklist(r)
            self.subreddits.append(sub)
        # Match image links either by file extension or by whole domain.
        ext_pattern = '({})$'.format('|'.join(settings.EXTENSIONS))
        self.ext_re = re.compile(ext_pattern, flags=re.IGNORECASE)
        domain_pattern = '^({})$'.format('|'.join(settings.DOMAINS))
        self.domain_re = re.compile(domain_pattern, flags=re.IGNORECASE)
    def _read_blacklist(self, wiki_page):
        """Fetch a wiki page and return its entries as a lowercase set.

        NOTE(review): the [3:] slice presumably strips a '/u/' or '/r/'
        prefix from each line -- confirm the wiki page format.
        """
        content = self.r.get_wiki_page(settings.MASTER_SUB, wiki_page).content_md
        entries = [line.strip().lower()[3:] for line in content.splitlines() if line]
        return set(entries)
    def check(self, post):
        """Check global conditions on a post"""
        if not settings.NSFW_OK and post.over_18:
            return False
        # NOTE(review): post.author is None for deleted accounts in
        # praw; this would raise AttributeError -- confirm upstream
        # filtering.
        user = post.author.name.lower()
        if user in self.blacklist_users:
            return False
        sub = post.subreddit.display_name.lower()
        if sub in self.blacklist_subs:
            return False
        # Accept if either the whole domain or the file extension
        # identifies an image link.
        if self.domain_re.search(post.domain):
            return True
        if self.ext_re.search(post.url):
            return True
        return False
    def crosspost(self, post, sub):
        """Submit post's link into the slave subreddit `sub` and leave an
        attribution comment; honors dry-run mode.
        """
        title = post.title
        comment = '[Original post]({}) by /u/{} in /r/{}'.format(
            post.permalink,
            post.author,
            post.subreddit)
        try:
            logging.info('X-Posting into /r/{}: {}'.format(sub.name, title))
            if self.should_post:
                xpost = self.r.submit(
                    sub.name,
                    title,
                    url=post.url,
                    captcha=None,
                    send_replies=True,
                    resubmit=False)
            logging.debug('Commenting: {}'.format(comment))
            # Guarded separately so dry-run mode never touches xpost,
            # which is only bound when should_post is true.
            if self.should_post:
                xpost.add_comment(comment)
        except AlreadySubmitted:
            logging.info('Already submitted. Skipping.')
        except APIException as e:
            logging.warning(e)
    def check_age(self, post):
        """Return True when the author's account is older than two days."""
        created = datetime.utcfromtimestamp(post.author.created_utc)
        age = (datetime.utcnow() - created).days
        return age > 2
    def run(self):
        """Main loop: stream r/all and crosspost matching submissions."""
        stream = submission_stream(self.r, 'all')
        for post in stream:
            if not self.check(post):
                continue
            # Lazily evaluate the account-age check only once per post,
            # and only if at least one subreddit matches.
            age_ok = None
            for sub in self.subreddits:
                if sub.check(post):
                    if age_ok is None:
                        age_ok = self.check_age(post)
                    if not age_ok:
                        break
                    self.crosspost(post, sub)
Add recent_posts log to avoid double posting.
import logging
import re
from collections import deque
from datetime import datetime
from praw.helpers import submission_stream
from praw.errors import AlreadySubmitted, APIException
from images_of import settings
from images_of.subreddit import Subreddit
class Bot:
    """Crosspost image submissions from r/all into themed slave subreddits.

    Global user/subreddit blacklists are loaded from the master
    subreddit's wiki; per-subreddit matching is delegated to the
    Subreddit helper objects.  A bounded log of recent (url, subreddit)
    pairs prevents double posting.
    """
    def __init__(self, r, should_post=True):
        """Set up blacklists, slave subreddits and URL matchers.

        :param r: an authenticated praw Reddit session.
        :param should_post: when False, run in dry-run mode -- log what
            would be posted but never submit or comment.
        """
        self.r = r
        self.should_post = should_post
        # Bounded FIFO of (url, subreddit) pairs already handled; the
        # maxlen keeps memory constant while covering the recent stream.
        self.recent_posts = deque(maxlen=50)
        logging.info('Loading global user blacklist from wiki')
        self.blacklist_users = self._read_blacklist('userblacklist')
        logging.info('Loading global subreddit blacklist from wiki')
        self.blacklist_subs = self._read_blacklist('subredditblacklist')
        self.subreddits = []
        for sub_settings in settings.SLAVE_SUBS:
            sub = Subreddit(**sub_settings)
            sub.load_wiki_blacklist(r)
            self.subreddits.append(sub)
        # Match image links either by file extension or by whole domain.
        ext_pattern = '({})$'.format('|'.join(settings.EXTENSIONS))
        self.ext_re = re.compile(ext_pattern, flags=re.IGNORECASE)
        domain_pattern = '^({})$'.format('|'.join(settings.DOMAINS))
        self.domain_re = re.compile(domain_pattern, flags=re.IGNORECASE)
    def _read_blacklist(self, wiki_page):
        """Fetch a wiki page and return its entries as a lowercase set.

        NOTE(review): the [3:] slice presumably strips a '/u/' or '/r/'
        prefix from each line -- confirm the wiki page format.
        """
        content = self.r.get_wiki_page(settings.MASTER_SUB, wiki_page).content_md
        entries = [line.strip().lower()[3:] for line in content.splitlines() if line]
        return set(entries)
    def check(self, post):
        """Check global conditions on a post"""
        if not settings.NSFW_OK and post.over_18:
            return False
        # NOTE(review): post.author is None for deleted accounts in
        # praw; this would raise AttributeError -- confirm upstream
        # filtering.
        user = post.author.name.lower()
        if user in self.blacklist_users:
            return False
        sub = post.subreddit.display_name.lower()
        if sub in self.blacklist_subs:
            return False
        # Accept if either the whole domain or the file extension
        # identifies an image link.
        if self.domain_re.search(post.domain):
            return True
        if self.ext_re.search(post.url):
            return True
        return False
    def crosspost(self, post, sub):
        """Submit post's link into the slave subreddit `sub` and leave an
        attribution comment; honors dry-run mode and skips (url, sub)
        pairs seen recently.
        """
        title = post.title
        comment = '[Original post]({}) by /u/{} in /r/{}'.format(
            post.permalink,
            post.author,
            post.subreddit)
        # De-duplicate on (url, target subreddit) before hitting the API.
        log_entry = (post.url, sub.name)
        if log_entry in self.recent_posts:
            logging.info('Already posted {} to /r/{}. Skipping.'.format(title, sub.name))
            return
        else:
            self.recent_posts.append(log_entry)
            logging.debug('Added {} to recent posts. Now tracking {} items.'
                          .format(log_entry, len(self.recent_posts)))
        try:
            logging.info('X-Posting into /r/{}: {}'.format(sub.name, title))
            if self.should_post:
                xpost = self.r.submit(
                    sub.name,
                    title,
                    url=post.url,
                    captcha=None,
                    send_replies=True,
                    resubmit=False)
            logging.debug('Commenting: {}'.format(comment))
            # Guarded separately so dry-run mode never touches xpost,
            # which is only bound when should_post is true.
            if self.should_post:
                xpost.add_comment(comment)
        except AlreadySubmitted:
            logging.info('Already submitted. Skipping.')
        except APIException as e:
            logging.warning(e)
    def check_age(self, post):
        """Return True when the author's account is older than two days."""
        created = datetime.utcfromtimestamp(post.author.created_utc)
        age = (datetime.utcnow() - created).days
        return age > 2
    def run(self):
        """Main loop: stream r/all and crosspost matching submissions."""
        stream = submission_stream(self.r, 'all')
        for post in stream:
            if not self.check(post):
                continue
            # Lazily evaluate the account-age check only once per post,
            # and only if at least one subreddit matches.
            age_ok = None
            for sub in self.subreddits:
                if sub.check(post):
                    if age_ok is None:
                        age_ok = self.check_age(post)
                    if not age_ok:
                        break
                    self.crosspost(post, sub)
|
# Copyright (c) Sunlight Foundation, 2014, under the BSD-3 License.
# Authors:
# - Paul R. Tagliamonte <paultag@sunlightfoundation.com>
from django.core.paginator import Paginator, EmptyPage
from restless.modelviews import ListEndpoint, DetailEndpoint
from restless.models import serialize
from restless.http import HttpError, Http200
from collections import defaultdict
from django.conf import settings
from django.db import connections
import datetime
import math
def get_field_list(model, without=None):
    """
    Get a list of all known field names on a Django model.  Optionally,
    you may exclude keys by passing a list of keys to avoid in the
    'without' kwarg.
    """
    excluded = set() if without is None else set(without)
    names = set(model._meta.get_all_field_names())
    return list(names - excluded)
class FieldKeyError(KeyError):
    """KeyError variant carrying the name of the offending field, so
    callers can prefix it with the parent path and report it.
    """
    def __init__(self, field):
        # Remember which serialization field was invalid.
        self.field = field

    def __str__(self):
        label = self.field
        return "<FieldKeyError: %s>" % (label)
def get_fields(root, fields):
    """
    Return a list of objects to prefetch and a composed spec for the
    DjangoRestless serialize call given a root spec dictionary and a list
    of fields.

    Fields may be dotted to represent sub-elements, which will
    traverse the root dictonary.

    This function returns a tuple, prefetch-able fields, and a serialize
    function spec.  The result of the latter may be passed directly into
    serialize, and will limit based on `fields`, rather then `include` or
    `exclude`.
    """
    def fwrap(obj, memo=None):
        """
        Ensure this object can be passed into serialize by turning it from
        a raw structure dict into a serialize spec.  Most of the time
        this is just wrapping dicts in {"fields": ...}.
        """
        # memo holds ids of dicts already visited on this path, so
        # cyclic specs terminate instead of recursing forever.
        memo = memo if memo else set()
        id_ = id(obj)
        if id_ in memo:
            return None
        memo.add(id_)
        if isinstance(obj, dict):
            # Already a serialize spec (or empty) -- pass through.
            if obj == {} or obj.get("fields"):
                return obj
            # Recursively wrap sub-specs, dropping entries that
            # resolved to None (cycles / empty subtrees).
            obj = list(filter(
                lambda x: x[1] != None,
                [(x, fwrap(y, memo=memo)) for x, y in obj.items()]
            ))
            if obj == []:
                return None
            return {"fields": obj}
        return obj
    prefetch = set([])
    subfields = defaultdict(list)
    concrete = []
    # Split the requested fields into direct ones and dotted sub-paths
    # grouped by their first component.
    for field in fields:
        if '.' not in field:
            concrete.append(field)
            continue
        prefix, postfix = field.split(".", 1)
        # Only leaf-level relations are recorded for prefetching here;
        # deeper paths get their prefetch names from the recursion below.
        if '.' not in postfix:
            prefetch.add(prefix)
        subfields[prefix].append(postfix)
    try:
        ret = {x: fwrap(root[x]) for x in concrete}
    except KeyError as e:
        # Re-raise as FieldKeyError so callers can report the bad field.
        raise FieldKeyError(*e.args)
    # NOTE: this rebinding shadows the `fields` parameter, which is not
    # used again below.
    for key, fields in subfields.items():
        try:
            _prefetch, ret[key] = get_fields(root[key], fields)
        except FieldKeyError as e:
            # Prefix the offending field with its parent path.
            e.field = "%s.%s" % (key, e.field)
            raise e
        # Translate nested prefetch names into Django's "a__b" form.
        prefetch = prefetch.union({"%s__%s" % (key, x) for x in _prefetch})
    return (prefetch, fwrap(ret))
def cachebusterable(fn):
    """
    Allow front-end tools to pass a "_" param with different arguments
    to work past cache (the default behavior for select2, for example).

    Strips "_" from request.params before the wrapped view handler runs,
    so no handler needs to special-case it.
    """
    def wrapper(self, request, *args, **kwargs):
        # Drop the cache-buster key if present; no-op otherwise.
        request.params.pop("_", None)
        return fn(self, request, *args, **kwargs)
    return wrapper
class DebugMixin(object):
    """Mixin collecting per-request timing and SQL query statistics.

    Both methods are no-ops (returning None) unless settings.DEBUG is
    on; call start_debug() before the work and get_debug() after.
    """
    def start_debug(self):
        # Snapshot wall clock and the query-log length so get_debug()
        # can report only what happened in between.
        if settings.DEBUG:
            self.start_time = datetime.datetime.utcnow()
            self.start_queries = len(connections['default'].queries)
    def get_debug(self):
        """Return a dict of timing/query/connection debug info, or None
        when settings.DEBUG is off.
        """
        if settings.DEBUG:
            end_time = datetime.datetime.utcnow()
            connection = connections['default']
            end_queries = len(connection.queries)
            # NOTE(review): dsn/pg_version/psycopg2_version assume a
            # PostgreSQL (psycopg2) backend -- confirm.
            return {
                "connection": {
                    "query": {
                        "count_start": self.start_queries,
                        "count_end": end_queries,
                        "count": (end_queries - self.start_queries),
                        "list": connection.queries,
                    },
                    "dsn": connection.connection.dsn,
                    "vendor": connection.vendor,
                    "pg_version": connection.pg_version,
                    "psycopg2_version": ".".join([
                        str(x) for x in connection.psycopg2_version
                    ])
                },
                "time": {
                    "start": self.start_time.isoformat(),
                    "end": end_time.isoformat(),
                    "seconds": (end_time - self.start_time).total_seconds()
                },
            }
class PublicListEndpoint(ListEndpoint, DebugMixin):
    """
    Imago public list API helper class.

    This class exists to be subclassed by concrete views, and builds in
    sane default behavior for all list views.

    Critically it allows for:
        - Filtering
        - Sorting
        - Pagination
        - Meta-dictionary for the clients
        - Opinionated serializion with the helpers above.

    This allows our views to be declarative, and allow for subclass overriding
    of methods when needed.

    Access-Control-Allow-Origin is currently always set to "*", since this
    is a global read-only API.

    As a result, JSONP is disabled.  Read more on using CORS:
        - http://en.wikipedia.org/wiki/Cross-origin_resource_sharing

    The 'get' class-based view method invokes the following helpers:

    [ Methods ]
        - get_query_set   | Get the Django query set for the request.
        - filter          | Filter the resulting query set.
        - sort            | Sort the filtered query set
        - paginate        | Paginate the sorted query set

    [ Object Properties ]
        - model            | Django ORM Model / class to query using.
        - per_page         | Objects to show per-page.
        - default_fields   | If no `fields` param is passed in, use this
                           | to limit the `serialize_config`.
        - serialize_config | Object serializion to use.  Many are in
                           | the imago.serialize module
    """
    methods = ['GET']
    # Hard ceiling on page size, whatever the client asks for.
    max_per_page = 100
    serialize_config = {}
    default_fields = []
    def adjust_filters(self, params):
        """
        Adjust the filter params from within the `filter' call.
        """
        return params
    def filter(self, data, **kwargs):
        """
        Filter the Django query set.

        THe kwargs will be unpacked into Django directly, letting you
        use full Django query syntax here.
        """
        kwargs = self.adjust_filters(kwargs)
        return data.filter(**kwargs)
    def sort(self, data, sort_by):
        """
        Sort the Django query set.  The sort_by param will be
        unpacked into 'order_by' directly.
        """
        return data.order_by(*sort_by)
    def paginate(self, data, page, per_page):
        """
        Paginate the Django response.  It will default to
        `self.per_page` as the `per_page` argument to the built-in
        Django `Paginator`.  This will return `paginator.page` for the
        page number passed in.

        :raise EmptyPage: if the page number is out of bounds.
        """
        paginator = Paginator(data, per_page=per_page)
        return paginator.page(page)
    @cachebusterable
    def get(self, request, *args, **kwargs):
        """
        Default 'GET' class-based view.

        Pops the control params (page, per_page, sort_by, fields) off
        request.params; everything left over is treated as Django
        filter syntax.
        """
        params = request.params
        # default to page 1
        page = int(params.pop('page', 1))
        # Clamp the client-requested page size to max_per_page.
        per_page = min(self.max_per_page, int(params.pop('per_page', self.max_per_page)))
        sort_by = []
        if 'sort_by' in params:
            sort_by = params.pop('sort_by').split(",")
        fields = self.default_fields
        if 'fields' in params:
            fields = params.pop('fields').split(",")
        data = self.get_query_set(request, *args, **kwargs)
        data = self.filter(data, **params)
        data = self.sort(data, sort_by)
        try:
            related, config = get_fields(self.serialize_config, fields=fields)
        except FieldKeyError as e:
            raise HttpError(400, "Error: You've asked for a field ({}) that "
                            "is invalid. Valid fields are: {}".format(
                                e.field, ', '.join(self.serialize_config.keys()))
                            )
        except KeyError as e:
            raise HttpError(400, "Error: Invalid field: %s" % (e))
        data = data.prefetch_related(*related)
        try:
            data_page = self.paginate(data, page, per_page)
        except EmptyPage:
            raise HttpError(404, 'No such page (heh, literally - its out of bounds)')
        # NOTE(review): the debug timer starts after pagination, so the
        # count/page queries above are not captured -- confirm intended.
        self.start_debug()
        count = data_page.paginator.count
        # NOTE(review): under Python 2, count / per_page is integer
        # division, making math.ceil a no-op and max_page potentially
        # off by one -- confirm target Python version.
        response = {
            "meta": {
                "count": len(data_page.object_list),
                "page": page,
                "per_page": per_page,
                "max_page": math.ceil(count / per_page),
                "total_count": count,
            }, "results": [
                serialize(x, **config) for x in data_page.object_list
            ]
        }
        if settings.DEBUG:
            response['debug'] = self.get_debug()
            response['debug'].update({
                "prefetch_fields": list(related),
                "page": page,
                "sort_by": sort_by,
                "field": fields,
            })
        response = Http200(response)
        # Read-only public API: allow any origin (JSONP stays disabled).
        response['Access-Control-Allow-Origin'] = "*"
        return response
class PublicDetailEndpoint(DetailEndpoint, DebugMixin):
    """
    Imago public detail view API helper class.

    This class exists to be subclassed by concrete views, and builds in
    sane default behavior for all detail views.

    This allows our views to be declarative, and allow for subclass overriding
    of methods when needed.

    Access-Control-Allow-Origin is currently always set to "*", since this
    is a global read-only API.

    As a result, JSONP is disabled. Read more on using CORS:
        - http://en.wikipedia.org/wiki/Cross-origin_resource_sharing

    The 'get' class-based view method uses the following object properties:
        - model            | Django ORM Model / class to query using.
        - default_fields   | If no `fields` param is passed in, use this
                           | to limit the `serialize_config`.
        - serialize_config | Object serializion to use. Many are in
                           | the imago.serialize module
    """
    methods = ['GET']

    @cachebusterable
    def get(self, request, pk, *args, **kwargs):
        """
        Default 'GET' class-based view: fetch the object by primary key,
        serialize the requested fields and return it with CORS enabled.
        """
        params = request.params
        fields = self.default_fields
        if 'fields' in params:
            fields = params.pop('fields').split(",")
        related, config = get_fields(self.serialize_config, fields=fields)
        self.start_debug()
        obj = self.model.objects.prefetch_related(*related).get(pk=pk)
        serialized = serialize(obj, **config)
        # BUGFIX: attach debug info only when DEBUG is enabled. Previously
        # get_debug() (which returns None outside DEBUG) was stored
        # unconditionally, leaking a "debug": null entry into every
        # production response, unlike the list endpoint.
        if settings.DEBUG:
            serialized['debug'] = self.get_debug()
        response = Http200(serialized)
        # Read-only public API: allow cross-origin requests from anywhere.
        response['Access-Control-Allow-Origin'] = "*"
        return response
authenticated
# Copyright (c) Sunlight Foundation, 2014, under the BSD-3 License.
# Authors:
# - Paul R. Tagliamonte <paultag@sunlightfoundation.com>
from django.core.paginator import Paginator, EmptyPage
from restless.modelviews import ListEndpoint, DetailEndpoint
from restless.models import serialize
from restless.http import HttpError, Http200
from collections import defaultdict
from django.conf import settings
from django.db import connections
import datetime
import math
def get_field_list(model, without=None):
    """
    Return the names of all known fields on a Django model.

    Parameters
    ----------
    model : Django model class
        Model whose field names are listed (via ``model._meta``).
    without : iterable of str or None
        Optional field names to exclude from the result.
    """
    excluded = set() if without is None else set(without)
    return list(set(model._meta.get_all_field_names()) - excluded)
class FieldKeyError(KeyError):
    """
    Raised when a requested serialization field is missing from the spec.

    The offending (possibly dotted) field name is carried in ``self.field``
    so callers can build a helpful error message.
    """

    def __init__(self, field):
        self.field = field

    def __str__(self):
        return "<FieldKeyError: {}>".format(self.field)
def get_fields(root, fields):
    """
    Return a list of objects to prefetch and a composed spec for the
    DjangoRestless serialize call, given a root spec dictionary and a list
    of fields.

    Fields may be dotted to represent sub-elements, which will traverse the
    root dictionary.

    Returns a tuple of (prefetch-able field names, serialize spec). The
    latter may be passed directly into serialize, and will limit based on
    `fields` rather than `include` or `exclude`.

    Raises FieldKeyError (with the dotted path filled in) when a requested
    field is not present in `root`.
    """
    def fwrap(obj, memo=None):
        """
        Ensure this object can be passed into serialize by turning it from
        a raw structure dict into a serialize spec. Most of the time this is
        just wrapping dicts in {"fields": ...}.

        `memo` records ids of dicts already visited so cyclic specs do not
        recurse forever; a revisited object yields None and is filtered out.
        """
        memo = memo if memo else set()
        id_ = id(obj)
        if id_ in memo:
            return None
        memo.add(id_)

        if isinstance(obj, dict):
            # Empty dicts and already-wrapped specs pass through unchanged.
            if obj == {} or obj.get("fields"):
                return obj
            obj = list(filter(
                lambda x: x[1] != None,
                [(x, fwrap(y, memo=memo)) for x, y in obj.items()]
            ))
            if obj == []:
                return None
            return {"fields": obj}
        return obj

    prefetch = set([])
    subfields = defaultdict(list)
    concrete = []

    # Split requested fields into top-level ("name") and dotted ("a.b") ones.
    for field in fields:
        if '.' not in field:
            concrete.append(field)
            continue
        prefix, postfix = field.split(".", 1)
        if '.' not in postfix:
            # Direct child: the prefix relation itself can be prefetched.
            prefetch.add(prefix)
        subfields[prefix].append(postfix)

    try:
        ret = {x: fwrap(root[x]) for x in concrete}
    except KeyError as e:
        # Re-raise with the offending field name attached.
        raise FieldKeyError(*e.args)

    # Recurse into dotted fields; nested errors and prefetch paths get the
    # parent key prefixed ("key.child" / "key__child").
    for key, fields in subfields.items():
        try:
            _prefetch, ret[key] = get_fields(root[key], fields)
        except FieldKeyError as e:
            e.field = "%s.%s" % (key, e.field)
            raise e
        prefetch = prefetch.union({"%s__%s" % (key, x) for x in _prefetch})

    return (prefetch, fwrap(ret))
def cachebusterable(fn):
    """
    Decorator that strips the cache-busting "_" query param.

    Front-end tools (select2 by default) append a "_" param with varying
    values to work past caches; dropping it here keeps it out of every view
    handler and avoids special casing in multiple places.
    """
    def _(self, request, *args, **kwargs):
        # Discard the param if present; everything else passes through.
        request.params.pop("_", None)
        return fn(self, request, *args, **kwargs)
    return _
def authenticated(fn):
    """
    Decorator that ensures `request.apikey` is a valid, active key.

    Requests pass through untouched when locksmith enforcement is disabled
    (`settings.USE_LOCKSMITH` falsy) or when the attached key has status
    'A' (active); otherwise an HTTP 403 is raised pointing at the key
    registration URL.
    """
    def _(self, request, *args, **kwargs):
        if not settings.USE_LOCKSMITH or (hasattr(request, 'apikey') and
                                          request.apikey.status == 'A'):
            return fn(self, request, *args, **kwargs)
        else:
            # BUGFIX: the registration URL lives on Django settings; the bare
            # name LOCKSMITH_REGISTRATION_URL raised NameError on the 403 path.
            raise HttpError(403, "Authorization Required: obtain a key at " +
                            settings.LOCKSMITH_REGISTRATION_URL)
    return _
class DebugMixin(object):
    """
    Mixin recording wall-clock time and SQL query counts for a request.

    Both methods are effectively no-ops (get_debug returns None) unless
    settings.DEBUG is True; callers are expected to guard on settings.DEBUG
    themselves before attaching the result to a response.
    """

    def start_debug(self):
        # Snapshot the start time and the 'default' DB connection's query
        # count; read back later by get_debug().
        if settings.DEBUG:
            self.start_time = datetime.datetime.utcnow()
            self.start_queries = len(connections['default'].queries)

    def get_debug(self):
        # NOTE(review): dsn, pg_version and psycopg2_version are
        # PostgreSQL/psycopg2-specific connection attributes -- this will
        # fail on other DB backends; confirm postgres-only deployment.
        if settings.DEBUG:
            end_time = datetime.datetime.utcnow()
            connection = connections['default']
            end_queries = len(connection.queries)
            return {
                "connection": {
                    "query": {
                        "count_start": self.start_queries,
                        "count_end": end_queries,
                        "count": (end_queries - self.start_queries),
                        "list": connection.queries,
                    },
                    "dsn": connection.connection.dsn,
                    "vendor": connection.vendor,
                    "pg_version": connection.pg_version,
                    "psycopg2_version": ".".join([
                        str(x) for x in connection.psycopg2_version
                    ])
                },
                "time": {
                    "start": self.start_time.isoformat(),
                    "end": end_time.isoformat(),
                    "seconds": (end_time - self.start_time).total_seconds()
                },
            }
class PublicListEndpoint(ListEndpoint, DebugMixin):
    """
    Imago public list API helper class.

    This class exists to be subclassed by concrete views, and builds in
    sane default behavior for all list views.

    Critically it allows for:
        - Filtering
        - Sorting
        - Pagination
        - Meta-dictionary for the clients
        - Opinionated serializion with the helpers above.

    This allows our views to be declarative, and allow for subclass overriding
    of methods when needed.

    Access-Control-Allow-Origin is currently always set to "*", since this
    is a global read-only API. As a result, JSONP is disabled. Read more on
    using CORS:
        - http://en.wikipedia.org/wiki/Cross-origin_resource_sharing

    The 'get' class-based view method invokes the following helpers:

    [ Methods ]
        - get_query_set  | Get the Django query set for the request.
        - filter         | Filter the resulting query set.
        - sort           | Sort the filtered query set
        - paginate       | Paginate the sorted query set

    [ Object Properties ]
        - model            | Django ORM Model / class to query using.
        - per_page         | Objects to show per-page.
        - default_fields   | If no `fields` param is passed in, use this
                           | to limit the `serialize_config`.
        - serialize_config | Object serializion to use. Many are in
                           | the imago.serialize module
    """
    methods = ['GET']
    # Hard ceiling on the client-requested page size (also the default).
    max_per_page = 100
    serialize_config = {}
    default_fields = []

    def adjust_filters(self, params):
        """
        Adjust the filter params from within the `filter' call.

        Subclass hook: receives the raw filter kwargs and returns the
        (possibly rewritten) kwargs to hand to the ORM.
        """
        return params

    def filter(self, data, **kwargs):
        """
        Filter the Django query set.

        The kwargs will be unpacked into Django directly, letting you
        use full Django query syntax here.
        """
        kwargs = self.adjust_filters(kwargs)
        return data.filter(**kwargs)

    def sort(self, data, sort_by):
        """
        Sort the Django query set. The sort_by param will be
        unpacked into 'order_by' directly.
        """
        return data.order_by(*sort_by)

    def paginate(self, data, page, per_page):
        """
        Paginate the query set and return the requested page.

        `per_page` is the page size actually used -- the caller computes it
        (capped at `max_per_page`); no instance default is consulted here.
        Propagates `EmptyPage` when `page` is out of range.
        """
        paginator = Paginator(data, per_page=per_page)
        return paginator.page(page)

    @authenticated
    @cachebusterable
    def get(self, request, *args, **kwargs):
        """
        Default 'GET' class-based view.

        Pops the reserved query params (page, per_page, sort_by, fields) off
        `request.params`; everything left over is passed verbatim to
        `filter()` as ORM filter kwargs. Raises HttpError 400 for unknown
        fields and HttpError 404 for an out-of-range page.
        """
        params = request.params
        # default to page 1
        page = int(params.pop('page', 1))
        # Cap the client-requested page size at max_per_page.
        per_page = min(self.max_per_page, int(params.pop('per_page', self.max_per_page)))
        sort_by = []
        if 'sort_by' in params:
            sort_by = params.pop('sort_by').split(",")
        fields = self.default_fields
        if 'fields' in params:
            fields = params.pop('fields').split(",")
        data = self.get_query_set(request, *args, **kwargs)
        # Remaining params are ORM filters (all reserved keys popped above).
        data = self.filter(data, **params)
        data = self.sort(data, sort_by)
        try:
            # Translate requested fields into prefetch targets + serialize spec.
            related, config = get_fields(self.serialize_config, fields=fields)
        except FieldKeyError as e:
            raise HttpError(400, "Error: You've asked for a field ({}) that "
                            "is invalid. Valid fields are: {}".format(
                                e.field, ', '.join(self.serialize_config.keys()))
                            )
        except KeyError as e:
            raise HttpError(400, "Error: Invalid field: %s" % (e))
        data = data.prefetch_related(*related)
        try:
            data_page = self.paginate(data, page, per_page)
        except EmptyPage:
            raise HttpError(404, 'No such page (heh, literally - its out of bounds)')
        self.start_debug()
        count = data_page.paginator.count
        response = {
            "meta": {
                "count": len(data_page.object_list),
                "page": page,
                "per_page": per_page,
                # NOTE(review): on Python 2, `count / per_page` is integer
                # division, making ceil() a no-op -- confirm interpreter.
                "max_page": math.ceil(count / per_page),
                "total_count": count,
            }, "results": [
                serialize(x, **config) for x in data_page.object_list
            ]
        }
        if settings.DEBUG:
            response['debug'] = self.get_debug()
            response['debug'].update({
                "prefetch_fields": list(related),
                "page": page,
                "sort_by": sort_by,
                "field": fields,
            })
        response = Http200(response)
        # Read-only public API: allow cross-origin requests from anywhere.
        response['Access-Control-Allow-Origin'] = "*"
        return response
class PublicDetailEndpoint(DetailEndpoint, DebugMixin):
    """
    Imago public detail view API helper class.

    This class exists to be subclassed by concrete views, and builds in
    sane default behavior for all detail views.

    This allows our views to be declarative, and allow for subclass overriding
    of methods when needed.

    Access-Control-Allow-Origin is currently always set to "*", since this
    is a global read-only API.

    As a result, JSONP is disabled. Read more on using CORS:
        - http://en.wikipedia.org/wiki/Cross-origin_resource_sharing

    The 'get' class-based view method uses the following object properties:
        - model            | Django ORM Model / class to query using.
        - default_fields   | If no `fields` param is passed in, use this
                           | to limit the `serialize_config`.
        - serialize_config | Object serializion to use. Many are in
                           | the imago.serialize module
    """
    methods = ['GET']

    @authenticated
    @cachebusterable
    def get(self, request, pk, *args, **kwargs):
        """
        Default 'GET' class-based view: fetch the object by primary key,
        serialize the requested fields and return it with CORS enabled.
        """
        params = request.params
        fields = self.default_fields
        if 'fields' in params:
            fields = params.pop('fields').split(",")
        related, config = get_fields(self.serialize_config, fields=fields)
        self.start_debug()
        obj = self.model.objects.prefetch_related(*related).get(pk=pk)
        serialized = serialize(obj, **config)
        # BUGFIX: attach debug info only when DEBUG is enabled. Previously
        # get_debug() (which returns None outside DEBUG) was stored
        # unconditionally, leaking a "debug": null entry into every
        # production response, unlike the list endpoint.
        if settings.DEBUG:
            serialized['debug'] = self.get_debug()
        response = Http200(serialized)
        # Read-only public API: allow cross-origin requests from anywhere.
        response['Access-Control-Allow-Origin'] = "*"
        return response
|
from __future__ import print_function, division, absolute_import

import collections
import copy
import json
import math
import multiprocessing
import numbers
import os
import random
import sys
import threading
import time
import traceback

import cv2
import numpy as np
import six
import six.moves as sm
import skimage.draw
import skimage.measure
from scipy import misc, ndimage
# Python 2/3 compatibility: pick the C-accelerated pickle and the correct
# queue-exception import locations; alias xrange to range on Python 3.
if sys.version_info[0] == 2:
    import cPickle as pickle
    from Queue import Empty as QueueEmpty, Full as QueueFull
elif sys.version_info[0] == 3:
    import pickle
    from queue import Empty as QueueEmpty, Full as QueueFull
    xrange = range
# Sentinel string -- presumably used by augmenters to mean "apply to all";
# TODO(review) confirm against the augmenter code.
ALL = "ALL"

# Directory containing this module; used to locate the bundled data files below.
FILE_DIR = os.path.dirname(os.path.abspath(__file__))

# filepath to the quokka image, its annotations and depth map
QUOKKA_FP = os.path.join(FILE_DIR, "quokka.jpg")
QUOKKA_ANNOTATIONS_FP = os.path.join(FILE_DIR, "quokka_annotations.json")
QUOKKA_DEPTH_MAP_HALFRES_FP = os.path.join(FILE_DIR, "quokka_depth_map_halfres.png")

# Bundled font used by draw_text().
DEFAULT_FONT_FP = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    "DejaVuSans.ttf"
)

# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)
def is_np_array(val):
    """
    Return True if `val` is a numpy ndarray, else False.

    Numpy scalar values (np.generic) deliberately do not count as arrays
    here, which is why the check is against ndarray only.
    """
    return isinstance(val, np.ndarray)
def is_single_integer(val):
    """
    Return True if `val` is a single (non-bool) integer value, else False.
    """
    # bool is a subclass of int, so it must be excluded explicitly.
    if isinstance(val, bool):
        return False
    return isinstance(val, numbers.Integral)
def is_single_float(val):
    """
    Return True if `val` is a single float value (not an int, not a bool).
    """
    if isinstance(val, bool):
        return False
    # Exclude integral values so ints never count as floats.
    return isinstance(val, numbers.Real) and not is_single_integer(val)
def is_single_number(val):
    """
    Return True if `val` is a single scalar number (integer or float).
    """
    if is_single_integer(val):
        return True
    return is_single_float(val)
def is_iterable(val):
    """
    Return True if `val` is iterable (implements the Iterable ABC), else False.

    Note: this tests the ABC, so objects supporting only the legacy
    __getitem__ iteration protocol are not detected.
    """
    # BUGFIX: `collections.Iterable` was removed in Python 3.10; prefer
    # collections.abc (Python 3.3+) and fall back to the old location for
    # Python 2, which this module still supports.
    abc_module = getattr(collections, "abc", collections)
    return isinstance(val, abc_module.Iterable)
# TODO convert to is_single_string() or rename is_single_integer/float/number()
def is_string(val):
    """
    Return True if `val` is a string, else False.
    """
    # six.string_types covers Python 2 (str, unicode) as well as Python 3 (str).
    return isinstance(val, six.string_types)
def is_integer_array(val):
    """
    Return True if `val` is a numpy array with an integer dtype, else False.
    """
    if not is_np_array(val):
        return False
    return issubclass(val.dtype.type, np.integer)
def is_float_array(val):
    """
    Return True if `val` is a numpy array with a floating point dtype, else False.
    """
    if not is_np_array(val):
        return False
    return issubclass(val.dtype.type, np.floating)
def is_callable(val):
    """
    Return True if `val` can be called like a function, else False.
    """
    # callable() did not exist in Python 3.0/3.1 (restored in 3.2);
    # fall back to probing for __call__ on those versions.
    version = sys.version_info
    if version[0] == 3 and version[1] <= 2:
        return hasattr(val, '__call__')
    return callable(val)
def caller_name():
    """
    Return the name of the function that invoked this helper.

    Returns
    -------
    name : str
        The caller's function name.
    """
    # _getframe(1) is the stack frame of our direct caller.
    frame = sys._getframe(1)
    return frame.f_code.co_name
def seed(seedval):
    """
    Seed the library's global random state, and thereby all randomness
    in the library.

    Augmenters draw from this global state by default; in deterministic
    mode they use local states that are themselves derived from the global
    one, so seeding here makes results reproducible either way.

    Parameters
    ----------
    seedval : int
        The seed value to use.
    """
    CURRENT_RANDOM_STATE.seed(seedval)
def current_random_state():
    """
    Return the library's current/global random state.

    Returns
    -------
    out : np.random.RandomState
        The current/global random state object.
    """
    return CURRENT_RANDOM_STATE
def new_random_state(seed=None, fully_random=False):
    """
    Create and return a new numpy random state.

    Parameters
    ----------
    seed : None or int, optional(default=None)
        Seed for the new state; same datatypes as np.random.RandomState(seed)
        accepts.
    fully_random : bool, optional(default=False)
        If True and no seed is given, let numpy initialize the state itself.
        If False, a seed is sampled from the global random state instead,
        which is faster.

    Returns
    -------
    out : np.random.RandomState
        The newly created random state.
    """
    if seed is None and not fully_random:
        # Constructing from a sampled seed is noticeably faster than letting
        # RandomState() self-initialize from OS entropy.
        seed = CURRENT_RANDOM_STATE.randint(0, 10**6, 1)[0]
    return np.random.RandomState(seed)
def dummy_random_state():
    """
    Return a throwaway random state that is always seeded with 1.

    Returns
    -------
    out : np.random.RandomState
        The new random state.
    """
    return np.random.RandomState(1)
def copy_random_state(random_state, force_copy=False):
    """
    Create a copy of a random state.

    Parameters
    ----------
    random_state : np.random.RandomState
        The random state to copy.
    force_copy : bool, optional(default=False)
        If False, numpy's global np.random module is returned as-is rather
        than copied; real RandomState instances are always copied.

    Returns
    -------
    rs_copy : np.random.RandomState
        The copied random state (or np.random itself, see force_copy).
    """
    if not force_copy and random_state == np.random:
        # np.random is a module, not a RandomState instance; hand it back.
        return random_state
    # Seed value is irrelevant here -- the state is overwritten immediately.
    rs_copy = np.random.RandomState(1)
    rs_copy.set_state(random_state.get_state())
    return rs_copy
def derive_random_state(random_state):
    """
    Derive a single new random state from an existing random state.

    Parameters
    ----------
    random_state : np.random.RandomState
        Random state from which to derive the new random state.

    Returns
    -------
    result : np.random.RandomState
        Derived random state.
    """
    # Delegate to the N-states variant and unwrap the single result.
    return derive_random_states(random_state, n=1)[0]
# TODO use this everywhere instead of manual seed + create
def derive_random_states(random_state, n=1):
    """
    Derive `n` new random states from an existing random state.

    One seed is consumed from `random_state`; the derived states use
    consecutive offsets of that seed, so they are distinct but reproducible.

    Parameters
    ----------
    random_state : np.random.RandomState
        Random state from which to derive new random states.
    n : int, optional(default=1)
        Number of random states to derive.

    Returns
    -------
    result : list of np.random.RandomState
        Derived random states.
    """
    base_seed = random_state.randint(0, 10**6, 1)[0]
    return [new_random_state(base_seed + offset) for offset in sm.xrange(n)]
def forward_random_state(random_state):
    """
    Advance the internal state of a random state.

    This makes sure that future draws from `random_state` differ from what
    an identically-seeded, untouched state would produce.

    Parameters
    ----------
    random_state : np.random.RandomState
        Random state to forward.
    """
    # A single uniform draw is enough to move the generator forward.
    random_state.uniform()
# TODO
# def from_json(json_str):
# pass
def _quokka_normalize_extract(extract):
    """
    Generate a normalized rectangle to be extracted from the standard quokka image.

    Parameters
    ----------
    extract : "square" or tuple of number or BoundingBox or BoundingBoxesOnImage
        Unnormalized representation of the image subarea to be extracted.
            * If string "square", then a squared area (x: 0-643, y: 0-643) will
              be extracted from the image.
            * If a tuple, then expected to contain four numbers denoting
              x1, y1, x2 and y2.
            * If a BoundingBox, then that bounding box's area will be extracted
              from the image.
            * If a BoundingBoxesOnImage, then expected to contain exactly one
              bounding box and a shape matching the full image dimensions
              (i.e. (643, 960, *)). Then the one bounding box will be used
              similar to BoundingBox.

    Returns
    -------
    bb : BoundingBox
        Normalized representation of the area to extract from the standard
        quokka image.
    """
    if extract == "square":
        bb = BoundingBox(x1=0, y1=0, x2=643, y2=643)
    elif isinstance(extract, tuple) and len(extract) == 4:
        bb = BoundingBox(x1=extract[0], y1=extract[1], x2=extract[2], y2=extract[3])
    elif isinstance(extract, BoundingBox):
        bb = extract
    elif isinstance(extract, BoundingBoxesOnImage):
        # BUGFIX: the length check must run against the instance's bounding
        # boxes, not the class attribute (which raised/was meaningless).
        assert len(extract.bounding_boxes) == 1
        assert extract.shape[0:2] == (643, 960)
        bb = extract.bounding_boxes[0]
    else:
        # BUGFIX: message previously claimed None was accepted, but None
        # falls through to this error branch.
        raise Exception(
            "Expected 'square' or tuple of four entries or BoundingBox or "
            "BoundingBoxesOnImage for parameter 'extract', got %s." % (type(extract),)
        )
    return bb
def _compute_resized_shape(from_shape, to_shape):
    """
    Compute the intended new shape of an image-like array after resizing.

    Parameters
    ----------
    from_shape : tuple or ndarray
        Old shape of the array. Usually expected to be a tuple of form
        (H, W) or (H, W, C) or alternatively an array with two or three
        dimensions.
    to_shape : None or tuple of ints or tuple of floats or int or float
        New shape of the array.
            * If None, then `from_shape` will be used as the new shape.
            * If an int V, then the new shape will be (V, V, [C]), where C
              will be added if it is part of from_shape.
            * If a float V, then the new shape will be (H*V, W*V, [C]),
              where H and W are the old height/width.
            * If a tuple (H', W', [C']) of ints, then H' and W' will be used
              as the new height and width.
            * If a tuple (H', W', [C']) of floats (except C), then H' and W'
              will be used as relative height/width factors.

    Returns
    -------
    to_shape_computed : list of int
        New shape.
    """
    if is_np_array(from_shape):
        from_shape = from_shape.shape
    if is_np_array(to_shape):
        to_shape = to_shape.shape

    to_shape_computed = list(from_shape)
    if to_shape is None:
        pass
    elif isinstance(to_shape, tuple):
        # Channel handling: keep/verify C when both shapes carry it, or
        # append the requested C when only to_shape does.
        if len(from_shape) == 3 and len(to_shape) == 3:
            assert from_shape[2] == to_shape[2]
        elif len(to_shape) == 3:
            to_shape_computed.append(to_shape[2])

        if all([is_single_integer(v) for v in to_shape[0:2]]):
            to_shape_computed[0] = to_shape[0]
            to_shape_computed[1] = to_shape[1]
        elif all([is_single_float(v) for v in to_shape[0:2]]):
            to_shape_computed[0] = int(round(from_shape[0] * to_shape[0])) if to_shape[0] is not None else from_shape[0]
            to_shape_computed[1] = int(round(from_shape[1] * to_shape[1])) if to_shape[1] is not None else from_shape[1]
    elif is_single_integer(to_shape) or is_single_float(to_shape):
        # BUGFIX: this branch previously referenced the undefined name
        # `ia.is_single_int` (NameError) and then indexed into the scalar.
        # A single number applies to both height and width, so recurse with
        # an equivalent 2-tuple.
        to_shape_computed = _compute_resized_shape(from_shape, (to_shape, to_shape))
    else:
        raise Exception("Expected to_shape to be None or ndarray or tuple of floats or tuple of ints or single int or single float, got %s." % (type(to_shape),))

    return to_shape_computed
def quokka(size=None, extract=None):
    """
    Return an image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        Target size, passed through _compute_resized_shape() into
        scipy.misc.imresize. Usually a (H, W) tuple. If None, the image is
        returned at its native resolution.
    extract : None or "square" or tuple of four numbers or BoundingBox or BoundingBoxesOnImage
        Subarea of the quokka image to extract; see
        `_quokka_normalize_extract()`. If None, the whole image is used.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    img = ndimage.imread(QUOKKA_FP, mode="RGB")
    if extract is not None:
        img = _quokka_normalize_extract(extract).extract_from_image(img)
    if size is not None:
        new_shape = _compute_resized_shape(img.shape, size)
        img = misc.imresize(img, new_shape[0:2])
    return img
def quokka_square(size=None):
    """
    Return a square image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        Target size; see `quokka()`. If None, the image is not resized.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    # Delegates to quokka() with the predefined square extraction area.
    return quokka(size=size, extract="square")
def quokka_depth_map(size=None, extract=None):
    """
    Return a depth map for the standard example quokka image.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        Target size; see `quokka()`. If None, the map is returned at the
        full image resolution (643x960).
    extract : None or "square" or tuple of four numbers or BoundingBox or BoundingBoxesOnImage
        Subarea to extract; see `_quokka_normalize_extract()`.

    Returns
    -------
    img : ndarray
        The depth map of the quokka image.
    """
    # BUGFIX: previously this loaded the quokka photo (QUOKKA_FP) instead of
    # the bundled depth map; QUOKKA_DEPTH_MAP_HALFRES_FP was defined but never
    # used. The depth map ships at half resolution, so it is first upscaled to
    # the full 643x960 to keep extract coordinates aligned with the photo.
    img = ndimage.imread(QUOKKA_DEPTH_MAP_HALFRES_FP, mode="RGB")
    img = misc.imresize(img, (643, 960))
    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img = bb.extract_from_image(img)
    if size is not None:
        shape_resized = _compute_resized_shape(img.shape, size)
        img = misc.imresize(img, shape_resized[0:2])
    return img
def quokka_keypoints(size=None, extract=None):
    """
    Return example keypoints on the standard example quokka image.

    The keypoints cover the eyes, ears, nose and paws.

    Parameters
    ----------
    size : None or float or tuple of two ints or tuple of two floats, optional(default=None)
        Size of the output image on which the keypoints are placed. If None,
        the positions on the original image are used. Floats lead to relative
        size changes, ints to absolute sizes in pixels.
    extract : None or "square" or tuple of number or BoundingBox or BoundingBoxesOnImage
        Subarea to extract from the image. See `_quokka_normalize_extract()`.

    Returns
    -------
    kpsoi : KeypointsOnImage
        Example keypoints on the quokka image.
    """
    offset_x, offset_y = 0, 0
    bb_extract = None
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1

    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)

    # Shift the annotated coordinates into the (possibly extracted) subimage.
    keypoints = [
        Keypoint(x=entry["x"] - offset_x, y=entry["y"] - offset_y)
        for entry in json_dict["keypoints"]
    ]

    if bb_extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)

    kpsoi = KeypointsOnImage(keypoints, shape=shape)
    if size is not None:
        kpsoi = kpsoi.on(_compute_resized_shape(shape, size))
    return kpsoi
def quokka_bounding_boxes(size=None, extract=None):
    """
    Return example bounding boxes on the standard example quokka image.

    Currently only a single bounding box is annotated, covering the quokka.

    Parameters
    ----------
    size : None or float or tuple of two ints or tuple of two floats, optional(default=None)
        Size of the output image on which the BBs are placed. If None,
        the positions on the original image are used. Floats lead to relative
        size changes, ints to absolute sizes in pixels.
    extract : None or "square" or tuple of number or BoundingBox or BoundingBoxesOnImage
        Subarea to extract from the image. See `_quokka_normalize_extract()`.

    Returns
    -------
    bbsoi : BoundingBoxesOnImage
        Example BBs on the quokka image.
    """
    offset_x, offset_y = 0, 0
    bb_extract = None
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1

    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)

    # Shift the annotated corners into the (possibly extracted) subimage.
    bbs = [
        BoundingBox(
            x1=entry["x1"] - offset_x,
            y1=entry["y1"] - offset_y,
            x2=entry["x2"] - offset_x,
            y2=entry["y2"] - offset_y
        )
        for entry in json_dict["bounding_boxes"]
    ]

    if bb_extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)

    bbsoi = BoundingBoxesOnImage(bbs, shape=shape)
    if size is not None:
        bbsoi = bbsoi.on(_compute_resized_shape(shape, size))
    return bbsoi
def angle_between_vectors(v1, v2):
    """
    Return the angle in radians between vectors `v1` and `v2`.

    Based on http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python

    Parameters
    ----------
    v1, v2 : (N,) ndarray
        Input vectors.

    Returns
    -------
    out : float
        Angle in radians.

    Examples
    --------
    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([0, 1, 0]))
    1.5707963267948966
    """
    unit1 = v1 / np.linalg.norm(v1)
    unit2 = v2 / np.linalg.norm(v2)
    # Clip to [-1, 1] to guard against floating point drift outside
    # arccos's domain.
    cos_angle = np.clip(np.dot(unit1, unit2), -1.0, 1.0)
    return np.arccos(cos_angle)
def draw_text(img, y, x, text, color=[0, 255, 0], size=25): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
    """
    Draw text on an image.

    This uses by default DejaVuSans as its font, which is included in the
    library.

    Parameters
    ----------
    img : (H,W,3) ndarray
        The image array to draw text on.
        Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
    y, x : int
        x- and y- coordinate of the top left corner of the text.
    color : iterable of 3 ints/floats, optional(default=[0, 255, 0])
        Color of the text to draw; float channels are interpreted as
        fractions of 255. For RGB-images this is expected to be an RGB color.
    size : int, optional(default=25)
        Font size of the text to draw.

    Returns
    -------
    img_np : (H,W,3) ndarray
        Input image with text drawn on it. The caller's `img` and `color`
        are not modified.
    """
    # keeping PIL here so that it is not a dependency of the library right now
    from PIL import Image, ImageDraw, ImageFont

    do_assert(img.dtype in [np.uint8, np.float32])
    input_dtype = img.dtype
    if img.dtype == np.float32:
        img = img.astype(np.uint8)

    # BUGFIX: normalize the color into a new list instead of writing the
    # clipped values back into the caller's list, which previously mutated
    # caller state (and the shared default list object).
    color_normalized = []
    for channel in color:
        if isinstance(channel, float):
            channel = int(channel * 255)
        color_normalized.append(int(np.clip(channel, 0, 255)))

    img = Image.fromarray(img)
    font = ImageFont.truetype(DEFAULT_FONT_FP, size)
    context = ImageDraw.Draw(img)
    context.text((x, y), text, fill=tuple(color_normalized), font=font)
    img_np = np.asarray(img)
    img_np.setflags(write=True)  # PIL/asarray returns read only array

    if img_np.dtype != input_dtype:
        img_np = img_np.astype(input_dtype)
    return img_np
# TODO rename sizes to size?
def imresize_many_images(images, sizes=None, interpolation=None):
    """
    Resize many images to a specified size.

    Parameters
    ----------
    images : (N,H,W,C) ndarray
        Array of the images to resize.
        Expected to usually be of dtype uint8.
    sizes : float or iterable of two ints or iterable of two floats
        The new size of the images, given either as a fraction (a single
        float) or as a (height, width) tuple of two integers or as a
        (height fraction, width fraction) tuple of two floats.
    interpolation : None or string or int, optional(default=None)
        The interpolation to use during resize.
        If int, then expected to be one of:
            * cv2.INTER_NEAREST (nearest neighbour interpolation)
            * cv2.INTER_LINEAR (linear interpolation)
            * cv2.INTER_AREA (area interpolation)
            * cv2.INTER_CUBIC (cubic interpolation)
        If string, then expected to be one of:
            * "nearest" (identical to cv2.INTER_NEAREST)
            * "linear" (identical to cv2.INTER_LINEAR)
            * "area" (identical to cv2.INTER_AREA)
            * "cubic" (identical to cv2.INTER_CUBIC)
        If None, the interpolation will be chosen automatically. For size
        increases, area interpolation will be picked and for size decreases,
        linear interpolation will be picked.

    Returns
    -------
    result : (N,H',W',C) ndarray
        Array of the resized images.

    Examples
    --------
    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), 2.0)
    Converts 2 RGB images of height and width 16 to images of height and width 16*2 = 32.
    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (16, 32))
    Converts 2 RGB images of height and width 16 to images of height 16 and width 32.
    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (2.0, 4.0))
    Converts 2 RGB images of height and width 16 to images of height 32 and width 64.
    """
    shape = images.shape
    do_assert(images.ndim == 4, "Expected array of shape (N, H, W, C), got shape %s" % (str(shape),))
    nb_images = shape[0]
    im_height, im_width = shape[1], shape[2]
    nb_channels = shape[3]
    # Resolve `sizes` into absolute target height/width.
    if is_single_float(sizes):
        do_assert(sizes > 0.0)
        height = int(round(im_height * sizes))
        width = int(round(im_width * sizes))
    else:
        do_assert(len(sizes) == 2)
        # A (H, W) pair must be uniformly ints (absolute) or floats (relative).
        all_int = all([is_single_integer(size) for size in sizes])
        all_float = all([is_single_float(size) for size in sizes])
        do_assert(all_int or all_float)
        if all_int:
            height, width = sizes[0], sizes[1]
        else:
            height = int(round(im_height * sizes[0]))
            width = int(round(im_width * sizes[1]))
    # No-op resize: return a copy so callers can safely mutate the result.
    if height == im_height and width == im_width:
        return np.copy(images)
    # Normalize the interpolation argument to a cv2 flag.
    ip = interpolation
    do_assert(ip is None or ip in ["nearest", "linear", "area", "cubic", cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC])
    if ip is None:
        # NOTE(review): picks AREA when either dimension grows, LINEAR
        # otherwise -- confirm that matches the docstring's intent, since
        # cv2's guidance usually recommends AREA for shrinking.
        if height > im_height or width > im_width:
            ip = cv2.INTER_AREA
        else:
            ip = cv2.INTER_LINEAR
    elif ip in ["nearest", cv2.INTER_NEAREST]:
        ip = cv2.INTER_NEAREST
    elif ip in ["linear", cv2.INTER_LINEAR]:
        ip = cv2.INTER_LINEAR
    elif ip in ["area", cv2.INTER_AREA]:
        ip = cv2.INTER_AREA
    else: # if ip in ["cubic", cv2.INTER_CUBIC]:
        ip = cv2.INTER_CUBIC
    result = np.zeros((nb_images, height, width, nb_channels), dtype=images.dtype)
    for img_idx in sm.xrange(nb_images):
        # TODO fallback to scipy here if image isn't uint8
        result_img = cv2.resize(images[img_idx], (width, height), interpolation=ip)
        # cv2.resize drops a size-1 channel axis; restore it.
        if len(result_img.shape) == 2:
            result_img = result_img[:, :, np.newaxis]
        result[img_idx] = result_img.astype(images.dtype)
    return result
def imresize_single_image(image, sizes, interpolation=None):
    """
    Resizes a single image.
    Parameters
    ----------
    image : (H,W,C) ndarray or (H,W) ndarray
        Array of the image to resize.
        Expected to usually be of dtype uint8.
    sizes : float or iterable of two ints or iterable of two floats
        See `imresize_many_images()`.
    interpolation : None or string or int, optional(default=None)
        See `imresize_many_images()`.
    Returns
    -------
    out : (H',W',C) ndarray or (H',W') ndarray
        The resized image.
    """
    # Remember whether the input had no channel axis so the output can be
    # squeezed back to 2d at the end.
    had_no_channels = (image.ndim == 2)
    if had_no_channels:
        image = image[:, :, np.newaxis]
    do_assert(len(image.shape) == 3, image.shape)
    # Delegate to the batch version by adding a singleton batch axis.
    resized = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
    if had_no_channels:
        return np.squeeze(resized[0, :, :, 0])
    return resized[0, ...]
def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
    """
    Pad an image-like array on its top/right/bottom/left side.
    This function is a wrapper around `numpy.pad()`.
    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pad.
    top : int, optional(default=0)
        Amount of pixels to add at the top side of the image. Must be 0 or greater.
    right : int, optional(default=0)
        Amount of pixels to add at the right side of the image. Must be 0 or greater.
    bottom : int, optional(default=0)
        Amount of pixels to add at the bottom side of the image. Must be 0 or greater.
    left : int, optional(default=0)
        Amount of pixels to add at the left side of the image. Must be 0 or greater.
    mode : string, optional(default="constant")
        Padding mode to use. See `numpy.pad()` for details.
    cval : number, optional(default=0)
        Value to use for padding if mode="constant". See `numpy.pad()` for details.
    Returns
    -------
    arr_pad : (H',W') or (H',W',C) ndarray
        Padded array with height H'=H+top+bottom and width W'=W+left+right.
    """
    assert arr.ndim in [2, 3]
    assert top >= 0
    assert right >= 0
    assert bottom >= 0
    assert left >= 0
    if top == 0 and right == 0 and bottom == 0 and left == 0:
        # Nothing to pad, but still return a copy so callers may mutate freely.
        return np.copy(arr)
    # (before, after) pairs per axis: height, width, and (for 3d) channels.
    pad_widths = [(top, bottom), (left, right)]
    if arr.ndim == 3:
        pad_widths.append((0, 0))
    if mode == "constant":
        # constant_values is only a valid keyword for mode="constant".
        return np.pad(arr, pad_widths, mode=mode, constant_values=cval)
    return np.pad(arr, pad_widths, mode=mode)
def compute_paddings_for_aspect_ratio(arr, aspect_ratio):
    """
    Compute the amount of pixels by which an array has to be padded to fulfill an aspect ratio.
    The aspect ratio is given as width/height.
    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.
    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array for which to compute pad amounts.
    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.
    Returns
    -------
    result : tuple of ints
        Required padding amounts to reach the target aspect ratio, given as a tuple
        of the form (top, right, bottom, left).
    """
    assert arr.ndim in [2, 3]
    assert aspect_ratio > 0
    height, width = arr.shape[0:2]
    assert height > 0
    ratio_now = width / height
    pad_top = pad_right = pad_bottom = pad_left = 0
    if ratio_now < aspect_ratio:
        # Image is too narrow for the target ratio: pad left/right.
        missing = (aspect_ratio * height) - width
        pad_right = int(np.ceil(missing / 2))
        pad_left = int(np.floor(missing / 2))
    elif ratio_now > aspect_ratio:
        # Image is too wide for the target ratio: pad top/bottom.
        missing = ((1/aspect_ratio) * width) - height
        pad_top = int(np.ceil(missing / 2))
        pad_bottom = int(np.floor(missing / 2))
    return (pad_top, pad_right, pad_bottom, pad_left)
def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0, return_pad_amounts=False):
    """
    Pad an image-like array on its sides so that it matches a target aspect ratio.
    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.
    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pad.
    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.
    mode : string, optional(default="constant")
        Padding mode to use. See `numpy.pad()` for details.
    cval : number, optional(default=0)
        Value to use for padding if mode="constant". See `numpy.pad()` for details.
    return_pad_amounts : bool, optional(default=False)
        If False, then only the padded image will be returned. If True, a tuple with two
        entries will be returned, where the first entry is the padded image and the second
        entry are the amounts by which each image side was padded. These amounts are again a
        tuple of the form (top, right, bottom, left), with each value being an integer.
    Returns
    -------
    result : tuple
        First tuple entry: Padded image as (H',W') or (H',W',C) ndarray, fulfilling the given
        aspect_ratio.
        Second tuple entry: Amounts by which the image was padded on each side, given
        as a tuple (top, right, bottom, left).
        If return_pad_amounts is False, then only the image is returned.
    """
    # Determine per-side amounts first, then delegate the actual padding.
    pad_amounts = compute_paddings_for_aspect_ratio(arr, aspect_ratio)
    top, right, bottom, left = pad_amounts
    arr_padded = pad(
        arr,
        top=top,
        right=right,
        bottom=bottom,
        left=left,
        mode=mode,
        cval=cval
    )
    if return_pad_amounts:
        return arr_padded, pad_amounts
    return arr_padded
def pool(arr, block_size, func, cval=0, preserve_dtype=True):
    """
    Rescale an array by pooling values within blocks.
    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pool. Ideally of datatype np.float64.
    block_size : int or tuple of two ints or tuple of three ints
        Spatial size of each group of each values to pool, aka kernel size.
        If a single integer, then a symmetric block of that size along height and width will
        be used.
        If a tuple of two values, it is assumed to be the block size along height and width
        of the image-like, with pooling happening per channel.
        If a tuple of three values, it is assumed to be the block size along height, width and
        channels.
    func : callable
        Function to apply to a given block in order to convert it to a single number,
        e.g. np.average, np.min, np.max.
    cval : number, optional(default=0)
        Value to use in order to pad the array along its border if the array cannot be divided
        by block_size without remainder.
    preserve_dtype : bool, optional(default=True)
        Whether to convert the array back to the input datatype if it is changed away from
        that in the pooling process.
    Returns
    -------
    arr_reduced : (H',W') or (H',W',C') ndarray
        Array after pooling.
    """
    assert arr.ndim in [2, 3]
    is_valid_int = is_single_integer(block_size) and block_size >= 1
    # BUG FIX: the per-element checks must be reduced with all(); previously
    # the bare list comprehension was always truthy for 2/3-length inputs,
    # so invalid entries (e.g. floats or values < 1) slipped through.
    is_valid_tuple = is_iterable(block_size) and len(block_size) in [2, 3] \
        and all([is_single_integer(val) and val >= 1 for val in block_size])
    assert is_valid_int or is_valid_tuple
    if is_single_integer(block_size):
        block_size = [block_size, block_size]
    if len(block_size) < arr.ndim:
        # Pool per channel when no channel block size was given.
        block_size = list(block_size) + [1]
    input_dtype = arr.dtype
    arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func, cval=cval)
    if preserve_dtype and arr_reduced.dtype.type != input_dtype:
        arr_reduced = arr_reduced.astype(input_dtype)
    return arr_reduced
def avg_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Rescale an array using average pooling.
    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pool. See `pool()` for details.
    block_size : int or tuple of two ints or tuple of three ints
        Size of each block of values to pool. See `pool()` for details.
    cval : number, optional(default=0)
        Padding value. See `pool()` for details.
    preserve_dtype : bool, optional(default=True)
        Whether to preserve the input array dtype. See `pool()` for details.
    Returns
    -------
    arr_reduced : (H',W') or (H',W',C') ndarray
        Array after average pooling.
    """
    # Thin wrapper: pooling with np.average as the reduction function.
    pooled = pool(arr, block_size, np.average, cval=cval, preserve_dtype=preserve_dtype)
    return pooled
def max_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Rescale an array using max-pooling.
    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pool. See `pool()` for details.
    block_size : int or tuple of two ints or tuple of three ints
        Size of each block of values to pool. See `pool()` for details.
    cval : number, optional(default=0)
        Padding value. See `pool()` for details.
    preserve_dtype : bool, optional(default=True)
        Whether to preserve the input array dtype. See `pool()` for details.
    Returns
    -------
    arr_reduced : (H',W') or (H',W',C') ndarray
        Array after max-pooling.
    """
    # Thin wrapper: pooling with np.max as the reduction function.
    pooled = pool(arr, block_size, np.max, cval=cval, preserve_dtype=preserve_dtype)
    return pooled
def draw_grid(images, rows=None, cols=None):
    """
    Converts multiple input images into a single image showing them in a grid.
    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        The input images to convert to a grid.
        Expected to be RGB and have dtype uint8.
    rows : None or int, optional(default=None)
        The number of rows to show in the grid.
        If None, it will be automatically derived.
    cols : None or int, optional(default=None)
        The number of cols to show in the grid.
        If None, it will be automatically derived.
    Returns
    -------
    grid : (H',W',3) ndarray
        Image of the generated grid.
    """
    if is_np_array(images):
        do_assert(images.ndim == 4)
    else:
        do_assert(is_iterable(images) and is_np_array(images[0]) and images[0].ndim == 3)
    nb_images = len(images)
    do_assert(nb_images > 0)
    # Cell size must fit the largest image; smaller images are placed
    # top-left inside their cell, the rest stays black.
    cell_height = max(image.shape[0] for image in images)
    cell_width = max(image.shape[1] for image in images)
    channels = set(image.shape[2] for image in images)
    do_assert(len(channels) == 1, "All images are expected to have the same number of channels, but got channel set %s with length %d instead." % (str(channels), len(channels)))
    nb_channels = list(channels)[0]
    # Derive missing grid dimensions. If only one of rows/cols is given,
    # the other is chosen so that all images fit.
    if rows is None and cols is None:
        rows = cols = int(math.ceil(math.sqrt(nb_images)))
    elif rows is not None:
        cols = int(math.ceil(nb_images / rows))
    elif cols is not None:
        rows = int(math.ceil(nb_images / cols))
    do_assert(rows * cols >= nb_images)
    grid = np.zeros((cell_height * rows, cell_width * cols, nb_channels), dtype=np.uint8)
    for cell_idx, image in enumerate(images):
        row_idx = cell_idx // cols
        col_idx = cell_idx % cols
        cell_y1 = cell_height * row_idx
        cell_x1 = cell_width * col_idx
        grid[cell_y1:cell_y1 + image.shape[0], cell_x1:cell_x1 + image.shape[1], :] = image
    return grid
def show_grid(images, rows=None, cols=None):
    """
    Converts the input images to a grid image and shows it in a new window.
    This function wraps around scipy.misc.imshow(), which requires the
    `see <image>` command to work. On Windows systems, this tends to not be
    the case.
    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        See `draw_grid()`.
    rows : None or int, optional(default=None)
        See `draw_grid()`.
    cols : None or int, optional(default=None)
        See `draw_grid()`.
    """
    # Build the grid image, then display it via scipy.
    misc.imshow(draw_grid(images, rows=rows, cols=cols))
def do_assert(condition, message="Assertion failed."):
    """
    Function that behaves equally to an `assert` statement, but raises an
    Exception.
    This is added because `assert` statements are removed in optimized code.
    It replaces `assert` statements throughout the library that should be
    kept even in optimized code.
    Parameters
    ----------
    condition : bool
        If False, an exception is raised.
    message : string, optional(default="Assertion failed.")
        Error message.
    """
    if condition:
        return
    raise AssertionError(str(message))
class HooksImages(object):
    """
    Class to intervene with image augmentation runs.
    This is e.g. useful to dynamically deactivate some augmenters.
    Parameters
    ----------
    activator : None or callable, optional(default=None)
        A function that gives permission to execute an augmenter.
        The expected interface is `f(images, augmenter, parents, default)`,
        where `images` are the input images to augment, `augmenter` is the
        instance of the augmenter to execute, `parents` are previously
        executed augmenters and `default` is an expected default value to be
        returned if the activator function does not plan to make a decision
        for the given inputs.
    propagator : None or callable, optional(default=None)
        A function that gives permission to propagate the augmentation further
        to the children of an augmenter. This happens after the activator.
        In theory, an augmenter may augment images itself (if allowed by the
        activator) and then execute child augmenters afterwards (if allowed by
        the propagator). If the activator returned False, the propagation step
        will never be executed.
        The expected interface is `f(images, augmenter, parents, default)`,
        with all arguments having identical meaning to the activator.
    preprocessor : None or callable, optional(default=None)
        A function to call before an augmenter performed any augmentations.
        The interface is `f(images, augmenter, parents)`,
        with all arguments having identical meaning to the activator.
        It is expected to return the input images, optionally modified.
    postprocessor : None or callable, optional(default=None)
        A function to call after an augmenter performed augmentations.
        The interface is the same as for the preprocessor.
    Examples
    --------
    >>> seq = iaa.Sequential([
    >>>     iaa.GaussianBlur(3.0, name="blur"),
    >>>     iaa.Dropout(0.05, name="dropout"),
    >>>     iaa.Affine(translate_px=-5, name="affine")
    >>> ])
    >>>
    >>> def activator(images, augmenter, parents, default):
    >>>     return False if augmenter.name in ["blur", "dropout"] else default
    >>>
    >>> seq_det = seq.to_deterministic()
    >>> images_aug = seq_det.augment_images(images)
    >>> heatmaps_aug = seq_det.augment_images(
    >>>     heatmaps,
    >>>     hooks=ia.HooksImages(activator=activator)
    >>> )
    This augments images and their respective heatmaps in the same way.
    The heatmaps however are only modified by Affine, not by GaussianBlur or
    Dropout.
    """
    def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None):
        self.activator = activator
        self.propagator = propagator
        self.preprocessor = preprocessor
        self.postprocessor = postprocessor
    def is_activated(self, images, augmenter, parents, default):
        """
        Returns whether an augmenter may be executed.
        Returns
        -------
        out : bool
            If True, the augmenter may be executed. If False, it may
            not be executed.
        """
        # No activator registered means the caller's default decides.
        if self.activator is None:
            return default
        return self.activator(images, augmenter, parents, default)
    # TODO is a propagating hook necessary? seems to be covered by activated
    # hook already
    def is_propagating(self, images, augmenter, parents, default):
        """
        Returns whether an augmenter may call its children to augment an
        image. This is independent of the augmenter itself possible changing
        the image, without calling its children. (Most (all?) augmenters with
        children currently dont perform any changes themselves.)
        Returns
        -------
        out : bool
            If True, the augmenter may be propagate to its children.
            If False, it may not.
        """
        if self.propagator is None:
            return default
        return self.propagator(images, augmenter, parents, default)
    def preprocess(self, images, augmenter, parents):
        """
        A function to be called before the augmentation of images starts (per
        augmenter).
        Returns
        -------
        out : (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.
        """
        # With no preprocessor, the images pass through unchanged.
        if self.preprocessor is None:
            return images
        return self.preprocessor(images, augmenter, parents)
    def postprocess(self, images, augmenter, parents):
        """
        A function to be called after the augmentation of images was
        performed.
        Returns
        -------
        out : (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.
        """
        if self.postprocessor is None:
            return images
        return self.postprocessor(images, augmenter, parents)
class HooksHeatmaps(HooksImages):
    """
    Class to intervene with heatmap augmentation runs.
    This is e.g. useful to dynamically deactivate some augmenters.
    This class is currently the same as the one for images. This may or may
    not change in the future.
    """
class HooksKeypoints(HooksImages):
    """
    Class to intervene with keypoint augmentation runs.
    This is e.g. useful to dynamically deactivate some augmenters.
    This class is currently the same as the one for images. This may or may
    not change in the future.
    """
class Keypoint(object):
    """
    A single keypoint (aka landmark) on an image.
    Parameters
    ----------
    x : number
        Coordinate of the keypoint on the x axis.
    y : number
        Coordinate of the keypoint on the y axis.
    """
    def __init__(self, x, y):
        # Type checks are intentionally omitted here; they proved to be
        # very slow when instantiating many keypoints.
        self.x = x
        self.y = y
    @property
    def x_int(self):
        """
        Return the keypoint's x-coordinate, rounded to the closest integer.
        Returns
        -------
        result : int
            Keypoint's x-coordinate, rounded to the closest integer.
        """
        return int(round(self.x))
    @property
    def y_int(self):
        """
        Return the keypoint's y-coordinate, rounded to the closest integer.
        Returns
        -------
        result : int
            Keypoint's y-coordinate, rounded to the closest integer.
        """
        return int(round(self.y))
    def project(self, from_shape, to_shape):
        """
        Project the keypoint onto a new position on a new image.
        E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
        and y=(20 of 100 pixels) and is projected onto a new image with
        size (width=200, height=200), its new position will be (20, 40).
        This is intended for cases where the original image is resized.
        It cannot be used for more complex changes (e.g. padding, cropping).
        Parameters
        ----------
        from_shape : tuple
            Shape of the original image. (Before resize.)
        to_shape : tuple
            Shape of the new image. (After resize.)
        Returns
        -------
        out : Keypoint
            Keypoint object with new coordinates.
        """
        # Same spatial size: return an unchanged copy.
        if from_shape[0:2] == to_shape[0:2]:
            return Keypoint(x=self.x, y=self.y)
        from_height, from_width = from_shape[0:2]
        to_height, to_width = to_shape[0:2]
        return Keypoint(
            x=(self.x / from_width) * to_width,
            y=(self.y / from_height) * to_height
        )
    def shift(self, x=0, y=0):
        """
        Move the keypoint around on an image.
        Parameters
        ----------
        x : number, optional(default=0)
            Move by this value on the x axis.
        y : number, optional(default=0)
            Move by this value on the y axis.
        Returns
        -------
        out : Keypoint
            Keypoint object with new coordinates.
        """
        return Keypoint(self.x + x, self.y + y)
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return "Keypoint(x=%.8f, y=%.8f)" % (self.x, self.y)
class KeypointsOnImage(object):
    """
    Object that represents all keypoints on a single image.
    Parameters
    ----------
    keypoints : list of Keypoint
        List of keypoints on the image.
    shape : tuple of int
        The shape of the image on which the keypoints are placed.
    Examples
    --------
    >>> kps = [Keypoint(x=10, y=20), Keypoint(x=34, y=60)]
    >>> kps_oi = KeypointsOnImage(kps, shape=image.shape)
    """
    def __init__(self, keypoints, shape):
        #assert len(shape) == 3, "KeypointsOnImage requires shape tuples of form (H, W, C) but got %s. Use C=1 for 2-dimensional images." % (str(shape),)
        self.keypoints = keypoints
        # An image array may be passed instead of a shape tuple; in that
        # case its shape is used.
        if is_np_array(shape):
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)
    @property
    def height(self):
        return self.shape[0]
    @property
    def width(self):
        return self.shape[1]
    def on(self, image):
        """
        Project keypoints from one image to a new one.
        Parameters
        ----------
        image : ndarray or tuple
            New image onto which the keypoints are to be projected.
            May also simply be that new image's shape tuple.
        Returns
        -------
        keypoints : KeypointsOnImage
            Object containing all projected keypoints.
        """
        if is_np_array(image):
            shape = image.shape
        else:
            shape = image
        # Same spatial size: nothing to project, return a deep copy.
        if shape[0:2] == self.shape[0:2]:
            return self.deepcopy()
        else:
            keypoints = [kp.project(self.shape, shape) for kp in self.keypoints]
            return KeypointsOnImage(keypoints, shape)
    def draw_on_image(self, image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False):  # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Draw all keypoints onto a given image. Each keypoint is marked by a
        square of a chosen color and size.
        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the keypoints.
            This image should usually have the same shape as
            set in KeypointsOnImage.shape.
        color : int or list of ints or tuple of ints or (3,) ndarray, optional(default=[0, 255, 0])
            The RGB color of all keypoints. If a single int `C`, then that is
            equivalent to (C,C,C).
        size : int, optional(default=3)
            The size of each point. If set to C, each square will have
            size CxC.
        copy : bool, optional(default=True)
            Whether to copy the image before drawing the points.
        raise_if_out_of_image : bool, optional(default=False)
            Whether to raise an exception if any keypoint is outside of the
            image.
        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn keypoints.
        """
        if copy:
            image = np.copy(image)
        height, width = image.shape[0:2]
        for keypoint in self.keypoints:
            y, x = keypoint.y_int, keypoint.x_int
            if 0 <= y < height and 0 <= x < width:
                # Clip the square marker to the image borders.
                x1 = max(x - size//2, 0)
                x2 = min(x + 1 + size//2, width)
                y1 = max(y - size//2, 0)
                y2 = min(y + 1 + size//2, height)
                image[y1:y2, x1:x2] = color
            else:
                if raise_if_out_of_image:
                    # BUG FIX: the arguments were previously passed as
                    # (y, x), swapping the reported coordinates relative to
                    # the "x=..., y=..." labels in the message.
                    raise Exception("Cannot draw keypoint x=%.8f, y=%.8f on image with shape %s." % (x, y, image.shape))
        return image
    def shift(self, x=0, y=0):
        """
        Move the keypoints around on an image.
        Parameters
        ----------
        x : number, optional(default=0)
            Move each keypoint by this value on the x axis.
        y : number, optional(default=0)
            Move each keypoint by this value on the y axis.
        Returns
        -------
        out : KeypointsOnImage
            Keypoints after moving them.
        """
        keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]
        return KeypointsOnImage(keypoints, self.shape)
    def get_coords_array(self):
        """
        Convert the coordinates of all keypoints in this object to
        an array of shape (N,2).
        Returns
        -------
        result : (N, 2) ndarray
            Where N is the number of keypoints. Each first value is the
            x coordinate, each second value is the y coordinate.
        """
        result = np.zeros((len(self.keypoints), 2), np.float32)
        for i, keypoint in enumerate(self.keypoints):
            result[i, 0] = keypoint.x
            result[i, 1] = keypoint.y
        return result
    @staticmethod
    def from_coords_array(coords, shape):
        """
        Convert an array (N,2) with a given image shape to a KeypointsOnImage
        object.
        Parameters
        ----------
        coords : (N, 2) ndarray
            Coordinates of N keypoints on the original image.
            Each first entry (i, 0) is expected to be the x coordinate.
            Each second entry (i, 1) is expected to be the y coordinate.
        shape : tuple
            Shape tuple of the image on which the keypoints are placed.
        Returns
        -------
        out : KeypointsOnImage
            KeypointsOnImage object that contains all keypoints from the array.
        """
        keypoints = [Keypoint(x=coords[i, 0], y=coords[i, 1]) for i in sm.xrange(coords.shape[0])]
        return KeypointsOnImage(keypoints, shape)
    def to_keypoint_image(self, size=1):
        """
        Draws a new black image of shape (H,W,N) in which all keypoint coordinates
        are set to 255.
        (H=shape height, W=shape width, N=number of keypoints)
        This function can be used as a helper when augmenting keypoints with
        a method that only supports the augmentation of images.
        Parameters
        -------
        size : int
            Size of each (squared) point.
        Returns
        -------
        image : (H,W,N) ndarray
            Image in which the keypoints are marked. H is the height,
            defined in KeypointsOnImage.shape[0] (analogous W). N is the
            number of keypoints.
        """
        do_assert(len(self.keypoints) > 0)
        height, width = self.shape[0:2]
        image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
        # size must be odd so the square is centered on the keypoint pixel.
        do_assert(size % 2 != 0)
        sizeh = max(0, (size-1)//2)
        for i, keypoint in enumerate(self.keypoints):
            # TODO for float values spread activation over several cells
            # here and do voting at the end
            y = keypoint.y_int
            x = keypoint.x_int
            x1 = np.clip(x - sizeh, 0, width-1)
            x2 = np.clip(x + sizeh + 1, 0, width)
            y1 = np.clip(y - sizeh, 0, height-1)
            y2 = np.clip(y + sizeh + 1, 0, height)
            # Surrounding square is marked with 128, the exact keypoint
            # pixel (if inside the image) with 255.
            if x1 < x2 and y1 < y2:
                image[y1:y2, x1:x2, i] = 128
            if 0 <= y < height and 0 <= x < width:
                image[y, x, i] = 255
        return image
    @staticmethod
    def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1, nb_channels=None):  # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Converts an image generated by `to_keypoint_image()` back to
        an KeypointsOnImage object.
        Parameters
        ----------
        image : (H,W,N) ndarray
            The keypoints image. N is the number of
            keypoints.
        if_not_found_coords : tuple or list or dict or None
            Coordinates to use for keypoints that cannot be found in `image`.
            If this is a list/tuple, it must have two integer values. If it
            is a dictionary, it must have the keys "x" and "y". If this
            is None, then the keypoint will not be added to the final
            KeypointsOnImage object.
        threshold : int
            The search for keypoints works by searching for the argmax in
            each channel. This parameters contains the minimum value that
            the max must have in order to be viewed as a keypoint.
        nb_channels : None or int
            Number of channels of the image on which the keypoints are placed.
            Some keypoint augmenters require that information.
            If set to None, the keypoint's shape will be set
            to `(height, width)`, otherwise `(height, width, nb_channels)`.
        Returns
        -------
        out : KeypointsOnImage
            The extracted keypoints.
        """
        do_assert(len(image.shape) == 3)
        height, width, nb_keypoints = image.shape
        drop_if_not_found = False
        if if_not_found_coords is None:
            drop_if_not_found = True
            if_not_found_x = -1
            if_not_found_y = -1
        elif isinstance(if_not_found_coords, (tuple, list)):
            do_assert(len(if_not_found_coords) == 2)
            if_not_found_x = if_not_found_coords[0]
            if_not_found_y = if_not_found_coords[1]
        elif isinstance(if_not_found_coords, dict):
            if_not_found_x = if_not_found_coords["x"]
            if_not_found_y = if_not_found_coords["y"]
        else:
            raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (type(if_not_found_coords),))
        keypoints = []
        for i in sm.xrange(nb_keypoints):
            # The keypoint is the location of the per-channel maximum, if
            # that maximum is strong enough.
            maxidx_flat = np.argmax(image[..., i])
            maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
            found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
            if found:
                keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
            else:
                if drop_if_not_found:
                    pass  # dont add the keypoint to the result list, i.e. drop it
                else:
                    keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))
        out_shape = (height, width)
        if nb_channels is not None:
            out_shape += (nb_channels,)
        return KeypointsOnImage(keypoints, shape=out_shape)
    def copy(self):
        """
        Create a shallow copy of the KeypointsOnImage object.
        Returns
        -------
        out : KeypointsOnImage
            Shallow copy.
        """
        return copy.copy(self)
    def deepcopy(self):
        """
        Create a deep copy of the KeypointsOnImage object.
        Returns
        -------
        out : KeypointsOnImage
            Deep copy.
        """
        # for some reason deepcopy is way slower here than manual copy
        #return copy.deepcopy(self)
        kps = [Keypoint(x=kp.x, y=kp.y) for kp in self.keypoints]
        return KeypointsOnImage(kps, tuple(self.shape))
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return "KeypointsOnImage(%s, shape=%s)" % (str(self.keypoints), self.shape)
# TODO functions: square(), to_aspect_ratio(), extend()/add_border(), contains_point()
class BoundingBox(object):
"""
Class representing bounding boxes.
Each bounding box is parameterized by its top left and bottom right corners. Both are given
as x and y-coordinates.
Parameters
----------
x1 : number
X-coordinate of the top left of the bounding box.
y1 : number
Y-coordinate of the top left of the bounding box.
x2 : number
X-coordinate of the bottom right of the bounding box.
y2 : number
Y-coordinate of the bottom right of the bounding box.
label : None or string, optional(default=None)
Label of the bounding box, e.g. a string representing the class.
"""
def __init__(self, x1, y1, x2, y2, label=None):
"""Create a new BoundingBox instance."""
if x1 > x2:
x2, x1 = x1, x2
do_assert(x2 > x1)
if y1 > y2:
y2, y1 = y1, y2
do_assert(y2 > y1)
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.label = label
@property
def x1_int(self):
"""
Return the x-coordinate of the top left corner as an integer.
Returns
-------
result : int
X-coordinate of the top left corner, rounded to the closest integer.
"""
return int(round(self.x1))
@property
def y1_int(self):
"""
Return the y-coordinate of the top left corner as an integer.
Returns
-------
result : int
Y-coordinate of the top left corner, rounded to the closest integer.
"""
return int(round(self.y1))
    @property
    def x2_int(self):
        """
        Return the x-coordinate of the bottom right corner as an integer.
        Returns
        -------
        result : int
            X-coordinate of the bottom right corner, rounded to the closest integer.
        """
        return int(round(self.x2))
    @property
    def y2_int(self):
        """
        Return the y-coordinate of the bottom right corner as an integer.
        Returns
        -------
        result : int
            Y-coordinate of the bottom right corner, rounded to the closest integer.
        """
        return int(round(self.y2))
    @property
    def height(self):
        """
        Estimate the height of the bounding box.
        Returns
        -------
        result : number
            Height of the bounding box.
        """
        # __init__ guarantees y2 > y1, so this is always positive.
        return self.y2 - self.y1
    @property
    def width(self):
        """
        Estimate the width of the bounding box.
        Returns
        -------
        result : number
            Width of the bounding box.
        """
        # __init__ guarantees x2 > x1, so this is always positive.
        return self.x2 - self.x1
    @property
    def center_x(self):
        """
        Estimate the x-coordinate of the center point of the bounding box.
        Returns
        -------
        result : number
            X-coordinate of the center point of the bounding box.
        """
        # Midpoint between the left edge and the right edge.
        return self.x1 + self.width/2
    @property
    def center_y(self):
        """
        Estimate the y-coordinate of the center point of the bounding box.
        Returns
        -------
        result : number
            Y-coordinate of the center point of the bounding box.
        """
        # Midpoint between the top edge and the bottom edge.
        return self.y1 + self.height/2
    @property
    def area(self):
        """
        Estimate the area of the bounding box.
        Returns
        -------
        result : number
            Area of the bounding box, i.e. `height * width`.
        """
        return self.height * self.width
def project(self, from_shape, to_shape):
"""
Project the bounding box onto a new position on a new image.
E.g. if the bounding box is on its original image at
x1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto
a new image with size (width=200, height=200), its new position will
be (x1=20, y1=40). (Analogous for x2/y2.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple
Shape of the original image. (Before resize.)
to_shape : tuple
Shape of the new image. (After resize.)
Returns
-------
out : BoundingBox
BoundingBox object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return self.copy()
else:
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
do_assert(from_height > 0)
do_assert(from_width > 0)
do_assert(to_height > 0)
do_assert(to_width > 0)
x1 = (self.x1 / from_width) * to_width
y1 = (self.y1 / from_height) * to_height
x2 = (self.x2 / from_width) * to_width
y2 = (self.y2 / from_height) * to_height
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
"""
Extend the size of the bounding box along its sides.
Parameters
----------
all_sides : number, optional(default=0)
Value by which to extend the bounding box size along all sides.
top : number, optional(default=0)
Value by which to extend the bounding box size along its top side.
right : number, optional(default=0)
Value by which to extend the bounding box size along its right side.
bottom : number, optional(default=0)
Value by which to extend the bounding box size along its bottom side.
left : number, optional(default=0)
Value by which to extend the bounding box size along its left side.
Returns
-------
result : BoundingBox
Extended bounding box.
"""
return BoundingBox(
x1=self.x1 - all_sides - left,
x2=self.x2 + all_sides + right,
y1=self.y1 - all_sides - top,
y2=self.y2 + all_sides + bottom
)
def intersection(self, other, default=None):
"""
Compute the intersection bounding box of this bounding box and another one.
Parameters
----------
other : BoundingBox
Other bounding box with which to generate the intersection.
Returns
-------
result : BoundingBox
Intersection bounding box of the two bounding boxes.
"""
x1_i = max(self.x1, other.x1)
y1_i = max(self.y1, other.y1)
x2_i = min(self.x2, other.x2)
y2_i = min(self.y2, other.y2)
if x1_i >= x2_i or y1_i >= y2_i:
return default
else:
return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)
def union(self, other):
"""
Compute the union bounding box of this bounding box and another one.
This is equivalent to drawing a bounding box around all corners points of both
bounding boxes.
Parameters
----------
other : BoundingBox
Other bounding box with which to generate the union.
Returns
-------
result : BoundingBox
Union bounding box of the two bounding boxes.
"""
return BoundingBox(
x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2),
)
def iou(self, other):
"""
Compute the IoU of this bounding box with another one.
IoU is the intersection over union, defined as:
area(intersection(A, B)) / area(union(A, B))
= area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))
Parameters
----------
other : BoundingBox
Other bounding box with which to compare.
Returns
-------
result : float
IoU between the two bounding boxes.
"""
inters = self.intersection(other)
if inters is None:
return 0
else:
return inters.area / (self.area + other.area - inters.area)
def is_fully_within_image(self, image):
"""
Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of at least two ints
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape.
Returns
-------
result : bool
True if the bounding box is fully inside the image area.
False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
return self.x1 >= 0 and self.x2 <= width and self.y1 >= 0 and self.y2 <= height
def is_partly_within_image(self, image):
"""
Estimate whether the bounding box is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of at least two ints
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape.
Returns
-------
result : bool
True if the bounding box is at least partially inside the image area.
False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
img_bb = BoundingBox(x1=0, x2=width, y1=0, y2=height)
return self.intersection(img_bb) is not None
def is_out_of_image(self, image, fully=True, partly=False):
"""
Estimate whether the bounding box is partially or fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of ints
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape and must contain at least two integers.
fully : bool, optional(default=True)
Whether to return True if the bounding box is fully outside fo the image area.
partly : bool, optional(default=False)
Whether to return True if the bounding box is at least partially outside fo the
image area.
Returns
-------
result : bool
True if the bounding box is partially/fully outside of the image area, depending
on defined parameters. False otherwise.
"""
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
else:
return fully
def cut_out_of_image(self, image):
"""
Cut off all parts of the bounding box that are outside of the image.
Parameters
----------
image : (H,W,...) ndarray or tuple of at least two ints
Image dimensions to use for the clipping of the bounding box. If an ndarray, its
shape will be used. If a tuple, it is assumed to represent the image shape.
Returns
-------
result : BoundingBox
Bounding box, clipped to fall within the image dimensions.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
do_assert(height > 0)
do_assert(width > 0)
x1 = np.clip(self.x1, 0, width)
x2 = np.clip(self.x2, 0, width)
y1 = np.clip(self.y1, 0, height)
y2 = np.clip(self.y2, 0, height)
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift the bounding box from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional(default=None)
Amount of pixels by which to shift the bounding box from the top.
right : None or int, optional(default=None)
Amount of pixels by which to shift the bounding box from the right.
bottom : None or int, optional(default=None)
Amount of pixels by which to shift the bounding box from the bottom.
left : None or int, optional(default=None)
Amount of pixels by which to shift the bounding box from the left.
Returns
-------
result : BoundingBox
Shifted bounding box.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
)
    def draw_on_image(self, image, color=[0, 255, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Draw the bounding box on an image.

        Parameters
        ----------
        image : (H,W,C) ndarray(uint8)
            The image onto which to draw the bounding box.

        color : iterable of int, optional(default=[0,255,0])
            The color to use, corresponding to the channel layout of the image. Usually RGB.

        alpha : float, optional(default=1.0)
            The transparency of the drawn bounding box, where 1.0 denotes no transparency and
            0.0 is invisible.

        thickness : int, optional(default=1)
            The thickness of the bounding box in pixels. If the value is larger than 1, then
            additional pixels will be added around the bounding box (i.e. extension towards the
            outside).

        copy : bool, optional(default=True)
            Whether to copy the input image or change it in-place.

        raise_if_out_of_image : bool, optional(default=False)
            Whether to raise an error if the bounding box is partially/fully outside of the
            image. If set to False, no error will be raised and only the parts inside the image
            will be drawn.

        Returns
        -------
        result : (H,W,C) ndarray(uint8)
            Image with bounding box drawn on it.
        """
        if raise_if_out_of_image and self.is_out_of_image(image):
            raise Exception("Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s." % (self.x1, self.y1, self.x2, self.y2, image.shape))
        result = np.copy(image) if copy else image
        if isinstance(color, (tuple, list)):
            color = np.uint8(color)
        for i in range(thickness):
            # Each thickness step draws one more rectangle outline, extended
            # outwards by i pixels; corners are listed clockwise starting at
            # the top left.
            y = [self.y1_int-i, self.y1_int-i, self.y2_int+i, self.y2_int+i]
            x = [self.x1_int-i, self.x2_int+i, self.x2_int+i, self.x1_int-i]
            rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
            if alpha >= 0.99:
                # Near-opaque alpha: skip blending and assign directly.
                result[rr, cc, :] = color
            else:
                if is_float_array(result):
                    # NOTE(review): the clip to [0, 255] implies float images
                    # are assumed to use a 0-255 value range here -- confirm.
                    result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                    result = np.clip(result, 0, 255)
                else:
                    # Blend in float space to avoid uint8 overflow/rounding,
                    # then restore the input dtype.
                    input_dtype = result.dtype
                    result = result.astype(np.float32)
                    result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                    result = np.clip(result, 0, 255).astype(input_dtype)
        return result
    def extract_from_image(self, image):
        """
        Extract the image pixels within the bounding box.

        This function will zero-pad the image if the bounding box is partially/fully outside of
        the image.

        Parameters
        ----------
        image : (H,W) or (H,W,C) ndarray
            The image from which to extract the pixels within the bounding box.

        Returns
        -------
        result : (H',W') or (H',W',C) ndarray
            Pixels within the bounding box. Zero-padded if the bounding box is partially/fully
            outside of the image.
        """
        pad_top = 0
        pad_right = 0
        pad_bottom = 0
        pad_left = 0
        height, width = image.shape[0], image.shape[1]
        # Work on the integer (pixel) coordinates of the box.
        x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
        # if the bb is outside of the image area, the following pads the image
        # first with black pixels until the bb is inside the image
        # and only then extracts the image area
        # TODO probably more efficient to initialize an array of zeros
        # and copy only the portions of the bb into that array that are
        # natively inside the image area
        if x1 < 0:
            # Shift the box right by the out-of-image amount and pad the same
            # amount on the left, so x1 lands at column 0 of the padded image.
            pad_left = abs(x1)
            x2 = x2 + abs(x1)
            x1 = 0
        if y1 < 0:
            # Same idea for the top side.
            pad_top = abs(y1)
            y2 = y2 + abs(y1)
            y1 = 0
        if x2 >= width:
            pad_right = x2 - (width - 1)
        if y2 >= height:
            pad_bottom = y2 - (height - 1)
        if any([val > 0 for val in [pad_top, pad_right, pad_bottom, pad_left]]):
            # Zero-pad grayscale (H,W) and multi-channel (H,W,C) images alike.
            if len(image.shape) == 2:
                image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right)), mode="constant")
            else:
                image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), mode="constant")
        return image[y1:y2, x1:x2]
# TODO also add to_heatmap
# TODO add this to BoundingBoxesOnImage
def to_keypoints(self):
"""
Convert the corners of the bounding box to keypoints (clockwise, starting at top left).
Returns
-------
result : list of Keypoint
Corners of the bounding box as keypoints.
"""
return [
Keypoint(x=self.x1, y=self.y1),
Keypoint(x=self.x2, y=self.y1),
Keypoint(x=self.x2, y=self.y2),
Keypoint(x=self.x1, y=self.y2)
]
def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a shallow copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not None, then the x1 coordinate of the copied object will be set to this value.
y1 : None or number
If not None, then the y1 coordinate of the copied object will be set to this value.
x2 : None or number
If not None, then the x2 coordinate of the copied object will be set to this value.
y2 : None or number
If not None, then the y2 coordinate of the copied object will be set to this value.
label : None or string
If not None, then the label of the copied object will be set to this value.
Returns
-------
result : BoundingBox
Shallow copy.
"""
return BoundingBox(
x1=self.x1 if x1 is None else x1,
x2=self.x2 if x2 is None else x2,
y1=self.y1 if y1 is None else y1,
y2=self.y2 if y2 is None else y2,
label=self.label if label is None else label
)
def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a deep copy of the BoundingBoxesOnImage object.
Returns
-------
out : KeypointsOnImage
Deep copy.
"""
return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)
    def __repr__(self):
        """Return the same human-readable representation as __str__()."""
        return self.__str__()

    def __str__(self):
        """Return a human-readable representation with coordinates formatted to 4 decimals."""
        return "BoundingBox(x1=%.4f, y1=%.4f, x2=%.4f, y2=%.4f, label=%s)" % (self.x1, self.y1, self.x2, self.y2, self.label)
class BoundingBoxesOnImage(object):
    """
    Object that represents all bounding boxes on a single image.

    Parameters
    ----------
    bounding_boxes : list of BoundingBox
        List of bounding boxes on the image.

    shape : tuple of int
        The shape of the image on which the bounding boxes are placed.

    Examples
    --------
    >>> bbs = [
    >>>     BoundingBox(x1=10, y1=20, x2=20, y2=30),
    >>>     BoundingBox(x1=25, y1=50, x2=30, y2=70)
    >>> ]
    >>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
    """
    def __init__(self, bounding_boxes, shape):
        self.bounding_boxes = bounding_boxes
        # `shape` may be given as an image array, in which case its shape is
        # used, or directly as a shape tuple/list.
        if is_np_array(shape):
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)

    @property
    def height(self):
        """
        Get the height of the image on which the bounding boxes fall.

        Returns
        -------
        result : int
            Image height.
        """
        return self.shape[0]

    @property
    def width(self):
        """
        Get the width of the image on which the bounding boxes fall.

        Returns
        -------
        result : int
            Image width.
        """
        return self.shape[1]

    def on(self, image):
        """
        Project bounding boxes from one image to a new one.

        Parameters
        ----------
        image : ndarray or tuple
            New image onto which the bounding boxes are to be projected.
            May also simply be that new image's shape tuple.

        Returns
        -------
        bounding_boxes : BoundingBoxesOnImage
            Object containing all projected bounding boxes.
        """
        if is_np_array(image):
            shape = image.shape
        else:
            shape = image
        # Same spatial dimensions -> no projection necessary.
        if shape[0:2] == self.shape[0:2]:
            return self.deepcopy()
        bounding_boxes = [bb.project(self.shape, shape) for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bounding_boxes, shape)

    def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
        """
        Draw all bounding boxes onto a given image.

        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the bounding boxes.
            This image should usually have the same shape as
            set in BoundingBoxesOnImage.shape.

        color : int or list of ints or tuple of ints or (3,) ndarray, optional(default=(0, 255, 0))
            The RGB color of all bounding boxes. If a single int `C`, then that is
            equivalent to (C,C,C).

        alpha : float, optional(default=1.0)
            Alpha/transparency of the drawn bounding boxes.

        thickness : int, optional(default=1)
            Thickness in pixels.

        copy : bool, optional(default=True)
            Whether to copy the image before drawing the bounding boxes.

        raise_if_out_of_image : bool, optional(default=False)
            Whether to raise an exception if any bounding box is outside of the
            image.

        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn bounding boxes.
        """
        # Draw the boxes sequentially, each one on the result of the previous
        # draw call.
        for bb in self.bounding_boxes:
            image = bb.draw_on_image(
                image,
                color=color,
                alpha=alpha,
                thickness=thickness,
                copy=copy,
                raise_if_out_of_image=raise_if_out_of_image
            )
        return image

    def remove_out_of_image(self, fully=True, partly=False):
        """
        Remove all bounding boxes that are fully or partially outside of the image.

        Parameters
        ----------
        fully : bool, optional(default=True)
            Whether to remove bounding boxes that are fully outside of the image.

        partly : bool, optional(default=False)
            Whether to remove bounding boxes that are partially outside of the image.

        Returns
        -------
        result : BoundingBoxesOnImage
            Reduced set of bounding boxes, with those that were fully/partially outside of
            the image removed.
        """
        bbs_clean = [
            bb
            for bb in self.bounding_boxes
            if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)
        ]
        return BoundingBoxesOnImage(bbs_clean, shape=self.shape)

    def cut_out_of_image(self):
        """
        Cut off all parts from all bounding boxes that are outside of the image.

        Returns
        -------
        result : BoundingBoxesOnImage
            Bounding boxes, clipped to fall within the image dimensions. Boxes
            with no overlap with the image at all are removed.
        """
        bbs_cut = [
            bb.cut_out_of_image(self.shape)
            for bb in self.bounding_boxes
            if bb.is_partly_within_image(self.shape)
        ]
        return BoundingBoxesOnImage(bbs_cut, shape=self.shape)

    def shift(self, top=None, right=None, bottom=None, left=None):
        """
        Shift all bounding boxes from one or more image sides, i.e. move them on the x/y-axis.

        Parameters
        ----------
        top : None or int, optional(default=None)
            Amount of pixels by which to shift all bounding boxes from the top.

        right : None or int, optional(default=None)
            Amount of pixels by which to shift all bounding boxes from the right.

        bottom : None or int, optional(default=None)
            Amount of pixels by which to shift all bounding boxes from the bottom.

        left : None or int, optional(default=None)
            Amount of pixels by which to shift all bounding boxes from the left.

        Returns
        -------
        result : BoundingBoxesOnImage
            Shifted bounding boxes.
        """
        bbs_new = [bb.shift(top=top, right=right, bottom=bottom, left=left) for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs_new, shape=self.shape)

    def copy(self):
        """
        Create a shallow copy of the BoundingBoxesOnImage object.

        Returns
        -------
        out : BoundingBoxesOnImage
            Shallow copy.
        """
        return copy.copy(self)

    def deepcopy(self):
        """
        Create a deep copy of the BoundingBoxesOnImage object.

        Returns
        -------
        out : BoundingBoxesOnImage
            Deep copy.
        """
        # Manual copy is far faster than deepcopy for KeypointsOnImage,
        # so use manual copy here too
        bbs = [bb.deepcopy() for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs, tuple(self.shape))

    def __repr__(self):
        """Return the same human-readable representation as __str__()."""
        return self.__str__()

    def __str__(self):
        """Return a human-readable representation listing all boxes and the image shape."""
        return "BoundingBoxesOnImage(%s, shape=%s)" % (str(self.bounding_boxes), self.shape)
class HeatmapsOnImage(object):
    """
    Object representing heatmaps on images.

    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray(float32)
        Array representing the heatmap(s). If multiple heatmaps, then C is expected to denote
        their number.

    shape : tuple of ints
        Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
        heatmap(s) array, unless it is identical to the image shape (note the likely
        difference between the arrays in the number of channels).
        If there is not a corresponding image, use the shape of the heatmaps array.

    min_value : float, optional(default=0.0)
        Minimum value for the heatmaps that `arr` represents. This will usually
        be 0.0.

    max_value : float, optional(default=1.0)
        Maximum value for the heatmaps that `arr` represents. This will usually
        be 1.0.
    """
    def __init__(self, arr, shape, min_value=0.0, max_value=1.0):
        """Construct a new HeatmapsOnImage object."""
        assert arr.dtype.type in [np.float32]
        assert arr.ndim in [2, 3]
        assert len(shape) in [2, 3]
        assert min_value < max_value
        # NOTE(review): only the first 50 values are range-checked, presumably
        # as a speed/strictness tradeoff -- confirm before relying on it.
        assert np.min(arr.flat[0:50]) >= min_value - np.finfo(arr.dtype).eps
        assert np.max(arr.flat[0:50]) <= max_value + np.finfo(arr.dtype).eps
        # Internally always store as (H,W,C); remember the original rank so
        # that get_arr() can return the same rank again.
        if arr.ndim == 2:
            arr = arr[..., np.newaxis]
            self.arr_was_2d = True
        else:
            self.arr_was_2d = False
        # Normalize the stored array to the value range [0.0, 1.0]; the
        # eps-window comparisons treat min/max that are almost exactly
        # 0.0/1.0 as already normalized.
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < min_value < 0.0 + eps
        max_is_one = 1.0 - eps < max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            self.arr_0to1 = arr
        else:
            self.arr_0to1 = (arr - min_value) / (max_value - min_value)
        self.shape = shape
        # Original value range, used by get_arr() to denormalize again.
        self.min_value = min_value
        self.max_value = max_value
def get_arr(self):
"""
Get the heatmap array in the desired value range.
The HeatmapsOnImage object saves heatmaps internally in the value range (min=0.0, max=1.0).
This function converts the internal representation to (min=min_value, max=max_value),
where min_value and max_value are provided upon instantiation of the object.
Returns
-------
result : (H,W) or (H,W,C) ndarray(float32)
Heatmap array.
"""
if self.arr_was_2d and self.arr_0to1.shape[2] == 1:
arr = self.arr_0to1[:, :, 0]
else:
arr = self.arr_0to1
eps = np.finfo(np.float32).eps
min_is_zero = 0.0 - eps < self.min_value < 0.0 + eps
max_is_one = 1.0 - eps < self.max_value < 1.0 + eps
if min_is_zero and max_is_one:
return np.copy(arr)
else:
diff = self.max_value - self.min_value
return self.min_value + diff * arr
# TODO
#def find_global_maxima(self):
# raise NotImplementedError()
    def draw(self, size=None, cmap="jet"):
        """
        Render the heatmaps as RGB images.

        Parameters
        ----------
        size : None or float or iterable of two ints or iterable of two floats, optional(default=None)
            Size of the rendered RGB image as (height, width).
            See `imresize_single_image()` for details.
            If set to None, no resizing is performed and the size of the heatmaps array is used.

        cmap : string or None, optional(default="jet")
            Color map of matplotlib to use in order to convert the heatmaps into RGB images.
            If set to None, no color map will be used and the heatmaps will be converted
            as simple intensity maps.

        Returns
        -------
        heatmaps_drawn : list of (H,W,3) ndarray(uint8)
            Rendered heatmaps, one per heatmap array channel.
        """
        heatmaps_uint8 = self.to_uint8()
        heatmaps_drawn = []
        # Render each channel of the heatmaps array as its own RGB image.
        for c in sm.xrange(heatmaps_uint8.shape[2]):
            # c:c+1 here, because the additional axis is needed by imresize_single_image
            heatmap_c = heatmaps_uint8[..., c:c+1]
            if size is not None:
                heatmap_c_rs = imresize_single_image(heatmap_c, size,
                                                     interpolation="nearest")
            else:
                heatmap_c_rs = heatmap_c
            # Back to [0.0, 1.0] floats, as expected by matplotlib color maps.
            heatmap_c_rs = np.squeeze(heatmap_c_rs).astype(np.float32) / 255.0
            if cmap is not None:
                # Imported locally, so matplotlib is only needed when a color
                # map is actually used.
                import matplotlib.pyplot as plt
                cmap_func = plt.get_cmap(cmap)
                heatmap_cmapped = cmap_func(heatmap_c_rs)
                # Drop the alpha channel returned by the color map (RGBA -> RGB).
                heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
            else:
                # No color map: replicate the intensities over three channels.
                heatmap_cmapped = np.tile(heatmap_c_rs[..., np.newaxis], (1, 1, 3))
            heatmap_cmapped = np.clip(heatmap_cmapped * 255, 0, 255).astype(np.uint8)
            heatmaps_drawn.append(heatmap_cmapped)
        return heatmaps_drawn
    def draw_on_image(self, image, alpha=0.75, cmap="jet", resize="heatmaps"):
        """
        Draw the heatmaps as overlays over an image.

        Parameters
        ----------
        image : (H,W,3) ndarray(uint8)
            Image onto which to draw the heatmaps.

        alpha : float, optional(default=0.75)
            Alpha/opacity value to use for the mixing of image and heatmaps.
            Higher values mean that the heatmaps will be more visible and the image less visible.

        cmap : string or None, optional(default="jet")
            Color map to use. See `HeatmapsOnImage.draw()` for details.

        resize : "heatmaps" or "image", optional(default="heatmaps")
            In case of size differences between the image and heatmaps, either the image or
            the heatmaps can be resized. This parameter controls which of the two will be resized
            to the other's size.

        Returns
        -------
        mix : list of (H,W,3) ndarray(uint8)
            Rendered overlays, one per heatmap array channel.
        """
        # assert RGB image
        assert image.ndim == 3
        assert image.shape[2] == 3
        assert image.dtype.type == np.uint8
        assert 0 - 1e-8 <= alpha <= 1.0 + 1e-8
        assert resize in ["heatmaps", "image"]
        if resize == "image":
            # Bring the image to the heatmaps' spatial size ...
            image = imresize_single_image(image, self.arr_0to1.shape[0:2], interpolation="cubic")
        # ... or render the heatmaps directly at the image's size.
        heatmaps_drawn = self.draw(
            size=image.shape[0:2] if resize == "heatmaps" else None,
            cmap=cmap
        )
        # Alpha-blend each rendered heatmap channel over the image.
        mix = [
            np.clip((1-alpha) * image + alpha * heatmap_i, 0, 255).astype(np.uint8)
            for heatmap_i
            in heatmaps_drawn
        ]
        return mix
def invert(self):
"""
Inverts each value in the heatmap, shifting low towards high values and vice versa.
This changes each value to::
v' = max - (v - min)
where `v` is the value at some spatial location, `min` is the minimum value in the heatmap
and `max` is the maximum value.
As the heatmap uses internally a 0.0 to 1.0 representation, this simply
becomes `v' = 1.0 - v`.
Note that the attributes `min_value` and `max_value` are not switched. They both keep their
values.
This function can be useful e.g. when working with depth maps, where algorithms might have
an easier time representing the furthest away points with zeros, requiring an inverted
depth map.
Returns
-------
result : HeatmapsOnImage
Inverted heatmap.
"""
arr_inv = HeatmapsOnImage.from_0to1(1 - self.arr_0to1, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
arr_inv.arr_was_2d = self.arr_was_2d
return arr_inv
def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
"""
Pad the heatmaps on their top/right/bottom/left side.
Parameters
----------
top : int, optional(default=0)
Amount of pixels to add at the top side of the heatmaps. Must be 0 or greater.
right : int, optional(default=0)
Amount of pixels to add at the right side of the heatmaps. Must be 0 or greater.
bottom : int, optional(default=0)
Amount of pixels to add at the bottom side of the heatmaps. Must be 0 or greater.
left : int, optional(default=0)
Amount of pixels to add at the left side of the heatmaps. Must be 0 or greater.
mode : string, optional(default="constant")
Padding mode to use. See `numpy.pad()` for details.
cval : number, optional(default=0.0)
Value to use for padding if mode="constant". See `numpy.pad()` for details.
Returns
-------
result : HeatmapsOnImage
Padded heatmaps of height H'=H+top+bottom and width W'=W+left+right.
"""
arr_0to1_padded = pad(self.arr_0to1, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
return HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
"""
Pad the heatmaps on their sides so that they match a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : string, optional(default="constant")
Padding mode to use. See `numpy.pad()` for details.
cval : number, optional(default=0.0)
Value to use for padding if mode="constant". See `numpy.pad()` for details.
return_pad_amounts : bool, optional(default=False)
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
result : tuple
First tuple entry: Padded heatmaps as HeatmapsOnImage object.
Second tuple entry: Amounts by which the heatmaps were padded on each side, given
as a tuple (top, right, bottom, left).
If return_pad_amounts is False, then only the heatmaps object is returned.
"""
arr_0to1_padded, pad_amounts = pad_to_aspect_ratio(self.arr_0to1, aspect_ratio=aspect_ratio, mode=mode, cval=cval, return_pad_amounts=True)
heatmaps = HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
if return_pad_amounts:
return heatmaps, pad_amounts
else:
return heatmaps
def avg_pool(self, block_size):
"""
Rescale the heatmap(s) array using average pooling of a given block/kernel size.
Parameters
----------
block_size : int or tuple of two ints or tuple of three ints
Size of each block of values to pool, aka kernel size. See `imgaug.pool()` for details.
Returns
-------
result : HeatmapsOnImage
Heatmaps after average pooling.
"""
arr_0to1_reduced = avg_pool(self.arr_0to1, block_size, cval=0.0)
return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
def max_pool(self, block_size):
"""
Rescale the heatmap(s) array using max-pooling of a given block/kernel size.
Parameters
----------
block_size : int or tuple of two ints or tuple of three ints
Size of each block of values to pool, aka kernel size. See `imgaug.pool()` for details.
Returns
-------
result : HeatmapsOnImage
Heatmaps after max-pooling.
"""
arr_0to1_reduced = max_pool(self.arr_0to1, block_size)
return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
def scale(self, sizes, interpolation="cubic"):
"""
Rescale the heatmap(s) array to the provided size given the provided interpolation.
Parameters
----------
sizes : float or iterable of two ints or iterable of two floats
New size of the array in (height, width). See `imresize_single_image()` for details.
interpolation : None or string or int, optional(default="cubic")
The interpolation to use during resize. See `imresize_single_image()` for details.
Returns
-------
result : HeatmapsOnImage
Rescaled heatmaps object.
"""
arr_0to1_rescaled = imresize_single_image(self.arr_0to1, sizes, interpolation=interpolation)
# cubic interpolation can lead to values outside of [0.0, 1.0],
# see https://github.com/opencv/opencv/issues/7195
# TODO area interpolation too?
arr_0to1_rescaled = np.clip(arr_0to1_rescaled, 0.0, 1.0)
return HeatmapsOnImage.from_0to1(arr_0to1_rescaled, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
def to_uint8(self):
"""
Convert this heatmaps object to a 0-to-255 array.
Returns
-------
arr_uint8 : (H,W,C) ndarray(uint8)
Heatmap as a 0-to-255 array.
"""
# TODO this always returns (H,W,C), even if input ndarray was originall (H,W)
# does it make sense here to also return (H,W) if self.arr_was_2d?
arr_0to255 = np.clip(np.round(self.arr_0to1 * 255), 0, 255)
arr_uint8 = arr_0to255.astype(np.uint8)
return arr_uint8
@staticmethod
def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
"""
Create a heatmaps object from an heatmap array containing values ranging from 0 to 255.
Parameters
----------
arr_uint8 : (H,W) or (H,W,C) ndarray(uint8)
Heatmap(s) array, where H=height, W=width, C=heatmap channels.
shape : tuple of ints
Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
heatmap(s) array, unless it is identical to the image shape (note the likely
difference between the arrays in the number of channels).
If there is not a corresponding image, use the shape of the heatmaps array.
min_value : float, optional(default=0.0)
Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
be 0.0. It is used when calling `HeatmapsOnImage.get_arr()`, which converts the
underlying (0, 255) array to value range (min_value, max_value).
max_value : float, optional(default=1.0)
Maximum value for the heatmaps that 0-to-255 array represents.
See parameter min_value for details.
Returns
-------
heatmaps : HeatmapsOnImage
Heatmaps object.
"""
arr_0to1 = arr_uint8.astype(np.float32) / 255.0
return HeatmapsOnImage.from_0to1(arr_0to1, shape, min_value=min_value, max_value=max_value)
@staticmethod
def from_0to1(arr_0to1, shape, min_value=0.0, max_value=1.0):
"""
Create a heatmaps object from an heatmap array containing values ranging from 0.0 to 1.0.
Parameters
----------
arr_0to1 : (H,W) or (H,W,C) ndarray(float32)
Heatmap(s) array, where H=height, W=width, C=heatmap channels.
shape : tuple of ints
Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
heatmap(s) array, unless it is identical to the image shape (note the likely
difference between the arrays in the number of channels).
If there is not a corresponding image, use the shape of the heatmaps array.
min_value : float, optional(default=0.0)
Minimum value for the heatmaps that the 0-to-1 array represents. This will usually
be 0.0. It is used when calling `HeatmapsOnImage.get_arr()`, which converts the
underlying (0.0, 1.0) array to value range (min_value, max_value).
E.g. if you started with heatmaps in the range (-1.0, 1.0) and projected these
to (0.0, 1.0), you should call this function with min_value=-1.0, max_value=1.0
so that `get_arr()` returns heatmap arrays having value range (-1.0, 1.0).
max_value : float, optional(default=1.0)
Maximum value for the heatmaps that to 0-to-255 array represents.
See parameter min_value for details.
Returns
-------
heatmaps : HeatmapsOnImage
Heatmaps object.
"""
heatmaps = HeatmapsOnImage(arr_0to1, shape, min_value=0.0, max_value=1.0)
heatmaps.min_value = min_value
heatmaps.max_value = max_value
return heatmaps
@staticmethod
def change_normalization(arr, source, target):
"""
Change the value range of a heatmap from one min-max to another min-max.
E.g. the value range may be changed from min=0.0, max=1.0 to min=-1.0, max=1.0.
Parameters
----------
arr : ndarray
Heatmap array to modify.
source : tuple of two floats
Current value range of the input array, given as (min, max), where both are float
values.
target : tuple of two floats
Desired output value range of the array, given as (min, max), where both are float
values.
Returns
-------
arr_target : ndarray
Input array, with value range projected to the desired target value range.
"""
assert is_np_array(arr)
if isinstance(source, HeatmapsOnImage):
source = (source.min_value, source.max_value)
else:
assert isinstance(source, tuple)
assert len(source) == 2
assert source[0] < source[1]
if isinstance(target, HeatmapsOnImage):
target = (target.min_value, target.max_value)
else:
assert isinstance(target, tuple)
assert len(target) == 2
assert target[0] < target[1]
# Check if source and target are the same (with a tiny bit of tolerance)
# if so, evade compuation and just copy the array instead.
# This is reasonable, as source and target will often both be (0.0, 1.0).
eps = np.finfo(arr.dtype).eps
mins_same = source[0] - 10*eps < target[0] < source[0] + 10*eps
maxs_same = source[1] - 10*eps < target[1] < source[1] + 10*eps
if mins_same and maxs_same:
return np.copy(arr)
min_source, max_source = source
min_target, max_target = target
diff_source = max_source - min_source
diff_target = max_target - min_target
arr_0to1 = (arr - min_source) / diff_source
arr_target = min_target + arr_0to1 * diff_target
return arr_target
    def copy(self):
        """
        Create a shallow copy of the Heatmaps object.

        Returns
        -------
        out : HeatmapsOnImage
            Shallow copy.
        """
        # NOTE(review): delegates to deepcopy(), so the returned object is in
        # fact a deep copy despite the method's name/docstring.
        return self.deepcopy()
def deepcopy(self):
"""
Create a deep copy of the Heatmaps object.
Returns
-------
out : HeatmapsOnImage
Deep copy.
"""
return HeatmapsOnImage(self.get_arr(), shape=self.shape, min_value=self.min_value, max_value=self.max_value)
class SegmentationMapOnImage(object):
    """
    Object representing a segmentation map associated with an image.

    Attributes
    ----------
    DEFAULT_SEGMENT_COLORS : list of tuple of int
        Standard RGB colors to use during drawing, ordered by class index.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,1) ndarray or (H,W,C) ndarray
        Array representing the segmentation map. May have datatypes bool, integer or float.
            * If bool: Assumed to be of shape (H,W), (H,W,1) or (H,W,C). If (H,W) or (H,W,1) it
              is assumed to be for the case of having a single class (where any False denotes
              background). Otherwise there are assumed to be C channels, one for each class,
              with each of them containing a mask for that class. The masks may overlap.
            * If integer: Assumed to be of shape (H,W) or (H,W,1). Each pixel is assumed to
              contain an integer denoting the class index. Classes are assumed to be
              non-overlapping. The number of classes cannot be guessed from this input, hence
              nb_classes must be set.
            * If float: Assumed to be of shape (H,W), (H,W,1) or (H,W,C) with meanings being
              similar to the case of `bool`. Values are expected to fall always in the range
              0.0 to 1.0 and are usually expected to be either 0.0 or 1.0 upon instantiation
              of a new segmentation map. Classes may overlap.
    shape : iterable of int
        Shape of the corresponding image (NOT the segmentation map array). This is expected
        to be (H, W) or (H, W, C) with C usually being 3. If there is no corresponding image,
        then use the segmentation map's shape instead.
    nb_classes : int or None
        Total number of unique classes that may appear in a segmentation map, i.e. the max
        class index. This may be None if the input array is of type bool or float. The number
        of classes however must be provided if the input array is of type int, as then the
        number of classes cannot be guessed.
    """
    DEFAULT_SEGMENT_COLORS = [
        (0, 0, 0), # black
        (230, 25, 75), # red
        (60, 180, 75), # green
        (255, 225, 25), # yellow
        (0, 130, 200), # blue
        (245, 130, 48), # orange
        (145, 30, 180), # purple
        (70, 240, 240), # cyan
        (240, 50, 230), # magenta
        (210, 245, 60), # lime
        (250, 190, 190), # pink
        (0, 128, 128), # teal
        (230, 190, 255), # lavender
        (170, 110, 40), # brown
        (255, 250, 200), # beige
        (128, 0, 0), # maroon
        (170, 255, 195), # mint
        (128, 128, 0), # olive
        (255, 215, 180), # coral
        (0, 0, 128), # navy
        (128, 128, 128), # grey
        (255, 255, 255), # white
        # --
        (115, 12, 37), # dark red
        (30, 90, 37), # dark green
        (127, 112, 12), # dark yellow
        (0, 65, 100), # dark blue
        (122, 65, 24), # dark orange
        (72, 15, 90), # dark purple
        (35, 120, 120), # dark cyan
        (120, 25, 115), # dark magenta
        (105, 122, 30), # dark lime
        (125, 95, 95), # dark pink
        (0, 64, 64), # dark teal
        (115, 95, 127), # dark lavender
        (85, 55, 20), # dark brown
        (127, 125, 100), # dark beige
        (64, 0, 0), # dark maroon
        (85, 127, 97), # dark mint
        (64, 64, 0), # dark olive
        (127, 107, 90), # dark coral
        (0, 0, 64), # dark navy
        (64, 64, 64), # dark grey
    ]
    def __init__(self, arr, shape, nb_classes=None):
        """Normalize the input array to a float32 (H,W,C) one-hot-like representation."""
        if arr.dtype.type == np.bool:
            # Boolean masks: one channel per class (a 2d mask is treated as a
            # single-class map).
            assert arr.ndim in [2, 3]
            self.input_was = ("bool", arr.ndim)
            if arr.ndim == 2:
                arr = arr[..., np.newaxis]
            arr = arr.astype(np.float32)
        elif arr.dtype.type in [np.uint8, np.uint32, np.int8, np.int16, np.int32]:
            assert arr.ndim == 2 or (arr.ndim == 3 and arr.shape[2] == 1)
            assert nb_classes is not None
            assert nb_classes > 0
            # Only the first 100 values are inspected -- a cheap sanity check,
            # not an exhaustive validation of the whole array.
            assert np.min(arr.flat[0:100]) >= 0
            assert np.max(arr.flat[0:100]) <= nb_classes
            self.input_was = ("int", arr.dtype.type, arr.ndim)
            if arr.ndim == 3:
                arr = arr[..., 0]
            arr = np.eye(nb_classes)[arr] # from class indices to one hot
            arr = arr.astype(np.float32)
        elif arr.dtype.type in [np.float16, np.float32]:
            # NOTE(review): only 3d float arrays are accepted here, although
            # the class docstring also mentions (H,W) float input -- confirm
            # which is intended.
            assert arr.ndim == 3
            self.input_was = ("float", arr.dtype.type, arr.ndim)
            arr = arr.astype(np.float32)
        else:
            dt = str(arr.dtype) if is_np_array(arr) else "<no ndarray>"
            raise Exception("Input was expected to be an ndarray of dtype bool, uint8, uint32 "
                            "int8, int16, int32 or float32. Got type %s with dtype %s." % (type(arr), dt))
        # Internal canonical form: float32, one channel per class.
        assert arr.ndim == 3
        assert arr.dtype.type == np.float32
        self.arr = arr
        self.shape = shape
        self.nb_classes = nb_classes if nb_classes is not None else arr.shape[2]
    #@property
    #def nb_classes(self):
    #    return self.arr.shape[2]
    def get_arr_int(self, background_threshold=0.01, background_class_id=0):
        """
        Get the segmentation map array as an integer array of shape (H, W).

        Each pixel in that array contains an integer value representing the pixel's class.
        If multiple classes overlap, the one with the highest local float value is picked.
        If that highest local value is below `background_threshold`, the method instead uses
        the background class id as the pixel's class value.

        Parameters
        ----------
        background_threshold : float, optional(default=0.01)
            At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
            class-heatmaps has a value above this threshold, the method uses the background class
            id instead.
        background_class_id : int, optional(default=0)
            Class id to fall back to if no class-heatmap passes the threshold at a spatial
            location.

        Returns
        -------
        result : (H,W) ndarray(int)
            Segmentation map array.
        """
        channelwise_max_idx = np.argmax(self.arr, axis=2)
        result = channelwise_max_idx
        if background_threshold is not None and background_threshold > 0:
            probs = np.amax(self.arr, axis=2)
            # Pixels where no class exceeds the threshold fall back to background.
            result[probs < background_threshold] = background_class_id
        return result.astype(np.int32)
    #def get_arr_bool(self, allow_overlapping=False, threshold=0.5, background_threshold=0.01, background_class_id=0):
    #    # TODO
    #    raise NotImplementedError()
    def draw(self, size=None, background_threshold=0.01, background_class_id=0, colors=None, return_foreground_mask=False):
        """
        Render the segmentation map as an RGB image.

        Parameters
        ----------
        size : None or float or iterable of two ints or iterable of two floats, optional(default=None)
            Size of the rendered RGB image as (height, width).
            See `imresize_single_image()` for details.
            If set to None, no resizing is performed and the size of the segmentation map array is
            used.
        background_threshold : float, optional(default=0.01)
            At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
            class-heatmaps has a value above this threshold, the method uses the background class
            id instead.
        background_class_id : int, optional(default=0)
            Class id to fall back to if no class-heatmap passes the threshold at a spatial
            location.
        colors : None or list of tuple of int, optional(default=None)
            Colors to use. One for each class to draw. If None, then default colors will be used.
        return_foreground_mask : bool, optional(default=False)
            Whether to return a mask of the same size as the drawn segmentation map, containing
            True at any spatial location that is not the background class and False everywhere
            else.

        Returns
        -------
        segmap_drawn : (H,W,3) ndarray(uint8)
            Rendered segmentation map.
        foreground_mask : (H,W) ndarray(bool)
            Mask indicating the locations of foreground classes. Only returned if
            return_foreground_mask is True.
        """
        arr = self.get_arr_int(background_threshold=background_threshold, background_class_id=background_class_id)
        nb_classes = self.nb_classes
        segmap_drawn = np.zeros((arr.shape[0], arr.shape[1], 3), dtype=np.uint8)
        if colors is None:
            colors = SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS
        assert nb_classes <= len(colors), "Can't draw all %d classes as it would exceed the maximum number of %d available colors." % (nb_classes, len(colors),)
        ids_in_map = np.unique(arr)
        # Iterate class ids 0..nb_classes; zip truncates to the number of
        # available colors. Ids not present in the map are skipped.
        for c, color in zip(sm.xrange(1+nb_classes), colors):
            if c in ids_in_map:
                class_mask = (arr == c)
                segmap_drawn[class_mask] = color
        if return_foreground_mask:
            foreground_mask = (arr != background_class_id)
        else:
            foreground_mask = None
        if size is not None:
            # Nearest-neighbour keeps colors crisp (no blending between classes).
            segmap_drawn = imresize_single_image(segmap_drawn, size, interpolation="nearest")
            if foreground_mask is not None:
                foreground_mask = imresize_single_image(foreground_mask.astype(np.uint8), size, interpolation="nearest") > 0
        if foreground_mask is not None:
            return segmap_drawn, foreground_mask
        return segmap_drawn
    def draw_on_image(self, image, alpha=0.5, resize="segmentation_map", background_threshold=0.01, background_class_id=0, colors=None, draw_background=False):
        """
        Draw the segmentation map as an overlay over an image.

        Parameters
        ----------
        image : (H,W,3) ndarray(uint8)
            Image onto which to draw the segmentation map.
        alpha : float, optional(default=0.5)
            Alpha/opacity value to use for the mixing of image and segmentation map.
            Higher values mean that the segmentation map will be more visible and the image less
            visible.
        resize : "segmentation_map" or "image", optional(default="segmentation_map")
            In case of size differences between the image and segmentation map, either the image or
            the segmentation map can be resized. This parameter controls which of the two will be
            resized to the other's size.
        background_threshold : float, optional(default=0.01)
            At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
            class-heatmaps has a value above this threshold, the method uses the background class
            id instead.
        background_class_id : int, optional(default=0)
            Class id to fall back to if no class-heatmap passes the threshold at a spatial
            location.
        colors : None or list of tuple of int, optional(default=None)
            Colors to use. One for each class to draw. If None, then default colors will be used.
        draw_background : bool, optional(default=False)
            If True, the background will be drawn like any other class.
            If False, the background will not be drawn, i.e. the respective background pixels
            will be identical with the image's RGB color at the corresponding spatial location
            and no color overlay will be applied.

        Returns
        -------
        mix : (H,W,3) ndarray(uint8)
            Rendered overlays.
        """
        # assert RGB image
        assert image.ndim == 3
        assert image.shape[2] == 3
        assert image.dtype.type == np.uint8
        assert 0 - 1e-8 <= alpha <= 1.0 + 1e-8
        assert resize in ["segmentation_map", "image"]
        if resize == "image":
            image = imresize_single_image(image, self.arr.shape[0:2], interpolation="cubic")
        segmap_drawn, foreground_mask = self.draw(
            background_threshold=background_threshold,
            background_class_id=background_class_id,
            size=image.shape[0:2] if resize == "segmentation_map" else None,
            colors=colors,
            return_foreground_mask=True
        )
        if draw_background:
            # Blend the whole drawn map over the whole image.
            mix = np.clip(
                (1-alpha) * image + alpha * segmap_drawn,
                0,
                255
            ).astype(np.uint8)
        else:
            # Blend only at foreground pixels; background pixels keep the
            # original image colors untouched.
            foreground_mask = foreground_mask[..., np.newaxis]
            mix = np.zeros_like(image)
            mix += (~foreground_mask).astype(np.uint8) * image
            mix += foreground_mask.astype(np.uint8) * np.clip(
                (1-alpha) * image + alpha * segmap_drawn,
                0,
                255
            ).astype(np.uint8)
        return mix
    def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
        """
        Pad the segmentation map on its top/right/bottom/left side.

        Parameters
        ----------
        top : int, optional(default=0)
            Amount of pixels to add at the top side of the segmentation map. Must be 0 or
            greater.
        right : int, optional(default=0)
            Amount of pixels to add at the right side of the segmentation map. Must be 0 or
            greater.
        bottom : int, optional(default=0)
            Amount of pixels to add at the bottom side of the segmentation map. Must be 0 or
            greater.
        left : int, optional(default=0)
            Amount of pixels to add at the left side of the segmentation map. Must be 0 or
            greater.
        mode : string, optional(default="constant")
            Padding mode to use. See `numpy.pad()` for details.
        cval : number, optional(default=0.0)
            Value to use for padding if mode="constant". See `numpy.pad()` for details.

        Returns
        -------
        result : SegmentationMapOnImage
            Padded segmentation map of height H'=H+top+bottom and width W'=W+left+right.
        """
        # self.arr is float32 here, so the new object needs no nb_classes argument.
        arr_padded = pad(self.arr, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
        return SegmentationMapOnImage(arr_padded, shape=self.shape)
    def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
        """
        Pad the segmentation map on its sides so that its matches a target aspect ratio.

        Depending on which dimension is smaller (height or width), only the corresponding
        sides (left/right or top/bottom) will be padded. In each case, both of the sides will
        be padded equally.

        Parameters
        ----------
        aspect_ratio : float
            Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
            as much width as height.
        mode : string, optional(default="constant")
            Padding mode to use. See `numpy.pad()` for details.
        cval : number, optional(default=0.0)
            Value to use for padding if mode="constant". See `numpy.pad()` for details.
        return_pad_amounts : bool, optional(default=False)
            If False, then only the padded image will be returned. If True, a tuple with two
            entries will be returned, where the first entry is the padded image and the second
            entry are the amounts by which each image side was padded. These amounts are again a
            tuple of the form (top, right, bottom, left), with each value being an integer.

        Returns
        -------
        result : tuple
            First tuple entry: Padded segmentation map as SegmentationMapOnImage object.
            Second tuple entry: Amounts by which the segmentation map was padded on each side,
            given as a tuple (top, right, bottom, left).
            If return_pad_amounts is False, then only the segmentation map object is returned.
        """
        arr_padded, pad_amounts = pad_to_aspect_ratio(self.arr, aspect_ratio=aspect_ratio, mode=mode, cval=cval, return_pad_amounts=True)
        segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)
        if return_pad_amounts:
            return segmap, pad_amounts
        else:
            return segmap
    def scale(self, sizes, interpolation="cubic"):
        """
        Rescale the segmentation map array to the provided size given the provided interpolation.

        Parameters
        ----------
        sizes : float or iterable of two ints or iterable of two floats
            New size of the array in (height, width). See `imresize_single_image()` for details.
        interpolation : None or string or int, optional(default="cubic")
            The interpolation to use during resize. See `imresize_single_image()` for details.
            Note: The segmentation map is internally stored as multiple float-based heatmaps,
            making smooth interpolations potentially more reasonable than nearest neighbour
            interpolation.

        Returns
        -------
        result : SegmentationMapOnImage
            Rescaled segmentation map object.
        """
        arr_rescaled = imresize_single_image(self.arr, sizes, interpolation=interpolation)
        # cubic interpolation can lead to values outside of [0.0, 1.0],
        # see https://github.com/opencv/opencv/issues/7195
        # TODO area interpolation too?
        arr_rescaled = np.clip(arr_rescaled, 0.0, 1.0)
        return SegmentationMapOnImage(arr_rescaled, shape=self.shape)
    def to_heatmaps(self, only_nonempty=False, not_none_if_no_nonempty=False):
        """
        Convert segmentation map to heatmaps object.

        Each segmentation map class will be represented as a single heatmap channel.

        Parameters
        ----------
        only_nonempty : bool, optional(default=False)
            If True, then only heatmaps for classes that appear in the segmentation map will be
            generated. Additionally, a list of these class ids will be returned.
        not_none_if_no_nonempty : bool, optional(default=False)
            If `only_nonempty` is True and for a segmentation map no channel was non-empty,
            this function usually returns None as the heatmaps object. If however this parameter
            is set to True, a heatmaps object with one channel (representing class 0)
            will be returned as a fallback in these cases.

        Returns
        -------
        result : HeatmapsOnImage or None
            Segmentation map as heatmaps.
            If `only_nonempty` was set to True and no class appeared in the segmentation map,
            then this is None.
        class_indices : list of int
            Class ids (0 to C-1) of the classes that were actually added to the heatmaps.
            Only returned if `only_nonempty` was set to True.
        """
        if not only_nonempty:
            return HeatmapsOnImage.from_0to1(self.arr, self.shape, min_value=0.0, max_value=1.0)
        else:
            # A channel counts as non-empty if its summed activation exceeds a
            # tiny epsilon (guards against float noise).
            nonempty_mask = np.sum(self.arr, axis=(0, 1)) > 0 + 1e-4
            if np.sum(nonempty_mask) == 0:
                if not_none_if_no_nonempty:
                    nonempty_mask[0] = True
                else:
                    return None, []
            class_indices = np.arange(self.arr.shape[2])[nonempty_mask]
            channels = self.arr[..., class_indices]
            return HeatmapsOnImage(channels, self.shape, min_value=0.0, max_value=1.0), class_indices
    @staticmethod
    def from_heatmaps(heatmaps, class_indices=None, nb_classes=None):
        """
        Convert heatmaps to segmentation map.

        Assumes that each class is represented as a single heatmap channel.

        Parameters
        ----------
        heatmaps : HeatmapsOnImage
            Heatmaps to convert.
        class_indices : None or list of int, optional(default=None)
            List of class indices represented by each heatmap channel. See also the
            secondary output of `to_heatmap()`. If this is provided, it must have the same
            length as the number of heatmap channels.
        nb_classes : None or int, optional(default=None)
            Number of classes. Must be provided if class_indices is set.

        Returns
        -------
        result : SegmentationMapOnImage
            Segmentation map derived from heatmaps.
        """
        if class_indices is None:
            return SegmentationMapOnImage(heatmaps.arr_0to1, shape=heatmaps.shape)
        else:
            assert nb_classes is not None
            assert min(class_indices) >= 0
            assert max(class_indices) < nb_classes
            assert len(class_indices) == heatmaps.arr_0to1.shape[2]
            arr_0to1 = heatmaps.arr_0to1
            arr_0to1_full = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1], nb_classes), dtype=np.float32)
            #empty_channel = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1]), dtype=np.float32)
            class_indices_set = set(class_indices)
            # Scatter the (possibly sparse) heatmap channels back into a dense
            # per-class array; classes without a channel stay all-zero.
            heatmap_channel = 0
            for c in sm.xrange(nb_classes):
                if c in class_indices_set:
                    arr_0to1_full[:, :, c] = arr_0to1[:, :, heatmap_channel]
                    heatmap_channel += 1
            return SegmentationMapOnImage(arr_0to1_full, shape=heatmaps.shape)
    def copy(self):
        """
        Create a shallow copy of the segmentation map object.

        Returns
        -------
        out : SegmentationMapOnImage
            Shallow copy.
        """
        # NOTE(review): delegates to deepcopy(), so this is in fact a deep copy.
        return self.deepcopy()
    def deepcopy(self):
        """
        Create a deep copy of the segmentation map object.

        Returns
        -------
        out : SegmentationMapOnImage
            Deep copy.
        """
        segmap = SegmentationMapOnImage(self.arr, shape=self.shape, nb_classes=self.nb_classes)
        # Preserve the originally provided input format so round-trips behave
        # like the source object.
        segmap.input_was = self.input_was
        return segmap
############################
# Background augmentation
############################
class Batch(object):
    """
    Class encapsulating a batch before and after augmentation.

    For each data kind `X` there is an attribute `X` (the input, as given to
    `__init__`) and an attribute `X_aug` (the augmented result, initially None
    and filled in by the augmentation pipeline).

    Parameters
    ----------
    images : None or (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
        The images to augment.
    heatmaps : None or list of HeatmapsOnImage
        The heatmaps to augment.
    segmentation_maps : None or list of SegmentationMapOnImage
        The segmentation maps to augment.
    keypoints : None or list of KeypointOnImage
        The keypoints to augment.
    bounding_boxes : None or list of BoundingBoxesOnImage
        The bounding boxes to augment.
    data : anything
        Additional data that is saved in the batch and may be read out
        after augmentation. This could e.g. contain filepaths to each image
        in `images`. As this object is usually used for background
        augmentation with multiple processes, the augmented Batch objects might
        not be returned in the original order, making this information useful.
    """
    def __init__(self, images=None, heatmaps=None, segmentation_maps=None, keypoints=None, bounding_boxes=None, data=None):
        self.images = images
        self.images_aug = None
        self.heatmaps = heatmaps
        self.heatmaps_aug = None
        self.segmentation_maps = segmentation_maps
        # BUGFIX: this previously re-assigned `self.segmentation_maps = None`,
        # discarding the segmentation maps passed by the caller. It is the
        # *augmented* counterpart that must start out as None.
        self.segmentation_maps_aug = None
        self.keypoints = keypoints
        self.keypoints_aug = None
        self.bounding_boxes = bounding_boxes
        self.bounding_boxes_aug = None
        self.data = data
class BatchLoader(object):
    """
    Class to load batches in the background.

    Loaded batches can be accessed using `BatchLoader.queue`.

    Parameters
    ----------
    load_batch_func : callable
        Function that yields Batch objects (i.e. expected to be a generator).
        Background loading automatically stops when the last batch was yielded.
    queue_size : int, optional(default=50)
        Maximum number of batches to store in the queue. May be set higher
        for small images and/or small batches.
    nb_workers : int, optional(default=1)
        Number of workers to run in the background.
    threaded : bool, optional(default=True)
        Whether to run the background processes using threads (true) or
        full processes (false).
    """
    def __init__(self, load_batch_func, queue_size=50, nb_workers=1, threaded=True):
        do_assert(queue_size > 0)
        do_assert(nb_workers >= 1)
        # Shared queue that the worker threads/processes fill with pickled batches.
        self.queue = multiprocessing.Queue(queue_size)
        # Set by terminate() to ask all workers to stop.
        self.join_signal = multiprocessing.Event()
        # One "I am done" event per worker; see all_finished().
        self.finished_signals = []
        self.workers = []
        self.threaded = threaded
        # One independent seed per worker so process workers don't all produce
        # the same random sequences.
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            finished_signal = multiprocessing.Event()
            self.finished_signals.append(finished_signal)
            if threaded:
                # NOTE(review): thread workers get seedval=None (no reseeding),
                # presumably because threads share the parent's RNG state --
                # confirm.
                worker = threading.Thread(target=self._load_batches, args=(load_batch_func, self.queue, finished_signal, self.join_signal, None))
            else:
                worker = multiprocessing.Process(target=self._load_batches, args=(load_batch_func, self.queue, finished_signal, self.join_signal, seeds[i]))
            worker.daemon = True
            worker.start()
            self.workers.append(worker)
    def all_finished(self):
        """
        Determine whether the workers have finished the loading process.

        Returns
        -------
        out : bool
            True if all workers have finished. Else False.
        """
        return all([event.is_set() for event in self.finished_signals])
    def _load_batches(self, load_batch_func, queue, finished_signal, join_signal, seedval):
        """
        Worker loop: pull batches from `load_batch_func`, pickle them and push
        them into `queue` until the generator is exhausted or `join_signal` is
        set. Always sets `finished_signal` on exit.
        """
        if seedval is not None:
            # Reseed python's, numpy's and this library's global random state.
            random.seed(seedval)
            np.random.seed(seedval)
            seed(seedval)
        try:
            for batch in load_batch_func():
                do_assert(isinstance(batch, Batch), "Expected batch returned by lambda function to be of class imgaug.Batch, got %s." % (type(batch),))
                batch_pickled = pickle.dumps(batch, protocol=-1)
                # Retry the put with a short timeout so a full queue does not
                # prevent us from noticing the join signal.
                while not join_signal.is_set():
                    try:
                        queue.put(batch_pickled, timeout=0.001)
                        break
                    except QueueFull:
                        pass
                if join_signal.is_set():
                    break
        except Exception as exc:
            # NOTE(review): `exc` is unused; traceback.print_exc() reports it.
            traceback.print_exc()
        finally:
            finished_signal.set()
    def terminate(self):
        """
        Stop all workers.
        """
        self.join_signal.set()
        # give minimal time to put generated batches in queue and gracefully shut down
        time.sleep(0.002)
        # clean the queue, this reportedly prevents hanging threads
        while True:
            try:
                self.queue.get(timeout=0.005)
            except QueueEmpty:
                break
        if self.threaded:
            for worker in self.workers:
                worker.join()
            # we don't have to set the finished_signals here, because threads always finish
            # gracefully
        else:
            for worker in self.workers:
                worker.terminate()
                worker.join()
            # wait here a tiny bit to really make sure that everything is killed before setting
            # the finished_signals. calling set() and is_set() (via a subprocess) on them at the
            # same time apparently results in a deadlock (at least in python 2).
            #time.sleep(0.02)
            for finished_signal in self.finished_signals:
                finished_signal.set()
        self.queue.close()
class BackgroundAugmenter(object):
    """
    Class to augment batches in the background (while training on the GPU).

    This is a wrapper around the multiprocessing module.

    Parameters
    ----------
    batch_loader : BatchLoader
        BatchLoader object to load data in the background.
    augseq : Augmenter
        An augmenter to apply to all loaded images.
        This may be e.g. a Sequential to apply multiple augmenters.
    queue_size : int
        Size of the queue that is used to temporarily save the augmentation
        results. Larger values offer the background processes more room
        to save results when the main process doesn't load much, i.e. they
        can lead to smoother and faster training. For large images, high
        values can block a lot of RAM though.
    nb_workers : "auto" or int
        Number of background workers to spawn. If auto, it will be set
        to C-1, where C is the number of CPU cores.
    """
    def __init__(self, batch_loader, augseq, queue_size=50, nb_workers="auto"):
        do_assert(queue_size > 0)
        self.augseq = augseq
        self.source_finished_signals = batch_loader.finished_signals
        self.queue_source = batch_loader.queue
        self.queue_result = multiprocessing.Queue(queue_size)
        if nb_workers == "auto":
            try:
                nb_workers = multiprocessing.cpu_count()
            except (ImportError, NotImplementedError):
                nb_workers = 1
            # try to reserve at least one core for the main process
            nb_workers = max(1, nb_workers - 1)
        else:
            do_assert(nb_workers >= 1)
        self.nb_workers = nb_workers
        self.workers = []
        # Counts workers that have sent their end-of-stream marker (pickled None).
        self.nb_workers_finished = 0
        self.augment_images = True
        self.augment_keypoints = True
        # One independent seed per worker so they don't all produce identical
        # augmentation sequences.
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            worker = multiprocessing.Process(
                target=self._augment_images_worker,
                args=(augseq, self.queue_source, self.queue_result, self.source_finished_signals, seeds[i])
            )
            worker.daemon = True
            worker.start()
            self.workers.append(worker)
    def get_batch(self):
        """
        Returns a batch from the queue of augmented batches.

        If workers are still running and there are no batches in the queue,
        it will automatically wait for the next batch.

        Returns
        -------
        out : None or ia.Batch
            One batch or None if all workers have finished.
        """
        batch_str = self.queue_result.get()
        batch = pickle.loads(batch_str)
        if batch is not None:
            return batch
        # A pickled None is the per-worker end-of-stream marker; keep reading
        # until every worker has sent one.
        self.nb_workers_finished += 1
        if self.nb_workers_finished == self.nb_workers:
            return None
        return self.get_batch()
    def _augment_images_worker(self, augseq, queue_source, queue_result, source_finished_signals, seedval):
        """
        Worker function that endlessly queries the source queue (input
        batches), augments batches in it and sends the result to the output
        queue.
        """
        # Reseed every random source this worker may rely on.
        np.random.seed(seedval)
        random.seed(seedval)
        augseq.reseed(seedval)
        seed(seedval)
        while True:
            # wait for a new batch in the source queue and load it
            try:
                batch_str = queue_source.get(timeout=0.1)
                batch = pickle.loads(batch_str)
                batch_aug = list(augseq.augment_batches([batch], background=False))[0]
                # send augmented batch to output queue
                # BUGFIX: previously the *input* `batch` was pickled here, so
                # the augmentation results in `batch_aug` were silently dropped.
                batch_str = pickle.dumps(batch_aug, protocol=-1)
                queue_result.put(batch_str)
            except QueueEmpty:
                # Source queue is (currently) empty; stop only once all
                # loaders have signalled that no more batches will arrive.
                if all([signal.is_set() for signal in source_finished_signals]):
                    queue_result.put(pickle.dumps(None, protocol=-1))
                    return
    def terminate(self):
        """
        Terminates all background processes immediately.

        This will also free their RAM.
        """
        for worker in self.workers:
            worker.terminate()
        self.queue_result.close()
Fix handling of floats/ints in _compute_resized_shape()
from __future__ import print_function, division, absolute_import
import random
import numpy as np
import copy
import numbers
import cv2
import math
from scipy import misc, ndimage
import multiprocessing
import threading
import traceback
import sys
import six
import six.moves as sm
import os
import skimage.draw
import skimage.measure
import collections
import time
if sys.version_info[0] == 2:
import cPickle as pickle
from Queue import Empty as QueueEmpty, Full as QueueFull
elif sys.version_info[0] == 3:
import pickle
from queue import Empty as QueueEmpty, Full as QueueFull
xrange = range
# Sentinel string; presumably passed by users to mean "all options/channels"
# in augmenter parameters -- confirm at call sites.
ALL = "ALL"
# Directory containing this module; used to locate bundled asset files below.
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
# filepath to the quokka image, its annotations and depth map
QUOKKA_FP = os.path.join(FILE_DIR, "quokka.jpg")
QUOKKA_ANNOTATIONS_FP = os.path.join(FILE_DIR, "quokka_annotations.json")
QUOKKA_DEPTH_MAP_HALFRES_FP = os.path.join(FILE_DIR, "quokka_depth_map_halfres.png")
# NOTE(review): recomputes the module directory instead of reusing FILE_DIR.
DEFAULT_FONT_FP = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    "DejaVuSans.ttf"
)
# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)
def is_np_array(val):
    """
    Checks whether a variable is a numpy array.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a numpy array. Otherwise False.
    """
    # Deliberately tests for ndarray only: np.generic would additionally match
    # numpy scalar values, which are not arrays.
    return isinstance(val, np.ndarray)
def is_single_integer(val):
    """
    Checks whether a variable is an integer.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is an integer. Otherwise False.
    """
    if isinstance(val, bool):
        # bool is a subclass of int, but must not count as an integer here.
        return False
    return isinstance(val, numbers.Integral)
def is_single_float(val):
    """
    Checks whether a variable is a float.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a float. Otherwise False.
    """
    # Equivalent to: real number, but neither an integer nor a bool.
    # (Inlines the is_single_integer() check used by the original code.)
    if isinstance(val, bool) or isinstance(val, numbers.Integral):
        return False
    return isinstance(val, numbers.Real)
def is_single_number(val):
    """
    Checks whether a variable is a number, i.e. an integer or float.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a number. Otherwise False.
    """
    # Equivalent to is_single_integer(val) or is_single_float(val): any real
    # number except bool (complex numbers are not Real and thus excluded).
    return isinstance(val, numbers.Real) and not isinstance(val, bool)
def is_iterable(val):
    """
    Checks whether a variable is iterable.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is an iterable. Otherwise False.
    """
    # BUGFIX/compat: `collections.Iterable` was a deprecated alias of
    # `collections.abc.Iterable` and was removed in python 3.10. Prefer the
    # `abc` submodule when it exists (python 3.3+) and fall back to
    # `collections` itself (python 2).
    abc_module = getattr(collections, "abc", collections)
    return isinstance(val, abc_module.Iterable)
# TODO convert to is_single_string() or rename is_single_integer/float/number()
def is_string(val):
    """
    Checks whether a variable is a string.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a string. Otherwise False.
    """
    # six.string_types covers both `str` and (on python 2) `unicode`.
    return isinstance(val, six.string_types)
def is_integer_array(val):
    """
    Check whether a variable is a numpy integer array.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a numpy integer array. Otherwise False.
    """
    if not isinstance(val, np.ndarray):
        return False
    return issubclass(val.dtype.type, np.integer)
def is_float_array(val):
    """
    Check whether a variable is a numpy float array.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is a numpy float array. Otherwise False.
    """
    if not isinstance(val, np.ndarray):
        return False
    return issubclass(val.dtype.type, np.floating)
def is_callable(val):
    """
    Check whether a variable is callable, e.g. a function.

    Parameters
    ----------
    val : anything
        The variable to check.

    Returns
    -------
    out : bool
        True if the variable is callable. Otherwise False.
    """
    # callable() was removed in python 3.0 and only reintroduced in 3.2,
    # hence the manual __call__ check for those versions.
    version = sys.version_info
    if version[0] == 3 and version[1] <= 2:
        return hasattr(val, '__call__')
    return callable(val)
def caller_name():
    """
    Return the name of the function that called the function invoking this.

    Returns
    -------
    name : str
        The name of the caller as a string.
    """
    # frame 0 is caller_name() itself; frame 1 is the function to report
    return sys._getframe(1).f_code.co_name
def seed(seedval):
    """
    Set the seed of the library's global random state.

    All augmenters draw from the global random state by default, so calling
    this makes subsequent runs reproducible. Under special circumstances
    (e.g. when an augmenter is switched to deterministic mode), the global
    state is replaced by a local one that is itself derived from the global
    state.

    Parameters
    ----------
    seedval : int
        The seed to use.
    """
    CURRENT_RANDOM_STATE.seed(seedval)
def current_random_state():
    """
    Return the current/global random state of the library.

    Returns
    -------
    out : np.random.RandomState
        The current/global random state.
    """
    return CURRENT_RANDOM_STATE
def new_random_state(seed=None, fully_random=False):
    """
    Create a new numpy random state.

    Parameters
    ----------
    seed : None or int, optional(default=None)
        Optional seed value to use.
        The same datatypes are allowed as for np.random.RandomState(seed).
    fully_random : bool, optional(default=False)
        Whether to let numpy initialize the RandomState non-deterministically
        (used if set to True). If False, a seed is sampled from the global
        random state instead, which is a bit faster and hence the default.

    Returns
    -------
    out : np.random.RandomState
        The new random state.
    """
    if seed is None and not fully_random:
        # Sampling a seed and seeding explicitly is considerably faster than
        # letting RandomState() initialize itself.
        seed = CURRENT_RANDOM_STATE.randint(0, 10**6, 1)[0]
    return np.random.RandomState(seed)
def dummy_random_state():
    """
    Create a throwaway random state that is always based on a seed of 1.

    Returns
    -------
    out : np.random.RandomState
        The new random state.
    """
    return np.random.RandomState(1)
def copy_random_state(random_state, force_copy=False):
    """
    Create a copy of a random state.

    Parameters
    ----------
    random_state : np.random.RandomState
        The random state to copy.
    force_copy : bool, optional(default=False)
        If True, this function will always create a copy of every random
        state. If False, it will not copy numpy's default random state,
        but all other random states.

    Returns
    -------
    rs_copy : np.random.RandomState
        The copied random state.
    """
    if random_state == np.random and not force_copy:
        return random_state
    # seed value is irrelevant here, the state is overwritten right away
    rs_copy = np.random.RandomState(1)
    rs_copy.set_state(random_state.get_state())
    return rs_copy
def derive_random_state(random_state):
    """
    Derive a single new random state from an existing random state or seed.

    Parameters
    ----------
    random_state : np.random.RandomState
        Random state or seed from which to derive the new random state.

    Returns
    -------
    result : np.random.RandomState
        Derived random state.
    """
    (derived,) = derive_random_states(random_state, n=1)
    return derived
# TODO use this everywhere instead of manual seed + create
def derive_random_states(random_state, n=1):
    """
    Derive N new random states from an existing random state or seed.

    Parameters
    ----------
    random_state : np.random.RandomState
        Random state or seed from which to derive new random states.
    n : int, optional(default=1)
        Number of random states to derive.

    Returns
    -------
    result : list of np.random.RandomState
        Derived random states.
    """
    # one base seed is drawn; each derived state gets a distinct offset
    base_seed = random_state.randint(0, 10**6, 1)[0]
    return [new_random_state(base_seed + offset) for offset in sm.xrange(n)]
def forward_random_state(random_state):
    """
    Advance the internal state of a random state.

    This makes sure that future calls to the random state produce new
    random values.

    Parameters
    ----------
    random_state : np.random.RandomState
        Random state to forward.
    """
    # drawing a single sample is enough to advance the generator
    random_state.uniform()
# TODO
# def from_json(json_str):
# pass
def _quokka_normalize_extract(extract):
    """
    Generate a normalized rectangle to be extracted from the standard quokka image.

    Parameters
    ----------
    extract : "square" or tuple of number or BoundingBox or BoundingBoxesOnImage
        Unnormalized representation of the image subarea to be extracted.
            * If string "square", then a squared area (x: 0-643, y: 0-643) will be
              extracted from the image.
            * If a tuple, then expected to contain four numbers denoting x1, y1, x2 and y2.
            * If a BoundingBox, then that bounding box's area will be extracted from the image.
            * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
              and a shape matching the full image dimensions (i.e. (643, 960, *)). Then the
              one bounding box will be used similar to BoundingBox.

    Returns
    -------
    bb : BoundingBox
        Normalized representation of the area to extract from the standard quokka image.
    """
    if extract == "square":
        bb = BoundingBox(x1=0, y1=0, x2=643, y2=643)
    elif isinstance(extract, tuple) and len(extract) == 4:
        bb = BoundingBox(x1=extract[0], y1=extract[1], x2=extract[2], y2=extract[3])
    elif isinstance(extract, BoundingBox):
        bb = extract
    elif isinstance(extract, BoundingBoxesOnImage):
        # BUGFIX: the length was previously checked on the class attribute
        # BoundingBoxesOnImage.bounding_boxes instead of the instance's list.
        assert len(extract.bounding_boxes) == 1
        assert extract.shape[0:2] == (643, 960)
        bb = extract.bounding_boxes[0]
    else:
        raise Exception(
            "Expected 'square' or tuple of four entries or BoundingBox or "
            "BoundingBoxesOnImage for parameter 'extract', got %s." % (type(extract),)
        )
    return bb
def _compute_resized_shape(from_shape, to_shape):
    """
    Computes the intended new shape of an image-like array after resizing.

    Parameters
    ----------
    from_shape : tuple or ndarray
        Old shape of the array. Usually expected to be a tuple of form (H, W) or (H, W, C) or
        alternatively an array with two or three dimensions.

    to_shape : None or tuple of ints or tuple of floats or int or float
        New shape of the array.
            * If None, then `from_shape` will be used as the new shape.
            * If an int V, then the new shape will be (V, V, [C]), where C will be added if it
              is part of from_shape.
            * If a float V, then the new shape will be (H*V, W*V, [C]), where H and W are the old
              height/width.
            * If a tuple (H', W', [C']) of ints, then H' and W' will be used as the new height
              and width.
            * If a tuple (H', W', [C']) of floats (except C), then H' and W' will be used as the
              new height and width.

    Returns
    -------
    to_shape_computed : tuple of int
        New shape.
    """
    # arrays are reduced to their .shape tuples; everything below works on tuples
    if is_np_array(from_shape):
        from_shape = from_shape.shape
    if is_np_array(to_shape):
        to_shape = to_shape.shape
    # start from the old shape so any channel entry of from_shape is kept
    to_shape_computed = list(from_shape)
    if to_shape is None:
        pass
    elif isinstance(to_shape, tuple):
        if len(from_shape) == 3 and len(to_shape) == 3:
            # both have a channel entry; they must agree
            assert from_shape[2] == to_shape[2]
        elif len(to_shape) == 3:
            # from_shape was 2D; adopt the requested channel entry
            to_shape_computed.append(to_shape[2])
        # NOTE(review): if to_shape[0:2] mixes an int and a float, neither
        # branch below triggers and H/W are silently left unchanged — confirm
        # whether that fallthrough is intended.
        if all([is_single_integer(v) for v in to_shape[0:2]]):
            to_shape_computed[0] = to_shape[0]
            to_shape_computed[1] = to_shape[1]
        elif all([is_single_float(v) for v in to_shape[0:2]]):
            # NOTE(review): the `is not None` fallbacks here look unreachable,
            # since is_single_float(None) is False and the all() above would
            # already have failed — presumably a leftover from None support.
            to_shape_computed[0] = int(round(from_shape[0] * to_shape[0])) if to_shape[0] is not None else from_shape[0]
            to_shape_computed[1] = int(round(from_shape[1] * to_shape[1])) if to_shape[1] is not None else from_shape[1]
    elif is_single_integer(to_shape) or is_single_float(to_shape):
        # scalar: apply the same size/factor to both height and width
        to_shape_computed = _compute_resized_shape(from_shape, (to_shape, to_shape))
    else:
        raise Exception("Expected to_shape to be None or ndarray or tuple of floats or tuple of ints or single int or single float, got %s." % (type(to_shape),))
    return to_shape_computed
def quokka(size=None, extract=None):
    """
    Return an image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        Size of the output image. Input into scipy.misc.imresize.
        Usually expected to be a tuple (H, W), where H is the desired height
        and W is the width. If None, then the image will not be resized.
    extract : None or "square" or tuple of four numbers or BoundingBox or BoundingBoxesOnImage
        Subarea of the quokka image to extract:
            * If None, then the whole image will be used.
            * Otherwise see `_quokka_normalize_extract()` for the accepted
              formats.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    image = ndimage.imread(QUOKKA_FP, mode="RGB")
    if extract is not None:
        extract_bb = _quokka_normalize_extract(extract)
        image = extract_bb.extract_from_image(image)
    if size is not None:
        new_shape = _compute_resized_shape(image.shape, size)
        image = misc.imresize(image, new_shape[0:2])
    return image
def quokka_square(size=None):
    """
    Return a square image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        Size of the output image. Input into scipy.misc.imresize.
        Usually expected to be a tuple (H, W), where H is the desired height
        and W is the width. If None, then the image will not be resized.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    # delegates to quokka() with the predefined square extraction area
    return quokka(size=size, extract="square")
def quokka_depth_map(size=None, extract=None):
    """
    Return a depth map for the standard example quokka image.

    NOTE(review): despite the name, this function currently loads the regular
    RGB quokka image (QUOKKA_FP) — identical to quokka() — rather than a
    depth-map asset. Confirm whether a dedicated depth-map file should be
    loaded here instead.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        Size of the output array. If None, the array is not resized.
    extract : None or "square" or tuple of four numbers or BoundingBox or BoundingBoxesOnImage
        Subarea to extract. See `_quokka_normalize_extract()`.

    Returns
    -------
    img : (H,W,3) ndarray
        The loaded (and possibly cropped/resized) array of dtype uint8.
    """
    img = ndimage.imread(QUOKKA_FP, mode="RGB")
    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img = bb.extract_from_image(img)
    if size is not None:
        shape_resized = _compute_resized_shape(img.shape, size)
        img = misc.imresize(img, shape_resized[0:2])
    return img
def quokka_keypoints(size=None, extract=None):
    """
    Return example keypoints on the standard example quokka image.

    The keypoints cover the eyes, ears, nose and paws.

    Parameters
    ----------
    size : None or float or tuple of two ints or tuple of two floats, optional(default=None)
        Size of the output image on which the keypoints are placed. If None, then the keypoints
        are not projected to any new size (positions on the original image are used).
        Floats lead to relative size changes, ints to absolute sizes in pixels.
    extract : None or "square" or tuple of number or BoundingBox or BoundingBoxesOnImage
        Subarea to extract from the image. See `_quokka_normalize_extract()`.

    Returns
    -------
    kpsoi : KeypointsOnImage
        Example keypoints on the quokka image.
    """
    offset_x, offset_y = 0, 0
    bb_extract = None
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    # annotation coordinates are relative to the full image; shift them into
    # the extracted area's coordinate frame
    keypoints = [
        Keypoint(x=kp_dict["x"] - offset_x, y=kp_dict["y"] - offset_y)
        for kp_dict in json_dict["keypoints"]
    ]
    if bb_extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)  # full quokka image dimensions
    kpsoi = KeypointsOnImage(keypoints, shape=shape)
    if size is not None:
        kpsoi = kpsoi.on(_compute_resized_shape(shape, size))
    return kpsoi
def quokka_bounding_boxes(size=None, extract=None):
    """
    Return example bounding boxes on the standard example quokka image.

    Currently only a single bounding box is returned that covers the quokka.

    Parameters
    ----------
    size : None or float or tuple of two ints or tuple of two floats, optional(default=None)
        Size of the output image on which the BBs are placed. If None, then the BBs
        are not projected to any new size (positions on the original image are used).
        Floats lead to relative size changes, ints to absolute sizes in pixels.
    extract : None or "square" or tuple of number or BoundingBox or BoundingBoxesOnImage
        Subarea to extract from the image. See `_quokka_normalize_extract()`.

    Returns
    -------
    bbsoi : BoundingBoxesOnImage
        Example BBs on the quokka image.
    """
    offset_x, offset_y = 0, 0
    bb_extract = None
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        offset_x = bb_extract.x1
        offset_y = bb_extract.y1
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    # annotation coordinates are relative to the full image; shift them into
    # the extracted area's coordinate frame
    bbs = [
        BoundingBox(
            x1=bb_dict["x1"] - offset_x,
            y1=bb_dict["y1"] - offset_y,
            x2=bb_dict["x2"] - offset_x,
            y2=bb_dict["y2"] - offset_y
        )
        for bb_dict in json_dict["bounding_boxes"]
    ]
    if bb_extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)  # full quokka image dimensions
    bbsoi = BoundingBoxesOnImage(bbs, shape=shape)
    if size is not None:
        bbsoi = bbsoi.on(_compute_resized_shape(shape, size))
    return bbsoi
def angle_between_vectors(v1, v2):
    """
    Compute the angle in radians between vectors `v1` and `v2`.

    Based on http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python

    Parameters
    ----------
    v1 : (N,) ndarray
        First input vector.
    v2 : (N,) ndarray
        Second input vector.

    Returns
    -------
    out : float
        Angle in radians, e.g. pi/2 for perpendicular vectors, 0.0 for
        identical directions and pi for opposite directions.
    """
    unit1 = v1 / np.linalg.norm(v1)
    unit2 = v2 / np.linalg.norm(v2)
    # clip guards against tiny float errors pushing the dot product out of
    # arccos's valid domain [-1, 1]
    cos_angle = np.clip(np.dot(unit1, unit2), -1.0, 1.0)
    return np.arccos(cos_angle)
def draw_text(img, y, x, text, color=[0, 255, 0], size=25):  # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
    """
    Draw text on an image.

    This uses by default DejaVuSans as its font, which is included in the
    library.

    Parameters
    ----------
    img : (H,W,3) ndarray
        The image array to draw text on.
        Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
    y : int
        y-coordinate of the top left corner of the text.
    x : int
        x-coordinate of the top left corner of the text.
    text : string
        The text to draw.
    color : iterable of 3 ints, optional(default=[0, 255, 0])
        Color of the text to draw. For RGB-images this is expected to be
        an RGB color.
    size : int, optional(default=25)
        Font size of the text to draw.

    Returns
    -------
    img_np : (H,W,3) ndarray
        Input image with text drawn on it.
    """
    # keeping PIL here so that it is not a dependency of the library right now
    from PIL import Image, ImageDraw, ImageFont

    do_assert(img.dtype in [np.uint8, np.float32])
    input_dtype = img.dtype
    if img.dtype == np.float32:
        img = img.astype(np.uint8)

    # BUGFIX: normalize the color into a *new* list. Previously the values
    # were assigned back into `color`, mutating the caller's list and --
    # since `color` is a mutable default argument -- the default itself.
    color_normalized = []
    for val in color:
        if isinstance(val, float):
            val = int(val * 255)
        color_normalized.append(np.clip(val, 0, 255))

    img = Image.fromarray(img)
    font = ImageFont.truetype(DEFAULT_FONT_FP, size)
    context = ImageDraw.Draw(img)
    context.text((x, y), text, fill=tuple(color_normalized), font=font)
    img_np = np.asarray(img)
    img_np.setflags(write=True)  # PIL/asarray returns a read-only array
    if img_np.dtype != input_dtype:
        img_np = img_np.astype(input_dtype)
    return img_np
# TODO rename sizes to size?
def imresize_many_images(images, sizes=None, interpolation=None):
    """
    Resize many images to a specified size.

    Parameters
    ----------
    images : (N,H,W,C) ndarray
        Array of the images to resize.
        Expected to usually be of dtype uint8.
    sizes : float or iterable of two ints or iterable of two floats
        The new size of the images, given either as a fraction (a single float) or as
        a (height, width) tuple of two integers or as a (height fraction, width fraction)
        tuple of two floats.
    interpolation : None or string or int, optional(default=None)
        The interpolation to use during resize.
        Either one of the cv2 constants cv2.INTER_NEAREST, cv2.INTER_LINEAR,
        cv2.INTER_AREA, cv2.INTER_CUBIC or the equivalent strings "nearest",
        "linear", "area", "cubic".
        If None, the interpolation is chosen automatically: area interpolation
        for size increases and linear interpolation for size decreases.

    Returns
    -------
    result : (N,H',W',C) ndarray
        Array of the resized images.

    Examples
    --------
    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), 2.0)
    Converts 2 RGB images of height and width 16 to images of height and width 16*2 = 32.

    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (16, 32))
    Converts 2 RGB images of height and width 16 to images of height 16 and width 32.

    >>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (2.0, 4.0))
    Converts 2 RGB images of height and width 16 to images of height 32 and width 64.
    """
    shape = images.shape
    do_assert(images.ndim == 4, "Expected array of shape (N, H, W, C), got shape %s" % (str(shape),))
    nb_images, im_height, im_width, nb_channels = shape

    # resolve the target height/width from a scale factor or a pair of values
    if is_single_float(sizes):
        do_assert(sizes > 0.0)
        height = int(round(im_height * sizes))
        width = int(round(im_width * sizes))
    else:
        do_assert(len(sizes) == 2)
        all_int = all([is_single_integer(size) for size in sizes])
        all_float = all([is_single_float(size) for size in sizes])
        do_assert(all_int or all_float)
        if all_int:
            height, width = sizes
        else:
            height = int(round(im_height * sizes[0]))
            width = int(round(im_width * sizes[1]))

    # nothing to do if the target size equals the current size
    if (height, width) == (im_height, im_width):
        return np.copy(images)

    ip = interpolation
    do_assert(ip is None or ip in ["nearest", "linear", "area", "cubic", cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC])
    if ip is None:
        ip = cv2.INTER_AREA if (height > im_height or width > im_width) else cv2.INTER_LINEAR
    else:
        # map string names to cv2 constants; cv2 constants pass through as-is
        name_to_ip = {
            "nearest": cv2.INTER_NEAREST,
            "linear": cv2.INTER_LINEAR,
            "area": cv2.INTER_AREA,
            "cubic": cv2.INTER_CUBIC,
        }
        ip = name_to_ip.get(ip, ip)

    result = np.zeros((nb_images, height, width, nb_channels), dtype=images.dtype)
    for img_idx in sm.xrange(nb_images):
        # TODO fallback to scipy here if image isn't uint8
        resized = cv2.resize(images[img_idx], (width, height), interpolation=ip)
        if len(resized.shape) == 2:
            # cv2 drops the channel axis for single-channel images
            resized = resized[:, :, np.newaxis]
        result[img_idx] = resized.astype(images.dtype)
    return result
def imresize_single_image(image, sizes, interpolation=None):
    """
    Resize a single image.

    Parameters
    ----------
    image : (H,W,C) ndarray or (H,W) ndarray
        Array of the image to resize.
        Expected to usually be of dtype uint8.
    sizes : float or iterable of two ints or iterable of two floats
        See `imresize_many_images()`.
    interpolation : None or string or int, optional(default=None)
        See `imresize_many_images()`.

    Returns
    -------
    out : (H',W',C) ndarray or (H',W') ndarray
        The resized image.
    """
    # temporarily add a channel axis for 2D (grayscale) inputs
    was_grayscale = (image.ndim == 2)
    if was_grayscale:
        image = image[:, :, np.newaxis]
    do_assert(len(image.shape) == 3, image.shape)
    resized = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
    if was_grayscale:
        return np.squeeze(resized[0, :, :, 0])
    return resized[0, ...]
def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
    """
    Pad an image-like array on its top/right/bottom/left side.

    This function is a wrapper around `numpy.pad()`.

    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pad.
    top : int, optional(default=0)
        Amount of pixels to add at the top side of the image. Must be 0 or greater.
    right : int, optional(default=0)
        Amount of pixels to add at the right side of the image. Must be 0 or greater.
    bottom : int, optional(default=0)
        Amount of pixels to add at the bottom side of the image. Must be 0 or greater.
    left : int, optional(default=0)
        Amount of pixels to add at the left side of the image. Must be 0 or greater.
    mode : string, optional(default="constant")
        Padding mode to use. See `numpy.pad()` for details.
    cval : number, optional(default=0)
        Value to use for padding if mode="constant". See `numpy.pad()` for details.

    Returns
    -------
    arr_pad : (H',W') or (H',W',C) ndarray
        Padded array with height H'=H+top+bottom and width W'=W+left+right.
    """
    # use do_assert() instead of plain assert so the validation survives
    # optimized (-O) runs, consistent with the rest of this module
    do_assert(arr.ndim in [2, 3])
    do_assert(top >= 0)
    do_assert(right >= 0)
    do_assert(bottom >= 0)
    do_assert(left >= 0)
    if top == 0 and right == 0 and bottom == 0 and left == 0:
        # nothing to pad; still return a copy to match the padded branch
        return np.copy(arr)
    paddings_np = [(top, bottom), (left, right)]  # (before, after) per axis
    if arr.ndim == 3:
        paddings_np.append((0, 0))  # never pad the channel axis
    if mode == "constant":
        # constant_values may only be passed for mode="constant"
        return np.pad(arr, paddings_np, mode=mode, constant_values=cval)
    return np.pad(arr, paddings_np, mode=mode)
def compute_paddings_for_aspect_ratio(arr, aspect_ratio):
    """
    Compute the amount of pixels by which an array has to be padded to fulfill an aspect ratio.

    The aspect ratio is given as width/height.
    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.

    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array for which to compute pad amounts.
    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.

    Returns
    -------
    result : tuple of ints
        Required padding amounts to reach the target aspect ratio, given as a tuple
        of the form (top, right, bottom, left).
    """
    assert arr.ndim in [2, 3]
    assert aspect_ratio > 0
    height, width = arr.shape[0:2]
    assert height > 0
    # float() guards against integer truncation under python 2, where
    # width/height with int operands would be floor division
    aspect_ratio_current = width / float(height)
    pad_top = 0
    pad_right = 0
    pad_bottom = 0
    pad_left = 0
    if aspect_ratio_current < aspect_ratio:
        # image is too narrow for the target ratio, pad left/right
        diff = (aspect_ratio * height) - width
        pad_right = int(np.ceil(diff / 2))
        pad_left = int(np.floor(diff / 2))
    elif aspect_ratio_current > aspect_ratio:
        # image is too wide for the target ratio, pad top/bottom
        # (1.0/... again avoids python 2 integer division)
        diff = ((1.0 / aspect_ratio) * width) - height
        pad_top = int(np.ceil(diff / 2))
        pad_bottom = int(np.floor(diff / 2))
    return (pad_top, pad_right, pad_bottom, pad_left)
def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0, return_pad_amounts=False):
    """
    Pad an image-like array on its sides so that it matches a target aspect ratio.

    Depending on which dimension is smaller (height or width), only the corresponding
    sides (left/right or top/bottom) will be padded. In each case, both of the sides will
    be padded equally.

    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pad.
    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
        as much width as height.
    mode : string, optional(default="constant")
        Padding mode to use. See `numpy.pad()` for details.
    cval : number, optional(default=0)
        Value to use for padding if mode="constant". See `numpy.pad()` for details.
    return_pad_amounts : bool, optional(default=False)
        If False, then only the padded image will be returned. If True, a tuple with two
        entries will be returned, where the first entry is the padded image and the second
        entry are the amounts by which each image side was padded. These amounts are again a
        tuple of the form (top, right, bottom, left), with each value being an integer.

    Returns
    -------
    result : tuple
        First tuple entry: Padded image as (H',W') or (H',W',C) ndarray, fulfilling the
        given aspect_ratio.
        Second tuple entry: Amounts by which the image was padded on each side, given
        as a tuple (top, right, bottom, left).
        If return_pad_amounts is False, then only the image is returned.
    """
    pad_amounts = compute_paddings_for_aspect_ratio(arr, aspect_ratio)
    pad_top, pad_right, pad_bottom, pad_left = pad_amounts
    arr_padded = pad(
        arr,
        top=pad_top,
        right=pad_right,
        bottom=pad_bottom,
        left=pad_left,
        mode=mode,
        cval=cval
    )
    if return_pad_amounts:
        return arr_padded, pad_amounts
    return arr_padded
def pool(arr, block_size, func, cval=0, preserve_dtype=True):
    """
    Rescale an array by pooling values within blocks.

    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pool. Ideally of datatype np.float64.
    block_size : int or tuple of two ints or tuple of three ints
        Spatial size of each group of values to pool, aka kernel size.
        If a single integer, then a symmetric block of that size along height and width will
        be used.
        If a tuple of two values, it is assumed to be the block size along height and width
        of the image-like, with pooling happening per channel.
        If a tuple of three values, it is assumed to be the block size along height, width and
        channels.
    func : callable
        Function to apply to a given block in order to convert it to a single number,
        e.g. np.average, np.min, np.max.
    cval : number, optional(default=0)
        Value to use in order to pad the array along its border if the array cannot be divided
        by block_size without remainder.
    preserve_dtype : bool, optional(default=True)
        Whether to convert the array back to the input datatype if it is changed away from
        that in the pooling process.

    Returns
    -------
    arr_reduced : (H',W') or (H',W',C') ndarray
        Array after pooling.
    """
    assert arr.ndim in [2, 3]
    is_valid_int = is_single_integer(block_size) and block_size >= 1
    # BUGFIX: the per-entry checks were previously collected in a plain list
    # comprehension, which is always truthy for non-empty block_size tuples;
    # all() is required so that every entry is actually validated.
    is_valid_tuple = (
        is_iterable(block_size)
        and len(block_size) in [2, 3]
        and all([is_single_integer(val) and val >= 1 for val in block_size])
    )
    assert is_valid_int or is_valid_tuple
    if is_single_integer(block_size):
        block_size = [block_size, block_size]
    if len(block_size) < arr.ndim:
        # pool per channel if no channel block size was given
        block_size = list(block_size) + [1]
    input_dtype = arr.dtype
    arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func, cval=cval)
    if preserve_dtype and arr_reduced.dtype.type != input_dtype:
        arr_reduced = arr_reduced.astype(input_dtype)
    return arr_reduced
def avg_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Rescale an array using average pooling.

    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pool. See `pool()` for details.
    block_size : int or tuple of two ints or tuple of three ints
        Size of each block of values to pool. See `pool()` for details.
    cval : number, optional(default=0)
        Padding value. See `pool()` for details.
    preserve_dtype : bool, optional(default=True)
        Whether to preserve the input array dtype. See `pool()` for details.

    Returns
    -------
    arr_reduced : (H',W') or (H',W',C') ndarray
        Array after average pooling.
    """
    # thin convenience wrapper around pool() with np.average as reducer
    return pool(arr, block_size, np.average, cval=cval, preserve_dtype=preserve_dtype)
def max_pool(arr, block_size, cval=0, preserve_dtype=True):
    """
    Rescale an array using max-pooling.

    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray
        Image-like array to pool. See `pool()` for details.
    block_size : int or tuple of two ints or tuple of three ints
        Size of each block of values to pool. See `pool()` for details.
    cval : number, optional(default=0)
        Padding value. See `pool()` for details.
    preserve_dtype : bool, optional(default=True)
        Whether to preserve the input array dtype. See `pool()` for details.

    Returns
    -------
    arr_reduced : (H',W') or (H',W',C') ndarray
        Array after max-pooling.
    """
    # thin convenience wrapper around pool() with np.max as reducer
    return pool(arr, block_size, np.max, cval=cval, preserve_dtype=preserve_dtype)
def draw_grid(images, rows=None, cols=None):
    """
    Convert multiple input images into a single image showing them in a grid.

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        The input images to convert to a grid.
        Expected to be RGB and have dtype uint8.
    rows : None or int, optional(default=None)
        The number of rows to show in the grid.
        If None, it will be automatically derived.
    cols : None or int, optional(default=None)
        The number of cols to show in the grid.
        If None, it will be automatically derived.

    Returns
    -------
    grid : (H',W',3) ndarray
        Image of the generated grid.
    """
    if is_np_array(images):
        do_assert(images.ndim == 4)
    else:
        do_assert(is_iterable(images) and is_np_array(images[0]) and images[0].ndim == 3)
    nb_images = len(images)
    do_assert(nb_images > 0)
    # each grid cell is large enough for the biggest image
    cell_height = max([image.shape[0] for image in images])
    cell_width = max([image.shape[1] for image in images])
    channels = set([image.shape[2] for image in images])
    do_assert(
        len(channels) == 1,
        "All images are expected to have the same number of channels, but got channel set %s with length %d instead." % (str(channels), len(channels))
    )
    nb_channels = list(channels)[0]
    # derive missing grid dimensions; float() guards against python 2 integer
    # division, which would otherwise floor the quotient *before* math.ceil()
    # is applied and thereby produce grids too small for all images
    if rows is None and cols is None:
        rows = cols = int(math.ceil(math.sqrt(nb_images)))
    elif rows is not None:
        cols = int(math.ceil(nb_images / float(rows)))
    elif cols is not None:
        rows = int(math.ceil(nb_images / float(cols)))
    do_assert(rows * cols >= nb_images)
    width = cell_width * cols
    height = cell_height * rows
    grid = np.zeros((height, width, nb_channels), dtype=np.uint8)
    cell_idx = 0
    for row_idx in sm.xrange(rows):
        for col_idx in sm.xrange(cols):
            if cell_idx < nb_images:
                image = images[cell_idx]
                cell_y1 = cell_height * row_idx
                cell_y2 = cell_y1 + image.shape[0]
                cell_x1 = cell_width * col_idx
                cell_x2 = cell_x1 + image.shape[1]
                grid[cell_y1:cell_y2, cell_x1:cell_x2, :] = image
            cell_idx += 1
    return grid
def show_grid(images, rows=None, cols=None):
    """
    Convert the input images to a grid image and show it in a new window.

    This function wraps around scipy.misc.imshow(), which requires the
    `see <image>` command to work. On Windows systems, this tends to not be
    the case.

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        See `draw_grid()`.
    rows : None or int, optional(default=None)
        See `draw_grid()`.
    cols : None or int, optional(default=None)
        See `draw_grid()`.
    """
    misc.imshow(draw_grid(images, rows=rows, cols=cols))
def do_assert(condition, message="Assertion failed."):
    """
    Raise an AssertionError if a condition is not met.

    This behaves like an `assert` statement but, unlike one, is not stripped
    from optimized (-O) code. It therefore replaces `assert` statements
    throughout the library for checks that must always run.

    Parameters
    ----------
    condition : bool
        If False, an AssertionError is raised.
    message : string, optional(default="Assertion failed.")
        Error message.
    """
    if condition:
        return
    raise AssertionError(str(message))
class HooksImages(object):
"""
Class to intervene with image augmentation runs.
This is e.g. useful to dynamically deactivate some augmenters.
Parameters
----------
activator : None or callable, optional(default=None)
A function that gives permission to execute an augmenter.
The expected interface is `f(images, augmenter, parents, default)`,
where `images` are the input images to augment, `augmenter` is the
instance of the augmenter to execute, `parents` are previously
executed augmenters and `default` is an expected default value to be
returned if the activator function does not plan to make a decision
for the given inputs.
propagator : None or callable, optional(default=None)
A function that gives permission to propagate the augmentation further
to the children of an augmenter. This happens after the activator.
In theory, an augmenter may augment images itself (if allowed by the
activator) and then execute child augmenters afterwards (if allowed by
the propagator). If the activator returned False, the propagation step
will never be executed.
The expected interface is `f(images, augmenter, parents, default)`,
with all arguments having identical meaning to the activator.
preprocessor : None or callable, optional(default=None)
A function to call before an augmenter performed any augmentations.
The interface is `f(images, augmenter, parents)`,
with all arguments having identical meaning to the activator.
It is expected to return the input images, optionally modified.
postprocessor : None or callable, optional(default=None)
A function to call after an augmenter performed augmentations.
The interface is the same as for the preprocessor.
Examples
--------
>>> seq = iaa.Sequential([
>>> iaa.GaussianBlur(3.0, name="blur"),
>>> iaa.Dropout(0.05, name="dropout"),
>>> iaa.Affine(translate_px=-5, name="affine")
>>> ])
>>>
>>> def activator(images, augmenter, parents, default):
>>> return False if augmenter.name in ["blur", "dropout"] else default
>>>
>>> seq_det = seq.to_deterministic()
>>> images_aug = seq_det.augment_images(images)
>>> heatmaps_aug = seq_det.augment_images(
>>> heatmaps,
>>> hooks=ia.HooksImages(activator=activator)
>>> )
This augments images and their respective heatmaps in the same way.
The heatmaps however are only modified by Affine, not by GaussianBlur or
Dropout.
"""
#def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None, propagation_method=None):
def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None):
self.activator = activator
self.propagator = propagator
self.preprocessor = preprocessor
self.postprocessor = postprocessor
#self.propagation_method = propagation_method
def is_activated(self, images, augmenter, parents, default):
    """Return whether *augmenter* may be executed.

    Falls back to *default* when no activator callback was configured.

    Returns
    -------
    out : bool
        If True, the augmenter may be executed. If False, it may
        not be executed.
    """
    if self.activator is None:
        return default
    return self.activator(images, augmenter, parents, default)
# TODO is a propagating hook necessary? seems to be covered by activated
# hook already
def is_propagating(self, images, augmenter, parents, default):
    """Return whether *augmenter* may propagate to its children.

    This is independent of the augmenter itself possibly changing the
    image without calling its children. Falls back to *default* when no
    propagator callback was configured.

    Returns
    -------
    out : bool
        If True, the augmenter may propagate to its children.
        If False, it may not.
    """
    if self.propagator is None:
        return default
    return self.propagator(images, augmenter, parents, default)
#def get_propagation_method(self, images, augmenter, parents, child, default):
# if self.propagation_method is None:
# return default
# else:
# return self.propagation_method(images, augmenter, parents, child, default)
def preprocess(self, images, augmenter, parents):
    """Run the preprocessor hook before an augmenter performs augmentations.

    Returns
    -------
    out : (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
        The input images, optionally modified by the preprocessor.
    """
    return images if self.preprocessor is None else self.preprocessor(images, augmenter, parents)
def postprocess(self, images, augmenter, parents):
    """Run the postprocessor hook after an augmenter performed augmentations.

    Returns
    -------
    out : (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
        The input images, optionally modified by the postprocessor.
    """
    return images if self.postprocessor is None else self.postprocessor(images, augmenter, parents)
class HooksHeatmaps(HooksImages):
    """Class to intervene with heatmap augmentation runs.

    Useful e.g. to dynamically deactivate some augmenters.
    Currently identical to the hooks class for images; this may or may
    not change in the future.
    """
    pass
class HooksKeypoints(HooksImages):
    """Class to intervene with keypoint augmentation runs.

    Useful e.g. to dynamically deactivate some augmenters.
    Currently identical to the hooks class for images; this may or may
    not change in the future.
    """
    pass
class Keypoint(object):
    """A single keypoint (aka landmark) on an image.

    Parameters
    ----------
    x : number
        Coordinate of the keypoint on the x axis.
    y : number
        Coordinate of the keypoint on the y axis.
    """

    def __init__(self, x, y):
        # Type assertions (is_single_integer) were removed here because
        # they proved to be very slow for some reason.
        self.x = x
        self.y = y

    @property
    def x_int(self):
        """The keypoint's x-coordinate, rounded to the closest integer."""
        return int(round(self.x))

    @property
    def y_int(self):
        """The keypoint's y-coordinate, rounded to the closest integer."""
        return int(round(self.y))

    def project(self, from_shape, to_shape):
        """Project the keypoint onto a new position on a new image.

        E.g. a keypoint at x=(10 of 100 pixels), y=(20 of 100 pixels)
        projected onto a (width=200, height=200) image ends up at (20, 40).
        Intended for plain resizes only; it cannot express more complex
        changes (e.g. padding, cropping).

        Parameters
        ----------
        from_shape : tuple
            Shape of the original image (before resize).
        to_shape : tuple
            Shape of the new image (after resize).

        Returns
        -------
        out : Keypoint
            Keypoint object with new coordinates.
        """
        if from_shape[0:2] == to_shape[0:2]:
            return Keypoint(x=self.x, y=self.y)
        from_height, from_width = from_shape[0:2]
        to_height, to_width = to_shape[0:2]
        return Keypoint(
            x=(self.x / from_width) * to_width,
            y=(self.y / from_height) * to_height
        )

    def shift(self, x=0, y=0):
        """Return a copy of the keypoint, moved by (x, y).

        Parameters
        ----------
        x : number, optional(default=0)
            Move by this value on the x axis.
        y : number, optional(default=0)
            Move by this value on the y axis.

        Returns
        -------
        out : Keypoint
            Keypoint object with new coordinates.
        """
        return Keypoint(self.x + x, self.y + y)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "Keypoint(x=%.8f, y=%.8f)" % (self.x, self.y)
class KeypointsOnImage(object):
    """
    Object that represents all keypoints on a single image.

    Parameters
    ----------
    keypoints : list of Keypoint
        List of keypoints on the image.
    shape : tuple of int
        The shape of the image on which the keypoints are placed.

    Examples
    --------
    >>> kps = [Keypoint(x=10, y=20), Keypoint(x=34, y=60)]
    >>> kps_oi = KeypointsOnImage(kps, shape=image.shape)
    """
    def __init__(self, keypoints, shape):
        self.keypoints = keypoints
        # `shape` may also be an image (ndarray), in which case that
        # image's shape tuple is used.
        if is_np_array(shape):
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)

    @property
    def height(self):
        """Height of the image on which the keypoints are placed."""
        return self.shape[0]

    @property
    def width(self):
        """Width of the image on which the keypoints are placed."""
        return self.shape[1]

    def on(self, image):
        """
        Project keypoints from one image to a new one.

        Parameters
        ----------
        image : ndarray or tuple
            New image onto which the keypoints are to be projected.
            May also simply be that new image's shape tuple.

        Returns
        -------
        keypoints : KeypointsOnImage
            Object containing all projected keypoints.
        """
        if is_np_array(image):
            shape = image.shape
        else:
            shape = image
        # Same spatial size -> nothing to project, just copy.
        if shape[0:2] == self.shape[0:2]:
            return self.deepcopy()
        else:
            keypoints = [kp.project(self.shape, shape) for kp in self.keypoints]
            return KeypointsOnImage(keypoints, shape)

    def draw_on_image(self, image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False):  # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Draw all keypoints onto a given image. Each keypoint is marked by a
        square of a chosen color and size.

        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the keypoints.
            This image should usually have the same shape as
            set in KeypointsOnImage.shape.
        color : int or list of ints or tuple of ints or (3,) ndarray, optional(default=[0, 255, 0])
            The RGB color of all keypoints. If a single int `C`, then that is
            equivalent to (C,C,C).
        size : int, optional(default=3)
            The size of each point. If set to C, each square will have
            size CxC.
        copy : bool, optional(default=True)
            Whether to copy the image before drawing the points.
        raise_if_out_of_image : bool, optional(default=False)
            Whether to raise an exception if any keypoint is outside of the
            image.

        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn keypoints.
        """
        if copy:
            image = np.copy(image)
        height, width = image.shape[0:2]
        for keypoint in self.keypoints:
            y, x = keypoint.y_int, keypoint.x_int
            if 0 <= y < height and 0 <= x < width:
                # Draw a size x size square centered on (x, y), clipped to
                # the image borders.
                x1 = max(x - size//2, 0)
                x2 = min(x + 1 + size//2, width)
                y1 = max(y - size//2, 0)
                y2 = min(y + 1 + size//2, height)
                image[y1:y2, x1:x2] = color
            else:
                if raise_if_out_of_image:
                    # BUGFIX: the message previously interpolated (y, x) into
                    # the x=.../y=... slots, reporting swapped coordinates.
                    raise Exception("Cannot draw keypoint x=%.8f, y=%.8f on image with shape %s." % (x, y, image.shape))
        return image

    def shift(self, x=0, y=0):
        """
        Move the keypoints around on an image.

        Parameters
        ----------
        x : number, optional(default=0)
            Move each keypoint by this value on the x axis.
        y : number, optional(default=0)
            Move each keypoint by this value on the y axis.

        Returns
        -------
        out : KeypointsOnImage
            Keypoints after moving them.
        """
        keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]
        return KeypointsOnImage(keypoints, self.shape)

    def get_coords_array(self):
        """
        Convert the coordinates of all keypoints in this object to
        an array of shape (N,2).

        Returns
        -------
        result : (N, 2) ndarray
            Where N is the number of keypoints. Each first value is the
            x coordinate, each second value is the y coordinate.
        """
        result = np.zeros((len(self.keypoints), 2), np.float32)
        for i, keypoint in enumerate(self.keypoints):
            result[i, 0] = keypoint.x
            result[i, 1] = keypoint.y
        return result

    @staticmethod
    def from_coords_array(coords, shape):
        """
        Convert an array (N,2) with a given image shape to a KeypointsOnImage
        object.

        Parameters
        ----------
        coords : (N, 2) ndarray
            Coordinates of N keypoints on the original image.
            Each first entry (i, 0) is expected to be the x coordinate.
            Each second entry (i, 1) is expected to be the y coordinate.
        shape : tuple
            Shape tuple of the image on which the keypoints are placed.

        Returns
        -------
        out : KeypointsOnImage
            KeypointsOnImage object that contains all keypoints from the array.
        """
        keypoints = [Keypoint(x=coords[i, 0], y=coords[i, 1]) for i in sm.xrange(coords.shape[0])]
        return KeypointsOnImage(keypoints, shape)

    def to_keypoint_image(self, size=1):
        """
        Draws a new black image of shape (H,W,N) in which all keypoint coordinates
        are set to 255.
        (H=shape height, W=shape width, N=number of keypoints)

        This function can be used as a helper when augmenting keypoints with
        a method that only supports the augmentation of images.

        Parameters
        ----------
        size : int
            Size of each (squared) point. Must be odd, so the square can be
            centered on the keypoint.

        Returns
        -------
        image : (H,W,N) ndarray
            Image in which the keypoints are marked. H is the height,
            defined in KeypointsOnImage.shape[0] (analogous W). N is the
            number of keypoints.
        """
        do_assert(len(self.keypoints) > 0)
        height, width = self.shape[0:2]
        image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
        do_assert(size % 2 != 0)
        sizeh = max(0, (size-1)//2)
        for i, keypoint in enumerate(self.keypoints):
            # TODO for float values spread activation over several cells
            # here and do voting at the end
            y = keypoint.y_int
            x = keypoint.x_int
            # Surrounding square gets value 128, the center pixel 255, so
            # the argmax in from_keypoint_image() finds the exact center.
            x1 = np.clip(x - sizeh, 0, width-1)
            x2 = np.clip(x + sizeh + 1, 0, width)
            y1 = np.clip(y - sizeh, 0, height-1)
            y2 = np.clip(y + sizeh + 1, 0, height)
            if x1 < x2 and y1 < y2:
                image[y1:y2, x1:x2, i] = 128
            if 0 <= y < height and 0 <= x < width:
                image[y, x, i] = 255
        return image

    @staticmethod
    def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1, nb_channels=None):  # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Converts an image generated by `to_keypoint_image()` back to
        an KeypointsOnImage object.

        Parameters
        ----------
        image : (H,W,N) ndarray
            The keypoints image. N is the number of
            keypoints.
        if_not_found_coords : tuple or list or dict or None
            Coordinates to use for keypoints that cannot be found in `image`.
            If this is a list/tuple, it must have two integer values. If it
            is a dictionary, it must have the keys "x" and "y". If this
            is None, then the keypoint will not be added to the final
            KeypointsOnImage object.
        threshold : int
            The search for keypoints works by searching for the argmax in
            each channel. This parameters contains the minimum value that
            the max must have in order to be viewed as a keypoint.
        nb_channels : None or int
            Number of channels of the image on which the keypoints are placed.
            Some keypoint augmenters require that information.
            If set to None, the keypoint's shape will be set
            to `(height, width)`, otherwise `(height, width, nb_channels)`.

        Returns
        -------
        out : KeypointsOnImage
            The extracted keypoints.
        """
        do_assert(len(image.shape) == 3)
        height, width, nb_keypoints = image.shape
        # Normalize if_not_found_coords into fallback x/y values plus a
        # drop flag.
        drop_if_not_found = False
        if if_not_found_coords is None:
            drop_if_not_found = True
            if_not_found_x = -1
            if_not_found_y = -1
        elif isinstance(if_not_found_coords, (tuple, list)):
            do_assert(len(if_not_found_coords) == 2)
            if_not_found_x = if_not_found_coords[0]
            if_not_found_y = if_not_found_coords[1]
        elif isinstance(if_not_found_coords, dict):
            if_not_found_x = if_not_found_coords["x"]
            if_not_found_y = if_not_found_coords["y"]
        else:
            raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (type(if_not_found_coords),))
        keypoints = []
        for i in sm.xrange(nb_keypoints):
            maxidx_flat = np.argmax(image[..., i])
            maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
            found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
            if found:
                keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
            else:
                if drop_if_not_found:
                    pass  # dont add the keypoint to the result list, i.e. drop it
                else:
                    keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))
        out_shape = (height, width)
        if nb_channels is not None:
            out_shape += (nb_channels,)
        return KeypointsOnImage(keypoints, shape=out_shape)

    def copy(self):
        """
        Create a shallow copy of the KeypointsOnImage object.

        Returns
        -------
        out : KeypointsOnImage
            Shallow copy.
        """
        return copy.copy(self)

    def deepcopy(self):
        """
        Create a deep copy of the KeypointsOnImage object.

        Returns
        -------
        out : KeypointsOnImage
            Deep copy.
        """
        # for some reason deepcopy is way slower here than manual copy
        kps = [Keypoint(x=kp.x, y=kp.y) for kp in self.keypoints]
        return KeypointsOnImage(kps, tuple(self.shape))

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "KeypointsOnImage(%s, shape=%s)" % (str(self.keypoints), self.shape)
# TODO functions: square(), to_aspect_ratio(), extend()/add_border(), contains_point()
class BoundingBox(object):
    """
    Class representing bounding boxes.

    Each bounding box is parameterized by its top left and bottom right corners. Both are given
    as x and y-coordinates.

    Parameters
    ----------
    x1 : number
        X-coordinate of the top left of the bounding box.
    y1 : number
        Y-coordinate of the top left of the bounding box.
    x2 : number
        X-coordinate of the bottom right of the bounding box.
    y2 : number
        Y-coordinate of the bottom right of the bounding box.
    label : None or string, optional(default=None)
        Label of the bounding box, e.g. a string representing the class.
    """
    def __init__(self, x1, y1, x2, y2, label=None):
        """Create a new BoundingBox instance."""
        # Swap coordinates if necessary so that (x1, y1) is always the top
        # left and (x2, y2) always the bottom right corner.
        if x1 > x2:
            x2, x1 = x1, x2
        # NOTE(review): strict inequality means zero-area boxes
        # (x1 == x2 or y1 == y2) are rejected — confirm intended.
        do_assert(x2 > x1)
        if y1 > y2:
            y2, y1 = y1, y2
        do_assert(y2 > y1)
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
        self.label = label

    @property
    def x1_int(self):
        """
        Return the x-coordinate of the top left corner as an integer.

        Returns
        -------
        result : int
            X-coordinate of the top left corner, rounded to the closest integer.
        """
        return int(round(self.x1))

    @property
    def y1_int(self):
        """
        Return the y-coordinate of the top left corner as an integer.

        Returns
        -------
        result : int
            Y-coordinate of the top left corner, rounded to the closest integer.
        """
        return int(round(self.y1))

    @property
    def x2_int(self):
        """
        Return the x-coordinate of the bottom right corner as an integer.

        Returns
        -------
        result : int
            X-coordinate of the bottom right corner, rounded to the closest integer.
        """
        return int(round(self.x2))

    @property
    def y2_int(self):
        """
        Return the y-coordinate of the bottom right corner as an integer.

        Returns
        -------
        result : int
            Y-coordinate of the bottom right corner, rounded to the closest integer.
        """
        return int(round(self.y2))

    @property
    def height(self):
        """
        Estimate the height of the bounding box.

        Returns
        -------
        result : number
            Height of the bounding box.
        """
        return self.y2 - self.y1

    @property
    def width(self):
        """
        Estimate the width of the bounding box.

        Returns
        -------
        result : number
            Width of the bounding box.
        """
        return self.x2 - self.x1

    @property
    def center_x(self):
        """
        Estimate the x-coordinate of the center point of the bounding box.

        Returns
        -------
        result : number
            X-coordinate of the center point of the bounding box.
        """
        return self.x1 + self.width/2

    @property
    def center_y(self):
        """
        Estimate the y-coordinate of the center point of the bounding box.

        Returns
        -------
        result : number
            Y-coordinate of the center point of the bounding box.
        """
        return self.y1 + self.height/2

    @property
    def area(self):
        """
        Estimate the area of the bounding box.

        Returns
        -------
        result : number
            Area of the bounding box, i.e. `height * width`.
        """
        return self.height * self.width

    def project(self, from_shape, to_shape):
        """
        Project the bounding box onto a new position on a new image.

        E.g. if the bounding box is on its original image at
        x1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto
        a new image with size (width=200, height=200), its new position will
        be (x1=20, y1=40). (Analogous for x2/y2.)

        This is intended for cases where the original image is resized.
        It cannot be used for more complex changes (e.g. padding, cropping).

        Parameters
        ----------
        from_shape : tuple
            Shape of the original image. (Before resize.)
        to_shape : tuple
            Shape of the new image. (After resize.)

        Returns
        -------
        out : BoundingBox
            BoundingBox object with new coordinates.
        """
        if from_shape[0:2] == to_shape[0:2]:
            return self.copy()
        else:
            from_height, from_width = from_shape[0:2]
            to_height, to_width = to_shape[0:2]
            do_assert(from_height > 0)
            do_assert(from_width > 0)
            do_assert(to_height > 0)
            do_assert(to_width > 0)
            # Scale each coordinate by the ratio of new to old size per axis.
            x1 = (self.x1 / from_width) * to_width
            y1 = (self.y1 / from_height) * to_height
            x2 = (self.x2 / from_width) * to_width
            y2 = (self.y2 / from_height) * to_height
            return self.copy(
                x1=x1,
                y1=y1,
                x2=x2,
                y2=y2,
                label=self.label
            )

    def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
        """
        Extend the size of the bounding box along its sides.

        Parameters
        ----------
        all_sides : number, optional(default=0)
            Value by which to extend the bounding box size along all sides.
        top : number, optional(default=0)
            Value by which to extend the bounding box size along its top side.
        right : number, optional(default=0)
            Value by which to extend the bounding box size along its right side.
        bottom : number, optional(default=0)
            Value by which to extend the bounding box size along its bottom side.
        left : number, optional(default=0)
            Value by which to extend the bounding box size along its left side.

        Returns
        -------
        result : BoundingBox
            Extended bounding box.
        """
        # NOTE(review): the label is not carried over to the extended box
        # (unlike in copy()/project()) — confirm this is intended.
        return BoundingBox(
            x1=self.x1 - all_sides - left,
            x2=self.x2 + all_sides + right,
            y1=self.y1 - all_sides - top,
            y2=self.y2 + all_sides + bottom
        )

    def intersection(self, other, default=None):
        """
        Compute the intersection bounding box of this bounding box and another one.

        Parameters
        ----------
        other : BoundingBox
            Other bounding box with which to generate the intersection.
        default : any, optional(default=None)
            Value to return if the two bounding boxes do not intersect.

        Returns
        -------
        result : BoundingBox or any
            Intersection bounding box of the two bounding boxes, or
            `default` if they do not intersect.
        """
        x1_i = max(self.x1, other.x1)
        y1_i = max(self.y1, other.y1)
        x2_i = min(self.x2, other.x2)
        y2_i = min(self.y2, other.y2)
        # No overlap (or zero-area overlap) -> return the default value.
        if x1_i >= x2_i or y1_i >= y2_i:
            return default
        else:
            return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)

    def union(self, other):
        """
        Compute the union bounding box of this bounding box and another one.

        This is equivalent to drawing a bounding box around all corners points of both
        bounding boxes.

        Parameters
        ----------
        other : BoundingBox
            Other bounding box with which to generate the union.

        Returns
        -------
        result : BoundingBox
            Union bounding box of the two bounding boxes.
        """
        return BoundingBox(
            x1=min(self.x1, other.x1),
            y1=min(self.y1, other.y1),
            x2=max(self.x2, other.x2),
            y2=max(self.y2, other.y2),
        )

    def iou(self, other):
        """
        Compute the IoU of this bounding box with another one.

        IoU is the intersection over union, defined as:
            area(intersection(A, B)) / area(union(A, B))
            = area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))

        Parameters
        ----------
        other : BoundingBox
            Other bounding box with which to compare.

        Returns
        -------
        result : float
            IoU between the two bounding boxes.
        """
        inters = self.intersection(other)
        if inters is None:
            return 0
        else:
            return inters.area / (self.area + other.area - inters.area)

    def is_fully_within_image(self, image):
        """
        Estimate whether the bounding box is fully inside the image area.

        Parameters
        ----------
        image : (H,W,...) ndarray or tuple of at least two ints
            Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
            assumed to represent the image shape.

        Returns
        -------
        result : bool
            True if the bounding box is fully inside the image area.
            False otherwise.
        """
        if isinstance(image, tuple):
            shape = image
        else:
            shape = image.shape
        height, width = shape[0:2]
        return self.x1 >= 0 and self.x2 <= width and self.y1 >= 0 and self.y2 <= height

    def is_partly_within_image(self, image):
        """
        Estimate whether the bounding box is at least partially inside the image area.

        Parameters
        ----------
        image : (H,W,...) ndarray or tuple of at least two ints
            Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
            assumed to represent the image shape.

        Returns
        -------
        result : bool
            True if the bounding box is at least partially inside the image area.
            False otherwise.
        """
        if isinstance(image, tuple):
            shape = image
        else:
            shape = image.shape
        height, width = shape[0:2]
        # Partially inside <=> a non-empty intersection with the image's
        # own bounding box exists.
        img_bb = BoundingBox(x1=0, x2=width, y1=0, y2=height)
        return self.intersection(img_bb) is not None

    def is_out_of_image(self, image, fully=True, partly=False):
        """
        Estimate whether the bounding box is partially or fully outside of the image area.

        Parameters
        ----------
        image : (H,W,...) ndarray or tuple of ints
            Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
            assumed to represent the image shape and must contain at least two integers.
        fully : bool, optional(default=True)
            Whether to return True if the bounding box is fully outside fo the image area.
        partly : bool, optional(default=False)
            Whether to return True if the bounding box is at least partially outside fo the
            image area.

        Returns
        -------
        result : bool
            True if the bounding box is partially/fully outside of the image area, depending
            on defined parameters. False otherwise.
        """
        if self.is_fully_within_image(image):
            return False
        elif self.is_partly_within_image(image):
            return partly
        else:
            return fully

    def cut_out_of_image(self, image):
        """
        Cut off all parts of the bounding box that are outside of the image.

        Parameters
        ----------
        image : (H,W,...) ndarray or tuple of at least two ints
            Image dimensions to use for the clipping of the bounding box. If an ndarray, its
            shape will be used. If a tuple, it is assumed to represent the image shape.

        Returns
        -------
        result : BoundingBox
            Bounding box, clipped to fall within the image dimensions.
        """
        if isinstance(image, tuple):
            shape = image
        else:
            shape = image.shape
        height, width = shape[0:2]
        do_assert(height > 0)
        do_assert(width > 0)
        x1 = np.clip(self.x1, 0, width)
        x2 = np.clip(self.x2, 0, width)
        y1 = np.clip(self.y1, 0, height)
        y2 = np.clip(self.y2, 0, height)
        return self.copy(
            x1=x1,
            y1=y1,
            x2=x2,
            y2=y2,
            label=self.label
        )

    def shift(self, top=None, right=None, bottom=None, left=None):
        """
        Shift the bounding box from one or more image sides, i.e. move it on the x/y-axis.

        Parameters
        ----------
        top : None or int, optional(default=None)
            Amount of pixels by which to shift the bounding box from the top.
        right : None or int, optional(default=None)
            Amount of pixels by which to shift the bounding box from the right.
        bottom : None or int, optional(default=None)
            Amount of pixels by which to shift the bounding box from the bottom.
        left : None or int, optional(default=None)
            Amount of pixels by which to shift the bounding box from the left.

        Returns
        -------
        result : BoundingBox
            Shifted bounding box.
        """
        top = top if top is not None else 0
        right = right if right is not None else 0
        bottom = bottom if bottom is not None else 0
        left = left if left is not None else 0
        # Opposite sides cancel each other out, e.g. top and bottom shifts
        # both act on the y axis with opposite signs.
        return self.copy(
            x1=self.x1+left-right,
            x2=self.x2+left-right,
            y1=self.y1+top-bottom,
            y2=self.y2+top-bottom
        )

    def draw_on_image(self, image, color=[0, 255, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):  # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Draw the bounding box on an image.

        Parameters
        ----------
        image : (H,W,C) ndarray(uint8)
            The image onto which to draw the bounding box.
        color : iterable of int, optional(default=[0,255,0])
            The color to use, corresponding to the channel layout of the image. Usually RGB.
        alpha : float, optional(default=1.0)
            The transparency of the drawn bounding box, where 1.0 denotes no transparency and
            0.0 is invisible.
        thickness : int, optional(default=1)
            The thickness of the bounding box in pixels. If the value is larger than 1, then
            additional pixels will be added around the bounding box (i.e. extension towards the
            outside).
        copy : bool, optional(default=True)
            Whether to copy the input image or change it in-place.
        raise_if_out_of_image : bool, optional(default=False)
            Whether to raise an error if the bounding box is partially/fully outside of the
            image. If set to False, no error will be raised and only the parts inside the image
            will be drawn.

        Returns
        -------
        result : (H,W,C) ndarray(uint8)
            Image with bounding box drawn on it.
        """
        if raise_if_out_of_image and self.is_out_of_image(image):
            raise Exception("Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s." % (self.x1, self.y1, self.x2, self.y2, image.shape))
        result = np.copy(image) if copy else image
        if isinstance(color, (tuple, list)):
            color = np.uint8(color)
        # Draw the rectangle `thickness` times, each iteration one pixel
        # further towards the outside.
        for i in range(thickness):
            y = [self.y1_int-i, self.y1_int-i, self.y2_int+i, self.y2_int+i]
            x = [self.x1_int-i, self.x2_int+i, self.x2_int+i, self.x1_int-i]
            rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
            if alpha >= 0.99:
                # Practically opaque -> plain assignment, no blending needed.
                result[rr, cc, :] = color
            else:
                if is_float_array(result):
                    # Alpha-blend in float space directly.
                    result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                    result = np.clip(result, 0, 255)
                else:
                    # Blend in float space, then convert back to the input
                    # dtype to avoid integer rounding artifacts.
                    input_dtype = result.dtype
                    result = result.astype(np.float32)
                    result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                    result = np.clip(result, 0, 255).astype(input_dtype)
        return result

    def extract_from_image(self, image):
        """
        Extract the image pixels within the bounding box.

        This function will zero-pad the image if the bounding box is partially/fully outside of
        the image.

        Parameters
        ----------
        image : (H,W) or (H,W,C) ndarray
            The image from which to extract the pixels within the bounding box.

        Returns
        -------
        result : (H',W') or (H',W',C) ndarray
            Pixels within the bounding box. Zero-padded if the bounding box is partially/fully
            outside of the image.
        """
        pad_top = 0
        pad_right = 0
        pad_bottom = 0
        pad_left = 0
        height, width = image.shape[0], image.shape[1]
        x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
        # if the bb is outside of the image area, the following pads the image
        # first with black pixels until the bb is inside the image
        # and only then extracts the image area
        # TODO probably more efficient to initialize an array of zeros
        # and copy only the portions of the bb into that array that are
        # natively inside the image area
        if x1 < 0:
            pad_left = abs(x1)
            x2 = x2 + abs(x1)
            x1 = 0
        if y1 < 0:
            pad_top = abs(y1)
            y2 = y2 + abs(y1)
            y1 = 0
        # NOTE(review): `x2 - (width - 1)` pads one extra column when
        # x2 == width, although the slice below would already be valid
        # then (analogous for y2/height) — confirm intended.
        if x2 >= width:
            pad_right = x2 - (width - 1)
        if y2 >= height:
            pad_bottom = y2 - (height - 1)
        if any([val > 0 for val in [pad_top, pad_right, pad_bottom, pad_left]]):
            if len(image.shape) == 2:
                image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right)), mode="constant")
            else:
                image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), mode="constant")
        return image[y1:y2, x1:x2]

    # TODO also add to_heatmap
    # TODO add this to BoundingBoxesOnImage
    def to_keypoints(self):
        """
        Convert the corners of the bounding box to keypoints (clockwise, starting at top left).

        Returns
        -------
        result : list of Keypoint
            Corners of the bounding box as keypoints.
        """
        return [
            Keypoint(x=self.x1, y=self.y1),
            Keypoint(x=self.x2, y=self.y1),
            Keypoint(x=self.x2, y=self.y2),
            Keypoint(x=self.x1, y=self.y2)
        ]

    def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
        """
        Create a shallow copy of the BoundingBox object.

        Parameters
        ----------
        x1 : None or number
            If not None, then the x1 coordinate of the copied object will be set to this value.
        y1 : None or number
            If not None, then the y1 coordinate of the copied object will be set to this value.
        x2 : None or number
            If not None, then the x2 coordinate of the copied object will be set to this value.
        y2 : None or number
            If not None, then the y2 coordinate of the copied object will be set to this value.
        label : None or string
            If not None, then the label of the copied object will be set to this value.

        Returns
        -------
        result : BoundingBox
            Shallow copy.
        """
        return BoundingBox(
            x1=self.x1 if x1 is None else x1,
            x2=self.x2 if x2 is None else x2,
            y1=self.y1 if y1 is None else y1,
            y2=self.y2 if y2 is None else y2,
            label=self.label if label is None else label
        )

    def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
        """
        Create a deep copy of the BoundingBox object.

        Parameters are identical to `copy()`.

        Returns
        -------
        out : BoundingBox
            Deep copy.
        """
        # The coordinates are plain numbers, so a shallow copy is
        # equivalent to a deep copy here.
        return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "BoundingBox(x1=%.4f, y1=%.4f, x2=%.4f, y2=%.4f, label=%s)" % (self.x1, self.y1, self.x2, self.y2, self.label)
class BoundingBoxesOnImage(object):
"""
Object that represents all bounding boxes on a single image.
Parameters
----------
bounding_boxes : list of BoundingBox
List of bounding boxes on the image.
shape : tuple of int
The shape of the image on which the bounding boxes are placed.
Examples
--------
>>> bbs = [
>>> BoundingBox(x1=10, y1=20, x2=20, y2=30),
>>> BoundingBox(x1=25, y1=50, x2=30, y2=70)
>>> ]
>>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
"""
def __init__(self, bounding_boxes, shape):
self.bounding_boxes = bounding_boxes
if is_np_array(shape):
self.shape = shape.shape
else:
do_assert(isinstance(shape, (tuple, list)))
self.shape = tuple(shape)
@property
def height(self):
"""
Get the height of the image on which the bounding boxes fall.
Returns
-------
result : int
Image height.
"""
return self.shape[0]
@property
def width(self):
"""
Get the width of the image on which the bounding boxes fall.
Returns
-------
result : int
Image width.
"""
return self.shape[1]
def on(self, image):
"""
Project bounding boxes from one image to a new one.
Parameters
----------
image : ndarray or tuple
New image onto which the bounding boxes are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
keypoints : BoundingBoxesOnImage
Object containing all projected bounding boxes.
"""
if is_np_array(image):
shape = image.shape
else:
shape = image
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
else:
bounding_boxes = [bb.project(self.shape, shape) for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bounding_boxes, shape)
def draw_on_image(self, image, color=[0, 255, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
"""
Draw all bounding boxes onto a given image.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the bounding boxes.
This image should usually have the same shape as
set in BoundingBoxesOnImage.shape.
color : int or list of ints or tuple of ints or (3,) ndarray, optional(default=[0, 255, 0])
The RGB color of all bounding boxes. If a single int `C`, then that is
equivalent to (C,C,C).
size : float, optional(default=1.0)
Alpha/transparency of the bounding box.
thickness : int, optional(default=1)
Thickness in pixels.
copy : bool, optional(default=True)
Whether to copy the image before drawing the points.
raise_if_out_of_image : bool, optional(default=False)
Whether to raise an exception if any bounding box is outside of the
image.
Returns
-------
image : (H,W,3) ndarray
Image with drawn bounding boxes.
"""
for bb in self.bounding_boxes:
image = bb.draw_on_image(
image,
color=color,
alpha=alpha,
thickness=thickness,
copy=copy,
raise_if_out_of_image=raise_if_out_of_image
)
return image
def remove_out_of_image(self, fully=True, partly=False):
"""
Remove all bounding boxes that are fully or partially outside of the image.
Parameters
----------
fully : bool, optional(default=True)
Whether to remove bounding boxes that are fully outside of the image.
partly : bool, optional(default=False)
Whether to remove bounding boxes that are partially outside of the image.
Returns
-------
result : BoundingBoxesOnImage
Reduced set of bounding boxes, with those that were fully/partially outside of
the image removed.
"""
bbs_clean = [bb for bb in self.bounding_boxes if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
return BoundingBoxesOnImage(bbs_clean, shape=self.shape)
def cut_out_of_image(self):
"""
Cut off all parts from all bounding boxes that are outside of the image.
Returns
-------
result : BoundingBoxesOnImage
Bounding boxes, clipped to fall within the image dimensions.
"""
bbs_cut = [bb.cut_out_of_image(self.shape) for bb in self.bounding_boxes if bb.is_partly_within_image(self.shape)]
return BoundingBoxesOnImage(bbs_cut, shape=self.shape)
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift all bounding boxes from one or more image sides, i.e. move them on the x/y-axis.
Parameters
----------
top : None or int, optional(default=None)
Amount of pixels by which to shift all bounding boxes from the top.
right : None or int, optional(default=None)
Amount of pixels by which to shift all bounding boxes from the right.
bottom : None or int, optional(default=None)
Amount of pixels by which to shift all bounding boxes from the bottom.
left : None or int, optional(default=None)
Amount of pixels by which to shift all bounding boxes from the left.
Returns
-------
result : BoundingBoxesOnImage
Shifted bounding boxes.
"""
bbs_new = [bb.shift(top=top, right=right, bottom=bottom, left=left) for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bbs_new, shape=self.shape)
def copy(self):
"""
Create a shallow copy of the BoundingBoxesOnImage object.
Returns
-------
out : BoundingBoxesOnImage
Shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Create a deep copy of the BoundingBoxesOnImage object.
Returns
-------
out : KeypointsOnImage
Deep copy.
"""
# Manual copy is far faster than deepcopy for KeypointsOnImage,
# so use manual copy here too
bbs = [bb.deepcopy() for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bbs, tuple(self.shape))
def __repr__(self):
return self.__str__()
def __str__(self):
return "BoundingBoxesOnImage(%s, shape=%s)" % (str(self.bounding_boxes), self.shape)
class HeatmapsOnImage(object):
    """
    Object representing heatmaps on images.
    Parameters
    ----------
    arr : (H,W) or (H,W,C) ndarray(float32)
        Array representing the heatmap(s). If multiple heatmaps, then C is expected to denote
        their number.
    shape : tuple of ints
        Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
        heatmap(s) array, unless it is identical to the image shape (note the likely
        difference between the arrays in the number of channels).
        If there is not a corresponding image, use the shape of the heatmaps array.
    min_value : float, optional(default=0.0)
        Minimum value for the heatmaps that `arr` represents. This will usually
        be 0.0.
    max_value : float, optional(default=1.0)
        Maximum value for the heatmaps that `arr` represents. This will usually
        be 1.0.
    """
    def __init__(self, arr, shape, min_value=0.0, max_value=1.0):
        """Construct a new HeatmapsOnImage object."""
        # NOTE: validation is done via assert and thus disappears under `python -O`.
        assert arr.dtype.type in [np.float32]
        assert arr.ndim in [2, 3]
        assert len(shape) in [2, 3]
        assert min_value < max_value
        # Only the first 50 values are range-checked -- presumably to keep the
        # check cheap for large arrays; out-of-range values beyond that window
        # go undetected.
        assert np.min(arr.flat[0:50]) >= min_value - np.finfo(arr.dtype).eps
        assert np.max(arr.flat[0:50]) <= max_value + np.finfo(arr.dtype).eps
        if arr.ndim == 2:
            # Normalize to (H,W,C); remember the original rank so that
            # get_arr() can return a 2D array again.
            arr = arr[..., np.newaxis]
            self.arr_was_2d = True
        else:
            self.arr_was_2d = False
        # Heatmaps are always stored internally with value range (0.0, 1.0);
        # skip the normalization if the input already uses that range.
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < min_value < 0.0 + eps
        max_is_one = 1.0 - eps < max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            self.arr_0to1 = arr
        else:
            self.arr_0to1 = (arr - min_value) / (max_value - min_value)
        self.shape = shape
        self.min_value = min_value
        self.max_value = max_value
    def get_arr(self):
        """
        Get the heatmap array in the desired value range.
        The HeatmapsOnImage object saves heatmaps internally in the value range (min=0.0, max=1.0).
        This function converts the internal representation to (min=min_value, max=max_value),
        where min_value and max_value are provided upon instantiation of the object.
        Returns
        -------
        result : (H,W) or (H,W,C) ndarray(float32)
            Heatmap array.
        """
        # Drop the channel axis again if the original input array was 2D.
        if self.arr_was_2d and self.arr_0to1.shape[2] == 1:
            arr = self.arr_0to1[:, :, 0]
        else:
            arr = self.arr_0to1
        # If the target range already is (0.0, 1.0), only a copy is needed.
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < self.min_value < 0.0 + eps
        max_is_one = 1.0 - eps < self.max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            return np.copy(arr)
        else:
            diff = self.max_value - self.min_value
            return self.min_value + diff * arr
    # TODO
    #def find_global_maxima(self):
    #    raise NotImplementedError()
    def draw(self, size=None, cmap="jet"):
        """
        Render the heatmaps as RGB images.
        Parameters
        ----------
        size : None or float or iterable of two ints or iterable of two floats, optional(default=None)
            Size of the rendered RGB image as (height, width).
            See `imresize_single_image()` for details.
            If set to None, no resizing is performed and the size of the heatmaps array is used.
        cmap : string or None, optional(default="jet")
            Color map of matplotlib to use in order to convert the heatmaps into RGB images.
            If set to None, no color map will be used and the heatmaps will be converted
            as simple intensity maps.
        Returns
        -------
        heatmaps_drawn : list of (H,W,3) ndarray(uint8)
            Rendered heatmaps, one per heatmap array channel.
        """
        heatmaps_uint8 = self.to_uint8()
        heatmaps_drawn = []
        for c in sm.xrange(heatmaps_uint8.shape[2]):
            # c:c+1 here, because the additional axis is needed by imresize_single_image
            heatmap_c = heatmaps_uint8[..., c:c+1]
            if size is not None:
                heatmap_c_rs = imresize_single_image(heatmap_c, size,
                                                     interpolation="nearest")
            else:
                heatmap_c_rs = heatmap_c
            heatmap_c_rs = np.squeeze(heatmap_c_rs).astype(np.float32) / 255.0
            if cmap is not None:
                # Import lazily so that matplotlib is only required when a
                # color map is actually requested.
                import matplotlib.pyplot as plt
                cmap_func = plt.get_cmap(cmap)
                heatmap_cmapped = cmap_func(heatmap_c_rs)
                # cmap_func returns RGBA; drop the alpha channel.
                heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
            else:
                # No color map: replicate the intensities over 3 channels.
                heatmap_cmapped = np.tile(heatmap_c_rs[..., np.newaxis], (1, 1, 3))
            heatmap_cmapped = np.clip(heatmap_cmapped * 255, 0, 255).astype(np.uint8)
            heatmaps_drawn.append(heatmap_cmapped)
        return heatmaps_drawn
    def draw_on_image(self, image, alpha=0.75, cmap="jet", resize="heatmaps"):
        """
        Draw the heatmaps as overlays over an image.
        Parameters
        ----------
        image : (H,W,3) ndarray(uint8)
            Image onto which to draw the heatmaps.
        alpha : float, optional(default=0.75)
            Alpha/opacity value to use for the mixing of image and heatmaps.
            Higher values mean that the heatmaps will be more visible and the image less visible.
        cmap : string or None, optional(default="jet")
            Color map to use. See `HeatmapsOnImage.draw()` for details.
        resize : "heatmaps" or "image", optional(default="heatmaps")
            In case of size differences between the image and heatmaps, either the image or
            the heatmaps can be resized. This parameter controls which of the two will be resized
            to the other's size.
        Returns
        -------
        mix : list of (H,W,3) ndarray(uint8)
            Rendered overlays, one per heatmap array channel.
        """
        # assert RGB image
        assert image.ndim == 3
        assert image.shape[2] == 3
        assert image.dtype.type == np.uint8
        assert 0 - 1e-8 <= alpha <= 1.0 + 1e-8
        assert resize in ["heatmaps", "image"]
        if resize == "image":
            image = imresize_single_image(image, self.arr_0to1.shape[0:2], interpolation="cubic")
        heatmaps_drawn = self.draw(
            size=image.shape[0:2] if resize == "heatmaps" else None,
            cmap=cmap
        )
        # Alpha-blend each rendered heatmap channel with the image.
        mix = [
            np.clip((1-alpha) * image + alpha * heatmap_i, 0, 255).astype(np.uint8)
            for heatmap_i
            in heatmaps_drawn
        ]
        return mix
    def invert(self):
        """
        Inverts each value in the heatmap, shifting low towards high values and vice versa.
        This changes each value to::
            v' = max - (v - min)
        where `v` is the value at some spatial location, `min` is the minimum value in the heatmap
        and `max` is the maximum value.
        As the heatmap uses internally a 0.0 to 1.0 representation, this simply
        becomes `v' = 1.0 - v`.
        Note that the attributes `min_value` and `max_value` are not switched. They both keep their
        values.
        This function can be useful e.g. when working with depth maps, where algorithms might have
        an easier time representing the furthest away points with zeros, requiring an inverted
        depth map.
        Returns
        -------
        result : HeatmapsOnImage
            Inverted heatmap.
        """
        arr_inv = HeatmapsOnImage.from_0to1(1 - self.arr_0to1, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
        arr_inv.arr_was_2d = self.arr_was_2d
        return arr_inv
    def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
        """
        Pad the heatmaps on their top/right/bottom/left side.
        Parameters
        ----------
        top : int, optional(default=0)
            Amount of pixels to add at the top side of the heatmaps. Must be 0 or greater.
        right : int, optional(default=0)
            Amount of pixels to add at the right side of the heatmaps. Must be 0 or greater.
        bottom : int, optional(default=0)
            Amount of pixels to add at the bottom side of the heatmaps. Must be 0 or greater.
        left : int, optional(default=0)
            Amount of pixels to add at the left side of the heatmaps. Must be 0 or greater.
        mode : string, optional(default="constant")
            Padding mode to use. See `numpy.pad()` for details.
        cval : number, optional(default=0.0)
            Value to use for padding if mode="constant". See `numpy.pad()` for details.
        Returns
        -------
        result : HeatmapsOnImage
            Padded heatmaps of height H'=H+top+bottom and width W'=W+left+right.
        """
        # `pad` here is the module-level helper, not this method.
        arr_0to1_padded = pad(self.arr_0to1, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
        return HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
    def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
        """
        Pad the heatmaps on their sides so that they match a target aspect ratio.
        Depending on which dimension is smaller (height or width), only the corresponding
        sides (left/right or top/bottom) will be padded. In each case, both of the sides will
        be padded equally.
        Parameters
        ----------
        aspect_ratio : float
            Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
            as much width as height.
        mode : string, optional(default="constant")
            Padding mode to use. See `numpy.pad()` for details.
        cval : number, optional(default=0.0)
            Value to use for padding if mode="constant". See `numpy.pad()` for details.
        return_pad_amounts : bool, optional(default=False)
            If False, then only the padded image will be returned. If True, a tuple with two
            entries will be returned, where the first entry is the padded image and the second
            entry are the amounts by which each image side was padded. These amounts are again a
            tuple of the form (top, right, bottom, left), with each value being an integer.
        Returns
        -------
        result : tuple
            First tuple entry: Padded heatmaps as HeatmapsOnImage object.
            Second tuple entry: Amounts by which the heatmaps were padded on each side, given
            as a tuple (top, right, bottom, left).
            If return_pad_amounts is False, then only the heatmaps object is returned.
        """
        # Delegates to the module-level pad_to_aspect_ratio() helper.
        arr_0to1_padded, pad_amounts = pad_to_aspect_ratio(self.arr_0to1, aspect_ratio=aspect_ratio, mode=mode, cval=cval, return_pad_amounts=True)
        heatmaps = HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
        if return_pad_amounts:
            return heatmaps, pad_amounts
        else:
            return heatmaps
    def avg_pool(self, block_size):
        """
        Rescale the heatmap(s) array using average pooling of a given block/kernel size.
        Parameters
        ----------
        block_size : int or tuple of two ints or tuple of three ints
            Size of each block of values to pool, aka kernel size. See `imgaug.pool()` for details.
        Returns
        -------
        result : HeatmapsOnImage
            Heatmaps after average pooling.
        """
        arr_0to1_reduced = avg_pool(self.arr_0to1, block_size, cval=0.0)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
    def max_pool(self, block_size):
        """
        Rescale the heatmap(s) array using max-pooling of a given block/kernel size.
        Parameters
        ----------
        block_size : int or tuple of two ints or tuple of three ints
            Size of each block of values to pool, aka kernel size. See `imgaug.pool()` for details.
        Returns
        -------
        result : HeatmapsOnImage
            Heatmaps after max-pooling.
        """
        arr_0to1_reduced = max_pool(self.arr_0to1, block_size)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
    def scale(self, sizes, interpolation="cubic"):
        """
        Rescale the heatmap(s) array to the provided size given the provided interpolation.
        Parameters
        ----------
        sizes : float or iterable of two ints or iterable of two floats
            New size of the array in (height, width). See `imresize_single_image()` for details.
        interpolation : None or string or int, optional(default="cubic")
            The interpolation to use during resize. See `imresize_single_image()` for details.
        Returns
        -------
        result : HeatmapsOnImage
            Rescaled heatmaps object.
        """
        arr_0to1_rescaled = imresize_single_image(self.arr_0to1, sizes, interpolation=interpolation)
        # cubic interpolation can lead to values outside of [0.0, 1.0],
        # see https://github.com/opencv/opencv/issues/7195
        # TODO area interpolation too?
        arr_0to1_rescaled = np.clip(arr_0to1_rescaled, 0.0, 1.0)
        return HeatmapsOnImage.from_0to1(arr_0to1_rescaled, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
    def to_uint8(self):
        """
        Convert this heatmaps object to a 0-to-255 array.
        Returns
        -------
        arr_uint8 : (H,W,C) ndarray(uint8)
            Heatmap as a 0-to-255 array.
        """
        # TODO this always returns (H,W,C), even if input ndarray was originally (H,W)
        # does it make sense here to also return (H,W) if self.arr_was_2d?
        arr_0to255 = np.clip(np.round(self.arr_0to1 * 255), 0, 255)
        arr_uint8 = arr_0to255.astype(np.uint8)
        return arr_uint8
    @staticmethod
    def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from an heatmap array containing values ranging from 0 to 255.
        Parameters
        ----------
        arr_uint8 : (H,W) or (H,W,C) ndarray(uint8)
            Heatmap(s) array, where H=height, W=width, C=heatmap channels.
        shape : tuple of ints
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.
        min_value : float, optional(default=0.0)
            Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
            be 0.0. It is used when calling `HeatmapsOnImage.get_arr()`, which converts the
            underlying (0, 255) array to value range (min_value, max_value).
        max_value : float, optional(default=1.0)
            Maximum value for the heatmaps that 0-to-255 array represents.
            See parameter min_value for details.
        Returns
        -------
        heatmaps : HeatmapsOnImage
            Heatmaps object.
        """
        arr_0to1 = arr_uint8.astype(np.float32) / 255.0
        return HeatmapsOnImage.from_0to1(arr_0to1, shape, min_value=min_value, max_value=max_value)
    @staticmethod
    def from_0to1(arr_0to1, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from an heatmap array containing values ranging from 0.0 to 1.0.
        Parameters
        ----------
        arr_0to1 : (H,W) or (H,W,C) ndarray(float32)
            Heatmap(s) array, where H=height, W=width, C=heatmap channels.
        shape : tuple of ints
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.
        min_value : float, optional(default=0.0)
            Minimum value for the heatmaps that the 0-to-1 array represents. This will usually
            be 0.0. It is used when calling `HeatmapsOnImage.get_arr()`, which converts the
            underlying (0.0, 1.0) array to value range (min_value, max_value).
            E.g. if you started with heatmaps in the range (-1.0, 1.0) and projected these
            to (0.0, 1.0), you should call this function with min_value=-1.0, max_value=1.0
            so that `get_arr()` returns heatmap arrays having value range (-1.0, 1.0).
        max_value : float, optional(default=1.0)
            Maximum value for the heatmaps that to 0-to-255 array represents.
            See parameter min_value for details.
        Returns
        -------
        heatmaps : HeatmapsOnImage
            Heatmaps object.
        """
        # Construct with (0.0, 1.0) so that __init__ skips re-normalizing the
        # already-normalized array, then record the true output value range.
        heatmaps = HeatmapsOnImage(arr_0to1, shape, min_value=0.0, max_value=1.0)
        heatmaps.min_value = min_value
        heatmaps.max_value = max_value
        return heatmaps
    @staticmethod
    def change_normalization(arr, source, target):
        """
        Change the value range of a heatmap from one min-max to another min-max.
        E.g. the value range may be changed from min=0.0, max=1.0 to min=-1.0, max=1.0.
        Parameters
        ----------
        arr : ndarray
            Heatmap array to modify.
        source : tuple of two floats
            Current value range of the input array, given as (min, max), where both are float
            values.
        target : tuple of two floats
            Desired output value range of the array, given as (min, max), where both are float
            values.
        Returns
        -------
        arr_target : ndarray
            Input array, with value range projected to the desired target value range.
        """
        assert is_np_array(arr)
        # source/target may also be HeatmapsOnImage objects, in which case
        # their (min_value, max_value) pair is used.
        if isinstance(source, HeatmapsOnImage):
            source = (source.min_value, source.max_value)
        else:
            assert isinstance(source, tuple)
            assert len(source) == 2
            assert source[0] < source[1]
        if isinstance(target, HeatmapsOnImage):
            target = (target.min_value, target.max_value)
        else:
            assert isinstance(target, tuple)
            assert len(target) == 2
            assert target[0] < target[1]
        # Check if source and target are the same (with a tiny bit of tolerance)
        # if so, evade computation and just copy the array instead.
        # This is reasonable, as source and target will often both be (0.0, 1.0).
        eps = np.finfo(arr.dtype).eps
        mins_same = source[0] - 10*eps < target[0] < source[0] + 10*eps
        maxs_same = source[1] - 10*eps < target[1] < source[1] + 10*eps
        if mins_same and maxs_same:
            return np.copy(arr)
        min_source, max_source = source
        min_target, max_target = target
        diff_source = max_source - min_source
        diff_target = max_target - min_target
        arr_0to1 = (arr - min_source) / diff_source
        arr_target = min_target + arr_0to1 * diff_target
        return arr_target
    def copy(self):
        """
        Create a shallow copy of the Heatmaps object.
        NOTE: this implementation delegates to deepcopy(), i.e. the returned
        object does not share the underlying array with the original.
        Returns
        -------
        out : HeatmapsOnImage
            Shallow copy.
        """
        return self.deepcopy()
    def deepcopy(self):
        """
        Create a deep copy of the Heatmaps object.
        Returns
        -------
        out : HeatmapsOnImage
            Deep copy.
        """
        # Rebuilding from get_arr() re-creates the internal (0,1) array.
        return HeatmapsOnImage(self.get_arr(), shape=self.shape, min_value=self.min_value, max_value=self.max_value)
class SegmentationMapOnImage(object):
"""
Object representing a segmentation map associated with an image.
Attributes
----------
DEFAULT_SEGMENT_COLORS : list of tuple of int
Standard RGB colors to use during drawing, ordered by class index.
Parameters
----------
arr : (H,W) ndarray or (H,W,1) ndarray or (H,W,C) ndarray
Array representing the segmentation map. May have datatypes bool, integer or float.
* If bool: Assumed to be of shape (H,W), (H,W,1) or (H,W,C). If (H,W) or (H,W,1) it
is assumed to be for the case of having a single class (where any False denotes
background). Otherwise there are assumed to be C channels, one for each class,
with each of them containing a mask for that class. The masks may overlap.
* If integer: Assumed to be of shape (H,W) or (H,W,1). Each pixel is assumed to
contain an integer denoting the class index. Classes are assumed to be
non-overlapping. The number of classes cannot be guessed from this input, hence
nb_classes must be set.
        * If float: Assumed to be of shape (H,W), (H,W,1) or (H,W,C) with meanings being
similar to the case of `bool`. Values are expected to fall always in the range
0.0 to 1.0 and are usually expected to be either 0.0 or 1.0 upon instantiation
of a new segmentation map. Classes may overlap.
shape : iterable of int
Shape of the corresponding image (NOT the segmentation map array). This is expected
to be (H, W) or (H, W, C) with C usually being 3. If there is no corresponding image,
then use the segmentation map's shape instead.
nb_classes : int or None
        Total number of unique classes that may appear in a segmentation map, i.e. the max
        class index. This may be None if the input array is of type bool or float. The number
        of classes however must be provided if the input array is of type int, as then the
        number of classes cannot be guessed.
"""
DEFAULT_SEGMENT_COLORS = [
(0, 0, 0), # black
(230, 25, 75), # red
(60, 180, 75), # green
(255, 225, 25), # yellow
(0, 130, 200), # blue
(245, 130, 48), # orange
(145, 30, 180), # purple
(70, 240, 240), # cyan
(240, 50, 230), # magenta
(210, 245, 60), # lime
(250, 190, 190), # pink
(0, 128, 128), # teal
(230, 190, 255), # lavender
(170, 110, 40), # brown
(255, 250, 200), # beige
(128, 0, 0), # maroon
(170, 255, 195), # mint
(128, 128, 0), # olive
(255, 215, 180), # coral
(0, 0, 128), # navy
(128, 128, 128), # grey
(255, 255, 255), # white
# --
(115, 12, 37), # dark red
(30, 90, 37), # dark green
(127, 112, 12), # dark yellow
(0, 65, 100), # dark blue
(122, 65, 24), # dark orange
(72, 15, 90), # dark purple
(35, 120, 120), # dark cyan
(120, 25, 115), # dark magenta
(105, 122, 30), # dark lime
(125, 95, 95), # dark pink
(0, 64, 64), # dark teal
(115, 95, 127), # dark lavender
(85, 55, 20), # dark brown
(127, 125, 100), # dark beige
(64, 0, 0), # dark maroon
(85, 127, 97), # dark mint
(64, 64, 0), # dark olive
(127, 107, 90), # dark coral
(0, 0, 64), # dark navy
(64, 64, 64), # dark grey
]
def __init__(self, arr, shape, nb_classes=None):
if arr.dtype.type == np.bool:
assert arr.ndim in [2, 3]
self.input_was = ("bool", arr.ndim)
if arr.ndim == 2:
arr = arr[..., np.newaxis]
arr = arr.astype(np.float32)
elif arr.dtype.type in [np.uint8, np.uint32, np.int8, np.int16, np.int32]:
assert arr.ndim == 2 or (arr.ndim == 3 and arr.shape[2] == 1)
assert nb_classes is not None
assert nb_classes > 0
assert np.min(arr.flat[0:100]) >= 0
assert np.max(arr.flat[0:100]) <= nb_classes
self.input_was = ("int", arr.dtype.type, arr.ndim)
if arr.ndim == 3:
arr = arr[..., 0]
arr = np.eye(nb_classes)[arr] # from class indices to one hot
arr = arr.astype(np.float32)
elif arr.dtype.type in [np.float16, np.float32]:
assert arr.ndim == 3
self.input_was = ("float", arr.dtype.type, arr.ndim)
arr = arr.astype(np.float32)
else:
dt = str(arr.dtype) if is_np_array(arr) else "<no ndarray>"
raise Exception("Input was expected to be an ndarray of dtype bool, uint8, uint32 "
"int8, int16, int32 or float32. Got type %s with dtype %s." % (type(arr), dt))
assert arr.ndim == 3
assert arr.dtype.type == np.float32
self.arr = arr
self.shape = shape
self.nb_classes = nb_classes if nb_classes is not None else arr.shape[2]
#@property
#def nb_classes(self):
# return self.arr.shape[2]
def get_arr_int(self, background_threshold=0.01, background_class_id=0):
"""
Get the segmentation map array as an integer array of shape (H, W).
Each pixel in that array contains an integer value representing the pixel's class.
If multiple classes overlap, the one with the highest local float value is picked.
If that highest local value is below `background_threshold`, the method instead uses
the background class id as the pixel's class value.
Parameters
----------
background_threshold : float, optional(default=0.01)
At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
class-heatmaps has a value above this threshold, the method uses the background class
id instead.
background_class_id : int, optional(default=0)
Class id to fall back to if no class-heatmap passes the threshold at a spatial
location.
Returns
-------
result : (H,W) ndarray(int)
Segmentation map array.
"""
channelwise_max_idx = np.argmax(self.arr, axis=2)
result = channelwise_max_idx
if background_threshold is not None and background_threshold > 0:
probs = np.amax(self.arr, axis=2)
result[probs < background_threshold] = background_class_id
return result.astype(np.int32)
#def get_arr_bool(self, allow_overlapping=False, threshold=0.5, background_threshold=0.01, background_class_id=0):
# # TODO
# raise NotImplementedError()
def draw(self, size=None, background_threshold=0.01, background_class_id=0, colors=None, return_foreground_mask=False):
"""
Render the segmentation map as an RGB image.
Parameters
----------
size : None or float or iterable of two ints or iterable of two floats, optional(default=None)
Size of the rendered RGB image as (height, width).
See `imresize_single_image()` for details.
If set to None, no resizing is performed and the size of the segmentation map array is
used.
background_threshold : float, optional(default=0.01)
At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
class-heatmaps has a value above this threshold, the method uses the background class
id instead.
background_class_id : int, optional(default=0)
Class id to fall back to if no class-heatmap passes the threshold at a spatial
location.
colors : None or list of tuple of int, optional(default=None)
Colors to use. One for each class to draw. If None, then default colors will be used.
return_foreground_mask : bool, optional(default=False)
Whether to return a mask of the same size as the drawn segmentation map, containing
True at any spatial location that is not the background class and False everywhere
else.
Returns
-------
segmap_drawn : (H,W,3) ndarray(uint8)
Rendered segmentation map.
foreground_mask : (H,W) ndarray(bool)
Mask indicating the locations of foreground classes. Only returned if
return_foreground_mask is True.
"""
arr = self.get_arr_int(background_threshold=background_threshold, background_class_id=background_class_id)
nb_classes = self.nb_classes
segmap_drawn = np.zeros((arr.shape[0], arr.shape[1], 3), dtype=np.uint8)
if colors is None:
colors = SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS
assert nb_classes <= len(colors), "Can't draw all %d classes as it would exceed the maximum number of %d available colors." % (nb_classes, len(colors),)
ids_in_map = np.unique(arr)
for c, color in zip(sm.xrange(1+nb_classes), colors):
if c in ids_in_map:
class_mask = (arr == c)
segmap_drawn[class_mask] = color
if return_foreground_mask:
foreground_mask = (arr != background_class_id)
else:
foreground_mask = None
if size is not None:
segmap_drawn = imresize_single_image(segmap_drawn, size, interpolation="nearest")
if foreground_mask is not None:
foreground_mask = imresize_single_image(foreground_mask.astype(np.uint8), size, interpolation="nearest") > 0
if foreground_mask is not None:
return segmap_drawn, foreground_mask
return segmap_drawn
def draw_on_image(self, image, alpha=0.5, resize="segmentation_map", background_threshold=0.01, background_class_id=0, colors=None, draw_background=False):
"""
Draw the segmentation map as an overlay over an image.
Parameters
----------
image : (H,W,3) ndarray(uint8)
Image onto which to draw the segmentation map.
alpha : float, optional(default=0.75)
Alpha/opacity value to use for the mixing of image and segmentation map.
Higher values mean that the segmentation map will be more visible and the image less
visible.
resize : "segmentation_map" or "image", optional(default="segmentation_map")
In case of size differences between the image and segmentation map, either the image or
the segmentation map can be resized. This parameter controls which of the two will be
resized to the other's size.
background_threshold : float, optional(default=0.01)
At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
class-heatmaps has a value above this threshold, the method uses the background class
id instead.
background_class_id : int, optional(default=0)
Class id to fall back to if no class-heatmap passes the threshold at a spatial
location.
colors : None or list of tuple of int, optional(default=None)
Colors to use. One for each class to draw. If None, then default colors will be used.
draw_background : bool, optional(default=False)
If True, the background will be drawn like any other class.
If False, the background will not be drawn, i.e. the respective background pixels
will be identical with the image's RGB color at the corresponding spatial location
and no color overlay will be applied.
Returns
-------
mix : (H,W,3) ndarray(uint8)
Rendered overlays.
"""
# assert RGB image
assert image.ndim == 3
assert image.shape[2] == 3
assert image.dtype.type == np.uint8
assert 0 - 1e-8 <= alpha <= 1.0 + 1e-8
assert resize in ["segmentation_map", "image"]
if resize == "image":
image = imresize_single_image(image, self.arr.shape[0:2], interpolation="cubic")
segmap_drawn, foreground_mask = self.draw(
background_threshold=background_threshold,
background_class_id=background_class_id,
size=image.shape[0:2] if resize == "segmentation_map" else None,
colors=colors,
return_foreground_mask=True
)
if draw_background:
mix = np.clip(
(1-alpha) * image + alpha * segmap_drawn,
0,
255
).astype(np.uint8)
else:
foreground_mask = foreground_mask[..., np.newaxis]
mix = np.zeros_like(image)
mix += (~foreground_mask).astype(np.uint8) * image
mix += foreground_mask.astype(np.uint8) * np.clip(
(1-alpha) * image + alpha * segmap_drawn,
0,
255
).astype(np.uint8)
return mix
def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
"""
Pad the segmentation map on its top/right/bottom/left side.
Parameters
----------
top : int, optional(default=0)
Amount of pixels to add at the top side of the segmentation map. Must be 0 or
greater.
right : int, optional(default=0)
Amount of pixels to add at the right side of the segmentation map. Must be 0 or
greater.
bottom : int, optional(default=0)
Amount of pixels to add at the bottom side of the segmentation map. Must be 0 or
greater.
left : int, optional(default=0)
Amount of pixels to add at the left side of the segmentation map. Must be 0 or
greater.
mode : string, optional(default="constant")
Padding mode to use. See `numpy.pad()` for details.
cval : number, optional(default=0.0)
Value to use for padding if mode="constant". See `numpy.pad()` for details.
Returns
-------
result : SegmentationMapOnImage
Padded segmentation map of height H'=H+top+bottom and width W'=W+left+right.
"""
arr_padded = pad(self.arr, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
return SegmentationMapOnImage(arr_padded, shape=self.shape)
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
"""
Pad the segmentation map on its sides so that its matches a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : string, optional(default="constant")
Padding mode to use. See `numpy.pad()` for details.
cval : number, optional(default=0.0)
Value to use for padding if mode="constant". See `numpy.pad()` for details.
return_pad_amounts : bool, optional(default=False)
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
result : tuple
First tuple entry: Padded segmentation map as SegmentationMapOnImage object.
Second tuple entry: Amounts by which the segmentation map was padded on each side,
given as a tuple (top, right, bottom, left).
If return_pad_amounts is False, then only the segmentation map object is returned.
"""
arr_padded, pad_amounts = pad_to_aspect_ratio(self.arr, aspect_ratio=aspect_ratio, mode=mode, cval=cval, return_pad_amounts=True)
segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)
if return_pad_amounts:
return segmap, pad_amounts
else:
return segmap
def scale(self, sizes, interpolation="cubic"):
    """
    Rescale the internal segmentation map array to the given size.

    Parameters
    ----------
    sizes : float or iterable of two ints or iterable of two floats
        New size as (height, width). See `imresize_single_image()` for details.
    interpolation : None or string or int, optional(default="cubic")
        Interpolation used during resizing. See `imresize_single_image()` for
        details. The map is stored as float-based heatmaps, so smooth
        interpolation is often more reasonable than nearest neighbour.

    Returns
    -------
    result : SegmentationMapOnImage
        Rescaled segmentation map object.
    """
    resized = imresize_single_image(self.arr, sizes, interpolation=interpolation)
    # Cubic interpolation can overshoot the valid [0.0, 1.0] range,
    # see https://github.com/opencv/opencv/issues/7195
    # TODO area interpolation too?
    resized = np.clip(resized, 0.0, 1.0)
    return SegmentationMapOnImage(resized, shape=self.shape)
def to_heatmaps(self, only_nonempty=False, not_none_if_no_nonempty=False):
    """
    Convert the segmentation map to a heatmaps object.

    Each segmentation map class is represented as one heatmap channel.

    Parameters
    ----------
    only_nonempty : bool, optional(default=False)
        If True, only heatmaps for classes that actually appear in the
        segmentation map are generated, and the list of those class ids is
        additionally returned.
    not_none_if_no_nonempty : bool, optional(default=False)
        If `only_nonempty` is True and no channel is non-empty, this function
        normally returns None as the heatmaps object. If this parameter is
        True, a heatmaps object with one channel (class 0) is returned as a
        fallback instead.

    Returns
    -------
    result : HeatmapsOnImage or None
        Segmentation map as heatmaps; None only when `only_nonempty` is True,
        no class appeared and `not_none_if_no_nonempty` is False.
    class_indices : list of int
        Class ids (0 to C-1) actually added to the heatmaps.
        Only returned if `only_nonempty` is True.
    """
    if not only_nonempty:
        return HeatmapsOnImage.from_0to1(self.arr, self.shape, min_value=0.0, max_value=1.0)
    # A channel counts as non-empty if its total activation exceeds a small
    # epsilon. (Was written as `> 0 + 1e-4`, which parses as `> 1e-4` --
    # simplified to the value that is actually compared.)
    nonempty_mask = np.sum(self.arr, axis=(0, 1)) > 1e-4
    if np.sum(nonempty_mask) == 0:
        if not not_none_if_no_nonempty:
            return None, []
        # fallback: pretend class 0 is present
        nonempty_mask[0] = True
    class_indices = np.arange(self.arr.shape[2])[nonempty_mask]
    channels = self.arr[..., class_indices]
    return HeatmapsOnImage(channels, self.shape, min_value=0.0, max_value=1.0), class_indices
@staticmethod
def from_heatmaps(heatmaps, class_indices=None, nb_classes=None):
    """
    Convert heatmaps to a segmentation map.

    Assumes that each class is represented as a single heatmap channel.

    Parameters
    ----------
    heatmaps : HeatmapsOnImage
        Heatmaps to convert.
    class_indices : None or list of int, optional(default=None)
        List of class indices represented by each heatmap channel. See also
        the secondary output of `to_heatmaps()`. If provided, it must have
        the same length as the number of heatmap channels.
    nb_classes : None or int, optional(default=None)
        Number of classes. Must be provided if class_indices is set.

    Returns
    -------
    result : SegmentationMapOnImage
        Segmentation map derived from the heatmaps.
    """
    if class_indices is None:
        return SegmentationMapOnImage(heatmaps.arr_0to1, shape=heatmaps.shape)
    # Validate via do_assert (consistent with the rest of this module and,
    # unlike bare `assert`, not stripped under `python -O`).
    do_assert(nb_classes is not None, "Expected nb_classes to be provided when class_indices is set.")
    do_assert(min(class_indices) >= 0, "Expected class indices to be >=0.")
    do_assert(max(class_indices) < nb_classes, "Expected class indices to be below nb_classes.")
    do_assert(len(class_indices) == heatmaps.arr_0to1.shape[2], "Expected one class index per heatmap channel.")
    arr_0to1 = heatmaps.arr_0to1
    arr_0to1_full = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1], nb_classes), dtype=np.float32)
    class_indices_set = set(class_indices)
    # Copy each heatmap channel into the column of its class; classes that
    # have no channel stay all-zero.
    heatmap_channel = 0
    for c in sm.xrange(nb_classes):
        if c in class_indices_set:
            arr_0to1_full[:, :, c] = arr_0to1[:, :, heatmap_channel]
            heatmap_channel += 1
    return SegmentationMapOnImage(arr_0to1_full, shape=heatmaps.shape)
def copy(self):
    """
    Create a shallow copy of the segmentation map object.

    Returns
    -------
    out : SegmentationMapOnImage
        Shallow copy.
    """
    # NOTE(review): currently delegates to deepcopy(), so the "shallow" copy
    # is in fact deep; kept for API symmetry with other *OnImage classes.
    return self.deepcopy()
def deepcopy(self):
    """
    Create a deep copy of this segmentation map object.

    Returns
    -------
    out : SegmentationMapOnImage
        Deep copy.
    """
    duplicate = SegmentationMapOnImage(self.arr, shape=self.shape, nb_classes=self.nb_classes)
    duplicate.input_was = self.input_was
    return duplicate
############################
# Background augmentation
############################
class Batch(object):
    """
    Class encapsulating a batch before and after augmentation.

    For every input attribute `x` there is a matching `x_aug` attribute that
    starts as None and is filled with the augmented result.

    Parameters
    ----------
    images : None or (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
        The images to augment.
    heatmaps : None or list of HeatmapsOnImage
        The heatmaps to augment.
    segmentation_maps : None or list of SegmentationMapOnImage
        The segmentation maps to augment.
    keypoints : None or list of KeypointOnImage
        The keypoints to augment.
    bounding_boxes : None or list of BoundingBoxesOnImage
        The bounding boxes to augment.
    data : anything
        Additional data that is saved in the batch and may be read out
        after augmentation. This could e.g. contain filepaths to each image
        in `images`. As this object is usually used for background
        augmentation with multiple processes, the augmented Batch objects might
        not be returned in the original order, making this information useful.
    """
    def __init__(self, images=None, heatmaps=None, segmentation_maps=None, keypoints=None, bounding_boxes=None, data=None):
        self.images = images
        self.images_aug = None
        self.heatmaps = heatmaps
        self.heatmaps_aug = None
        self.segmentation_maps = segmentation_maps
        # BUGFIX: this line previously re-assigned `self.segmentation_maps`
        # to None, clobbering the input stored one line above; the *_aug
        # attribute is the one that must start as None.
        self.segmentation_maps_aug = None
        self.keypoints = keypoints
        self.keypoints_aug = None
        self.bounding_boxes = bounding_boxes
        self.bounding_boxes_aug = None
        self.data = data
class BatchLoader(object):
    """
    Class to load batches in the background.

    Loaded batches can be accessed using `BatchLoader.queue`.

    Parameters
    ----------
    load_batch_func : callable
        Function that yields Batch objects (i.e. expected to be a generator).
        Background loading automatically stops when the last batch was yielded.
    queue_size : int, optional(default=50)
        Maximum number of batches to store in the queue. May be set higher
        for small images and/or small batches.
    nb_workers : int, optional(default=1)
        Number of workers to run in the background.
    threaded : bool, optional(default=True)
        Whether to run the background processes using threads (true) or
        full processes (false).
    """
    def __init__(self, load_batch_func, queue_size=50, nb_workers=1, threaded=True):
        do_assert(queue_size > 0)
        do_assert(nb_workers >= 1)
        # bounded queue of pickled Batch objects, filled by the workers
        self.queue = multiprocessing.Queue(queue_size)
        # set by terminate() to ask all workers to stop
        self.join_signal = multiprocessing.Event()
        # one "finished" event per worker, set when that worker exits
        self.finished_signals = []
        self.workers = []
        self.threaded = threaded
        # one RNG seed per worker; only used for process workers (see below)
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            finished_signal = multiprocessing.Event()
            self.finished_signals.append(finished_signal)
            if threaded:
                # threads get seedval=None so _load_batches leaves the shared
                # process-wide RNG state untouched
                worker = threading.Thread(target=self._load_batches, args=(load_batch_func, self.queue, finished_signal, self.join_signal, None))
            else:
                worker = multiprocessing.Process(target=self._load_batches, args=(load_batch_func, self.queue, finished_signal, self.join_signal, seeds[i]))
            worker.daemon = True
            worker.start()
            self.workers.append(worker)
    def all_finished(self):
        """
        Determine whether the workers have finished the loading process.

        Returns
        -------
        out : bool
            True if all workers have finished. Else False.
        """
        return all([event.is_set() for event in self.finished_signals])
    def _load_batches(self, load_batch_func, queue, finished_signal, join_signal, seedval):
        # Worker loop: pull batches from the generator, pickle each one and
        # push it into the shared queue until the generator is exhausted or
        # a join is requested.
        if seedval is not None:
            random.seed(seedval)
            np.random.seed(seedval)
            seed(seedval)
        try:
            for batch in load_batch_func():
                do_assert(isinstance(batch, Batch), "Expected batch returned by lambda function to be of class imgaug.Batch, got %s." % (type(batch),))
                batch_pickled = pickle.dumps(batch, protocol=-1)
                # retry putting into the bounded queue until there is room,
                # bailing out early if a join was requested meanwhile
                while not join_signal.is_set():
                    try:
                        queue.put(batch_pickled, timeout=0.001)
                        break
                    except QueueFull:
                        pass
                if join_signal.is_set():
                    break
        except Exception as exc:
            # NOTE(review): `exc` is unused; the traceback is printed rather
            # than propagated, so callers only observe an early finish.
            traceback.print_exc()
        finally:
            finished_signal.set()
    def terminate(self):
        """
        Stop all workers.
        """
        self.join_signal.set()
        # give minimal time to put generated batches in queue and gracefully shut down
        time.sleep(0.002)
        # clean the queue, this reportedly prevents hanging threads
        while True:
            try:
                self.queue.get(timeout=0.005)
            except QueueEmpty:
                break
        if self.threaded:
            for worker in self.workers:
                worker.join()
            # we don't have to set the finished_signals here, because threads always finish
            # gracefully
        else:
            for worker in self.workers:
                worker.terminate()
                worker.join()
            # wait here a tiny bit to really make sure that everything is killed before setting
            # the finished_signals. calling set() and is_set() (via a subprocess) on them at the
            # same time apparently results in a deadlock (at least in python 2).
            #time.sleep(0.02)
            for finished_signal in self.finished_signals:
                finished_signal.set()
        self.queue.close()
class BackgroundAugmenter(object):
    """
    Class to augment batches in the background (while training on the GPU).

    This is a wrapper around the multiprocessing module.

    Parameters
    ----------
    batch_loader : BatchLoader
        BatchLoader object to load data in the background.
    augseq : Augmenter
        An augmenter to apply to all loaded images.
        This may be e.g. a Sequential to apply multiple augmenters.
    queue_size : int
        Size of the queue that is used to temporarily save the augmentation
        results. Larger values offer the background processes more room
        to save results when the main process doesn't load much, i.e. they
        can lead to smoother and faster training. For large images, high
        values can block a lot of RAM though.
    nb_workers : "auto" or int
        Number of background workers to spawn. If auto, it will be set
        to C-1, where C is the number of CPU cores.
    """
    def __init__(self, batch_loader, augseq, queue_size=50, nb_workers="auto"):
        do_assert(queue_size > 0)
        self.augseq = augseq
        self.source_finished_signals = batch_loader.finished_signals
        self.queue_source = batch_loader.queue
        self.queue_result = multiprocessing.Queue(queue_size)
        if nb_workers == "auto":
            try:
                nb_workers = multiprocessing.cpu_count()
            except (ImportError, NotImplementedError):
                nb_workers = 1
            # try to reserve at least one core for the main process
            nb_workers = max(1, nb_workers - 1)
        else:
            do_assert(nb_workers >= 1)
        self.nb_workers = nb_workers
        self.workers = []
        self.nb_workers_finished = 0
        self.augment_images = True
        self.augment_keypoints = True
        # one seed per worker so the workers produce distinct augmentations
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            worker = multiprocessing.Process(target=self._augment_images_worker, args=(augseq, self.queue_source, self.queue_result, self.source_finished_signals, seeds[i]))
            worker.daemon = True
            worker.start()
            self.workers.append(worker)
    def get_batch(self):
        """
        Returns a batch from the queue of augmented batches.

        If workers are still running and there are no batches in the queue,
        it will automatically wait for the next batch.

        Returns
        -------
        out : None or ia.Batch
            One batch or None if all workers have finished.
        """
        batch_str = self.queue_result.get()
        batch = pickle.loads(batch_str)
        if batch is not None:
            return batch
        # A pickled None is the per-worker end sentinel; keep reading until a
        # real batch arrives or every worker has sent its sentinel.
        self.nb_workers_finished += 1
        if self.nb_workers_finished == self.nb_workers:
            return None
        return self.get_batch()
    def _augment_images_worker(self, augseq, queue_source, queue_result, source_finished_signals, seedval):
        """
        Worker function that endlessly queries the source queue (input
        batches), augments batches in it and sends the result to the output
        queue.
        """
        # reseed every RNG source so each worker augments differently
        np.random.seed(seedval)
        random.seed(seedval)
        augseq.reseed(seedval)
        seed(seedval)
        while True:
            # wait for a new batch in the source queue and load it
            try:
                batch_str = queue_source.get(timeout=0.1)
                batch = pickle.loads(batch_str)
                batch_aug = list(augseq.augment_batches([batch], background=False))[0]
                # send augmented batch to output queue
                # BUGFIX: previously the *unaugmented* `batch` was pickled and
                # returned here, silently discarding `batch_aug`.
                batch_str = pickle.dumps(batch_aug, protocol=-1)
                queue_result.put(batch_str)
            except QueueEmpty:
                # source drained; once all loaders are done, emit the sentinel
                if all([signal.is_set() for signal in source_finished_signals]):
                    queue_result.put(pickle.dumps(None, protocol=-1))
                    return
    def terminate(self):
        """
        Terminates all background processes immediately.
        This will also free their RAM.
        """
        for worker in self.workers:
            worker.terminate()
        self.queue_result.close()
|
# simple script to get the TP's and FP's
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): the assignments below are a "toggle board": one input/truth
# pair is meant to be uncommented per run. Later unconditional assignments
# override earlier ones, so the effective configuration is the LAST
# uncommented pair (here: the "after high" files at the bottom).
name_and_taxpath_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/File_name_and_taxonomy.txt'
# low
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# medium
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RM_S001_classified.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:medium:1.profile'
# high
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RH_S001_classified.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:high:1.profile'
# reduced, low
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001_reduced_classified.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# reduced k=120
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001_reduced_k120_classified.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# reduced n500
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001_reduced_n500_classified.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# after the fix
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_postprocess.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# after the pos-process, low
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_postprocess_finished.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# Nathan micop database, low
name_and_taxpath_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/fnames_to_info_repophlan_format.tsv'
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_micopdb-n1000-300-60_postprocess.csv'
cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_micopdb-n1000-300-60.csv'
truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# after medium
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RM_S001_classified_postprocess_finished.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:medium:pool.profile'
# after high (these two OVERRIDE the cmash_out_file/truth_file set above)
cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RH_S001_classified_postprocess_finished.csv'
truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:high:pool.profile'
# TODO: conclusion: using the reduced database is a bad idea (for some really weird reason)
# initial threshold; overwritten by the loop variable further below
coverage_threshold = 0
#sort_key = 'k=60' # 'k=30' barely adds any TP's with a huge hit of lots of FP's
# None means: fall back to the CSV's last (largest-k) column
sort_key = None
# Mapping from CAMI rank abbreviations to full taxonomic rank names.
abbrv_to_rank = {
    'k': "superkingdom",
    'p': "phylum",
    'c': "class",
    'o': "order",
    'f': "family",
    'g': "genus",
    's': "species",
    't': "strain",
}
# Inverse mapping: full rank name -> abbreviation.
rank_to_abbrv = {rank: abbrv for abbrv, rank in abbrv_to_rank.items()}
# read in the taxonomy: map reference file name -> taxonomy path info.
# Using `with` so the file is closed even on the sys.exit() path (the
# previous explicit fid.close() was never reached after sys.exit).
name_to_taxpath = dict()
with open(name_and_taxpath_file, 'r') as fid:
    for line in fid:
        line = line.strip()
        parts = line.split()
        ref_file = parts[0]  # first column is file name
        tax_info = parts[1:]  # rest is the taxpath info
        if ref_file in name_to_taxpath:
            print("Uh oh, file names are not unique! culprit: %s" % ref_file)
            sys.exit(1)
        name_to_taxpath[ref_file] = tax_info
# Now read in the ground truth: one set of true tax ids per rank abbreviation.
true_taxa = {key: set() for key in abbrv_to_rank}
with open(truth_file, 'r') as fid:
    for line in fid:
        line = line.strip()
        # skip blank lines and the '@'-prefixed header lines of the CAMI profile
        if line and line[0] != '@':
            split_line = line.split()
            tax_id = split_line[0]
            rank = split_line[1]
            rank_abbrv = rank_to_abbrv[rank]
            true_taxa[rank_abbrv].add(tax_id)
# Per-threshold counts at the deepest evaluated rank (species, see loop below).
TP_vers_cov = []
FP_vers_cov = []
FN_vers_cov = []
df = pd.read_csv(cmash_out_file, index_col=0)
max_key = df.keys()[-1]
# loop over coverages, get binary stats with that threshold
cov_range = np.linspace(.6, 0, 50)
#cov_range = [0.05] # for the low complexity sample: TP=18, FP=101, FN=5. TODO: after fix, at cutoff of 0.06, TP=18, FP=78, FN=5.
# TODO: After post-process complete, cutoff 0.06, TP=17, FP=11, FN=6 !!!!!!!!!!!!!!!!!!!!!!, gets to TP=18 at cutoff 0.03
#cov_range = [0.004] # good for the medium complexity sample: TP=54, FP=536, FN=18
#cov_range = [0.008] # good for the high complexity sample: TP=153, FP=1123, FN=90
# Resolve the column to threshold on once, outside the loop (the original
# re-checked this every iteration; the value never changes after the first).
if not sort_key:
    sort_key = max_key
for coverage_threshold in cov_range:
    cmash_thresh = df[df[sort_key] > coverage_threshold].sort_values(max_key, ascending=False)
    names_passed_thresh = list(cmash_thresh.index)
    pred_taxa = {key: set() for key in abbrv_to_rank}
    for name in names_passed_thresh:
        if name in name_to_taxpath:
            tax_info = name_to_taxpath[name]
            # add all higher ranks as well
            for rank_ind in range(1, len(tax_info[2].split('|')) + 1):
                try:
                    tax_info_to_rank = tax_info[2].split('|')[:rank_ind]  # tax info up to the rank under consideration
                    tax_id = [i.split('_')[2] for i in tax_info_to_rank][-1]  # get the last guy's tax id
                    rank_abbrv = tax_info_to_rank[-1].split('_')[0]
                    pred_taxa[rank_abbrv].add(tax_id)
                except (IndexError, KeyError):
                    # malformed taxpath entry or unknown rank abbreviation: skip
                    # (was a bare `except:`, which also hid real bugs)
                    pass
    num_TPs = []
    num_FPs = []
    num_FNs = []
    TPs = []
    FPs = []
    FNs = []
    for rank_abbrv in 'kpcofgs':
        TP = true_taxa[rank_abbrv].intersection(pred_taxa[rank_abbrv])
        FP = pred_taxa[rank_abbrv] - true_taxa[rank_abbrv]
        FN = true_taxa[rank_abbrv] - pred_taxa[rank_abbrv]
        TPs.append(TP)
        FPs.append(FP)
        FNs.append(FN)
        num_TPs.append(len(TP))
        num_FPs.append(len(FP))
        num_FNs.append(len(FN))
    # keep the last rank's (species) counts for the plot
    TP_vers_cov.append(num_TPs[-1])
    FP_vers_cov.append(num_FPs[-1])
    FN_vers_cov.append(num_FNs[-1])
# Plot species-level TP and FP counts as a function of the coverage threshold.
together = np.array([TP_vers_cov, FP_vers_cov]).transpose()
plt.plot(cov_range, together)
plt.legend(['True Positive', 'False Positive'])
#plt.ylim([0,100])
plt.show()
# add new results
# simple script to get the TP's and FP's
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): as in the sibling script, these assignments form a "toggle
# board"; later unconditional assignments override earlier ones, so the
# effective configuration is the LAST uncommented pair (here: the
# "Nathan micop database, low, sensitive" files).
name_and_taxpath_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/File_name_and_taxonomy.txt'
# low
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# medium
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RM_S001_classified.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:medium:1.profile'
# high
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RH_S001_classified.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:high:1.profile'
# reduced, low
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001_reduced_classified.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# reduced k=120
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001_reduced_k120_classified.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# reduced n500
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001_reduced_n500_classified.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# after the fix
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_postprocess.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# after the pos-process, low
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_postprocess_finished.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# after the filename order fix
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_after_file_order_fix.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# after the filename order fix, sensitive
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_after_file_order_fix.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# Nathan micop database, low
#name_and_taxpath_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/fnames_to_info_repophlan_format.tsv'
##cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_micopdb-n1000-300-60_postprocess.csv'
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_micopdb-n1000-300-60.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# Nathan micop database, low, sensitive
name_and_taxpath_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/fnames_to_info_repophlan_format.tsv'
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_micopdb-n1000-300-60_postprocess.csv'
cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RL_S001__insert_270_classified_micopdb-n1000-300-60_sensitive.csv'
truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:low:pool.profile'
# after medium
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RM_S001_classified_postprocess_finished.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:medium:pool.profile'
# after high
#cmash_out_file = '/home/dkoslicki/Data/MiCOPMinHash/Test/RH_S001_classified_postprocess_finished.csv'
#truth_file = '/home/dkoslicki/Dropbox/Repositories/firstchallenge_evaluation_Backup/profiling/MyAnalysis/GroundTruth/all/CAMI:high:pool.profile'
# TODO: conclusion: using the reduced database is a bad idea (for some really weird reason)
# initial threshold; overwritten by the loop variable further below
coverage_threshold = 0
#sort_key = 'k=60' # 'k=30' barely adds any TP's with a huge hit of lots of FP's
# None means: fall back to the CSV's last (largest-k) column
sort_key = None
# Mapping from CAMI rank abbreviations to full taxonomic rank names.
abbrv_to_rank = {
    'k': "superkingdom",
    'p': "phylum",
    'c': "class",
    'o': "order",
    'f': "family",
    'g': "genus",
    's': "species",
    't': "strain",
}
# Inverse mapping: full rank name -> abbreviation.
rank_to_abbrv = {rank: abbrv for abbrv, rank in abbrv_to_rank.items()}
# read in the taxonomy: map reference file name -> taxonomy path info.
# Using `with` so the file is closed even on the sys.exit() path (the
# previous explicit fid.close() was never reached after sys.exit).
name_to_taxpath = dict()
with open(name_and_taxpath_file, 'r') as fid:
    for line in fid:
        line = line.strip()
        parts = line.split()
        ref_file = parts[0]  # first column is file name
        tax_info = parts[1:]  # rest is the taxpath info
        if ref_file in name_to_taxpath:
            print("Uh oh, file names are not unique! culprit: %s" % ref_file)
            sys.exit(1)
        name_to_taxpath[ref_file] = tax_info
# Now read in the ground truth: one set of true tax ids per rank abbreviation.
true_taxa = {key: set() for key in abbrv_to_rank}
with open(truth_file, 'r') as fid:
    for line in fid:
        line = line.strip()
        # skip blank lines and the '@'-prefixed header lines of the CAMI profile
        if line and line[0] != '@':
            split_line = line.split()
            tax_id = split_line[0]
            rank = split_line[1]
            rank_abbrv = rank_to_abbrv[rank]
            true_taxa[rank_abbrv].add(tax_id)
# Per-threshold counts at the deepest evaluated rank (species, see loop below).
TP_vers_cov = []
FP_vers_cov = []
FN_vers_cov = []
df = pd.read_csv(cmash_out_file, index_col=0)
max_key = df.keys()[-1]
# loop over coverages, get binary stats with that threshold
cov_range = np.linspace(.6, 0, 50)
#cov_range = [0.05] # for the low complexity sample: TP=18, FP=101, FN=5. TODO: after fix, at cutoff of 0.06, TP=18, FP=78, FN=5.
# TODO: After post-process complete, cutoff 0.06, TP=17, FP=11, FN=6 !!!!!!!!!!!!!!!!!!!!!!, gets to TP=18 at cutoff 0.03
#cov_range = [0.004] # good for the medium complexity sample: TP=54, FP=536, FN=18
#cov_range = [0.008] # good for the high complexity sample: TP=153, FP=1123, FN=90
# Resolve the column to threshold on once, outside the loop (the original
# re-checked this every iteration; the value never changes after the first).
if not sort_key:
    sort_key = max_key
for coverage_threshold in cov_range:
    cmash_thresh = df[df[sort_key] > coverage_threshold].sort_values(max_key, ascending=False)
    names_passed_thresh = list(cmash_thresh.index)
    pred_taxa = {key: set() for key in abbrv_to_rank}
    for name in names_passed_thresh:
        if name in name_to_taxpath:
            tax_info = name_to_taxpath[name]
            # add all higher ranks as well
            for rank_ind in range(1, len(tax_info[2].split('|')) + 1):
                try:
                    tax_info_to_rank = tax_info[2].split('|')[:rank_ind]  # tax info up to the rank under consideration
                    tax_id = [i.split('_')[2] for i in tax_info_to_rank][-1]  # get the last guy's tax id
                    rank_abbrv = tax_info_to_rank[-1].split('_')[0]
                    pred_taxa[rank_abbrv].add(tax_id)
                except (IndexError, KeyError):
                    # malformed taxpath entry or unknown rank abbreviation: skip
                    # (was a bare `except:`, which also hid real bugs)
                    pass
    num_TPs = []
    num_FPs = []
    num_FNs = []
    TPs = []
    FPs = []
    FNs = []
    for rank_abbrv in 'kpcofgs':
        TP = true_taxa[rank_abbrv].intersection(pred_taxa[rank_abbrv])
        FP = pred_taxa[rank_abbrv] - true_taxa[rank_abbrv]
        FN = true_taxa[rank_abbrv] - pred_taxa[rank_abbrv]
        TPs.append(TP)
        FPs.append(FP)
        FNs.append(FN)
        num_TPs.append(len(TP))
        num_FPs.append(len(FP))
        num_FNs.append(len(FN))
    # keep the last rank's (species) counts for the plot
    TP_vers_cov.append(num_TPs[-1])
    FP_vers_cov.append(num_FPs[-1])
    FN_vers_cov.append(num_FNs[-1])
# Plot species-level TP and FP counts as a function of the coverage threshold.
together = np.array([TP_vers_cov, FP_vers_cov]).transpose()
plt.plot(cov_range, together)
plt.legend(['True Positive', 'False Positive'])
#plt.ylim([0,100])
plt.show()
|
#!/usr/bin/env python3
import asyncio
import os
import sys
import logging
import struct
import hashlib
import argparse
import binascii
import aiodns
from urllib.parse import urlparse, parse_qs
from base64 import b32decode, b16decode
from concurrent.futures import FIRST_COMPLETED, CancelledError
from logging import StreamHandler
from socket import inet_aton, inet_ntoa
from random import randint
from binascii import hexlify
from bencodepy import encode as bencode, decode as bdecode, DecodingError
# Parser states for BitTorrentProtocol.data_received / parse_field.
HANDSHAKE = 1        # expecting the 68-byte BitTorrent handshake
MESSAGE_LEN = 2      # expecting a 4-byte big-endian message length
MESSAGE_TYPE = 3     # expecting the 1-byte message type
MESSAGE_PAYLOAD = 4  # expecting the message payload
class SetQueue(asyncio.Queue):
    """asyncio.Queue backed by a set: duplicate puts are collapsed and items
    are retrieved in arbitrary order (these _init/_put/_get hooks are the
    asyncio.Queue customization points)."""
    def _init(self, maxsize):
        self._queue = set()
    def _put(self, item):
        self._queue.add(item)
    def _get(self):
        return self._queue.pop()
# A SortedQueue is constructed from an infohash, internally it removes
# duplicates, sorts items put into it based on their distance to the
# given infohash and yields the closer ones to the infohash first when
# asked.
class SortedQueue(asyncio.Queue):
    def __init__(self, infohash):
        super(SortedQueue, self).__init__()
        self.infohash = infohash
    def _init(self, maxsize):
        # backing store: a plain list kept sorted by descending distance
        self._queue = []
    def _put(self, item):
        # de-duplicate, then re-sort so the item with the smallest
        # distance to the infohash ends up at the END of the list
        # (distance() is defined elsewhere in this module)
        if item not in self._queue:
            self._queue.append(item)
            self._queue.sort(key=lambda i: -distance(i, self.infohash))
    def _get(self):
        # pop from the end: the item closest to the infohash
        return self._queue.pop()
# NOTE(review): module-wide mutable state shared by the DHT/metadata code;
# most of it is (re)initialized outside this excerpt -- descriptions below
# are inferred and should be confirmed against the rest of the file.
TIMEOUT = 5    # presumably seconds per request -- TODO confirm at call sites
RETRIES = 2    # retry count for requests -- TODO confirm at call sites
resolver = None           # aiodns resolver, created elsewhere
nodeid = None             # our own DHT node id
nodes = None              # queue of DHT nodes to contact
values = SetQueue()       # deduplicated results (peer candidates)
all_peers = set()
metadata_size = 0         # total metadata size learned from a peer
metadata = set()          # received metadata pieces
full_metadata = b''       # assembled metadata blob
keep_running = False
get_peers_in_progress = 0       # counters of in-flight operations
get_metadatas_in_progress = 0
class BitTorrentProtocol:
def __init__(self, infohash, peerid):
    # Events signalled as the connection progresses or fails.
    self.handshake_complete = asyncio.Event()
    self.extended_handshake_complete = asyncio.Event()
    self.metadata_block_received = asyncio.Event()
    self.error = asyncio.Event()
    self.infohash = infohash
    self.peerid = peerid
    # Incremental parser state: the first field is the 68-byte handshake.
    self.state = HANDSHAKE
    self.field_len = 68
    self.field = b''        # bytes of the field currently being accumulated
    self.leftover = b''     # surplus bytes beyond the current field
    self.metadata_size = 0
    self.metadata_block = b''
def connection_made(self, transport):
    # Immediately send the BitTorrent handshake: pstrlen+pstr, 8 reserved
    # bytes advertising the extension protocol (0x10 in byte 5) and the
    # fast extension (0x04 in byte 7) -- the same bits parse_field later
    # requires from the peer -- followed by infohash and peer id.
    self.transport = transport
    self.transport.write(
        b'\x13BitTorrent protocol'
        b'\x00\x00\x00\x00\x00\x10\x00\x04' + \
        self.infohash + self.peerid
    )
def data_received(self, data):
    # Accumulate incoming bytes into self.field until exactly
    # self.field_len bytes are present, then parse that field.
    data = self.leftover + data
    if len(self.field) + len(data) < self.field_len:
        # not enough for a full field yet: buffer everything
        self.field += data
        self.leftover = b''
    elif len(self.field) + len(data) == self.field_len:
        # exactly one complete field
        self.field += data
        self.leftover = b''
        self.parse_field()
    else:
        # complete field plus surplus: keep the surplus in leftover
        n = self.field_len - len(self.field)
        self.field += data[:n]
        self.leftover = data[n:]
        self.parse_field()
    # parse_field may have changed state/field_len; if the leftover already
    # holds the next full field, re-enter with no new data to consume it.
    if len(self.leftover) >= self.field_len and not self.error.is_set():
        self.data_received(b'')
def eof_received(self):
    # Peer closed its sending side; flag the connection as failed.
    logger.debug('EOF received.')
    self.error.set()
def connection_lost(self, exc):
    # Transport closed (exc is None on clean close); flag as failed either way.
    logger.debug('Connection lost: {}'.format(exc))
    self.error.set()
def parse_field(self):
    """Consume the completed `self.field` according to the current parser
    state and advance the state machine (driven by data_received)."""
    if self.state == HANDSHAKE:
        # 68-byte handshake: pstr, 8 reserved bytes, infohash, peer id.
        if not self.field[:20] == b'\x13BitTorrent protocol':
            logger.debug('Invalid pstr.')
            self.error.set()
            return
        # require the extension protocol bit (0x10 in reserved byte 5)
        if int.from_bytes(self.field[20:28], byteorder='big') & 0x0000000000100000 == 0:
            logger.debug('Peer does not support extension protocol.')
            self.error.set()
            return
        # require the fast extension bit (0x04 in reserved byte 7)
        if int.from_bytes(self.field[20:28], byteorder='big') & 0x0000000000000004 == 0:
            logger.debug('Peer does not support fast protocol.')
            self.error.set()
            return
        self.state = MESSAGE_LEN
        self.field_len = 4
        self.handshake_complete.set()
        extended_handshake = bencode({
            'm': {b'ut_metadata': 2},
            'v': 'S.P.E.W.'
        })
        self.write_extended_message(0, extended_handshake)
        logger.debug('Sent extended handshake.')
    elif self.state == MESSAGE_LEN:
        self.message_len = int.from_bytes(self.field, byteorder='big')
        if self.message_len == 0:
            # keep-alive: stay in MESSAGE_LEN, expect another 4-byte length.
            self.state = MESSAGE_LEN
            # BUGFIX: was `self.field = 4`, assigning an int to the byte
            # buffer instead of updating the expected field length.
            self.field_len = 4
        else:
            self.state = MESSAGE_TYPE
            self.field_len = 1
    elif self.state == MESSAGE_TYPE:
        self.message_type = int.from_bytes(self.field, byteorder='big')
        if self.message_len == 1:
            # zero-payload message: the next field is a new 4-byte length.
            self.state = MESSAGE_LEN
            # BUGFIX: was `self.field = 4`; field_len stayed at 1, so after
            # any zero-payload message (e.g. CHOKE, HAVE NONE) the parser
            # read a 1-byte "length" and desynchronized from the stream.
            self.field_len = 4
        else:
            self.message_len -= 1
            self.field_len = self.message_len
            self.state = MESSAGE_PAYLOAD
    elif self.state == MESSAGE_PAYLOAD:
        self.parse_message()
        self.field_len = 4
        self.state = MESSAGE_LEN
    else:
        logger.error('Invalid state.')
        self.error.set()
    # the current field has been consumed in every branch
    self.field = b''
def parse_message(self):
logger.debug({
0: 'CHOKE',
1: 'UNCHOKE',
2: 'INTERESTED',
3: 'NOT INTERESTED',
4: 'HAVE',
5: 'BITFIELD',
6: 'REQUEST',
7: 'PIECE',
8: 'CANCEL',
9: 'PORT',
13: 'SUGGEST PIECE',
14: 'HAVE ALL',
15: 'HAVE NONE',
16: 'REJECT REQUEST',
17: 'ALLOWED FAST',
20: 'EXTENDED'
}.get(self.message_type, 'UNKNOWN MESSAGE'))
if self.message_type == 20:
self.parse_extended_message()
    def parse_extended_message(self):
        """Handle a BEP 10 extended message.

        Extended type 0 is the extended handshake (records the peer's
        ut_metadata message id and metadata size); the peer's ut_metadata
        id carries BEP 9 request/data/reject messages.
        """
        extended_message_type = self.field[0]
        message = self.field[1:]
        if extended_message_type == 0:
            try:
                message = bdecode(message)
            except DecodingError:
                self.error.set()
                return
            if b'm' not in message:
                logger.debug('"m" not in extended handshake.')
                self.error.set()
                return
            self.extended_message_types = message[b'm']
            if b'ut_metadata' not in self.extended_message_types:
                logger.debug('Peer does not support metadata protocol.')
                self.error.set()
                return
            if b'metadata_size' not in message:
                logger.debug('Peer did not send "metadata_size" in extended handshake.')
                self.error.set()
                return
            self.metadata_size = message[b'metadata_size']
            logger.info('metadata size: {}'.format(self.metadata_size))
            self.extended_handshake_complete.set()
            self.write_message(15, b'') # have none
            logger.debug('Sent HAVE NONE.')
            self.write_message(0, b'') # choke
            logger.debug('Sent CHOKE.')
            self.write_message(3, b'') # not interested
            logger.debug('Sent NOT INTERESTED.')
        elif extended_message_type == self.extended_message_types[b'ut_metadata']:
            original_message = message
            try:
                message = bdecode(message)
            except DecodingError:
                self.error.set()
                return
            if message[b'msg_type'] == 0:
                # NOTE(review): this reject reply is built but never sent,
                # so the peer's metadata request goes unanswered — confirm
                # whether it should be transmitted.
                reply = {
                    'msg_type': 2,
                    'piece': message[b'piece']
                }
            elif message[b'msg_type'] == 2:
                logger.debug('Request for metadata rejected.')
                return
            elif message[b'msg_type'] == 1:
                # The raw metadata block follows the bencoded header; its
                # size is recovered by re-encoding the decoded header.
                size = len(original_message) - len(bencode(message))
                logger.debug('Got a metadata block of size: {}'.format(size))
                self.metadata_block = original_message[-size:]
                self.metadata_block_received.set()
def get_metadata_block(self, n):
message = {
'msg_type': 0,
'piece': n
}
logger.info('Requesting piece {} of metadata.'.format(n))
msg = bencode(message)
self.write_extended_message(self.extended_message_types[b'ut_metadata'], msg)
def write_message(self, type, msg):
msg_len = 1 + len(msg)
self.transport.write(msg_len.to_bytes(length=4, byteorder='big') + \
bytes([type]) + msg)
def write_extended_message(self, ex_type, msg):
self.write_message(20, bytes([ex_type]) + msg)
class DhtProtocol:
    """Datagram protocol for one outstanding KRPC (DHT) query.

    The query is sent on connection; a validated reply is stored in
    ``self.reply`` and signalled through ``self.reply_received``.
    """
    def __init__(self, query_type, nodeid,
                 target=None,
                 infohash=None,
                 implied_port=None,
                 port=None,
                 token=None):
        self.query_type = query_type
        self.nodeid = nodeid
        self.target = target
        self.infohash = infohash
        self.implied_port = implied_port
        self.port = port
        self.token = token
        # Random 16-bit transaction ID used to match replies to us.
        self.tid = struct.pack('!H', randint(0, 65535))
        # NOTE(review): the explicit ``loop`` argument to Event is
        # deprecated in modern asyncio; kept because this file uses
        # old-style @asyncio.coroutine code and a module-global loop.
        self.reply_received = asyncio.Event(loop=loop)
    def construct_message(self):
        """Return the bencoded KRPC query body.

        Raises:
            RuntimeError: if ``self.query_type`` is not a known query.
        """
        args = {
            'ping': {
                'id': self.nodeid
            },
            'find_node': {
                'id': self.nodeid,
                'target': self.target
            },
            'get_peers': {
                'id': self.nodeid,
                'info_hash': self.infohash
            },
            'announce_peer': {
                'id': self.nodeid,
                'implied_port': self.implied_port,
                'info_hash': self.infohash,
                'port': self.port,
                'token': self.token
            }
        }.get(self.query_type, None)
        # IDIOM FIX: compare against None with ``is``, not ``==``.
        if args is None:
            raise RuntimeError('Invalid DHT query type: {}'.format(
                self.query_type))
        return bencode({
            't': self.tid,
            'y': 'q',
            'q': self.query_type,
            'a': args
        })
    def connection_made(self, transport):
        """Send the query as soon as the UDP endpoint is ready."""
        self.transport = transport
        self.send_message()
    def send_message(self):
        """(Re)send the query datagram."""
        message = self.construct_message()
        logger.debug('Sending DHT query.')
        self.transport.sendto(message)
    def datagram_received(self, data, addr):
        """Validate an incoming datagram and record it as our reply.

        Malformed replies and replies whose transaction ID does not
        match ours are discarded silently (debug-logged).
        """
        try:
            message = bdecode(data)
        except DecodingError:
            logger.debug('Received invalid bencoding in reply. Discarded.')
            return
        if b't' not in message:
            logger.debug('Received invalid reply. Discarded')
            return
        if message[b't'] != self.tid:
            logger.debug('Received reply with invalid transaction ID. Discarded.')
            return
        if b'r' not in message or b'id' not in message[b'r']:
            logger.debug('Received invalid reply. Discarded.')
            return
        logger.debug('Received DHT reply from {}:{} with node ID {}.'.format(
            addr[0], addr[1], hexlify(message[b'r'][b'id']).decode()))
        self.reply = message[b'r']
        self.reply_received.set()
    def error_received(self, exc):
        # UDP errors are ignored; timeouts/retries handle lost packets.
        pass
    def connection_lost(self, exc):
        pass
    def retry(self):
        """Resend the query after a timeout."""
        logger.debug('Retrying...')
        self.send_message()
@asyncio.coroutine
def ping(loop, host, port):
    """Send a DHT ping to host:port; return True iff a reply arrived."""
    try:
        transport, protocol = yield from loop.create_datagram_endpoint(
            lambda: DhtProtocol('ping', nodeid=nodeid),
            remote_addr=(host, port))
    except OSError as e:
        logger.debug('Error opening socket for "ping": {}'.format(e))
        # BUG FIX: without this return, ``transport``/``protocol`` below
        # were unbound and raised NameError.
        return False
    for i in range(RETRIES):
        try:
            yield from asyncio.wait_for(
                protocol.reply_received.wait(),
                timeout=TIMEOUT)
        except asyncio.TimeoutError:
            protocol.retry()
        else:
            break
    transport.close()
    if protocol.reply_received.is_set():
        # BUG FIX: extra logging args need a %-style placeholder;
        # ``debug('Reply:', x)`` dropped/garbled the reply.
        logger.debug('Reply: %s', protocol.reply)
        logger.debug('Done.')
    else:
        logger.debug('No reply received.')
    return protocol.reply_received.is_set()
@asyncio.coroutine
def get_peers(loop, host, port, infohash):
    """Issue a DHT get_peers query and feed the results into the crawl.

    Compact peers are added to ``all_peers`` and the ``values`` queue;
    returned DHT nodes are queued on ``nodes`` for further querying.
    """
    global get_peers_in_progress
    get_peers_in_progress += 1
    try:
        try:
            transport, protocol = yield from loop.create_datagram_endpoint(
                lambda: DhtProtocol('get_peers', nodeid=nodeid, infohash=infohash),
                remote_addr=(host, port))
        except OSError as e:
            logger.debug('Error opening socket for get_peers: {}'.format(e))
            return
        for i in range(RETRIES):
            try:
                yield from asyncio.wait_for(
                    protocol.reply_received.wait(),
                    # CONSISTENCY FIX: use the module-wide TIMEOUT constant
                    # (same value, 5s) instead of a magic number.
                    timeout=TIMEOUT)
            except asyncio.TimeoutError:
                protocol.retry()
            else:
                break
        transport.close()
        if not protocol.reply_received.is_set():
            logger.debug('get_peers: No reply received.')
            return
        if b'values' in protocol.reply:
            peers = protocol.reply[b'values']
            for p in peers:
                # Compact peer info must be 4-byte IP + 2-byte port.
                if len(p) != 6:
                    logger.debug('Invalid peer "{}". Ignored.'.format(repr(p)))
                else:
                    all_peers.add(p)
                    yield from values.put(p)
        elif b'nodes' in protocol.reply:
            peers = protocol.reply[b'nodes']
            # Compact node info: 20-byte node ID + 4-byte IP + 2-byte port.
            peers = [peers[i:i+26] for i in range(0, len(peers), 26)]
            for p in peers:
                # Strip the node ID; queue just the IP:port part.
                yield from nodes.put(p[20:])
    finally:
        get_peers_in_progress -= 1
@asyncio.coroutine
def dns_resolve(loop, name):
    """Resolve ``name`` to an IPv4 address string via aiodns."""
    logger.info('Resolving: {}'.format(name))
    try:
        answers = yield from resolver.query(name, 'A')
    except aiodns.error.DNSError as e:
        raise RuntimeError('Could not resolve name:', name)
    return answers[0].host
@asyncio.coroutine
def get_metadata(loop, host, port, infohash):
    """Connect to one peer and try to download the complete metadata.

    Returns True when no further attempts should be made against this
    peer (success, or the global download is already finished), False on
    a retryable failure.  On success the bdecoded metadata is stored in
    the module-global ``full_metadata`` and ``keep_running`` is cleared.
    """
    global metadata, metadata_size, keep_running, full_metadata, get_metadatas_in_progress
    if not keep_running:
        return True
    get_metadatas_in_progress += 1
    try:
        logger.info('Getting metadata from: {}:{}'.format(host, port))
        try:
            transport, protocol = yield from loop.create_connection(
                lambda: BitTorrentProtocol(infohash, nodeid), host, port)
        except OSError as e:
            logger.debug('Connection error: {}'.format(e))
            return False
        logger.debug('Connected to peer: {}:{}'.format(host, port))
        # Wait for the plain BitTorrent handshake (or an error).
        done, pending = yield from asyncio.wait(
            [protocol.handshake_complete.wait(),
             protocol.error.wait()],
            return_when=FIRST_COMPLETED,
            timeout=TIMEOUT)
        for task in pending:
            task.cancel()
        if not done or protocol.error.is_set():
            logger.debug('Error communicating with the peer while waiting for the handshake.')
            transport.close()
            return False
        # Wait for the BEP 10 extended handshake.
        done, pending = yield from asyncio.wait(
            [protocol.extended_handshake_complete.wait(),
             protocol.error.wait()],
            return_when=FIRST_COMPLETED,
            timeout=TIMEOUT)
        for task in pending:
            task.cancel()
        if not done or protocol.error.is_set():
            logger.debug('Error communicating with the peer while waiting for the extended handshake.')
            transport.close()
            return False
        if metadata_size > 0 and metadata_size != protocol.metadata_size:
            logger.warning('Inconsistent metadata size received.')
        metadata_size = protocol.metadata_size
        # Metadata is transferred in 16 KiB blocks; round the count up.
        metadata_nblocks = int(metadata_size / (16 * 1024))
        metadata_nblocks += 0 if metadata_size % (16 * 1024) == 0 else 1
        while keep_running:
            protocol.metadata_block_received.clear()
            try:
                # Pick the first block index not yet downloaded.
                i = next(i for i in range(metadata_nblocks)
                         if i not in [m[0] for m in metadata])
            except StopIteration as e:
                # All blocks already fetched (possibly by another task).
                transport.close()
                return True
            protocol.get_metadata_block(i)
            done, pending = yield from asyncio.wait(
                [protocol.metadata_block_received.wait(),
                 protocol.error.wait()],
                return_when=FIRST_COMPLETED,
                timeout=TIMEOUT)
            for task in pending:
                task.cancel()
            if not done or protocol.error.is_set():
                logger.debug('Error communicating with the peer while waiting for metadata block.')
                transport.close()
                return False
            metadata.add((i, protocol.metadata_block))
            if {m[0] for m in metadata} == set(range(metadata_nblocks)):
                # metadata complete. hash check.
                m = hashlib.sha1()
                full_metadata = b''
                for i, b in sorted(metadata, key=lambda m: m[0]):
                    full_metadata += b
                    m.update(b)
                if m.digest() != infohash:
                    logger.debug('Invalid metadata received. Hash does not checkout. Discarding.')
                    metadata_size = 0
                    metadata = set()
                    return False
                logger.info('Metadata received.')
                full_metadata = bdecode(full_metadata)
                # NOTE(review): on this success path the transport is left
                # open — confirm this is intentional.
                keep_running = False
    finally:
        get_metadatas_in_progress -= 1
    return True
@asyncio.coroutine
def get_metadata_with_retries(loop, host, port, infohash):
    """Attempt ``get_metadata`` up to RETRIES times, stopping on success."""
    for attempt in range(RETRIES):
        if (yield from get_metadata(loop, host, port, infohash)):
            break
        logger.debug('Retrying get_metadata...')
def distance(i, ih):
    """Return the Hamming distance between the byte strings ``i`` and ``ih``."""
    return sum(bin(b1 ^ b2).count('1') for b1, b2 in zip(ih, i))
def get_closest_nodes(k, infohash):
    """Return up to ``k`` known peers closest (by ``distance``) to ``infohash``."""
    ranked = sorted(all_peers, key=lambda p: distance(p, infohash))
    return ranked[:k]
@asyncio.coroutine
def main(loop, infohash, filename):
    """Drive the crawl: bootstrap the DHT, find peers, fetch metadata,
    and write the resulting trackerless .torrent to ``filename``.

    NOTE(review): this coroutine shares its name with the CLI ``main()``
    defined later in the file; the later definition shadows this one at
    module scope — confirm which one callers actually invoke.
    """
    global keep_running
    logger.info('Using node ID: {}'.format(hexlify(nodeid).decode()))
    # Use router.bittorrent.com as the bootstrapping node.
    logger.info('Using router.bittorrent.com as the bootstrapping node.')
    ip = yield from dns_resolve(loop, 'router.bittorrent.com')
    logger.info('Resolved to: {}'.format(ip))
    yield from nodes.put(inet_aton(ip) + struct.pack('!H', 6881))
    # Recursively search for peers.
    keep_running = True
    while keep_running:
        if values.qsize() > 0:
            # Drain freshly discovered peers into metadata-fetch tasks.
            while values.qsize() > 0:
                peer = yield from values.get()
                host, port = inet_ntoa(peer[:4]), struct.unpack('!H', peer[4:])[0]
                loop.create_task(
                    get_metadata_with_retries(loop, host, port, infohash))
        elif get_peers_in_progress < 100 and get_metadatas_in_progress < 100 and nodes.qsize() > 0:
            # Under the concurrency cap: query the next-closest node.
            peer = yield from nodes.get()
            host, port = inet_ntoa(peer[:4]), struct.unpack('!H', peer[4:])[0]
            loop.create_task(get_peers(loop, host, port, infohash))
        else:
            # Nothing to launch right now; yield to other tasks.
            yield
        if get_peers_in_progress == 0 and get_metadatas_in_progress == 0 \
           and nodes.qsize() == 0 and values.qsize() == 0:
            logger.info('Nothing more to do. Quitting.')
            keep_running = False
    if full_metadata:
        # Embed the k closest known peers as bootstrap nodes.
        k = 8
        n = get_closest_nodes(k, infohash)
        n = [[inet_ntoa(p[:4]), struct.unpack('!H', p[4:])[0]]
             for p in n]
        torrent = {
            'nodes': n,
            'info': full_metadata
        }
        with open(filename, 'wb') as f:
            f.write(bencode(torrent))
def main():
    """Command-line entry point: parse arguments and fetch the torrent."""
    # BUG FIX: these names are read at module scope by the protocol
    # classes and coroutines; without ``global`` the assignments below
    # created shadowing locals and the module globals stayed None.
    global nodeid, nodes, logger, resolver, loop
    nodeid = os.urandom(20)
    parser = argparse.ArgumentParser(
        description='Convert an infohash into a trackerless torrent file.')
    parser.add_argument('infohash', type=str,
                        help='The infohash of the torrent. Both base16 and '
                        'base32 formatted infohashes are acceptable. You can '
                        # TYPO FIX: was "pass a magnet use a magnet URI".
                        'also pass a magnet URI for this argument.')
    parser.add_argument('--file', '-f', type=str,
                        help='The name of the output torrent file. Defaults '
                        'to the infohash with a .torrent extension.')
    args = parser.parse_args()
    if args.infohash.startswith('magnet:'):
        query = urlparse(args.infohash).query
        qs = parse_qs(query)
        v = qs.get('xt', None)
        # IDIOM FIX: ``not v`` covers both None and [].
        if not v:
            print('Invalid magnet URI: no "xt" query parameter.')
            exit(1)
        v = v[0]
        if not v.startswith('urn:btih:'):
            print('Invalid magnet URI: "xt" value not valid for BitTorrent.')
            exit(1)
        args.infohash = v[len('urn:btih:'):]
    if not args.file:
        args.file = args.infohash + '.torrent'
    args.infohash = args.infohash.upper()
    # Try base32 first, then base16 (hex).
    try:
        args.infohash = b32decode(args.infohash)
    except binascii.Error:
        try:
            args.infohash = b16decode(args.infohash)
        except binascii.Error:
            print('Invalid infohash.')
            exit(1)
    if len(args.infohash) != 20:
        print('Invalid infohash.')
        exit(1)
    nodes = SortedQueue(args.infohash)
    logger = logging.getLogger('ih2torrent')
    handler = StreamHandler(sys.stdout)
    handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s')
    handler.setFormatter(formatter)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    try:
        loop = asyncio.get_event_loop()
        resolver = aiodns.DNSResolver(loop=loop)
        # NOTE(review): ``main`` here resolves to this CLI function, which
        # shadows the module's crawl coroutine of the same name — confirm
        # the coroutine is what should be scheduled.
        loop.run_until_complete(main(loop, args.infohash, args.file))
    except KeyboardInterrupt:
        print()
        print('Letting the remaining tasks finish before termination.')
    except Exception as e:
        print('Unexpected error:', e)
    # Cancel any tasks that are still pending and let them unwind.
    pending = asyncio.Task.all_tasks()
    for task in pending:
        task.cancel()
    try:
        loop.run_until_complete(asyncio.gather(*pending))
    except CancelledError:
        pass
    loop.close()
if __name__ == '__main__':
    # Run the CLI entry point only when executed as a script.
    main()
Fix typo in argument help string.
#!/usr/bin/env python3
import asyncio
import os
import sys
import logging
import struct
import hashlib
import argparse
import binascii
import aiodns
from urllib.parse import urlparse, parse_qs
from base64 import b32decode, b16decode
from concurrent.futures import FIRST_COMPLETED, CancelledError
from logging import StreamHandler
from socket import inet_aton, inet_ntoa
from random import randint
from binascii import hexlify
from bencodepy import encode as bencode, decode as bdecode, DecodingError
# States of the BitTorrentProtocol field parser.
HANDSHAKE = 1
MESSAGE_LEN = 2
MESSAGE_TYPE = 3
MESSAGE_PAYLOAD = 4
class SetQueue(asyncio.Queue):
    """An asyncio.Queue backed by a set: duplicate puts are collapsed
    and items are retrieved in arbitrary order."""
    def _init(self, maxsize):
        # maxsize handling stays in the base class; storage is a set.
        self._queue = set()
    def _put(self, item):
        self._queue.add(item)
    def _get(self):
        # set.pop() returns an arbitrary element.
        return self._queue.pop()
# A SortedQueue is constructed from an infohash. Internally it removes
# duplicates, sorts items put into it based on their distance to the
# given infohash, and yields the items closest to the infohash first
# when asked.
class SortedQueue(asyncio.Queue):
    """A deduplicating asyncio.Queue that yields the items closest to
    ``infohash`` (per the module-level ``distance``) first."""
    def __init__(self, infohash):
        super(SortedQueue, self).__init__()
        self.infohash = infohash
    def _init(self, maxsize):
        self._queue = []
    def _put(self, item):
        if item not in self._queue:
            self._queue.append(item)
            # Sort descending by distance so pop() from the end of the
            # list returns the closest item.
            self._queue.sort(key=lambda i: -distance(i, self.infohash))
    def _get(self):
        return self._queue.pop()
# Seconds to wait for a reply before retrying.
TIMEOUT = 5
# Attempts per query/peer.
RETRIES = 2
# Shared mutable module state; resolver/nodeid/nodes are initialized in main().
resolver = None
nodeid = None
nodes = None
values = SetQueue()
all_peers = set()
metadata_size = 0
metadata = set()
full_metadata = b''
keep_running = False
get_peers_in_progress = 0
get_metadatas_in_progress = 0
class BitTorrentProtocol:
    """Peer-wire protocol speaking just enough BitTorrent to fetch the
    metadata (BEP 9) over the extension protocol (BEP 10).

    Incoming bytes are parsed with a small state machine over
    fixed-length "fields": the 68-byte handshake, then alternating
    4-byte length prefixes, 1-byte message types, and payloads.
    """
    def __init__(self, infohash, peerid):
        # Events signalled as the session progresses / fails.
        self.handshake_complete = asyncio.Event()
        self.extended_handshake_complete = asyncio.Event()
        self.metadata_block_received = asyncio.Event()
        self.error = asyncio.Event()
        self.infohash = infohash
        self.peerid = peerid
        # Field parser state: expect the 68-byte handshake first.
        self.state = HANDSHAKE
        self.field_len = 68
        self.field = b''
        self.leftover = b''
        self.metadata_size = 0
        self.metadata_block = b''
    def connection_made(self, transport):
        """Send our handshake advertising the extension and fast bits."""
        self.transport = transport
        self.transport.write(
            b'\x13BitTorrent protocol'
            b'\x00\x00\x00\x00\x00\x10\x00\x04' + \
            self.infohash + self.peerid
        )
    def data_received(self, data):
        """Buffer bytes until a full field is available, then parse it."""
        data = self.leftover + data
        if len(self.field) + len(data) < self.field_len:
            self.field += data
            self.leftover = b''
        elif len(self.field) + len(data) == self.field_len:
            self.field += data
            self.leftover = b''
            self.parse_field()
        else:
            n = self.field_len - len(self.field)
            self.field += data[:n]
            self.leftover = data[n:]
            self.parse_field()
        # The leftover may already contain the next complete field.
        if len(self.leftover) >= self.field_len and not self.error.is_set():
            self.data_received(b'')
    def eof_received(self):
        """Treat a clean EOF from the peer as a fatal error."""
        logger.debug('EOF received.')
        self.error.set()
    def connection_lost(self, exc):
        """Flag the connection as failed (exc is None on clean close)."""
        logger.debug('Connection lost: {}'.format(exc))
        self.error.set()
    def parse_field(self):
        """Advance the parser state machine over the completed field."""
        if self.state == HANDSHAKE:
            if not self.field[:20] == b'\x13BitTorrent protocol':
                logger.debug('Invalid pstr.')
                self.error.set()
                return
            # Reserved-bits check: extension protocol (BEP 10) bit.
            if int.from_bytes(self.field[20:28], byteorder='big') & 0x0000000000100000 == 0:
                logger.debug('Peer does not support extension protocol.')
                self.error.set()
                return
            # Reserved-bits check: fast extension bit.
            if int.from_bytes(self.field[20:28], byteorder='big') & 0x0000000000000004 == 0:
                logger.debug('Peer does not support fast protocol.')
                self.error.set()
                return
            self.state = MESSAGE_LEN
            self.field_len = 4
            self.handshake_complete.set()
            extended_handshake = bencode({
                'm': {b'ut_metadata': 2},
                'v': 'S.P.E.W.'
            })
            self.write_extended_message(0, extended_handshake)
            logger.debug('Sent extended handshake.')
        elif self.state == MESSAGE_LEN:
            self.message_len = int.from_bytes(self.field, byteorder='big')
            if self.message_len == 0:
                # Keep-alive: expect another 4-byte length prefix.
                self.state = MESSAGE_LEN
                # BUG FIX: was ``self.field = 4`` (an int, overwritten
                # below anyway); the intent is the expected field length.
                self.field_len = 4
            else:
                self.state = MESSAGE_TYPE
                self.field_len = 1
        elif self.state == MESSAGE_TYPE:
            self.message_type = int.from_bytes(self.field, byteorder='big')
            if self.message_len == 1:
                # Payload-less message: next field is a length prefix.
                self.state = MESSAGE_LEN
                # BUG FIX: was ``self.field = 4``; field_len stayed at 1,
                # so the next 4-byte length prefix was misread.
                self.field_len = 4
            else:
                self.message_len -= 1
                self.field_len = self.message_len
                self.state = MESSAGE_PAYLOAD
        elif self.state == MESSAGE_PAYLOAD:
            self.parse_message()
            self.field_len = 4
            self.state = MESSAGE_LEN
        else:
            logger.error('Invalid state.')
            self.error.set()
        self.field = b''
    def parse_message(self):
        """Log the message type; only extended messages are processed."""
        logger.debug({
            0: 'CHOKE',
            1: 'UNCHOKE',
            2: 'INTERESTED',
            3: 'NOT INTERESTED',
            4: 'HAVE',
            5: 'BITFIELD',
            6: 'REQUEST',
            7: 'PIECE',
            8: 'CANCEL',
            9: 'PORT',
            13: 'SUGGEST PIECE',
            14: 'HAVE ALL',
            15: 'HAVE NONE',
            16: 'REJECT REQUEST',
            17: 'ALLOWED FAST',
            20: 'EXTENDED'
        }.get(self.message_type, 'UNKNOWN MESSAGE'))
        if self.message_type == 20:
            self.parse_extended_message()
    def parse_extended_message(self):
        """Handle a BEP 10 extended handshake or a ut_metadata message."""
        extended_message_type = self.field[0]
        message = self.field[1:]
        if extended_message_type == 0:
            try:
                message = bdecode(message)
            except DecodingError:
                self.error.set()
                return
            if b'm' not in message:
                logger.debug('"m" not in extended handshake.')
                self.error.set()
                return
            self.extended_message_types = message[b'm']
            if b'ut_metadata' not in self.extended_message_types:
                logger.debug('Peer does not support metadata protocol.')
                self.error.set()
                return
            if b'metadata_size' not in message:
                logger.debug('Peer did not send "metadata_size" in extended handshake.')
                self.error.set()
                return
            self.metadata_size = message[b'metadata_size']
            logger.info('metadata size: {}'.format(self.metadata_size))
            self.extended_handshake_complete.set()
            self.write_message(15, b'') # have none
            logger.debug('Sent HAVE NONE.')
            self.write_message(0, b'') # choke
            logger.debug('Sent CHOKE.')
            self.write_message(3, b'') # not interested
            logger.debug('Sent NOT INTERESTED.')
        elif extended_message_type == self.extended_message_types[b'ut_metadata']:
            original_message = message
            try:
                message = bdecode(message)
            except DecodingError:
                self.error.set()
                return
            if message[b'msg_type'] == 0:
                # We never serve metadata, so reject the request.
                reply = {
                    'msg_type': 2,
                    'piece': message[b'piece']
                }
                # BUG FIX: the reject reply was built but never sent,
                # leaving the peer's request unanswered.
                self.write_extended_message(extended_message_type, bencode(reply))
            elif message[b'msg_type'] == 2:
                logger.debug('Request for metadata rejected.')
                return
            elif message[b'msg_type'] == 1:
                # The raw block bytes follow the bencoded header; recover
                # its size by re-encoding the decoded header.
                size = len(original_message) - len(bencode(message))
                logger.debug('Got a metadata block of size: {}'.format(size))
                self.metadata_block = original_message[-size:]
                self.metadata_block_received.set()
    def get_metadata_block(self, n):
        """Request piece ``n`` of the metadata from the peer."""
        message = {
            'msg_type': 0,
            'piece': n
        }
        logger.info('Requesting piece {} of metadata.'.format(n))
        msg = bencode(message)
        self.write_extended_message(self.extended_message_types[b'ut_metadata'], msg)
    def write_message(self, type, msg):
        """Send a length-prefixed peer-wire message of ``type``."""
        msg_len = 1 + len(msg)
        self.transport.write(msg_len.to_bytes(length=4, byteorder='big') + \
            bytes([type]) + msg)
    def write_extended_message(self, ex_type, msg):
        """Send an extended (type 20) message with extended id ``ex_type``."""
        self.write_message(20, bytes([ex_type]) + msg)
class DhtProtocol:
    """Datagram protocol for one outstanding KRPC (DHT) query.

    The query is sent on connection; a validated reply is stored in
    ``self.reply`` and signalled through ``self.reply_received``.
    """
    def __init__(self, query_type, nodeid,
                 target=None,
                 infohash=None,
                 implied_port=None,
                 port=None,
                 token=None):
        self.query_type = query_type
        self.nodeid = nodeid
        self.target = target
        self.infohash = infohash
        self.implied_port = implied_port
        self.port = port
        self.token = token
        # Random 16-bit transaction ID used to match replies to us.
        self.tid = struct.pack('!H', randint(0, 65535))
        # NOTE(review): the explicit ``loop`` argument to Event is
        # deprecated in modern asyncio; kept because this file uses
        # old-style @asyncio.coroutine code and a module-global loop.
        self.reply_received = asyncio.Event(loop=loop)
    def construct_message(self):
        """Return the bencoded KRPC query body.

        Raises:
            RuntimeError: if ``self.query_type`` is not a known query.
        """
        args = {
            'ping': {
                'id': self.nodeid
            },
            'find_node': {
                'id': self.nodeid,
                'target': self.target
            },
            'get_peers': {
                'id': self.nodeid,
                'info_hash': self.infohash
            },
            'announce_peer': {
                'id': self.nodeid,
                'implied_port': self.implied_port,
                'info_hash': self.infohash,
                'port': self.port,
                'token': self.token
            }
        }.get(self.query_type, None)
        # IDIOM FIX: compare against None with ``is``, not ``==``.
        if args is None:
            raise RuntimeError('Invalid DHT query type: {}'.format(
                self.query_type))
        return bencode({
            't': self.tid,
            'y': 'q',
            'q': self.query_type,
            'a': args
        })
    def connection_made(self, transport):
        """Send the query as soon as the UDP endpoint is ready."""
        self.transport = transport
        self.send_message()
    def send_message(self):
        """(Re)send the query datagram."""
        message = self.construct_message()
        logger.debug('Sending DHT query.')
        self.transport.sendto(message)
    def datagram_received(self, data, addr):
        """Validate an incoming datagram and record it as our reply.

        Malformed replies and replies whose transaction ID does not
        match ours are discarded silently (debug-logged).
        """
        try:
            message = bdecode(data)
        except DecodingError:
            logger.debug('Received invalid bencoding in reply. Discarded.')
            return
        if b't' not in message:
            logger.debug('Received invalid reply. Discarded')
            return
        if message[b't'] != self.tid:
            logger.debug('Received reply with invalid transaction ID. Discarded.')
            return
        if b'r' not in message or b'id' not in message[b'r']:
            logger.debug('Received invalid reply. Discarded.')
            return
        logger.debug('Received DHT reply from {}:{} with node ID {}.'.format(
            addr[0], addr[1], hexlify(message[b'r'][b'id']).decode()))
        self.reply = message[b'r']
        self.reply_received.set()
    def error_received(self, exc):
        # UDP errors are ignored; timeouts/retries handle lost packets.
        pass
    def connection_lost(self, exc):
        pass
    def retry(self):
        """Resend the query after a timeout."""
        logger.debug('Retrying...')
        self.send_message()
@asyncio.coroutine
def ping(loop, host, port):
    """Send a DHT ping to host:port; return True iff a reply arrived."""
    try:
        transport, protocol = yield from loop.create_datagram_endpoint(
            lambda: DhtProtocol('ping', nodeid=nodeid),
            remote_addr=(host, port))
    except OSError as e:
        logger.debug('Error opening socket for "ping": {}'.format(e))
        # BUG FIX: without this return, ``transport``/``protocol`` below
        # were unbound and raised NameError.
        return False
    for i in range(RETRIES):
        try:
            yield from asyncio.wait_for(
                protocol.reply_received.wait(),
                timeout=TIMEOUT)
        except asyncio.TimeoutError:
            protocol.retry()
        else:
            break
    transport.close()
    if protocol.reply_received.is_set():
        # BUG FIX: extra logging args need a %-style placeholder;
        # ``debug('Reply:', x)`` dropped/garbled the reply.
        logger.debug('Reply: %s', protocol.reply)
        logger.debug('Done.')
    else:
        logger.debug('No reply received.')
    return protocol.reply_received.is_set()
@asyncio.coroutine
def get_peers(loop, host, port, infohash):
    """Issue a DHT get_peers query and feed the results into the crawl.

    Compact peers are added to ``all_peers`` and the ``values`` queue;
    returned DHT nodes are queued on ``nodes`` for further querying.
    """
    global get_peers_in_progress
    get_peers_in_progress += 1
    try:
        try:
            transport, protocol = yield from loop.create_datagram_endpoint(
                lambda: DhtProtocol('get_peers', nodeid=nodeid, infohash=infohash),
                remote_addr=(host, port))
        except OSError as e:
            logger.debug('Error opening socket for get_peers: {}'.format(e))
            return
        for i in range(RETRIES):
            try:
                yield from asyncio.wait_for(
                    protocol.reply_received.wait(),
                    # CONSISTENCY FIX: use the module-wide TIMEOUT constant
                    # (same value, 5s) instead of a magic number.
                    timeout=TIMEOUT)
            except asyncio.TimeoutError:
                protocol.retry()
            else:
                break
        transport.close()
        if not protocol.reply_received.is_set():
            logger.debug('get_peers: No reply received.')
            return
        if b'values' in protocol.reply:
            peers = protocol.reply[b'values']
            for p in peers:
                # Compact peer info must be 4-byte IP + 2-byte port.
                if len(p) != 6:
                    logger.debug('Invalid peer "{}". Ignored.'.format(repr(p)))
                else:
                    all_peers.add(p)
                    yield from values.put(p)
        elif b'nodes' in protocol.reply:
            peers = protocol.reply[b'nodes']
            # Compact node info: 20-byte node ID + 4-byte IP + 2-byte port.
            peers = [peers[i:i+26] for i in range(0, len(peers), 26)]
            for p in peers:
                # Strip the node ID; queue just the IP:port part.
                yield from nodes.put(p[20:])
    finally:
        get_peers_in_progress -= 1
@asyncio.coroutine
def dns_resolve(loop, name):
    """Resolve ``name`` to an IPv4 address string via aiodns."""
    logger.info('Resolving: {}'.format(name))
    try:
        answers = yield from resolver.query(name, 'A')
    except aiodns.error.DNSError as e:
        raise RuntimeError('Could not resolve name:', name)
    return answers[0].host
@asyncio.coroutine
def get_metadata(loop, host, port, infohash):
    """Connect to one peer and try to download the complete metadata.

    Returns True when no further attempts should be made against this
    peer (success, or the global download is already finished), False on
    a retryable failure.  On success the bdecoded metadata is stored in
    the module-global ``full_metadata`` and ``keep_running`` is cleared.
    """
    global metadata, metadata_size, keep_running, full_metadata, get_metadatas_in_progress
    if not keep_running:
        return True
    get_metadatas_in_progress += 1
    try:
        logger.info('Getting metadata from: {}:{}'.format(host, port))
        try:
            transport, protocol = yield from loop.create_connection(
                lambda: BitTorrentProtocol(infohash, nodeid), host, port)
        except OSError as e:
            logger.debug('Connection error: {}'.format(e))
            return False
        logger.debug('Connected to peer: {}:{}'.format(host, port))
        # Wait for the plain BitTorrent handshake (or an error).
        done, pending = yield from asyncio.wait(
            [protocol.handshake_complete.wait(),
             protocol.error.wait()],
            return_when=FIRST_COMPLETED,
            timeout=TIMEOUT)
        for task in pending:
            task.cancel()
        if not done or protocol.error.is_set():
            logger.debug('Error communicating with the peer while waiting for the handshake.')
            transport.close()
            return False
        # Wait for the BEP 10 extended handshake.
        done, pending = yield from asyncio.wait(
            [protocol.extended_handshake_complete.wait(),
             protocol.error.wait()],
            return_when=FIRST_COMPLETED,
            timeout=TIMEOUT)
        for task in pending:
            task.cancel()
        if not done or protocol.error.is_set():
            logger.debug('Error communicating with the peer while waiting for the extended handshake.')
            transport.close()
            return False
        if metadata_size > 0 and metadata_size != protocol.metadata_size:
            logger.warning('Inconsistent metadata size received.')
        metadata_size = protocol.metadata_size
        # Metadata is transferred in 16 KiB blocks; round the count up.
        metadata_nblocks = int(metadata_size / (16 * 1024))
        metadata_nblocks += 0 if metadata_size % (16 * 1024) == 0 else 1
        while keep_running:
            protocol.metadata_block_received.clear()
            try:
                # Pick the first block index not yet downloaded.
                i = next(i for i in range(metadata_nblocks)
                         if i not in [m[0] for m in metadata])
            except StopIteration as e:
                # All blocks already fetched (possibly by another task).
                transport.close()
                return True
            protocol.get_metadata_block(i)
            done, pending = yield from asyncio.wait(
                [protocol.metadata_block_received.wait(),
                 protocol.error.wait()],
                return_when=FIRST_COMPLETED,
                timeout=TIMEOUT)
            for task in pending:
                task.cancel()
            if not done or protocol.error.is_set():
                logger.debug('Error communicating with the peer while waiting for metadata block.')
                transport.close()
                return False
            metadata.add((i, protocol.metadata_block))
            if {m[0] for m in metadata} == set(range(metadata_nblocks)):
                # metadata complete. hash check.
                m = hashlib.sha1()
                full_metadata = b''
                for i, b in sorted(metadata, key=lambda m: m[0]):
                    full_metadata += b
                    m.update(b)
                if m.digest() != infohash:
                    logger.debug('Invalid metadata received. Hash does not checkout. Discarding.')
                    metadata_size = 0
                    metadata = set()
                    return False
                logger.info('Metadata received.')
                full_metadata = bdecode(full_metadata)
                # NOTE(review): on this success path the transport is left
                # open — confirm this is intentional.
                keep_running = False
    finally:
        get_metadatas_in_progress -= 1
    return True
@asyncio.coroutine
def get_metadata_with_retries(loop, host, port, infohash):
    """Attempt ``get_metadata`` up to RETRIES times, stopping on success."""
    for attempt in range(RETRIES):
        if (yield from get_metadata(loop, host, port, infohash)):
            break
        logger.debug('Retrying get_metadata...')
def distance(i, ih):
    """Return the Hamming distance between the byte strings ``i`` and ``ih``."""
    return sum(bin(b1 ^ b2).count('1') for b1, b2 in zip(ih, i))
def get_closest_nodes(k, infohash):
    """Return up to ``k`` known peers closest (by ``distance``) to ``infohash``."""
    ranked = sorted(all_peers, key=lambda p: distance(p, infohash))
    return ranked[:k]
@asyncio.coroutine
def main(loop, infohash, filename):
    """Drive the crawl: bootstrap the DHT, find peers, fetch metadata,
    and write the resulting trackerless .torrent to ``filename``.

    NOTE(review): this coroutine shares its name with the CLI ``main()``
    defined later in the file; the later definition shadows this one at
    module scope — confirm which one callers actually invoke.
    """
    global keep_running
    logger.info('Using node ID: {}'.format(hexlify(nodeid).decode()))
    # Use router.bittorrent.com as the bootstrapping node.
    logger.info('Using router.bittorrent.com as the bootstrapping node.')
    ip = yield from dns_resolve(loop, 'router.bittorrent.com')
    logger.info('Resolved to: {}'.format(ip))
    yield from nodes.put(inet_aton(ip) + struct.pack('!H', 6881))
    # Recursively search for peers.
    keep_running = True
    while keep_running:
        if values.qsize() > 0:
            # Drain freshly discovered peers into metadata-fetch tasks.
            while values.qsize() > 0:
                peer = yield from values.get()
                host, port = inet_ntoa(peer[:4]), struct.unpack('!H', peer[4:])[0]
                loop.create_task(
                    get_metadata_with_retries(loop, host, port, infohash))
        elif get_peers_in_progress < 100 and get_metadatas_in_progress < 100 and nodes.qsize() > 0:
            # Under the concurrency cap: query the next-closest node.
            peer = yield from nodes.get()
            host, port = inet_ntoa(peer[:4]), struct.unpack('!H', peer[4:])[0]
            loop.create_task(get_peers(loop, host, port, infohash))
        else:
            # Nothing to launch right now; yield to other tasks.
            yield
        if get_peers_in_progress == 0 and get_metadatas_in_progress == 0 \
           and nodes.qsize() == 0 and values.qsize() == 0:
            logger.info('Nothing more to do. Quitting.')
            keep_running = False
    if full_metadata:
        # Embed the k closest known peers as bootstrap nodes.
        k = 8
        n = get_closest_nodes(k, infohash)
        n = [[inet_ntoa(p[:4]), struct.unpack('!H', p[4:])[0]]
             for p in n]
        torrent = {
            'nodes': n,
            'info': full_metadata
        }
        with open(filename, 'wb') as f:
            f.write(bencode(torrent))
def main():
    """Command-line entry point: parse arguments and fetch the torrent."""
    # BUG FIX: these names are read at module scope by the protocol
    # classes and coroutines; without ``global`` the assignments below
    # created shadowing locals and the module globals stayed None.
    global nodeid, nodes, logger, resolver, loop
    nodeid = os.urandom(20)
    parser = argparse.ArgumentParser(
        description='Convert an infohash into a trackerless torrent file.')
    parser.add_argument('infohash', type=str,
                        help='The infohash of the torrent. Both base16 and '
                        'base32 formatted infohashes are acceptable. You can '
                        'also pass a magnet URI for this argument.')
    parser.add_argument('--file', '-f', type=str,
                        help='The name of the output torrent file. Defaults '
                        'to the infohash with a .torrent extension.')
    args = parser.parse_args()
    if args.infohash.startswith('magnet:'):
        query = urlparse(args.infohash).query
        qs = parse_qs(query)
        v = qs.get('xt', None)
        # IDIOM FIX: ``not v`` covers both None and [].
        if not v:
            print('Invalid magnet URI: no "xt" query parameter.')
            exit(1)
        v = v[0]
        if not v.startswith('urn:btih:'):
            print('Invalid magnet URI: "xt" value not valid for BitTorrent.')
            exit(1)
        args.infohash = v[len('urn:btih:'):]
    if not args.file:
        args.file = args.infohash + '.torrent'
    args.infohash = args.infohash.upper()
    # Try base32 first, then base16 (hex).
    try:
        args.infohash = b32decode(args.infohash)
    except binascii.Error:
        try:
            args.infohash = b16decode(args.infohash)
        except binascii.Error:
            print('Invalid infohash.')
            exit(1)
    if len(args.infohash) != 20:
        print('Invalid infohash.')
        exit(1)
    nodes = SortedQueue(args.infohash)
    logger = logging.getLogger('ih2torrent')
    handler = StreamHandler(sys.stdout)
    handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s')
    handler.setFormatter(formatter)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    try:
        loop = asyncio.get_event_loop()
        resolver = aiodns.DNSResolver(loop=loop)
        # NOTE(review): ``main`` here resolves to this CLI function, which
        # shadows the module's crawl coroutine of the same name — confirm
        # the coroutine is what should be scheduled.
        loop.run_until_complete(main(loop, args.infohash, args.file))
    except KeyboardInterrupt:
        print()
        print('Letting the remaining tasks finish before termination.')
    except Exception as e:
        print('Unexpected error:', e)
    # Cancel any tasks that are still pending and let them unwind.
    pending = asyncio.Task.all_tasks()
    for task in pending:
        task.cancel()
    try:
        loop.run_until_complete(asyncio.gather(*pending))
    except CancelledError:
        pass
    loop.close()
if __name__ == '__main__':
    # Run the CLI entry point only when executed as a script.
    main()
|
#!/usr/bin/env python
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# -*- mode: python; indent-tabs-mode nil; tab-width 4; python-indent 4; -*-
"""usage: gidget [options] <command> [<args>...]
Options:
-h, --help Show this screen
--version
gidget commands are:
help
list
describe
add
remove
run
"""
import sys
import os
import shlex
import subprocess
from subprocess import call
from docopt import docopt
from ConfigParser import SafeConfigParser
import gidget_help
import gidget_list
import gidget_describe
import gidget_add
import gidget_remove
#import gidget_run
if __name__ == '__main__':
    # environment for subprocesses:
    # parent environment plus info from config file to environment
    # NOTE(review): subEnv is os.environ itself, not a copy — config
    # values written below also leak into this process's environment.
    subEnv = os.environ
    configParserDefaults = {}
    # Commands directory is resolved relative to the current working
    # directory, not this script's location.
    gidgetCommandsPath = os.path.realpath(os.getcwd() + '/../commands')
    gidgetPythonExecutable = sys.executable
    print "command path: " + gidgetCommandsPath
    config = SafeConfigParser(defaults = {
        'gidget_commands_dir':gidgetCommandsPath,
        'gidget_python_executable':gidgetPythonExecutable})
    # TODO: make config file location an optional command-line flag
    # TODO: error checking on file existence
    # TODO: error checking on successful config parsing
    # TODO: check file permissions and warn or error if not private
    config.read('.gidgetconfig')
    gidgetConfigDefaults = {}
    gidgetConfigSections = config.sections()
    # Flatten the config into one dict; export MAF_PROCESSING options
    # into the subprocess environment.
    for section in gidgetConfigSections:
        sectionOptions = config.options(section)
        for option in sectionOptions:
            # TODO: for now, sections are disregarded and everything is thrown
            # into one dictionary object; break this out per section?
            gidgetConfigDefaults[option] = config.get(section, option)
            if section == 'MAF_PROCESSING':
                # TODO: warn if this overwrites some existing envvars
                # export env vars as uppercase, per convention;
                # ConfigParser converts to lower.
                subEnv[('gidget_'+option).upper()] = gidgetConfigDefaults[option]
    # print "== gidget options =="
    # for key,val in gidgetConfigDefaults.items():
    #     print key + ": " + val
    # test env settings
    # command = '/usr/bin/env'
    # args = shlex.split(command)
    # print "running ", args
    # p = subprocess.Popen(args, env=subEnv)
    # print detailed help by default
    if len(sys.argv) < 2:
        sys.argv.append("--help")
    # for "-h" or "--help", docopt prints usage and exits cleanly
    mainArgs = docopt(__doc__,
                      version = 'gidget version 0.20.2',
                      options_first = True)
    #TODO: take action on any top-level options
    #parse the remaining subcommand and its options
    subCommandName = mainArgs['<command>']
    subCommandArgs = mainArgs['<args>']
    # subcommands are:
    # help
    # list
    # describe
    # add
    # remove
    # run
    # construct an 'args' for the subcommand
    subCommandArgs = [subCommandName] + subCommandArgs
    # Dispatch to the subcommand module; most currently just print the
    # parsed docopt arguments.
    if subCommandName == 'help':
        gidget_help.parse(subCommandArgs)
    elif subCommandName == 'list':
        print docopt(gidget_list.__doc__, argv = subCommandArgs)
    elif subCommandName == 'describe':
        print docopt(gidget_describe.__doc__, argv = subCommandArgs)
    elif subCommandName == 'add':
        print docopt(gidget_add.__doc__, argv = subCommandArgs)
    elif subCommandName == 'remove':
        print docopt(gidget_remove.__doc__, argv = subCommandArgs)
    elif subCommandName == 'run':
        # 'run' delegates to a separate script and propagates its exit code.
        exit(call(['python', 'gidget_run.py'] + subCommandArgs))# + ' '.join(subCommandArgs)]))
    else:
        print "command " + subCommandName + " not recognized."
        print __doc__
        #sys.exit(-1)
    sys.exit(0)
bumping version for release to master
#!/usr/bin/env python
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# -*- mode: python; indent-tabs-mode nil; tab-width 4; python-indent 4; -*-
"""usage: gidget [options] <command> [<args>...]
Options:
-h, --help Show this screen
--version
gidget commands are:
help
list
describe
add
remove
run
"""
import sys
import os
import shlex
import subprocess
from subprocess import call
from docopt import docopt
from ConfigParser import SafeConfigParser
import gidget_help
import gidget_list
import gidget_describe
import gidget_add
import gidget_remove
#import gidget_run
if __name__ == '__main__':
# environment for subprocesses:
# parent environment plus info from config file to environment
subEnv = os.environ
configParserDefaults = {}
gidgetCommandsPath = os.path.realpath(os.getcwd() + '/../commands')
gidgetPythonExecutable = sys.executable
print "command path: " + gidgetCommandsPath
config = SafeConfigParser(defaults = {
'gidget_commands_dir':gidgetCommandsPath,
'gidget_python_executable':gidgetPythonExecutable})
# TODO: make config file location an optional command-line flag
# TODO: error checking on file existence
# TODO: error checking on sucessful config parsing
# TODO: check file permissions and warn or error if not private
config.read('.gidgetconfig')
gidgetConfigDefaults = {}
gidgetConfigSections = config.sections()
for section in gidgetConfigSections:
sectionOptions = config.options(section)
for option in sectionOptions:
# TODO: for now, sections are disregarded and everything is thrown
# into one dictionary object; break this out per section?
gidgetConfigDefaults[option] = config.get(section, option)
if section == 'MAF_PROCESSING':
# TODO: warn if this overwrites some existing envvars
# export env vars as uppercase, per convention;
# ConfigParser converts to lower.
subEnv[('gidget_'+option).upper()] = gidgetConfigDefaults[option]
# print "== gidget options =="
# for key,val in gidgetConfigDefaults.items():
# print key + ": " + val
# test env settings
# command = '/usr/bin/env'
# args = shlex.split(command)
# print "running ", args
# p = subprocess.Popen(args, env=subEnv)
# print detailed help by default
if len(sys.argv) < 2:
sys.argv.append("--help")
# for "-h" or "--help", docopt prints usage and exits cleanly
mainArgs = docopt(__doc__,
version = 'gidget version 0.30.0',
options_first = True)
#TODO: take action on any top-level options
#parse the remaining subcommand and its options
subCommandName = mainArgs['<command>']
subCommandArgs = mainArgs['<args>']
# subcommands are:
# help
# list
# describe
# add
# remove
# run
# contruct an 'args' for the subcommand
subCommandArgs = [subCommandName] + subCommandArgs
if subCommandName == 'help':
gidget_help.parse(subCommandArgs)
elif subCommandName == 'list':
print docopt(gidget_list.__doc__, argv = subCommandArgs)
elif subCommandName == 'describe':
print docopt(gidget_describe.__doc__, argv = subCommandArgs)
elif subCommandName == 'add':
print docopt(gidget_add.__doc__, argv = subCommandArgs)
elif subCommandName == 'remove':
print docopt(gidget_remove.__doc__, argv = subCommandArgs)
elif subCommandName == 'run':
exit(call(['python', 'gidget_run.py'] + subCommandArgs))# + ' '.join(subCommandArgs)]))
else:
print "command " + subCommandName + " not recognized."
print __doc__
#sys.exit(-1)
sys.exit(0)
|
#!/usr/bin/env python
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# -*- mode: python; indent-tabs-mode nil; tab-width 4; python-indent 4; -*-
"""usage: gidget [options] <command> [<args>...]
Options:
-h, --help Show this screen
--version
gidget commands are:
help
list
describe
add
remove
run
"""
import sys
from docopt import docopt
import gidget_help
import gidget_list
import gidget_describe
import gidget_add
import gidget_remove
import gidget_run
if __name__ == '__main__':
    # Print detailed help by default when invoked with no arguments.
    if len(sys.argv) < 2:
        sys.argv.append("--help")
    # for "-h" or "--help", docopt prints usage and exits cleanly
    mainArgs = docopt(__doc__,
                      version = 'gidget version 0.0.9',
                      options_first = True)
    #TODO: take action on any top-level options
    # Parse the remaining subcommand and its options.
    subCommandName = mainArgs['<command>']
    subCommandArgs = mainArgs['<args>']
    # subcommands: help, list, describe, add, remove, run
    # Construct an argv-style 'args' list for the subcommand's own
    # docopt parser (docopt expects the command name first).
    subCommandArgs = [subCommandName] + subCommandArgs
    # Dispatch.  Every branch except 'help' currently only echoes the
    # parsed arguments; no subcommand is executed yet.
    if subCommandName == 'help':
        gidget_help.parse(subCommandArgs)
    elif subCommandName == 'list':
        print docopt(gidget_list.__doc__, argv = subCommandArgs)
    elif subCommandName == 'describe':
        print docopt(gidget_describe.__doc__, argv = subCommandArgs)
    elif subCommandName == 'add':
        print docopt(gidget_add.__doc__, argv = subCommandArgs)
    elif subCommandName == 'remove':
        print docopt(gidget_remove.__doc__, argv = subCommandArgs)
    elif subCommandName == 'run':
        print docopt(gidget_run.__doc__, argv = subCommandArgs)
    else:
        # Unknown subcommand: show usage and exit with an error status.
        print "command " + subCommandName + " not recognized."
        print __doc__
        sys.exit(-1)
    sys.exit(0)
adding initial configuration file handling
#!/usr/bin/env python
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# -*- mode: python; indent-tabs-mode nil; tab-width 4; python-indent 4; -*-
"""usage: gidget [options] <command> [<args>...]
Options:
-h, --help Show this screen
--version
gidget commands are:
help
list
describe
add
remove
run
"""
import sys
from docopt import docopt
import ConfigParser
import gidget_help
import gidget_list
import gidget_describe
import gidget_add
import gidget_remove
import gidget_run
if __name__ == '__main__':
    # Read the (optional) local config file before parsing arguments.
    config = ConfigParser.ConfigParser()
    # TODO: make config file location an optional command-line flag
    # TODO: error checking on file existence
    # TODO: error checking on successful config parsing
    # TODO: check file permissions and warn or error if not private
    config.read('.gidgetconfig')
    # Flatten every section's options into one dict.
    gidgetConfigDefaults = {}
    gidgetConfigSections = config.sections()
    for section in gidgetConfigSections:
        sectionOptions = config.options(section)
        for option in sectionOptions:
            # TODO: for now, sections are disregarded and everything is thrown
            # into one dictionary object; break this out per section?
            gidgetConfigDefaults[option] = config.get(section, option)
    # Print detailed help by default when invoked with no arguments.
    if len(sys.argv) < 2:
        sys.argv.append("--help")
    # for "-h" or "--help", docopt prints usage and exits cleanly
    mainArgs = docopt(__doc__,
                      version = 'gidget version 0.0.9',
                      options_first = True)
    #TODO: take action on any top-level options
    # Parse the remaining subcommand and its options.
    subCommandName = mainArgs['<command>']
    subCommandArgs = mainArgs['<args>']
    # subcommands: help, list, describe, add, remove, run
    # Construct an argv-style 'args' list for the subcommand's own
    # docopt parser (docopt expects the command name first).
    subCommandArgs = [subCommandName] + subCommandArgs
    # Dispatch.  Every branch except 'help' currently only echoes the
    # parsed arguments; no subcommand is executed yet.
    if subCommandName == 'help':
        gidget_help.parse(subCommandArgs)
    elif subCommandName == 'list':
        print docopt(gidget_list.__doc__, argv = subCommandArgs)
    elif subCommandName == 'describe':
        print docopt(gidget_describe.__doc__, argv = subCommandArgs)
    elif subCommandName == 'add':
        print docopt(gidget_add.__doc__, argv = subCommandArgs)
    elif subCommandName == 'remove':
        print docopt(gidget_remove.__doc__, argv = subCommandArgs)
    elif subCommandName == 'run':
        print docopt(gidget_run.__doc__, argv = subCommandArgs)
    else:
        print "command " + subCommandName + " not recognized."
        print __doc__
        #sys.exit(-1)
    sys.exit(0)
|
#!/usr/bin/env python
'''
Exports layers and paths to OpenRaster compatible file with
extra metadata usefule for importing into LOVE games.
'''
import csv
import errno
import os.path
import shutil
import xml.etree.cElementTree as et
from zipfile import ZipFile
import gimpfu
from gimp import pdb
def ora_love(img, active_layer, compression, dir_name, should_merge, should_zip):
    '''Plugin entry point.

    Exports *img* as an OpenRaster-style directory under
    dir_name/<image-basename>/ (recreating it from scratch), writes the
    layers as PNGs plus a stack.xml manifest, exports GIMP paths as CSV,
    and optionally zips the whole tree into a .ora archive.
    active_layer is unused but required by the gimpfu callback signature.
    '''
    # Build the stack.xml root now; layer/path nodes are appended below.
    root = et.Element('image')
    root.set('w', unicode(img.width))
    root.set('h', unicode(img.height))
    stack = et.SubElement(root, 'stack')
    # Create the image directory, replacing any previous export.
    name = os.path.splitext(os.path.basename(img.filename))[0]
    base_dir = os.path.join(dir_name, name)
    if os.access(base_dir, os.F_OK):
        shutil.rmtree(base_dir, ignore_errors=False)
    mkdirs(os.path.join(base_dir, 'data'))
    # Save the layer images and metadata.
    for layer in img.layers:
        to_save = process_layer(img, layer, stack, ['data'], base_dir, should_merge)
        save_layers(img, to_save, compression, base_dir)
    # Write the thumbnail.
    save_thumb(img, base_dir)
    if len(img.vectors) > 0:
        # Create the path directory.
        paths_path = os.path.join(base_dir, 'paths')
        mkdirs(paths_path)
        # Save the paths and metadata.
        paths_node = et.SubElement(root, 'paths')
        for path in img.vectors:
            to_save = process_path(path, paths_node, ['paths'])
            save_paths(to_save, base_dir)
    # Write the mimetype file (OpenRaster marker).
    with open(os.path.join(base_dir, 'mimetype'), 'w') as output_file:
        output_file.write('image/openraster')
    # Write the metadata file.
    with open(os.path.join(base_dir, 'stack.xml'), 'w') as output_file:
        et.ElementTree(root).write(output_file,
                                   xml_declaration=True,
                                   encoding='utf-8',
                                   method='xml')
    # Zip it, if requested.
    if should_zip:
        with ZipFile(os.path.join(dir_name, '%s.ora' % name), 'w') as f:
            old_cwd = os.getcwd()
            os.chdir(base_dir)
            try:
                # NOTE(review): 'root' here shadows the XML root element
                # above; harmless since the tree was already written.
                for root, dirs, files in os.walk('.'):
                    for filename in files:
                        full_path = os.path.join(root, filename)
                        # Strip the leading './' so archive paths are clean.
                        f.write(full_path, full_path[2:])
            finally:
                os.chdir(old_cwd)
def process_layer(img, layer, stack, dir_stack, base_dir, should_merge):
    """Recursively collect layers for export.

    Returns a list of (relative PNG path, layer) tuples and appends one
    <layer> node per exported layer to *stack*.  When *should_merge* is
    false, a layer group (one with sublayers) becomes a subdirectory and
    its children are recursed into; otherwise the layer is exported flat.
    """
    processed = []
    if not should_merge and hasattr(layer, 'layers'):
        new_dir_stack = dir_stack + [layer.name]
        # Reuse the module-level mkdirs helper instead of duplicating
        # the EEXIST-tolerant makedirs inline (the old inline copy also
        # used the Python-2-only "except OSError, e" form).
        mkdirs(os.path.join(base_dir, *new_dir_stack))
        for sublayer in layer.layers:
            processed.extend(process_layer(img, sublayer, stack,
                                           new_dir_stack, base_dir,
                                           should_merge))
    else:
        x, y = layer.offsets
        # Forward slashes regardless of platform: this is the in-archive
        # path recorded in stack.xml, not a filesystem path.
        filename = '/'.join(dir_stack + ['%s.png' % layer.name])
        layer_node = et.SubElement(stack, 'layer')
        layer_node.set('name', layer.name)
        layer_node.set('src', filename)
        layer_node.set('x', unicode(x))
        layer_node.set('y', unicode(y))
        # Hardcoded vals. FIXME one day
        layer_node.set('composite-op', 'svg:src-over')
        layer_node.set('opacity', '1.0')
        layer_node.set('visibility', 'visible')
        processed.append((filename, layer))
    return processed
def save_layers(img, layers, compression, base_dir):
    """Render each (relative path, layer) pair to a PNG under *base_dir*.

    Each layer is copied into a fresh scratch image, which is resized to
    the layer's bounds before saving, so the PNG is cropped to the layer.
    *compression* is the PNG compression level passed to file_png_save.
    """
    for rel_path, layer in layers:
        # The stored paths use '/', convert to the platform separator.
        rel_path = rel_path.replace('/', os.sep)
        tmp_img = pdb.gimp_image_new(img.width, img.height, img.base_type)
        tmp_layer = pdb.gimp_layer_new_from_drawable(layer, tmp_img)
        tmp_layer.name = layer.name
        tmp_img.add_layer(tmp_layer, 0)
        tmp_img.resize_to_layers()
        full_path = os.path.join(base_dir, rel_path)
        filename = os.path.basename(rel_path)
        pdb.file_png_save(
            tmp_img,
            tmp_img.layers[0],
            full_path,
            filename,
            0, # interlace
            compression, # compression
            1, # bkgd
            1, # gama
            1, # offs
            1, # phys
            1 # time
        )
def process_path(path, paths_node, base_dir):
    """Serialize one GIMP vectors object into CSV row data.

    Registers a <path> node on *paths_node* and returns
    [(relative_csv_path, rows)].  *base_dir* is a list of in-archive
    path components, not a string.  Each data row is
    [path name, stroke number, six coordinate values]; a closed stroke
    is closed by repeating its first coordinate group.
    """
    # Leading placeholder row keeps the CSV eight columns wide.
    rows = [[None] * 8]
    for stroke_number, stroke in enumerate(path.strokes, start=1):
        points, closed = stroke.points
        for offset in range(0, len(points), 6):
            rows.append([path.name, stroke_number] + list(points[offset:offset + 6]))
        if closed:
            # Close the loop with the stroke's first coordinate group.
            rows.append([path.name, stroke_number] + list(points[:6]))
    csv_name = '/'.join(base_dir + ['%s.csv' % path.name])
    node = et.SubElement(paths_node, 'path')
    node.set('name', path.name)
    node.set('src', csv_name)
    return [(csv_name, rows)]
def save_paths(paths, base_dir):
    """Write each (relative path, rows) pair under *base_dir* as a CSV.

    Uses unix line endings so the exported archive is platform
    independent: csv.writer's default terminator is '\\r\\n', which the
    later revision of this exporter also overrides.
    """
    for rel_path, path_data in paths:
        # The stored paths use '/', convert to the platform separator.
        rel_path = rel_path.replace('/', os.sep)
        with open(os.path.join(base_dir, rel_path), 'w') as f:
            writer = csv.writer(f, lineterminator='\n')
            writer.writerows(path_data)
def save_thumb(img, base_dir):
    """Render a flattened thumbnail of *img* into
    base_dir/Thumbnails/thumbnail.png, scaled so the longer side is at
    most 255 pixels (aspect ratio preserved)."""
    tmp_img = pdb.gimp_image_new(img.width, img.height, img.base_type)
    for i, layer in enumerate(img.layers):
        tmp_layer = pdb.gimp_layer_new_from_drawable(layer, tmp_img)
        tmp_img.add_layer(tmp_layer, i)
    flattened = tmp_img.flatten()
    max_dim = 255
    if img.width > max_dim or img.height > max_dim:
        if img.width > img.height:
            width = max_dim
            # NOTE: Python 2 integer division — truncates toward zero.
            height = width * img.height / img.width
        elif img.width < img.height:
            height = max_dim
            width = height * img.width / img.height
        else:
            # Square image: both sides capped at max_dim.
            width = height = max_dim
        pdb.gimp_image_scale(tmp_img, width, height)
    thumb_path = os.path.join(base_dir, 'Thumbnails')
    mkdirs(thumb_path)
    thumb_filename = 'thumbnail.png'
    pdb.file_png_save_defaults(tmp_img, flattened, os.path.join(thumb_path, thumb_filename), thumb_filename)
def mkdirs(dir_name):
    """Create *dir_name* (and parents), tolerating an existing directory.

    Any other OSError (permissions, bad path component, ...) is
    re-raised.  Uses the "except ... as" form, which is valid on both
    Python 2.6+ and Python 3, instead of the Python-2-only comma form.
    """
    try:
        os.makedirs(dir_name)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
# Register the exporter with GIMP's plugin registry and hand control to
# the gimpfu main loop; this runs at module load, as GIMP plugins require.
gimpfu.register(
    # name
    "ora-love",
    # blurb
    "OpenRaster-love exporter",
    # help
    "Exports layers and paths to OpenRaster file with extra metadata useful for importing into LOVE games",
    # author
    "Carlo Cabanilla",
    # copyright
    "Carlo Cabanilla",
    # date
    "2014",
    # menupath
    "<Image>/File/Export/Export as ora-love",
    # imagetypes
    "*",
    # params
    [
        (gimpfu.PF_ADJUSTMENT, "compression", "PNG Compression level:", 0, (0, 9, 1)),
        (gimpfu.PF_DIRNAME, "dir", "Directory", os.getcwd()),
        (gimpfu.PF_BOOL, "should_merge", "Merge layer groups?", True),
        (gimpfu.PF_BOOL, "should_zip", "Zip to .ora?", False),
    ],
    # results
    [],
    # function
    ora_love
)
gimpfu.main()
use unix line endings for path csvs
#!/usr/bin/env python
'''
Exports layers and paths to OpenRaster compatible file with
extra metadata usefule for importing into LOVE games.
'''
import csv
import errno
import os.path
import shutil
import xml.etree.cElementTree as et
from zipfile import ZipFile
import gimpfu
from gimp import pdb
def ora_love(img, active_layer, compression, dir_name, should_merge, should_zip):
    '''Plugin entry point.

    Exports *img* as an OpenRaster-style directory under
    dir_name/<image-basename>/ (recreating it from scratch), writes the
    layers as PNGs plus a stack.xml manifest, exports GIMP paths as CSV,
    and optionally zips the whole tree into a .ora archive.
    active_layer is unused but required by the gimpfu callback signature.
    '''
    # Build the stack.xml root now; layer/path nodes are appended below.
    root = et.Element('image')
    root.set('w', unicode(img.width))
    root.set('h', unicode(img.height))
    stack = et.SubElement(root, 'stack')
    # Create the image directory, replacing any previous export.
    name = os.path.splitext(os.path.basename(img.filename))[0]
    base_dir = os.path.join(dir_name, name)
    if os.access(base_dir, os.F_OK):
        shutil.rmtree(base_dir, ignore_errors=False)
    mkdirs(os.path.join(base_dir, 'data'))
    # Save the layer images and metadata.
    for layer in img.layers:
        to_save = process_layer(img, layer, stack, ['data'], base_dir, should_merge)
        save_layers(img, to_save, compression, base_dir)
    # Write the thumbnail.
    save_thumb(img, base_dir)
    if len(img.vectors) > 0:
        # Create the path directory.
        paths_path = os.path.join(base_dir, 'paths')
        mkdirs(paths_path)
        # Save the paths and metadata.
        paths_node = et.SubElement(root, 'paths')
        for path in img.vectors:
            to_save = process_path(path, paths_node, ['paths'])
            save_paths(to_save, base_dir)
    # Write the mimetype file (OpenRaster marker).
    with open(os.path.join(base_dir, 'mimetype'), 'w') as output_file:
        output_file.write('image/openraster')
    # Write the metadata file.
    with open(os.path.join(base_dir, 'stack.xml'), 'w') as output_file:
        et.ElementTree(root).write(output_file,
                                   xml_declaration=True,
                                   encoding='utf-8',
                                   method='xml')
    # Zip it, if requested.
    if should_zip:
        with ZipFile(os.path.join(dir_name, '%s.ora' % name), 'w') as f:
            old_cwd = os.getcwd()
            os.chdir(base_dir)
            try:
                # NOTE(review): 'root' here shadows the XML root element
                # above; harmless since the tree was already written.
                for root, dirs, files in os.walk('.'):
                    for filename in files:
                        full_path = os.path.join(root, filename)
                        # Strip the leading './' so archive paths are clean.
                        f.write(full_path, full_path[2:])
            finally:
                os.chdir(old_cwd)
def process_layer(img, layer, stack, dir_stack, base_dir, should_merge):
    """Recursively collect layers for export.

    Returns a list of (relative PNG path, layer) tuples and appends one
    <layer> node per exported layer to *stack*.  When *should_merge* is
    false, a layer group (one with sublayers) becomes a subdirectory and
    its children are recursed into; otherwise the layer is exported flat.
    """
    processed = []
    # If this layer is a group (has sublayers) and merging is off, recurse.
    if not should_merge and hasattr(layer, 'layers'):
        new_dir_stack = dir_stack + [layer.name]
        try:
            os.makedirs(os.path.join(base_dir, *new_dir_stack))
        except OSError, e:
            # Only "already exists" is tolerated.
            if e.errno != errno.EEXIST:
                raise
        for sublayer in layer.layers:
            processed.extend(process_layer(img, sublayer, stack, new_dir_stack, base_dir, should_merge))
    else:
        layer_name = layer.name
        x, y = layer.offsets
        # Forward slashes regardless of platform: this is the in-archive
        # path recorded in stack.xml, not a filesystem path.
        filename = '/'.join(dir_stack + ['%s.png' % layer_name])
        layer_node = et.SubElement(stack, 'layer')
        layer_node.set('name', layer_name)
        layer_node.set('src', filename)
        layer_node.set('x', unicode(x))
        layer_node.set('y', unicode(y))
        # Hardcoded vals. FIXME one day
        layer_node.set('composite-op', 'svg:src-over')
        layer_node.set('opacity', '1.0')
        layer_node.set('visibility', 'visible')
        processed.append((filename, layer))
    return processed
def save_layers(img, layers, compression, base_dir):
    """Render each (relative path, layer) pair to a PNG under *base_dir*.

    Each layer is copied into a fresh scratch image, which is resized to
    the layer's bounds before saving, so the PNG is cropped to the layer.
    *compression* is the PNG compression level passed to file_png_save.
    """
    for rel_path, layer in layers:
        # The stored paths use '/', convert to the platform separator.
        rel_path = rel_path.replace('/', os.sep)
        tmp_img = pdb.gimp_image_new(img.width, img.height, img.base_type)
        tmp_layer = pdb.gimp_layer_new_from_drawable(layer, tmp_img)
        tmp_layer.name = layer.name
        tmp_img.add_layer(tmp_layer, 0)
        tmp_img.resize_to_layers()
        full_path = os.path.join(base_dir, rel_path)
        filename = os.path.basename(rel_path)
        pdb.file_png_save(
            tmp_img,
            tmp_img.layers[0],
            full_path,
            filename,
            0, # interlace
            compression, # compression
            1, # bkgd
            1, # gama
            1, # offs
            1, # phys
            1 # time
        )
def process_path(path, paths_node, base_dir):
    """Convert one GIMP vectors object into CSV row data.

    Registers a <path> node on *paths_node* and returns
    [(relative_csv_path, rows)].  *base_dir* is a list of in-archive
    path components (a directory stack), not a string.
    """
    # Leading placeholder row keeps the CSV eight columns wide
    # (name, stroke number, six coordinate values).
    data = [[None] * 8]
    strokes_count = 0
    for stroke in path.strokes:
        strokes_count = strokes_count+1
        stroke_points, is_closed = stroke.points
        # copy triplets (six coordinate values per row)
        for triplet in range(0, len(stroke_points), 6):
            row = [path.name, strokes_count]
            row.extend(stroke_points[triplet:triplet + 6])
            data.append(row)
        # for closed stroke, close with first triplet
        if is_closed:
            row = [path.name, strokes_count]
            row.extend(stroke_points[:6])
            data.append(row)
    filename = '/'.join(base_dir + ['%s.csv' % path.name])
    path_node = et.SubElement(paths_node, 'path')
    path_node.set('name', path.name)
    path_node.set('src', filename)
    return [(filename, data)]
def save_paths(paths, base_dir):
    """Persist each (relative path, rows) pair below *base_dir* as CSV,
    using unix line endings for platform-independent output."""
    for rel_path, path_data in paths:
        rel_path = rel_path.replace('/', os.sep)
        target = os.path.join(base_dir, rel_path)
        with open(target, 'w') as handle:
            csv.writer(handle, lineterminator='\n').writerows(path_data)
def save_thumb(img, base_dir):
    """Render a flattened thumbnail of *img* into
    base_dir/Thumbnails/thumbnail.png, scaled so the longer side is at
    most 255 pixels (aspect ratio preserved)."""
    tmp_img = pdb.gimp_image_new(img.width, img.height, img.base_type)
    for i, layer in enumerate(img.layers):
        tmp_layer = pdb.gimp_layer_new_from_drawable(layer, tmp_img)
        tmp_img.add_layer(tmp_layer, i)
    flattened = tmp_img.flatten()
    max_dim = 255
    if img.width > max_dim or img.height > max_dim:
        if img.width > img.height:
            width = max_dim
            # NOTE: Python 2 integer division — truncates toward zero.
            height = width * img.height / img.width
        elif img.width < img.height:
            height = max_dim
            width = height * img.width / img.height
        else:
            # Square image: both sides capped at max_dim.
            width = height = max_dim
        pdb.gimp_image_scale(tmp_img, width, height)
    thumb_path = os.path.join(base_dir, 'Thumbnails')
    mkdirs(thumb_path)
    thumb_filename = 'thumbnail.png'
    pdb.file_png_save_defaults(tmp_img, flattened, os.path.join(thumb_path, thumb_filename), thumb_filename)
def mkdirs(dir_name):
    """os.makedirs() that tolerates the directory already existing."""
    try:
        os.makedirs(dir_name)
    except OSError, e:
        # Only "already exists" is expected; re-raise anything else.
        if e.errno != errno.EEXIST:
            raise
# Register the exporter with GIMP's plugin registry and hand control to
# the gimpfu main loop; this runs at module load, as GIMP plugins require.
gimpfu.register(
    # name
    "ora-love",
    # blurb
    "OpenRaster-love exporter",
    # help
    "Exports layers and paths to OpenRaster file with extra metadata useful for importing into LOVE games",
    # author
    "Carlo Cabanilla",
    # copyright
    "Carlo Cabanilla",
    # date
    "2014",
    # menupath
    "<Image>/File/Export/Export as ora-love",
    # imagetypes
    "*",
    # params
    [
        (gimpfu.PF_ADJUSTMENT, "compression", "PNG Compression level:", 0, (0, 9, 1)),
        (gimpfu.PF_DIRNAME, "dir", "Directory", os.getcwd()),
        (gimpfu.PF_BOOL, "should_merge", "Merge layer groups?", True),
        (gimpfu.PF_BOOL, "should_zip", "Zip to .ora?", False),
    ],
    # results
    [],
    # function
    ora_love
)
gimpfu.main()
|
"""
Imports site and captain data into ROOMS for a NRD batch data upload. "NRD" is hard-coded.
Intended to be run with the remote_api. Great instructions at
https://github.com/babybunny/rebuildingtogethercaptain/wiki/Import-Site-and-Captain-Data
# or for development..
bash> python $(which remote_api_shell.py) -s localhost:8081
dev~rebuildingtogethercaptain> reload(import_csv)
<module 'import_csv' from '/Users/babybunny/appengine/rebuildingtogethercaptain/import_csv.py'>
dev~rebuildingtogethercaptain> import_csv.import_sites(input_csv="../2012_ROOMS_site_info_sample.csv")
dev~rebuildingtogethercaptain> import_csv.import_captains(input_csv="../2012_ROOMS_Captain_email_sample.csv")
"""
import csv
import sys
import logging
from gae.room import ndb_models
def clean_get(d, k):
    """Fetch column *k* from CSV row dict *d*, tolerating the
    "Repair Application: " prefix in either direction, then scrub
    newlines and common mis-decoded UTF-8 bytes and force ASCII.

    Raises KeyError naming the originally requested column when no
    variant of the name exists in *d*.
    """
    ### bit of dirty data coverage ###
    ok = k
    if k not in d:
        k = k.replace("Repair Application: ", "")
    if k not in d:
        k = "Repair Application: " + k
    if k not in d:
        raise KeyError("No column named \"{}\"".format(ok))
    ### bit of dirty data coverage ###
    # \xe2\x80\x99 is a mojibake right single quote; \xc2/\xc3 are stray
    # UTF-8 lead bytes from a latin-1 mis-decode.  The '\xc2' replace was
    # missing here (the later revision of this importer scrubs it too).
    return d[k].replace('\n', ' ').replace('\xe2', "'").replace('\x80', "'") \
        .replace('\x99', '').replace('\xc2', '').replace('\xc3', '') \
        .replace('\x95', '').encode('ascii', 'replace')
def get_program(year):
    """Return (creating on first use) the NRD Program entity for *year*."""
    program_type, _ = ndb_models.ProgramType.get_or_create("NRD")
    program, _ = ndb_models.Program.get_or_create(program_type.key, int(year))
    return program
def sanity_check_headers(e, a, path):
    """Abort with exit status 1 when expected headers *e* != actual *a*.

    Every missing and every unexpected header is reported to stderr
    first.  Uses sys.stderr.write instead of the Python-2-only
    "print >> sys.stderr" statement; output is byte-identical.
    """
    if e != a:
        for h in e - a:
            sys.stderr.write("Expected header \"{}\" was not found in {}\n".format(h, path))
        for h in a - e:
            sys.stderr.write("Found header \"{}\" in {} which was not expected\n".format(h, path))
        sys.stderr.flush()
        raise SystemExit(1)
def import_sites(input_csv):
    """Create NewSite entities from a ROOMS site-info CSV.

    input_csv is a path like "../2012_ROOMS_site_info_sample.csv".
    Existing sites (matched by Site ID) are skipped, never updated.
    """
    expected_headers = {"Announcement Subject", "Announcement Body",
                        "Program Year", "Site ID",
                        "Budget",
                        "Homeowner/Site Contact Name",
                        "Applicant Phone", "Applicant Mobile Phone",
                        "Street Address",
                        "City", "Zipcode", "Jurisdiction", "Sponsor Name",
                        }
    reader = csv.DictReader(open(input_csv))
    actual_headers = set(reader.fieldnames)
    sanity_check_headers(expected_headers, actual_headers, input_csv)
    for s in reader:
        number = s["Site ID"]
        site = ndb_models.NewSite.query(ndb_models.NewSite.number == number).get()
        if site:
            logging.info('site %s exists, skipping', number)
            continue
        site = ndb_models.NewSite(number=number)
        program = get_program(s['Program Year'])
        site.program = program.name
        site.program_key = program.key
        # BUG FIX: dict.get() accepts at most two arguments — the old
        # three-argument call s.get("Budget", s.get(...), "$0") raised
        # TypeError on every row.  Fall back to the alternate column
        # name, then to "$0" (the later revision fixed this the same way).
        budget = s.get("Budget", s.get("Budgeted Cost in Campaign", "$0"))
        # Normalize "$1,234.00" -> "1234"; empty string falls back to '0'.
        budget = budget.strip("$").replace(",", "").replace(".00", "") or '0'
        site.budget = int(budget)
        site.street_number = clean_get(s, "Street Address")
        site.city_state_zip = "%s CA, %s" % (
            clean_get(s, "City"),
            clean_get(s, "Zipcode"))
        site.name = clean_get(s, "Homeowner/Site Contact Name")
        site.applicant = clean_get(s, "Homeowner/Site Contact Name")
        site.applicant_home_phone = clean_get(s, "Applicant Phone")
        site.applicant_mobile_phone = clean_get(s, "Applicant Mobile Phone")
        site.sponsor = clean_get(s, "Sponsor Name")
        site.jurisdiction = clean_get(s, "Jurisdiction")
        site.announcement_subject = clean_get(s, "Announcement Subject")
        site.announcement_body = clean_get(s, "Announcement Body")
        site.put()
        logging.info('put site %s', number)
def import_captains(input_csv):
    """Create or update Captain entities from a ROOMS captain CSV and
    map them to their sites via SiteCaptain records.

    input_csv is a path like "../2012_ROOMS_site_info_sample.csv".
    Captains are matched by (in priority order) explicit key, ROOMS ID,
    then email; the CSV values then overwrite the stored ones.
    """
    expected_headers = {"Site ID", "Name", "ROOMS Captain ID", "Phone", "Email", "Project Role"}
    reader = csv.DictReader(open(input_csv))
    actual_headers = set(reader.fieldnames)
    sanity_check_headers(expected_headers, actual_headers, input_csv)
    for s in reader:
        key = s.get('key')
        email = clean_get(s, "Email")
        rooms_id = clean_get(s, "ROOMS Captain ID")
        name = clean_get(s, "Name")
        captain = None
        if key:
            captain = ndb_models.Captain.get_by_id(int(key))
            if captain:
                logging.info('got captain from key %s', key)
        if not captain:
            captain = ndb_models.Captain.query(ndb_models.Captain.rooms_id == rooms_id).get()
            if captain:
                logging.info('got captain from rooms_id %s', rooms_id)
        if not captain:
            captain = ndb_models.Captain.query(ndb_models.Captain.email == email).get()
            if captain:
                logging.info('got captain from email %s', email)
        if not captain:
            logging.info('creating captain key %s name %s email %s rooms_id %s',
                         key, name, email, rooms_id)
            captain = ndb_models.Captain(name=name, email=email, rooms_id=rooms_id)
        # Over-write these values, assume volunteer database is more up to
        # date.
        captain.name = name
        captain.email = email
        captain.rooms_id = rooms_id
        captain.put()
        # A row may list several comma-separated site numbers.
        numbers = [n.strip() for n in s["Site ID"].split(',')]
        for number in numbers:
            site = ndb_models.NewSite.query(ndb_models.NewSite.number == number).get()
            if not site:
                logging.error('site %s does not exist, skipping', number)
                continue
            # Input type is like "Volunteer Captain" but in the model it's
            # just "Volunteer"; match by substring against the choices.
            input_type = s.get("Captain Type", s.get("Project Role"))
            # NOTE(review): if no choice matches, t silently keeps the
            # last choice from the list — confirm that is intended.
            for t in ndb_models.SiteCaptain.type._choices:
                if t in input_type:
                    break
            query = ndb_models.SiteCaptain.query(ndb_models.SiteCaptain.site == site.key).filter(ndb_models.SiteCaptain.captain == captain.key)
            sitecaptain = query.get()
            if sitecaptain is None:
                logging.info('Creating new SiteCaptain mapping %s to %s',
                             site.number, captain.name)
                sitecaptain = ndb_models.SiteCaptain(site=site.key, captain=captain.key, type=t)
            else:
                logging.info('Found existing SiteCaptain')
                sitecaptain.type = t
            sitecaptain.put()
mods to column names and weird characters
"""
Imports site and captain data into ROOMS for a NRD batch data upload. "NRD" is hard-coded.
Intended to be run with the remote_api. Great instructions at
https://github.com/babybunny/rebuildingtogethercaptain/wiki/Import-Site-and-Captain-Data
# or for development..
bash> python $(which remote_api_shell.py) -s localhost:8081
dev~rebuildingtogethercaptain> reload(import_csv)
<module 'import_csv' from '/Users/babybunny/appengine/rebuildingtogethercaptain/import_csv.py'>
dev~rebuildingtogethercaptain> import_csv.import_sites(input_csv="../2012_ROOMS_site_info_sample.csv")
dev~rebuildingtogethercaptain> import_csv.import_captains(input_csv="../2012_ROOMS_Captain_email_sample.csv")
"""
import csv
import sys
import logging
from gae.room import ndb_models
def clean_get(d, k):
    """Fetch column *k* from CSV row dict *d*, tolerating the
    "Repair Application: " prefix in either direction, then scrub
    newlines and common mis-decoded UTF-8 bytes and force ASCII.

    Raises KeyError naming the originally requested column when no
    variant of the name exists in *d*.
    """
    requested = k
    if k not in d:
        k = k.replace("Repair Application: ", "")
    if k not in d:
        k = "Repair Application: " + k
    if k not in d:
        raise KeyError("No column named \"{}\"".format(requested))
    value = d[k]
    # \xe2\x80\x99 is a mojibake right single quote; \xc2/\xc3 are stray
    # UTF-8 lead bytes left behind by a latin-1 mis-decode.
    for old, new in (('\n', ' '), ('\xe2', "'"), ('\x80', "'"),
                     ('\x99', ''), ('\xc2', ''), ('\xc3', ''),
                     ('\x95', '')):
        value = value.replace(old, new)
    return value.encode('ascii', 'replace')
def get_program(year):
    """Return (creating on first use) the NRD Program entity for *year*.

    Both the "NRD" ProgramType and the per-year Program are created via
    get_or_create, so repeated imports reuse the same entities.
    """
    nrd_type, created = ndb_models.ProgramType.get_or_create("NRD")
    program, created = ndb_models.Program.get_or_create(nrd_type.key, int(year))
    return program
def sanity_check_headers(e, a, path):
    """Abort with exit status 1 when expected headers *e* differ from
    actual headers *a* found in the CSV at *path*, reporting every
    mismatch to stderr first."""
    if e != a:
        for h in e - a:  # expected but missing from the file
            print >> sys.stderr, "Expected header \"{}\" was not found in {}".format(h, path)
        for h in a - e:  # present in the file but not expected
            print >> sys.stderr, "Found header \"{}\" in {} which was not expected".format(h, path)
        sys.stderr.flush()
        raise SystemExit(1)
def import_sites(input_csv):
    """Create NewSite entities from a ROOMS site-info CSV.

    input_csv is a path like "../2012_ROOMS_site_info_sample.csv".
    Existing sites (matched by Site ID) are skipped, never updated.
    Rows that fail to parse are logged and skipped; the import continues
    with the next row.
    """
    expected_headers = {"Announcement Subject", "Announcement Body",
                        "Program Year", "Site ID",
                        "Budget",
                        "Homeowner/Site Contact Name",
                        "Applicant Phone", "Applicant Mobile Phone",
                        "Street Address",
                        "City", "Zipcode", "Jurisdiction", "Sponsor Name",
                        "CDBG Funding Source"  # unused
                        }
    reader = csv.DictReader(open(input_csv))
    actual_headers = set(reader.fieldnames)
    sanity_check_headers(expected_headers, actual_headers, input_csv)
    for s in reader:
        try:
            number = s["Site ID"]
            site = ndb_models.NewSite.query(ndb_models.NewSite.number == number).get()
            if site:
                logging.info('site %s exists, skipping', number)
                continue
            site = ndb_models.NewSite(number=number)
            program = get_program(s['Program Year'])
            site.program = program.name
            site.program_key = program.key
            # Fall back to the alternate column name, then to "$0";
            # normalize "$1,234.00" -> "1234", empty -> '0'.
            budget = s.get("Budget", s.get("Budgeted Cost in Campaign", "$0"))
            budget = budget.strip("$").replace(",", "").replace(".00", "") or '0'
            site.budget = int(budget)
            site.street_number = clean_get(s, "Street Address")
            site.city_state_zip = "%s CA, %s" % (
                clean_get(s, "City"),
                clean_get(s, "Zipcode"))
            site.name = clean_get(s, "Homeowner/Site Contact Name")
            site.applicant = clean_get(s, "Homeowner/Site Contact Name")
            site.applicant_home_phone = clean_get(s, "Applicant Phone")
            site.applicant_mobile_phone = clean_get(s, "Applicant Mobile Phone")
            site.sponsor = clean_get(s, "Sponsor Name")
            site.jurisdiction = clean_get(s, "Jurisdiction")
            site.announcement_subject = clean_get(s, "Announcement Subject")
            site.announcement_body = clean_get(s, "Announcement Body")
            site.put()
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate; everything else is logged per-row.
            logging.error("error (%s)\nparsing line (%s)", sys.exc_info()[0], s)
        else:
            # Log success only for rows that were actually written.
            logging.info('put site %s', number)
def import_captains(input_csv):
    """Create or update Captain entities from a ROOMS captain CSV and
    map them to their sites via SiteCaptain records.

    input_csv is a path like "../2012_ROOMS_site_info_sample.csv".
    Captains are matched by (in priority order) explicit key, ROOMS ID,
    then email; the CSV values then overwrite the stored ones.
    """
    expected_headers = {"Site ID", "Name", "ROOMS Captain ID", "Phone", "Email", "Captain Type"}
    reader = csv.DictReader(open(input_csv))
    actual_headers = set(reader.fieldnames)
    sanity_check_headers(expected_headers, actual_headers, input_csv)
    for s in reader:
        key = s.get('key')
        email = clean_get(s, "Email")
        rooms_id = clean_get(s, "ROOMS Captain ID")
        name = clean_get(s, "Name")
        captain = None
        if key:
            captain = ndb_models.Captain.get_by_id(int(key))
            if captain:
                logging.info('got captain from key %s', key)
        if not captain:
            captain = ndb_models.Captain.query(ndb_models.Captain.rooms_id == rooms_id).get()
            if captain:
                logging.info('got captain from rooms_id %s', rooms_id)
        if not captain:
            captain = ndb_models.Captain.query(ndb_models.Captain.email == email).get()
            if captain:
                logging.info('got captain from email %s', email)
        if not captain:
            logging.info('creating captain key %s name %s email %s rooms_id %s',
                         key, name, email, rooms_id)
            captain = ndb_models.Captain(name=name, email=email, rooms_id=rooms_id)
        # Over-write these values, assume volunteer database is more up to
        # date.
        captain.name = name
        captain.email = email
        captain.rooms_id = rooms_id
        captain.put()
        # A row may list several comma-separated site numbers.
        numbers = [n.strip() for n in s["Site ID"].split(',')]
        for number in numbers:
            site = ndb_models.NewSite.query(ndb_models.NewSite.number == number).get()
            if not site:
                logging.error('site %s does not exist, skipping', number)
                continue
            # Input type is like "Volunteer Captain" but in the model it's
            # just "Volunteer"; match by substring against the choices.
            input_type = s.get("Captain Type", s.get("Project Role"))
            # NOTE(review): if no choice matches, t silently keeps the
            # last choice from the list — confirm that is intended.
            for t in ndb_models.SiteCaptain.type._choices:
                if t in input_type:
                    break
            query = ndb_models.SiteCaptain.query(ndb_models.SiteCaptain.site == site.key).filter(ndb_models.SiteCaptain.captain == captain.key)
            sitecaptain = query.get()
            if sitecaptain is None:
                logging.info('Creating new SiteCaptain mapping %s to %s',
                             site.number, captain.name)
                sitecaptain = ndb_models.SiteCaptain(site=site.key, captain=captain.key, type=t)
            else:
                logging.info('Found existing SiteCaptain')
                sitecaptain.type = t
            sitecaptain.put()
|
import sys
sys.path.append("/mnt/moehlc/home/idaf_library")
import mahotas
import libidaf.idafIO as io
# Directory holding the segmented MRI angiography volumes to process.
path = '/mnt/moehlc/idaf/IDAF_Projects/140327_raman_bloodvessel_mri/data/segmented/angio_wt'
# Only files whose names match this pattern are listed.
pattern = 'flowSkel'
# List of matching filenames (project helper; see libidaf.idafIO).
fnames = io.getFilelistFromDir(path,pattern)
updated import_tst.py
import sys
sys.path.append("/mnt/moehlc/home/idaf_library")
#import mahotas
import vigra
import libidaf.idafIO as io
import numpy as np
from scipy import ndimage
from scipy.stats import nanmean
def nanmeanFilter(x):
    """Return the mean of *x* ignoring NaNs (ndimage.generic_filter kernel).

    ``scipy.stats.nanmean`` was deprecated in scipy 0.15 and removed in
    0.18; ``numpy.nanmean`` (``np`` is already imported by this script) is
    the supported drop-in replacement with identical semantics.
    """
    return np.nanmean(x)
path = '/mnt/moehlc/idaf/IDAF_Projects/140327_raman_bloodvessel_mri/data/segmented/angio_wt/'
pattern = 'flowSkel'
filterSize = 30  # edge length of the mean-filter window (voxels)
fnames = io.getFilelistFromDir(path,pattern)
fname = fnames[0]  # only the first matching stack is processed
absname = path + fname
#import
# Load the multi-page image into a (height, width, z) float volume.
zsize = vigra.impex.numberImages(absname)
im =vigra.readImage(absname, index = 0, dtype='FLOAT')
vol = np.zeros([im.height,im.width,zsize])
for i in range(zsize):
    im=np.squeeze(vigra.readImage(absname, index = i, dtype='FLOAT'))
    vol[:,:,i] = im
# Mark zero (background) voxels as NaN so the NaN-aware filter skips them.
vol[vol == 0] = np.nan
res = ndimage.generic_filter(vol, nanmeanFilter,size = filterSize)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2012-2014 Martin Zimmermann.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Isso – a lightweight Disqus alternative
import pkg_resources
dist = pkg_resources.get_distribution("isso")
# check if executable is `isso` and gevent is available
import sys
if sys.argv[0].startswith("isso"):
try:
import gevent.monkey
gevent.monkey.patch_all()
except ImportError:
pass
import os
import errno
import logging
import tempfile
from os.path import abspath, dirname, exists, join
from argparse import ArgumentParser
from functools import partial, reduce
from itsdangerous import URLSafeTimedSerializer
from werkzeug.routing import Map
from werkzeug.exceptions import HTTPException, InternalServerError
from werkzeug.middleware.shared_data import SharedDataMiddleware
from werkzeug.local import Local, LocalManager
from werkzeug.serving import run_simple
from werkzeug.middleware.proxy_fix import ProxyFix
from werkzeug.middleware.profiler import ProfilerMiddleware
local = Local()
local_manager = LocalManager([local])
from isso import config, db, migrate, wsgi, ext, views
from isso.core import ThreadedMixin, ProcessMixin, uWSGIMixin
from isso.wsgi import origin, urlsplit
from isso.utils import http, JSONRequest, html, hash
from isso.views import comments
from isso.ext.notifications import Stdout, SMTP
logging.getLogger('werkzeug').setLevel(logging.WARN)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s: %(message)s")
logger = logging.getLogger("isso")
class ProxyFixCustom(ProxyFix):
    """werkzeug ProxyFix preconfigured to trust one X-Forwarded-Prefix entry."""

    def __init__(self, app):
        # This is needed for werkzeug.wsgi.get_current_url called in isso/views/comments.py
        # to work properly when isso is hosted under a sub-path
        # cf. https://werkzeug.palletsprojects.com/en/1.0.x/middleware/proxy_fix/
        super().__init__(app, x_prefix=1)
class Isso(object):
    """Core WSGI application: wires configuration, SQLite storage, markup,
    notification subscribers and the URL map together."""

    def __init__(self, conf):
        self.conf = conf
        self.db = db.SQLite3(conf.get('general', 'dbpath'), conf)
        # Signs/verifies tokens with the session key stored in the database.
        self.signer = URLSafeTimedSerializer(
            self.db.preferences.get("session-key"))
        self.markup = html.Markup(conf.section('markup'))
        self.hasher = hash.new(conf.section("hash"))

        # Cooperative call: the concrete App class (see make_app) mixes in a
        # Threaded/Process/uWSGI mixin whose __init__ also takes `conf`.
        super(Isso, self).__init__(conf)

        subscribers = []
        smtp_backend = False
        for backend in conf.getlist("general", "notify"):
            if backend == "stdout":
                subscribers.append(Stdout(None))
            elif backend in ("smtp", "SMTP"):
                smtp_backend = True
            else:
                logger.warning("unknown notification backend '%s'", backend)
        # SMTP is also required when reply notifications are enabled.
        if smtp_backend or conf.getboolean("general", "reply-notifications"):
            subscribers.append(SMTP(self))
        self.signal = ext.Signal(*subscribers)

        self.urls = Map()
        # View constructors register their routes on self.urls as a side effect.
        views.Info(self)
        views.Metrics(self)
        comments.API(self, self.hasher)

    def render(self, text):
        # Render comment text through the configured markup engine.
        return self.markup.render(text)

    def sign(self, obj):
        # Serialize and sign `obj` into a tamper-evident token.
        return self.signer.dumps(obj)

    def unsign(self, obj, max_age=None):
        # Verify a token; rejects tokens older than the configured max-age.
        return self.signer.loads(obj, max_age=max_age or self.conf.getint('general', 'max-age'))

    def dispatch(self, request):
        # Publish per-request state through werkzeug's thread-local.
        local.request = request
        local.host = wsgi.host(request.environ)
        local.origin = origin(self.conf.getiter(
            "general", "host"))(request.environ)

        adapter = self.urls.bind_to_environ(request.environ)

        try:
            handler, values = adapter.match()
        except HTTPException as e:
            # HTTPException instances are valid WSGI responses themselves.
            return e
        else:
            try:
                response = handler(request.environ, request, **values)
            except HTTPException as e:
                return e
            except Exception:
                logger.exception("%s %s", request.method,
                                 request.environ["PATH_INFO"])
                return InternalServerError()
            else:
                return response

    def wsgi_app(self, environ, start_response):
        response = self.dispatch(JSONRequest(environ))
        return response(environ, start_response)

    def __call__(self, environ, start_response):
        return self.wsgi_app(environ, start_response)
def make_app(conf=None, threading=True, multiprocessing=False, uwsgi=False):
    """Build the fully wrapped WSGI application.

    Picks a concurrency mixin for the Isso app, validates configuration and
    website reachability, then applies the middleware stack.
    """
    if not any((threading, multiprocessing, uwsgi)):
        raise RuntimeError("either set threading, multiprocessing or uwsgi")

    if threading:
        class App(Isso, ThreadedMixin):
            pass
    elif multiprocessing:
        class App(Isso, ProcessMixin):
            pass
    else:
        class App(Isso, uWSGIMixin):
            pass

    isso = App(conf)

    logger.info("Using database at '%s'",
                abspath(isso.conf.get('general', 'dbpath')))

    if not any(conf.getiter("general", "host")):
        logger.error("No website(s) configured, Isso won't work.")
        sys.exit(1)

    # check HTTP server connection
    for host in conf.getiter("general", "host"):
        with http.curl('HEAD', host, '/', 5) as resp:
            if resp is not None:
                logger.info("connected to %s", host)
                break
    else:
        # for/else: no configured host answered the HEAD probe
        logger.warning("unable to connect to your website, Isso will probably not "
                       "work correctly. Please make sure, Isso can reach your "
                       "website via HTTP(S).")

    # Middleware list; applied innermost-first by the reduce() below.
    wrapper = [local_manager.make_middleware]

    if isso.conf.getboolean("server", "profile"):
        wrapper.append(partial(ProfilerMiddleware,
                               sort_by=("cumulative", ), restrictions=("isso/(?!lib)", 10)))

    # Serve static assets relative to this module.
    wrapper.append(partial(SharedDataMiddleware, exports={
        '/js': join(dirname(__file__), 'js/'),
        '/css': join(dirname(__file__), 'css/'),
        '/img': join(dirname(__file__), 'img/'),
        '/demo': join(dirname(__file__), 'demo/')
    }))

    wrapper.append(partial(wsgi.CORSMiddleware,
                           origin=origin(isso.conf.getiter("general", "host")),
                           allowed=("Origin", "Referer", "Content-Type"),
                           exposed=("X-Set-Cookie", "Date")))

    wrapper.extend([wsgi.SubURI, ProxyFixCustom])

    return reduce(lambda x, f: f(x), wrapper, isso)
def main():
    """CLI entry point: parse arguments, then either import a dump or run the server."""
    parser = ArgumentParser(description="a blog comment hosting service")
    subparser = parser.add_subparsers(help="commands", dest="command")

    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + dist.version)
    parser.add_argument("-c", dest="conf", default="/etc/isso.cfg",
                        metavar="/etc/isso.cfg", help="set configuration file")

    imprt = subparser.add_parser('import', help="import Disqus XML export")
    imprt.add_argument("dump", metavar="FILE")
    imprt.add_argument("-n", "--dry-run", dest="dryrun", action="store_true",
                       help="perform a trial run with no changes made")
    imprt.add_argument("-t", "--type", dest="type", default=None,
                       choices=["disqus", "wordpress", "generic"], help="export type")
    imprt.add_argument("--empty-id", dest="empty_id", action="store_true",
                       help="workaround for weird Disqus XML exports, #135")

    # run Isso as stand-alone server
    subparser.add_parser("run", help="run server")

    args = parser.parse_args()

    # ISSO_SETTINGS env var takes precedence over `-c` flag
    conf_file = os.environ.get('ISSO_SETTINGS') or args.conf
    if not conf_file:
        logger.error("No configuration file specified! Exiting.")
        sys.exit(1)
    if exists(conf_file):
        logger.info("Using configuration file '%s'", abspath(conf_file))
    else:
        logger.error("Specified config '%s' does not exist! Exiting.",
                     abspath(conf_file))
        sys.exit(1)
    conf = config.load(config.default_file(), conf_file)

    if args.command == "import":
        conf.set("guard", "enabled", "off")

        # Dry runs import into a throwaway temporary database.
        if args.dryrun:
            xxx = tempfile.NamedTemporaryFile()
            dbpath = xxx.name
        else:
            dbpath = conf.get("general", "dbpath")

        mydb = db.SQLite3(dbpath, conf)
        migrate.dispatch(args.type, mydb, args.dump, args.empty_id)

        sys.exit(0)

    # Mirror isso and werkzeug logging into the configured log file.
    if conf.get("general", "log-file"):
        handler = logging.FileHandler(conf.get("general", "log-file"))

        logger.addHandler(handler)
        logging.getLogger("werkzeug").addHandler(handler)

        logger.propagate = False
        logging.getLogger("werkzeug").propagate = False

    if conf.get("server", "listen").startswith("http://"):
        host, port, _ = urlsplit(conf.get("server", "listen"))
        # Prefer gevent's server when installed, else werkzeug's dev server.
        try:
            from gevent.pywsgi import WSGIServer
            WSGIServer((host, port), make_app(conf)).serve_forever()
        except ImportError:
            run_simple(host, port, make_app(conf), threaded=True,
                       use_reloader=conf.getboolean('server', 'reload'))
    else:
        # unix:// socket listener; remove a stale socket file first.
        sock = conf.get("server", "listen").partition("unix://")[2]
        try:
            os.unlink(sock)
        except OSError as ex:
            if ex.errno != errno.ENOENT:
                raise
        wsgi.SocketHTTPServer(sock, make_app(conf)).serve_forever()
wsgi: Return HTTP errors as JSON if client prefers it
Return HTTP errors in JSON format if `Accept:` header
prefers `application/json` by weight rating.
See also: https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2012-2014 Martin Zimmermann.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Isso – a lightweight Disqus alternative
import pkg_resources
dist = pkg_resources.get_distribution("isso")
# check if executable is `isso` and gevent is available
import sys
if sys.argv[0].startswith("isso"):
try:
import gevent.monkey
gevent.monkey.patch_all()
except ImportError:
pass
import os
import errno
import logging
import tempfile
from os.path import abspath, dirname, exists, join
from argparse import ArgumentParser
from functools import partial, reduce
from itsdangerous import URLSafeTimedSerializer
from werkzeug.routing import Map
from werkzeug.exceptions import HTTPException, InternalServerError
from werkzeug.middleware.shared_data import SharedDataMiddleware
from werkzeug.local import Local, LocalManager
from werkzeug.serving import run_simple
from werkzeug.middleware.proxy_fix import ProxyFix
from werkzeug.middleware.profiler import ProfilerMiddleware
local = Local()
local_manager = LocalManager([local])
from isso import config, db, migrate, wsgi, ext, views
from isso.core import ThreadedMixin, ProcessMixin, uWSGIMixin
from isso.wsgi import origin, urlsplit
from isso.utils import http, JSONRequest, JSONResponse, html, hash
from isso.views import comments
from isso.ext.notifications import Stdout, SMTP
logging.getLogger('werkzeug').setLevel(logging.WARN)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s: %(message)s")
logger = logging.getLogger("isso")
def error_handler(env, request, error):
    """Render *error* as JSON when the client's Accept header prefers it.

    Returns a JSONResponse carrying the error message (status defaults to
    500 when the exception has no code); otherwise returns the original
    HTTPException, which renders its usual HTML body.
    """
    if request.accept_mimetypes.best != "application/json":
        return error
    payload = {'message': str(error)}
    status = error.code if error.code is not None else 500
    return JSONResponse(payload, status)
class ProxyFixCustom(ProxyFix):
    """ProxyFix variant that trusts a single X-Forwarded-Prefix header."""

    def __init__(self, app):
        # This is needed for werkzeug.wsgi.get_current_url called in isso/views/comments.py
        # to work properly when isso is hosted under a sub-path
        # cf. https://werkzeug.palletsprojects.com/en/1.0.x/middleware/proxy_fix/
        super().__init__(app, x_prefix=1)
class Isso(object):
    """Core WSGI application: configuration, SQLite storage, markup,
    notification subscribers and URL routing in one object."""

    def __init__(self, conf):
        self.conf = conf
        self.db = db.SQLite3(conf.get('general', 'dbpath'), conf)
        # Signs/verifies tokens with the session key stored in the database.
        self.signer = URLSafeTimedSerializer(
            self.db.preferences.get("session-key"))
        self.markup = html.Markup(conf.section('markup'))
        self.hasher = hash.new(conf.section("hash"))

        # Cooperative call: the concrete App class (see make_app) mixes in a
        # Threaded/Process/uWSGI mixin whose __init__ also takes `conf`.
        super(Isso, self).__init__(conf)

        subscribers = []
        smtp_backend = False
        for backend in conf.getlist("general", "notify"):
            if backend == "stdout":
                subscribers.append(Stdout(None))
            elif backend in ("smtp", "SMTP"):
                smtp_backend = True
            else:
                logger.warning("unknown notification backend '%s'", backend)
        # SMTP is also required when reply notifications are enabled.
        if smtp_backend or conf.getboolean("general", "reply-notifications"):
            subscribers.append(SMTP(self))
        self.signal = ext.Signal(*subscribers)

        self.urls = Map()
        # View constructors register their routes on self.urls as a side effect.
        views.Info(self)
        views.Metrics(self)
        comments.API(self, self.hasher)

    def render(self, text):
        # Render comment text through the configured markup engine.
        return self.markup.render(text)

    def sign(self, obj):
        # Serialize and sign `obj` into a tamper-evident token.
        return self.signer.dumps(obj)

    def unsign(self, obj, max_age=None):
        # Verify a token; rejects tokens older than the configured max-age.
        return self.signer.loads(obj, max_age=max_age or self.conf.getint('general', 'max-age'))

    def dispatch(self, request):
        # Publish per-request state through werkzeug's thread-local.
        local.request = request
        local.host = wsgi.host(request.environ)
        local.origin = origin(self.conf.getiter(
            "general", "host"))(request.environ)

        adapter = self.urls.bind_to_environ(request.environ)

        try:
            handler, values = adapter.match()
        except HTTPException as e:
            # error_handler renders the error as JSON when the client prefers it.
            return error_handler(request.environ, request, e)
        else:
            try:
                response = handler(request.environ, request, **values)
            except HTTPException as e:
                return error_handler(request.environ, request, e)
            except Exception:
                logger.exception("%s %s", request.method,
                                 request.environ["PATH_INFO"])
                return error_handler(request.environ, request, InternalServerError())
            else:
                return response

    def wsgi_app(self, environ, start_response):
        response = self.dispatch(JSONRequest(environ))
        return response(environ, start_response)

    def __call__(self, environ, start_response):
        return self.wsgi_app(environ, start_response)
def make_app(conf=None, threading=True, multiprocessing=False, uwsgi=False):
    """Build the fully wrapped WSGI application.

    Picks a concurrency mixin for the Isso app, validates configuration and
    website reachability, then applies the middleware stack.
    """
    if not any((threading, multiprocessing, uwsgi)):
        raise RuntimeError("either set threading, multiprocessing or uwsgi")

    if threading:
        class App(Isso, ThreadedMixin):
            pass
    elif multiprocessing:
        class App(Isso, ProcessMixin):
            pass
    else:
        class App(Isso, uWSGIMixin):
            pass

    isso = App(conf)

    logger.info("Using database at '%s'",
                abspath(isso.conf.get('general', 'dbpath')))

    if not any(conf.getiter("general", "host")):
        logger.error("No website(s) configured, Isso won't work.")
        sys.exit(1)

    # check HTTP server connection
    for host in conf.getiter("general", "host"):
        with http.curl('HEAD', host, '/', 5) as resp:
            if resp is not None:
                logger.info("connected to %s", host)
                break
    else:
        # for/else: no configured host answered the HEAD probe
        logger.warning("unable to connect to your website, Isso will probably not "
                       "work correctly. Please make sure, Isso can reach your "
                       "website via HTTP(S).")

    # Middleware list; applied innermost-first by the reduce() below.
    wrapper = [local_manager.make_middleware]

    if isso.conf.getboolean("server", "profile"):
        wrapper.append(partial(ProfilerMiddleware,
                               sort_by=("cumulative", ), restrictions=("isso/(?!lib)", 10)))

    # Serve static assets relative to this module.
    wrapper.append(partial(SharedDataMiddleware, exports={
        '/js': join(dirname(__file__), 'js/'),
        '/css': join(dirname(__file__), 'css/'),
        '/img': join(dirname(__file__), 'img/'),
        '/demo': join(dirname(__file__), 'demo/')
    }))

    wrapper.append(partial(wsgi.CORSMiddleware,
                           origin=origin(isso.conf.getiter("general", "host")),
                           allowed=("Origin", "Referer", "Content-Type"),
                           exposed=("X-Set-Cookie", "Date")))

    wrapper.extend([wsgi.SubURI, ProxyFixCustom])

    return reduce(lambda x, f: f(x), wrapper, isso)
def main():
    """CLI entry point: parse arguments, then either import a dump or run the server."""
    parser = ArgumentParser(description="a blog comment hosting service")
    subparser = parser.add_subparsers(help="commands", dest="command")

    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + dist.version)
    parser.add_argument("-c", dest="conf", default="/etc/isso.cfg",
                        metavar="/etc/isso.cfg", help="set configuration file")

    imprt = subparser.add_parser('import', help="import Disqus XML export")
    imprt.add_argument("dump", metavar="FILE")
    imprt.add_argument("-n", "--dry-run", dest="dryrun", action="store_true",
                       help="perform a trial run with no changes made")
    imprt.add_argument("-t", "--type", dest="type", default=None,
                       choices=["disqus", "wordpress", "generic"], help="export type")
    imprt.add_argument("--empty-id", dest="empty_id", action="store_true",
                       help="workaround for weird Disqus XML exports, #135")

    # run Isso as stand-alone server
    subparser.add_parser("run", help="run server")

    args = parser.parse_args()

    # ISSO_SETTINGS env var takes precedence over `-c` flag
    conf_file = os.environ.get('ISSO_SETTINGS') or args.conf
    if not conf_file:
        logger.error("No configuration file specified! Exiting.")
        sys.exit(1)
    if exists(conf_file):
        logger.info("Using configuration file '%s'", abspath(conf_file))
    else:
        logger.error("Specified config '%s' does not exist! Exiting.",
                     abspath(conf_file))
        sys.exit(1)
    conf = config.load(config.default_file(), conf_file)

    if args.command == "import":
        conf.set("guard", "enabled", "off")

        # Dry runs import into a throwaway temporary database.
        if args.dryrun:
            xxx = tempfile.NamedTemporaryFile()
            dbpath = xxx.name
        else:
            dbpath = conf.get("general", "dbpath")

        mydb = db.SQLite3(dbpath, conf)
        migrate.dispatch(args.type, mydb, args.dump, args.empty_id)

        sys.exit(0)

    # Mirror isso and werkzeug logging into the configured log file.
    if conf.get("general", "log-file"):
        handler = logging.FileHandler(conf.get("general", "log-file"))

        logger.addHandler(handler)
        logging.getLogger("werkzeug").addHandler(handler)

        logger.propagate = False
        logging.getLogger("werkzeug").propagate = False

    if conf.get("server", "listen").startswith("http://"):
        host, port, _ = urlsplit(conf.get("server", "listen"))
        # Prefer gevent's server when installed, else werkzeug's dev server.
        try:
            from gevent.pywsgi import WSGIServer
            WSGIServer((host, port), make_app(conf)).serve_forever()
        except ImportError:
            run_simple(host, port, make_app(conf), threaded=True,
                       use_reloader=conf.getboolean('server', 'reload'))
    else:
        # unix:// socket listener; remove a stale socket file first.
        sock = conf.get("server", "listen").partition("unix://")[2]
        try:
            os.unlink(sock)
        except OSError as ex:
            if ex.errno != errno.ENOENT:
                raise
        wsgi.SocketHTTPServer(sock, make_app(conf)).serve_forever()
|
#!/usr/bin/env python
import sys
import binascii
import getopt
import hashlib
import struct
import zlib
# Per-title parameters for embedded GBA ROM chunks in alldata.bin:
#   md5    - MD5 (hex) of the pristine alldata.bin, used for auto-detection
#   key    - hex-encoded XOR keystream applied to the compressed ROM chunk
#   offset - byte offset of the ROM chunk inside alldata.bin
#   length - byte length of the ROM chunk
game_info = {
    'zelda': {
        'md5': b'32fc43935b17a1707a5c8a68b58b495b',
        'key': b'124a3e853c7cd7ba88f92540ba76254446c33c38d4534d3f8911f8d716b0027c17139fc2cc6486deb256cfea5fcfb43b88002b32a9fa2eba469c805bfea4d58b9b259c6b6d6a63e75dad37b936ee90b0',
        'offset': 41578496,
        'length': 7985152,
    },
    'mario': {
        'md5': b'efb7bf0ae783b1af39d5dc80ea61c4ed',
        'key': b'2bf3702bf54b24df82c8644004bd10b6be1cf6c534327a58c11ae0a4b55a70bf136a8ce0042e1ca2e462e581ae675eff176459fb0cfb04fa255ac68b31bf89258e3162568757b05419f765a7ee3419cc',
        'offset': 44120064,
        'length': 6920192,
    },
    'mm03': {
        'md5': b'c81a3b4e36762190aaa54ba60d6f5fb9',
        'key': b'e563ab200ffbfb8e0f2cce9bade0c82f37e25e261eb2169b312cf09f2a3a30f92d372fa2b4b5383fdeadff7b5f8bb51c27a98de145fd518b4cf50e54f23ad894e93615fb58274f7fd5c699a5b3eb05dd',
        'offset': 38205440,
        'length': 4847616,
    },
    'ff1': {
        'md5': b'1690b5c5b4e7f00e2011b1fd91ca925d',
        'key': b'a762bbca183ae6fcb32cccfe58f41ac1562817704674d9e0293f1831809937174a7fbf42b47648c37793690f8faf353d9213e3009e7aecec8f4d2978f6080883e9b8ed1822616aeb18a82fddda046fb1',
        'offset': 31680512,
        'length': 2459648,
    },
}
def getGameInfo(gameName, adb_data):
    """Find the key/offset/length record for an alldata.bin blob.

    Identifies the game by the MD5 of *adb_data* first, then falls back to
    matching *gameName* against the known titles.  Returns the matching
    game_info dict, or None when nothing matches.
    """
    adb_md5 = hashlib.md5(adb_data).hexdigest()
    #print("Info: adb md5 is %s" % adb_md5)

    # Try to match by MD5.  hexdigest() returns str while the table stores
    # bytes; the original bytes == str comparison could never succeed on
    # Python 3, so decode the stored digest before comparing.
    for gi in game_info.values():
        if gi['md5'].decode('ascii') == adb_md5:
            return gi
    print("Warning: no matching md5")

    # Try to match by name
    if gameName in game_info:
        return game_info[gameName]

    print("Error: no matching md5, no matching name")
    return None
def extractFile(gameName, adbFilename):
    """Deobfuscate and decompress the ROM embedded in *adbFilename*.

    Writes the raw ROM next to the input as '<adbFilename>.gba'.
    Returns early (after getGameInfo's error message) when the game
    cannot be identified.
    """
    # Read in the whole alldata.bin file; `with` closes the handle
    # deterministically (the original leaked the file object).
    with open(adbFilename, 'rb') as f:
        adb_data = f.read()
    #print("Read %d bytes" % len(adb_data))

    gi = getGameInfo(gameName, adb_data)
    if not gi:
        return

    gikey = gi['key']
    gioff = gi['offset']
    gilen = gi['length']
    #print("Using %s/%d/%d" % (gikey, gioff, gilen))

    key = bytearray(binascii.unhexlify(gikey))
    key_len = len(key)

    # Take a RW copy of the entire chunk
    data = bytearray(adb_data[gioff : gioff + gilen])

    # For each byte, XOR in our key
    # +8 = skip the MDF magic + size
    for i in range(len(data) - 8):
        data[i + 8] ^= key[i % key_len]

    # Decompress the unobfuscated data
    raw_data = zlib.decompress(bytes(data[8:]))

    # Write it out
    print("Writing %d bytes to %s.gba" % (len(raw_data), adbFilename))
    with open(adbFilename + '.gba', 'wb') as f:
        f.write(raw_data)
def injectFile(gameName, adbFilename, injectName):
    """Compress, obfuscate and splice *injectName* into a copy of alldata.bin.

    Writes the result as '<injectName>.adb'.  Aborts with a message when
    the compressed ROM does not fit in the original chunk.
    """
    # Read the whole alldata.bin into a RW array; `with` closes the handle
    # deterministically (the original leaked the file object).
    with open(adbFilename, 'rb') as f:
        adb_data = bytearray(f.read())

    gi = getGameInfo(gameName, adb_data)
    if not gi:
        return

    gikey = gi['key']
    gioff = gi['offset']
    gilen = gi['length']

    key = bytearray(binascii.unhexlify(gikey))
    key_len = len(key)

    # Read in the rom file we want to inject
    with open(injectName, 'rb') as f:
        rom_data = bytearray(f.read())

    # Compress the data at maximum level; the chunk size is fixed.
    compressed_data = bytearray(zlib.compress(bytes(rom_data), 9))

    # Check it will fit (8 bytes are reserved for the MDF header)
    if len(compressed_data) > gilen - 8:
        print("Compressed file too large for %s" % injectName)
        return

    # Insert the MDF header
    adb_data[gioff : gioff + 4] = b'mdf\0'
    # Insert our injected file size (uncompressed)
    # NOTE - the original is using little-endian! Heathens!
    adb_data[gioff + 4 : gioff + 8] = struct.pack('<i', len(rom_data))

    # For each byte, XOR in our key
    for i in range(len(compressed_data)):
        adb_data[gioff + 8 + i] = compressed_data[i] ^ key[i % key_len]

    # Pad the remainder of the chunk with zeros
    adb_data[gioff + 8 + len(compressed_data) : gioff + gilen] = b'\00' * (gilen - 8 - len(compressed_data))

    # Write out the modified ADB
    print("Saving as %s.adb" % injectName)
    with open(injectName + '.adb', 'wb') as f:
        f.write(adb_data)
def main():
    """Parse command-line options and run the list/extract/inject actions."""
    try:
        # BUG FIX: the long option was misspelled "extact", so `--extract`
        # always raised GetoptError even though the handler below expects it.
        opts, args = getopt.getopt(sys.argv[1:], "hla:emz",
                                   ["help", "list", "adb=", "extract", "ff1", "mario", "mm03", "zelda"])
    except getopt.GetoptError as err:
        print(str(err))
        sys.exit(2)

    showList = False
    extract = False
    gameName = ""
    adbFilename = ""

    for o, a in opts:
        if o in ("-h", "--help"):
            print(
"""
Usage: inject_gba.py [-h] [-l] [-a path/to/alldata.bin] [-m -z] [-e] [romfile] [romfile]
-h --help     Print this message.
-l --list     List known base games.
-a --adb      Set the path to your alldata.bin file
              This will try to guess which key/offset/length to use.
-m --mario    Try using the Mario & Luigi Superstar Saga key/offset/length
   --mm03     Try using the Megaman Zero 3 key/offset/length
   --ff1      Try using the Final Fight One key/offset/length
-z --zelda    Try using the Zelda Minish Cap key/offset/length
-e --extract  Extract rom from the base game alldata.bin file to alldata.bin.gba
[romfile]     Path to one or more uncompressed .gba files to inject.
You must specify the path to the adb file.
If the MD5 of the adb file matches a known file it will use the known key/offset/length.
If the MD5 does not match you can try using one of the known key/offset/length sets.
This will work if you are re-injecting into a modified adb, try using -e to verify.
The alldata.bin containing the injected file will be written to the same location as the .gba file with a .adb suffix
The ROM must compress to < 8M (<4M for mm03, <2M for ff1)
Some 16M roms work, some don't.
The other GBA VC titles are all <4M, so not worth using.
I can confirm C.O. Nell looks hawt on the big screen.
""")
            sys.exit(2)
        elif o in ("-a", "--adb"):
            adbFilename = a
        elif o in ("-e", "--extract"):
            extract = True
        elif o in ("-l", "--list"):
            showList = True
        # `o in ("--ff1")` tested substring membership in a plain string,
        # not tuple membership; use equality for the long-only options.
        elif o == "--ff1":
            gameName = 'ff1'
        elif o in ("-m", "--mario"):
            gameName = 'mario'
        elif o in ("-z", "--zelda"):
            gameName = 'zelda'
        elif o == "--mm03":
            gameName = 'mm03'
        else:
            assert False, "unhandled option"

    if showList:
        for gi_name in game_info.keys():
            print("Name   %s" % gi_name)
            print("Key    %s" % game_info[gi_name]['key'])
            print("Offset %d" % game_info[gi_name]['offset'])
            print("Length %d" % game_info[gi_name]['length'])

    if extract and len(adbFilename):
        extractFile(gameName, adbFilename)

    if len(adbFilename):
        for injectName in args:
            injectFile(gameName, adbFilename, injectName)
# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()
Added Megaman Zero
#!/usr/bin/env python
import sys
import binascii
import getopt
import hashlib
import struct
import zlib
# Per-title parameters for embedded GBA ROM chunks in alldata.bin:
#   md5    - MD5 (hex) of the pristine alldata.bin, used for auto-detection
#   key    - hex-encoded XOR keystream applied to the compressed ROM chunk
#   offset - byte offset of the ROM chunk inside alldata.bin
#   length - byte length of the ROM chunk
game_info = {
    'zelda': {
        'md5': b'32fc43935b17a1707a5c8a68b58b495b',
        'key': b'124a3e853c7cd7ba88f92540ba76254446c33c38d4534d3f8911f8d716b0027c17139fc2cc6486deb256cfea5fcfb43b88002b32a9fa2eba469c805bfea4d58b9b259c6b6d6a63e75dad37b936ee90b0',
        'offset': 41578496,
        'length': 7985152,
    },
    'mario': {
        'md5': b'efb7bf0ae783b1af39d5dc80ea61c4ed',
        'key': b'2bf3702bf54b24df82c8644004bd10b6be1cf6c534327a58c11ae0a4b55a70bf136a8ce0042e1ca2e462e581ae675eff176459fb0cfb04fa255ac68b31bf89258e3162568757b05419f765a7ee3419cc',
        'offset': 44120064,
        'length': 6920192,
    },
    'mm03': {
        'md5': b'c81a3b4e36762190aaa54ba60d6f5fb9',
        'key': b'e563ab200ffbfb8e0f2cce9bade0c82f37e25e261eb2169b312cf09f2a3a30f92d372fa2b4b5383fdeadff7b5f8bb51c27a98de145fd518b4cf50e54f23ad894e93615fb58274f7fd5c699a5b3eb05dd',
        'offset': 38205440,
        'length': 4847616,
    },
    'mm0': {
        'md5': b'2a57596fbbb46a814231aaf16d8ab603',
        'key': b'30fb905c1f61c9ab01f92a6c71e2bb24927b7c188e858268105c541f03e0f24f8e7e56c908f1809345789848f80a17bb3c6c4945f10fa2741dd59545f1ce5132b375808e50671485a0013a179d09ddf5',
        'offset': 31289344,
        'length': 3676160,
    },
    'ff1': {
        'md5': b'1690b5c5b4e7f00e2011b1fd91ca925d',
        'key': b'a762bbca183ae6fcb32cccfe58f41ac1562817704674d9e0293f1831809937174a7fbf42b47648c37793690f8faf353d9213e3009e7aecec8f4d2978f6080883e9b8ed1822616aeb18a82fddda046fb1',
        'offset': 31680512,
        'length': 2459648,
    },
}
def getGameInfo(gameName, adb_data):
    """Find the key/offset/length record for an alldata.bin blob.

    Identifies the game by the MD5 of *adb_data* first, then falls back to
    matching *gameName* against the known titles.  Returns the matching
    game_info dict, or None when nothing matches.
    """
    adb_md5 = hashlib.md5(adb_data).hexdigest()
    #print("Info: adb md5 is %s" % adb_md5)

    # Try to match by MD5.  hexdigest() returns str while the table stores
    # bytes; the original bytes == str comparison could never succeed on
    # Python 3, so decode the stored digest before comparing.
    for gi in game_info.values():
        if gi['md5'].decode('ascii') == adb_md5:
            return gi
    print("Warning: no matching md5")

    # Try to match by name
    if gameName in game_info:
        return game_info[gameName]

    print("Error: no matching md5, no matching name")
    return None
def extractFile(gameName, adbFilename):
    """Deobfuscate and decompress the ROM embedded in *adbFilename*.

    Writes the raw ROM next to the input as '<adbFilename>.gba'.
    Returns early (after getGameInfo's error message) when the game
    cannot be identified.
    """
    # Read in the whole alldata.bin file; `with` closes the handle
    # deterministically (the original leaked the file object).
    with open(adbFilename, 'rb') as f:
        adb_data = f.read()
    #print("Read %d bytes" % len(adb_data))

    gi = getGameInfo(gameName, adb_data)
    if not gi:
        return

    gikey = gi['key']
    gioff = gi['offset']
    gilen = gi['length']
    #print("Using %s/%d/%d" % (gikey, gioff, gilen))

    key = bytearray(binascii.unhexlify(gikey))
    key_len = len(key)

    # Take a RW copy of the entire chunk
    data = bytearray(adb_data[gioff : gioff + gilen])

    # For each byte, XOR in our key
    # +8 = skip the MDF magic + size
    for i in range(len(data) - 8):
        data[i + 8] ^= key[i % key_len]

    # Decompress the unobfuscated data
    raw_data = zlib.decompress(bytes(data[8:]))

    # Write it out
    print("Writing %d bytes to %s.gba" % (len(raw_data), adbFilename))
    with open(adbFilename + '.gba', 'wb') as f:
        f.write(raw_data)
def injectFile(gameName, adbFilename, injectName):
    """Compress, obfuscate and splice *injectName* into a copy of alldata.bin.

    Writes the result as '<injectName>.adb'.  Aborts with a message when
    the compressed ROM does not fit in the original chunk.
    """
    # Read the whole alldata.bin into a RW array; `with` closes the handle
    # deterministically (the original leaked the file object).
    with open(adbFilename, 'rb') as f:
        adb_data = bytearray(f.read())

    gi = getGameInfo(gameName, adb_data)
    if not gi:
        return

    gikey = gi['key']
    gioff = gi['offset']
    gilen = gi['length']

    key = bytearray(binascii.unhexlify(gikey))
    key_len = len(key)

    # Read in the rom file we want to inject
    with open(injectName, 'rb') as f:
        rom_data = bytearray(f.read())

    # Compress the data at maximum level; the chunk size is fixed.
    compressed_data = bytearray(zlib.compress(bytes(rom_data), 9))

    # Check it will fit (8 bytes are reserved for the MDF header)
    if len(compressed_data) > gilen - 8:
        print("Compressed file too large for %s" % injectName)
        return

    # Insert the MDF header
    adb_data[gioff : gioff + 4] = b'mdf\0'
    # Insert our injected file size (uncompressed)
    # NOTE - the original is using little-endian! Heathens!
    adb_data[gioff + 4 : gioff + 8] = struct.pack('<i', len(rom_data))

    # For each byte, XOR in our key
    for i in range(len(compressed_data)):
        adb_data[gioff + 8 + i] = compressed_data[i] ^ key[i % key_len]

    # Pad the remainder of the chunk with zeros
    adb_data[gioff + 8 + len(compressed_data) : gioff + gilen] = b'\00' * (gilen - 8 - len(compressed_data))

    # Write out the modified ADB
    print("Saving as %s.adb" % injectName)
    with open(injectName + '.adb', 'wb') as f:
        f.write(adb_data)
def main():
    """Parse command-line options and run the list/extract/inject actions."""
    try:
        # BUG FIX: the long option was misspelled "extact", so `--extract`
        # always raised GetoptError even though the handler below expects it.
        opts, args = getopt.getopt(sys.argv[1:], "hla:emz",
                                   ["help", "list", "adb=", "extract", "ff1", "mario", "mm0", "mm03", "zelda"])
    except getopt.GetoptError as err:
        print(str(err))
        sys.exit(2)

    showList = False
    extract = False
    gameName = ""
    adbFilename = ""

    for o, a in opts:
        if o in ("-h", "--help"):
            print(
"""
Usage: inject_gba.py [-h] [-l] [-a path/to/alldata.bin] [-m -z] [-e] [romfile] [romfile]
-h --help     Print this message.
-l --list     List known base games.
-a --adb      Set the path to your alldata.bin file
              This will try to guess which key/offset/length to use.
              If the adb is not recognised, try using the key/offset/length from
   --ff1      Final Fight One
-m --mario    Mario & Luigi Superstar Saga
   --mm0      Megaman Zero
   --mm03     Megaman Zero 3
-z --zelda    Zelda Minish Cap
-e --extract  Extract rom from the base game alldata.bin file to alldata.bin.gba
[romfile]     Path to one or more uncompressed .gba files to inject.
You must specify the path to the adb file.
If the MD5 of the adb file matches a known file it will use the known key/offset/length.
If the MD5 does not match you can try using one of the known key/offset/length sets.
This will work if you are re-injecting into a modified adb, try using -e to verify.
The alldata.bin containing the injected file will be written to the same location as the .gba file with a .adb suffix
The ROM must compress to < 8M (<4M for mm03, <2M for ff1)
Some 16M roms work, some don't.
The other GBA VC titles are all <4M, so not worth using.
I can confirm C.O. Nell looks hawt on the big screen.
""")
            sys.exit(2)
        elif o in ("-a", "--adb"):
            adbFilename = a
        elif o in ("-e", "--extract"):
            extract = True
        elif o in ("-l", "--list"):
            showList = True
        # `o in ("--ff1")` tested substring membership in a plain string,
        # not tuple membership; use equality for the long-only options.
        elif o == "--ff1":
            gameName = 'ff1'
        elif o in ("-m", "--mario"):
            gameName = 'mario'
        elif o in ("-z", "--zelda"):
            gameName = 'zelda'
        elif o == "--mm0":
            gameName = 'mm0'
        elif o == "--mm03":
            gameName = 'mm03'
        else:
            assert False, "unhandled option"

    if showList:
        for gi_name in game_info.keys():
            print("Name   %s" % gi_name)
            print("Key    %s" % game_info[gi_name]['key'])
            print("Offset %d" % game_info[gi_name]['offset'])
            print("Length %d" % game_info[gi_name]['length'])

    if extract and len(adbFilename):
        extractFile(gameName, adbFilename)

    if len(adbFilename):
        for injectName in args:
            injectFile(gameName, adbFilename, injectName)
# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
import sys
import socket
import argparse
import requests
import re
import time
from random import randint
import csv
import os
import pythonwhois as whois #http://cryto.net/pythonwhois/usage.html https://github.com/joepie91/python-whois
from ipwhois import IPWhois as ipw #https://pypi.python.org/pypi/ipwhois
import ipaddress as ipa #https://docs.python.org/3/library/ipaddress.html
import dns.resolver,dns.reversename,dns.name #http://www.dnspython.org/docs/1.12.0/
import shodan #https://shodan.readthedocs.org/en/latest/index.html
class Host(object):
    '''
    Host represents an entity on the internet. Can originate from a domain or from an IP.
    Attributes are:
        domain - DNS/Domain name e.g. google.com
        type - either 'domain' or 'ip', depending on original information passed to __init__
        ips - List of instances of IP. Each instance contains:
            ip - Str representation of IP e.g. 8.8.8.8
            rev_domains - list of str representing reverse domains related to ip
            whois_ip - Dict containing results from a Whois lookup on an IP
            shodan - Dict containing results from Shodan lookups
            cidr - Str of CIDR that this ip is part of (taken from whois_ip results)
        mx - Set of Hosts for each DNS MX entry for original self.domain
        ns - Set of Hosts for each DNS NS entry for original self.domain
        whois_domain - str representation of the Whois query
        subdomains - Set of Hosts for each related Host found that is a subdomain of self.domain
        linkedin_page - Str of LinkedIn url that contains domain in html
        related_hosts - Set of Hosts that may be related to self. Taken from reverse
            DNS lookups on the CIDRs that contain this host's IPs.
    '''
    def __init__(self,domain=None,ips=(),reverse_domains=()):
        #Type check - depends on what parameters have been passed
        if domain: #target is domain, with or without ip already resolved by scan.add_host()
            self.type = 'domain'
        elif ips: #target is ip
            self.type = 'ip'
        else:
            raise ValueError('Host requires a domain or at least one ip')
        self.domain = domain
        self.ips = [ IP(str(ip),reverse_domains) for ip in ips ]
        #Result containers, filled lazily by the lookup methods below.
        self.mx = set()
        self.ns = set()
        self.whois_domain = None
        self.subdomains = set()
        self.related_hosts = set()
        self.linkedin_page = None
    def __str__(self):
        #Domain name for domain targets, first IP otherwise.
        if self.type == 'domain':
            return str(self.domain)
        elif self.type == 'ip':
            return str(self.ips[0])
    def __hash__(self):
        #Hash on the identifying information for each target type,
        #consistent with __eq__ below.
        if self.type == 'domain':
            return hash(('domain',self.domain))
        elif self.type == 'ip':
            return hash(('ip',','.join([str(ip) for ip in self.ips])))
    def __eq__(self,other):
        if self.type == 'domain':
            return self.domain == other.domain
        elif self.type == 'ip':
            return self.ips == other.ips
    def dns_lookups(self):
        '''
        Does basic DNS lookups on a host.
        1) Direct DNS lookup on self.domain
        2) Reverse DNS lookup on each self.ips
        Returns self for chaining.
        '''
        if self.type == 'domain':
            self._get_ips()
            for ip in self.ips: ip.get_rev_domains()
        if self.type == 'ip':
            for ip in self.ips: ip.get_rev_domains()
        return self
    def mx_dns_lookup(self):
        '''
        DNS lookup to find MX entries i.e. mail servers responsible for self.domain
        '''
        if self.domain:
            mx_list = self._ret_mx_by_name(self.domain)
            if mx_list:
                self.mx.update([ Host(domain=mx).dns_lookups() for mx in mx_list ])
                self._add_to_subdomains_if_valid(subdomains_as_hosts=self.mx)
    def ns_dns_lookup(self):
        '''
        DNS lookup to find NS entries i.e. name/DNS servers responsible for self.domain
        '''
        if self.domain:
            ns_list = self._ret_ns_by_name(self.domain)
            if ns_list:
                self.ns.update([ Host(domain=ns).dns_lookups() for ns in ns_list ])
                self._add_to_subdomains_if_valid(subdomains_as_hosts=self.ns)
    def google_lookups(self):
        '''
        Queries google to find related subdomains and linkedin pages.
        '''
        if self.domain:
            self.linkedin_page = self._ret_linkedin_page_from_google(self.domain)
            self._add_to_subdomains_if_valid(subdomains_as_str=self._ret_subdomains_from_google())
    def get_rev_domains_for_ips(self):
        '''
        Reverse DNS lookup on each IP in self.ips. Returns self for chaining.
        '''
        if self.ips:
            for ip in self.ips:
                ip.get_rev_domains()
        return self
    def get_whois_domain(self,num=0):
        '''
        Whois lookup on self.domain. Saved in self.whois_domain as string,
        since each top-level domain has a way of representing data.
        This makes it hard to index it, almost a project on its own.
        '''
        try:
            if self.domain:
                query = whois.get_whois(self.domain)
                if 'raw' in query:
                    self.whois_domain = query['raw'][0].lstrip().rstrip()
        except Exception as e:
            Scan.error(e,sys._getframe().f_code.co_name)
            pass
    def get_all_whois_ip(self):
        '''
        IP Whois lookups on each ip within self.ips
        Saved in each ip.whois_ip as dict, since this is how it is returned by ipwhois library.
        '''
        #Keeps track of lookups already made - cidr as key, whois_ip dict as val
        cidrs_found = {}
        for ip in self.ips:
            cidr_found = False
            for cidr,whois_ip in cidrs_found.iteritems():
                if ipa.ip_address(ip.ip.decode('unicode-escape')) in ipa.ip_network(cidr.decode('unicode-escape')):
                    #If cidr is already in Host, we won't get_whois_ip again.
                    #Instead we will save cidr in ip just to make it easier to relate them later
                    ip.cidr,ip.whois_ip = cidr,whois_ip
                    cidr_found = True
                    break
            if not cidr_found:
                ip.get_whois_ip()
                #BUG FIX: a failed lookup leaves ip.cidr as None; caching a None
                #key would crash the cidr.decode() containment test above on the
                #next iteration, so only cache successful lookups.
                if ip.cidr:
                    cidrs_found[ip.cidr] = ip.whois_ip
    def get_all_shodan(self,key):
        '''
        Shodan lookups for each ip within self.ips.
        Saved in ip.shodan as dict.
        '''
        if key:
            for ip in self.ips:
                ip.get_shodan(key)
    def _get_ips(self):
        '''
        Does direct DNS lookup to get IPs from self.domain.
        Used internally by self.dns_lookups(). Returns self for chaining.
        '''
        if self.domain and not self.ips:
            ips = self._ret_host_by_name(self.domain)
            if ips:
                self.ips = [ IP(str(ip)) for ip in ips ]
        return self
    @staticmethod
    def _ret_host_by_name(name):
        #Forward A-record lookup; returns None when the name does not resolve.
        try:
            return Scan.dns_resolver.query(name)
        except (dns.resolver.NXDOMAIN,dns.resolver.NoAnswer) as e:
            Scan.error('[-] Host lookup failed for '+name,sys._getframe().f_code.co_name)
            pass
    @staticmethod
    def _ret_mx_by_name(name):
        #Returns MX exchange names with trailing dot stripped; None on failure.
        try:
            #rdata.exchange for domains and rdata.preference for integer
            return [str(mx.exchange).rstrip('.') for mx in Scan.dns_resolver.query(name,'MX')]
        except (dns.resolver.NXDOMAIN,dns.resolver.NoAnswer) as e:
            Scan.error('[-] MX lookup failed for '+name,sys._getframe().f_code.co_name)
            pass
    @staticmethod
    def _ret_ns_by_name(name):
        #Returns name-server names with trailing dot stripped; None on failure.
        try:
            return [str(ns).rstrip('.') for ns in Scan.dns_resolver.query(name,'NS')]
        except (dns.resolver.NXDOMAIN,dns.resolver.NoAnswer) as e:
            Scan.error('[-] NS lookup failed for '+name,sys._getframe().f_code.co_name)
            pass
    @staticmethod
    def _ret_linkedin_page_from_google(name):
        '''
        Uses a google query to find a possible LinkedIn page related to name (usually self.domain)
        Google query is "site:linkedin.com/company name", and first result is used
        '''
        try:
            request='http://google.com/search?hl=en&meta=&num=10&q=site:linkedin.com/company%20"'+name+'"'
            google_search = requests.get(request)
            google_results = re.findall('<cite>(.+?)<\/cite>', google_search.text)
            for url in google_results:
                if 'linkedin.com/company/' in url:
                    #Strip any markup google embedded inside the result.
                    return re.sub('<.*?>', '', url)
        except Exception as e:
            Scan.error(e,sys._getframe().f_code.co_name)
    def _ret_subdomains_from_google(self):
        '''
        This method uses google dorks to get as many subdomains from google as possible.
        Returns a list of unique subdomain strings found in google.
        '''
        def _google_subdomains_lookup(domain,subdomains_to_avoid,num,counter):
            '''
            Sub method that reaches out to google using the following query:
            site:*.domain -site:subdomain_to_avoid1 -site:subdomain_to_avoid2 -site:subdomain_to_avoid3...
            Returns list of unique subdomain strings
            '''
            #Sleep some time between 0 - 4.999 seconds
            time.sleep(randint(0,4)+randint(0,1000)*0.001)
            request = 'http://google.com/search?hl=en&meta=&num='+str(num)+'&start='+str(counter)+'&q='+\
                'site%3A%2A'+domain
            for subdomain in subdomains_to_avoid:
                #Don't want to remove original name from google query
                if subdomain != domain:
                    request = ''.join([request,'%20%2Dsite%3A',str(subdomain)])
            google_search = None
            try:
                google_search = requests.get(request)
            except Exception as e:
                Scan.error(e,sys._getframe().f_code.co_name)
            new_subdomains = set()
            if google_search:
                google_results = re.findall('<cite>(.+?)<\/cite>', google_search.text)
                for url in set(google_results):
                    #Removing html tags from inside url (sometimes they use <b> or <i> for ads)
                    url = re.sub('<.*?>', '', url)
                    #Follows Javascript pattern of accessing URLs
                    g_host = url
                    g_protocol = ''
                    g_pathname = ''
                    temp = url.split('://')
                    #If there is g_protocol e.g. http://, ftp://, etc
                    if len(temp)>1:
                        g_protocol = temp[0]
                        #remove g_protocol from url
                        url = ''.join(temp[1:])
                    temp = url.split('/')
                    #if there is a pathname after host
                    if len(temp)>1:
                        g_pathname = '/'.join(temp[1:])
                        g_host = temp[0]
                    new_subdomains.add(g_host)
                #TODO do something with g_pathname and g_protocol
                #Currently not using protocol or pathname for anything
            return list(new_subdomains)
        #Keeps subdomains found by _google_subdomains_lookup
        subdomains_discovered = []
        #Variable to check if there is any new result in the last iteration
        subdomains_in_last_iteration = -1
        #Keep asking google until a round adds no new subdomains.
        while len(subdomains_discovered) > subdomains_in_last_iteration:
            subdomains_in_last_iteration = len(subdomains_discovered)
            subdomains_discovered += _google_subdomains_lookup(self.domain,subdomains_discovered,100,0)
            subdomains_discovered = list(set(subdomains_discovered))
        subdomains_discovered += _google_subdomains_lookup(self.domain,subdomains_discovered,100,100)
        subdomains_discovered = list(set(subdomains_discovered))
        return subdomains_discovered
    def _add_to_subdomains_if_valid(self,subdomains_as_str=None,subdomains_as_hosts=None):
        '''
        Will add Hosts from subdomains_as_str or subdomains_as_hosts to self.subdomains if indeed these hosts are subdomains
        subdomains_as_hosts and subdomains_as_str should be iterable list or set
        '''
        if subdomains_as_str:
            self.subdomains.update(
                [ Host(domain=subdomain).dns_lookups() for subdomain in subdomains_as_str if self._is_parent_domain_of(subdomain) ]
            )
        elif subdomains_as_hosts:
            self.subdomains.update(
                [ subdomain for subdomain in subdomains_as_hosts if self._is_parent_domain_of(subdomain) ]
            )
    def _is_parent_domain_of(self,subdomain):
        '''
        Checks if subdomain is indeed a subdomain of self.domain
        In addition it filters out invalid dns names
        '''
        if isinstance(subdomain, Host):
            #If subdomain has .domain (probably found through google lookup)
            if subdomain.domain:
                try:
                    return dns.name.from_text(subdomain.domain).is_subdomain(dns.name.from_text(self.domain))
                except Exception as e:
                    pass
            #If subdomain doesn't have .domain, it was found through reverse dns scan on cidr
            #So I must add the rev parameter to subdomain as .domain, so it looks better on the csv
            elif subdomain.ips[0].rev_domains:
                for rev in subdomain.ips[0].rev_domains:
                    try:
                        if dns.name.from_text(rev).is_subdomain(dns.name.from_text(self.domain)):
                            #Adding a .rev_domains str to .domain
                            subdomain.domain = rev
                            return True
                    except Exception as e:
                        pass
        else:
            try:
                return dns.name.from_text(subdomain).is_subdomain(dns.name.from_text(self.domain))
            except Exception as e:
                pass
        return False
    def reverse_dns_lookup_on_related_cidrs(self,feedback=False):
        '''
        Does reverse dns lookups in all cidrs discovered that are related to this host
        Will be used to check for subdomains found through reverse lookup
        '''
        #BUG FIX: ips whose whois lookup failed have cidr None - skip those,
        #since ip_network(None.decode(...)) would raise.
        cidrs = set([ ip.cidr for ip in self.ips if ip.cidr ])
        #Reverse DNS lookup on all self.cidrs
        for cidr in cidrs:
            #For each ip in network cidr
            for ip in ipa.ip_network(cidr.decode('unicode-escape')):
                reverse_lookup = None
                try:
                    reverse_lookup = Scan.dns_resolver.query(dns.reversename.from_address(str(ip)),'PTR')
                except (dns.resolver.NXDOMAIN,dns.resolver.NoAnswer) as e:
                    pass
                except KeyboardInterrupt:
                    if raw_input('[-] Sure you want to stop scanning '+str(cidr)+\
                            '? Program flow will continue normally. (Y/n):') in ['y','Y','']:
                        break
                    else:
                        #User chose to continue: retry the interrupted lookup once.
                        try:
                            reverse_lookup = Scan.dns_resolver.query(dns.reversename.from_address(str(ip)),'PTR')
                        except dns.resolver.NXDOMAIN as e:
                            pass
                if reverse_lookup:
                    #Organizing reverse lookup results
                    reverse_domains = [ str(domain).rstrip('.') for domain in reverse_lookup ]
                    #Creating new host
                    new_host = Host(ips=[ip],reverse_domains=reverse_domains)
                    #Append host to current host self.related_hosts
                    self.related_hosts.add(new_host)
                    #Adds new_host to self.subdomains if new_host indeed is subdomain
                    self._add_to_subdomains_if_valid(subdomains_as_hosts=[new_host])
                    if feedback: print(new_host.print_all_ips())
    def print_all_ips(self):
        #One line per ip, with its reverse domains when known.
        if self.ips:
            return '\n'.join([ ip.print_ip() for ip in self.ips ]).rstrip()
    def print_subdomains(self):
        return self._print_domains(sorted(self.subdomains, key=lambda x: x.domain))
    @staticmethod
    def _print_domains(hosts):
        #Static method that prints a list of domains with its respective ips and rev_domains
        #hosts should be a list of Hosts
        if hosts:
            ret = ''
            for host in hosts:
                ret = ''.join([ret,host.domain])
                p = host.print_all_ips()
                if p: ret = ''.join([ret,'\n\t',p.replace('\n','\n\t')])
                ret = ''.join([ret,'\n'])
            return ret.rstrip().lstrip()
    def print_all_ns(self):
        #Print all NS records
        return self._print_domains(self.ns)
    def print_all_mx(self):
        #Print all MX records
        return self._print_domains(self.mx)
    def print_all_whois_ip(self):
        #Prints whois_ip records related to all self.ips
        ret = set([ip.print_whois_ip() for ip in self.ips if ip.whois_ip])
        return '\n'.join(ret).lstrip().rstrip()
    def print_all_shodan(self):
        #Print all Shodan entries (one for each IP in self.ips)
        ret = [ ip.print_shodan() for ip in self.ips if ip.shodan ]
        return '\n'.join(ret).lstrip().rstrip()
    def print_as_csv_lines(self):
        #Generator that yields each IP within self.ips as a csv line, one at a time
        #BUG FIX: use str(self) instead of self.domain - for ip-type hosts
        #self.domain is None and string concatenation would raise TypeError.
        yield ['Target: '+str(self)]
        if self.ips:
            yield [
                'Domain',
                'IP',
                'Reverse domains',
                'NS',
                'MX',
                'Subdomains',
                'Domain whois',
                'IP whois',
                'Shodan',
                'LinkedIn page',
            ]
            for ip in self.ips:
                yield [
                    self.domain,
                    str(ip),
                    '\n'.join(ip.rev_domains),
                    self.print_all_ns(),
                    self.print_all_mx(),
                    self.print_subdomains(),
                    self.whois_domain,
                    ip.print_whois_ip(),
                    ip.print_shodan(),
                    self.linkedin_page,
                ]
        if self.subdomains:
            yield ['\n']
            yield ['Subdomains for '+str(self.domain)]
            yield ['Domain','IP','Reverse domains']
            for sub in sorted(self.subdomains):
                for ip in sub.ips:
                    if sub.domain:
                        yield [ sub.domain,ip.ip,','.join( ip.rev_domains ) ]
                    else:
                        yield [ ip.rev_domains[0],ip.ip,','.join( ip.rev_domains ) ]
        if self.related_hosts:
            yield ['\n']
            yield ['Hosts in same CIDR as '+str(self.domain)]
            yield ['IP','Reverse domains']
            for sub in sorted(self.related_hosts):
                yield [
                    ','.join([ str(ip) for ip in sub.ips ]),
                    ','.join([ ','.join(ip.rev_domains) for ip in sub.ips ]),
                ]
    def do_all_lookups(self, shodan_key=None):
        '''
        This method does all possible direct lookups for a Host.
        Not called by any Host or Scan function, only here for testing purposes.
        '''
        self.dns_lookups()
        self.ns_dns_lookup()
        self.mx_dns_lookup()
        self.get_whois_domain()
        self.get_all_whois_ip()
        if shodan_key:
            self.get_all_shodan(shodan_key)
        self.google_lookups()
class IP(Host):
    '''
    IP and information specific to it. Hosts contain multiple IPs,
    as a domain can resolve to multiple IPs.
    Attributes are:
        ip - Str representation of this ip e.g. '8.8.8.8'
        whois_ip - Dict containing results for Whois lookups against self.ip
        cidr - Str CIDR that contains self.ip (taken from whois_ip), e.g. 8.8.8.0/24
        rev_domains - List of str for each reverse domain for self.ip, found through reverse DNS lookups
        shodan - Dict containing Shodan results
    NOTE(review): subclasses Host but deliberately does not call Host.__init__,
    so Host attributes (mx, ns, subdomains, ...) do not exist on IP instances.
    '''
    def __init__(self,ip,rev_domains=None):
        #Mutable default avoided: each instance gets its own list.
        if rev_domains is None: rev_domains = []
        self.ip = str(ip)
        self.rev_domains = rev_domains
        self.whois_ip = {}
        self.cidr = None
        self.shodan = None
    def __str__(self):
        return str(self.ip)
    def __hash__(self):
        return hash(('ip',self.ip))
    def __eq__(self,other):
        return self.ip == other.ip
    @staticmethod
    def _ret_host_by_ip(ip):
        #Reverse PTR lookup; returns None when nothing resolves.
        try:
            return Scan.dns_resolver.query(dns.reversename.from_address(ip),'PTR')
        except (dns.resolver.NXDOMAIN,dns.resolver.NoAnswer) as e:
            Scan.error('[-] Host lookup failed for '+ip,sys._getframe().f_code.co_name)
    def get_rev_domains(self):
        #Populates self.rev_domains from a reverse DNS lookup. Returns self.
        rev_domains = self._ret_host_by_ip(self.ip)
        if rev_domains:
            self.rev_domains = [ str(domain).rstrip('.') for domain in rev_domains ]
        return self
    def get_shodan(self, key):
        #Shodan host lookup; result dict saved in self.shodan.
        try:
            api = shodan.Shodan(key)
            self.shodan = api.host(str(self))
        except Exception as e:
            Scan.error(e,sys._getframe().f_code.co_name)
    def get_whois_ip(self):
        #IP whois lookup; also extracts the first net's cidr. Returns self.
        try:
            self.whois_ip = ipw(str(self)).lookup() or None
        except Exception as e:
            Scan.error(e,sys._getframe().f_code.co_name)
        if self.whois_ip:
            if 'nets' in self.whois_ip:
                if self.whois_ip['nets']:
                    self.cidr = self.whois_ip['nets'][0]['cidr']
        return self
    def print_ip(self):
        #One-line summary for a single rev domain, multi-line otherwise.
        ret = str(self.ip)
        if self.rev_domains:
            if len(self.rev_domains) < 2:
                ret = ''.join([ret,' - ',self.rev_domains[0]])
            else:
                for rev in self.rev_domains:
                    ret = '\t'.join([ret,'\n',rev])
        return ret
    def print_whois_ip(self):
        if self.whois_ip:
            result = ''
            #Printing all lines except 'nets'
            for key,val in sorted(self.whois_ip.iteritems()):
                if val and key not in ['nets']:
                    result = '\n'.join([result,key+': '+str(val)])
            #Printing each dict within 'nets'
            #BUG FIX: 'nets' may be missing from the whois result; direct
            #indexing raised KeyError here.
            for index,net in enumerate(self.whois_ip.get('nets') or []):
                result = '\n'.join([result,'net '+str(index)+':'])
                for net_key,net_val in sorted(net.iteritems()):
                    result = '\n\t'.join([result,net_key+': '+str(net_val).replace('\n',', ')])
            return result.lstrip().rstrip()
    def print_shodan(self):
        if self.shodan:
            result = ''.join(['IP: ',str(self.shodan.get('ip_str')),'\n'])
            result = ''.join([result,'Organization: ',str(self.shodan.get('org','n/a')),'\n'])
            #BUG FIX: the old guards tested .get('os','n/a') which is always
            #truthy because of the default - OS/ISP lines were printed even
            #when Shodan returned no data. Test the raw value instead.
            if self.shodan.get('os'):
                result = ''.join([result,'OS: ',self.shodan.get('os'),'\n'])
            if self.shodan.get('isp'):
                result = ''.join([result,'ISP: ',self.shodan.get('isp'),'\n'])
            for item in self.shodan.get('data') or []:
                result = ''.join([
                    result,
                    'Port: {}'.format(item['port']),
                    '\n',
                    'Banner: {}'.format(item['data'].replace('\n','\n\t').rstrip()),
                ])
            return result.rstrip().lstrip()
class Scan(object):
'''
Object that will hold all Host entries, interpret uset given flags, manage scans, threads and outputs.
Attributes are:
feedback - Bool flag for output printing. Static variable.
versobe - Bool flag for verbose output printing. Static variable.
nameserver - Str DNS server to be used for lookups (consumed by dns.resolver module)
shodan_key - Str key used for Shodan lookups
targets - Set of Hosts that will be scanned
target_networks - Set of ipaddress.IPv4Networks to be scanned
bad_targets - Set of user inputs that could not be understood or resolved
'''
feedback = False
verbose = False
dns_resolver = dns.resolver.Resolver()
def __init__(self,nameserver=None,shodan_key=None,feedback=False,verbose=False):
Scan.feedback = feedback
Scan.verbose=verbose
if nameserver: Scan.dns_resolver.nameservers = [nameserver]
self.shodan_key = shodan_key
self.targets = set()
self.target_networks = set()
self.bad_targets = set()
@staticmethod
def error(e, method_name=None):
if Scan.feedback and Scan.verbose:
print '# Error:', str(e),'| method:',method_name
def populate(self, user_supplied_list):
for user_supplied in user_supplied_list:
self.add_host(user_supplied)
if self.feedback:
if not self.targets:
print '# No hosts to scan'
else:
print '# Scanning',str(len(self.targets))+'/'+str(len(user_supplied_list)),'hosts'
if not self.shodan_key:
print '# No Shodan key provided'
else:
print'# Shodan key provided -',self.shodan_key
def add_host(self, user_supplied):
'''
Add string passed by user to self.targets as proper Host objects
For this, it parses user_supplied strings to separate IPs, Domains, and Networks.
'''
#is it an IP?
try:
ip = ipa.ip_address(user_supplied.decode('unicode-escape'))
#if not (ip.is_multicast or ip.is_unspecified or ip.is_reserved or ip.is_loopback):
self.targets.add(Host(ips=[str(ip)]))
return
except ValueError as e:
pass
#is it a valid network range?
try:
net = ipa.ip_network(user_supplied.decode('unicode-escape'),strict=False)
if net.prefixlen < 16:
if Scan.feedback: print '[-] Error: Not a good idea to scan anything bigger than a /16 is it?', user_supplied
else:
for ip in net:
self.add_host(str(ip), True)
return
except ValueError as e:
pass
#is it a valid DNS?
try:
ips = Scan.dns_resolver.query(user_supplied)
self.targets.add(Host(domain=user_supplied,ips=[str(ip) for ip in ips]))
return
except (dns.resolver.NXDOMAIN, dns.exception.SyntaxError) as e:
#If here so results from network won't be so verbose
if Scan.feedback: print '[-] Error: Couldn\'t resolve or understand', user_supplied
pass
self.bad_targets.add(user_supplied)
def scan_targets(self):
for host in self.targets:
self.full_scan_on_host(host)
for host in self.targets:
self.scan_related_cidrs(host)
def full_scan_on_host(self,host):
'''Does all scans for each host in self.targets'''
fb = self.feedback
if fb:
print ''
print '# ____________________ Scanning {} ____________________ #'.format(str(host))
###DNS and Whois lookups
if fb:
print ''
print '# DNS lookups'
host.dns_lookups()
if fb:
if host.domain:
print ''
print '[*] Domain: '+host.domain
#IPs and reverse domains
if host.ips:
print ''
print '[*] IPs & reverse DNS: '
print host.print_all_ips()
host.ns_dns_lookup()
#NS records
if host.ns and fb:
print ''
print '[*] NS records:'
print host.print_all_ns()
host.mx_dns_lookup()
#MX records
if host.mx and fb:
print ''
print '[*] MX records:'
print host.print_all_mx()
if fb:
print ''
print '# Whois lookups'
host.get_whois_domain()
if host.whois_domain and fb:
print ''
print '[*] Whois domain:'
print host.whois_domain
host.get_all_whois_ip()
if fb:
m = host.print_all_whois_ip()
if m:
print ''
print '[*] Whois IP:'
print m
#Shodan lookup
if self.shodan_key:
if fb:
print ''
print '# Querying Shodan for open ports'
host.get_all_shodan(self.shodan_key)
if fb:
m = host.print_all_shodan()
if m:
print '[*] Shodan:'
print m
#Google subdomains lookup
if host.domain:
if fb:
print ''
print '# Querying Google for subdomains and Linkedin pages, this might take a while'
host.google_lookups()
if fb:
if host.linkedin_page:
print ''
print '[*] Possible LinkedIn page: '+host.linkedin_page
if host.subdomains:
print ''
print '[*] Subdomains:'+'\n'+host.print_subdomains()
else:
print '[-] Error: No subdomains found in Google. If you are scanning a lot, Google might be blocking your requests.'
def scan_related_cidrs(self,host):
'''DNS lookups on entire CIDRs taken from host.get_whois_ip()'''
fb = self.feedback
cidrs = set([ip.cidr for ip in host.ips])
if cidrs:
if fb:
print ''
print '# Reverse DNS lookup on range(s) {} (related to {})'.format(', '.join(cidrs),str(host))
host.reverse_dns_lookup_on_related_cidrs(feedback=True)
def write_output_csv(self, filename=None):
'''Writes output for each target as csv in filename'''
if filename:
filename = os.path.expanduser(filename)
fb = self.feedback
if fb:
print ''
print '# Saving output csv file'
output_as_lines = []
for host in self.targets:
try:
#Using generator to get one csv line at a time (one Host can yield multiple lines)
generator = host.print_as_csv_lines()
while True:
output_as_lines.append(generator.next())
except StopIteration:
pass
output_written = False
while not output_written:
try:
with open(filename, 'wb') as f:
writer = csv.writer(f)
for line in output_as_lines:
writer.writerow(line)
output_written = True
except Exception as e:
error = '[-] Something went wrong, can\'t open output file. Press anything to try again.'
if self.verbose: error = ''.join([error,'\nError: ',str(e)])
raw_input(error)
except KeyboardInterrupt:
if raw_input('[-] Sure you want to exit without saving your file (Y/n)?') in ['y','Y','']:
sys.exit('# Scan interrupted')
def exit(self):
if self.feedback: print '# Done'
if __name__ == '__main__':
    # Command-line entry point: parse flags, run every scan stage, save csv.
    parser = argparse.ArgumentParser(description='InstaRecon')
    parser.add_argument('targets', nargs='+', help='targets')
    parser.add_argument('-o', '--output', required=False, nargs='?', help='Output filename as csv.')
    parser.add_argument('-n', '--nameserver', required=False, nargs='?', help='Alternative DNS server to query.')
    parser.add_argument('-s', '--shodan_key', required=False, nargs='?', help='Shodan key for automated lookups. To get one, simply register on https://www.shodan.io/.')
    parser.add_argument('-v', '--verbose', action='store_true', help='Verbose errors')
    parser.add_argument('-d', '--dns_only', action='store_true', help='Direct and reverse DNS lookups only.')
    cli = parser.parse_args()
    # De-duplicate user-supplied targets before scanning.
    unique_targets = sorted(set(cli.targets))
    scanner = Scan(
        nameserver=cli.nameserver,
        shodan_key=cli.shodan_key,
        feedback=True,
        verbose=cli.verbose,
    )
    try:
        scanner.populate(unique_targets)
        scanner.scan_targets()
        scanner.write_output_csv(cli.output)
        scanner.exit()
    except KeyboardInterrupt:
        sys.exit('# Scan interrupted')
#reverse_dns_lookup_on_related_cidrs now being called as part of full_scan_on_host
#!/usr/bin/env python
import sys
import socket
import argparse
import requests
import re
import time
from random import randint
import csv
import os
import pythonwhois as whois #http://cryto.net/pythonwhois/usage.html https://github.com/joepie91/python-whois
from ipwhois import IPWhois as ipw #https://pypi.python.org/pypi/ipwhois
import ipaddress as ipa #https://docs.python.org/3/library/ipaddress.html
import dns.resolver,dns.reversename,dns.name #http://www.dnspython.org/docs/1.12.0/
import shodan #https://shodan.readthedocs.org/en/latest/index.html
class Host(object):
'''
Host represents an entity on the internet. Can originate from a domain or from an IP.
Attributes are:
domain - DNS/Domain name e.g. google.com
type - either 'domain' or 'ip', depending on original information passed to __init__
ips - List of instances of IP. Each instance contains:
ip - Str representation of IP e.g. 8.8.8.8
rev_domains - list of str representing reverse domains related to ip
whois_ip - Dict containing results from a Whois lookup on an IP
shodan - Dict containing results from Shodan lookups
cidr - Str of CIDR that this ip is part of (taken from whois_ip results)
mx - Set of Hosts for each DNS MX entry for original self.domain
ns - Set of Hosts for each DNS NS entry for original self.domain
whois_domain - str representation of the Whois query
subdomains - Set of Hosts for each related Host found that is a subdomain of self.domain
linkedin_page - Str of LinkedIn url that contains domain in html
related_hosts - Set of Hosts that may be related to host, as they're part of the same cidrs
cidrs - set of strs for each ip.cidr
'''
def __init__(self,domain=None,ips=(),reverse_domains=()):
#Type check - depends on what parameters have been passed
if domain: #target is domain, with or without ip already resolved by scan.add_host()
self.type = 'domain'
elif ips: #target is ip
self.type = 'ip'
else:
raise ValueError
self.domain = domain
self.ips = [ IP(str(ip),reverse_domains) for ip in ips ]
self.mx = set()
self.ns = set()
self.whois_domain = None
self.subdomains = set()
self.linkedin_page = None
self.related_hosts = set()
self.cidrs = set()
def __str__(self):
if self.type == 'domain':
return str(self.domain)
elif self.type == 'ip':
return str(self.ips[0])
def __hash__(self):
if self.type == 'domain':
return hash(('domain',self.domain))
elif self.type == 'ip':
return hash(('ip',','.join([str(ip) for ip in self.ips])))
def __eq__(self,other):
if self.type == 'domain':
return self.domain == other.domain
elif self.type == 'ip':
return self.ips == other.ips
def dns_lookups(self):
'''
Does basic DNS lookups on a host.
1) Direct DNS lookup on self.domain
2) Reverse DNS lookup on each self.ips
'''
if self.type == 'domain':
self._get_ips()
for ip in self.ips: ip.get_rev_domains()
if self.type == 'ip':
for ip in self.ips: ip.get_rev_domains()
return self
def mx_dns_lookup(self):
'''
DNS lookup to find MX entries i.e. mail servers responsible for self.domain
'''
if self.domain:
mx_list = self._ret_mx_by_name(self.domain)
if mx_list:
self.mx.update([ Host(domain=mx).dns_lookups() for mx in mx_list ])
self._add_to_subdomains_if_valid(subdomains_as_hosts=self.mx)
def ns_dns_lookup(self):
'''
DNS lookup to find NS entries i.e. name/DNS servers responsible for self.domain
'''
if self.domain:
ns_list = self._ret_ns_by_name(self.domain)
if ns_list:
self.ns.update([ Host(domain=ns).dns_lookups() for ns in ns_list ])
self._add_to_subdomains_if_valid(subdomains_as_hosts=self.ns)
def google_lookups(self):
'''
Queries google to find related subdomains and linkedin pages.
'''
if self.domain:
self.linkedin_page = self._ret_linkedin_page_from_google(self.domain)
self._add_to_subdomains_if_valid(subdomains_as_str=self._ret_subdomains_from_google())
def get_rev_domains_for_ips(self):
'''
Reverse DNS lookup on each IP in self.ips
'''
if self.ips:
for ip in self.ips:
ip.get_rev_domains()
return self
def get_whois_domain(self,num=0):
'''
Whois lookup on self.domain. Saved in self.whois_domain as string,
since each top-level domain has a way of representing data.
This makes it hard to index it, almost a project on its on.
'''
try:
if self.domain:
query = whois.get_whois(self.domain)
if 'raw' in query:
self.whois_domain = query['raw'][0].lstrip().rstrip()
except Exception as e:
Scan.error(e,sys._getframe().f_code.co_name)
pass
def get_all_whois_ip(self):
'''
IP Whois lookups on each ip within self.ips
Saved in each ip.whois_ip as dict, since this is how it is returned by ipwhois library.
'''
#Keeps track of lookups already made - cidr as key, whois_ip dict as val
cidrs_found = {}
for ip in self.ips:
cidr_found = False
for cidr,whois_ip in cidrs_found.iteritems():
if ipa.ip_address(ip.ip.decode('unicode-escape')) in ipa.ip_network(cidr.decode('unicode-escape')):
#If cidr is already in Host, we won't get_whois_ip again.
#Instead we will save cidr in ip just to make it easier to relate them later
ip.cidr,ip.whois_ip = cidr,whois_ip
cidr_found = True
break
if not cidr_found:
ip.get_whois_ip()
cidrs_found[ip.cidr] = ip.whois_ip
self.cidrs = set([ip.cidr for ip in self.ips])
def get_all_shodan(self,key):
'''
Shodan lookups for each ip within self.ips.
Saved in ip.shodan as dict.
'''
if key:
for ip in self.ips:
ip.get_shodan(key)
def _get_ips(self):
'''
Does direct DNS lookup to get IPs from self.domains.
Used internally by self.dns_lookups()
'''
if self.domain and not self.ips:
ips = self._ret_host_by_name(self.domain)
if ips:
self.ips = [ IP(str(ip)) for ip in ips ]
return self
@staticmethod
def _ret_host_by_name(name):
try:
return Scan.dns_resolver.query(name)
except (dns.resolver.NXDOMAIN,dns.resolver.NoAnswer) as e:
Scan.error('[-] Host lookup failed for '+name,sys._getframe().f_code.co_name)
pass
@staticmethod
def _ret_mx_by_name(name):
try:
#rdata.exchange for domains and rdata.preference for integer
return [str(mx.exchange).rstrip('.') for mx in Scan.dns_resolver.query(name,'MX')]
except (dns.resolver.NXDOMAIN,dns.resolver.NoAnswer) as e:
Scan.error('[-] MX lookup failed for '+name,sys._getframe().f_code.co_name)
pass
@staticmethod
def _ret_ns_by_name(name):
try:
#rdata.exchange for domains and rdata.preference for integer
return [str(ns).rstrip('.') for ns in Scan.dns_resolver.query(name,'NS')]
except (dns.resolver.NXDOMAIN,dns.resolver.NoAnswer) as e:
Scan.error('[-] NS lookup failed for '+name,sys._getframe().f_code.co_name)
pass
@staticmethod
def _ret_linkedin_page_from_google(name):
'''
Uses a google query to find a possible LinkedIn page related to name (usually self.domain)
Google query is "site:linkedin.com/company name", and first result is used
'''
try:
request='http://google.com/search?hl=en&meta=&num=10&q=site:linkedin.com/company%20"'+name+'"'
google_search = requests.get(request)
google_results = re.findall('<cite>(.+?)<\/cite>', google_search.text)
for url in google_results:
if 'linkedin.com/company/' in url:
return re.sub('<.*?>', '', url)
except Exception as e:
Scan.error(e,sys._getframe().f_code.co_name)
    def _ret_subdomains_from_google(self):
        '''
        This method uses google dorks to get as many subdomains from google as possible.
        Returns a list of unique subdomain strings found in google
        (NOT Hosts - callers wrap them via _add_to_subdomains_if_valid).
        '''
        def _google_subdomains_lookup(domain,subdomains_to_avoid,num,counter):
            '''
            Sub method that reaches out to google using the following query:
            site:*.domain -site:subdomain_to_avoid1 -site:subdomain_to_avoid2 -site:subdomain_to_avoid3...
            Returns list of unique subdomain strings
            '''
            #Sleep some time between 0 - 4.999 seconds
            time.sleep(randint(0,4)+randint(0,1000)*0.001)
            request = 'http://google.com/search?hl=en&meta=&num='+str(num)+'&start='+str(counter)+'&q='+\
                'site%3A%2A'+domain
            for subdomain in subdomains_to_avoid:
                #Don't want to remove original name from google query
                if subdomain != domain:
                    request = ''.join([request,'%20%2Dsite%3A',str(subdomain)])
            google_search = None
            try:
                google_search = requests.get(request)
            except Exception as e:
                Scan.error(e,sys._getframe().f_code.co_name)
            new_subdomains = set()
            if google_search:
                google_results = re.findall('<cite>(.+?)<\/cite>', google_search.text)
                for url in set(google_results):
                    #Removing html tags from inside url (sometimes they ise <b> or <i> for ads)
                    url = re.sub('<.*?>', '', url)
                    #Follows Javascript pattern of accessing URLs
                    g_host = url
                    g_protocol = ''
                    g_pathname = ''
                    temp = url.split('://')
                    #If there is g_protocol e.g. http://, ftp://, etc
                    if len(temp)>1:
                        g_protocol = temp[0]
                        #remove g_protocol from url
                        url = ''.join(temp[1:])
                    temp = url.split('/')
                    #if there is a pathname after host
                    if len(temp)>1:
                        g_pathname = '/'.join(temp[1:])
                        g_host = temp[0]
                    new_subdomains.add(g_host)
                #TODO do something with g_pathname and g_protocol
                #Currently not using protocol or pathname for anything
            return list(new_subdomains)
        #Keeps subdomains found by _google_subdomains_lookup
        subdomains_discovered = []
        #Variable to check if there is any new result in the last iteration
        subdomains_in_last_iteration = -1
        #Keep querying (first page, then second page) until a round of
        #lookups adds no new subdomains.
        while len(subdomains_discovered) > subdomains_in_last_iteration:
            subdomains_in_last_iteration = len(subdomains_discovered)
            subdomains_discovered += _google_subdomains_lookup(self.domain,subdomains_discovered,100,0)
            subdomains_discovered = list(set(subdomains_discovered))
            subdomains_discovered += _google_subdomains_lookup(self.domain,subdomains_discovered,100,100)
            subdomains_discovered = list(set(subdomains_discovered))
        return subdomains_discovered
def _add_to_subdomains_if_valid(self,subdomains_as_str=None,subdomains_as_hosts=None):
'''
Will add Hosts from subdomains_as_str or subdomains_as_hosts to self.subdomain if indeed these hosts are subdomains
subdomains_as_hosts and subdomains_as_str should be iterable list or set
'''
if subdomains_as_str:
self.subdomains.update(
[ Host(domain=subdomain).dns_lookups() for subdomain in subdomains_as_str if self._is_parent_domain_of(subdomain) ]
)
elif subdomains_as_hosts:
self.subdomains.update(
[ subdomain for subdomain in subdomains_as_hosts if self._is_parent_domain_of(subdomain) ]
)
def _is_parent_domain_of(self,subdomain):
'''
Checks if subdomain is indeed a subdomain of self.domain
In addition it filters out invalid dns names
'''
if isinstance(subdomain, Host):
#If subdomain has .domain (probably found through google lookup)
if subdomain.domain:
try:
return dns.name.from_text(subdomain.domain).is_subdomain(dns.name.from_text(self.domain))
except Exception as e:
pass
#If subdomain doesn't have .domain, if was found through reverse dns scan on cidr
#So I must add the rev parameter to subdomain as .domain, so it looks better on the csv
elif subdomain.ips[0].rev_domains:
for rev in subdomain.ips[0].rev_domains:
try:
if dns.name.from_text(rev).is_subdomain(dns.name.from_text(self.domain)):
#Adding a .rev_domains str to .domain
subdomain.domain = rev
return True
except Exception as e:
pass
else:
try:
return dns.name.from_text(subdomain).is_subdomain(dns.name.from_text(self.domain))
except Exception as e:
pass
return False
    def reverse_dns_lookup_on_related_cidrs(self,feedback=False):
        '''
        Does reverse dns lookups on every ip of every cidr related to this host.

        Each ip with PTR records becomes a new Host added to self.related_hosts,
        and to self.subdomains when its reverse domain is a subdomain of
        self.domain. Ctrl-C lets the user skip the rest of the current cidr.
        feedback - when True, prints each discovered host as the scan goes.
        '''
        for cidr in self.cidrs:
            #For each ip in network cidr
            for ip in ipa.ip_network(cidr.decode('unicode-escape')):
                reverse_lookup = None
                try:
                    reverse_lookup = Scan.dns_resolver.query(dns.reversename.from_address(str(ip)),'PTR')
                except (dns.resolver.NXDOMAIN,dns.resolver.NoAnswer) as e:
                    #No PTR record for this ip - nothing to do
                    pass
                except KeyboardInterrupt:
                    if raw_input('[-] Sure you want to stop scanning '+str(cidr)+\
                        '? Program flow will continue normally. (Y/n):') in ['y','Y','']:
                        break
                    else:
                        #User chose to continue: retry the lookup that was interrupted
                        try:
                            reverse_lookup = Scan.dns_resolver.query(dns.reversename.from_address(str(ip)),'PTR')
                        except dns.resolver.NXDOMAIN as e:
                            pass
                if reverse_lookup:
                    #Organizing reverse lookup results (strip dnspython's trailing root dot)
                    reverse_domains = [ str(domain).rstrip('.') for domain in reverse_lookup ]
                    #Creating new host
                    new_host = Host(ips=[ip],reverse_domains=reverse_domains)
                    #Append host to current host self.related_hosts
                    self.related_hosts.add(new_host)
                    #Adds new_host to self.subdomains if new_host indeed is subdomain
                    self._add_to_subdomains_if_valid(subdomains_as_hosts=[new_host])
                    if feedback: print new_host.print_all_ips()
def print_all_ips(self):
if self.ips:
return '\n'.join([ ip.print_ip() for ip in self.ips ]).rstrip()
def print_subdomains(self):
return self._print_domains(sorted(self.subdomains, key=lambda x: x.domain))
@staticmethod
def _print_domains(hosts):
#Static method that prints a list of domains with its respective ips and rev_domains
#domains should be a list of Hosts
if hosts:
ret = ''
for host in hosts:
ret = ''.join([ret,host.domain])
p = host.print_all_ips()
if p: ret = ''.join([ret,'\n\t',p.replace('\n','\n\t')])
ret = ''.join([ret,'\n'])
return ret.rstrip().lstrip()
def print_all_ns(self):
#Print all NS records
return self._print_domains(self.ns)
    def print_all_mx(self):
        '''Return a printable listing of this host's MX (mail exchanger) records.'''
        #Print all MX records
        return self._print_domains(self.mx)
def print_all_whois_ip(self):
#Prints whois_ip records related to all self.ips
ret = set([ip.print_whois_ip() for ip in self.ips if ip.whois_ip])
return '\n'.join(ret).lstrip().rstrip()
def print_all_shodan(self):
#Print all Shodan entries (one for each IP in self.ips)
ret = [ ip.print_shodan() for ip in self.ips if ip.shodan ]
return '\n'.join(ret).lstrip().rstrip()
    def print_as_csv_lines(self):
        '''
        Generator that yields this Host's results as csv rows (lists of str),
        one row at a time: a target banner, then a header row plus one row per
        ip in self.ips, then optional sections for subdomains and for related
        hosts found in the same cidrs.
        '''
        yield ['Target: '+self.domain]
        if self.ips:
            #Header row for the main per-ip section
            yield [
                'Domain',
                'IP',
                'Reverse domains',
                'NS',
                'MX',
                'Subdomains',
                'Domain whois',
                'IP whois',
                'Shodan',
                'LinkedIn page',
            ]
            for ip in self.ips:
                yield [
                    self.domain,
                    str(ip),
                    '\n'.join(ip.rev_domains),
                    self.print_all_ns(),
                    self.print_all_mx(),
                    self.print_subdomains(),
                    self.whois_domain,
                    ip.print_whois_ip(),
                    ip.print_shodan(),
                    self.linkedin_page,
                ]
        if self.subdomains:
            yield ['\n']
            yield ['Subdomains for '+str(self.domain)]
            yield ['Domain','IP','Reverse domains']
            for sub in sorted(self.subdomains):
                for ip in sub.ips:
                    if sub.domain:
                        yield [ sub.domain,ip.ip,','.join( ip.rev_domains ) ]
                    else:
                        #Host found via reverse dns only - label it with its first reverse domain
                        yield [ ip.rev_domains[0],ip.ip,','.join( ip.rev_domains ) ]
        if self.related_hosts:
            yield ['\n']
            yield ['Hosts in same CIDR as '+str(self.domain)]
            yield ['IP','Reverse domains']
            for sub in sorted(self.related_hosts):
                yield [
                    ','.join([ str(ip) for ip in sub.ips ]),
                    ','.join([ ','.join(ip.rev_domains) for ip in sub.ips ]),
                ]
    def do_all_lookups(self, shodan_key=None):
        '''
        This method does all possible direct lookups for a Host:
        dns, ns, mx, whois (domain and all ips), shodan (only when a key is
        given) and google (subdomains + linkedin page).
        Not called by any Host or Scan function, only here for testing purposes.
        '''
        self.dns_lookups()
        self.ns_dns_lookup()
        self.mx_dns_lookup()
        self.get_whois_domain()
        self.get_all_whois_ip()
        if shodan_key:
            self.get_all_shodan(shodan_key)
        self.google_lookups()
class IP(Host):
    '''
    IP and information specific to it. Hosts contain multiple IPs,
    as a domain can resolve to multiple IPs.
    Attributes are:
        ip - Str representation of this ip e.g. '8.8.8.8'
        whois_ip - Dict containing results for Whois lookups against self.ip
        cidr - Str CIDR that contains self.ip (taken from whois_ip), e.g. 8.8.8.0/24
        rev_domains - List of str for each reverse domain for self.ip, found through reverse DNS lookups
        shodan - Dict containing Shodan results
    '''
    def __init__(self,ip,rev_domains=None):
        #Default built inside the body so instances never share one list
        if rev_domains is None: rev_domains = []
        self.ip = str(ip)
        self.rev_domains = rev_domains
        self.whois_ip = {}
        self.cidr = None
        self.shodan = None
    def __str__(self):
        return str(self.ip)
    def __hash__(self):
        return hash(('ip',self.ip))
    def __eq__(self,other):
        return self.ip == other.ip
    def __ne__(self,other):
        #Python 2 does not derive != from __eq__, so define it explicitly
        return not self.__eq__(other)
    @staticmethod
    def _ret_host_by_ip(ip):
        '''Return the PTR records for ip, or None when the lookup fails.'''
        try:
            return Scan.dns_resolver.query(dns.reversename.from_address(ip),'PTR')
        except (dns.resolver.NXDOMAIN,dns.resolver.NoAnswer) as e:
            Scan.error('[-] Host lookup failed for '+ip,sys._getframe().f_code.co_name)
    def get_rev_domains(self):
        '''Reverse-DNS self.ip into self.rev_domains. Returns self for chaining.'''
        rev_domains = self._ret_host_by_ip(self.ip)
        if rev_domains:
            #Strip the trailing root dot dnspython appends to absolute names
            self.rev_domains = [ str(domain).rstrip('.') for domain in rev_domains ]
        return self
    def get_shodan(self, key):
        '''Query Shodan for self.ip using api key, storing the raw result dict in self.shodan.'''
        try:
            api = shodan.Shodan(key)
            self.shodan = api.host(str(self))
        except Exception as e:
            Scan.error(e,sys._getframe().f_code.co_name)
    def get_whois_ip(self):
        '''Whois-lookup self.ip into self.whois_ip and record the containing cidr. Returns self for chaining.'''
        try:
            self.whois_ip = ipw(str(self)).lookup() or None
        except Exception as e:
            Scan.error(e,sys._getframe().f_code.co_name)
        #Guard with .get so a whois result without 'nets' can't raise KeyError
        if self.whois_ip and self.whois_ip.get('nets'):
            #First net is the most specific one reported by whois
            self.cidr = self.whois_ip['nets'][0]['cidr']
        return self
    def print_ip(self):
        '''Return 'ip - revdomain' for a single reverse domain, or the ip followed by one indented line per reverse domain.'''
        ret = str(self.ip)
        if self.rev_domains:
            if len(self.rev_domains) < 2:
                ret = ''.join([ret,' - ',self.rev_domains[0]])
            else:
                for rev in self.rev_domains:
                    #Keeps the original '\t\n\t' separator for output compatibility
                    ret = '\t'.join([ret,'\n',rev])
        return ret
    def print_whois_ip(self):
        '''Return self.whois_ip as printable text: top level fields first, then one indented section per net.'''
        if self.whois_ip:
            result = ''
            #Printing all top-level fields except 'nets' and 'query'
            for key,val in sorted(self.whois_ip.items()):
                if val and key not in ['nets','query']:
                    result = '\n'.join([result,key+': '+str(val)])
            #Printing each dict within 'nets' (index no longer shadows 'key' above)
            for index,net in enumerate(self.whois_ip.get('nets', [])):
                result = '\n'.join([result,'net '+str(index)+':'])
                for net_key,net_val in sorted(net.items()):
                    result = '\n\t'.join([result,net_key+': '+str(net_val).replace('\n',', ')])
            return result.strip()
    def print_shodan(self):
        '''Return a printable Shodan summary: ip/org (always), os/isp (when reported), then one port/banner pair per service.'''
        if self.shodan:
            result = ''.join(['IP: ',str(self.shodan.get('ip_str')),'\n'])
            result = ''.join([result,'Organization: ',self.shodan.get('org','n/a'),'\n'])
            #Only print os/isp when Shodan actually returned them - the old
            #guards used .get('os','n/a'), which is always truthy when missing
            if self.shodan.get('os'):
                result = ''.join([result,'OS: ',self.shodan.get('os'),'\n'])
            if self.shodan.get('isp'):
                result = ''.join([result,'ISP: ',self.shodan.get('isp'),'\n'])
            if len(self.shodan['data']) > 0:
                for item in self.shodan['data']:
                    result = ''.join([
                        result,
                        'Port: {}'.format(item['port']),
                        '\n',
                        'Banner: {}'.format(item['data'].replace('\n','\n\t').rstrip()),
                        #Trailing newline was missing, gluing one service's
                        #banner straight onto the next 'Port:' line
                        '\n',
                    ])
            return result.strip()
class Scan(object):
'''
Object that will hold all Host entries, interpret uset given flags, manage scans, threads and outputs.
Attributes are:
feedback - Bool flag for output printing. Static variable.
versobe - Bool flag for verbose output printing. Static variable.
nameserver - Str DNS server to be used for lookups (consumed by dns.resolver module)
shodan_key - Str key used for Shodan lookups
targets - Set of Hosts that will be scanned
target_networks - Set of ipaddress.IPv4Networks to be scanned
bad_targets - Set of user inputs that could not be understood or resolved
'''
feedback = False
verbose = False
dns_resolver = dns.resolver.Resolver()
def __init__(self,nameserver=None,shodan_key=None,feedback=False,verbose=False):
Scan.feedback = feedback
Scan.verbose=verbose
if nameserver: Scan.dns_resolver.nameservers = [nameserver]
self.shodan_key = shodan_key
self.targets = set()
self.target_networks = set()
self.bad_targets = set()
@staticmethod
def error(e, method_name=None):
if Scan.feedback and Scan.verbose:
print '# Error:', str(e),'| method:',method_name
def populate(self, user_supplied_list):
for user_supplied in user_supplied_list:
self.add_host(user_supplied)
if self.feedback:
if not self.targets:
print '# No hosts to scan'
else:
print '# Scanning',str(len(self.targets))+'/'+str(len(user_supplied_list)),'hosts'
if not self.shodan_key:
print '# No Shodan key provided'
else:
print'# Shodan key provided -',self.shodan_key
def add_host(self, user_supplied):
'''
Add string passed by user to self.targets as proper Host objects
For this, it parses user_supplied strings to separate IPs, Domains, and Networks.
'''
#is it an IP?
try:
ip = ipa.ip_address(user_supplied.decode('unicode-escape'))
#if not (ip.is_multicast or ip.is_unspecified or ip.is_reserved or ip.is_loopback):
self.targets.add(Host(ips=[str(ip)]))
return
except ValueError as e:
pass
#is it a valid network range?
try:
net = ipa.ip_network(user_supplied.decode('unicode-escape'),strict=False)
if net.prefixlen < 16:
if Scan.feedback: print '[-] Error: Not a good idea to scan anything bigger than a /16 is it?', user_supplied
else:
for ip in net:
self.add_host(str(ip), True)
return
except ValueError as e:
pass
#is it a valid DNS?
try:
ips = Scan.dns_resolver.query(user_supplied)
self.targets.add(Host(domain=user_supplied,ips=[str(ip) for ip in ips]))
return
except (dns.resolver.NXDOMAIN, dns.exception.SyntaxError) as e:
#If here so results from network won't be so verbose
if Scan.feedback: print '[-] Error: Couldn\'t resolve or understand', user_supplied
pass
self.bad_targets.add(user_supplied)
def scan_targets(self):
for host in self.targets:
self.full_scan_on_host(host)
def full_scan_on_host(self,host):
'''Does all possible scans for each host in self.targets'''
fb = self.feedback
if fb:
print ''
print '# ____________________ Scanning {} ____________________ #'.format(str(host))
###DNS and Whois lookups
if fb:
print ''
print '# DNS lookups'
host.dns_lookups()
if fb:
if host.domain:
print ''
print '[*] Domain: '+host.domain
#IPs and reverse domains
if host.ips:
print ''
print '[*] IPs & reverse DNS: '
print host.print_all_ips()
host.ns_dns_lookup()
#NS records
if host.ns and fb:
print ''
print '[*] NS records:'
print host.print_all_ns()
host.mx_dns_lookup()
#MX records
if host.mx and fb:
print ''
print '[*] MX records:'
print host.print_all_mx()
if fb:
print ''
print '# Whois lookups'
host.get_whois_domain()
if host.whois_domain and fb:
print ''
print '[*] Whois domain:'
print host.whois_domain
host.get_all_whois_ip()
if fb:
m = host.print_all_whois_ip()
if m:
print ''
print '[*] Whois IP:'
print m
#Shodan lookup
if self.shodan_key:
if fb:
print ''
print '# Querying Shodan for open ports'
host.get_all_shodan(self.shodan_key)
if fb:
m = host.print_all_shodan()
if m:
print '[*] Shodan:'
print m
#Google subdomains lookup
if host.domain:
if fb:
print ''
print '# Querying Google for subdomains and Linkedin pages, this might take a while'
host.google_lookups()
if fb:
if host.linkedin_page:
print ''
print '[*] Possible LinkedIn page: '+host.linkedin_page
if host.subdomains:
print ''
print '[*] Subdomains:'+'\n'+host.print_subdomains()
else:
print '[-] Error: No subdomains found in Google. If you are scanning a lot, Google might be blocking your requests.'
#DNS lookups on entire CIDRs taken from host.get_whois_ip()
if host.cidrs:
if fb:
print ''
print '# Reverse DNS lookup on range(s) {}'.format(', '.join(host.cidrs))
host.reverse_dns_lookup_on_related_cidrs(feedback=True)
def write_output_csv(self, filename=None):
'''Writes output for each target as csv in filename'''
if filename:
filename = os.path.expanduser(filename)
fb = self.feedback
if fb:
print ''
print '# Saving output csv file'
output_as_lines = []
for host in self.targets:
try:
#Using generator to get one csv line at a time (one Host can yield multiple lines)
generator = host.print_as_csv_lines()
while True:
output_as_lines.append(generator.next())
except StopIteration:
pass
output_written = False
while not output_written:
try:
with open(filename, 'wb') as f:
writer = csv.writer(f)
for line in output_as_lines:
writer.writerow(line)
output_written = True
except Exception as e:
error = '[-] Something went wrong, can\'t open output file. Press anything to try again.'
if self.verbose: error = ''.join([error,'\nError: ',str(e)])
raw_input(error)
except KeyboardInterrupt:
if raw_input('[-] Sure you want to exit without saving your file (Y/n)?') in ['y','Y','']:
sys.exit('# Scan interrupted')
def exit(self):
if self.feedback: print '# Done'
if __name__ == '__main__':
    #Command line entry point: parse arguments, scan every target, save csv output
    parser = argparse.ArgumentParser(description='InstaRecon')
    parser.add_argument('targets', nargs='+', help='targets')
    parser.add_argument('-o', '--output', required=False,nargs='?',help='Output filename as csv.')
    parser.add_argument('-n', '--nameserver', required=False, nargs='?',help='Alternative DNS server to query.')
    parser.add_argument('-s', '--shodan_key', required=False,nargs='?',help='Shodan key for automated lookups. To get one, simply register on https://www.shodan.io/.')
    parser.add_argument('-v','--verbose', action='store_true',help='Verbose errors')
    parser.add_argument('-d','--dns_only', action='store_true',help='Direct and reverse DNS lookups only.')
    args = parser.parse_args()
    targets = sorted(set(args.targets)) #removes duplicates
    scan = Scan(
        nameserver=args.nameserver,
        shodan_key=args.shodan_key,
        feedback=True,
        verbose=args.verbose,
    )
    try:
        scan.populate(targets)
        scan.scan_targets()
        scan.write_output_csv(args.output)
        scan.exit()
    except KeyboardInterrupt:
        #Ctrl-C anywhere outside the inner prompts aborts the whole run
        sys.exit('# Scan interrupted')
#!/usr/bin/env python
# * coding: utf8 *
'''
main.py
A module that contains the main forklift pallets for deq
Note: There is a separate scheduled task that runs this pallet for SGID10.ENVIRONMENT.DAQAirMonitorByStation
on an hourly basis.
'''
import arcpy
import build_json
import settings
from settings import fieldnames
import update_sgid
import update_fgdb
import pystache
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from forklift.models import Pallet, Crate
from forklift.messaging import send_email
from forklift import lift
from forklift import core
from os import path
#: folder this module lives in; used to build paths relative to the repo
current_folder = path.dirname(path.realpath(__file__))
#: arcgis services associated with the pallets (service name, service type)
services = [('DEQEnviro/Secure', 'MapServer'),
            ('DEQEnviro/MapService', 'MapServer'),
            ('DEQEnviro/ExportWebMap', 'GPServer'),
            ('DEQEnviro/Toolbox', 'GPServer')]
#: destination name of the high-res NHD streams crate (gets special post-processing)
STREAMS = 'StreamsNHDHighRes'
def send_report_email(name, report_data):
    '''
    Render report_template.html with report_data (via pystache) and email it
    to settings.reportEmail.

    name labels which pallet the report belongs to; it is injected into the
    template (as 'name') and into the email subject.
    '''
    report_data['name'] = name
    template = path.join(path.abspath(path.dirname(__file__)), 'report_template.html')
    with open(template, 'r') as template_file:
        email_content = pystache.render(template_file.read(), report_data)
    message = MIMEMultipart()
    message.attach(MIMEText(email_content, 'html'))
    #Bug fix: the subject had no '{}' placeholder, so .format(name) was a no-op
    #and every report arrived with the same generic subject
    send_email(settings.reportEmail, 'DEQ Nightly Report: {}'.format(name), message)
#: pallets are executed in alphabetical order
class DEQNightly0TempTablesPallet(Pallet):
    '''
    Copies source tables into a temp gdb, then ETLs them straight into SGID
    as point feature classes. Runs first (pallets execute in alphabetical order).
    '''
    def __init__(self, test_layer=None):
        super(DEQNightly0TempTablesPallet, self).__init__()
        self.problem_layer_infos = []
        self.test_layer = test_layer
    def build(self, target):
        infos, errors = update_sgid.get_temp_crate_infos(self.test_layer)
        self.add_crates(infos)
        if errors:
            self.success = (False, '\n\n'.join(errors))
    def process(self):
        self.log.info('ETL-ing temp tables to points in SGID...')
        update_sgid.start_etl(self.get_crates())
    def ship(self):
        send_report_email('Temp Tables', self.get_report())
class DEQNightly1SDEUpdatePallet(Pallet):
    '''
    Handles all non-etl data updates to SGID; assumes the destination data in
    SGID already exists. Layers listed in settings.PROBLEM_LAYERS bypass the
    normal crate processing and are truncated-and-appended manually in ship().
    '''
    def __init__(self, test_layer=None):
        super(DEQNightly1SDEUpdatePallet, self).__init__()
        self.problem_layer_infos = []
        self.test_layer = test_layer
    def build(self, target):
        #Stage SGID data in a local file geodatabase, creating it on first run
        sgid_stage = path.join(self.staging_rack, 'sgid_stage.gdb')
        if not arcpy.Exists(sgid_stage):
            arcpy.CreateFileGDB_management(path.dirname(sgid_stage), path.basename(sgid_stage))
        if self.test_layer is not None:
            crate_infos, errors = update_sgid.get_crate_infos(sgid_stage, self.test_layer)
        else:
            crate_infos, errors = update_sgid.get_crate_infos(sgid_stage)
        if len(errors) > 0:
            self.success = (False, '\n\n'.join(errors))
        #info[3] is the destination name - problem layers are handled manually
        self.add_crates([info for info in crate_infos if info[3] not in settings.PROBLEM_LAYERS])
        self.problem_layer_infos = [info for info in crate_infos if info[3] in settings.PROBLEM_LAYERS]
    def process(self):
        update_sgid.update_sgid_for_crates(self.get_crates())
    def update_problem_layers(self):
        '''Manually truncate-and-append layers the normal crate process can't handle.'''
        for source_name, source_workspace, destination_workspace, destination_name, id_field in self.problem_layer_infos:
            if self.test_layer and self.test_layer.split('.')[-1] != destination_name:
                continue
            #Paths built outside the try so the handler can't hit unbound names
            source = path.join(source_workspace, source_name)
            destination = path.join(destination_workspace, destination_name)
            try:
                self.log.info('manually updating %s', destination)
                arcpy.TruncateTable_management(destination)
                arcpy.Append_management(source, destination, 'TEST')
            except Exception:
                #Was a bare except, which also swallowed KeyboardInterrupt/SystemExit
                self.log.error('error manually updating %s!', destination)
                self.success = (Crate.UNHANDLED_EXCEPTION, 'Error updating {}'.format(destination_name))
    def ship(self):
        self.update_problem_layers()
        send_report_email('SGID', self.get_report())
class DEQNightly2FGDBUpdatePallet(Pallet):
    '''
    Updates the deqquerylayers.gdb from SGID. Crates are created and processed
    manually inside process() because they must run after the crates in
    DEQNightly0TempTablesPallet have been processed.
    '''
    def __init__(self, test_layer=None):
        super(DEQNightly2FGDBUpdatePallet, self).__init__()
        self.problem_layer_infos = []
        self.test_layer = test_layer
    def validate_crate(self, crate):
        return update_fgdb.validate_crate(crate)
    def build(self, configuration):
        self.configuration = configuration
        self.arcgis_services = services
        self.copy_data = [path.join(self.staging_rack, settings.fgd)]
    def requires_processing(self):
        #Always run: crates are added during process(), so the framework can't
        #tell from build() alone whether there is work to do
        return True
    def process(self):
        #: This needs to happen after the crates in DEQNightly0TempTables
        #: have been processed. That's why I'm creating them and manually processing them.
        if self.test_layer is not None:
            crate_infos = update_fgdb.get_crate_infos(self.staging_rack, self.test_layer)
        else:
            crate_infos = update_fgdb.get_crate_infos(self.staging_rack)
        self.add_crates([info for info in crate_infos if info[3] not in settings.PROBLEM_LAYERS])
        lift.process_crates_for([self], core.update, self.configuration)
        self.problem_layer_infos = [info for info in crate_infos if info[3] in settings.PROBLEM_LAYERS]
        self.update_problem_layers()
        for crate in self.get_crates():
            if crate.result[0] in [Crate.CREATED, Crate.UPDATED]:
                self.log.info('post processing crate: %s', crate.destination_name)
                update_fgdb.post_process_crate(crate)
        update_fgdb.create_relationship_classes(self.staging_rack, self.test_layer)
    def update_problem_layers(self):
        '''Manually copy or truncate-and-append layers the crate process can't handle, recording a result per crate.'''
        for source_name, source_workspace, destination_workspace, destination_name in self.problem_layer_infos:
            if self.test_layer and self.test_layer.split('.')[-1] != destination_name:
                continue
            #Crate and paths built before the try so the except handler never
            #references names that were left unbound by an early failure
            crate = Crate(source_name, source_workspace, destination_workspace, destination_name)
            source = path.join(source_workspace, source_name)
            destination = path.join(destination_workspace, destination_name)
            try:
                if not arcpy.Exists(destination):
                    self.log.info('creating %s', destination)
                    arcpy.Copy_management(source, destination)
                    crate.result = (Crate.CREATED, None)
                else:
                    self.log.info('manually updating %s', destination)
                    arcpy.TruncateTable_management(destination)
                    arcpy.Append_management(source, destination, 'TEST')
                    crate.result = (Crate.UPDATED, None)
            except Exception as ex:
                self.log.error('error manually updating %s!', destination)
                crate.result = (Crate.UNHANDLED_EXCEPTION, ex)
            self._crates.append(crate)
    def ship(self):
        #try/finally (the old 'except: raise' clause was a no-op) so the report
        #email still goes out even if building the json file blows up
        try:
            self.log.info('BUILDING JSON FILE')
            build_json.run()
        finally:
            send_report_email('App Data', self.get_report())
class DEQNightly3ReferenceDataPallet(Pallet):
    '''
    Ships the reference data used by the DEQ apps (counties, HUC, high-res NHD
    streams, IC buffer zones) plus the static deqreferencedata.gdb. When the
    streams crate changed, process() derives a dissolved, county-labelled
    SearchStreams layer from it.
    '''
    def __init__(self, test_layer=None):
        super(DEQNightly3ReferenceDataPallet, self).__init__()
        self.test_layer = test_layer
        self.arcgis_services = services
        #: source SGID connection and staged destination geodatabases
        self.sgid = path.join(self.garage, 'SGID10.sde')
        self.boundaries = path.join(self.staging_rack, 'boundaries.gdb')
        self.water = path.join(self.staging_rack, 'water.gdb')
        self.environment = path.join(self.staging_rack, 'environment.gdb')
        self.deqquerylayers = path.join(self.staging_rack, settings.fgd)
        self.copy_data = [self.boundaries,
                          self.water,
                          self.environment,
                          self.deqquerylayers]
        #: static reference data kept in the repo, relative to this file
        self.static_data = [path.join(current_folder, '..', '..', 'data', 'deqreferencedata.gdb')]
    def build(self, target):
        #No reference crates when a single test layer is being exercised
        if self.test_layer is None:
            self.add_crate(('Counties', self.sgid, self.boundaries))
            self.add_crates(['HUC', STREAMS], {
                'source_workspace': self.sgid,
                'destination_workspace': self.water
            })
            self.add_crate(('ICBUFFERZONES', self.sgid, self.environment))
    def process(self):
        '''
        Post-process the streams crate (only when it was created/updated):
        dissolve named streams, overlay with counties, and keep one feature per
        stream-name/county pair with the county exposed in a COUNTY field.
        '''
        for crate in self.get_crates():
            if crate.destination_name == STREAMS:
                if crate.result[0] in [Crate.CREATED, Crate.UPDATED]:
                    self.log.info('post processing streams data')
                    scratch = arcpy.env.scratchGDB
                    temp_field = 'TEMP'
                    #: temporary datasets
                    dissolved = path.join(scratch, 'DissolvedStreams')
                    identified = path.join(scratch, 'IdentifiedStreams')
                    #: layers
                    streams_layer = 'streams_layer'
                    no_name_layer = 'no_name_layer'
                    #: field names
                    GNIS_Name = fieldnames.GNIS_Name
                    NAME = fieldnames.NAME
                    COUNTY = fieldnames.COUNTY
                    #: final output
                    search_streams = path.join(self.deqquerylayers, 'SearchStreams')
                    #: clean up from last run, if needed
                    for cleanup_dataset in [dissolved, identified, search_streams]:
                        if arcpy.Exists(cleanup_dataset):
                            arcpy.Delete_management(cleanup_dataset)
                    #Only streams that actually have a GNIS name are searchable
                    query = '{0} IS NOT NULL AND {0} <> \'\''.format(GNIS_Name)
                    arcpy.MakeFeatureLayer_management(crate.destination, streams_layer, query)
                    arcpy.Dissolve_management(streams_layer, dissolved, dissolve_field=GNIS_Name, unsplit_lines='UNSPLIT_LINES')
                    #Overlay with counties so each stream segment carries its county attributes
                    arcpy.Identity_analysis(dissolved, path.join(self.boundaries, 'Counties'), identified)
                    #TEMP = stream name + county name, so a second dissolve can
                    #collapse to one feature per (stream, county) pair
                    arcpy.AddField_management(identified, temp_field, 'TEXT', '', '', 50)
                    arcpy.CalculateField_management(identified, temp_field, '!{}! + !{}!'.format(GNIS_Name, NAME), 'PYTHON')
                    arcpy.MakeFeatureLayer_management(identified, no_name_layer, '{0} IS NOT NULL AND {0} <> \'\''.format(NAME))
                    arcpy.Dissolve_management(no_name_layer, search_streams, temp_field)
                    arcpy.JoinField_management(search_streams, temp_field, no_name_layer, temp_field, [GNIS_Name, NAME])
                    #Expose the county under its own COUNTY field, then drop the helper fields
                    arcpy.AddField_management(search_streams, COUNTY, 'TEXT', '', '', 25)
                    arcpy.CalculateField_management(search_streams, COUNTY, '!{}!'.format(NAME), 'PYTHON')
                    arcpy.DeleteField_management(search_streams, NAME)
                    arcpy.DeleteField_management(search_streams, temp_field)
                    for delete_layer in [streams_layer, no_name_layer]:
                        arcpy.Delete_management(delete_layer)
                    break
# repoint to local version of static data
#!/usr/bin/env python
# * coding: utf8 *
'''
main.py
A module that contains the main forklift pallets for deq
Note: There is a separate scheduled task that runs this pallet for SGID10.ENVIRONMENT.DAQAirMonitorByStation
on an hourly basis.
'''
import arcpy
import build_json
import settings
from settings import fieldnames
import update_sgid
import update_fgdb
import pystache
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from forklift.models import Pallet, Crate
from forklift.messaging import send_email
from forklift import lift
from forklift import core
from os import path
#: folder this module lives in; used to build paths relative to the repo
current_folder = path.dirname(path.realpath(__file__))
#: arcgis services associated with the pallets (service name, service type)
services = [('DEQEnviro/Secure', 'MapServer'),
            ('DEQEnviro/MapService', 'MapServer'),
            ('DEQEnviro/ExportWebMap', 'GPServer'),
            ('DEQEnviro/Toolbox', 'GPServer')]
#: destination name of the high-res NHD streams crate (gets special post-processing)
STREAMS = 'StreamsNHDHighRes'
def send_report_email(name, report_data):
    '''
    Render report_template.html with report_data (via pystache) and email it
    to settings.reportEmail.

    name labels which pallet the report belongs to; it is injected into the
    template (as 'name') and into the email subject.
    '''
    report_data['name'] = name
    template = path.join(path.abspath(path.dirname(__file__)), 'report_template.html')
    with open(template, 'r') as template_file:
        email_content = pystache.render(template_file.read(), report_data)
    message = MIMEMultipart()
    message.attach(MIMEText(email_content, 'html'))
    #Bug fix: the subject had no '{}' placeholder, so .format(name) was a no-op
    #and every report arrived with the same generic subject
    send_email(settings.reportEmail, 'DEQ Nightly Report: {}'.format(name), message)
#: pallets are executed in alphabetical order
class DEQNightly0TempTablesPallet(Pallet):
    '''
    Copies source tables into a temp gdb, then ETLs them straight into SGID
    as point feature classes. Runs first (pallets execute in alphabetical order).
    '''
    def __init__(self, test_layer=None):
        super(DEQNightly0TempTablesPallet, self).__init__()
        self.problem_layer_infos = []
        self.test_layer = test_layer
    def build(self, target):
        infos, errors = update_sgid.get_temp_crate_infos(self.test_layer)
        self.add_crates(infos)
        if errors:
            self.success = (False, '\n\n'.join(errors))
    def process(self):
        self.log.info('ETL-ing temp tables to points in SGID...')
        update_sgid.start_etl(self.get_crates())
    def ship(self):
        send_report_email('Temp Tables', self.get_report())
class DEQNightly1SDEUpdatePallet(Pallet):
    '''
    Handles all non-etl data updates to SGID; assumes the destination data in
    SGID already exists. Layers listed in settings.PROBLEM_LAYERS bypass the
    normal crate processing and are truncated-and-appended manually in ship().
    '''
    def __init__(self, test_layer=None):
        super(DEQNightly1SDEUpdatePallet, self).__init__()
        self.problem_layer_infos = []
        self.test_layer = test_layer
    def build(self, target):
        #Stage SGID data in a local file geodatabase, creating it on first run
        sgid_stage = path.join(self.staging_rack, 'sgid_stage.gdb')
        if not arcpy.Exists(sgid_stage):
            arcpy.CreateFileGDB_management(path.dirname(sgid_stage), path.basename(sgid_stage))
        if self.test_layer is not None:
            crate_infos, errors = update_sgid.get_crate_infos(sgid_stage, self.test_layer)
        else:
            crate_infos, errors = update_sgid.get_crate_infos(sgid_stage)
        if len(errors) > 0:
            self.success = (False, '\n\n'.join(errors))
        #info[3] is the destination name - problem layers are handled manually
        self.add_crates([info for info in crate_infos if info[3] not in settings.PROBLEM_LAYERS])
        self.problem_layer_infos = [info for info in crate_infos if info[3] in settings.PROBLEM_LAYERS]
    def process(self):
        update_sgid.update_sgid_for_crates(self.get_crates())
    def update_problem_layers(self):
        '''Manually truncate-and-append layers the normal crate process can't handle.'''
        for source_name, source_workspace, destination_workspace, destination_name, id_field in self.problem_layer_infos:
            if self.test_layer and self.test_layer.split('.')[-1] != destination_name:
                continue
            #Paths built outside the try so the handler can't hit unbound names
            source = path.join(source_workspace, source_name)
            destination = path.join(destination_workspace, destination_name)
            try:
                self.log.info('manually updating %s', destination)
                arcpy.TruncateTable_management(destination)
                arcpy.Append_management(source, destination, 'TEST')
            except Exception:
                #Was a bare except, which also swallowed KeyboardInterrupt/SystemExit
                self.log.error('error manually updating %s!', destination)
                self.success = (Crate.UNHANDLED_EXCEPTION, 'Error updating {}'.format(destination_name))
    def ship(self):
        self.update_problem_layers()
        send_report_email('SGID', self.get_report())
class DEQNightly2FGDBUpdatePallet(Pallet):
    '''
    Updates the deqquerylayers.gdb from SGID. Crates are created and processed
    manually inside process() because they must run after the crates in
    DEQNightly0TempTablesPallet have been processed.
    '''
    def __init__(self, test_layer=None):
        super(DEQNightly2FGDBUpdatePallet, self).__init__()
        self.problem_layer_infos = []
        self.test_layer = test_layer
    def validate_crate(self, crate):
        return update_fgdb.validate_crate(crate)
    def build(self, configuration):
        self.configuration = configuration
        self.arcgis_services = services
        self.copy_data = [path.join(self.staging_rack, settings.fgd)]
    def requires_processing(self):
        #Always run: crates are added during process(), so the framework can't
        #tell from build() alone whether there is work to do
        return True
    def process(self):
        #: This needs to happen after the crates in DEQNightly0TempTables
        #: have been processed. That's why I'm creating them and manually processing them.
        if self.test_layer is not None:
            crate_infos = update_fgdb.get_crate_infos(self.staging_rack, self.test_layer)
        else:
            crate_infos = update_fgdb.get_crate_infos(self.staging_rack)
        self.add_crates([info for info in crate_infos if info[3] not in settings.PROBLEM_LAYERS])
        lift.process_crates_for([self], core.update, self.configuration)
        self.problem_layer_infos = [info for info in crate_infos if info[3] in settings.PROBLEM_LAYERS]
        self.update_problem_layers()
        for crate in self.get_crates():
            if crate.result[0] in [Crate.CREATED, Crate.UPDATED]:
                self.log.info('post processing crate: %s', crate.destination_name)
                update_fgdb.post_process_crate(crate)
        update_fgdb.create_relationship_classes(self.staging_rack, self.test_layer)
    def update_problem_layers(self):
        '''Manually copy or truncate-and-append layers the crate process can't handle, recording a result per crate.'''
        for source_name, source_workspace, destination_workspace, destination_name in self.problem_layer_infos:
            if self.test_layer and self.test_layer.split('.')[-1] != destination_name:
                continue
            #Crate and paths built before the try so the except handler never
            #references names that were left unbound by an early failure
            crate = Crate(source_name, source_workspace, destination_workspace, destination_name)
            source = path.join(source_workspace, source_name)
            destination = path.join(destination_workspace, destination_name)
            try:
                if not arcpy.Exists(destination):
                    self.log.info('creating %s', destination)
                    arcpy.Copy_management(source, destination)
                    crate.result = (Crate.CREATED, None)
                else:
                    self.log.info('manually updating %s', destination)
                    arcpy.TruncateTable_management(destination)
                    arcpy.Append_management(source, destination, 'TEST')
                    crate.result = (Crate.UPDATED, None)
            except Exception as ex:
                self.log.error('error manually updating %s!', destination)
                crate.result = (Crate.UNHANDLED_EXCEPTION, ex)
            self._crates.append(crate)
    def ship(self):
        #try/finally (the old 'except: raise' clause was a no-op) so the report
        #email still goes out even if building the json file blows up
        try:
            self.log.info('BUILDING JSON FILE')
            build_json.run()
        finally:
            send_report_email('App Data', self.get_report())
class DEQNightly3ReferenceDataPallet(Pallet):
    """Forklift pallet that refreshes SGID reference data and derives SearchStreams.

    Copies Counties, HUC, streams, and buffer-zone layers from SGID into the
    staging file geodatabases, then rebuilds the SearchStreams query layer
    from the freshly-updated streams data.
    """

    def __init__(self, test_layer=None):
        # test_layer: optional layer name; when set, build() adds no crates
        # (the single-layer test path is driven elsewhere).
        super(DEQNightly3ReferenceDataPallet, self).__init__()

        self.test_layer = test_layer
        self.arcgis_services = services

        # Source SDE connection and staging file geodatabases.
        self.sgid = path.join(self.garage, 'SGID10.sde')
        self.boundaries = path.join(self.staging_rack, 'boundaries.gdb')
        self.water = path.join(self.staging_rack, 'water.gdb')
        self.environment = path.join(self.staging_rack, 'environment.gdb')
        self.deqquerylayers = path.join(self.staging_rack, settings.fgd)

        # Workspaces forklift copies to the servers each run.
        self.copy_data = [self.boundaries,
                          self.water,
                          self.environment,
                          self.deqquerylayers]
        # NOTE(review): presumably shipped without per-run crate updates —
        # confirm against forklift's static_data handling.
        self.static_data = [path.join(r'C:\Scheduled\static', 'deqreferencedata.gdb')]

    def build(self, target):
        if self.test_layer is None:
            # Full run: pull every reference layer from SGID.
            self.add_crate(('Counties', self.sgid, self.boundaries))
            self.add_crates(['HUC', STREAMS], {
                'source_workspace': self.sgid,
                'destination_workspace': self.water
            })
            self.add_crate(('ICBUFFERZONES', self.sgid, self.environment))

    def process(self):
        """Rebuild the SearchStreams layer when the streams crate changed."""
        for crate in self.get_crates():
            if crate.destination_name == STREAMS:
                if crate.result[0] in [Crate.CREATED, Crate.UPDATED]:
                    self.log.info('post processing streams data')
                    scratch = arcpy.env.scratchGDB
                    temp_field = 'TEMP'

                    #: temporary datasets
                    dissolved = path.join(scratch, 'DissolvedStreams')
                    identified = path.join(scratch, 'IdentifiedStreams')

                    #: layers
                    streams_layer = 'streams_layer'
                    no_name_layer = 'no_name_layer'

                    #: field names
                    GNIS_Name = fieldnames.GNIS_Name
                    NAME = fieldnames.NAME
                    COUNTY = fieldnames.COUNTY

                    #: final output
                    search_streams = path.join(self.deqquerylayers, 'SearchStreams')

                    #: clean up from last run, if needed
                    for cleanup_dataset in [dissolved, identified, search_streams]:
                        if arcpy.Exists(cleanup_dataset):
                            arcpy.Delete_management(cleanup_dataset)

                    #: keep only streams with a GNIS name, dissolve them into
                    #: single features, then overlay with county boundaries
                    query = '{0} IS NOT NULL AND {0} <> \'\''.format(GNIS_Name)
                    arcpy.MakeFeatureLayer_management(crate.destination, streams_layer, query)
                    arcpy.Dissolve_management(streams_layer, dissolved, dissolve_field=GNIS_Name, unsplit_lines='UNSPLIT_LINES')
                    arcpy.Identity_analysis(dissolved, path.join(self.boundaries, 'Counties'), identified)

                    #: build a stream-name + county-name key, dissolve on it,
                    #: join the source fields back, and expose the county name
                    arcpy.AddField_management(identified, temp_field, 'TEXT', '', '', 50)
                    arcpy.CalculateField_management(identified, temp_field, '!{}! + !{}!'.format(GNIS_Name, NAME), 'PYTHON')
                    arcpy.MakeFeatureLayer_management(identified, no_name_layer, '{0} IS NOT NULL AND {0} <> \'\''.format(NAME))
                    arcpy.Dissolve_management(no_name_layer, search_streams, temp_field)
                    arcpy.JoinField_management(search_streams, temp_field, no_name_layer, temp_field, [GNIS_Name, NAME])
                    arcpy.AddField_management(search_streams, COUNTY, 'TEXT', '', '', 25)
                    arcpy.CalculateField_management(search_streams, COUNTY, '!{}!'.format(NAME), 'PYTHON')
                    arcpy.DeleteField_management(search_streams, NAME)
                    arcpy.DeleteField_management(search_streams, temp_field)

                    #: drop the temporary feature layers
                    for delete_layer in [streams_layer, no_name_layer]:
                        arcpy.Delete_management(delete_layer)

                # Only one streams crate exists; stop scanning.
                break
|
#!/usr/bin/env python2.7
"""
parallelMappingEvaluation.py: Run the mapping evaluation on all the servers in
parallel.
BAM files with reads must have been already downloaded.
"""
import argparse, sys, os, os.path, random, subprocess, shutil, itertools, glob
import doctest, re, json, collections, time, timeit
import logging, logging.handlers, SocketServer, struct, socket, threading
import string
import urlparse
import fnmatch
import dateutil.parser
from toil.job import Job
from toillib import *
def parse_args(args):
    """
    Takes in the command-line arguments list (args), and returns a nice argparse
    result with fields for all the options.

    args: the raw argv list, including the program name at index 0.

    Borrows heavily from the argparse documentation examples:
    <http://docs.python.org/library/argparse.html>
    """

    # Construct the parser (which is stored in parser)
    # Module docstring lives in __doc__
    # See http://python-forum.com/pythonforum/viewtopic.php?f=3&t=36847
    # And a formatter class so our examples in the docstring look good. Isn't it
    # convenient how we already wrapped it to 80 characters?
    # See http://docs.python.org/library/argparse.html#formatter-class
    parser = argparse.ArgumentParser(description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # Add the Toil options so the job store is the first argument
    Job.Runner.addToilOptions(parser)

    # General options
    # (fixed typo: "continaing" -> "containing" in the help text)
    parser.add_argument("server_list", type=argparse.FileType("r"),
        help="TSV file containing <region>\t<url> lines for servers to test")
    parser.add_argument("sample_store",
        help="sample input IOStore with <region>/<sample>/<sample>.bam.fq")
    parser.add_argument("out_store",
        help="output IOStore to create and fill with alignments and stats")
    parser.add_argument("--server_version", default="v0.6.g",
        help="server version to add to URLs")
    parser.add_argument("--sample_pattern", default="*",
        help="fnmatch-style pattern for sample names")
    parser.add_argument("--sample_limit", type=int, default=float("inf"),
        help="number of samples to use")
    parser.add_argument("--edge_max", type=int, default=0,
        help="maximum edges to cross in index")
    parser.add_argument("--kmer_size", type=int, default=10,
        help="size of kmers to use in indexing and mapping")
    parser.add_argument("--bin_url",
        default="https://hgvm.blob.core.windows.net/hgvm-bin",
        help="URL to download sg2vg and vg binaries from, without Docker")
    parser.add_argument("--use_path_binaries", action="store_true",
        help="use system vg and sg2vg instead of downloading them")
    parser.add_argument("--overwrite", default=False, action="store_true",
        help="overwrite existing result files")
    parser.add_argument("--restat", default=False, action="store_true",
        help="recompute and overwrite existing stats files")
    parser.add_argument("--reindex", default=False, action="store_true",
        help="don't re-use existing indexed graphs")
    parser.add_argument("--too_old", default=None, type=str,
        help="recompute stats files older than this date")
    parser.add_argument("--index_mode", choices=["rocksdb", "gcsa-kmer",
        "gcsa-mem"], default="gcsa-mem",
        help="type of vg index to use for mapping")
    parser.add_argument("--include_pruned", action="store_true",
        help="use the pruned graph in the index")
    parser.add_argument("--include_primary", action="store_true",
        help="use the primary path in the index")

    # The command line arguments start with the program name, which we don't
    # want to treat as an argument for argparse. So we remove it.
    args = args[1:]

    return parser.parse_args(args)
# Reverse complement needs a global translation table
# (Python 2 string.maketrans: each base maps to its complement, N maps to N)
reverse_complement_translation_table = string.maketrans("ACGTN", "TGCAN")
def reverse_complement(sequence):
    """
    Compute the reverse complement of a DNA sequence.

    Follows algorithm from <http://stackoverflow.com/a/26615937>
    """
    if isinstance(sequence, unicode):
        # Coerce unicode input to an ASCII byte string so the byte-oriented
        # translation table (built with string.maketrans) can be applied.
        sequence = sequence.encode("ascii", "replace")

    # Complement every base in one pass, then reverse the result.
    complemented = sequence.translate(reverse_complement_translation_table)
    return complemented[::-1]
def count_Ns(sequence):
    """
    Return the number of N bases in the given DNA sequence.

    sequence: a DNA string; only exact "N" characters are counted.
    """
    # str.count scans in C — no need for a hand-rolled counting loop.
    return sequence.count("N")
def run_all_alignments(job, options):
    """
    For each server listed in the server_list tsv, kick off child jobs to
    align and evaluate it.

    job: the Toil root job this runs as.
    options: parsed command-line options (see parse_args).
    """

    # Set up the IO stores each time, since we can't unpickle them on Azure for
    # some reason.
    sample_store = IOStore.get(options.sample_store)
    out_store = IOStore.get(options.out_store)

    if options.use_path_binaries:
        # We don't download any binaries and don't maintain a bin_dir
        bin_dir_id = None
    else:
        # Retrieve binaries we need
        RealTimeLogger.get().info("Retrieving binaries from {}".format(
            options.bin_url))
        bin_dir = "{}/bin".format(job.fileStore.getLocalTempDir())
        robust_makedirs(bin_dir)
        subprocess.check_call(["wget", "{}/sg2vg".format(options.bin_url),
            "-O", "{}/sg2vg".format(bin_dir)])
        subprocess.check_call(["wget", "{}/vg".format(options.bin_url),
            "-O", "{}/vg".format(bin_dir)])

        # Make them executable (rwxr--r--)
        os.chmod("{}/sg2vg".format(bin_dir), 0o744)
        os.chmod("{}/vg".format(bin_dir), 0o744)

        # Upload the bin directory to the file store
        bin_dir_id = write_global_directory(job.fileStore, bin_dir,
            cleanup=True)

    # Make sure we skip the header
    is_first = True

    for line in options.server_list:
        if is_first:
            # This is the header, skip it.
            is_first = False
            continue

        # We need to read each non-header line

        # Break it into its fields
        parts = line.split("\t")

        if parts[0].startswith("#"):
            # Skip comments
            continue

        if parts[0].startswith("\n"):
            # Skip newlines (blank lines split to a single "\n" field)
            continue

        # Pull out the first 3 fields
        region, url, generator = parts[0:3]

        # We cleverly just split the lines out to different nodes
        job.addChildJobFn(run_region_alignments, options, bin_dir_id, region,
            url, cores=16, memory="100G", disk="50G")

        # Say what we did
        RealTimeLogger.get().info("Running child for {}".format(parts[1]))
def run_region_alignments(job, options, bin_dir_id, region, url):
    """
    For the given region, download, index, and then align to the given graph.

    job: the Toil job this runs as.
    options: parsed command-line options (see parse_args).
    bin_dir_id: global file store ID for the downloaded binaries directory,
    or None to use system binaries from the PATH.
    region: region name (like "brca2") used to locate samples and name output.
    url: http(s) URL of a GA4GH graph server, or a file: URL to a .vg graph.
    """

    RealTimeLogger.get().info("Running on {} for {}".format(url, region))

    # Set up the IO stores each time, since we can't unpickle them on Azure for
    # some reason.
    sample_store = IOStore.get(options.sample_store)
    out_store = IOStore.get(options.out_store)

    if bin_dir_id is not None:
        # Download the binaries
        bin_dir = "{}/bin".format(job.fileStore.getLocalTempDir())
        read_global_directory(job.fileStore, bin_dir_id, bin_dir)
        # We define a string we can just tack onto the binary name and get
        # either the system or the downloaded version.
        bin_prefix = bin_dir + "/"
    else:
        bin_prefix = ""

    # Parse the graph URL. It may be either an http(s) URL to a GA4GH server, or
    # a direct file: URL to a vg graph.
    url_parts = urlparse.urlparse(url, "file")

    # Either way, it has to include the graph base name, which we use to
    # identify the graph, and which has to contain the region name (like brca2)
    # and the graph type name (like cactus). For a server it's the last folder
    # in the URL, with a trailing slash, and for a file it's the name of the
    # .vg file.
    basename = re.match('.*/(.*)(/|\.vg)$', url_parts.path).group(1)

    # Get graph name (without region and its associated dash) from basename
    graph_name = basename.replace("-{}".format(region), "").replace(
        "{}-".format(region), "")

    # Where do we look for samples for this region in the input?
    region_dir = region.upper()

    # What samples do we do? List input sample names up to the given limit.
    input_samples = [n for n in sample_store.list_input_directory(region_dir) \
        if fnmatch.fnmatchcase(n, options.sample_pattern)]
    if len(input_samples) > options.sample_limit:
        input_samples = input_samples[:options.sample_limit]

    # Work out the directory for the alignments to be dumped in in the output
    alignment_dir = "alignments/{}/{}".format(region, graph_name)

    # Also for statistics
    stats_dir = "stats/{}/{}".format(region, graph_name)

    # What samples have been completed? Map from ID to mtime
    completed_samples = {}
    for filename, mtime in out_store.list_input_directory(stats_dir,
        with_times=True):
        # See if every file is a stats file
        match = re.match("(.*)\.json$", filename)

        if match and (options.too_old is None or mtime > options.too_old):
            # We found a sample's stats file, and it's new enough.
            completed_samples[match.group(1)] = mtime

        if match and (options.too_old is not None and mtime < options.too_old):
            # Say we hit an mtime thing
            RealTimeLogger.get().info("Need to re-run {} because "
                "{} < {}".format(match.group(1), mtime.ctime(),
                options.too_old.ctime()))

    RealTimeLogger.get().info("Already have {} completed samples for {} in "
        "{}".format(len(completed_samples), basename, stats_dir))

    # What samples haven't been done yet and need doing
    samples_to_run = []

    for sample in input_samples:
        # Split out over each sample

        if ((not options.overwrite) and (not options.restat) and
            completed_samples.has_key(sample)):
            # This is already done.
            RealTimeLogger.get().info("Skipping completed alignment of "
                "{} to {} {}".format(sample, graph_name, region))
            continue
        else:
            # We need to run this sample
            samples_to_run.append(sample)

    if len(samples_to_run) == 0 and not options.reindex:
        # Don't bother indexing the graph if all the samples are done, and we
        # didn't explicitly ask to do it.
        RealTimeLogger.get().info("Nothing to align to {}".format(basename))
        return

    # Where will the indexed graph go in the output
    index_key = "indexes/{}-{}-{}/{}/{}.tar.gz".format(options.index_mode,
        options.kmer_size, options.edge_max, region, graph_name)

    if (not options.reindex) and out_store.exists(index_key):
        # See if we have an index already available in the output store from a
        # previous run

        RealTimeLogger.get().info("Retrieving indexed {} graph from output "
            "store".format(basename))

        # Download the pre-made index directory
        tgz_file = "{}/index.tar.gz".format(job.fileStore.getLocalTempDir())
        out_store.read_input_file(index_key, tgz_file)

        # Save it to the global file store and keep around the ID.
        # Will be compatible with read_global_directory
        index_dir_id = job.fileStore.writeGlobalFile(tgz_file, cleanup=True)

        RealTimeLogger.get().info("Index for {} retrieved "
            "successfully".format(basename))

    else:
        # Download the graph, build the index, and store it in the output store

        # Work out where the graph goes
        # it will be graph.vg in here
        graph_dir = "{}/graph".format(job.fileStore.getLocalTempDir())
        robust_makedirs(graph_dir)

        graph_filename = "{}/graph.vg".format(graph_dir)

        # Download and fix up the graph with this ugly subprocess pipeline
        # sg2vg "${URL}" -u | vg view -Jv - | vg mod -X 100 - |
        # vg ids -s - > "graphs/${BASENAME}.vg"

        with open(graph_filename, "w") as output_file:
            # Hold all the popen objects we need for this
            tasks = []

            if url_parts.scheme == "file":
                # Grab the vg graph from a local file
                RealTimeLogger.get().info("Reading {} to {}".format(
                    url, graph_filename))

                # Just cat the file. We need a process so we can do the
                # tasks[-1].stdout trick.
                tasks.append(subprocess.Popen(["cat", url_parts.path],
                    stdout=subprocess.PIPE))
            else:
                # Assume it's on a server

                # Make the real URL with the version
                versioned_url = url + options.server_version

                RealTimeLogger.get().info("Downloading {} to {}".format(
                    versioned_url, graph_filename))

                # Do the download
                tasks.append(subprocess.Popen(["{}sg2vg".format(bin_prefix),
                    versioned_url, "-u"], stdout=subprocess.PIPE))

            # Convert to vg
            tasks.append(subprocess.Popen(["{}vg".format(bin_prefix),
                "view", "-Jv", "-"], stdin=tasks[-1].stdout,
                stdout=subprocess.PIPE))

            # And cut nodes
            tasks.append(subprocess.Popen(["{}vg".format(bin_prefix), "mod",
                "-X100", "-"], stdin=tasks[-1].stdout, stdout=subprocess.PIPE))

            # And sort ids
            tasks.append(subprocess.Popen(["{}vg".format(bin_prefix), "ids",
                "-s", "-"], stdin=tasks[-1].stdout, stdout=output_file))

            # Did we make it through all the tasks OK?
            for task in tasks:
                if task.wait() != 0:
                    raise RuntimeError("Pipeline step returned {}".format(
                        task.returncode))

        # TODO: We sometimes don't see the files written immediately, for some
        # reason. Maybe because we opened them? Anyway, this is a hack to wait
        # for them to be on disk and readable.
        time.sleep(1)

        # Now run the indexer.
        # TODO: support both indexing modes
        RealTimeLogger.get().info("Indexing {}".format(graph_filename))

        if options.index_mode == "rocksdb":
            # Make the RocksDB index
            subprocess.check_call(["{}vg".format(bin_prefix), "index", "-s", "-k",
                str(options.kmer_size), "-e", str(options.edge_max),
                "-t", str(job.cores), graph_filename, "-d",
                graph_filename + ".index"])

        elif (options.index_mode == "gcsa-kmer" or
            options.index_mode == "gcsa-mem"):
            # We want a GCSA2/xg index. We have to prune the graph ourselves.
            # See <https://github.com/vgteam/vg/issues/286>.

            # What will we use as our temp combined graph file (containing only
            # the bits of the graph we want to index, used for deduplication)?
            to_index_filename = "{}/to_index.vg".format(
                job.fileStore.getLocalTempDir())

            # Where will we save the kmers?
            kmers_filename = "{}/index.graph".format(
                job.fileStore.getLocalTempDir())

            with open(to_index_filename, "w") as to_index_file:

                if options.include_pruned:

                    RealTimeLogger.get().info("Pruning {} to {}".format(
                        graph_filename, to_index_filename))

                    # Prune out hard bits of the graph
                    tasks = []

                    # Prune out complex regions
                    tasks.append(subprocess.Popen(["{}vg".format(bin_prefix), "mod",
                        "-p", "-l", str(options.kmer_size), "-t", str(job.cores),
                        "-e", str(options.edge_max), graph_filename],
                        stdout=subprocess.PIPE))

                    # Throw out short disconnected chunks
                    tasks.append(subprocess.Popen(["{}vg".format(bin_prefix), "mod",
                        "-S", "-l", str(options.kmer_size * 2),
                        "-t", str(job.cores), "-"], stdin=tasks[-1].stdout,
                        stdout=to_index_file))

                    # Did we make it through all the tasks OK?
                    for task in tasks:
                        if task.wait() != 0:
                            raise RuntimeError("Pipeline step returned {}".format(
                                task.returncode))

                    time.sleep(1)

                if options.include_primary:

                    # Then append in the primary path. Since we don't know what
                    # it's called, we retain "ref" and all the "19", "6", etc.
                    # paths from 1KG.

                    RealTimeLogger.get().info(
                        "Adding primary path to {}".format(to_index_filename))

                    # See
                    # https://github.com/vgteam/vg/issues/318#issuecomment-215102199

                    # Generate all the paths names we might have for primary paths.
                    # It should be "ref" but some graphs don't listen
                    ref_names = (["ref", "x", "X", "y", "Y", "m", "M"] +
                        [str(x) for x in xrange(1, 23)])

                    ref_options = []
                    for name in ref_names:
                        # Put each in a -r option to retain the path
                        ref_options.append("-r")
                        ref_options.append(name)

                    tasks = []

                    # Retain only the specified paths (only one should really exist)
                    tasks.append(subprocess.Popen(
                        ["{}vg".format(bin_prefix), "mod", "-N"] + ref_options +
                        ["-t", str(job.cores), graph_filename],
                        stdout=to_index_file))

                    # TODO: if we merged the primary path back on itself, it's
                    # possible for it to braid with itself. Right now we just ignore
                    # this and let those graphs take a super long time to index.

                    # Wait for this second pipeline. We don't parallelize with the
                    # first one so we don't need to use an extra cat step.
                    for task in tasks:
                        if task.wait() != 0:
                            raise RuntimeError("Pipeline step returned {}".format(
                                task.returncode))

                    # Wait to make sure no weird file-not-being-full bugs happen
                    # TODO: how do I wait on child process output?
                    time.sleep(1)

            time.sleep(1)

            # Now we have the combined to-index graph in one vg file. We'll load
            # it (which deduplicates nodes/edges) and then find kmers.

            # Save the problematic file
            out_store.write_output_file(to_index_filename,
                "debug/{}-{}-{}-{}-{}.vg".format(options.index_mode,
                options.kmer_size, options.edge_max, region, graph_name))

            with open(kmers_filename, "w") as kmers_file:

                tasks = []

                RealTimeLogger.get().info("Finding kmers in {} to {}".format(
                    to_index_filename, kmers_filename))

                # Deduplicate the graph
                tasks.append(subprocess.Popen(["{}vg".format(bin_prefix),
                    "view", "-v", to_index_filename],
                    stdout=subprocess.PIPE))

                # Make the GCSA2 kmers file
                tasks.append(subprocess.Popen(["{}vg".format(bin_prefix),
                    "kmers", "-g", "-B", "-k", str(options.kmer_size),
                    "-H", "1000000000", "-T", "1000000001",
                    "-t", str(job.cores), "-"], stdin=tasks[-1].stdout,
                    stdout=kmers_file))

                # Did we make it through all the tasks OK?
                task_number = 0
                for task in tasks:
                    if task.wait() != 0:
                        raise RuntimeError(
                            "Pipeline step {} returned {}".format(
                                task_number, task.returncode))
                    task_number += 1

                # Wait to make sure no weird file-not-being-full bugs happen
                # TODO: how do I wait on child process output?
                time.sleep(1)

            time.sleep(1)

            # Where do we put the GCSA2 index?
            gcsa_filename = graph_filename + ".gcsa"

            RealTimeLogger.get().info("GCSA-indexing {} to {}".format(
                kmers_filename, gcsa_filename))

            # Make the gcsa2 index. Make sure to use 3 doubling steps to work
            # around <https://github.com/vgteam/vg/issues/301>
            subprocess.check_call(["{}vg".format(bin_prefix), "index", "-t",
                str(job.cores), "-i", kmers_filename, "-g", gcsa_filename,
                "-X", "3"])

            # Where do we put the XG index?
            xg_filename = graph_filename + ".xg"

            RealTimeLogger.get().info("XG-indexing {} to {}".format(
                graph_filename, xg_filename))

            subprocess.check_call(["{}vg".format(bin_prefix), "index", "-t",
                str(job.cores), "-x", xg_filename, graph_filename])

        else:
            raise RuntimeError("Invalid indexing mode: " + options.index_mode)

        # Define a file to keep the compressed index in, so we can send it to
        # the output store.
        index_dir_tgz = "{}/index.tar.gz".format(
            job.fileStore.getLocalTempDir())

        # Now save the indexed graph directory to the file store. It can be
        # cleaned up since only our children use it.
        RealTimeLogger.get().info("Compressing index of {}".format(
            graph_filename))
        index_dir_id = write_global_directory(job.fileStore, graph_dir,
            cleanup=True, tee=index_dir_tgz)

        # Save it as output
        RealTimeLogger.get().info("Uploading index of {}".format(
            graph_filename))
        out_store.write_output_file(index_dir_tgz, index_key)
        RealTimeLogger.get().info("Index {} uploaded successfully".format(
            index_key))

    # Queue the actual alignments in batches, now that the index is available
    # under index_dir_id either way.
    RealTimeLogger.get().info("Queueing alignment of {} samples to "
        "{} {}".format(len(samples_to_run), graph_name, region))

    job.addChildJobFn(recursively_run_samples, options, bin_dir_id,
        graph_name, region, index_dir_id, samples_to_run,
        cores=1, memory="4G", disk="4G")

    RealTimeLogger.get().info("Done making children for {}".format(basename))
def recursively_run_samples(job, options, bin_dir_id, graph_name, region,
    index_dir_id, samples_to_run, num_per_call=10):
    """
    Create child jobs to run a few samples from the samples_to_run list, and a
    recursive child job to create a few more.

    This is a hack to deal with the problems produced by having a job with
    thousands of children on the Azure job store: the job graph gets cut up into
    tiny chunks of data and stored as table values, and when you have many table
    store operations one of them is likely to fail and screw up your whole
    serialization process.

    We have some logic here to decide how much of the sample needs to be rerun.
    If we get a sample, all we know is that it doesn't have an up to date stats
    file, but it may or may not have an alignment file already.

    num_per_call: how many samples to schedule directly at each level of the
    recursion before handing the remainder to child jobs.
    """

    # Set up the IO stores each time, since we can't unpickle them on Azure for
    # some reason.
    sample_store = IOStore.get(options.sample_store)
    out_store = IOStore.get(options.out_store)

    # Get some samples to run
    samples_to_run_now = samples_to_run[:num_per_call]
    samples_to_run_later = samples_to_run[num_per_call:]

    # Work out where samples for this region live
    region_dir = region.upper()

    # Work out the directory for the alignments to be dumped in in the output
    alignment_dir = "alignments/{}/{}".format(region, graph_name)

    # Also for statistics
    stats_dir = "stats/{}/{}".format(region, graph_name)

    for sample in samples_to_run_now:
        # Split out over each sample that needs to be run

        # For each sample, know the FQ name
        sample_fastq = "{}/{}/{}.bam.fq".format(region_dir, sample, sample)

        # And know where we're going to put the output
        alignment_file_key = "{}/{}.gam".format(alignment_dir, sample)
        stats_file_key = "{}/{}.json".format(stats_dir, sample)

        if (not options.overwrite and out_store.exists(alignment_file_key)):
            # If the alignment exists and is OK, we can skip the alignment
            mtime = out_store.get_mtime(stats_file_key)

            if mtime is None or mtime < options.too_old or options.restat:
                # All we need to do for this sample is run stats
                RealTimeLogger.get().info("Queueing stat recalculation"
                    " of {} on {} {}".format(sample, graph_name, region))

                job.addFollowOnJobFn(run_stats, options, bin_dir_id,
                    index_dir_id, alignment_file_key, stats_file_key,
                    run_time=None, cores=2, memory="4G", disk="10G")
            else:
                # The stats are up to date and the alignment doesn't need
                # rerunning. This shouldn't happen because this sample shouldn't
                # be on the todo list. But it means we can just skip the sample.
                RealTimeLogger.get().warning("SKIPPING sample "
                    "{} on {} {}".format(sample, graph_name, region))
        else:
            # Otherwise we need to do an alignment, and then stats.
            RealTimeLogger.get().info("Queueing alignment"
                " of {} to {} {}".format(sample, graph_name, region))

            # Go and bang that input fastq against the correct indexed graph.
            # Its output will go to the right place in the output store.
            job.addChildJobFn(run_alignment, options, bin_dir_id, sample,
                graph_name, region, index_dir_id, sample_fastq,
                alignment_file_key, stats_file_key,
                cores=16, memory="100G", disk="50G")

    if len(samples_to_run_later) > 0:
        # We need to recurse and run more later.
        RealTimeLogger.get().debug("Postponing queueing {} samples".format(
            len(samples_to_run_later)))

        if len(samples_to_run_later) < num_per_call:
            # Just run them all in one batch
            job.addChildJobFn(recursively_run_samples, options, bin_dir_id,
                graph_name, region, index_dir_id, samples_to_run_later,
                num_per_call, cores=1, memory="4G", disk="4G")
        else:
            # Split them up
            # Python 2 integer division: each part gets
            # floor(len / num_per_call) samples.
            part_size = len(samples_to_run_later) / num_per_call

            RealTimeLogger.get().info("Splitting remainder of {} {} into {} "
                "parts of {}".format(graph_name, region, num_per_call,
                part_size))

            for i in xrange(num_per_call + 1):
                # Do 1 more part for any remainder

                # Grab this bit of the rest
                part = samples_to_run_later[(i * part_size) :
                    ((i + 1) * part_size)]

                if len(part) > 0:
                    # Make a job to run it
                    job.addChildJobFn(recursively_run_samples, options,
                        bin_dir_id, graph_name, region, index_dir_id, part,
                        num_per_call, cores=1, memory="4G", disk="4G")
def save_indexed_graph(job, options, index_dir_id, output_key):
    """
    Save the index dir tar file in the given output key.

    Runs as a child to ensure that the global file store can actually
    produce the file when asked (because within the same job, depending on Toil
    guarantees, it might still be uploading).
    """

    RealTimeLogger.get().info("Uploading {} to output store...".format(
        output_key))

    # Reconnect to the IO stores on every invocation; they can't be unpickled
    # on Azure.
    sample_store = IOStore.get(options.sample_store)
    out_store = IOStore.get(options.out_store)

    # Pull the tar.gz down out of the global file store.
    RealTimeLogger.get().info("Downloading global file {}".format(index_dir_id))
    tarball_path = job.fileStore.readGlobalFile(index_dir_id)

    byte_count = os.path.getsize(tarball_path)
    RealTimeLogger.get().info("Global file {} ({} bytes) for {} read".format(
        index_dir_id, byte_count, output_key))

    # Ship it to the output store under the requested key.
    out_store.write_output_file(tarball_path, output_key)

    RealTimeLogger.get().info("Index {} uploaded successfully".format(
        output_key))
def run_alignment(job, options, bin_dir_id, sample, graph_name, region,
    index_dir_id, sample_fastq_key, alignment_file_key, stats_file_key):
    """
    Align the given fastq from the input store against the given indexed
    graph (in the file store as a directory) and put the GAM and statistics in
    the given output keys in the output store.

    Assumes that the alignment actually needs to be redone.
    """

    # Set up the IO stores each time, since we can't unpickle them on Azure for
    # some reason.
    sample_store = IOStore.get(options.sample_store)
    out_store = IOStore.get(options.out_store)

    # How long did the alignment take to run, in seconds?
    run_time = None

    if bin_dir_id is not None:
        # Download the binaries
        bin_dir = "{}/bin".format(job.fileStore.getLocalTempDir())
        read_global_directory(job.fileStore, bin_dir_id, bin_dir)
        # We define a string we can just tack onto the binary name and get
        # either the system or the downloaded version.
        bin_prefix = bin_dir + "/"
    else:
        bin_prefix = ""

    # Download the indexed graph to a directory we can use
    graph_dir = "{}/graph".format(job.fileStore.getLocalTempDir())
    read_global_directory(job.fileStore, index_dir_id, graph_dir)

    # We know what the vg file in there will be named
    graph_file = "{}/graph.vg".format(graph_dir)

    # Also we need the sample fastq
    fastq_file = "{}/input.fq".format(job.fileStore.getLocalTempDir())
    sample_store.read_input_file(sample_fastq_key, fastq_file)

    # And a temp file for our aligner output
    output_file = "{}/output.gam".format(job.fileStore.getLocalTempDir())

    # Open the file stream for writing
    with open(output_file, "w") as alignment_file:

        # Start the aligner and have it write to the file

        # Plan out what to run
        vg_parts = ["{}vg".format(bin_prefix), "map", "-f", fastq_file,
            "-i", "-M2", "-W", "500", "-u", "0", "-U", "-t", str(job.cores), graph_file]

        if options.index_mode == "rocksdb":
            vg_parts += ["-d", graph_file + ".index", "-n3", "-k",
                str(options.kmer_size)]
        elif options.index_mode == "gcsa-kmer":
            # Use the new default context size in this case
            vg_parts += ["-x", graph_file + ".xg", "-g", graph_file + ".gcsa",
                "-n5", "-k", str(options.kmer_size)]
        elif options.index_mode == "gcsa-mem":
            # Don't pass the kmer size, so MEM matching is used
            vg_parts += ["-x", graph_file + ".xg", "-g", graph_file + ".gcsa",
                "-n5"]
        else:
            raise RuntimeError("invalid indexing mode: " + options.index_mode)

        RealTimeLogger.get().info(
            "Running VG for {} against {} {}: {}".format(sample, graph_name,
            region, " ".join(vg_parts)))

        # Mark when we start the alignment
        start_time = timeit.default_timer()
        process = subprocess.Popen(vg_parts, stdout=alignment_file)

        if process.wait() != 0:
            # Complain if vg dies
            raise RuntimeError("vg died with error {}".format(
                process.returncode))

        # Mark when it's done
        end_time = timeit.default_timer()
        run_time = end_time - start_time

    RealTimeLogger.get().info("Aligned {}".format(output_file))

    # Upload the alignment
    out_store.write_output_file(output_file, alignment_file_key)

    RealTimeLogger.get().info("Need to recompute stats for new "
        "alignment: {}".format(stats_file_key))

    # Add a follow-on to calculate stats. It only needs 2 cores since it's
    # not really parallel.
    job.addFollowOnJobFn(run_stats, options, bin_dir_id, index_dir_id,
        alignment_file_key, stats_file_key, run_time=run_time,
        cores=2, memory="4G", disk="10G")
def run_stats(job, options, bin_dir_id, index_dir_id, alignment_file_key,
stats_file_key, run_time=None):
"""
If the stats aren't done, or if they need to be re-done, retrieve the
alignment file from the output store under alignment_file_key and compute the
stats file, saving it under stats_file_key.
Uses index_dir_id to get the graph, and thus the reference sequence that
each read is aligned against, for the purpose of discounting Ns.
Can take a run time to put in the stats.
Assumes that stats actually do need to be computed, and overwrites any old
stats.
TODO: go through the proper file store (and cache) for getting alignment
data.
"""
# Set up the IO stores each time, since we can't unpickle them on Azure for
# some reason.
sample_store = IOStore.get(options.sample_store)
out_store = IOStore.get(options.out_store)
RealTimeLogger.get().info("Computing stats for {}".format(stats_file_key))
if bin_dir_id is not None:
# Download the binaries
bin_dir = "{}/bin".format(job.fileStore.getLocalTempDir())
read_global_directory(job.fileStore, bin_dir_id, bin_dir)
# We define a string we can just tack onto the binary name and get either
# the system or the downloaded version.
bin_prefix = bin_dir + "/"
else:
bin_prefix = ""
# Download the indexed graph to a directory we can use
graph_dir = "{}/graph".format(job.fileStore.getLocalTempDir())
read_global_directory(job.fileStore, index_dir_id, graph_dir)
# We know what the vg file in there will be named
graph_file = "{}/graph.vg".format(graph_dir)
# Load the node sequences into memory. This holds node sequence string by
# ID.
node_sequences = {}
# Read the alignments in in JSON-line format
read_graph = subprocess.Popen(["{}vg".format(bin_prefix), "view", "-j",
graph_file], stdout=subprocess.PIPE)
for line in read_graph.stdout:
# Parse the graph chunk JSON
graph_chunk = json.loads(line)
for node_dict in graph_chunk.get("node", []):
# For each node, store its sequence under its id. We want to crash
# if a node exists for which one or the other isn't defined.
node_sequences[node_dict["id"]] = node_dict["sequence"]
if read_graph.wait() != 0:
# Complain if vg dies
raise RuntimeError("vg died with error {}".format(
read_graph.returncode))
# Declare local files for everything
stats_file = "{}/stats.json".format(job.fileStore.getLocalTempDir())
alignment_file = "{}/output.gam".format(job.fileStore.getLocalTempDir())
# Download the alignment
out_store.read_input_file(alignment_file_key, alignment_file)
# Read the alignments in in JSON-line format
read_alignment = subprocess.Popen(["{}vg".format(bin_prefix), "view", "-aj",
alignment_file], stdout=subprocess.PIPE)
# Count up the stats
stats = {
"total_reads": 0,
"total_mapped": 0,
"total_multimapped": 0,
"mapped_lengths": collections.Counter(),
"unmapped_lengths": collections.Counter(),
"aligned_lengths": collections.Counter(),
"primary_scores": collections.Counter(),
"primary_mapqs": collections.Counter(),
"primary_identities": collections.Counter(),
"primary_mismatches": collections.Counter(),
"primary_indels": collections.Counter(),
"primary_substitutions": collections.Counter(),
"secondary_scores": collections.Counter(),
"secondary_mapqs": collections.Counter(),
"secondary_identities": collections.Counter(),
"secondary_mismatches": collections.Counter(),
"secondary_indels": collections.Counter(),
"secondary_substitutions": collections.Counter(),
"run_time": run_time
}
last_alignment = None
for line in read_alignment.stdout:
# Parse the alignment JSON
alignment = json.loads(line)
# How long is this read?
length = len(alignment["sequence"])
if alignment.has_key("score"):
# This alignment is aligned.
# Grab its score
score = alignment["score"]
# Get the mappings
mappings = alignment.get("path", {}).get("mapping", [])
# Calculate the exact match bases
matches = 0
# And total up the instances of indels (only counting those where
# the reference has no Ns, and which aren't leading or trailing soft
# clips)
indels = 0
# And total up the number of substitutions (mismatching/alternative
# bases in edits with equal lengths where the reference has no Ns).
substitutions = 0
# What should the denominator for substitution rate be for this
# read? How many bases are in the read and aligned?
aligned_length = 0
# What's the mapping quality?
mapq = alignment["mapping_quality"]
# And the identity?
identity = alignment["identity"]
for mapping_number, mapping in enumerate(mappings):
# Figure out what the reference sequence for this mapping should
# be
position = mapping.get("position", {})
if position.has_key("node_id"):
# We actually are mapped to a reference node
ref_sequence = node_sequences[position["node_id"]]
# Grab the offset
offset = position.get("offset", 0)
if mapping.get("is_reverse", False):
# We start at the offset base on the reverse strand.
# Add 1 to make the offset inclusive as an end poiint
ref_sequence = reverse_complement(
ref_sequence[0:offset + 1])
else:
# Just clip so we start at the specified offset
ref_sequence = ref_sequence[offset:]
else:
# We're aligned against no node, and thus an empty reference
# sequence (and thus must be all insertions)
ref_sequence = ""
# Start at the beginning of the reference sequence for the
# mapping.
index_in_ref = 0
# Pull out the edits
edits = mapping.get("edit", [])
for edit_number, edit in enumerate(edits):
# An edit may be a soft clip if it's either the first edit
# in the first mapping, or the last edit in the last
# mapping. This flag stores whether that is the case
# (although to actually be a soft clip it also has to be an
# insertion, and not either a substitution or a perfect
# match as spelled by the aligner).
may_be_soft_clip = ((edit_number == 0 and
mapping_number == 0) or
(edit_number == len(edits) - 1 and
mapping_number == len(mappings) - 1))
# Count up the Ns in the reference sequence for the edit. We
# get the part of the reference string that should belong to
# this edit.
reference_N_count = count_Ns(ref_sequence[
index_in_ref:index_in_ref + edit.get("from_length", 0)])
if edit.get("to_length", 0) == edit.get("from_length", 0):
# Add in the length of this edit if it's actually
# aligned (not an indel or softclip)
aligned_length += edit.get("to_length", 0)
if (not edit.has_key("sequence") and
edit.get("to_length", 0) == edit.get("from_length", 0)):
# The edit has equal from and to lengths, but no
# sequence provided.
# We found a perfect match edit. Grab its length
matches += edit["from_length"]
# We don't care about Ns when evaluating perfect
# matches. VG already split out any mismatches into non-
# perfect matches, and we ignore the N-matched-to-N
# case.
if not may_be_soft_clip and (edit.get("to_length", 0) !=
edit.get("from_length", 0)):
# This edit is an indel and isn't on the very end of a
# read.
if reference_N_count == 0:
# Only count the indel if it's not against an N in
# the reference
indels += 1
if (edit.get("to_length", 0) ==
edit.get("from_length", 0) and
edit.has_key("sequence")):
# The edit has equal from and to lengths, and a provided
# sequence. This edit is thus a SNP or MNP. It
# represents substitutions.
# We take as substituted all the bases except those
# opposite reference Ns. Sequence Ns are ignored.
substitutions += (edit.get("to_length", 0) -
reference_N_count)
# Pull those Ns out of the substitution rate denominator
# as well.
aligned_length -= reference_N_count
# We still count query Ns as "aligned" when not in indels
# Advance in the reference sequence
index_in_ref += edit.get("from_length", 0)
# Calculate mismatches as what's not perfect matches
mismatches = length - matches
if alignment.get("is_secondary", False):
# It's a multimapping. We can have max 1 per read, so it's a
# multimapped read.
if (last_alignment is None or
last_alignment.get("name") != alignment.get("name") or
last_alignment.get("is_secondary", False)):
# This is a secondary alignment without a corresponding primary
# alignment (which would have to be right before it given the
# way vg dumps buffers
raise RuntimeError("{} secondary alignment comes after "
"alignment of {} instead of corresponding primary "
"alignment\n".format(alignment.get("name"),
last_alignment.get("name") if last_alignment is not None
else "nothing"))
# Log its stats as multimapped
stats["total_multimapped"] += 1
stats["secondary_scores"][score] += 1
stats["secondary_mismatches"][mismatches] += 1
stats["secondary_indels"][indels] += 1
stats["secondary_substitutions"][substitutions] += 1
stats["secondary_mapqs"][mapq] += 1
stats["secondary_identities"][identity] += 1
else:
# Log its stats as primary. We'll get exactly one of these per
# read with any mappings.
stats["total_mapped"] += 1
stats["primary_scores"][score] += 1
stats["primary_mismatches"][mismatches] += 1
stats["primary_indels"][indels] += 1
stats["primary_substitutions"][substitutions] += 1
stats["primary_mapqs"][mapq] += 1
stats["primary_identities"][identity] += 1
# Record that a read of this length was mapped
stats["mapped_lengths"][length] += 1
# And that a read with this many aligned primary bases was found
stats["aligned_lengths"][aligned_length] += 1
# We won't see an unaligned primary alignment for this read, so
# count the read
stats["total_reads"] += 1
elif not alignment.get("is_secondary", False):
# We have an unmapped primary "alignment"
# Count the read by its primary alignment
stats["total_reads"] += 1
# Record that an unmapped read has this length
stats["unmapped_lengths"][length] += 1
# Save the alignment for checking for wayward secondaries
last_alignment = alignment
with open(stats_file, "w") as stats_handle:
# Save the stats as JSON
json.dump(stats, stats_handle)
if read_alignment.wait() != 0:
# Complain if vg dies
raise RuntimeError("vg died with error {}".format(
read_alignment.returncode))
# Now send the stats to the output store where they belong.
out_store.write_output_file(stats_file, stats_file_key)
def main(args):
    """
    Parse the command line and run the whole mapping evaluation.

    args is the program argument list, with args[0] being the executable
    name. The return value should be used as the program's exit code.
    """

    if len(args) == 2 and args[1] == "--test":
        # Self-test mode: just run the module's doctests.
        return doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)

    # Everything else goes through the real argument parser.
    options = parse_args(args)

    if options.too_old is not None:
        # Convert the --too_old string into a timezone-aware datetime.
        options.too_old = dateutil.parser.parse(options.too_old)
        assert(options.too_old.tzinfo != None)

    RealTimeLogger.start_master()

    # Read the server list into memory up front; file handles can't be
    # shipped over the network to worker nodes.
    options.server_list = list(options.server_list)

    # Build the root job of the Toil workflow.
    top_job = Job.wrapJobFn(run_all_alignments, options,
        cores=1, memory="4G", disk="50G")

    # Run the workflow and find out how many jobs didn't finish.
    failure_count = Job.Runner.startToil(top_job, options)

    if failure_count > 0:
        raise Exception("{} jobs failed!".format(failure_count))

    print("All jobs completed successfully")

    RealTimeLogger.stop_master()
# Only run the program when executed as a script, and use main()'s return
# value as the process exit code.
if __name__ == "__main__" :
    sys.exit(main(sys.argv))
# Fix for unmapped alignments with no mapq
#!/usr/bin/env python2.7
"""
parallelMappingEvaluation.py: Run the mapping evaluation on all the servers in
parallel.
BAM files with reads must have been already downloaded.
"""
import argparse, sys, os, os.path, random, subprocess, shutil, itertools, glob
import doctest, re, json, collections, time, timeit
import logging, logging.handlers, SocketServer, struct, socket, threading
import string
import urlparse
import fnmatch
import dateutil.parser
from toil.job import Job
from toillib import *
def parse_args(args):
    """
    Takes in the command-line arguments list (args), and returns a nice
    argparse result with fields for all the options.

    args includes the program name at args[0], which is stripped before
    parsing.

    Borrows heavily from the argparse documentation examples:
    <http://docs.python.org/library/argparse.html>
    """

    # Construct the parser (which is stored in parser)
    # Module docstring lives in __doc__
    # See http://python-forum.com/pythonforum/viewtopic.php?f=3&t=36847
    # And a formatter class so our examples in the docstring look good. Isn't
    # it convenient how we already wrapped it to 80 characters?
    # See http://docs.python.org/library/argparse.html#formatter-class
    parser = argparse.ArgumentParser(description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # Add the Toil options so the job store is the first argument
    Job.Runner.addToilOptions(parser)

    # General options
    parser.add_argument("server_list", type=argparse.FileType("r"),
        help="TSV file containing <region>\t<url> lines for servers to test")
    parser.add_argument("sample_store",
        help="sample input IOStore with <region>/<sample>/<sample>.bam.fq")
    parser.add_argument("out_store",
        help="output IOStore to create and fill with alignments and stats")
    parser.add_argument("--server_version", default="v0.6.g",
        help="server version to add to URLs")
    parser.add_argument("--sample_pattern", default="*",
        help="fnmatch-style pattern for sample names")
    parser.add_argument("--sample_limit", type=int, default=float("inf"),
        help="number of samples to use")
    parser.add_argument("--edge_max", type=int, default=0,
        help="maximum edges to cross in index")
    parser.add_argument("--kmer_size", type=int, default=10,
        help="size of kmers to use in indexing and mapping")
    parser.add_argument("--bin_url",
        default="https://hgvm.blob.core.windows.net/hgvm-bin",
        help="URL to download sg2vg and vg binaries from, without Docker")
    parser.add_argument("--use_path_binaries", action="store_true",
        help="use system vg and sg2vg instead of downloading them")
    parser.add_argument("--overwrite", default=False, action="store_true",
        help="overwrite existing result files")
    parser.add_argument("--restat", default=False, action="store_true",
        help="recompute and overwrite existing stats files")
    parser.add_argument("--reindex", default=False, action="store_true",
        help="don't re-use existing indexed graphs")
    parser.add_argument("--too_old", default=None, type=str,
        help="recompute stats files older than this date")
    parser.add_argument("--index_mode", choices=["rocksdb", "gcsa-kmer",
        "gcsa-mem"], default="gcsa-mem",
        help="type of vg index to use for mapping")
    parser.add_argument("--include_pruned", action="store_true",
        help="use the pruned graph in the index")
    parser.add_argument("--include_primary", action="store_true",
        help="use the primary path in the index")

    # The command line arguments start with the program name, which we don't
    # want to treat as an argument for argparse. So we remove it.
    args = args[1:]

    return parser.parse_args(args)
# Reverse complement needs a global translation table
# Maps each base to its complement (A<->T, C<->G); N maps to itself. Built
# once at import time so reverse_complement() doesn't rebuild it per call.
reverse_complement_translation_table = string.maketrans("ACGTN", "TGCAN")
def reverse_complement(sequence):
    """
    Return the reverse complement of the given DNA sequence.

    Follows algorithm from <http://stackoverflow.com/a/26615937>: complement
    with a translation table, then reverse.
    """

    if isinstance(sequence, unicode):
        # Byte-string translation tables need a byte string, so force the
        # sequence down to ASCII first.
        sequence = sequence.encode("ascii", "replace")

    # Complement every base in one pass, then flip the string around.
    complemented = sequence.translate(reverse_complement_translation_table)
    return complemented[::-1]
def count_Ns(sequence):
    """
    Return the number of N bases in the given DNA sequence.

    Only counts uppercase "N" characters, exactly as the original loop did.
    """

    # str.count does the same tally in a single C-level pass instead of a
    # per-character Python loop.
    return sequence.count("N")
def run_all_alignments(job, options):
    """
    Kick off one child job per server in the server_list TSV, each of which
    aligns samples against that server's graph and evaluates the results.
    """

    # Rebuild the IO stores locally each time; unpickling them on Azure
    # doesn't work.
    sample_store = IOStore.get(options.sample_store)
    out_store = IOStore.get(options.out_store)

    if options.use_path_binaries:
        # Use vg/sg2vg off the PATH; no binary directory is kept.
        bin_dir_id = None
    else:
        # Fetch the tools we need and stash them in the global file store.
        RealTimeLogger.get().info("Retrieving binaries from {}".format(
            options.bin_url))

        bin_dir = "{}/bin".format(job.fileStore.getLocalTempDir())
        robust_makedirs(bin_dir)

        for tool in ["sg2vg", "vg"]:
            # Download each tool and mark it executable.
            subprocess.check_call(["wget",
                "{}/{}".format(options.bin_url, tool),
                "-O", "{}/{}".format(bin_dir, tool)])
            os.chmod("{}/{}".format(bin_dir, tool), 0o744)

        # Upload the bin directory so children can grab it.
        bin_dir_id = write_global_directory(job.fileStore, bin_dir,
            cleanup=True)

    # The first line of the server list is a header; everything after is one
    # server per line.
    seen_header = False

    for line in options.server_list:
        if not seen_header:
            # Skip the header line.
            seen_header = True
            continue

        # Break each data line into its tab-separated fields.
        fields = line.split("\t")

        if fields[0].startswith("#") or fields[0].startswith("\n"):
            # Ignore comments and blank lines.
            continue

        # Only the first three fields matter to us.
        region, url, generator = fields[0:3]

        # Hand each server off to its own child job.
        job.addChildJobFn(run_region_alignments, options, bin_dir_id, region,
            url, cores=16, memory="100G", disk="50G")

        RealTimeLogger.get().info("Running child for {}".format(fields[1]))
def run_region_alignments(job, options, bin_dir_id, region, url):
    """
    For the given region, download, index, and then align to the given graph.

    url may be an http(s) GA4GH server URL or a file: URL to a .vg graph.
    Re-uses a previously-built index from the output store unless
    options.reindex is set. Queues a recursively_run_samples child to do the
    actual per-sample alignments.
    """

    RealTimeLogger.get().info("Running on {} for {}".format(url, region))

    # Set up the IO stores each time, since we can't unpickle them on Azure for
    # some reason.
    sample_store = IOStore.get(options.sample_store)
    out_store = IOStore.get(options.out_store)

    if bin_dir_id is not None:
        # Download the binaries
        bin_dir = "{}/bin".format(job.fileStore.getLocalTempDir())
        read_global_directory(job.fileStore, bin_dir_id, bin_dir)
        # We define a string we can just tack onto the binary name and get
        # either the system or the downloaded version.
        bin_prefix = bin_dir + "/"
    else:
        bin_prefix = ""

    # Parse the graph URL. It may be either an http(s) URL to a GA4GH server,
    # or a direct file: URL to a vg graph.
    url_parts = urlparse.urlparse(url, "file")

    # Either way, it has to include the graph base name, which we use to
    # identify the graph, and which has to contain the region name (like brca2)
    # and the graph type name (like cactus). For a server it's the last folder
    # in the URL, with a trailing slash, and for a file it's the name of the
    # .vg file.
    basename = re.match('.*/(.*)(/|\.vg)$', url_parts.path).group(1)

    # Get graph name (without region and its associated dash) from basename
    graph_name = basename.replace("-{}".format(region), "").replace(
        "{}-".format(region), "")

    # Where do we look for samples for this region in the input?
    region_dir = region.upper()

    # What samples do we do? List input sample names up to the given limit.
    input_samples = [n for n in sample_store.list_input_directory(region_dir) \
        if fnmatch.fnmatchcase(n, options.sample_pattern)]
    if len(input_samples) > options.sample_limit:
        input_samples = input_samples[:options.sample_limit]

    # Work out the directory for the alignments to be dumped in in the output
    alignment_dir = "alignments/{}/{}".format(region, graph_name)

    # Also for statistics
    stats_dir = "stats/{}/{}".format(region, graph_name)

    # What samples have been completed? Map from sample name to stats mtime.
    completed_samples = {}
    for filename, mtime in out_store.list_input_directory(stats_dir,
        with_times=True):
        # See if every file is a stats file
        match = re.match("(.*)\.json$", filename)

        if match and (options.too_old is None or mtime > options.too_old):
            # We found a sample's stats file, and it's new enough.
            completed_samples[match.group(1)] = mtime

        if match and (options.too_old is not None and mtime < options.too_old):
            # Say we hit an mtime thing
            RealTimeLogger.get().info("Need to re-run {} because "
                "{} < {}".format(match.group(1), mtime.ctime(),
                options.too_old.ctime()))

    RealTimeLogger.get().info("Already have {} completed samples for {} in "
        "{}".format(len(completed_samples), basename, stats_dir))

    # What samples haven't been done yet and need doing
    samples_to_run = []

    for sample in input_samples:
        # Split out over each sample
        if ((not options.overwrite) and (not options.restat) and
            completed_samples.has_key(sample)):
            # This is already done.
            RealTimeLogger.get().info("Skipping completed alignment of "
                "{} to {} {}".format(sample, graph_name, region))
            continue
        else:
            # We need to run this sample
            samples_to_run.append(sample)

    if len(samples_to_run) == 0 and not options.reindex:
        # Don't bother indexing the graph if all the samples are done, and we
        # didn't explicitly ask to do it.
        RealTimeLogger.get().info("Nothing to align to {}".format(basename))
        return

    # Where will the indexed graph go in the output
    index_key = "indexes/{}-{}-{}/{}/{}.tar.gz".format(options.index_mode,
        options.kmer_size, options.edge_max, region, graph_name)

    if (not options.reindex) and out_store.exists(index_key):
        # See if we have an index already available in the output store from a
        # previous run
        RealTimeLogger.get().info("Retrieving indexed {} graph from output "
            "store".format(basename))

        # Download the pre-made index directory
        tgz_file = "{}/index.tar.gz".format(job.fileStore.getLocalTempDir())
        out_store.read_input_file(index_key, tgz_file)

        # Save it to the global file store and keep around the ID.
        # Will be compatible with read_global_directory
        index_dir_id = job.fileStore.writeGlobalFile(tgz_file, cleanup=True)

        RealTimeLogger.get().info("Index for {} retrieved "
            "successfully".format(basename))
    else:
        # Download the graph, build the index, and store it in the output store

        # Work out where the graph goes; it will be graph.vg in here
        graph_dir = "{}/graph".format(job.fileStore.getLocalTempDir())
        robust_makedirs(graph_dir)

        graph_filename = "{}/graph.vg".format(graph_dir)

        # Download and fix up the graph with this ugly subprocess pipeline:
        # sg2vg "${URL}" -u | vg view -Jv - | vg mod -X 100 - |
        # vg ids -s - > "graphs/${BASENAME}.vg"
        with open(graph_filename, "w") as output_file:
            # Hold all the popen objects we need for this
            tasks = []

            if url_parts.scheme == "file":
                # Grab the vg graph from a local file
                RealTimeLogger.get().info("Reading {} to {}".format(
                    url, graph_filename))

                # Just cat the file. We need a process so we can do the
                # tasks[-1].stdout trick.
                tasks.append(subprocess.Popen(["cat", url_parts.path],
                    stdout=subprocess.PIPE))
            else:
                # Assume it's on a server; make the real URL with the version
                versioned_url = url + options.server_version

                RealTimeLogger.get().info("Downloading {} to {}".format(
                    versioned_url, graph_filename))

                # Do the download
                tasks.append(subprocess.Popen(["{}sg2vg".format(bin_prefix),
                    versioned_url, "-u"], stdout=subprocess.PIPE))

            # Convert to vg
            tasks.append(subprocess.Popen(["{}vg".format(bin_prefix),
                "view", "-Jv", "-"], stdin=tasks[-1].stdout,
                stdout=subprocess.PIPE))

            # And cut nodes
            tasks.append(subprocess.Popen(["{}vg".format(bin_prefix), "mod",
                "-X100", "-"], stdin=tasks[-1].stdout, stdout=subprocess.PIPE))

            # And sort ids
            tasks.append(subprocess.Popen(["{}vg".format(bin_prefix), "ids",
                "-s", "-"], stdin=tasks[-1].stdout, stdout=output_file))

            # Did we make it through all the tasks OK?
            for task in tasks:
                if task.wait() != 0:
                    raise RuntimeError("Pipeline step returned {}".format(
                        task.returncode))

        # TODO: We sometimes don't see the files written immediately, for some
        # reason. Maybe because we opened them? Anyway, this is a hack to wait
        # for them to be on disk and readable.
        time.sleep(1)

        # Now run the indexer.
        # TODO: support both indexing modes
        RealTimeLogger.get().info("Indexing {}".format(graph_filename))

        if options.index_mode == "rocksdb":
            # Make the RocksDB index
            subprocess.check_call(["{}vg".format(bin_prefix), "index", "-s",
                "-k", str(options.kmer_size), "-e", str(options.edge_max),
                "-t", str(job.cores), graph_filename, "-d",
                graph_filename + ".index"])

        elif (options.index_mode == "gcsa-kmer" or
            options.index_mode == "gcsa-mem"):
            # We want a GCSA2/xg index. We have to prune the graph ourselves.
            # See <https://github.com/vgteam/vg/issues/286>.

            # What will we use as our temp combined graph file (containing only
            # the bits of the graph we want to index, used for deduplication)?
            to_index_filename = "{}/to_index.vg".format(
                job.fileStore.getLocalTempDir())

            # Where will we save the kmers?
            kmers_filename = "{}/index.graph".format(
                job.fileStore.getLocalTempDir())

            with open(to_index_filename, "w") as to_index_file:
                if options.include_pruned:
                    RealTimeLogger.get().info("Pruning {} to {}".format(
                        graph_filename, to_index_filename))

                    # Prune out hard bits of the graph
                    tasks = []

                    # Prune out complex regions
                    tasks.append(subprocess.Popen(["{}vg".format(bin_prefix),
                        "mod", "-p", "-l", str(options.kmer_size),
                        "-t", str(job.cores),
                        "-e", str(options.edge_max), graph_filename],
                        stdout=subprocess.PIPE))

                    # Throw out short disconnected chunks
                    tasks.append(subprocess.Popen(["{}vg".format(bin_prefix),
                        "mod", "-S", "-l", str(options.kmer_size * 2),
                        "-t", str(job.cores), "-"], stdin=tasks[-1].stdout,
                        stdout=to_index_file))

                    # Did we make it through all the tasks OK?
                    for task in tasks:
                        if task.wait() != 0:
                            raise RuntimeError(
                                "Pipeline step returned {}".format(
                                task.returncode))

                    time.sleep(1)

                if options.include_primary:
                    # Then append in the primary path. Since we don't know what
                    # it's called, we retain "ref" and all the "19", "6", etc.
                    # paths from 1KG.
                    RealTimeLogger.get().info(
                        "Adding primary path to {}".format(to_index_filename))

                    # See
                    # https://github.com/vgteam/vg/issues/318#issuecomment-215102199

                    # Generate all the paths names we might have for primary
                    # paths. It should be "ref" but some graphs don't listen
                    ref_names = (["ref", "x", "X", "y", "Y", "m", "M"] +
                        [str(x) for x in xrange(1, 23)])

                    ref_options = []
                    for name in ref_names:
                        # Put each in a -r option to retain the path
                        ref_options.append("-r")
                        ref_options.append(name)

                    tasks = []

                    # Retain only the specified paths (only one should really
                    # exist)
                    tasks.append(subprocess.Popen(
                        ["{}vg".format(bin_prefix), "mod", "-N"] + ref_options +
                        ["-t", str(job.cores), graph_filename],
                        stdout=to_index_file))

                    # TODO: if we merged the primary path back on itself, it's
                    # possible for it to braid with itself. Right now we just
                    # ignore this and let those graphs take a super long time
                    # to index.

                    # Wait for this second pipeline. We don't parallelize with
                    # the first one so we don't need to use an extra cat step.
                    for task in tasks:
                        if task.wait() != 0:
                            raise RuntimeError(
                                "Pipeline step returned {}".format(
                                task.returncode))

                    # Wait to make sure no weird file-not-being-full bugs
                    # happen
                    # TODO: how do I wait on child process output?
                    time.sleep(1)

            time.sleep(1)

            # Now we have the combined to-index graph in one vg file. We'll
            # load it (which deduplicates nodes/edges) and then find kmers.

            # Save the problematic file
            out_store.write_output_file(to_index_filename,
                "debug/{}-{}-{}-{}-{}.vg".format(options.index_mode,
                options.kmer_size, options.edge_max, region, graph_name))

            with open(kmers_filename, "w") as kmers_file:
                tasks = []

                RealTimeLogger.get().info("Finding kmers in {} to {}".format(
                    to_index_filename, kmers_filename))

                # Deduplicate the graph
                tasks.append(subprocess.Popen(["{}vg".format(bin_prefix),
                    "view", "-v", to_index_filename],
                    stdout=subprocess.PIPE))

                # Make the GCSA2 kmers file
                tasks.append(subprocess.Popen(["{}vg".format(bin_prefix),
                    "kmers", "-g", "-B", "-k", str(options.kmer_size),
                    "-H", "1000000000", "-T", "1000000001",
                    "-t", str(job.cores), "-"], stdin=tasks[-1].stdout,
                    stdout=kmers_file))

                # Did we make it through all the tasks OK?
                task_number = 0
                for task in tasks:
                    if task.wait() != 0:
                        raise RuntimeError(
                            "Pipeline step {} returned {}".format(
                                task_number, task.returncode))
                    task_number += 1

                # Wait to make sure no weird file-not-being-full bugs happen
                # TODO: how do I wait on child process output?
                time.sleep(1)

            time.sleep(1)

            # Where do we put the GCSA2 index?
            gcsa_filename = graph_filename + ".gcsa"

            RealTimeLogger.get().info("GCSA-indexing {} to {}".format(
                kmers_filename, gcsa_filename))

            # Make the gcsa2 index. Make sure to use 3 doubling steps to work
            # around <https://github.com/vgteam/vg/issues/301>
            subprocess.check_call(["{}vg".format(bin_prefix), "index", "-t",
                str(job.cores), "-i", kmers_filename, "-g", gcsa_filename,
                "-X", "3"])

            # Where do we put the XG index?
            xg_filename = graph_filename + ".xg"

            RealTimeLogger.get().info("XG-indexing {} to {}".format(
                graph_filename, xg_filename))

            subprocess.check_call(["{}vg".format(bin_prefix), "index", "-t",
                str(job.cores), "-x", xg_filename, graph_filename])

        else:
            raise RuntimeError("Invalid indexing mode: " + options.index_mode)

        # Define a file to keep the compressed index in, so we can send it to
        # the output store.
        index_dir_tgz = "{}/index.tar.gz".format(
            job.fileStore.getLocalTempDir())

        # Now save the indexed graph directory to the file store. It can be
        # cleaned up since only our children use it.
        RealTimeLogger.get().info("Compressing index of {}".format(
            graph_filename))
        index_dir_id = write_global_directory(job.fileStore, graph_dir,
            cleanup=True, tee=index_dir_tgz)

        # Save it as output
        RealTimeLogger.get().info("Uploading index of {}".format(
            graph_filename))
        out_store.write_output_file(index_dir_tgz, index_key)
        RealTimeLogger.get().info("Index {} uploaded successfully".format(
            index_key))

    # Hand the samples off to a batching child job; see
    # recursively_run_samples for why we don't spawn them all here.
    RealTimeLogger.get().info("Queueing alignment of {} samples to "
        "{} {}".format(len(samples_to_run), graph_name, region))

    job.addChildJobFn(recursively_run_samples, options, bin_dir_id,
        graph_name, region, index_dir_id, samples_to_run,
        cores=1, memory="4G", disk="4G")

    RealTimeLogger.get().info("Done making children for {}".format(basename))
def recursively_run_samples(job, options, bin_dir_id, graph_name, region,
    index_dir_id, samples_to_run, num_per_call=10):
    """
    Create child jobs to run a few samples from the samples_to_run list, and a
    recursive child job to create a few more.

    This is a hack to deal with the problems produced by having a job with
    thousands of children on the Azure job store: the job graph gets cut up
    into tiny chunks of data and stored as table values, and when you have
    many table store operations one of them is likely to fail and screw up
    your whole serialization process.

    We have some logic here to decide how much of the sample needs to be
    rerun. If we get a sample, all we know is that it doesn't have an up to
    date stats file, but it may or may not have an alignment file already.
    """

    # Set up the IO stores each time, since we can't unpickle them on Azure
    # for some reason.
    sample_store = IOStore.get(options.sample_store)
    out_store = IOStore.get(options.out_store)

    # Take num_per_call samples to run in this invocation; the rest get
    # postponed to recursive children.
    samples_to_run_now = samples_to_run[:num_per_call]
    samples_to_run_later = samples_to_run[num_per_call:]

    # Work out where samples for this region live
    region_dir = region.upper()

    # Work out the directory for the alignments to be dumped in in the output
    alignment_dir = "alignments/{}/{}".format(region, graph_name)

    # Also for statistics
    stats_dir = "stats/{}/{}".format(region, graph_name)

    for sample in samples_to_run_now:
        # Split out over each sample that needs to be run

        # For each sample, know the FQ name
        sample_fastq = "{}/{}/{}.bam.fq".format(region_dir, sample, sample)

        # And know where we're going to put the output
        alignment_file_key = "{}/{}.gam".format(alignment_dir, sample)
        stats_file_key = "{}/{}.json".format(stats_dir, sample)

        if (not options.overwrite and out_store.exists(alignment_file_key)):
            # If the alignment exists and is OK, we can skip the alignment
            mtime = out_store.get_mtime(stats_file_key)

            # NOTE(review): when options.too_old is None this relies on
            # Python 2's "datetime < None is False" ordering; under Python 3
            # it would raise TypeError. Confirm before porting.
            if mtime is None or mtime < options.too_old or options.restat:
                # All we need to do for this sample is run stats
                RealTimeLogger.get().info("Queueing stat recalculation"
                    " of {} on {} {}".format(sample, graph_name, region))

                job.addFollowOnJobFn(run_stats, options, bin_dir_id,
                    index_dir_id, alignment_file_key, stats_file_key,
                    run_time=None, cores=2, memory="4G", disk="10G")
            else:
                # The stats are up to date and the alignment doesn't need
                # rerunning. This shouldn't happen because this sample
                # shouldn't be on the todo list. But it means we can just skip
                # the sample.
                RealTimeLogger.get().warning("SKIPPING sample "
                    "{} on {} {}".format(sample, graph_name, region))
        else:
            # Otherwise we need to do an alignment, and then stats.
            RealTimeLogger.get().info("Queueing alignment"
                " of {} to {} {}".format(sample, graph_name, region))

            # Go and bang that input fastq against the correct indexed graph.
            # Its output will go to the right place in the output store.
            job.addChildJobFn(run_alignment, options, bin_dir_id, sample,
                graph_name, region, index_dir_id, sample_fastq,
                alignment_file_key, stats_file_key,
                cores=16, memory="100G", disk="50G")

    if len(samples_to_run_later) > 0:
        # We need to recurse and run more later.
        RealTimeLogger.get().debug("Postponing queueing {} samples".format(
            len(samples_to_run_later)))

        if len(samples_to_run_later) < num_per_call:
            # Just run them all in one batch
            job.addChildJobFn(recursively_run_samples, options, bin_dir_id,
                graph_name, region, index_dir_id, samples_to_run_later,
                num_per_call, cores=1, memory="4G", disk="4G")
        else:
            # Split them up. Python 2 integer division floors, so each of the
            # num_per_call parts gets part_size samples, plus one extra part
            # below for any remainder.
            part_size = len(samples_to_run_later) / num_per_call

            RealTimeLogger.get().info("Splitting remainder of {} {} into {} "
                "parts of {}".format(graph_name, region, num_per_call,
                part_size))

            for i in xrange(num_per_call + 1):
                # Do 1 more part for any remainder

                # Grab this bit of the rest
                part = samples_to_run_later[(i * part_size) :
                    ((i + 1) * part_size)]

                if len(part) > 0:
                    # Make a job to run it
                    job.addChildJobFn(recursively_run_samples, options,
                        bin_dir_id, graph_name, region, index_dir_id, part,
                        num_per_call, cores=1, memory="4G", disk="4G")
def save_indexed_graph(job, options, index_dir_id, output_key):
    """
    Copy the indexed-graph tar file out of the Toil global file store and
    into the output IOStore under output_key.

    Runs as a child job so that, per Toil's guarantees, the global file has
    finished uploading and is readable by the time we ask for it.
    """
    RealTimeLogger.get().info("Uploading {} to output store...".format(
        output_key))

    # IOStores are rebuilt in every job because they can't be unpickled on
    # Azure.
    sample_store = IOStore.get(options.sample_store)
    out_store = IOStore.get(options.out_store)

    # Pull the tar.gz down out of the global file store.
    RealTimeLogger.get().info("Downloading global file {}".format(index_dir_id))
    tar_path = job.fileStore.readGlobalFile(index_dir_id)

    tar_size = os.path.getsize(tar_path)
    RealTimeLogger.get().info("Global file {} ({} bytes) for {} read".format(
        index_dir_id, tar_size, output_key))

    # Ship it to the output store under the requested key.
    out_store.write_output_file(tar_path, output_key)

    RealTimeLogger.get().info("Index {} uploaded successfully".format(
        output_key))
def run_alignment(job, options, bin_dir_id, sample, graph_name, region,
    index_dir_id, sample_fastq_key, alignment_file_key, stats_file_key):
    """
    Align the given fastq from the input store against the given indexed
    graph (in the file store as a directory) and put the GAM and statistics in
    the given output keys in the output store.

    Assumes that the alignment actually needs to be redone.
    """

    # Set up the IO stores each time, since we can't unpickle them on Azure for
    # some reason.
    sample_store = IOStore.get(options.sample_store)
    out_store = IOStore.get(options.out_store)

    # How long did the alignment take to run, in seconds?
    run_time = None

    if bin_dir_id is not None:
        # Download the binaries
        bin_dir = "{}/bin".format(job.fileStore.getLocalTempDir())
        read_global_directory(job.fileStore, bin_dir_id, bin_dir)
        # We define a string we can just tack onto the binary name and get
        # either the system or the downloaded version.
        bin_prefix = bin_dir + "/"
    else:
        # No binaries shipped; rely on vg being on the PATH.
        bin_prefix = ""

    # Download the indexed graph to a directory we can use
    graph_dir = "{}/graph".format(job.fileStore.getLocalTempDir())
    read_global_directory(job.fileStore, index_dir_id, graph_dir)

    # We know what the vg file in there will be named
    graph_file = "{}/graph.vg".format(graph_dir)

    # Also we need the sample fastq
    fastq_file = "{}/input.fq".format(job.fileStore.getLocalTempDir())
    sample_store.read_input_file(sample_fastq_key, fastq_file)

    # And a temp file for our aligner output
    output_file = "{}/output.gam".format(job.fileStore.getLocalTempDir())

    # Open the file stream for writing
    with open(output_file, "w") as alignment_file:

        # Start the aligner and have it write GAM to the file.

        # Plan out what to run
        vg_parts = ["{}vg".format(bin_prefix), "map", "-f", fastq_file,
            "-i", "-M2", "-W", "500", "-u", "0", "-U", "-t", str(job.cores), graph_file]

        # The extra index arguments depend on which index type was built.
        if options.index_mode == "rocksdb":
            vg_parts += ["-d", graph_file + ".index", "-n3", "-k",
                str(options.kmer_size)]
        elif options.index_mode == "gcsa-kmer":
            # Use the new default context size in this case
            vg_parts += ["-x", graph_file + ".xg", "-g", graph_file + ".gcsa",
                "-n5", "-k", str(options.kmer_size)]
        elif options.index_mode == "gcsa-mem":
            # Don't pass the kmer size, so MEM matching is used
            vg_parts += ["-x", graph_file + ".xg", "-g", graph_file + ".gcsa",
                "-n5"]
        else:
            raise RuntimeError("invalid indexing mode: " + options.index_mode)

        RealTimeLogger.get().info(
            "Running VG for {} against {} {}: {}".format(sample, graph_name,
            region, " ".join(vg_parts)))

        # Mark when we start the alignment
        start_time = timeit.default_timer()
        process = subprocess.Popen(vg_parts, stdout=alignment_file)

        if process.wait() != 0:
            # Complain if vg dies
            raise RuntimeError("vg died with error {}".format(
                process.returncode))

        # Mark when it's done
        end_time = timeit.default_timer()
        run_time = end_time - start_time

    RealTimeLogger.get().info("Aligned {}".format(output_file))

    # Upload the alignment
    out_store.write_output_file(output_file, alignment_file_key)

    RealTimeLogger.get().info("Need to recompute stats for new "
        "alignment: {}".format(stats_file_key))

    # Add a follow-on to calculate stats. It only needs 2 cores since it's
    # not really parallel.
    job.addFollowOnJobFn(run_stats, options, bin_dir_id, index_dir_id,
        alignment_file_key, stats_file_key, run_time=run_time,
        cores=2, memory="4G", disk="10G")
def run_stats(job, options, bin_dir_id, index_dir_id, alignment_file_key,
    stats_file_key, run_time=None):
    """
    If the stats aren't done, or if they need to be re-done, retrieve the
    alignment file from the output store under alignment_file_key and compute
    the stats file, saving it under stats_file_key.

    Uses index_dir_id to get the graph, and thus the reference sequence that
    each read is aligned against, for the purpose of discounting Ns.

    Can take a run time to put in the stats.

    Assumes that stats actually do need to be computed, and overwrites any old
    stats.

    TODO: go through the proper file store (and cache) for getting alignment
    data.
    """
    # Fix: replaced the deprecated dict.has_key() calls with the "in"
    # operator (has_key was removed in Python 3; "in" works on 2 and 3).

    # Set up the IO stores each time, since we can't unpickle them on Azure for
    # some reason.
    sample_store = IOStore.get(options.sample_store)
    out_store = IOStore.get(options.out_store)

    RealTimeLogger.get().info("Computing stats for {}".format(stats_file_key))

    if bin_dir_id is not None:
        # Download the binaries
        bin_dir = "{}/bin".format(job.fileStore.getLocalTempDir())
        read_global_directory(job.fileStore, bin_dir_id, bin_dir)
        # We define a string we can just tack onto the binary name and get either
        # the system or the downloaded version.
        bin_prefix = bin_dir + "/"
    else:
        bin_prefix = ""

    # Download the indexed graph to a directory we can use
    graph_dir = "{}/graph".format(job.fileStore.getLocalTempDir())
    read_global_directory(job.fileStore, index_dir_id, graph_dir)

    # We know what the vg file in there will be named
    graph_file = "{}/graph.vg".format(graph_dir)

    # Load the node sequences into memory. This holds node sequence string by
    # ID.
    node_sequences = {}

    # Read the graph in JSON-line format
    read_graph = subprocess.Popen(["{}vg".format(bin_prefix), "view", "-j",
        graph_file], stdout=subprocess.PIPE)

    for line in read_graph.stdout:
        # Parse the graph chunk JSON
        graph_chunk = json.loads(line)

        for node_dict in graph_chunk.get("node", []):
            # For each node, store its sequence under its id. We want to crash
            # if a node exists for which one or the other isn't defined.
            node_sequences[node_dict["id"]] = node_dict["sequence"]

    if read_graph.wait() != 0:
        # Complain if vg dies
        raise RuntimeError("vg died with error {}".format(
            read_graph.returncode))

    # Declare local files for everything
    stats_file = "{}/stats.json".format(job.fileStore.getLocalTempDir())
    alignment_file = "{}/output.gam".format(job.fileStore.getLocalTempDir())

    # Download the alignment
    out_store.read_input_file(alignment_file_key, alignment_file)

    # Read the alignments in in JSON-line format
    read_alignment = subprocess.Popen(["{}vg".format(bin_prefix), "view", "-aj",
        alignment_file], stdout=subprocess.PIPE)

    # Count up the stats
    stats = {
        "total_reads": 0,
        "total_mapped": 0,
        "total_multimapped": 0,
        "mapped_lengths": collections.Counter(),
        "unmapped_lengths": collections.Counter(),
        "aligned_lengths": collections.Counter(),
        "primary_scores": collections.Counter(),
        "primary_mapqs": collections.Counter(),
        "primary_identities": collections.Counter(),
        "primary_mismatches": collections.Counter(),
        "primary_indels": collections.Counter(),
        "primary_substitutions": collections.Counter(),
        "secondary_scores": collections.Counter(),
        "secondary_mapqs": collections.Counter(),
        "secondary_identities": collections.Counter(),
        "secondary_mismatches": collections.Counter(),
        "secondary_indels": collections.Counter(),
        "secondary_substitutions": collections.Counter(),
        "run_time": run_time
    }

    # Track the previous alignment so secondaries can be matched up with the
    # primary alignment they follow in vg's output.
    last_alignment = None

    for line in read_alignment.stdout:
        # Parse the alignment JSON
        alignment = json.loads(line)

        # How long is this read?
        length = len(alignment["sequence"])

        if "score" in alignment:
            # This alignment is aligned.
            # Grab its score
            score = alignment["score"]

            # Get the mappings
            mappings = alignment.get("path", {}).get("mapping", [])

            # Calculate the exact match bases
            matches = 0

            # And total up the instances of indels (only counting those where
            # the reference has no Ns, and which aren't leading or trailing soft
            # clips)
            indels = 0

            # And total up the number of substitutions (mismatching/alternative
            # bases in edits with equal lengths where the reference has no Ns).
            substitutions = 0

            # What should the denominator for substitution rate be for this
            # read? How many bases are in the read and aligned?
            aligned_length = 0

            # What's the mapping quality? May not be defined on some reads.
            mapq = alignment.get("mapping_quality", 0.0)

            # And the identity?
            identity = alignment["identity"]

            for mapping_number, mapping in enumerate(mappings):
                # Figure out what the reference sequence for this mapping should
                # be
                position = mapping.get("position", {})
                if "node_id" in position:
                    # We actually are mapped to a reference node
                    ref_sequence = node_sequences[position["node_id"]]

                    # Grab the offset
                    offset = position.get("offset", 0)

                    if mapping.get("is_reverse", False):
                        # We start at the offset base on the reverse strand.
                        # Add 1 to make the offset inclusive as an end point
                        ref_sequence = reverse_complement(
                            ref_sequence[0:offset + 1])
                    else:
                        # Just clip so we start at the specified offset
                        ref_sequence = ref_sequence[offset:]
                else:
                    # We're aligned against no node, and thus an empty reference
                    # sequence (and thus must be all insertions)
                    ref_sequence = ""

                # Start at the beginning of the reference sequence for the
                # mapping.
                index_in_ref = 0

                # Pull out the edits
                edits = mapping.get("edit", [])

                for edit_number, edit in enumerate(edits):
                    # An edit may be a soft clip if it's either the first edit
                    # in the first mapping, or the last edit in the last
                    # mapping. This flag stores whether that is the case
                    # (although to actually be a soft clip it also has to be an
                    # insertion, and not either a substitution or a perfect
                    # match as spelled by the aligner).
                    may_be_soft_clip = ((edit_number == 0 and
                        mapping_number == 0) or
                        (edit_number == len(edits) - 1 and
                        mapping_number == len(mappings) - 1))

                    # Count up the Ns in the reference sequence for the edit. We
                    # get the part of the reference string that should belong to
                    # this edit.
                    reference_N_count = count_Ns(ref_sequence[
                        index_in_ref:index_in_ref + edit.get("from_length", 0)])

                    if edit.get("to_length", 0) == edit.get("from_length", 0):
                        # Add in the length of this edit if it's actually
                        # aligned (not an indel or softclip)
                        aligned_length += edit.get("to_length", 0)

                    if ("sequence" not in edit and
                        edit.get("to_length", 0) == edit.get("from_length", 0)):
                        # The edit has equal from and to lengths, but no
                        # sequence provided.

                        # We found a perfect match edit. Grab its length
                        matches += edit["from_length"]

                        # We don't care about Ns when evaluating perfect
                        # matches. VG already split out any mismatches into non-
                        # perfect matches, and we ignore the N-matched-to-N
                        # case.

                    if not may_be_soft_clip and (edit.get("to_length", 0) !=
                        edit.get("from_length", 0)):
                        # This edit is an indel and isn't on the very end of a
                        # read.
                        if reference_N_count == 0:
                            # Only count the indel if it's not against an N in
                            # the reference
                            indels += 1

                    if (edit.get("to_length", 0) ==
                        edit.get("from_length", 0) and
                        "sequence" in edit):
                        # The edit has equal from and to lengths, and a provided
                        # sequence. This edit is thus a SNP or MNP. It
                        # represents substitutions.

                        # We take as substituted all the bases except those
                        # opposite reference Ns. Sequence Ns are ignored.
                        substitutions += (edit.get("to_length", 0) -
                            reference_N_count)

                        # Pull those Ns out of the substitution rate denominator
                        # as well.
                        aligned_length -= reference_N_count

                        # We still count query Ns as "aligned" when not in
                        # indels

                    # Advance in the reference sequence
                    index_in_ref += edit.get("from_length", 0)

            # Calculate mismatches as what's not perfect matches
            mismatches = length - matches

            if alignment.get("is_secondary", False):
                # It's a multimapping. We can have max 1 per read, so it's a
                # multimapped read.

                if (last_alignment is None or
                    last_alignment.get("name") != alignment.get("name") or
                    last_alignment.get("is_secondary", False)):

                    # This is a secondary alignment without a corresponding primary
                    # alignment (which would have to be right before it given the
                    # way vg dumps buffers
                    raise RuntimeError("{} secondary alignment comes after "
                        "alignment of {} instead of corresponding primary "
                        "alignment\n".format(alignment.get("name"),
                        last_alignment.get("name") if last_alignment is not None
                        else "nothing"))

                # Log its stats as multimapped
                stats["total_multimapped"] += 1
                stats["secondary_scores"][score] += 1
                stats["secondary_mismatches"][mismatches] += 1
                stats["secondary_indels"][indels] += 1
                stats["secondary_substitutions"][substitutions] += 1
                stats["secondary_mapqs"][mapq] += 1
                stats["secondary_identities"][identity] += 1
            else:
                # Log its stats as primary. We'll get exactly one of these per
                # read with any mappings.
                stats["total_mapped"] += 1
                stats["primary_scores"][score] += 1
                stats["primary_mismatches"][mismatches] += 1
                stats["primary_indels"][indels] += 1
                stats["primary_substitutions"][substitutions] += 1
                stats["primary_mapqs"][mapq] += 1
                stats["primary_identities"][identity] += 1

                # Record that a read of this length was mapped
                stats["mapped_lengths"][length] += 1

                # And that a read with this many aligned primary bases was found
                stats["aligned_lengths"][aligned_length] += 1

                # We won't see an unaligned primary alignment for this read, so
                # count the read
                stats["total_reads"] += 1

        elif not alignment.get("is_secondary", False):
            # We have an unmapped primary "alignment"

            # Count the read by its primary alignment
            stats["total_reads"] += 1

            # Record that an unmapped read has this length
            stats["unmapped_lengths"][length] += 1

        # Save the alignment for checking for wayward secondaries
        last_alignment = alignment

    with open(stats_file, "w") as stats_handle:
        # Save the stats as JSON
        json.dump(stats, stats_handle)

    if read_alignment.wait() != 0:
        # Complain if vg dies
        raise RuntimeError("vg died with error {}".format(
            read_alignment.returncode))

    # Now send the stats to the output store where they belong.
    out_store.write_output_file(stats_file, stats_file_key)
def main(args):
    """
    Parses command line arguments and does the work of the program.

    "args" specifies the program arguments, with args[0] being the executable
    name. The return value should be used as the program's exit code.
    """

    if len(args) == 2 and args[1] == "--test":
        # Run the tests
        return doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)

    options = parse_args(args)  # This holds the nicely-parsed options object

    if options.too_old is not None:
        # Parse the too-old date
        options.too_old = dateutil.parser.parse(options.too_old)
        # The cutoff must be timezone-aware so comparisons are well-defined.
        # Fix: identity comparison with None ("is not") instead of "!= None".
        assert options.too_old.tzinfo is not None

    RealTimeLogger.start_master()

    # Pre-read the input file so we don't try to send file handles over the
    # network.
    options.server_list = list(options.server_list)

    # Make a root job
    root_job = Job.wrapJobFn(run_all_alignments, options,
        cores=1, memory="4G", disk="50G")

    # Run it and see how many jobs fail
    failed_jobs = Job.Runner.startToil(root_job, options)

    if failed_jobs > 0:
        raise Exception("{} jobs failed!".format(failed_jobs))

    print("All jobs completed successfully")

    RealTimeLogger.stop_master()
if __name__ == "__main__" :
    # Run as a script: propagate main()'s return value as the exit code.
    sys.exit(main(sys.argv))
|
import re, tkFont, copy, random, time, tkMessageBox, json, os.path
from Tkinter import *
from tkFileDialog import askopenfilename, asksaveasfilename
from functools import partial
#the squares that a checker can move into from each position
blackMoveMapping = {1:[5, 6],
2:[6, 7],
3:[7, 8],
4:[8],
5:[9],
6:[9, 10],
7:[10, 11],
8:[11, 12],
9:[13, 14],
10:[14, 15],
11:[15, 16],
12:[16],
13:[17],
14:[17, 18],
15:[18, 19],
16:[19, 20],
17:[21, 22],
18:[22, 23],
19:[23, 24],
20:[24],
21:[25],
22:[25, 26],
23:[26, 27],
24:[27, 28],
25:[29, 30],
26:[30, 31],
27:[31, 32],
28:[32],
29:[],
30:[],
31:[],
32:[]}
whiteMoveMapping = {1:[],
2:[],
3:[],
4:[],
5:[1],
6:[1, 2],
7:[2, 3],
8:[3, 4],
9:[5, 6],
10:[6, 7],
11:[7, 8],
12:[8],
13:[9],
14:[9, 10],
15:[10, 11],
16:[11, 12],
17:[13, 14],
18:[14, 15],
19:[15, 16],
20:[16],
21:[17],
22:[17, 18],
23:[18, 19],
24:[19, 20],
25:[21, 22],
26:[22, 23],
27:[23, 24],
28:[24],
29:[25],
30:[25, 26],
31:[26, 27],
32:[27, 28]}
# in each list(for each position) first is the piece to be jumped over, second is
# the landing spot
blackJumpMapping = {1:[[6, 10]],
2:[[6, 9], [7, 11]],
3:[[7, 10], [8, 12]],
4:[[8, 11]],
5:[[9, 14]],
6:[[9, 13], [10, 15]],
7:[[10, 14], [11, 16]],
8:[[11, 15]],
9:[[14, 18]],
10:[[14, 17], [15, 19]],
11:[[15, 18], [16, 20]],
12:[[16, 19]],
13:[[17, 22]],
14:[[17, 21], [18, 23]],
15:[[18, 22], [19, 24]],
16:[[19, 23]],
17:[[22, 26]],
18:[[22, 25], [23, 27]],
19:[[23, 26], [24, 28]],
20:[[24, 27]],
21:[[25, 30]],
22:[[25, 29], [26, 31]],
23:[[26, 30], [27, 32]],
24:[[27, 31]],
25:[],
26:[],
27:[],
28:[],
29:[],
30:[],
31:[],
32:[]}
whiteJumpMapping = {1:[],
2:[],
3:[],
4:[],
5:[],
6:[],
7:[],
8:[],
9:[[6, 2]],
10:[[6, 1], [7, 3]],
11:[[7, 2], [8, 4]],
12:[[8, 3]],
13:[[9, 6]],
14:[[9, 5], [10, 7]],
15:[[10, 6], [11, 8]],
16:[[11, 7]],
17:[[14, 10]],
18:[[14, 9], [15, 11]],
19:[[15, 10], [16, 12]],
20:[[16, 11]],
21:[[17, 14]],
22:[[17, 13], [18, 15]],
23:[[18, 14], [19, 16]],
24:[[19, 15]],
25:[[22, 18]],
26:[[22, 17], [23, 19]],
27:[[23, 18], [24, 20]],
28:[[24, 19]],
29:[[25, 22]],
30:[[25, 21], [26, 23]],
31:[[26, 22], [27, 24]],
32:[[27, 23]]}
kingMoveMapping = {1:[5, 6],
2:[6, 7],
3:[7, 8],
4:[8],
5:[1, 9],
6:[1, 2, 9, 10],
7:[2, 3, 10, 11],
8:[3, 4, 11, 12],
9:[5, 6, 13, 14],
10:[6, 7, 14, 15],
11:[7, 8, 15, 16],
12:[8, 16],
13:[9, 17],
14:[9, 10, 17, 18],
15:[10, 11, 18, 19],
16:[11, 12, 19, 20],
17:[13, 14, 21, 22],
18:[14, 15, 22, 23],
19:[15, 16, 23, 24],
20:[16, 24],
21:[17, 25],
22:[17, 18, 25, 26],
23:[18, 19, 26, 27],
24:[19, 20, 27, 28],
25:[21, 22, 29, 30],
26:[22, 23, 30, 31],
27:[23, 24, 31, 32],
28:[24, 32],
29:[25],
30:[25, 26],
31:[26, 27],
32:[27, 28]}
kingJumpMapping = {1:[[6, 10]],
2:[[6, 9], [7, 11]],
3:[[7, 10], [8, 12]],
4:[[8, 11]],
5:[[9, 14]],
6:[[9, 13], [10, 15]],
7:[[10, 14], [11, 16]],
8:[[11, 15]],
9:[[6, 2], [14, 18]],
10:[[6, 1], [7, 3], [14, 17], [15, 19]],
11:[[7, 2], [8, 4], [15, 18], [16, 20]],
12:[[8, 3], [16, 19]],
13:[[9, 6], [17, 22]],
14:[[9, 5], [10, 7], [17, 21], [18, 23]],
15:[[10, 6], [11, 8], [18, 22], [19, 24]],
16:[[11, 7], [19, 23]],
17:[[14, 10], [22, 26]],
18:[[14, 9], [15, 11], [22, 25], [23, 27]],
19:[[15, 10], [16, 12], [23, 26], [24, 28]],
20:[[16, 11], [24, 27]],
21:[[17, 14], [25, 30]],
22:[[17, 13], [18, 15], [25, 29], [26, 31]],
23:[[18, 14], [19, 16], [26, 30], [27, 32]],
24:[[19, 15], [27, 31]],
25:[[22, 18]],
26:[[22, 17], [23, 19]],
27:[[23, 18], [24, 20]],
28:[[24, 19]],
29:[[25, 22]],
30:[[25, 21], [26, 23]],
31:[[26, 22], [27, 24]],
32:[[27, 23]]}
# Per-side lookup tables: 'w' selects the white tables, 'b' the black ones.
moveMappings = {'w':whiteMoveMapping, 'b':blackMoveMapping}
jumpMappings = {'w':whiteJumpMapping, 'b':blackJumpMapping}
# File the learned evaluation weights are persisted to between runs.
weightsFileName = 'weights.json'
learnConstant = 0.1 #learning rate used by the weight-update rule
initialWeight = 0.5 #starting value for every feature weight
#0 == empty
#1 == black checker
#2 == black king
#3 == white checker
#4 == white king
def makeBoard():
    """
    Return the starting board: a 32-entry list with piece code 3 on squares
    0-11, empty (0) on 12-19, and piece code 1 on squares 20-31.
    """
    # Build the three fixed regions by concatenation rather than filling a
    # zeroed list in place.
    return [3] * 12 + [0] * 8 + [1] * 12
def makeInitialWeights():
    """Return a fresh 4x32 weight matrix with every entry set to initialWeight."""
    # Four independent rows (one per piece code), each 32 squares wide.
    return [[initialWeight] * 32 for _ in range(4)]
def getFeatures(board):
    """
    Return four indicator vectors for the board, one per piece code 1..4:
    entry j of vector i is 1 when board[j] holds piece code i+1, else 0.
    """
    return [[1 if square == piece else 0 for square in board]
            for piece in range(1, 5)]
def evaluateFeatures(features, weights):
    """
    Score a feature matrix: the element-wise dot product of the 4x32 weight
    matrix with the 4x32 indicator-feature matrix.
    """
    # Same traversal order as a nested loop, expressed as a single sum.
    return sum(weights[i][j] * features[i][j]
               for i in range(4) for j in range(32))
def evaluateBoard(board, weights):
    # Score a raw board: extract its indicator features, then apply weights.
    return evaluateFeatures(getFeatures(board), weights)
def updateWeights(trainingData, weights, didWin):
    """
    One temporal-difference training pass over a finished game, mutating
    `weights` in place.

    trainingData is the chronological list of boards the learner produced.
    Each board's training target is the current estimate of its successor;
    the final board's target is +100 for a win and -100 for a loss.
    """
    n = len(trainingData)
    estimates = [evaluateBoard(b, weights) for b in trainingData]

    # Build the target values: successor estimates, with the game outcome
    # pinned on the terminal board.
    values = [0 for _ in range(n)]
    values[len(values) - 1] = 100 if didWin else -100
    for i in range(n - 1):
        values[i] = estimates[i + 1]

    # Gradient-style step on every feature weight.
    for position, target, estimate in zip(trainingData, values, estimates):
        features = getFeatures(position)
        step = learnConstant * (target - estimate)
        for j in range(len(weights)):
            row = weights[j]
            for k in range(len(row)):
                row[k] = row[k] + step * features[j][k]
def getBestPossibleBoard(boards, weights):
    """
    Return the board in `boards` with the highest evaluation under `weights`.
    Ties keep the earliest board in the list.
    """
    best = boards[0]
    bestValue = evaluateBoard(best, weights)
    for candidate in boards[1:]:
        candidateValue = evaluateBoard(candidate, weights)
        # Strictly greater, so the first maximum wins ties.
        if candidateValue > bestValue:
            bestValue = candidateValue
            best = candidate
    return best
def getJumps(board, index, jumpMapping, enemyCheckers, prev, result):
    """
    Recursively collect every jump chain available to the checker at `index`
    (0-based).  jumpMapping is keyed by 1-based square number and holds
    [jumped-over, landing] pairs (also 1-based).  Each discovered chain is a
    list of 0-based [jumped, landing] pairs; all chains (including every
    prefix of a multi-jump) are appended to `result`, which is returned.
    """
    for (overSquare, landSquare) in jumpMapping[index + 1]:
        over, land = overSquare - 1, landSquare - 1
        # A jump is legal when an enemy piece sits in between and the landing
        # square is empty.
        if board[over] in enemyCheckers and board[land] == 0:
            chain = copy.deepcopy(prev)
            chain.append([over, land])
            result.append(chain)
            # Keep jumping from the landing square to find multi-jumps.
            getJumps(board, land, jumpMapping, enemyCheckers, chain, result)
    return result
def getKingJumps(board, index, allyKing, enemyCheckers, prev, result):
    """
    Recursively collect every king jump chain from `index` (0-based), using
    the global kingJumpMapping.  Each jump is simulated on a scratch copy of
    the board so the recursion cannot re-capture the piece it just jumped.
    Chains are lists of 0-based [jumped, landing] pairs appended to `result`.
    """
    for (overSquare, landSquare) in kingJumpMapping[index + 1]:
        over, land = overSquare - 1, landSquare - 1
        if board[over] in enemyCheckers and board[land] == 0:
            # Play the jump out: vacate the start square, remove the captured
            # piece, and place the king on the landing square before
            # recursing.
            nextBoard = copy.deepcopy(board)
            nextBoard[index] = 0
            nextBoard[over] = 0
            nextBoard[land] = allyKing
            chain = copy.deepcopy(prev)
            chain.append([over, land])
            result.append(chain)
            getKingJumps(nextBoard, land, allyKing, enemyCheckers, chain,
                         result)
    return result
def getAllPossibleJumps(board, turn):
    """
    Return every jump available to `turn` ('w' or 'b') as [start, chain]
    pairs, where chain is a list of 0-based [jumped, landing] pairs.  Plain
    checkers use the per-side jump table; kings use the king table.
    """
    jumpMapping = jumpMappings[turn]
    # Piece codes used by this module: white plays 1 (checker) / 2 (king),
    # black plays 3 / 4.
    if turn == 'w':
        allyChecker, allyKing = 1, 2
        enemyCheckers = [3, 4]
    else:
        allyChecker, allyKing = 3, 4
        enemyCheckers = [1, 2]

    moves = []
    for start in range(32):
        piece = board[start]
        if piece == allyChecker:
            chains = getJumps(board, start, jumpMapping, enemyCheckers, [], [])
        elif piece == allyKing:
            chains = getKingJumps(board, start, allyKing, enemyCheckers,
                                  [], [])
        else:
            continue
        for chain in chains:
            moves.append([start, chain])
    return moves
def getAllPossibleMoves(board, turn):
    """
    Return every non-jump move available to `turn` as 0-based
    [start, destination] pairs.  Checkers use the per-side move table;
    kings use the omnidirectional kingMoveMapping.
    """
    moveMapping = moveMappings[turn]
    allyChecker = 1 if turn == 'w' else 3
    allyKing = 2 if turn == 'w' else 4

    moves = []
    for start in range(32):
        piece = board[start]
        if piece == allyChecker:
            table = moveMapping
        elif piece == allyKing:
            table = kingMoveMapping
        else:
            continue
        # Table entries are 1-based square numbers; only empty squares are
        # legal destinations.
        for dest in table[start + 1]:
            if board[dest - 1] == 0:
                moves.append([start, dest - 1])
    return moves
def crownPieces(board):
    """
    Promote, in place, any checker that reached its far row: code 1 becomes
    king code 2 on squares 0-3, and code 3 becomes king code 4 on 28-31.
    """
    board[0:4] = [2 if piece == 1 else piece for piece in board[0:4]]
    board[28:32] = [4 if piece == 3 else piece for piece in board[28:32]]
def getAllPossibleBoards(board, turn):
    """
    Return the list of board states reachable by `turn` in one turn.  Jumps
    are forced: when any jump exists only jump results are produced,
    otherwise every simple move is expanded.  Promotions are applied to each
    resulting board.
    """
    boards = []
    jumps = getAllPossibleJumps(board, turn)
    if jumps:
        for (start, chain) in jumps:
            nextBoard = copy.deepcopy(board)
            # Remove every piece captured along the chain.
            for (captured, landing) in chain:
                nextBoard[captured] = 0
            # The jumping piece ends up on the chain's final landing square.
            finalSquare = chain[len(chain) - 1][1]
            nextBoard[finalSquare] = nextBoard[start]
            nextBoard[start] = 0
            crownPieces(nextBoard)
            boards.append(nextBoard)
    else:
        for (start, dest) in getAllPossibleMoves(board, turn):
            nextBoard = copy.deepcopy(board)
            nextBoard[dest] = nextBoard[start]
            nextBoard[start] = 0
            crownPieces(nextBoard)
            boards.append(nextBoard)
    return boards
def isGameOver(boards):
    """The game is over when the side to move has no resulting boards."""
    return not boards
def getRandomBoard(boards):
    """Return a uniformly random element of `boards`."""
    pick = random.randint(0, len(boards) - 1)
    return boards[pick]
def trainAi(iterations):
    """
    Train fresh black and white weight sets by self-play against a random
    opponent, `iterations` games each, and return [blackWeights,
    whiteWeights].
    """
    #training our AI
    print("We will now train our AI using {0} iterations... this may take a while".format(iterations))
    startTime = time.time()
    blackWeights = makeInitialWeights()
    whiteWeights = makeInitialWeights()
    #train the black AI against a random AI
    for i in range(iterations):
        turn = 'b'
        board = makeBoard()
        boards = getAllPossibleBoards(board, turn)
        trainingData = []
        while(not isGameOver(boards)):
            if(turn == 'b'):
                # Learner's move: pick greedily and record the position.
                bestBoard = getBestPossibleBoard(boards, blackWeights)
                board = bestBoard
                trainingData.append(board)
                turn = 'w'
            else:
                # Opponent's move: uniformly random.
                randomBoard = getRandomBoard(boards)
                board = randomBoard
                turn = 'b'
            boards = getAllPossibleBoards(board, turn)
        # Loop exits when the side to move has no boards; if that side is
        # white, black (the learner) won.
        didWin = (turn == 'w')
        updateWeights(trainingData, blackWeights, didWin)
    #train the white AI against a random AI
    for i in range(iterations):
        turn = 'b'
        board = makeBoard()
        boards = getAllPossibleBoards(board, turn)
        trainingData = []
        while(not isGameOver(boards)):
            if(turn == 'b'):
                # Opponent (random) opens each game, since black moves first.
                randomBoard = getRandomBoard(boards)
                board = randomBoard
                turn = 'w'
            else:
                bestBoard = getBestPossibleBoard(boards, whiteWeights)
                board = bestBoard
                trainingData.append(board)
                turn = 'b'
            boards = getAllPossibleBoards(board, turn)
        # White (the learner) won if black is the side left without a move.
        didWin = (turn == 'b')
        updateWeights(trainingData, whiteWeights, didWin)
    endTime = time.time()
    print("Training {0} iterations took: {1}".format(iterations, str(endTime-startTime)))
    return [blackWeights, whiteWeights]
def loadWeightsFromJson(fileName):
    """
    Replace the global blackWeights/whiteWeights with the pair stored as
    JSON in fileName.  Silently does nothing when the file does not exist.

    TODO: it might be a good idea to validate that the JSON conforms to a
    schema in the future; currently the [black, white] layout is assumed.
    """
    if not os.path.isfile(fileName):
        return
    global blackWeights, whiteWeights
    with open(fileName, 'r') as infile:
        loaded = json.load(infile)
    blackWeights = loaded[0]
    whiteWeights = loaded[1]
def saveWeightsToJson(fileName, blackWeights, whiteWeights):
    """Persist the two weight matrices to fileName as a JSON [black, white] pair."""
    payload = [blackWeights, whiteWeights]
    with open(fileName, 'w') as outfile:
        json.dump(payload, outfile)
def openJsonFile():
    """Prompt for a .json weights file and load it into the global weights."""
    fileName = askopenfilename(filetypes=[('json files', '.json')])
    # NOTE(review): the isinstance guard presumably filters out the value the
    # dialog returns on cancel -- confirm against tkFileDialog's behaviour.
    # (unicode is the Python 2 text type, so this file is Python 2 only.)
    if(isinstance(fileName, (str, unicode))):
        loadWeightsFromJson(fileName)
        print('loaded weights from: {0}'.format(fileName))
def saveJsonFile():
    """Prompt for a destination path and save the global weights there as JSON."""
    fileName = asksaveasfilename(filetypes=[('json files', '.json')])
    # NOTE(review): as in openJsonFile, the isinstance check presumably
    # guards against a cancelled dialog -- confirm.
    if(isinstance(fileName, (str, unicode))):
        saveWeightsToJson(fileName, blackWeights, whiteWeights)
        print('saved weights to: {0}'.format(fileName))
def cancelAiTraining(topLevel):
    """Dismiss the training dialog, releasing its modal grab first."""
    topLevel.grab_release()
    topLevel.destroy()
def startAiTraining(iterationEntry, topLevel):
    """
    Read the iteration count from the dialog's entry widget, run training,
    and install the resulting weights as the global weight sets.
    """
    try:
        iterations = int(iterationEntry.get())
    except ValueError:
        # Invalid input: report on the console; the dialog stays open so the
        # user can correct the value.
        print('iterations must be a valid integer value')
    else:
        theWeights = trainAi(iterations)
        # Install the freshly trained weights globally.
        global blackWeights, whiteWeights
        blackWeights = theWeights[0]
        whiteWeights = theWeights[1]
        print(theWeights)
        # Close the (modal) training dialog on success.
        cancelAiTraining(topLevel)
def doAiTraining(root):
    """Open a modal dialog asking how many training iterations to run."""
    topLevel = Toplevel()
    # grab_set makes the dialog modal until it is destroyed.
    topLevel.grab_set()
    topLevel.wm_title("Checkers!!!")
    label1 = Label(topLevel, text='Number of training iterations:')
    label1.pack()
    iterationEntry = Entry(topLevel)
    iterationEntry.pack()
    # Start runs the training with the entered count; Cancel just closes.
    buttonStart = Button(topLevel, text='Start',
        command=partial(startAiTraining, iterationEntry, topLevel))
    buttonStart.pack()
    buttonCancel = Button(topLevel, text='Cancel', command=partial(cancelAiTraining, topLevel))
    buttonCancel.pack()
# Start from fresh weights, then overwrite them with any previously saved
# weights found on disk.
blackWeights = makeInitialWeights()
whiteWeights = makeInitialWeights()
loadWeightsFromJson(weightsFileName)
# Dump the active weight sets to the console for inspection.
print("--------------------")
print('blackWeights:')
print("--------------------")
print(blackWeights)
print("--------------------")
print('whiteWeights:')
print("--------------------")
print(whiteWeights)
print("--------------------")
board = makeBoard()
# GUI Code
# No game is in progress until "New Game" is clicked.
currentGameOngoing = False
# Pick the weight set matching whichever side the computer plays.
weightsMapping = {'b': blackWeights, 'w': whiteWeights}
def newGameClick():
    # Radio value 0 means the human plays black, i.e. the player moves first.
    startGame(playAsWhich.get() == 0)
def displayMovesClick():
    """Highlight every currently legal jump/move, if a game is running."""
    global currentGameOngoing
    if(currentGameOngoing):
        displayAllPossibleJumpsOrMoves(currentJumps, currentMoves)
def startGame(playerFirst):
    """
    Reset all game state for a new game and kick it off.

    playerFirst selects whether the human plays black (True; black moves
    first) or white (False, in which case the computer opens).
    """
    global pieceSelected, currentIndexes, currentJumps, currentMoves
    global currentTurn, currentBoards, currentGameOngoing
    global playerTurn, computerTurn, board
    board = makeBoard()
    pieceSelected = False
    # Black always moves first.
    currentTurn = 'b'
    currentJumps = getAllPossibleJumps(board, 'b')
    currentMoves = getAllPossibleMoves(board, 'b')
    currentBoards = getAllPossibleBoards(board, 'b')
    currentIndexes = {}
    currentGameOngoing = True
    # (Fix: removed an unused local 'currentGameWinner' that was assigned but
    # never read.)
    playerTurn = 'b' if(playerFirst) else 'w'
    computerTurn = 'w' if(playerFirst) else 'b'
    updateButtons(board)
    if(computerTurn == 'b'):
        # The computer opens when the player chose white.
        doComputerTurn()
    statusLabel['text'] = 'Player turn'
def displayAllPossibleJumpsOrMoves(jumps, moves):
    """
    Colour the board buttons to show every available jump (or, when no jump
    exists, every simple move): green = origin, red = captured square,
    blue = destination.
    """
    if jumps:
        for (start, chain) in jumps:
            buttons[start]['bg'] = 'green'
            for (captured, landing) in chain:
                buttons[captured]['bg'] = 'red'
                buttons[landing]['bg'] = 'blue'
    else:
        for (start, dest) in moves:
            buttons[start]['bg'] = 'green'
            buttons[dest]['bg'] = 'blue'
def displayPossibleJumpsOrMoves(board, jumps, moves, index):
    """
    Highlight the jumps (or, when no jump exists anywhere, the moves)
    available to the piece at `index`, recording each destination in the
    global currentIndexes so a later click can resolve the chosen move.
    Returns True when the piece at `index` has at least one option.
    """
    global currentIndexes
    currentIndexes = {}
    found = False
    if jumps:
        for (start, chain) in jumps:
            if start != index:
                continue
            found = True
            buttons[start]['bg'] = 'green'
            for (captured, landing) in chain:
                buttons[captured]['bg'] = 'red'
                buttons[landing]['bg'] = 'blue'
                # Map the landing square back to its origin and full chain.
                currentIndexes[landing] = [start, chain]
    else:
        for (start, dest) in moves:
            if start != index:
                continue
            found = True
            buttons[start]['bg'] = 'green'
            buttons[dest]['bg'] = 'blue'
            currentIndexes[dest] = [start]
    return found
def nextTurn():
    """
    Finish the current move: apply promotions, redraw the board, hand the
    turn to the other side, recompute that side's options, and detect game
    over (a side with no reachable boards loses).
    """
    global board, currentJumps, currentMoves, currentIndexes, currentBoards
    global currentGameOngoing, currentTurn
    crownPieces(board)
    updateButtons(board)
    # Flip whose turn it is.
    if(currentTurn == 'b'):
        currentTurn = 'w'
    else:
        currentTurn = 'b'
    # Precompute everything the new side to move can do.
    currentJumps = getAllPossibleJumps(board, currentTurn)
    currentMoves = getAllPossibleMoves(board, currentTurn)
    currentIndexes = {}
    currentBoards = getAllPossibleBoards(board, currentTurn)
    if(isGameOver(currentBoards)):
        # The side that cannot move loses.
        winner = 'black' if(currentTurn == 'w') else 'white'
        statusLabel['text'] = '{0} wins!'.format(winner)
        tkMessageBox.showinfo('Game Over!', 'Game is over: {0} wins!'.format(winner))
        currentGameOngoing = False
def doComputerTurn():
    """Let the computer greedily pick its best board, then advance the turn."""
    statusLabel['text'] = 'computer turn'
    # Use the weight set trained for whichever colour the computer plays.
    weights = weightsMapping[computerTurn]
    global board
    board = getBestPossibleBoard(currentBoards, weights)
    nextTurn()
    statusLabel['text'] = 'player turn'
def buttonClick(zeroIndex):
    """
    Handle a click on playable square `zeroIndex` (0-31).  First click
    selects a piece and highlights its options; a second click on a
    highlighted destination performs the move and triggers the computer's
    reply.
    """
    global currentGameOngoing, currentTurn, playerTurn
    # Ignore clicks when no game is running or it isn't the player's turn.
    if(not currentGameOngoing or not currentTurn == playerTurn):
        return
    global pieceSelected, currentIndexes, currentJumps, currentMoves
    global currentBoards
    # Clear any previous highlighting before handling the click.
    updateButtons(board)
    if(pieceSelected == True and zeroIndex in currentIndexes):
        # Second click on a legal destination: perform the move.
        pieceSelected = False
        startIndex = currentIndexes[zeroIndex][0]
        board[zeroIndex] = board[startIndex]
        board[startIndex] = 0
        #handle jumps: remove captured pieces along the recorded chain
        if(len(currentIndexes[zeroIndex]) > 1):
            for (i, j) in currentIndexes[zeroIndex][1]:
                board[i] = 0
                # NOTE(review): comparing the piece value board[j] to the
                # square index zeroIndex looks suspicious -- presumably this
                # is meant to stop removing captures at the clicked landing
                # square (j == zeroIndex). Verify partial-chain behaviour.
                if(board[j] == zeroIndex):
                    break
        nextTurn()
        if(currentGameOngoing):
            doComputerTurn()
    else:
        # First click: highlight this piece's options (if any).
        pieceSelected = displayPossibleJumpsOrMoves(board, currentJumps, currentMoves, zeroIndex)
def updateButtons(board):
    """Redraw all 32 playable squares: reset backgrounds and piece images."""
    for i in range(32):
        buttons[i]['bg'] = 'grey'
        buttons[i].config(image=buttonUpdateImage[board[i]])
root = Tk()
# The PhotoImages must be created after the root Tk window is instantiated,
# because Tkinter images need an active Tcl interpreter.
imagesFolder = 'checker_images'
separator = '/'
emptyCheckerImage = PhotoImage(file=imagesFolder + separator + 'emptyChecker.png')
whiteCheckerImage = PhotoImage(file=imagesFolder + separator + 'whiteChecker.png')
blackCheckerImage = PhotoImage(file=imagesFolder + separator + 'blackChecker.png')
whiteCheckerKingImage = PhotoImage(file=imagesFolder + separator + 'whiteCheckerKing.png')
blackCheckerKingImage = PhotoImage(file=imagesFolder + separator + 'blackCheckerKing.png')
# Map board cell codes to the image drawn on each square's button.
# NOTE(review): codes 1/2 map to the white images here, while the header
# comments describe 1/2 as black -- verify which convention is correct.
buttonUpdateImage = {0: emptyCheckerImage, 1:whiteCheckerImage, 2:whiteCheckerKingImage,
    3:blackCheckerImage, 4:blackCheckerKingImage}
# Fix the window at 1100x900 and let the single top-level cell stretch.
Grid.rowconfigure(root, 0, weight=1)
Grid.columnconfigure(root, 0, weight=1)
root.minsize(width=1100, height=900)
root.maxsize(width=1100, height=900)
root.wm_title("Checkers!!!")
topLevelFrame = Frame(root)
topLevelFrame.grid(row=0, column=0, sticky=N+S+E+W)
boardFrame = Frame(topLevelFrame)
boardFrame.grid(row=0, column=0, sticky=N+S+E+W)
# The board column gets 5x the horizontal weight of the options column.
Grid.rowconfigure(topLevelFrame, 0, weight=1)
Grid.columnconfigure(topLevelFrame, 0, weight=5)
Grid.columnconfigure(topLevelFrame, 1, weight=1)
buttonFont = tkFont.Font(family='Helvetica', size=24, weight='bold')
# Build the 8x8 grid of buttons.  Only the 32 dark (playable) squares get a
# board index, stay enabled, and are kept in `buttons`; `num` flips each row
# so the playable colour alternates, producing the checkerboard pattern.
buttons = []
i, j, num = 0, 0, 0
for r in range(8):
    num += 1
    if(num >= 2):
        num = 0
    for c in range(8):
        button = Button(boardFrame, text="", command=partial(buttonClick, i))
        button.grid(row=r, column=c, sticky=N+S+E+W)
        button['font'] = buttonFont
        button['bg'] = 'white'
        button['state'] = 'disabled'
        if(j % 2 == num):
            # Playable square: advance the board index and enable the button.
            i += 1
            #button['text'] = str(i) #this displays the index for each board position
            button['bg'] = 'grey'
            button['state'] = 'normal'
            buttons.append(button)
        j += 1
# Let every board row and column stretch evenly with the window.
for r in range(8):
    Grid.rowconfigure(boardFrame, r, weight=1)
for c in range(8):
    Grid.columnconfigure(boardFrame, c, weight=1)
optionsFrame = Frame(topLevelFrame)
optionsFrame.grid(row=0, column=1, sticky=N+S+E+W)
newGameButton = Button(optionsFrame, text="New Game?", command=newGameClick)
newGameButton.grid(row=0, column=0, sticky=N+S+E+W)
playAsWhich = IntVar()
radio1 = Radiobutton(optionsFrame, text="Play as black?", variable=playAsWhich, value=0)
radio1.grid(row=1, column=0, sticky=N+S+E+W)
radio1.invoke()
radio2 = Radiobutton(optionsFrame, text="Play as white?", variable=playAsWhich, value=1)
radio2.grid(row=2, column=0, sticky=N+S+E+W)
displayMovesButton = Button(optionsFrame, text="Display moves", command=displayMovesClick)
displayMovesButton.grid(row=3, column=0, sticky=N+S+W+E)
statusLabel = Label(optionsFrame, text="click new game!")
statusLabel.grid(row=4, column=0, sticky=N+S+W+E)
loadAIButton = Button(optionsFrame, text="Load AI", command=openJsonFile)
loadAIButton.grid(row=5, column=0, sticky=N+S+W+E)
saveAIButton = Button(optionsFrame, text="Save AI", command=saveJsonFile)
saveAIButton.grid(row=6, column=0, sticky=N+S+W+E)
trainAIButton = Button(optionsFrame, text="Train AI", command=partial(doAiTraining, root))
trainAIButton.grid(row=7, column=0, sticky=N+S+W+E)
for i in range(8):
Grid.rowconfigure(optionsFrame, i, weight=1)
Grid.rowconfigure(optionsFrame, 8, weight = 20)
Grid.columnconfigure(optionsFrame, 0, weight=1)
updateButtons(board)
root.mainloop()
small GUI updates... need to finish implementing progress updating
import re, tkFont, copy, random, time, tkMessageBox, json, os.path, ttk
from Tkinter import *
from tkFileDialog import askopenfilename, asksaveasfilename
from functools import partial
# ---- static move/jump tables ----
# The 32 playable squares are numbered 1..32 in these tables (the board list
# itself is 0-based; callers shift by one when reading them).
#the squares that a checker can move into from each position
blackMoveMapping = {1:[5, 6],
                    2:[6, 7],
                    3:[7, 8],
                    4:[8],
                    5:[9],
                    6:[9, 10],
                    7:[10, 11],
                    8:[11, 12],
                    9:[13, 14],
                    10:[14, 15],
                    11:[15, 16],
                    12:[16],
                    13:[17],
                    14:[17, 18],
                    15:[18, 19],
                    16:[19, 20],
                    17:[21, 22],
                    18:[22, 23],
                    19:[23, 24],
                    20:[24],
                    21:[25],
                    22:[25, 26],
                    23:[26, 27],
                    24:[27, 28],
                    25:[29, 30],
                    26:[30, 31],
                    27:[31, 32],
                    28:[32],
                    29:[],
                    30:[],
                    31:[],
                    32:[]}
whiteMoveMapping = {1:[],
                    2:[],
                    3:[],
                    4:[],
                    5:[1],
                    6:[1, 2],
                    7:[2, 3],
                    8:[3, 4],
                    9:[5, 6],
                    10:[6, 7],
                    11:[7, 8],
                    12:[8],
                    13:[9],
                    14:[9, 10],
                    15:[10, 11],
                    16:[11, 12],
                    17:[13, 14],
                    18:[14, 15],
                    19:[15, 16],
                    20:[16],
                    21:[17],
                    22:[17, 18],
                    23:[18, 19],
                    24:[19, 20],
                    25:[21, 22],
                    26:[22, 23],
                    27:[23, 24],
                    28:[24],
                    29:[25],
                    30:[25, 26],
                    31:[26, 27],
                    32:[27, 28]}
# in each list(for each position) first is the piece to be jumped over, second is
# the landing spot
blackJumpMapping = {1:[[6, 10]],
                    2:[[6, 9], [7, 11]],
                    3:[[7, 10], [8, 12]],
                    4:[[8, 11]],
                    5:[[9, 14]],
                    6:[[9, 13], [10, 15]],
                    7:[[10, 14], [11, 16]],
                    8:[[11, 15]],
                    9:[[14, 18]],
                    10:[[14, 17], [15, 19]],
                    11:[[15, 18], [16, 20]],
                    12:[[16, 19]],
                    13:[[17, 22]],
                    14:[[17, 21], [18, 23]],
                    15:[[18, 22], [19, 24]],
                    16:[[19, 23]],
                    17:[[22, 26]],
                    18:[[22, 25], [23, 27]],
                    19:[[23, 26], [24, 28]],
                    20:[[24, 27]],
                    21:[[25, 30]],
                    22:[[25, 29], [26, 31]],
                    23:[[26, 30], [27, 32]],
                    24:[[27, 31]],
                    25:[],
                    26:[],
                    27:[],
                    28:[],
                    29:[],
                    30:[],
                    31:[],
                    32:[]}
whiteJumpMapping = {1:[],
                    2:[],
                    3:[],
                    4:[],
                    5:[],
                    6:[],
                    7:[],
                    8:[],
                    9:[[6, 2]],
                    10:[[6, 1], [7, 3]],
                    11:[[7, 2], [8, 4]],
                    12:[[8, 3]],
                    13:[[9, 6]],
                    14:[[9, 5], [10, 7]],
                    15:[[10, 6], [11, 8]],
                    16:[[11, 7]],
                    17:[[14, 10]],
                    18:[[14, 9], [15, 11]],
                    19:[[15, 10], [16, 12]],
                    20:[[16, 11]],
                    21:[[17, 14]],
                    22:[[17, 13], [18, 15]],
                    23:[[18, 14], [19, 16]],
                    24:[[19, 15]],
                    25:[[22, 18]],
                    26:[[22, 17], [23, 19]],
                    27:[[23, 18], [24, 20]],
                    28:[[24, 19]],
                    29:[[25, 22]],
                    30:[[25, 21], [26, 23]],
                    31:[[26, 22], [27, 24]],
                    32:[[27, 23]]}
# kings move/jump in both directions: their tables are the union of the
# per-side tables above
kingMoveMapping = {1:[5, 6],
                   2:[6, 7],
                   3:[7, 8],
                   4:[8],
                   5:[1, 9],
                   6:[1, 2, 9, 10],
                   7:[2, 3, 10, 11],
                   8:[3, 4, 11, 12],
                   9:[5, 6, 13, 14],
                   10:[6, 7, 14, 15],
                   11:[7, 8, 15, 16],
                   12:[8, 16],
                   13:[9, 17],
                   14:[9, 10, 17, 18],
                   15:[10, 11, 18, 19],
                   16:[11, 12, 19, 20],
                   17:[13, 14, 21, 22],
                   18:[14, 15, 22, 23],
                   19:[15, 16, 23, 24],
                   20:[16, 24],
                   21:[17, 25],
                   22:[17, 18, 25, 26],
                   23:[18, 19, 26, 27],
                   24:[19, 20, 27, 28],
                   25:[21, 22, 29, 30],
                   26:[22, 23, 30, 31],
                   27:[23, 24, 31, 32],
                   28:[24, 32],
                   29:[25],
                   30:[25, 26],
                   31:[26, 27],
                   32:[27, 28]}
kingJumpMapping = {1:[[6, 10]],
                   2:[[6, 9], [7, 11]],
                   3:[[7, 10], [8, 12]],
                   4:[[8, 11]],
                   5:[[9, 14]],
                   6:[[9, 13], [10, 15]],
                   7:[[10, 14], [11, 16]],
                   8:[[11, 15]],
                   9:[[6, 2], [14, 18]],
                   10:[[6, 1], [7, 3], [14, 17], [15, 19]],
                   11:[[7, 2], [8, 4], [15, 18], [16, 20]],
                   12:[[8, 3], [16, 19]],
                   13:[[9, 6], [17, 22]],
                   14:[[9, 5], [10, 7], [17, 21], [18, 23]],
                   15:[[10, 6], [11, 8], [18, 22], [19, 24]],
                   16:[[11, 7], [19, 23]],
                   17:[[14, 10], [22, 26]],
                   18:[[14, 9], [15, 11], [22, 25], [23, 27]],
                   19:[[15, 10], [16, 12], [23, 26], [24, 28]],
                   20:[[16, 11], [24, 27]],
                   21:[[17, 14], [25, 30]],
                   22:[[17, 13], [18, 15], [25, 29], [26, 31]],
                   23:[[18, 14], [19, 16], [26, 30], [27, 32]],
                   24:[[19, 15], [27, 31]],
                   25:[[22, 18]],
                   26:[[22, 17], [23, 19]],
                   27:[[23, 18], [24, 20]],
                   28:[[24, 19]],
                   29:[[25, 22]],
                   30:[[25, 21], [26, 23]],
                   31:[[26, 22], [27, 24]],
                   32:[[27, 23]]}
# turn character ('w'/'b') -> the matching per-side table
moveMappings = {'w':whiteMoveMapping, 'b':blackMoveMapping}
jumpMappings = {'w':whiteJumpMapping, 'b':blackJumpMapping}
# default file the trained weights are loaded from at start-up
weightsFileName = 'weights.json'
learnConstant = 0.1 #learning constant
initialWeight = 0.5
# piece codes used in the 32-element board list:
#0 == empty
#1 == black checker
#2 == black king
#3 == white checker
#4 == white king
def makeBoard():
    """Return the 32-square starting position: piece code 3 on squares 0-11,
    eight empty squares in the middle, piece code 1 on squares 20-31."""
    return [3] * 12 + [0] * 8 + [1] * 12
def makeInitialWeights():
    """Return a fresh 4x32 weight matrix, every entry set to initialWeight."""
    return [[initialWeight] * 32 for _ in range(4)]
def getFeatures(board):
    """Return four 0/1 indicator rows, one per piece code 1-4; row c marks the
    squares of board that hold a piece with code c+1."""
    return [[1 if square == code else 0 for square in board]
            for code in range(1, 5)]
def evaluateFeatures(features, weights):
    """Return the element-wise dot product of weights and features.

    Generalized from the original hard-coded 4x32 index loops: it now sums
    over whatever shape the two matrices share, which gives an identical
    result for the standard 4x32 inputs and also works for other sizes.

    features: matrix of 0/1 indicators (see getFeatures).
    weights: matrix of floats with the same shape.
    """
    return sum(w * f
               for weightRow, featureRow in zip(weights, features)
               for w, f in zip(weightRow, featureRow))
def evaluateBoard(board, weights):
    """Score a board position: the weighted sum of its piece-indicator features."""
    features = getFeatures(board)
    return evaluateFeatures(features, weights)
def updateWeights(trainingData, weights, didWin):
    """Update weights in place with one pass of temporal-difference learning.

    Each board's training value is the current estimate of the *next* board
    the AI produced; the final board's value is +100 for a win, -100 for a
    loss. Each weight then takes a gradient step
    w += learnConstant * (value - estimate) * feature.

    trainingData: boards chosen by the AI during one game, in order.
    weights: 4x32 weight matrix, mutated in place.
    didWin: True when the AI that produced trainingData won the game.
    """
    if not trainingData:
        # Nothing to learn from; the original crashed here indexing the
        # last element of an empty values list.
        return
    estimates = [evaluateBoard(x, weights) for x in trainingData]
    # target for position i is the estimate of position i+1;
    # the terminal position gets the actual game outcome
    values = estimates[1:] + [100 if didWin else -100]
    for board, value, estimate in zip(trainingData, values, estimates):
        features = getFeatures(board)
        delta = learnConstant * (value - estimate)
        for j in range(len(weights)):
            for k in range(len(weights[j])):
                weights[j][k] = weights[j][k] + (delta * features[j][k])
def getBestPossibleBoard(boards, weights):
    """Return the board from boards with the highest evaluation.

    Ties are broken in favour of the earliest board in the list, matching
    the strict '>' comparison.
    """
    bestBoard = boards[0]
    bestValue = evaluateBoard(bestBoard, weights)
    for candidate in boards[1:]:
        candidateValue = evaluateBoard(candidate, weights)
        if candidateValue > bestValue:
            bestValue = candidateValue
            bestBoard = candidate
    return bestBoard
def getJumps(board, index, jumpMapping, enemyCheckers, prev, result):
    """Recursively collect every jump chain open to the checker at index.

    jumpMapping is 1-based: jumpMapping[square] lists [over, landing] pairs.
    Each chain appended to result is a list of 0-based [jumped, landing]
    pairs; every prefix of a longer chain is recorded as its own entry.
    prev carries the chain built so far; result accumulates and is returned.
    """
    for (overSquare, landSquare) in jumpMapping[index + 1]:
        over, land = overSquare - 1, landSquare - 1
        if board[over] in enemyCheckers and board[land] == 0:
            chain = copy.deepcopy(prev)
            chain.append([over, land])
            result.append(chain)
            # continue the chain from the landing square
            getJumps(board, land, jumpMapping, enemyCheckers, chain, result)
    return result
def getKingJumps(board, index, allyKing, enemyCheckers, prev, result):
    """Recursively collect every jump chain open to the king at index.

    Unlike getJumps, each recursive step simulates the jump on a copy of the
    board (origin cleared, jumped piece removed, king placed on the landing
    square) so the king cannot immediately jump back over the piece it just
    captured.
    """
    for (overSquare, landSquare) in kingJumpMapping[index + 1]:
        over, land = overSquare - 1, landSquare - 1
        if board[over] in enemyCheckers and board[land] == 0:
            #set up the next board... we need to make sure the king can't jump
            #back over where it just jumped
            nextBoard = copy.deepcopy(board)
            nextBoard[index] = 0
            nextBoard[over] = 0
            nextBoard[land] = allyKing
            chain = copy.deepcopy(prev)
            chain.append([over, land])
            result.append(chain)
            getKingJumps(nextBoard, land, allyKing, enemyCheckers, chain, result)
    return result
def getAllPossibleJumps(board, turn):
    """Return every jump available to side 'turn' ('w' or 'b').

    Each entry is [startIndex, chain] where chain is a list of 0-based
    [jumped, landing] pairs produced by getJumps/getKingJumps.
    """
    jumpMapping = jumpMappings[turn]
    allyChecker = 1 if(turn == 'w') else 3
    allyKing = 2 if(turn == 'w') else 4
    enemyChecker = 3 if(turn == 'w') else 1
    enemyKing = 4 if(turn == 'w') else 2
    enemyCheckers = [enemyChecker, enemyKing]
    moves = []
    for i in range(32):
        if board[i] == allyChecker:
            chains = getJumps(board, i, jumpMapping, enemyCheckers, [], [])
        elif board[i] == allyKing:
            chains = getKingJumps(board, i, allyKing, enemyCheckers, [], [])
        else:
            continue
        for chain in chains:
            moves.append([i, chain])
    return moves
def getAllPossibleMoves(board, turn):
    """Return every plain (non-jump) move for side 'turn' as [from, to] pairs.

    Checkers use the per-side table from moveMappings, kings use the
    bidirectional kingMoveMapping; both tables are 1-based, so indexes are
    shifted by one when reading them. (The original also computed enemy
    piece codes that were never used; they are dropped here.)
    """
    moves = []
    moveMapping = moveMappings[turn]
    allyChecker = 1 if(turn == 'w') else 3
    allyKing = 2 if(turn == 'w') else 4
    for i in range(32):
        if(board[i] == allyChecker):
            mapping = moveMapping
        elif(board[i] == allyKing):
            mapping = kingMoveMapping
        else:
            continue
        for j in mapping[i + 1]:
            if(board[j - 1] == 0):
                moves.append([i, j - 1])
    return moves
def crownPieces(board):
    """Promote checkers that reached the far rank to kings, in place:
    code 1 on squares 0-3 becomes 2, code 3 on squares 28-31 becomes 4."""
    for i, square in enumerate(board[:4]):
        if square == 1:
            board[i] = 2
    for offset, square in enumerate(board[28:32]):
        if square == 3:
            board[28 + offset] = 4
def getAllPossibleBoards(board, turn):
    """Return every board reachable by side 'turn' in one move.

    Jumps are forced: when any jump exists only jump results are produced,
    otherwise the plain moves are used. Crowning is applied to each result.
    """
    boards = []
    jumps = getAllPossibleJumps(board, turn)
    if(jumps != []):
        for (start, chain) in jumps:
            newBoard = copy.deepcopy(board)
            # remove every jumped piece, then land on the chain's final square
            for (jumped, landing) in chain:
                newBoard[jumped] = 0
            finalSquare = chain[-1][1]
            newBoard[finalSquare] = newBoard[start]
            newBoard[start] = 0
            crownPieces(newBoard)
            boards.append(newBoard)
    else:
        for (start, dest) in getAllPossibleMoves(board, turn):
            newBoard = copy.deepcopy(board)
            newBoard[dest] = newBoard[start]
            newBoard[start] = 0
            crownPieces(newBoard)
            boards.append(newBoard)
    return boards
def isGameOver(boards):
    """The game is over when the side to move has no legal resulting boards."""
    return len(boards) == 0
def getRandomBoard(boards):
    """Pick one of boards uniformly at random (the training opponent's move)."""
    choiceIndex = random.randint(0, len(boards) - 1)
    return boards[choiceIndex]
def _trainSingleAi(weights, aiTurn, iterations):
    """Play 'iterations' games updating weights in place.

    The AI moves on aiTurn ('b' or 'w') and always picks the best-evaluated
    successor board; the opponent plays uniformly at random. After each game
    the AI's weights are updated from the boards it produced. The side whose
    turn it is when no boards remain has lost, so the AI won exactly when
    the game ends on the opponent's turn.
    """
    for _ in range(iterations):
        turn = 'b'
        board = makeBoard()
        boards = getAllPossibleBoards(board, turn)
        trainingData = []
        while(not isGameOver(boards)):
            if(turn == aiTurn):
                board = getBestPossibleBoard(boards, weights)
                trainingData.append(board)
            else:
                board = getRandomBoard(boards)
            turn = 'w' if(turn == 'b') else 'b'
            boards = getAllPossibleBoards(board, turn)
        didWin = (turn != aiTurn)
        updateWeights(trainingData, weights, didWin)
def trainAi(iterations):
    """Train fresh black and white weight matrices against a random opponent
    and return them as [blackWeights, whiteWeights].

    The original duplicated the whole training loop for each colour; both
    passes now share _trainSingleAi, run in the same order (black first)
    with identical behaviour.
    """
    print("We will now train our AI using {0} iterations... this may take a while".format(iterations))
    startTime = time.time()
    blackWeights = makeInitialWeights()
    whiteWeights = makeInitialWeights()
    #train the black AI against a random AI
    _trainSingleAi(blackWeights, 'b', iterations)
    #train the white AI against a random AI
    _trainSingleAi(whiteWeights, 'w', iterations)
    endTime = time.time()
    print("Training {0} iterations took: {1}".format(iterations, str(endTime-startTime)))
    return [blackWeights, whiteWeights]
#might be a good idea to validate that the json conforms to some schema in the future
def loadWeightsFromJson(fileName):
    """Load [blackWeights, whiteWeights] from a JSON file into the module
    globals; silently does nothing when the file does not exist."""
    if(not os.path.isfile(fileName)):
        return
    global blackWeights, whiteWeights
    with open(fileName, 'r') as infile:
        theWeights = json.load(infile)
    blackWeights = theWeights[0]
    whiteWeights = theWeights[1]
def saveWeightsToJson(fileName, blackWeights, whiteWeights):
    """Write the two weight matrices to fileName as a two-element JSON list."""
    weightsPair = [blackWeights, whiteWeights]
    with open(fileName, 'w') as outfile:
        json.dump(weightsPair, outfile)
def openJsonFile():
    """Ask the user for a .json file and load AI weights from it.

    The Python 2 file dialog may return str or unicode; anything else
    (e.g. the empty tuple from a cancelled dialog) is ignored.
    """
    fileName = askopenfilename(filetypes=[('json files', '.json')])
    if(not isinstance(fileName, (str, unicode))):
        return
    loadWeightsFromJson(fileName)
    print('loaded weights from: {0}'.format(fileName))
def saveJsonFile():
    """Ask the user for a destination .json file and save the current AI
    weights there; a cancelled dialog is ignored."""
    fileName = asksaveasfilename(filetypes=[('json files', '.json')])
    if(not isinstance(fileName, (str, unicode))):
        return
    saveWeightsToJson(fileName, blackWeights, whiteWeights)
    print('saved weights to: {0}'.format(fileName))
def cancelAiTraining(topLevel):
    """Release the modal grab, then close the training dialog."""
    topLevel.grab_release()
    topLevel.destroy()
def startAiTraining(iterationEntry, topLevel):
    """Read the iteration count from the entry widget, train both AIs, store
    the new weights in the module globals and close the dialog.

    On a non-integer entry it only prints a message and leaves the dialog
    open, exactly like the original try/except/else version.
    """
    try:
        iterations = int(iterationEntry.get())
    except ValueError:
        print('iterations must be a valid integer value')
        return
    theWeights = trainAi(iterations)
    global blackWeights, whiteWeights
    blackWeights = theWeights[0]
    whiteWeights = theWeights[1]
    print(theWeights)
    cancelAiTraining(topLevel)
def doAiTraining(root):
    """Open a modal dialog that asks for an iteration count and starts AI
    training; the progress bars and time labels are placeholders until
    progress updating is implemented."""
    #TODO-finish implementing progress updating
    #defining our pop up form
    topLevel = Toplevel()
    topLevel.minsize(width=600, height=200)
    #topLevel.maxsize(width=1000, height=200)
    # modal: route all input to this dialog until it is closed
    topLevel.grab_set()
    topLevel.wm_title("Checkers!!!")
    iterationLabel = Label(topLevel, text='# of training iterations:')
    iterationLabel.grid(row=0, column=0, sticky=N+S+E+W)
    iterationEntry = Entry(topLevel)
    iterationEntry.grid(row=0, column=1, sticky=N+S+E+W)
    # per-AI progress rows (static placeholder text for now)
    blackLabel = Label(topLevel, text='BlackAI: -10000/-10000')
    blackLabel.grid(row=1, column=0, sticky=N+S+E+W)
    blackBar = ttk.Progressbar(topLevel, orient='horizontal', mode='determinate')
    blackBar.grid(row=1, column=1, sticky=N+S+E+W)
    whiteLabel = Label(topLevel, text='WhiteAI: -10000/-10000')
    whiteLabel.grid(row=2, column=0, sticky=N+S+E+W)
    whiteBar = ttk.Progressbar(topLevel, orient='horizontal', mode='determinate')
    whiteBar.grid(row=2, column=1, sticky=N+S+E+W)
    estimateLabel = Label(topLevel, text='Est: 0 seconds left')
    estimateLabel.grid(row=3, column=0, sticky=N+S+E+W)
    timeLabel = Label(topLevel, text='Time running: 0 seconds')
    timeLabel.grid(row=3, column=1, sticky=N+S+E+W)
    buttonStart = Button(topLevel, text='Start',
                         command=partial(startAiTraining, iterationEntry, topLevel))
    buttonStart.grid(row=4, column=0, sticky=N+S+E+W)
    buttonCancel = Button(topLevel, text='Cancel', command=partial(cancelAiTraining, topLevel))
    buttonCancel.grid(row=4, column=1, sticky=N+S+E+W)
    for i in range(5):
        Grid.rowconfigure(topLevel, i, weight=1)
    for i in range(2):
        Grid.columnconfigure(topLevel, i, weight=1)
# ---- module start-up: initialise weights, loading weights.json if present ----
blackWeights = makeInitialWeights()
whiteWeights = makeInitialWeights()
loadWeightsFromJson(weightsFileName)
print("--------------------")
print('blackWeights:')
print("--------------------")
print(blackWeights)
print("--------------------")
print('whiteWeights:')
print("--------------------")
print(whiteWeights)
print("--------------------")
board = makeBoard()
# GUI Code
currentGameOngoing = False
# NOTE(review): this dict captures the list objects bound above; loading or
# training later REBINDS the blackWeights/whiteWeights globals, so this
# snapshot will keep pointing at the old lists — verify that is intended.
weightsMapping = {'b': blackWeights, 'w': whiteWeights}
def newGameClick():
    """Start a new game; radio value 0 means the human plays black (black
    always moves first)."""
    playerMovesFirst = (playAsWhich.get() == 0)
    startGame(playerMovesFirst)
def displayMovesClick():
    """Highlight every legal move for the side to play, if a game is running."""
    if(not currentGameOngoing):
        return
    displayAllPossibleJumpsOrMoves(currentJumps, currentMoves)
def startGame(playerFirst):
    """Reset all game state for a new game and let the computer open when it
    plays black.

    playerFirst: True when the human plays black; black always moves first,
    so the computer takes the opening move only when it is black.
    (The original also assigned an unused local 'currentGameWinner', removed
    here.)
    """
    global pieceSelected, currentIndexes, currentJumps, currentMoves
    global currentTurn, currentBoards, currentGameOngoing
    global playerTurn, computerTurn, board
    board = makeBoard()
    pieceSelected = False
    currentTurn = 'b'
    currentJumps = getAllPossibleJumps(board, 'b')
    currentMoves = getAllPossibleMoves(board, 'b')
    currentBoards = getAllPossibleBoards(board, 'b')
    currentIndexes = {}
    currentGameOngoing = True
    playerTurn = 'b' if(playerFirst) else 'w'
    computerTurn = 'w' if(playerFirst) else 'b'
    updateButtons(board)
    if(computerTurn == 'b'):
        doComputerTurn()
    statusLabel['text'] = 'Player turn'
def displayAllPossibleJumpsOrMoves(jumps, moves):
    """Colour the board buttons: green = piece that can act, red = piece that
    would be jumped, blue = landing/destination square.

    Jumps take priority: when any jump exists only jumps are shown.
    (The original jump loop's variable shadowed the 'moves' parameter;
    renamed here with behaviour unchanged.)
    """
    if(jumps != []):
        for (start, chain) in jumps:
            buttons[start]['bg'] = 'green'
            for (jumped, landing) in chain:
                buttons[jumped]['bg'] = 'red'
                buttons[landing]['bg'] = 'blue'
    else:
        for (start, dest) in moves:
            buttons[start]['bg'] = 'green'
            buttons[dest]['bg'] = 'blue'
def displayPossibleJumpsOrMoves(board, jumps, moves, index):
    """Highlight the options for the piece at 'index' and record in the
    currentIndexes global which destination square maps to which move.

    currentIndexes[destination] is [start] for a plain move or
    [start, chain] for a jump. Returns True when the piece at 'index' has at
    least one jump/move, i.e. it became the selected piece.
    (The original jump loop's variable shadowed the 'moves' parameter;
    renamed here with behaviour unchanged.)
    """
    global currentIndexes
    currentIndexes = {}
    result = False
    if(jumps != []):
        for (start, chain) in jumps:
            if(start == index):
                result = True
                buttons[start]['bg'] = 'green'
                for (jumped, landing) in chain:
                    buttons[jumped]['bg'] = 'red'
                    buttons[landing]['bg'] = 'blue'
                    currentIndexes[landing] = [start, chain]
    else:
        for (start, dest) in moves:
            if(start == index):
                result = True
                buttons[start]['bg'] = 'green'
                buttons[dest]['bg'] = 'blue'
                currentIndexes[dest] = [start]
    return result
def nextTurn():
    """Finish the current move: crown pieces, redraw the board, flip the turn,
    recompute the legal jumps/moves/boards, and detect the end of the game."""
    global board, currentJumps, currentMoves, currentIndexes, currentBoards
    global currentGameOngoing, currentTurn
    crownPieces(board)
    updateButtons(board)
    currentTurn = 'w' if(currentTurn == 'b') else 'b'
    currentJumps = getAllPossibleJumps(board, currentTurn)
    currentMoves = getAllPossibleMoves(board, currentTurn)
    currentIndexes = {}
    currentBoards = getAllPossibleBoards(board, currentTurn)
    if(isGameOver(currentBoards)):
        # the side to move has no boards left, so the other side won
        winner = 'black' if(currentTurn == 'w') else 'white'
        statusLabel['text'] = '{0} wins!'.format(winner)
        tkMessageBox.showinfo('Game Over!', 'Game is over: {0} wins!'.format(winner))
        currentGameOngoing = False
def doComputerTurn():
    """Make the computer's move: pick the best-evaluated successor board and
    advance the turn.

    Reads the blackWeights/whiteWeights globals directly instead of the
    module-level weightsMapping snapshot: loading or training weights later
    REBINDS those globals, which the snapshot taken at import time would not
    reflect, so the computer would silently keep playing with the stale
    start-up weights.
    """
    statusLabel['text'] = 'computer turn'
    weights = blackWeights if(computerTurn == 'b') else whiteWeights
    global board
    board = getBestPossibleBoard(currentBoards, weights)
    nextTurn()
    statusLabel['text'] = 'player turn'
def buttonClick(zeroIndex):
    """Handle a click on playable square zeroIndex (0-31).

    First click on one of the player's movable pieces selects it and
    highlights its options; a second click on a highlighted destination
    performs the move or jump and hands the turn to the computer.
    """
    global currentGameOngoing, currentTurn, playerTurn
    if(not currentGameOngoing or not currentTurn == playerTurn):
        return
    global pieceSelected, currentIndexes, currentJumps, currentMoves
    global currentBoards
    updateButtons(board)
    if(pieceSelected == True and zeroIndex in currentIndexes):
        pieceSelected = False
        startIndex = currentIndexes[zeroIndex][0]
        board[zeroIndex] = board[startIndex]
        board[startIndex] = 0
        #handle jumps
        if(len(currentIndexes[zeroIndex]) > 1):
            for (i, j) in currentIndexes[zeroIndex][1]:
                board[i] = 0
                # stop clearing jumped pieces once the chain reaches the
                # square the player clicked. The original compared
                # board[j] (a piece code, 0-4) to zeroIndex (a square index),
                # so the break almost never fired and clicking an
                # intermediate landing square removed pieces past it.
                if(j == zeroIndex):
                    break
        nextTurn()
        if(currentGameOngoing):
            doComputerTurn()
    else:
        pieceSelected = displayPossibleJumpsOrMoves(board, currentJumps, currentMoves, zeroIndex)
def updateButtons(board):
    """Redraw every playable square: reset the highlight colour to grey and
    show the image matching the board code on that square."""
    for index in range(32):
        square = buttons[index]
        square['bg'] = 'grey'
        square.config(image=buttonUpdateImage[board[index]])
# ---- Tk root window, piece images, board grid and options panel ----
root = Tk()
#you have to make the images after instatiating the root Tkinter window for some
#weird reason
imagesFolder = 'checker_images'
separator = '/'
emptyCheckerImage = PhotoImage(file=imagesFolder + separator + 'emptyChecker.png')
whiteCheckerImage = PhotoImage(file=imagesFolder + separator + 'whiteChecker.png')
blackCheckerImage = PhotoImage(file=imagesFolder + separator + 'blackChecker.png')
whiteCheckerKingImage = PhotoImage(file=imagesFolder + separator + 'whiteCheckerKing.png')
blackCheckerKingImage = PhotoImage(file=imagesFolder + separator + 'blackCheckerKing.png')
# board piece code -> image displayed on the square's button
buttonUpdateImage = {0: emptyCheckerImage, 1:whiteCheckerImage, 2:whiteCheckerKingImage,
                     3:blackCheckerImage, 4:blackCheckerKingImage}
Grid.rowconfigure(root, 0, weight=1)
Grid.columnconfigure(root, 0, weight=1)
# fixed-size window (min == max)
root.minsize(width=1100, height=900)
root.maxsize(width=1100, height=900)
root.wm_title("Checkers!!!")
topLevelFrame = Frame(root)
topLevelFrame.grid(row=0, column=0, sticky=N+S+E+W)
boardFrame = Frame(topLevelFrame)
boardFrame.grid(row=0, column=0, sticky=N+S+E+W)
Grid.rowconfigure(topLevelFrame, 0, weight=1)
Grid.columnconfigure(topLevelFrame, 0, weight=5)
Grid.columnconfigure(topLevelFrame, 1, weight=1)
buttonFont = tkFont.Font(family='Helvetica', size=24, weight='bold')
buttons = []
# i counts playable (dark) squares, j counts all 64 squares,
# num alternates per row so the playable colour flips each rank
i, j, num = 0, 0, 0
for r in range(8):
    num += 1
    if(num >= 2):
        num = 0
    for c in range(8):
        button = Button(boardFrame, text="", command=partial(buttonClick, i))
        button.grid(row=r, column=c, sticky=N+S+E+W)
        button['font'] = buttonFont
        button['bg'] = 'white'
        button['state'] = 'disabled'
        # only every other square (offset by row parity) is playable
        if(j % 2 == num):
            i += 1
            #button['text'] = str(i) #this displays the index for each board position
            button['bg'] = 'grey'
            button['state'] = 'normal'
            buttons.append(button)
        j += 1
for r in range(8):
    Grid.rowconfigure(boardFrame, r, weight=1)
for c in range(8):
    Grid.columnconfigure(boardFrame, c, weight=1)
# right-hand options column: new game, side selection, utility buttons
optionsFrame = Frame(topLevelFrame)
optionsFrame.grid(row=0, column=1, sticky=N+S+E+W)
newGameButton = Button(optionsFrame, text="New Game?", command=newGameClick)
newGameButton.grid(row=0, column=0, sticky=N+S+E+W)
playAsWhich = IntVar()
radio1 = Radiobutton(optionsFrame, text="Play as black?", variable=playAsWhich, value=0)
radio1.grid(row=1, column=0, sticky=N+S+E+W)
radio1.invoke()
radio2 = Radiobutton(optionsFrame, text="Play as white?", variable=playAsWhich, value=1)
radio2.grid(row=2, column=0, sticky=N+S+E+W)
displayMovesButton = Button(optionsFrame, text="Display moves", command=displayMovesClick)
displayMovesButton.grid(row=3, column=0, sticky=N+S+W+E)
statusLabel = Label(optionsFrame, text="click new game!")
statusLabel.grid(row=4, column=0, sticky=N+S+W+E)
loadAIButton = Button(optionsFrame, text="Load AI", command=openJsonFile)
loadAIButton.grid(row=5, column=0, sticky=N+S+W+E)
saveAIButton = Button(optionsFrame, text="Save AI", command=saveJsonFile)
saveAIButton.grid(row=6, column=0, sticky=N+S+W+E)
trainAIButton = Button(optionsFrame, text="Train AI", command=partial(doAiTraining, root))
trainAIButton.grid(row=7, column=0, sticky=N+S+W+E)
for i in range(8):
    Grid.rowconfigure(optionsFrame, i, weight=1)
# row 8 is an empty spacer that soaks up the remaining vertical space
Grid.rowconfigure(optionsFrame, 8, weight = 20)
Grid.columnconfigure(optionsFrame, 0, weight=1)
updateButtons(board)
root.mainloop()
|
from collections import OrderedDict
from contextlib import contextmanager
from datetime import datetime, time
from decimal import Decimal
from enum import Enum
import collections
from functools import lru_cache
import alchemyjsonschema
import falcon
import rapidjson as json
from falcon import HTTPConflict, HTTPBadRequest, HTTPNotFound
from sqlalchemy import inspect
from sqlalchemy.dialects.postgresql import TSVECTOR
from sqlalchemy.exc import IntegrityError, ProgrammingError
from sqlalchemy.orm import sessionmaker, subqueryload, aliased
from sqlalchemy.orm.base import MANYTOONE
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from sqlalchemy.sql import sqltypes, operators, extract, func
from sqlalchemy.sql.expression import and_, or_, not_, desc, select
from sqlalchemy.sql.functions import Function
from api.resources.base import BaseCollectionResource, BaseSingleResource
class AlchemyMixin(object):
    """
    Provides serialize and deserialize methods to convert between JSON and SQLAlchemy datatypes.
    """
    # separator for several values packed into one filter parameter string
    MULTIVALUE_SEPARATOR = ','
    # request parameter selecting which relations to serialize
    PARAM_RELATIONS = 'relations'
    PARAM_RELATIONS_ALL = '_all'
    # wire format used for datetime columns
    DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
    # True: to-many relations serialize as lists; False: dicts keyed by str(id)
    RELATIONS_AS_LIST = True
    IGNORE_UNKNOWN_FILTER = False
    # filter-key suffix (after '__') -> SQLAlchemy operator or callable
    _underscore_operators = {
        'exact': operators.eq,
        'notexact': operators.ne,
        'gt': operators.gt,
        'lt': operators.lt,
        'gte': operators.ge,
        'lte': operators.le,
        'range': operators.between_op,
        'notrange': operators.notbetween_op,
        'in': operators.in_op,
        'notin': operators.notin_op,
        'contains': operators.contains_op,
        'notcontains': operators.notcontains_op,
        'match': operators.match_op,
        'notmatch': operators.notmatch_op,
        'iexact': operators.ilike_op,
        'notiexact': operators.notilike_op,
        'startswith': operators.startswith_op,
        'notstartswith': operators.notstartswith_op,
        'endswith': operators.endswith_op,
        'notendswith': operators.notendswith_op,
        'hasall': lambda c, x: c.has_all(x),
        'hasany': lambda c, x: c.has_any(x),
        'haskey': lambda c, x: c.has_key(x),  # noqa
        'overlap': lambda c, x: c.op('&&')(x),
        # the %-doubling escapes literal percent signs in the user value
        # before the LIKE wildcard is appended/prepended
        'istartswith': lambda c, x: c.ilike(x.replace('%', '%%') + '%'),
        'notistartswith': lambda c, x: c.notilike(x.replace('%', '%%') + '%'),
        'iendswith': lambda c, x: c.ilike('%' + x.replace('%', '%%')),
        'notiendswith': lambda c, x: c.notilike('%' + x.replace('%', '%%')),
        'isnull': lambda c, x: c.is_(None) if x else c.isnot(None),
        'isnotnull': lambda c, x: c.isnot(None) if x else c.is_(None),
        'year': lambda c, x: extract('year', c) == x,
        'month': lambda c, x: extract('month', c) == x,
        'day': lambda c, x: extract('day', c) == x,
        'func': Function,
        'sfunc': Function,
        'efunc': Function,
    }
    # logical connectives allowed as top-level filter keys
    _logical_operators = {
        'or': or_,
        'and': and_,
        'not': not_,
    }
    @classmethod
    @contextmanager
    def session_scope(cls, db_engine=None, session_class=None):
        """
        Provide a scoped db session for a series of operarions.
        The session is created immediately before the scope begins, and is closed
        on scope exit. Commits on normal exit, rolls back and re-raises on any
        error raised inside the scope.
        :param db_engine: SQLAlchemy Engine or other Connectable
        :type db_engine: sqlalchemy.engine.Connectable
        :param session_class: SQLAlchemy Session
        :type session_class: sqlalchemy.orm.Session
        """
        if session_class is None:
            session_class = sessionmaker(bind=db_engine)
        db_session = session_class()
        try:
            yield db_session
            db_session.commit()
        except:  # bare except is deliberate: roll back on *any* exception, then re-raise
            db_session.rollback()
            raise
        finally:
            db_session.close()
@classmethod
def serialize(cls, obj, skip_primary_key=False, skip_foreign_keys=False, relations_level=1, relations_ignore=None,
relations_include=None):
"""
Converts the object to a serializable dictionary.
:param obj: the object to serialize
:param skip_primary_key: should primary keys be skipped
:type skip_primary_key: bool
:param skip_foreign_keys: should foreign keys be skipped
:type skip_foreign_keys: bool
:param relations_level: how many levels of relations to serialize
:type relations_level: int
:param relations_ignore: relationship names to ignore
:type relations_ignore: list
:param relations_include: relationship names to include
:type relations_include: list
:return: a serializable dictionary
:rtype: dict
"""
data = {}
data = cls.serialize_columns(obj, data, skip_primary_key, skip_foreign_keys)
if relations_level > 0:
if relations_ignore is None:
relations_ignore = []
data = cls.serialize_relations(obj, data, relations_level, relations_ignore, relations_include)
return data
@classmethod
def serialize_columns(cls, obj, data, skip_primary_key=False, skip_foreign_keys=False):
columns = inspect(obj).mapper.columns
for key, column in columns.items():
if skip_primary_key and column.primary_key:
continue
if skip_foreign_keys and len(column.foreign_keys):
continue
if isinstance(column.type, TSVECTOR):
continue
data[key] = cls.serialize_column(column, getattr(obj, key))
return data
@classmethod
def serialize_column(cls, column, value):
if isinstance(value, datetime):
return value.strftime(cls.DATETIME_FORMAT)
elif isinstance(value, time):
return value.isoformat()
elif isinstance(value, Decimal):
return float(value)
elif isinstance(value, Enum):
return value.value
return value
    @classmethod
    def serialize_relations(cls, obj, data, relations_level=1, relations_ignore=None, relations_include=None):
        """
        Serializes obj's relationships into data and returns it.
        Many-to-one relations become nested dicts under the relation key,
        one-to-one relations are flattened into the parent dict, and to-many
        relations become either a list or an id-keyed dict depending on
        RELATIONS_AS_LIST.
        """
        mapper = inspect(obj).mapper
        for relation in mapper.relationships:
            if relation.key in relations_ignore\
                    or (relations_include is not None and relation.key not in relations_include):
                continue
            rel_obj = getattr(obj, relation.key)
            if rel_obj is None:
                continue
            # NOTE(review): relations_ignore is rebound inside the loop, so a
            # back_populates name appended for one relation stays ignored for
            # every relation processed after it — confirm this accumulation
            # is intended.
            relations_ignore = [] if relations_ignore is None else list(relations_ignore)
            if relation.back_populates:
                # never serialize back along the edge we came down from
                relations_ignore.append(relation.back_populates)
            if relation.direction == MANYTOONE:
                data[relation.key] = cls.serialize(rel_obj,
                                                   relations_level=relations_level - 1,
                                                   relations_ignore=relations_ignore)
            elif not relation.uselist:
                # one-to-one: merge the child's columns into the parent dict
                data.update(cls.serialize(rel_obj,
                                          skip_primary_key=True,
                                          relations_level=relations_level - 1,
                                          relations_ignore=relations_ignore))
            else:
                if cls.RELATIONS_AS_LIST:
                    data[relation.key] = [
                        cls.serialize(rel,
                                      skip_primary_key=False,
                                      relations_level=relations_level - 1,
                                      relations_ignore=relations_ignore)
                        for rel in rel_obj
                    ]
                else:
                    # dict keyed by str(id); related objects without an 'id'
                    # attribute are silently dropped
                    data[relation.key] = {
                        str(rel.id): cls.serialize(rel,
                                                   skip_primary_key=True,
                                                   relations_level=relations_level - 1,
                                                   relations_ignore=relations_ignore)
                        for rel in rel_obj if hasattr(rel, 'id')
                    }
        return data
    def deserialize(self, data, mapper=None):
        """
        Converts incoming data to internal types. Detects relation objects. Moves one to one relation attributes
        to a separate key. Silently skips unknown attributes.
        :param data: incoming data
        :type data: dict
        :param mapper: mapper, if None, mapper of the main object class will be used
        :type mapper: sqlalchemy.orm.mapper.Mapper
        :return: data with correct types
        :rtype: dict
        """
        attributes = {}
        if data is None:
            return attributes
        def is_int(s):
            # true when s parses as an integer (used to spot pk-keyed dicts)
            try:
                int(s)
            except ValueError:
                return False
            return True
        if mapper is None:
            mapper = inspect(self.objects_class)
        for key, value in data.items():
            if key in mapper.relationships:
                rel_mapper = mapper.relationships[key].mapper
                # handle a special case, when value is a dict with only all integer keys, then convert it to a list
                if isinstance(value, dict) and all(is_int(pk) for pk in value.keys()):
                    replacement = []
                    for pk, attrs in value.items():
                        # push the dict key back into the record as its primary key
                        attrs[rel_mapper.primary_key[0].name] = pk
                        replacement.append(attrs)
                    value = replacement
                if isinstance(value, dict):
                    # single related record: recurse with the relation's mapper
                    attributes[key] = self.deserialize(value, rel_mapper)
                elif isinstance(value, list):
                    attributes[key] = []
                    for item in value:
                        if isinstance(item, dict):
                            attributes[key].append(self.deserialize(item, rel_mapper))
                        else:
                            # scalar items (e.g. bare primary keys) pass through
                            attributes[key].append(item)
                else:
                    attributes[key] = value
            elif key in mapper.columns:
                attributes[key] = self.deserialize_column(mapper.columns[key], value)
            else:
                # unknown key: if it matches a column of a one-to-one child,
                # move it under that relation's key; otherwise it is dropped
                for relation in mapper.relationships:
                    if relation.direction == MANYTOONE or relation.uselist or key not in relation.mapper.columns:
                        continue
                    if relation.key not in attributes:
                        attributes[relation.key] = {}
                    attributes[relation.key][key] = self.deserialize_column(relation.mapper.columns[key], value)
        return attributes
def deserialize_column(self, column, value):
if value is None:
return None
if isinstance(column.type, sqltypes.DateTime):
return datetime.strptime(value, self.DATETIME_FORMAT)
if isinstance(column.type, sqltypes.Time):
hour, minute, second = value.split(':')
return time(int(hour), int(minute), int(second))
if isinstance(column.type, sqltypes.Integer):
return int(value)
if isinstance(column.type, sqltypes.Float):
return float(value)
return value
@lru_cache(maxsize=None)
def get_schema(self, objects_class):
extended_mapping = alchemyjsonschema.default_column_to_schema.copy()
extended_mapping[sqltypes.ARRAY] = 'array'
extended_mapping[sqltypes.JSON] = 'object'
extended_mapping[TSVECTOR] = 'array'
factory = alchemyjsonschema.SchemaFactory(alchemyjsonschema.StructuralWalker,
classifier=alchemyjsonschema.Classifier(extended_mapping))
return factory(objects_class, depth=1)
def filter_by(self, query, conditions, order_criteria=None):
"""
:param query: SQLAlchemy Query object
:type query: sqlalchemy.orm.query.Query
:param conditions: conditions dictionary
:type conditions: dict
:param order_criteria: optional order criteria
:type order_criteria: dict
:return: modified query
:rtype: sqlalchemy.orm.query.Query
"""
return self._filter_or_exclude(query, conditions, order_criteria=order_criteria)
def exclude_by(self, query, conditions):
"""
:param query: SQLAlchemy Query object
:type query: sqlalchemy.orm.query.Query
:param conditions: conditions dictionary
:type conditions: dict
:return: modified query
:rtype: sqlalchemy.orm.query.Query
"""
return self._filter_or_exclude(query, {'not': {'and': conditions}})
    def _filter_or_exclude(self, query, conditions, default_op=None, order_criteria=None):
        """
        Builds filter and order expressions from conditions, joins every
        relationship they reference, and returns the modified query.
        :param query: SQLAlchemy Query object
        :type query: sqlalchemy.orm.query.Query
        :param conditions: conditions dictionary
        :type conditions: dict
        :param default_op: a default operator to join all filter expressions
        :type default_op: function
        :param order_criteria: optional order criteria
        :type order_criteria: dict
        :return: modified query
        :rtype: sqlalchemy.orm.query.Query
        """
        # shared state filled in by the expression builders: which aliases
        # were created and which relationship chains must be joined
        relationships = {
            'aliases': {},
            'join_chains': [],
        }
        expressions = self._build_filter_expressions(conditions, default_op, relationships)
        order_expressions = []
        if order_criteria:
            order_expressions = self._build_order_expressions(order_criteria, relationships)
        # joins must be applied before filtering; distinct only when actually
        # filtering, because joining to-many relations can multiply rows
        query = self._apply_joins(query, relationships, distinct=expressions is not None)
        if expressions is not None:
            query = query.filter(expressions)
        if order_criteria and order_expressions is not None:
            query = query.order_by(*order_expressions)
        return query
    def _apply_joins(self, query, relationships, distinct=True):
        """
        Joins every relationship chain collected while building filter/order
        expressions. Only the longest chains are joined: a chain whose
        relation set is a subset of another chain's is covered by joining the
        longer one. A chain is joined as an outer join when any duplicate of
        it was flagged outer.
        """
        longest_chains = []
        for chain_a, chain_a_ext, chain_a_is_outer in relationships['join_chains']:
            is_longest = True
            any_is_outer = chain_a_is_outer
            for chain_b, chain_b_ext, chain_b_is_outer in relationships['join_chains']:
                if chain_a == chain_b:
                    # identical chain recorded twice: merge the outer flag
                    if chain_b_is_outer:
                        any_is_outer = True
                    continue
                if set(chain_a).issubset(chain_b):
                    is_longest = False
                    break
            if is_longest and (chain_a_ext, any_is_outer) not in longest_chains:
                longest_chains.append((chain_a_ext, any_is_outer))
        if not longest_chains:
            return query
        for chain, chain_is_outer in longest_chains:
            for alias, relation in chain:
                # from_joinpoint chains each join onto the previous one
                query = query.join((alias, relation), from_joinpoint=True, isouter=chain_is_outer)
            query = query.reset_joinpoint()
        # joined to-many relations can multiply rows, hence distinct when filtering
        return query.distinct() if distinct else query
def _build_filter_expressions(self, conditions, default_op, relationships):
"""
:param conditions: conditions dictionary
:type conditions: dict
:param default_op: a default operator to join all filter expressions
:type default_op: function
:param relationships: a dict with all joins to apply, describes current state in recurrent calls
:type relationships: dict
:return: expressions list
:rtype: list
"""
if default_op is None:
default_op = and_
expressions = []
for arg, value in conditions.items():
if arg in self._logical_operators:
expression = self._parse_logical_op(arg, value, self._logical_operators[arg], relationships)
else:
expression = self._parse_tokens(self.objects_class, arg.split('__'), value, relationships,
lambda c, n, v: operators.eq(n, self.deserialize_column(c, v)))
if expression is not None:
expressions.append(expression)
result = None
if len(expressions) > 1:
result = default_op(*expressions) if default_op != not_ else not_(and_(*expressions))
elif len(expressions) == 1:
result = expressions[0] if default_op != not_ else not_(expressions[0])
return result
def _parse_logical_op(self, arg, value, default_op, relationships):
"""
:param arg: condition name
:type arg: str
:param value: condition value
:type value: dict | list
:param default_op: a default operator to join all filter expressions
:type default_op: function
:param relationships: a dict with all joins to apply, describes current state in recurrent calls
:type relationships: dict
:return: expressions list
:rtype: list
"""
if isinstance(value, dict):
return self._build_filter_expressions(value, default_op, relationships)
if not isinstance(value, list):
raise HTTPBadRequest('Invalid attribute', 'Filter attribute {} is invalid'.format(arg))
expressions = []
for subconditions in value:
if not isinstance(subconditions, dict):
raise HTTPBadRequest('Invalid attribute', 'Filter attribute {} is invalid'.format(arg))
subexpressions = self._build_filter_expressions(subconditions, and_, relationships)
if subexpressions is not None:
expressions.append(subexpressions)
result = None
if len(expressions) > 1:
result = default_op(*expressions) if default_op != not_ else not_(and_(*expressions))
elif len(expressions) == 1:
result = expressions[0] if default_op != not_ else not_(expressions[0])
return result
    def _parse_tokens(self, obj_class, tokens, value, relationships, default_expression=None):
        """
        Parses a `__`-separated filter/order attribute into a SQLAlchemy expression.
        Tokens may name relations (followed via joins), then a column, then an
        operator from `_underscore_operators` and/or SQL function names
        (func/sfunc/efunc variants).
        :param obj_class: model class the first token is resolved against
        :param tokens: attribute name split on '__'
        :type tokens: list
        :param value: the filter value (type depends on the operator)
        :param relationships: a dict with all joins to apply, describes current state in recurrent calls
        :type relationships: dict
        :param default_expression: callable(column, column_name, value) used when
                                   no explicit operator token is present
        :return: an expression, or None when the attribute is unknown/ignored
        """
        column_name = None
        column = None
        column_alias = obj_class
        mapper = inspect(obj_class)
        # Plain relation names and (alias, relation) pairs for joins made here.
        join_chain = []
        join_chain_ext = []
        join_is_outer = False
        for index, token in enumerate(tokens):
            if token == CollectionResource.PARAM_TEXT_QUERY:
                # Full text search is delegated to the model's own query builder.
                query_method = getattr(obj_class, 'get_term_query', None)
                if not callable(query_method):
                    raise HTTPBadRequest('Invalid attribute', 'Param {} is invalid, specific object '
                                         'can\'t provide a query'.format('__'.join(tokens)))
                return query_method(self=obj_class, column_alias=column_alias, column_name=column_name, value=value,
                                    default_op=or_ if tokens[-1] == 'or' else and_)
            if column_name is not None and token in self._underscore_operators:
                op = self._underscore_operators[token]
                if op in [operators.between_op, operators.in_op]:
                    if not isinstance(value, list):
                        value = [value]
                # isnull is the only operator where the value is not of the same type as the column
                if token != 'isnull' and token != 'isnotnull':
                    if isinstance(value, list):
                        value = list(map(lambda x: self.deserialize_column(column, x), value))
                    else:
                        value = self.deserialize_column(column, value)
                if op == Function:
                    # func: wrap column in function(s) then compare with value;
                    # sfunc: final function takes no value argument;
                    # efunc: the value itself is parsed as another column expression.
                    expression = column_name
                    if len(tokens[index+1:]) > 1:
                        if token == 'efunc':
                            value = self._parse_tokens(obj_class, tokens[index+1:-1], value, relationships,
                                                       lambda c, n, v: n)
                        else:
                            for func_name in tokens[index+1:-1]:
                                expression = Function(func_name, expression)
                        if tokens[-1] in self._underscore_operators:
                            expression = self._underscore_operators[tokens[-1]](expression, value)
                        else:
                            if token != 'sfunc':
                                expression = Function(tokens[-1], expression, value)
                            else:
                                expression = Function(tokens[-1], expression)
                else:
                    expression = op(column_name, value)
                if token == 'isnull':
                    # Filtering on a relation being NULL requires an outer join.
                    join_is_outer = True
                if join_chain:
                    relationships['join_chains'].append((join_chain, join_chain_ext, join_is_outer))
                return expression
            if token in mapper.relationships:
                # follow the relation and change current obj_class and mapper
                obj_class = mapper.relationships[token].mapper.class_
                mapper = mapper.relationships[token].mapper
                column_alias, is_new_alias = self.next_alias(relationships['aliases'], token, obj_class,
                                                             prefix=relationships.get('prefix', ''))
                join_chain.append(token)
                join_chain_ext.append((column_alias, token))
                continue
            if token not in mapper.column_attrs:
                if self.IGNORE_UNKNOWN_FILTER:
                    return None
                # if token is not an op or relation it has to be a valid column
                raise HTTPBadRequest('Invalid attribute', 'Param {} is invalid, part {} is expected '
                                     'to be a known column name'.format('__'.join(tokens), token))
            column_name = getattr(column_alias, token)
            """:type column: sqlalchemy.schema.Column"""
            column = mapper.columns[token]
        if join_chain:
            relationships['join_chains'].append((join_chain, join_chain_ext, join_is_outer))
        if column_name is not None and default_expression is not None:
            # if last token was a relation it's just going to be ignored
            return default_expression(column, column_name, value)
        return None
@staticmethod
def get_tsquery(value, default_op):
if isinstance(value, list):
tq = func.plainto_tsquery('english', value.pop())
while len(value):
tq = tq.op('||' if default_op == or_ else '&&')(func.plainto_tsquery('english', value.pop()))
else:
tq = func.plainto_tsquery('english', value)
return tq
@staticmethod
def next_alias(aliases, name, obj_class, use_existing=True, prefix=''):
is_new = True
if name in aliases:
if use_existing:
is_new = False
else:
aliases[name]['number'] += 1
aliases[name]['aliased'].append(
aliased(obj_class, name=prefix + name + '_' + str(aliases[name]['number'])))
else:
aliases[name] = {'number': 1,
'aliased': [aliased(obj_class, name=prefix + name + '_1')]}
return aliases[name]['aliased'][-1], is_new
def order_by(self, query, criteria):
"""
:param query: SQLAlchemy Query object
:type query: sqlalchemy.orm.query.Query
:return: modified query
:rtype: sqlalchemy.orm.query.Query
"""
relationships = {
'aliases': {},
'join_chains': [],
}
expressions = self._build_order_expressions(criteria, relationships)
query = self._apply_joins(query, relationships, distinct=False)
if expressions is not None:
query = query.order_by(*expressions)
return query
def _build_order_expressions(self, criteria, relationships):
"""
:param criteria: criteria dictionary
:type criteria: dict
:param relationships: a dict with all joins to apply, describes current state in recurrent calls
:type relationships: dict
:return: expressions list
:rtype: list
"""
expressions = []
if isinstance(criteria, dict):
criteria = list(criteria.items())
for arg in criteria:
if isinstance(arg, tuple):
arg, value = arg
else:
value = None
is_ascending = True
if len(arg) and arg[0] == '+' or arg[0] == '-':
is_ascending = arg[:1] == '+'
arg = arg[1:]
expression = self._parse_tokens(self.objects_class, arg.split('__'), value, relationships,
lambda c, n, v: n)
if expression is not None:
expressions.append(expression if is_ascending else desc(expression))
return expressions
def clean_relations(self, relations):
"""
Checks all special values in relations and makes sure to always return either a list or None.
:param relations: relation names
:type relations: str | list
:return: either a list (may be empty) or None if all relations should be included
:rtype: list[str] | None
"""
if relations == '':
return []
elif relations == self.PARAM_RELATIONS_ALL:
return None
elif isinstance(relations, str):
return [relations]
return relations
@staticmethod
def save_resource(obj, data, db_session):
"""
Extracts relation dicts from data, saves them and then updates the main object.
:param obj: a new or existing model
:type obj: object
:param data: data to assign to the model and/or its relations
:type data: dict
:param db_session: SQLAlchemy session
:type db_session: sqlalchemy.orm.session.Session
"""
# fetching related objects should not trigger saving of main object,
# because FKs could not have been set yet
autoflush = db_session.autoflush
db_session.autoflush = False
mapper = inspect(obj).mapper
for key, value in data.items():
if key not in mapper.relationships and getattr(obj, key) != value:
setattr(obj, key, value)
db_session.add(obj)
for key, value in data.items():
if key not in mapper.relationships:
continue
related_mapper = mapper.relationships[key].mapper
pk = related_mapper.primary_key[0].name
if isinstance(value, list):
keys = []
objects = getattr(obj, key)
reindexed = {getattr(related, pk): index for index, related in enumerate(objects)}
for item in value:
if isinstance(item, dict):
if pk in item and item[pk] in reindexed:
AlchemyMixin.save_resource(objects[reindexed[item[pk]]], item, db_session)
reindexed.pop(item[pk])
else:
objects.append(AlchemyMixin.update_or_create(db_session, related_mapper, item))
else:
if item in reindexed:
reindexed.pop(item)
else:
keys.append(item)
for index in reindexed.values():
del objects[index]
if keys:
expression = related_mapper.primary_key[0].in_(keys)
objects += db_session.query(related_mapper.class_).filter(expression).all()
else:
rel_obj = getattr(obj, key)
if isinstance(value, dict):
relationship = mapper.relationships[key]
if (relationship.direction == MANYTOONE or relationship.uselist)\
and (pk not in value or rel_obj is None):
setattr(obj, key, AlchemyMixin.update_or_create(db_session, related_mapper, value))
else:
AlchemyMixin.save_resource(rel_obj, value, db_session)
elif rel_obj is None or getattr(rel_obj, pk) != value:
expression = related_mapper.primary_key[0].__eq__(value)
setattr(obj, key, db_session.query(related_mapper.class_).filter(expression).first())
db_session.autoflush = autoflush
return obj
@staticmethod
def update_or_create(db_session, mapper, attributes):
"""
Updated the record if attributes contain the primary key value(s) and creates it if they don't.
:param db_session:
:type db_session: sqlalchemy.orm.session.Session
:param mapper:
:type mapper: sqlalchemy.orm.mapper.Mapper
:param attributes:
:type attributes: dict
:return:
:rtype: object
"""
query_attrs = {}
for key in mapper.primary_key:
if key in attributes:
query_attrs[key] = attributes.pop(key)
if query_attrs:
obj = db_session.query(mapper.class_).get(query_attrs[0] if len(query_attrs) == 1 else tuple(query_attrs))
else:
obj = mapper.class_()
if attributes:
return AlchemyMixin.save_resource(obj, attributes, db_session)
return obj
    @staticmethod
    def get_or_create(db_session, model_class, query_attrs, update_attrs=None, update_existing=False):
        """
        Fetches the record and if it doesn't exist yet, creates it, handling a race condition.
        :param db_session: session within DB connection
        :type db_session: sqlalchemy.orm.session.Session
        :param model_class: class of the model to return or create
        :type model_class: class
        :param query_attrs: attributes used to fetch the model
        :type query_attrs: dict
        :param update_attrs: attributes used to create a new model; NOTE: the
                             caller's dict is mutated (query_attrs merged in)
        :type update_attrs: dict
        :param update_existing: if True and update_attrs are set, updates existing records
        :type update_existing: bool
        :return: existing or new object and a flag if existing or new object is being returned
        :rtype: tuple
        """
        query = db_session.query(model_class).filter_by(**query_attrs)
        existing = query.one_or_none()
        if existing:
            if update_existing and update_attrs is not None:
                for key, value in update_attrs.items():
                    if getattr(existing, key) != value:
                        setattr(existing, key, value)
            return existing, False
        # Not found: insert inside a nested transaction (SAVEPOINT), so losing
        # a race to a concurrent insert only rolls back this insert.
        db_session.begin_nested()
        try:
            if update_attrs is None:
                update_attrs = query_attrs
            else:
                update_attrs.update(query_attrs)
            new_object = model_class(**update_attrs)
            db_session.add(new_object)
            db_session.commit()
        except IntegrityError:
            # Lost the race: another transaction inserted the same record.
            db_session.rollback()
            existing = query.one_or_none()
            # NOTE(review): `existing` can still be None here (e.g. the row was
            # removed again meanwhile) - callers may not expect that; verify.
            if update_existing and update_attrs is not None:
                for key, value in update_attrs.items():
                    if getattr(existing, key) != value:
                        setattr(existing, key, value)
            return existing, False
        return new_object, True
@staticmethod
def get_default_schema(model_class, method='POST'):
"""
Returns a schema to be used in falconjsonio.schema.request_schema decorator
:return:
"""
schema = {
'type': 'object',
'properties': {
},
}
if method == 'POST':
schema['required'] = []
return schema
class CollectionResource(AlchemyMixin, BaseCollectionResource):
    """
    Allows to fetch a collection of a resource (GET) and to create new resource in that collection (POST).
    May be extended to allow batch operations (ex. PATCH).
    When fetching a collection (GET), following params are supported:
    * limit, offset - for pagination
    * total_count - to calculate total number of items matching filters, without pagination
    * relations - list of relation names to include in the result, uses special value `_all` for all relations
    * all other params are treated as filters, syntax mimics Django filters, see `AlchemyMixin._underscore_operators`
    User input can be validated by attaching the `falconjsonio.schema.request_schema()` decorator.
    """
    # PostgreSQL error code for a unique constraint violation.
    VIOLATION_UNIQUE = '23505'
    def __init__(self, objects_class, db_engine, max_limit=None, eager_limit=None):
        """
        :param objects_class: class represent single element of object lists that suppose to be returned
        :param db_engine: SQL Alchemy engine
        :type db_engine: sqlalchemy.engine.Engine
        :param max_limit: max limit of elements that suppose to be returned by default
        :type max_limit: int
        :param eager_limit: if None or the value of limit param is greater than this, subquery eager loading
                            will be enabled
        :type eager_limit: int
        """
        super(CollectionResource, self).__init__(objects_class, max_limit)
        self.db_engine = db_engine
        self.eager_limit = eager_limit
        # Register a default JSON schema for POST payload validation.
        if not hasattr(self, '__request_schemas__'):
            self.__request_schemas__ = {}
        self.__request_schemas__['POST'] = AlchemyMixin.get_default_schema(objects_class, 'POST')
    def get_queryset(self, req, resp, db_session=None, limit=None):
        """
        Return a query object used to fetch data.
        :param req: Falcon request
        :type req: falcon.request.Request
        :param resp: Falcon response
        :type resp: falcon.response.Response
        :param db_session: SQLAlchemy session
        :type db_session: sqlalchemy.orm.session.Session
        :param limit: max number of records fetched
        :type limit: int | None
        :return: a query from `object_class`
        """
        query = db_session.query(self.objects_class)
        relations = self.clean_relations(self.get_param_or_post(req, self.PARAM_RELATIONS, ''))
        # Enable subquery eager loading for the requested relations when the
        # page is large enough (or no threshold is configured).
        if self.eager_limit is None or (limit is not None and limit > self.eager_limit):
            if relations is None:
                query = query.options(subqueryload('*'))
            elif len(relations):
                for relation in relations:
                    query = query.options(subqueryload(relation))
        # The search param may carry a JSON-encoded dict of extra filters.
        search = self.get_param_or_post(req, self.PARAM_SEARCH)
        if search:
            try:
                req.params.update(json.loads(search) if isinstance(search, str) else search)
            except ValueError:
                raise HTTPBadRequest('Invalid attribute',
                                     'Value of {} filter attribute is invalid'.format(self.PARAM_SEARCH))
        order = self.get_param_or_post(req, self.PARAM_ORDER)
        if order:
            # Order may be given as JSON (dict/list) or a plain attribute name.
            if isinstance(order, str):
                if (order[0] == '{' and order[-1] == '}') or (order[0] == '[' and order[-1] == ']'):
                    try:
                        order = json.loads(order)
                    except ValueError:
                        # not valid json, ignore and try to parse as an ordinary list of attributes
                        pass
            if not isinstance(order, list) and not isinstance(order, dict):
                order = [order]
            return self.filter_by(query, req.params, order)
        # No explicit order - fall back to primary key order for stable paging.
        primary_keys = inspect(self.objects_class).primary_key
        return self.filter_by(query, req.params).order_by(*primary_keys)
    def get_total_objects(self, queryset, totals):
        """
        Executes the aggregate queries described by `totals` and returns results
        as a (nested, when grouping) dict keyed by 'total_<aggregate>'.
        :param queryset: queryset to compute totals over
        :param totals: list of aggregate specs, see `_build_total_expressions`
        :type totals: list
        :return: mapping of totals, nested by group-by dimension values
        :rtype: dict
        """
        if not totals:
            return {}
        agg_query, dimensions = self._build_total_expressions(queryset, totals)
        def nested_dict(n, type):
            """Creates an n-dimension dictionary where the n-th dimension is of type 'type'
            """
            if n <= 1:
                return type()
            return collections.defaultdict(lambda: nested_dict(n - 1, type))
        result = nested_dict(len(dimensions) + 2, None)
        for aggs in queryset.session.execute(agg_query):
            for metric_key, metric_value in aggs.items():
                if metric_key in dimensions:
                    continue
                # Walk one dict level down per group-by dimension.
                last_result = result
                last_key = 'total_' + metric_key
                for dimension in dimensions:
                    last_result = last_result[last_key]
                    last_key = str(aggs[dimension])
                last_result[last_key] = metric_value if not isinstance(metric_value, Decimal) else float(metric_value)
        return result
    def _build_total_expressions(self, queryset, totals):
        """
        Builds an aggregate (and optionally grouped) query out of the `totals` spec.
        :param totals: list of {aggregate_name: columns} dicts; special keys are
                       `AGGR_GROUPBY` (group-by columns) and `AGGR_GROUPLIMIT`
                       (max rows per group)
        :type totals: list
        :return: the aggregate query and the list of group-by column names
        :rtype: tuple
        """
        mapper = inspect(self.objects_class)
        primary_keys = mapper.primary_key
        # Dedicated alias prefix so totals joins don't clash with filter joins.
        relationships = {
            'aliases': {},
            'join_chains': [],
            'prefix': 'totals_',
        }
        aggregates = []
        group_cols = OrderedDict()
        group_by = []
        group_limit = None
        for total in totals:
            for aggregate, columns in total.items():
                if aggregate == self.AGGR_GROUPLIMIT:
                    if not isinstance(columns, int):
                        raise HTTPBadRequest('Invalid attribute', 'Group limit option requires an integer value')
                    group_limit = columns
                    continue
                if not columns:
                    if aggregate == self.AGGR_GROUPBY:
                        raise HTTPBadRequest('Invalid attribute', 'Group by option requires at least one column name')
                    # No columns given - aggregate over the primary key(s).
                    if len(primary_keys) > 1:
                        aggregates.append(Function(aggregate, func.row(*primary_keys)).label(aggregate))
                    else:
                        aggregates.append(Function(aggregate, *primary_keys).label(aggregate))
                    continue
                if not isinstance(columns, list):
                    columns = [columns]
                for column in columns:
                    # Column specs may traverse relations using `__`, like filters.
                    expression = self._parse_tokens(self.objects_class, column.split('__'), None, relationships,
                                                    lambda c, n, v: n)
                    if expression is not None:
                        if aggregate == self.AGGR_GROUPBY:
                            group_cols[column] = expression.label(column)
                            group_by.append(expression)
                        else:
                            aggregates.append(Function(aggregate, expression).label(aggregate))
        agg_query = self._apply_joins(queryset, relationships, distinct=False)
        group_cols_expr = list(group_cols.values())
        columns = group_cols_expr + aggregates
        if group_limit:
            # Number rows within each group (partitioned by all but the last
            # group-by column, ordered by the aggregates) so groups can be
            # truncated to `group_limit` rows below.
            row_order = list(map(lambda c: c.desc(), aggregates))
            columns.append(func.row_number().over(partition_by=group_cols_expr[:-1],
                                                  order_by=row_order).label('row_number'))
            # Positional ORDER BY: group-by columns ascending, aggregates descending.
            order = ','.join(list(map(str, range(1, len(group_cols_expr) + 1)))
                             + list(map(lambda c: str(c) + ' DESC', range(1 + len(group_cols_expr),
                                                                          len(aggregates) + len(group_cols_expr) + 1))))
            agg_query = agg_query.statement.with_only_columns(columns).order_by(None).order_by(order)
        if group_by:
            agg_query = agg_query.group_by(*group_by)
        if group_limit:
            # Wrap in a subquery and keep only the top rows of each group.
            subquery = agg_query.alias()
            agg_query = select([subquery]).where(subquery.c.row_number <= group_limit)
        return agg_query, list(group_cols.keys())
    def get_object_list(self, queryset, limit=None, offset=None):
        """
        Applies pagination to the queryset.
        :param queryset: queryset to paginate
        :param limit: max number of records, capped by `max_limit`, floored at 0
        :type limit: int | None
        :param offset: number of records to skip, floored at 0
        :type offset: int | None
        :return: the paginated queryset
        """
        if limit is None:
            limit = self.max_limit
        if offset is None:
            offset = 0
        if limit is not None:
            if self.max_limit is not None:
                limit = min(limit, self.max_limit)
            limit = max(limit, 0)
            queryset = queryset.limit(limit)
        offset = max(offset, 0)
        return queryset.offset(offset)
    def on_get(self, req, resp):
        """
        Handles GET: returns a filtered, paginated collection plus totals,
        with x-api-total / x-api-returned headers.
        """
        limit = self.get_param_or_post(req, self.PARAM_LIMIT)
        offset = self.get_param_or_post(req, self.PARAM_OFFSET)
        if limit is not None:
            limit = int(limit)
        if offset is not None:
            offset = int(offset)
        totals = self.get_param_totals(req)
        # retrieve that param without removing it so self.get_queryset() so it can also use it
        relations = self.clean_relations(self.get_param_or_post(req, self.PARAM_RELATIONS, '', pop_params=False))
        with self.session_scope(self.db_engine) as db_session:
            query = self.get_queryset(req, resp, db_session, limit)
            totals = self.get_total_objects(query, totals)
            object_list = self.get_object_list(query, limit, offset)
            # Serialization must happen inside the session scope while the
            # objects are still attached.
            serialized = [self.serialize(obj, relations_include=relations,
                                         relations_ignore=list(getattr(self, 'serialize_ignore', [])))
                          for obj in object_list]
            result = {'results': serialized,
                      'total': totals['total_count'] if 'total_count' in totals else None,
                      'returned': len(serialized)}  # avoid calling object_list.count() which executes the query again
        result.update(totals)
        headers = {'x-api-total': str(result['total']) if result['total'] is not None else '',
                   'x-api-returned': str(result['returned'])}
        resp.set_headers(headers)
        self.render_response(result, req, resp)
    def on_head(self, req, resp):
        """
        Handles HEAD: like GET but returns only counting headers and no body.
        """
        limit = self.get_param_or_post(req, self.PARAM_LIMIT)
        offset = self.get_param_or_post(req, self.PARAM_OFFSET)
        if limit is not None:
            limit = int(limit)
        if offset is not None:
            offset = int(offset)
        totals = self.get_param_totals(req)
        with self.session_scope(self.db_engine) as db_session:
            query = self.get_queryset(req, resp, db_session, limit)
            totals = self.get_total_objects(query, totals)
            object_list = self.get_object_list(query, limit, offset)
            # NOTE(review): len() over the queryset presumably executes it here;
            # verify the queryset type actually supports len().
            headers = {'x-api-total': str(totals['total_count']) if 'total_count' in totals else '',
                       'x-api-returned': str(len(object_list))}
        resp.set_headers(headers)
        resp.status = falcon.HTTP_NO_CONTENT
    def create(self, req, resp, data, db_session=None):
        """
        Create a new or update an existing record using provided data.
        :param req: Falcon request
        :type req: falcon.request.Request
        :param resp: Falcon response
        :type resp: falcon.response.Response
        :param data:
        :type data: dict
        :param db_session: SQLAlchemy session
        :type db_session: sqlalchemy.orm.session.Session
        :return: created object, serialized to a dict
        :rtype: dict
        """
        relations = self.clean_relations(self.get_param_or_post(req, self.PARAM_RELATIONS, ''))
        resource = self.save_resource(self.objects_class(), data, db_session)
        db_session.commit()
        return self.serialize(resource, relations_include=relations,
                              relations_ignore=list(getattr(self, 'serialize_ignore', [])))
    def on_post(self, req, resp, *args, **kwargs):
        """
        Handles POST: validates the payload and creates a new resource,
        translating DB constraint violations into HTTP 409.
        """
        data = self.deserialize(req.context['doc'] if 'doc' in req.context else None)
        data, errors = self.clean(data)
        if errors:
            result = {'errors': errors}
            status_code = falcon.HTTP_BAD_REQUEST
            self.render_response(result, req, resp, status_code)
            return
        try:
            with self.session_scope(self.db_engine) as db_session:
                result = self.create(req, resp, data, db_session=db_session)
        except IntegrityError:
            raise HTTPConflict('Conflict', 'Unique constraint violated')
        except ProgrammingError as err:
            # Cases such as unallowed NULL value should have been checked before we got here (e.g. validate against
            # schema using falconjsonio) - therefore assume this is a UNIQUE constraint violation
            if len(err.orig.args) > 1 and err.orig.args[1] == self.VIOLATION_UNIQUE:
                raise HTTPConflict('Conflict', 'Unique constraint violated')
            raise
        status_code = falcon.HTTP_CREATED
        self.render_response(result, req, resp, status_code)
class SingleResource(AlchemyMixin, BaseSingleResource):
    """
    Allows to fetch a single resource (GET) and to update (PATCH, PUT) or remove it (DELETE).
    When fetching a resource (GET), following params are supported:
    * relations - list of relation names to include in the result, uses special value `_all` for all relations
    User input can be validated by attaching the `falconjsonio.schema.request_schema()` decorator.
    """
    # PostgreSQL error code for a foreign key constraint violation.
    VIOLATION_FOREIGN_KEY = '23503'
    def __init__(self, objects_class, db_engine):
        """
        :param objects_class: class represent single element of object lists that suppose to be returned
        :param db_engine: SQL Alchemy engine
        :type db_engine: sqlalchemy.engine.Engine
        """
        super(SingleResource, self).__init__(objects_class)
        self.db_engine = db_engine
        # Register default JSON schemas for POST/PUT payload validation.
        if not hasattr(self, '__request_schemas__'):
            self.__request_schemas__ = {}
        self.__request_schemas__['POST'] = AlchemyMixin.get_default_schema(objects_class, 'POST')
        self.__request_schemas__['PUT'] = AlchemyMixin.get_default_schema(objects_class, 'POST')
    def get_object(self, req, resp, path_params, for_update=False, db_session=None):
        """
        Fetches exactly one object matching both URL path params and query filters.
        :param req: Falcon request
        :type req: falcon.request.Request
        :param resp: Falcon response
        :type resp: falcon.response.Response
        :param path_params: path params extracted from URL path
        :type path_params: dict
        :param for_update: if the object is going to be updated or deleted
        :type for_update: bool
        :param db_session: SQLAlchemy session
        :type db_session: sqlalchemy.orm.session.Session
        :return: the matching object
        :raises HTTPNotFound: when no record matches
        :raises HTTPBadRequest: when more than one record matches
        """
        query = db_session.query(self.objects_class)
        if for_update:
            # Row-level lock (SELECT ... FOR UPDATE) for the update/delete path.
            query = query.with_for_update()
        for key, value in path_params.items():
            attr = getattr(self.objects_class, key, None)
            query = query.filter(attr == value)
        # All remaining query params (except `relations`) act as extra filters.
        conditions = dict(req.params)
        if self.PARAM_RELATIONS in conditions:
            conditions.pop(self.PARAM_RELATIONS)
        query = self.filter_by(query, conditions)
        try:
            obj = query.one()
        except NoResultFound:
            raise HTTPNotFound()
        except MultipleResultsFound:
            raise HTTPBadRequest('Multiple results', 'Query params match multiple records')
        return obj
    def on_get(self, req, resp, *args, **kwargs):
        """
        Handles GET: returns a single serialized resource.
        """
        relations = self.clean_relations(self.get_param_or_post(req, self.PARAM_RELATIONS, ''))
        with self.session_scope(self.db_engine) as db_session:
            obj = self.get_object(req, resp, kwargs, db_session=db_session)
            result = self.serialize(obj,
                                    relations_include=relations,
                                    relations_ignore=list(getattr(self, 'serialize_ignore', [])))
        self.render_response(result, req, resp)
    def on_head(self, req, resp, *args, **kwargs):
        """
        Handles HEAD: only checks that the resource exists.
        """
        with self.session_scope(self.db_engine) as db_session:
            # call get_object to check if it exists
            self.get_object(req, resp, kwargs, db_session=db_session)
        resp.status = falcon.HTTP_NO_CONTENT
    def delete(self, req, resp, obj, db_session=None):
        """
        Delete an existing record.
        :param req: Falcon request
        :type req: falcon.request.Request
        :param resp: Falcon response
        :type resp: falcon.response.Response
        :param obj: the object to delete
        :param db_session: SQLAlchemy session
        :type db_session: sqlalchemy.orm.session.Session
        """
        deleted = db_session.delete(obj)
        # NOTE(review): Session.delete() returns None, so this check never
        # fires; it looks ported from a bulk Query.delete() - verify intent.
        if deleted == 0:
            raise falcon.HTTPConflict('Conflict', 'Resource found but conditions violated')
    def on_delete(self, req, resp, *args, **kwargs):
        """
        Handles DELETE: removes the resource, translating FK violations to 409.
        """
        try:
            with self.session_scope(self.db_engine) as db_session:
                obj = self.get_object(req, resp, kwargs, for_update=True, db_session=db_session)
                self.delete(req, resp, obj, db_session)
        except (IntegrityError, ProgrammingError) as err:
            # This should only be caused by foreign key constraint being violated
            if isinstance(err, IntegrityError) or err.orig.args[1] == self.VIOLATION_FOREIGN_KEY:
                raise HTTPConflict('Conflict', 'Other content links to this')
            else:
                raise
        self.render_response({}, req, resp)
    def update(self, req, resp, data, obj, db_session=None):
        """
        Create a new or update an existing record using provided data.
        :param req: Falcon request
        :type req: falcon.request.Request
        :param resp: Falcon response
        :type resp: falcon.response.Response
        :param data:
        :type data: dict
        :param obj: the object to update
        :param db_session: SQLAlchemy session
        :type db_session: sqlalchemy.orm.session.Session
        :return: created or updated object, serialized to a dict
        :rtype: dict
        """
        relations = self.clean_relations(self.get_param_or_post(req, self.PARAM_RELATIONS, ''))
        resource = self.save_resource(obj, data, db_session)
        db_session.commit()
        return self.serialize(resource, relations_include=relations,
                              relations_ignore=list(getattr(self, 'serialize_ignore', [])))
    def on_put(self, req, resp, *args, **kwargs):
        """
        Handles PUT: validates the payload and updates the locked resource,
        translating DB constraint violations into HTTP 409.
        """
        status_code = falcon.HTTP_OK
        try:
            with self.session_scope(self.db_engine) as db_session:
                obj = self.get_object(req, resp, kwargs, for_update=True, db_session=db_session)
                data = self.deserialize(req.context['doc'] if 'doc' in req.context else None)
                data, errors = self.clean(data)
                if errors:
                    result = {'errors': errors}
                    status_code = falcon.HTTP_BAD_REQUEST
                else:
                    result = self.update(req, resp, data, obj, db_session)
        except (IntegrityError, ProgrammingError) as err:
            # Cases such as unallowed NULL value should have been checked before we got here (e.g. validate against
            # schema using falconjsonio) - therefore assume this is a UNIQUE constraint violation
            if isinstance(err, IntegrityError) or err.orig.args[1] == self.VIOLATION_FOREIGN_KEY:
                raise HTTPConflict('Conflict', 'Unique constraint violated')
            else:
                raise
        self.render_response(result, req, resp, status_code)
# add icontains for sqlalchemy resource
from collections import OrderedDict
from contextlib import contextmanager
from datetime import datetime, time
from decimal import Decimal
from enum import Enum
import collections
from functools import lru_cache
import alchemyjsonschema
import falcon
import rapidjson as json
from falcon import HTTPConflict, HTTPBadRequest, HTTPNotFound
from sqlalchemy import inspect
from sqlalchemy.dialects.postgresql import TSVECTOR
from sqlalchemy.exc import IntegrityError, ProgrammingError
from sqlalchemy.orm import sessionmaker, subqueryload, aliased
from sqlalchemy.orm.base import MANYTOONE
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from sqlalchemy.sql import sqltypes, operators, extract, func
from sqlalchemy.sql.expression import and_, or_, not_, desc, select
from sqlalchemy.sql.functions import Function
from api.resources.base import BaseCollectionResource, BaseSingleResource
class AlchemyMixin(object):
    """
    Provides serialize and deserialize methods to convert between JSON and SQLAlchemy datatypes.
    """
    MULTIVALUE_SEPARATOR = ','
    PARAM_RELATIONS = 'relations'
    PARAM_RELATIONS_ALL = '_all'
    DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
    # When True, to-many relations serialize as lists, otherwise as id-keyed dicts.
    RELATIONS_AS_LIST = True
    # When True, unknown filter attributes are ignored instead of raising HTTPBadRequest.
    IGNORE_UNKNOWN_FILTER = False
    # Maps Django-style filter suffixes to SQLAlchemy operators / expression builders.
    # LIKE/ILIKE wildcards in user input are escaped with a backslash; the doubled
    # backslashes below fix Python invalid-escape-sequence warnings ('\\%' is the
    # same runtime string as the former '\%').
    _underscore_operators = {
        'exact': operators.eq,
        'notexact': operators.ne,
        'gt': operators.gt,
        'lt': operators.lt,
        'gte': operators.ge,
        'lte': operators.le,
        'range': operators.between_op,
        'notrange': operators.notbetween_op,
        'in': operators.in_op,
        'notin': operators.notin_op,
        'contains': operators.contains_op,
        'notcontains': operators.notcontains_op,
        'icontains': lambda c, x: c.ilike('%' + x.replace('%', '\\%').replace('_', '\\_') + '%'),
        'noticontains': lambda c, x: c.notilike('%' + x.replace('%', '\\%').replace('_', '\\_') + '%'),
        'match': operators.match_op,
        'notmatch': operators.notmatch_op,
        'iexact': operators.ilike_op,
        'notiexact': operators.notilike_op,
        'startswith': operators.startswith_op,
        'notstartswith': operators.notstartswith_op,
        'endswith': operators.endswith_op,
        'notendswith': operators.notendswith_op,
        'hasall': lambda c, x: c.has_all(x),
        'hasany': lambda c, x: c.has_any(x),
        'haskey': lambda c, x: c.has_key(x),  # noqa
        'overlap': lambda c, x: c.op('&&')(x),
        'istartswith': lambda c, x: c.ilike(x.replace('%', '\\%').replace('_', '\\_') + '%'),
        'notistartswith': lambda c, x: c.notilike(x.replace('%', '\\%').replace('_', '\\_') + '%'),
        'iendswith': lambda c, x: c.ilike('%' + x.replace('%', '\\%').replace('_', '\\_')),
        'notiendswith': lambda c, x: c.notilike('%' + x.replace('%', '\\%').replace('_', '\\_')),
        'isnull': lambda c, x: c.is_(None) if x else c.isnot(None),
        'isnotnull': lambda c, x: c.isnot(None) if x else c.is_(None),
        'year': lambda c, x: extract('year', c) == x,
        'month': lambda c, x: extract('month', c) == x,
        'day': lambda c, x: extract('day', c) == x,
        # func/sfunc/efunc apply arbitrary SQL functions, see `_parse_tokens`.
        'func': Function,
        'sfunc': Function,
        'efunc': Function,
    }
    # Logical grouping operators usable as condition keys.
    _logical_operators = {
        'or': or_,
        'and': and_,
        'not': not_,
    }
@classmethod
@contextmanager
def session_scope(cls, db_engine=None, session_class=None):
"""
Provide a scoped db session for a series of operarions.
The session is created immediately before the scope begins, and is closed
on scope exit.
:param db_engine: SQLAlchemy Engine or other Connectable
:type db_engine: sqlalchemy.engine.Connectable
:param session_class: SQLAlchemy Session
:type session_class: sqlalchemy.orm.Session
"""
if session_class is None:
session_class = sessionmaker(bind=db_engine)
db_session = session_class()
try:
yield db_session
db_session.commit()
except:
db_session.rollback()
raise
finally:
db_session.close()
@classmethod
def serialize(cls, obj, skip_primary_key=False, skip_foreign_keys=False, relations_level=1, relations_ignore=None,
relations_include=None):
"""
Converts the object to a serializable dictionary.
:param obj: the object to serialize
:param skip_primary_key: should primary keys be skipped
:type skip_primary_key: bool
:param skip_foreign_keys: should foreign keys be skipped
:type skip_foreign_keys: bool
:param relations_level: how many levels of relations to serialize
:type relations_level: int
:param relations_ignore: relationship names to ignore
:type relations_ignore: list
:param relations_include: relationship names to include
:type relations_include: list
:return: a serializable dictionary
:rtype: dict
"""
data = {}
data = cls.serialize_columns(obj, data, skip_primary_key, skip_foreign_keys)
if relations_level > 0:
if relations_ignore is None:
relations_ignore = []
data = cls.serialize_relations(obj, data, relations_level, relations_ignore, relations_include)
return data
@classmethod
def serialize_columns(cls, obj, data, skip_primary_key=False, skip_foreign_keys=False):
columns = inspect(obj).mapper.columns
for key, column in columns.items():
if skip_primary_key and column.primary_key:
continue
if skip_foreign_keys and len(column.foreign_keys):
continue
if isinstance(column.type, TSVECTOR):
continue
data[key] = cls.serialize_column(column, getattr(obj, key))
return data
@classmethod
def serialize_column(cls, column, value):
if isinstance(value, datetime):
return value.strftime(cls.DATETIME_FORMAT)
elif isinstance(value, time):
return value.isoformat()
elif isinstance(value, Decimal):
return float(value)
elif isinstance(value, Enum):
return value.value
return value
    @classmethod
    def serialize_relations(cls, obj, data, relations_level=1, relations_ignore=None, relations_include=None):
        """
        Serialize relationships of obj into data, recursing through
        cls.serialize up to relations_level levels.
        Many-to-one relations become nested dicts under the relation key;
        scalar (uselist=False) relations are merged into data itself;
        collections become a list or an id-keyed dict per cls.RELATIONS_AS_LIST.
        :return: the same data dict, updated in place
        :rtype: dict
        """
        mapper = inspect(obj).mapper
        for relation in mapper.relationships:
            if relation.key in relations_ignore\
                    or (relations_include is not None and relation.key not in relations_include):
                continue
            rel_obj = getattr(obj, relation.key)
            if rel_obj is None:
                continue
            # Copy the ignore list and add the back-reference so recursion does
            # not serialize the relation pointing back at obj.
            # NOTE(review): the copy is rebound from the previous iteration's
            # copy, so back_populates names accumulate across sibling
            # relations - confirm this accumulation is intended.
            relations_ignore = [] if relations_ignore is None else list(relations_ignore)
            if relation.back_populates:
                relations_ignore.append(relation.back_populates)
            if relation.direction == MANYTOONE:
                data[relation.key] = cls.serialize(rel_obj,
                                                   relations_level=relations_level - 1,
                                                   relations_ignore=relations_ignore)
            elif not relation.uselist:
                # one-to-one: merge the related object's columns (minus its
                # primary key) directly into the parent's dict
                data.update(cls.serialize(rel_obj,
                                          skip_primary_key=True,
                                          relations_level=relations_level - 1,
                                          relations_ignore=relations_ignore))
            else:
                if cls.RELATIONS_AS_LIST:
                    data[relation.key] = [
                        cls.serialize(rel,
                                      skip_primary_key=False,
                                      relations_level=relations_level - 1,
                                      relations_ignore=relations_ignore)
                        for rel in rel_obj
                    ]
                else:
                    data[relation.key] = {
                        str(rel.id): cls.serialize(rel,
                                                   skip_primary_key=True,
                                                   relations_level=relations_level - 1,
                                                   relations_ignore=relations_ignore)
                        for rel in rel_obj if hasattr(rel, 'id')
                    }
        return data
    def deserialize(self, data, mapper=None):
        """
        Converts incoming data to internal types. Detects relation objects. Moves one to one relation attributes
        to a separate key. Silently skips unknown attributes.
        :param data: incoming data
        :type data: dict
        :param mapper: mapper, if None, mapper of the main object class will be used
        :type mapper: sqlalchemy.orm.mapper.Mapper
        :return: data with correct types
        :rtype: dict
        """
        attributes = {}
        if data is None:
            return attributes
        def is_int(s):
            # True when s parses as an integer (used to detect pk-keyed dicts)
            try:
                int(s)
            except ValueError:
                return False
            return True
        if mapper is None:
            mapper = inspect(self.objects_class)
        for key, value in data.items():
            if key in mapper.relationships:
                rel_mapper = mapper.relationships[key].mapper
                # handle a special case, when value is a dict with only all integer keys, then convert it to a list
                if isinstance(value, dict) and all(is_int(pk) for pk in value.keys()):
                    replacement = []
                    for pk, attrs in value.items():
                        # the dict key carries the related row's primary key
                        attrs[rel_mapper.primary_key[0].name] = pk
                        replacement.append(attrs)
                    value = replacement
                if isinstance(value, dict):
                    # nested object - deserialize against the relation's mapper
                    attributes[key] = self.deserialize(value, rel_mapper)
                elif isinstance(value, list):
                    attributes[key] = []
                    for item in value:
                        if isinstance(item, dict):
                            attributes[key].append(self.deserialize(item, rel_mapper))
                        else:
                            # bare primary key values pass through unchanged
                            attributes[key].append(item)
                else:
                    attributes[key] = value
            elif key in mapper.columns:
                attributes[key] = self.deserialize_column(mapper.columns[key], value)
            else:
                # not a column or relation of this mapper: move attributes of
                # one-to-one related objects under their relation's key
                for relation in mapper.relationships:
                    if relation.direction == MANYTOONE or relation.uselist or key not in relation.mapper.columns:
                        continue
                    if relation.key not in attributes:
                        attributes[relation.key] = {}
                    attributes[relation.key][key] = self.deserialize_column(relation.mapper.columns[key], value)
        return attributes
def deserialize_column(self, column, value):
if value is None:
return None
if isinstance(column.type, sqltypes.DateTime):
return datetime.strptime(value, self.DATETIME_FORMAT)
if isinstance(column.type, sqltypes.Time):
hour, minute, second = value.split(':')
return time(int(hour), int(minute), int(second))
if isinstance(column.type, sqltypes.Integer):
return int(value)
if isinstance(column.type, sqltypes.Float):
return float(value)
return value
    @lru_cache(maxsize=None)
    def get_schema(self, objects_class):
        """
        Build (and memoize) a JSON schema for objects_class using
        alchemyjsonschema, with extra type mappings for ARRAY, JSON and
        TSVECTOR columns.
        NOTE(review): lru_cache on an instance method keys on self and keeps
        every instance alive for the cache's lifetime - acceptable only if
        resources are long-lived singletons; confirm.
        """
        extended_mapping = alchemyjsonschema.default_column_to_schema.copy()
        extended_mapping[sqltypes.ARRAY] = 'array'
        extended_mapping[sqltypes.JSON] = 'object'
        extended_mapping[TSVECTOR] = 'array'
        factory = alchemyjsonschema.SchemaFactory(alchemyjsonschema.StructuralWalker,
                                                  classifier=alchemyjsonschema.Classifier(extended_mapping))
        return factory(objects_class, depth=1)
def filter_by(self, query, conditions, order_criteria=None):
"""
:param query: SQLAlchemy Query object
:type query: sqlalchemy.orm.query.Query
:param conditions: conditions dictionary
:type conditions: dict
:param order_criteria: optional order criteria
:type order_criteria: dict
:return: modified query
:rtype: sqlalchemy.orm.query.Query
"""
return self._filter_or_exclude(query, conditions, order_criteria=order_criteria)
def exclude_by(self, query, conditions):
"""
:param query: SQLAlchemy Query object
:type query: sqlalchemy.orm.query.Query
:param conditions: conditions dictionary
:type conditions: dict
:return: modified query
:rtype: sqlalchemy.orm.query.Query
"""
return self._filter_or_exclude(query, {'not': {'and': conditions}})
def _filter_or_exclude(self, query, conditions, default_op=None, order_criteria=None):
"""
:param query: SQLAlchemy Query object
:type query: sqlalchemy.orm.query.Query
:param conditions: conditions dictionary
:type conditions: dict
:param default_op: a default operator to join all filter expressions
:type default_op: function
:return: modified query
:rtype: sqlalchemy.orm.query.Query
"""
relationships = {
'aliases': {},
'join_chains': [],
}
expressions = self._build_filter_expressions(conditions, default_op, relationships)
order_expressions = []
if order_criteria:
order_expressions = self._build_order_expressions(order_criteria, relationships)
query = self._apply_joins(query, relationships, distinct=expressions is not None)
if expressions is not None:
query = query.filter(expressions)
if order_criteria and order_expressions is not None:
query = query.order_by(*order_expressions)
return query
    def _apply_joins(self, query, relationships, distinct=True):
        """
        Join all relation chains collected in relationships onto the query.
        Only the longest chains are joined (a chain that is a subset of a
        longer one is implied by it); a chain is joined as OUTER if any of its
        duplicates was flagged as outer.
        :param query: SQLAlchemy Query object
        :type query: sqlalchemy.orm.query.Query
        :param relationships: dict with 'aliases' and 'join_chains' entries
        :type relationships: dict
        :param distinct: apply DISTINCT to counter row multiplication from joins
        :type distinct: bool
        :return: modified query
        :rtype: sqlalchemy.orm.query.Query
        """
        longest_chains = []
        for chain_a, chain_a_ext, chain_a_is_outer in relationships['join_chains']:
            is_longest = True
            any_is_outer = chain_a_is_outer
            for chain_b, chain_b_ext, chain_b_is_outer in relationships['join_chains']:
                if chain_a == chain_b:
                    # same chain seen again - only merge the outer-join flag
                    if chain_b_is_outer:
                        any_is_outer = True
                    continue
                if set(chain_a).issubset(chain_b):
                    is_longest = False
                    break
            if is_longest and (chain_a_ext, any_is_outer) not in longest_chains:
                longest_chains.append((chain_a_ext, any_is_outer))
        if not longest_chains:
            return query
        for chain, chain_is_outer in longest_chains:
            for alias, relation in chain:
                query = query.join((alias, relation), from_joinpoint=True, isouter=chain_is_outer)
            query = query.reset_joinpoint()
        return query.distinct() if distinct else query
def _build_filter_expressions(self, conditions, default_op, relationships):
"""
:param conditions: conditions dictionary
:type conditions: dict
:param default_op: a default operator to join all filter expressions
:type default_op: function
:param relationships: a dict with all joins to apply, describes current state in recurrent calls
:type relationships: dict
:return: expressions list
:rtype: list
"""
if default_op is None:
default_op = and_
expressions = []
for arg, value in conditions.items():
if arg in self._logical_operators:
expression = self._parse_logical_op(arg, value, self._logical_operators[arg], relationships)
else:
expression = self._parse_tokens(self.objects_class, arg.split('__'), value, relationships,
lambda c, n, v: operators.eq(n, self.deserialize_column(c, v)))
if expression is not None:
expressions.append(expression)
result = None
if len(expressions) > 1:
result = default_op(*expressions) if default_op != not_ else not_(and_(*expressions))
elif len(expressions) == 1:
result = expressions[0] if default_op != not_ else not_(expressions[0])
return result
def _parse_logical_op(self, arg, value, default_op, relationships):
"""
:param arg: condition name
:type arg: str
:param value: condition value
:type value: dict | list
:param default_op: a default operator to join all filter expressions
:type default_op: function
:param relationships: a dict with all joins to apply, describes current state in recurrent calls
:type relationships: dict
:return: expressions list
:rtype: list
"""
if isinstance(value, dict):
return self._build_filter_expressions(value, default_op, relationships)
if not isinstance(value, list):
raise HTTPBadRequest('Invalid attribute', 'Filter attribute {} is invalid'.format(arg))
expressions = []
for subconditions in value:
if not isinstance(subconditions, dict):
raise HTTPBadRequest('Invalid attribute', 'Filter attribute {} is invalid'.format(arg))
subexpressions = self._build_filter_expressions(subconditions, and_, relationships)
if subexpressions is not None:
expressions.append(subexpressions)
result = None
if len(expressions) > 1:
result = default_op(*expressions) if default_op != not_ else not_(and_(*expressions))
elif len(expressions) == 1:
result = expressions[0] if default_op != not_ else not_(expressions[0])
return result
    def _parse_tokens(self, obj_class, tokens, value, relationships, default_expression=None):
        """
        Walk a '__'-separated filter name (already split into tokens) and
        build the matching SQL expression.
        Tokens are consumed left to right: relationship names switch the
        current mapper (registering joins in relationships), a column name
        selects the compared column, and a trailing operator/function token
        produces the final expression. When no operator token follows the
        column, default_expression(column, aliased_column, value) is used.
        :param obj_class: mapped class the first token is resolved against
        :param tokens: filter name parts
        :type tokens: list
        :param value: right-hand side value of the comparison
        :param relationships: a dict with all joins to apply, describes current state in recurrent calls
        :type relationships: dict
        :param default_expression: callback building the expression when the
                                   tokens end at a plain column
        :return: a SQL expression or None
        """
        column_name = None
        column = None
        column_alias = obj_class
        mapper = inspect(obj_class)
        join_chain = []
        join_chain_ext = []
        join_is_outer = False
        for index, token in enumerate(tokens):
            if token == CollectionResource.PARAM_TEXT_QUERY:
                # full-text search - delegate to the model's own query builder
                query_method = getattr(obj_class, 'get_term_query', None)
                if not callable(query_method):
                    raise HTTPBadRequest('Invalid attribute', 'Param {} is invalid, specific object '
                                                              'can\'t provide a query'.format('__'.join(tokens)))
                return query_method(self=obj_class, column_alias=column_alias, column_name=column_name, value=value,
                                    default_op=or_ if tokens[-1] == 'or' else and_)
            if column_name is not None and token in self._underscore_operators:
                op = self._underscore_operators[token]
                if op in [operators.between_op, operators.in_op]:
                    if not isinstance(value, list):
                        value = [value]
                # isnull is the only operator where the value is not of the same type as the column
                if token != 'isnull' and token != 'isnotnull':
                    if isinstance(value, list):
                        value = list(map(lambda x: self.deserialize_column(column, x), value))
                    else:
                        value = self.deserialize_column(column, value)
                if op == Function:
                    # func/sfunc/efunc: wrap the column in SQL function calls
                    expression = column_name
                    if len(tokens[index+1:]) > 1:
                        if token == 'efunc':
                            # efunc appears to resolve the remaining tokens
                            # (except the last) as a column expression for the
                            # value side - confirm against callers
                            value = self._parse_tokens(obj_class, tokens[index+1:-1], value, relationships,
                                                       lambda c, n, v: n)
                        else:
                            for func_name in tokens[index+1:-1]:
                                expression = Function(func_name, expression)
                    if tokens[-1] in self._underscore_operators:
                        expression = self._underscore_operators[tokens[-1]](expression, value)
                    else:
                        if token != 'sfunc':
                            expression = Function(tokens[-1], expression, value)
                        else:
                            # sfunc: single-argument function, value is not passed
                            expression = Function(tokens[-1], expression)
                else:
                    expression = op(column_name, value)
                if token == 'isnull':
                    # IS NULL tests require the joined rows to be optional
                    join_is_outer = True
                if join_chain:
                    relationships['join_chains'].append((join_chain, join_chain_ext, join_is_outer))
                return expression
            if token in mapper.relationships:
                # follow the relation and change current obj_class and mapper
                obj_class = mapper.relationships[token].mapper.class_
                mapper = mapper.relationships[token].mapper
                column_alias, is_new_alias = self.next_alias(relationships['aliases'], token, obj_class,
                                                             prefix=relationships.get('prefix', ''))
                join_chain.append(token)
                join_chain_ext.append((column_alias, token))
                continue
            if token not in mapper.column_attrs:
                if self.IGNORE_UNKNOWN_FILTER:
                    return None
                # if token is not an op or relation it has to be a valid column
                raise HTTPBadRequest('Invalid attribute', 'Param {} is invalid, part {} is expected '
                                                          'to be a known column name'.format('__'.join(tokens), token))
            column_name = getattr(column_alias, token)
            """:type column: sqlalchemy.schema.Column"""
            column = mapper.columns[token]
        if join_chain:
            relationships['join_chains'].append((join_chain, join_chain_ext, join_is_outer))
        if column_name is not None and default_expression is not None:
            # if last token was a relation it's just going to be ignored
            return default_expression(column, column_name, value)
        return None
@staticmethod
def get_tsquery(value, default_op):
if isinstance(value, list):
tq = func.plainto_tsquery('english', value.pop())
while len(value):
tq = tq.op('||' if default_op == or_ else '&&')(func.plainto_tsquery('english', value.pop()))
else:
tq = func.plainto_tsquery('english', value)
return tq
@staticmethod
def next_alias(aliases, name, obj_class, use_existing=True, prefix=''):
is_new = True
if name in aliases:
if use_existing:
is_new = False
else:
aliases[name]['number'] += 1
aliases[name]['aliased'].append(
aliased(obj_class, name=prefix + name + '_' + str(aliases[name]['number'])))
else:
aliases[name] = {'number': 1,
'aliased': [aliased(obj_class, name=prefix + name + '_1')]}
return aliases[name]['aliased'][-1], is_new
def order_by(self, query, criteria):
"""
:param query: SQLAlchemy Query object
:type query: sqlalchemy.orm.query.Query
:return: modified query
:rtype: sqlalchemy.orm.query.Query
"""
relationships = {
'aliases': {},
'join_chains': [],
}
expressions = self._build_order_expressions(criteria, relationships)
query = self._apply_joins(query, relationships, distinct=False)
if expressions is not None:
query = query.order_by(*expressions)
return query
def _build_order_expressions(self, criteria, relationships):
"""
:param criteria: criteria dictionary
:type criteria: dict
:param relationships: a dict with all joins to apply, describes current state in recurrent calls
:type relationships: dict
:return: expressions list
:rtype: list
"""
expressions = []
if isinstance(criteria, dict):
criteria = list(criteria.items())
for arg in criteria:
if isinstance(arg, tuple):
arg, value = arg
else:
value = None
is_ascending = True
if len(arg) and arg[0] == '+' or arg[0] == '-':
is_ascending = arg[:1] == '+'
arg = arg[1:]
expression = self._parse_tokens(self.objects_class, arg.split('__'), value, relationships,
lambda c, n, v: n)
if expression is not None:
expressions.append(expression if is_ascending else desc(expression))
return expressions
def clean_relations(self, relations):
"""
Checks all special values in relations and makes sure to always return either a list or None.
:param relations: relation names
:type relations: str | list
:return: either a list (may be empty) or None if all relations should be included
:rtype: list[str] | None
"""
if relations == '':
return []
elif relations == self.PARAM_RELATIONS_ALL:
return None
elif isinstance(relations, str):
return [relations]
return relations
@staticmethod
def save_resource(obj, data, db_session):
"""
Extracts relation dicts from data, saves them and then updates the main object.
:param obj: a new or existing model
:type obj: object
:param data: data to assign to the model and/or its relations
:type data: dict
:param db_session: SQLAlchemy session
:type db_session: sqlalchemy.orm.session.Session
"""
# fetching related objects should not trigger saving of main object,
# because FKs could not have been set yet
autoflush = db_session.autoflush
db_session.autoflush = False
mapper = inspect(obj).mapper
for key, value in data.items():
if key not in mapper.relationships and getattr(obj, key) != value:
setattr(obj, key, value)
db_session.add(obj)
for key, value in data.items():
if key not in mapper.relationships:
continue
related_mapper = mapper.relationships[key].mapper
pk = related_mapper.primary_key[0].name
if isinstance(value, list):
keys = []
objects = getattr(obj, key)
reindexed = {getattr(related, pk): index for index, related in enumerate(objects)}
for item in value:
if isinstance(item, dict):
if pk in item and item[pk] in reindexed:
AlchemyMixin.save_resource(objects[reindexed[item[pk]]], item, db_session)
reindexed.pop(item[pk])
else:
objects.append(AlchemyMixin.update_or_create(db_session, related_mapper, item))
else:
if item in reindexed:
reindexed.pop(item)
else:
keys.append(item)
for index in reindexed.values():
del objects[index]
if keys:
expression = related_mapper.primary_key[0].in_(keys)
objects += db_session.query(related_mapper.class_).filter(expression).all()
else:
rel_obj = getattr(obj, key)
if isinstance(value, dict):
relationship = mapper.relationships[key]
if (relationship.direction == MANYTOONE or relationship.uselist)\
and (pk not in value or rel_obj is None):
setattr(obj, key, AlchemyMixin.update_or_create(db_session, related_mapper, value))
else:
AlchemyMixin.save_resource(rel_obj, value, db_session)
elif rel_obj is None or getattr(rel_obj, pk) != value:
expression = related_mapper.primary_key[0].__eq__(value)
setattr(obj, key, db_session.query(related_mapper.class_).filter(expression).first())
db_session.autoflush = autoflush
return obj
@staticmethod
def update_or_create(db_session, mapper, attributes):
"""
Updated the record if attributes contain the primary key value(s) and creates it if they don't.
:param db_session:
:type db_session: sqlalchemy.orm.session.Session
:param mapper:
:type mapper: sqlalchemy.orm.mapper.Mapper
:param attributes:
:type attributes: dict
:return:
:rtype: object
"""
query_attrs = {}
for key in mapper.primary_key:
if key in attributes:
query_attrs[key] = attributes.pop(key)
if query_attrs:
obj = db_session.query(mapper.class_).get(query_attrs[0] if len(query_attrs) == 1 else tuple(query_attrs))
else:
obj = mapper.class_()
if attributes:
return AlchemyMixin.save_resource(obj, attributes, db_session)
return obj
    @staticmethod
    def get_or_create(db_session, model_class, query_attrs, update_attrs=None, update_existing=False):
        """
        Fetches the record and if it doesn't exist yet, creates it, handling a race condition.
        :param db_session: session within DB connection
        :type db_session: sqlalchemy.orm.session.Session
        :param model_class: class of the model to return or create
        :type model_class: class
        :param query_attrs: attributes used to fetch the model
        :type query_attrs: dict
        :param update_attrs: attributes used to create a new model
        :type update_attrs: dict
        :param update_existing: if True and update_attrs are set, updates existing records
        :type update_existing: bool
        :return: existing or new object and a flag if existing or new object is being returned
        :rtype: tuple
        """
        query = db_session.query(model_class).filter_by(**query_attrs)
        existing = query.one_or_none()
        if existing:
            if update_existing and update_attrs is not None:
                for key, value in update_attrs.items():
                    if getattr(existing, key) != value:
                        setattr(existing, key, value)
            return existing, False
        # Nested transaction (SAVEPOINT) so a failed insert can be rolled back
        # without discarding the caller's outer transaction.
        db_session.begin_nested()
        try:
            if update_attrs is None:
                update_attrs = query_attrs
            else:
                update_attrs.update(query_attrs)
            new_object = model_class(**update_attrs)
            db_session.add(new_object)
            db_session.commit()
        except IntegrityError:
            # Lost the race: another transaction inserted the same record
            # between our SELECT and INSERT - fetch and return that one.
            db_session.rollback()
            existing = query.one_or_none()
            if update_existing and update_attrs is not None:
                for key, value in update_attrs.items():
                    if getattr(existing, key) != value:
                        setattr(existing, key, value)
            return existing, False
        return new_object, True
@staticmethod
def get_default_schema(model_class, method='POST'):
"""
Returns a schema to be used in falconjsonio.schema.request_schema decorator
:return:
"""
schema = {
'type': 'object',
'properties': {
},
}
if method == 'POST':
schema['required'] = []
return schema
class CollectionResource(AlchemyMixin, BaseCollectionResource):
    """
    Allows to fetch a collection of a resource (GET) and to create new resource in that collection (POST).
    May be extended to allow batch operations (ex. PATCH).
    When fetching a collection (GET), following params are supported:
    * limit, offset - for pagination
    * total_count - to calculate total number of items matching filters, without pagination
    * relations - list of relation names to include in the result, uses special value `_all` for all relations
    * all other params are treated as filters, syntax mimics Django filters, see `AlchemyMixin._underscore_operators`
    User input can be validated by attaching the `falconjsonio.schema.request_schema()` decorator.
    """
    # PostgreSQL error code for a UNIQUE constraint violation
    VIOLATION_UNIQUE = '23505'
    def __init__(self, objects_class, db_engine, max_limit=None, eager_limit=None):
        """
        :param objects_class: class represent single element of object lists that suppose to be returned
        :param db_engine: SQL Alchemy engine
        :type db_engine: sqlalchemy.engine.Engine
        :param max_limit: max limit of elements that suppose to be returned by default
        :type max_limit: int
        :param eager_limit: if None or the value of limit param is greater than this, subquery eager loading
                            will be enabled
        :type eager_limit: int
        """
        super(CollectionResource, self).__init__(objects_class, max_limit)
        self.db_engine = db_engine
        self.eager_limit = eager_limit
        if not hasattr(self, '__request_schemas__'):
            self.__request_schemas__ = {}
        self.__request_schemas__['POST'] = AlchemyMixin.get_default_schema(objects_class, 'POST')
    def get_queryset(self, req, resp, db_session=None, limit=None):
        """
        Return a query object used to fetch data.
        :param req: Falcon request
        :type req: falcon.request.Request
        :param resp: Falcon response
        :type resp: falcon.response.Response
        :param db_session: SQLAlchemy session
        :type db_session: sqlalchemy.orm.session.Session
        :param limit: max number of records fetched
        :type limit: int | None
        :return: a query from `object_class`
        """
        query = db_session.query(self.objects_class)
        relations = self.clean_relations(self.get_param_or_post(req, self.PARAM_RELATIONS, ''))
        # enable subquery eager loading of the requested relations (all of
        # them when relations is None) - see eager_limit in __init__
        if self.eager_limit is None or (limit is not None and limit > self.eager_limit):
            if relations is None:
                query = query.options(subqueryload('*'))
            elif len(relations):
                for relation in relations:
                    query = query.options(subqueryload(relation))
        # a JSON-encoded search param merges extra filter conditions into req.params
        search = self.get_param_or_post(req, self.PARAM_SEARCH)
        if search:
            try:
                req.params.update(json.loads(search) if isinstance(search, str) else search)
            except ValueError:
                raise HTTPBadRequest('Invalid attribute',
                                     'Value of {} filter attribute is invalid'.format(self.PARAM_SEARCH))
        order = self.get_param_or_post(req, self.PARAM_ORDER)
        if order:
            if isinstance(order, str):
                # strings wrapped in {} or [] are tried as JSON first
                if (order[0] == '{' and order[-1] == '}') or (order[0] == '[' and order[-1] == ']'):
                    try:
                        order = json.loads(order)
                    except ValueError:
                        # not valid json, ignore and try to parse as an ordinary list of attributes
                        pass
            if not isinstance(order, list) and not isinstance(order, dict):
                order = [order]
            return self.filter_by(query, req.params, order)
        # no explicit order - sort by primary key(s) for stable pagination
        primary_keys = inspect(self.objects_class).primary_key
        return self.filter_by(query, req.params).order_by(*primary_keys)
    def get_total_objects(self, queryset, totals):
        """
        Execute the aggregate queries described by totals over queryset.
        :param queryset: filtered query the aggregates are computed over
        :type queryset: sqlalchemy.orm.query.Query
        :param totals: list of single-key dicts mapping aggregate name to columns
        :type totals: list
        :return: flat or (when grouping) nested dict keyed by 'total_<aggregate>'
        :rtype: dict
        """
        if not totals:
            return {}
        agg_query, dimensions = self._build_total_expressions(queryset, totals)
        def nested_dict(n, type):
            """Creates an n-dimension dictionary where the n-th dimension is of type 'type'
            """
            if n <= 1:
                return type()
            return collections.defaultdict(lambda: nested_dict(n - 1, type))
        result = nested_dict(len(dimensions) + 2, None)
        for aggs in queryset.session.execute(agg_query):
            for metric_key, metric_value in aggs.items():
                if metric_key in dimensions:
                    continue
                # walk down one dict level per grouping dimension
                last_result = result
                last_key = 'total_' + metric_key
                for dimension in dimensions:
                    last_result = last_result[last_key]
                    last_key = str(aggs[dimension])
                last_result[last_key] = metric_value if not isinstance(metric_value, Decimal) else float(metric_value)
        return result
    def _build_total_expressions(self, queryset, totals):
        """
        Translate the totals spec into an aggregate SQL query.
        Supports plain aggregates over the primary key, aggregates over
        arbitrary (possibly related) columns, grouping (AGGR_GROUPBY) and a
        per-group row limit (AGGR_GROUPLIMIT, applied via row_number()).
        :return: (aggregate query, list of grouping column names)
        :rtype: tuple
        """
        mapper = inspect(self.objects_class)
        primary_keys = mapper.primary_key
        relationships = {
            'aliases': {},
            'join_chains': [],
            'prefix': 'totals_',
        }
        aggregates = []
        group_cols = OrderedDict()
        group_by = []
        group_limit = None
        for total in totals:
            for aggregate, columns in total.items():
                if aggregate == self.AGGR_GROUPLIMIT:
                    if not isinstance(columns, int):
                        raise HTTPBadRequest('Invalid attribute', 'Group limit option requires an integer value')
                    group_limit = columns
                    continue
                if not columns:
                    # no columns given - aggregate over the primary key
                    if aggregate == self.AGGR_GROUPBY:
                        raise HTTPBadRequest('Invalid attribute', 'Group by option requires at least one column name')
                    if len(primary_keys) > 1:
                        aggregates.append(Function(aggregate, func.row(*primary_keys)).label(aggregate))
                    else:
                        aggregates.append(Function(aggregate, *primary_keys).label(aggregate))
                    continue
                if not isinstance(columns, list):
                    columns = [columns]
                for column in columns:
                    expression = self._parse_tokens(self.objects_class, column.split('__'), None, relationships,
                                                    lambda c, n, v: n)
                    if expression is not None:
                        if aggregate == self.AGGR_GROUPBY:
                            group_cols[column] = expression.label(column)
                            group_by.append(expression)
                        else:
                            aggregates.append(Function(aggregate, expression).label(aggregate))
        agg_query = self._apply_joins(queryset, relationships, distinct=False)
        group_cols_expr = list(group_cols.values())
        columns = group_cols_expr + aggregates
        if group_limit:
            # number rows within each group so a WHERE on row_number can cap
            # the rows returned per group
            row_order = list(map(lambda c: c.desc(), aggregates))
            columns.append(func.row_number().over(partition_by=group_cols_expr[:-1],
                                                  order_by=row_order).label('row_number'))
            # order by ordinal position: group columns asc, aggregates desc
            order = ','.join(list(map(str, range(1, len(group_cols_expr) + 1)))
                             + list(map(lambda c: str(c) + ' DESC', range(1 + len(group_cols_expr),
                                                                          len(aggregates) + len(group_cols_expr) + 1))))
            agg_query = agg_query.statement.with_only_columns(columns).order_by(None).order_by(order)
        if group_by:
            agg_query = agg_query.group_by(*group_by)
        if group_limit:
            subquery = agg_query.alias()
            agg_query = select([subquery]).where(subquery.c.row_number <= group_limit)
        return agg_query, list(group_cols.keys())
    def get_object_list(self, queryset, limit=None, offset=None):
        """
        Apply pagination to the queryset.
        :param queryset: query to paginate
        :type queryset: sqlalchemy.orm.query.Query
        :param limit: max number of records, capped by self.max_limit
        :type limit: int | None
        :param offset: number of records to skip, floored at 0
        :type offset: int | None
        :return: paginated query
        :rtype: sqlalchemy.orm.query.Query
        """
        if limit is None:
            limit = self.max_limit
        if offset is None:
            offset = 0
        if limit is not None:
            if self.max_limit is not None:
                limit = min(limit, self.max_limit)
            limit = max(limit, 0)
            queryset = queryset.limit(limit)
        offset = max(offset, 0)
        return queryset.offset(offset)
    def on_get(self, req, resp):
        """
        Fetch the collection: applies filters, totals and pagination, then
        renders serialized results with x-api-total / x-api-returned headers.
        """
        limit = self.get_param_or_post(req, self.PARAM_LIMIT)
        offset = self.get_param_or_post(req, self.PARAM_OFFSET)
        if limit is not None:
            limit = int(limit)
        if offset is not None:
            offset = int(offset)
        totals = self.get_param_totals(req)
        # retrieve the relations param without popping it so self.get_queryset() can also use it
        relations = self.clean_relations(self.get_param_or_post(req, self.PARAM_RELATIONS, '', pop_params=False))
        with self.session_scope(self.db_engine) as db_session:
            query = self.get_queryset(req, resp, db_session, limit)
            totals = self.get_total_objects(query, totals)
            object_list = self.get_object_list(query, limit, offset)
            serialized = [self.serialize(obj, relations_include=relations,
                                         relations_ignore=list(getattr(self, 'serialize_ignore', [])))
                          for obj in object_list]
            result = {'results': serialized,
                      'total': totals['total_count'] if 'total_count' in totals else None,
                      'returned': len(serialized)}  # avoid calling object_list.count() which executes the query again
        result.update(totals)
        headers = {'x-api-total': str(result['total']) if result['total'] is not None else '',
                   'x-api-returned': str(result['returned'])}
        resp.set_headers(headers)
        self.render_response(result, req, resp)
    def on_head(self, req, resp):
        """
        Like on_get, but responds with only the x-api-total / x-api-returned
        headers and a 204 status, no body.
        """
        limit = self.get_param_or_post(req, self.PARAM_LIMIT)
        offset = self.get_param_or_post(req, self.PARAM_OFFSET)
        if limit is not None:
            limit = int(limit)
        if offset is not None:
            offset = int(offset)
        totals = self.get_param_totals(req)
        with self.session_scope(self.db_engine) as db_session:
            query = self.get_queryset(req, resp, db_session, limit)
            totals = self.get_total_objects(query, totals)
            object_list = self.get_object_list(query, limit, offset)
            # NOTE(review): len() on a Query object may raise TypeError -
            # possibly needs .count() or list materialization; confirm.
            headers = {'x-api-total': str(totals['total_count']) if 'total_count' in totals else '',
                       'x-api-returned': str(len(object_list))}
        resp.set_headers(headers)
        resp.status = falcon.HTTP_NO_CONTENT
    def create(self, req, resp, data, db_session=None):
        """
        Create a new or update an existing record using provided data.
        :param req: Falcon request
        :type req: falcon.request.Request
        :param resp: Falcon response
        :type resp: falcon.response.Response
        :param data:
        :type data: dict
        :param db_session: SQLAlchemy session
        :type db_session: sqlalchemy.orm.session.Session
        :return: created object, serialized to a dict
        :rtype: dict
        """
        relations = self.clean_relations(self.get_param_or_post(req, self.PARAM_RELATIONS, ''))
        resource = self.save_resource(self.objects_class(), data, db_session)
        db_session.commit()
        return self.serialize(resource, relations_include=relations,
                              relations_ignore=list(getattr(self, 'serialize_ignore', [])))
    def on_post(self, req, resp, *args, **kwargs):
        """
        Create a resource from the request body; responds 201 on success,
        400 with errors from clean(), 409 on unique constraint violations.
        """
        data = self.deserialize(req.context['doc'] if 'doc' in req.context else None)
        data, errors = self.clean(data)
        if errors:
            result = {'errors': errors}
            status_code = falcon.HTTP_BAD_REQUEST
            self.render_response(result, req, resp, status_code)
            return
        try:
            with self.session_scope(self.db_engine) as db_session:
                result = self.create(req, resp, data, db_session=db_session)
        except IntegrityError:
            raise HTTPConflict('Conflict', 'Unique constraint violated')
        except ProgrammingError as err:
            # Cases such as unallowed NULL value should have been checked before we got here (e.g. validate against
            # schema using falconjsonio) - therefore assume this is a UNIQUE constraint violation
            if len(err.orig.args) > 1 and err.orig.args[1] == self.VIOLATION_UNIQUE:
                raise HTTPConflict('Conflict', 'Unique constraint violated')
            raise
        status_code = falcon.HTTP_CREATED
        self.render_response(result, req, resp, status_code)
class SingleResource(AlchemyMixin, BaseSingleResource):
"""
Allows to fetch a single resource (GET) and to update (PATCH, PUT) or remove it (DELETE).
When fetching a resource (GET), following params are supported:
* relations - list of relation names to include in the result, uses special value `_all` for all relations
User input can be validated by attaching the `falconjsonio.schema.request_schema()` decorator.
"""
VIOLATION_FOREIGN_KEY = '23503'
    def __init__(self, objects_class, db_engine):
        """
        :param objects_class: class represent single element of object lists that suppose to be returned
        :param db_engine: SQL Alchemy engine
        :type db_engine: sqlalchemy.engine.Engine
        """
        super(SingleResource, self).__init__(objects_class)
        self.db_engine = db_engine
        if not hasattr(self, '__request_schemas__'):
            self.__request_schemas__ = {}
        # register default request schemas for create (POST) and full
        # replacement (PUT) of the resource
        self.__request_schemas__['POST'] = AlchemyMixin.get_default_schema(objects_class, 'POST')
        self.__request_schemas__['PUT'] = AlchemyMixin.get_default_schema(objects_class, 'POST')
    def get_object(self, req, resp, path_params, for_update=False, db_session=None):
        """
        :param req: Falcon request
        :type req: falcon.request.Request
        :param resp: Falcon response
        :type resp: falcon.response.Response
        :param path_params: path params extracted from URL path
        :type path_params: dict
        :param for_update: if the object is going to be updated or deleted
        :type for_update: bool
        :param db_session: SQLAlchemy session
        :type db_session: sqlalchemy.orm.session.Session
        :return: the single matching model instance
        :raises HTTPNotFound: when no record matches
        :raises HTTPBadRequest: when more than one record matches
        """
        query = db_session.query(self.objects_class)
        if for_update:
            # row-level lock so concurrent updates/deletes serialize
            query = query.with_for_update()
        for key, value in path_params.items():
            # NOTE(review): getattr defaults to None for unknown path params,
            # producing a `None == value` filter - confirm path params always
            # name real model attributes
            attr = getattr(self.objects_class, key, None)
            query = query.filter(attr == value)
        # remaining query params become extra filters (relations is consumed elsewhere)
        conditions = dict(req.params)
        if self.PARAM_RELATIONS in conditions:
            conditions.pop(self.PARAM_RELATIONS)
        query = self.filter_by(query, conditions)
        try:
            obj = query.one()
        except NoResultFound:
            raise HTTPNotFound()
        except MultipleResultsFound:
            raise HTTPBadRequest('Multiple results', 'Query params match multiple records')
        return obj
def on_get(self, req, resp, *args, **kwargs):
relations = self.clean_relations(self.get_param_or_post(req, self.PARAM_RELATIONS, ''))
with self.session_scope(self.db_engine) as db_session:
obj = self.get_object(req, resp, kwargs, db_session=db_session)
result = self.serialize(obj,
relations_include=relations,
relations_ignore=list(getattr(self, 'serialize_ignore', [])))
self.render_response(result, req, resp)
def on_head(self, req, resp, *args, **kwargs):
with self.session_scope(self.db_engine) as db_session:
# call get_object to check if it exists
self.get_object(req, resp, kwargs, db_session=db_session)
resp.status = falcon.HTTP_NO_CONTENT
def delete(self, req, resp, obj, db_session=None):
    """
    Delete an existing record.
    :param req: Falcon request
    :type req: falcon.request.Request
    :param resp: Falcon response
    :type resp: falcon.response.Response
    :param obj: the object to delete
    :param db_session: SQLAlchemy session
    :type db_session: sqlalchemy.orm.session.Session
    """
    # NOTE(review): Session.delete() returns None, so `deleted == 0` can never
    # be true and the conflict branch below is dead code.  It looks ported
    # from a bulk `query.delete()` call, which does return a row count.
    # Confirm the intended behavior before relying on this 409 response.
    deleted = db_session.delete(obj)
    if deleted == 0:
        raise falcon.HTTPConflict('Conflict', 'Resource found but conditions violated')
def on_delete(self, req, resp, *args, **kwargs):
    """Serve DELETE: remove the record matching the URL and respond with {}."""
    try:
        with self.session_scope(self.db_engine) as db_session:
            # for_update=True locks the row so the delete cannot race with a
            # concurrent update of the same record.
            obj = self.get_object(req, resp, kwargs, for_update=True, db_session=db_session)
            self.delete(req, resp, obj, db_session)
    except (IntegrityError, ProgrammingError) as err:
        # This should only be caused by foreign key constraint being violated
        # NOTE(review): every IntegrityError maps to 409; the error-code check
        # against VIOLATION_FOREIGN_KEY only applies to ProgrammingError.
        if isinstance(err, IntegrityError) or err.orig.args[1] == self.VIOLATION_FOREIGN_KEY:
            raise HTTPConflict('Conflict', 'Other content links to this')
        else:
            raise
    self.render_response({}, req, resp)
def update(self, req, resp, data, obj, db_session=None):
    """
    Create a new or update an existing record using provided data.
    :param req: Falcon request
    :type req: falcon.request.Request
    :param resp: Falcon response
    :type resp: falcon.response.Response
    :param data:
    :type data: dict
    :param obj: the object to update
    :param db_session: SQLAlchemy session
    :type db_session: sqlalchemy.orm.session.Session
    :return: created or updated object, serialized to a dict
    :rtype: dict
    """
    requested = self.get_param_or_post(req, self.PARAM_RELATIONS, '')
    relations = self.clean_relations(requested)
    # Persist first, then serialize the committed state.
    saved = self.save_resource(obj, data, db_session)
    db_session.commit()
    ignored = list(getattr(self, 'serialize_ignore', []))
    return self.serialize(saved,
                          relations_include=relations,
                          relations_ignore=ignored)
def on_put(self, req, resp, *args, **kwargs):
    """Serve PUT: validate the request body and update the matching record."""
    status_code = falcon.HTTP_OK
    try:
        with self.session_scope(self.db_engine) as db_session:
            # Lock the row while it is being updated.
            obj = self.get_object(req, resp, kwargs, for_update=True, db_session=db_session)
            data = self.deserialize(req.context['doc'] if 'doc' in req.context else None)
            data, errors = self.clean(data)
            if errors:
                # Validation failed: report the errors instead of saving.
                result = {'errors': errors}
                status_code = falcon.HTTP_BAD_REQUEST
            else:
                result = self.update(req, resp, data, obj, db_session)
    except (IntegrityError, ProgrammingError) as err:
        # Cases such as unallowed NULL value should have been checked before we got here (e.g. validate against
        # schema using falconjsonio) - therefore assume this is a UNIQUE constraint violation
        # NOTE(review): the error code compared is VIOLATION_FOREIGN_KEY even
        # though the message says "Unique constraint" - confirm which constant
        # was intended here.
        if isinstance(err, IntegrityError) or err.orig.args[1] == self.VIOLATION_FOREIGN_KEY:
            raise HTTPConflict('Conflict', 'Unique constraint violated')
        else:
            raise
    self.render_response(result, req, resp, status_code)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
u"""
===============================
Shimehari.core.manager
~~~~~~~~~~~~~~~~~~~~~~
マネージメント
===============================
"""
import os
import sys
from optparse import OptionParser
from .AbstractCommand import AbstractCommand, CreatableCommand
from shimehari.core.helpers import importFromString
_commands = None
u"""
コマンドラインから色々実行するべさ
"""
def executeFromCommandLine(argv=None):
    """Entry point: dispatch `argv` (defaults to sys.argv) to a command."""
    CommandLineExecuter(argv).execute()
def loadCommandModule(cmdName, name):
    """Import `<cmdName>.manage.commands.<name>` and return its Command()."""
    modulePath = '%s.manage.commands.%s' % (cmdName, name)
    return importFromString(modulePath).Command()
def getCommands():
    """Return the command-name -> package mapping, built lazily on first call."""
    global _commands
    if _commands is None:
        # Built-in commands all live under the shimehari.core package.
        _commands = {name: 'shimehari.core' for name in findCommand(__path__[0])}
        # TODO: user commands...
    return _commands
def findCommand(manageDir):
    """Return command module names found under `<manageDir>/commands`.

    Files starting with `_` are skipped and the `.py` suffix is stripped.
    An unreadable or missing directory yields an empty list.
    """
    cmdDir = os.path.join(manageDir, 'commands')
    try:
        entries = os.listdir(cmdDir)
    except OSError:
        return []
    return [entry[:-3] for entry in entries
            if entry.endswith('.py') and not entry.startswith('_')]
class CommandLineExecuter(object):
    """Dispatches a command-line invocation to the matching command object."""

    def __init__(self, argv):
        # Fall back to sys.argv so callers may pass None.
        self.argv = argv or sys.argv[:]
        self.progName = os.path.basename(self.argv[0])

    def fetchCommand(self, subcommand):
        """Return the command instance registered under `subcommand`.

        Prints an error and exits with status 1 when the name is unknown.
        """
        try:
            cmdName = getCommands()[subcommand]
        except KeyError:
            sys.stdout.write("Unknown command: %r\nType %s help for usage. \n" % (subcommand, self.progName))
            sys.exit(1)
        if isinstance(cmdName, AbstractCommand):
            cls = cmdName
        else:
            cls = loadCommandModule(cmdName, subcommand)
        return cls

    def execute(self):
        """Parse argv and run the requested subcommand."""
        try:
            subcommand = self.argv[1]
        except IndexError:
            # No subcommand given: fall back to help.  Catch only IndexError
            # so real errors are not swallowed (was a bare `except`).
            subcommand = 'help'
        if subcommand == 'version' or self.argv[1:] == ['--version']:
            # BUG FIX: getVersion was previously imported only inside the
            # 'version' branch, so `--version` raised NameError.
            from shimehari import getVersion
            sys.stdout.write('shimehari version: ' + getVersion() + '\n')
        elif self.argv[1:] in (['--help'], ['-h']):
            self.fetchCommand('help').runFromArgv(self.argv)
        else:
            self.fetchCommand(subcommand).runFromArgv(self.argv)
# アルコール分 0.00% (alcohol content 0.00%)
# TODO: something like allowing commands to be registered per application
#!/usr/bin/env python
# -*- coding: utf-8 -*-
u"""
===============================
Shimehari.core.manager
~~~~~~~~~~~~~~~~~~~~~~
マネージメント
===============================
"""
import os
import sys
from optparse import OptionParser
from .AbstractCommand import AbstractCommand, CreatableCommand
from shimehari.core.helpers import importFromString
_commands = None
u"""
コマンドラインから色々実行するべさ
"""
def executeFromCommandLine(argv=None):
    """Entry point used by the `shimehari` CLI; runs the requested command."""
    CommandLineExecuter(argv).execute()
def loadCommandModule(cmdName, name):
    """Import the dotted module `<cmdName>.<name>` and return its Command()."""
    dotted = '%s.%s' % (cmdName, name)
    return importFromString(dotted).Command()
def getCommands():
    """Return the command-name -> package mapping, built lazily on first call."""
    global _commands
    if _commands is None:
        # Built-in commands shipped with shimehari itself.
        _commands = {name: 'shimehari.core.manage.commands'
                     for name in findCommand(__path__[0], 'commands')}
        # User commands discovered in the current working directory.
        for name in findCommand(os.getcwd(), 'command'):
            _commands[name] = 'command'
    return _commands
def findCommand(manageDir, commandDir):
    """Return command module names found under `<manageDir>/<commandDir>`.

    Files starting with `_` are skipped and the `.py` suffix is stripped.
    An unreadable or missing directory yields an empty list.
    """
    target = os.path.join(manageDir, commandDir)
    try:
        names = os.listdir(target)
    except OSError:
        return []
    return [n[:-3] for n in names if n.endswith('.py') and not n.startswith('_')]
class CommandLineExecuter(object):
    """Resolves a subcommand name and delegates execution to it."""

    def __init__(self, argv):
        # Callers may pass None to use the real command line.
        self.argv = argv or sys.argv[:]
        self.progName = os.path.basename(self.argv[0])

    def fetchCommand(self, subcommand):
        """Return the command instance registered under `subcommand`.

        Prints an error and exits with status 1 when the name is unknown.
        """
        try:
            cmdName = getCommands()[subcommand]
        except KeyError:
            sys.stdout.write("Unknown command: %r\nType %s help for usage. \n" % (subcommand, self.progName))
            sys.exit(1)
        if isinstance(cmdName, AbstractCommand):
            cls = cmdName
        else:
            cls = loadCommandModule(cmdName, subcommand)
        return cls

    def execute(self):
        """Parse argv and run the requested subcommand."""
        try:
            subcommand = self.argv[1]
        except IndexError:
            # No subcommand: fall back to help.  Only IndexError is expected
            # here; a bare `except` would mask unrelated failures.
            subcommand = 'help'
        if subcommand == 'version' or self.argv[1:] == ['--version']:
            # BUG FIX: getVersion was previously imported only inside the
            # 'version' branch, so `--version` raised NameError.
            from shimehari import getVersion
            sys.stdout.write('shimehari version: ' + getVersion() + '\n')
        elif self.argv[1:] in (['--help'], ['-h']):
            self.fetchCommand('help').runFromArgv(self.argv)
        else:
            self.fetchCommand(subcommand).runFromArgv(self.argv)
|
"""
This is the default template for our main set of AWS servers.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
import json
from openedx.stanford.cms.envs.common import *
from openedx.core.lib.logsettings import get_logger_config
import os
from path import Path as path
from xmodule.modulestore.modulestore_settings import convert_module_store_setting_if_needed
# SERVICE_VARIANT specifies name of the variant used, which decides what JSON
# configuration files are read during startup.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# CONFIG_ROOT specifies the directory where the JSON configuration
# files are expected to be found. If not specified, use the project
# directory.
CONFIG_ROOT = path(os.environ.get('CONFIG_ROOT', ENV_ROOT))
# CONFIG_PREFIX specifies the prefix of the JSON configuration files,
# based on the service variant. If no variant is use, don't use a
# prefix.
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
############### ALWAYS THE SAME ################################
DEBUG = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
# IMPORTANT: With this enabled, the server must always be behind a proxy that
# strips the header HTTP_X_FORWARDED_PROTO from client requests. Otherwise,
# a user can fool our server into thinking it was an https connection.
# See
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
# for other warnings.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
###################################### CELERY ################################
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 10.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Rename the exchange and queues for each variant
QUEUE_VARIANT = CONFIG_PREFIX.lower()
CELERY_DEFAULT_EXCHANGE = 'edx.{0}core'.format(QUEUE_VARIANT)
HIGH_PRIORITY_QUEUE = 'edx.{0}core.high'.format(QUEUE_VARIANT)
DEFAULT_PRIORITY_QUEUE = 'edx.{0}core.default'.format(QUEUE_VARIANT)
LOW_PRIORITY_QUEUE = 'edx.{0}core.low'.format(QUEUE_VARIANT)
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
CELERY_ROUTES = "{}celery.Router".format(QUEUE_VARIANT)
############# NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "env.json") as env_file:
ENV_TOKENS = json.load(env_file)
# STATIC_URL_BASE specifies the base url to use for static files
STATIC_URL_BASE = ENV_TOKENS.get('STATIC_URL_BASE', None)
if STATIC_URL_BASE:
# collectstatic will fail if STATIC_URL is a unicode string
STATIC_URL = STATIC_URL_BASE.encode('ascii')
if not STATIC_URL.endswith("/"):
STATIC_URL += "/"
STATIC_URL += EDX_PLATFORM_REVISION + "/"
# DEFAULT_COURSE_ABOUT_IMAGE_URL specifies the default image to show for courses that don't provide one
DEFAULT_COURSE_ABOUT_IMAGE_URL = ENV_TOKENS.get('DEFAULT_COURSE_ABOUT_IMAGE_URL', DEFAULT_COURSE_ABOUT_IMAGE_URL)
# GITHUB_REPO_ROOT is the base directory
# for course data
GITHUB_REPO_ROOT = ENV_TOKENS.get('GITHUB_REPO_ROOT', GITHUB_REPO_ROOT)
# STATIC_ROOT specifies the directory where static files are
# collected
STATIC_ROOT_BASE = ENV_TOKENS.get('STATIC_ROOT_BASE', None)
if STATIC_ROOT_BASE:
STATIC_ROOT = path(STATIC_ROOT_BASE) / EDX_PLATFORM_REVISION
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
EMAIL_FILE_PATH = ENV_TOKENS.get('EMAIL_FILE_PATH', None)
EMAIL_HOST = ENV_TOKENS.get('EMAIL_HOST', EMAIL_HOST)
EMAIL_PORT = ENV_TOKENS.get('EMAIL_PORT', EMAIL_PORT)
EMAIL_USE_TLS = ENV_TOKENS.get('EMAIL_USE_TLS', EMAIL_USE_TLS)
LMS_BASE = ENV_TOKENS.get('LMS_BASE')
LMS_ROOT_URL = ENV_TOKENS.get('LMS_ROOT_URL')
# Note that FEATURES['PREVIEW_LMS_BASE'] gets read in from the environment file.
SITE_NAME = ENV_TOKENS['SITE_NAME']
ALLOWED_HOSTS = [
# TODO: bbeggs remove this before prod, temp fix to get load testing running
"*",
ENV_TOKENS.get('CMS_BASE')
]
LOG_DIR = ENV_TOKENS['LOG_DIR']
CACHES = ENV_TOKENS['CACHES']
# Cache used for location mapping -- called many times with the same key/value
# in a given request.
if 'loc_cache' not in CACHES:
CACHES['loc_cache'] = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
}
SESSION_COOKIE_DOMAIN = ENV_TOKENS.get('SESSION_COOKIE_DOMAIN')
SESSION_COOKIE_HTTPONLY = ENV_TOKENS.get('SESSION_COOKIE_HTTPONLY', True)
SESSION_ENGINE = ENV_TOKENS.get('SESSION_ENGINE', SESSION_ENGINE)
SESSION_COOKIE_SECURE = ENV_TOKENS.get('SESSION_COOKIE_SECURE', SESSION_COOKIE_SECURE)
SESSION_SAVE_EVERY_REQUEST = ENV_TOKENS.get('SESSION_SAVE_EVERY_REQUEST', SESSION_SAVE_EVERY_REQUEST)
# social sharing settings
SOCIAL_SHARING_SETTINGS = ENV_TOKENS.get('SOCIAL_SHARING_SETTINGS', SOCIAL_SHARING_SETTINGS)
REGISTRATION_EMAIL_PATTERNS_ALLOWED = ENV_TOKENS.get('REGISTRATION_EMAIL_PATTERNS_ALLOWED')
# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if ENV_TOKENS.get('SESSION_COOKIE_NAME', None):
# NOTE, there's a bug in Django (http://bugs.python.org/issue18012) which necessitates this being a str()
SESSION_COOKIE_NAME = str(ENV_TOKENS.get('SESSION_COOKIE_NAME'))
# Set the names of cookies shared with the marketing site
# These have the same cookie domain as the session, which in production
# usually includes subdomains.
EDXMKTG_LOGGED_IN_COOKIE_NAME = ENV_TOKENS.get('EDXMKTG_LOGGED_IN_COOKIE_NAME', EDXMKTG_LOGGED_IN_COOKIE_NAME)
EDXMKTG_USER_INFO_COOKIE_NAME = ENV_TOKENS.get('EDXMKTG_USER_INFO_COOKIE_NAME', EDXMKTG_USER_INFO_COOKIE_NAME)
# Determines whether the CSRF token can be transported on
# unencrypted channels. It is set to False here for backward compatibility,
# but it is highly recommended that this is True for environments accessed
# by end users.
CSRF_COOKIE_SECURE = ENV_TOKENS.get('CSRF_COOKIE_SECURE', False)
#Email overrides
DEFAULT_FROM_EMAIL = ENV_TOKENS.get('DEFAULT_FROM_EMAIL', DEFAULT_FROM_EMAIL)
DEFAULT_FEEDBACK_EMAIL = ENV_TOKENS.get('DEFAULT_FEEDBACK_EMAIL', DEFAULT_FEEDBACK_EMAIL)
ADMINS = ENV_TOKENS.get('ADMINS', ADMINS)
SERVER_EMAIL = ENV_TOKENS.get('SERVER_EMAIL', SERVER_EMAIL)
MKTG_URLS = ENV_TOKENS.get('MKTG_URLS', MKTG_URLS)
TECH_SUPPORT_EMAIL = ENV_TOKENS.get('TECH_SUPPORT_EMAIL', TECH_SUPPORT_EMAIL)
for name, value in ENV_TOKENS.get("CODE_JAIL", {}).items():
oldvalue = CODE_JAIL.get(name)
if isinstance(oldvalue, dict):
for subname, subvalue in value.items():
oldvalue[subname] = subvalue
else:
CODE_JAIL[name] = value
COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])
ASSET_IGNORE_REGEX = ENV_TOKENS.get('ASSET_IGNORE_REGEX', ASSET_IGNORE_REGEX)
# following setting is for backward compatibility
if ENV_TOKENS.get('COMPREHENSIVE_THEME_DIR', None):
COMPREHENSIVE_THEME_DIR = ENV_TOKENS.get('COMPREHENSIVE_THEME_DIR')
COMPREHENSIVE_THEME_DIRS = ENV_TOKENS.get('COMPREHENSIVE_THEME_DIRS', COMPREHENSIVE_THEME_DIRS) or []
# COMPREHENSIVE_THEME_LOCALE_PATHS contain the paths to themes locale directories e.g.
# "COMPREHENSIVE_THEME_LOCALE_PATHS" : [
# "/edx/src/edx-themes/conf/locale"
# ],
COMPREHENSIVE_THEME_LOCALE_PATHS = ENV_TOKENS.get('COMPREHENSIVE_THEME_LOCALE_PATHS', [])
DEFAULT_SITE_THEME = ENV_TOKENS.get('DEFAULT_SITE_THEME', DEFAULT_SITE_THEME)
ENABLE_COMPREHENSIVE_THEMING = ENV_TOKENS.get('ENABLE_COMPREHENSIVE_THEMING', ENABLE_COMPREHENSIVE_THEMING)
#Timezone overrides
TIME_ZONE = ENV_TOKENS.get('TIME_ZONE', TIME_ZONE)
# Push to LMS overrides
GIT_REPO_EXPORT_DIR = ENV_TOKENS.get('GIT_REPO_EXPORT_DIR', '/edx/var/edxapp/export_course_repos')
# Translation overrides
LANGUAGES = ENV_TOKENS.get('LANGUAGES', LANGUAGES)
LANGUAGE_CODE = ENV_TOKENS.get('LANGUAGE_CODE', LANGUAGE_CODE)
USE_I18N = ENV_TOKENS.get('USE_I18N', USE_I18N)
ENV_FEATURES = ENV_TOKENS.get('FEATURES', {})
for feature, value in ENV_FEATURES.items():
FEATURES[feature] = value
# Additional installed apps
for app in ENV_TOKENS.get('ADDL_INSTALLED_APPS', []):
INSTALLED_APPS += (app,)
WIKI_ENABLED = ENV_TOKENS.get('WIKI_ENABLED', WIKI_ENABLED)
LOGGING = get_logger_config(LOG_DIR,
logging_env=ENV_TOKENS['LOGGING_ENV'],
debug=False,
service_variant=SERVICE_VARIANT)
#theming start:
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', 'edX')
STUDIO_NAME = ENV_TOKENS.get('STUDIO_NAME', 'edX Studio')
STUDIO_SHORT_NAME = ENV_TOKENS.get('STUDIO_SHORT_NAME', 'Studio')
# Event Tracking
if "TRACKING_IGNORE_URL_PATTERNS" in ENV_TOKENS:
TRACKING_IGNORE_URL_PATTERNS = ENV_TOKENS.get("TRACKING_IGNORE_URL_PATTERNS")
# Django CAS external authentication settings
CAS_EXTRA_LOGIN_PARAMS = ENV_TOKENS.get("CAS_EXTRA_LOGIN_PARAMS", None)
if FEATURES.get('AUTH_USE_CAS'):
CAS_SERVER_URL = ENV_TOKENS.get("CAS_SERVER_URL", None)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
CAS_ATTRIBUTE_CALLBACK = ENV_TOKENS.get('CAS_ATTRIBUTE_CALLBACK', None)
if CAS_ATTRIBUTE_CALLBACK:
import importlib
CAS_USER_DETAILS_RESOLVER = getattr(
importlib.import_module(CAS_ATTRIBUTE_CALLBACK['module']),
CAS_ATTRIBUTE_CALLBACK['function']
)
# Specific setting for the File Upload Service to store media in a bucket.
FILE_UPLOAD_STORAGE_BUCKET_NAME = ENV_TOKENS.get('FILE_UPLOAD_STORAGE_BUCKET_NAME', FILE_UPLOAD_STORAGE_BUCKET_NAME)
FILE_UPLOAD_STORAGE_PREFIX = ENV_TOKENS.get('FILE_UPLOAD_STORAGE_PREFIX', FILE_UPLOAD_STORAGE_PREFIX)
################ SECURE AUTH ITEMS ###############################
# Secret things: passwords, access keys, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "auth.json") as auth_file:
AUTH_TOKENS = json.load(auth_file)
############### XBlock filesystem field config ##########
if 'DJFS' in AUTH_TOKENS and AUTH_TOKENS['DJFS'] is not None:
DJFS = AUTH_TOKENS['DJFS']
if 'url_root' in DJFS:
DJFS['url_root'] = DJFS['url_root'].format(platform_revision=EDX_PLATFORM_REVISION)
EMAIL_HOST_USER = AUTH_TOKENS.get('EMAIL_HOST_USER', EMAIL_HOST_USER)
EMAIL_HOST_PASSWORD = AUTH_TOKENS.get('EMAIL_HOST_PASSWORD', EMAIL_HOST_PASSWORD)
AWS_SES_REGION_NAME = ENV_TOKENS.get('AWS_SES_REGION_NAME', 'us-east-1')
AWS_SES_REGION_ENDPOINT = ENV_TOKENS.get('AWS_SES_REGION_ENDPOINT', 'email.us-east-1.amazonaws.com')
# Note that this is the Studio key for Segment. There is a separate key for the LMS.
CMS_SEGMENT_KEY = AUTH_TOKENS.get('SEGMENT_KEY')
SECRET_KEY = AUTH_TOKENS['SECRET_KEY']
AWS_ACCESS_KEY_ID = AUTH_TOKENS["AWS_ACCESS_KEY_ID"]
if AWS_ACCESS_KEY_ID == "":
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = AUTH_TOKENS["AWS_SECRET_ACCESS_KEY"]
if AWS_SECRET_ACCESS_KEY == "":
AWS_SECRET_ACCESS_KEY = None
# Disabling querystring auth instructs Boto to exclude the querystring parameters (e.g. signature, access key) it
# normally appends to every returned URL.
AWS_QUERYSTRING_AUTH = AUTH_TOKENS.get('AWS_QUERYSTRING_AUTH', True)
if AUTH_TOKENS.get('DEFAULT_FILE_STORAGE'):
DEFAULT_FILE_STORAGE = AUTH_TOKENS.get('DEFAULT_FILE_STORAGE')
elif AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY:
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
else:
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
DATABASES = AUTH_TOKENS['DATABASES']
# The normal database user does not have enough permissions to run migrations.
# Migrations are run with separate credentials, given as DB_MIGRATION_*
# environment variables
for name, database in DATABASES.items():
if name != 'read_replica':
database.update({
'ENGINE': os.environ.get('DB_MIGRATION_ENGINE', database['ENGINE']),
'USER': os.environ.get('DB_MIGRATION_USER', database['USER']),
'PASSWORD': os.environ.get('DB_MIGRATION_PASS', database['PASSWORD']),
'NAME': os.environ.get('DB_MIGRATION_NAME', database['NAME']),
'HOST': os.environ.get('DB_MIGRATION_HOST', database['HOST']),
'PORT': os.environ.get('DB_MIGRATION_PORT', database['PORT']),
})
MODULESTORE = convert_module_store_setting_if_needed(AUTH_TOKENS.get('MODULESTORE', MODULESTORE))
MODULESTORE_FIELD_OVERRIDE_PROVIDERS = ENV_TOKENS.get(
'MODULESTORE_FIELD_OVERRIDE_PROVIDERS',
MODULESTORE_FIELD_OVERRIDE_PROVIDERS
)
XBLOCK_FIELD_DATA_WRAPPERS = ENV_TOKENS.get(
'XBLOCK_FIELD_DATA_WRAPPERS',
XBLOCK_FIELD_DATA_WRAPPERS
)
CONTENTSTORE = AUTH_TOKENS['CONTENTSTORE']
DOC_STORE_CONFIG = AUTH_TOKENS['DOC_STORE_CONFIG']
# Datadog for events!
DATADOG = AUTH_TOKENS.get("DATADOG", {})
DATADOG.update(ENV_TOKENS.get("DATADOG", {}))
# TODO: deprecated (compatibility with previous settings)
if 'DATADOG_API' in AUTH_TOKENS:
DATADOG['api_key'] = AUTH_TOKENS['DATADOG_API']
# Celery Broker
CELERY_ALWAYS_EAGER = ENV_TOKENS.get("CELERY_ALWAYS_EAGER", False)
CELERY_BROKER_TRANSPORT = ENV_TOKENS.get("CELERY_BROKER_TRANSPORT", "")
CELERY_BROKER_HOSTNAME = ENV_TOKENS.get("CELERY_BROKER_HOSTNAME", "")
CELERY_BROKER_VHOST = ENV_TOKENS.get("CELERY_BROKER_VHOST", "")
CELERY_BROKER_USER = AUTH_TOKENS.get("CELERY_BROKER_USER", "")
CELERY_BROKER_PASSWORD = AUTH_TOKENS.get("CELERY_BROKER_PASSWORD", "")
BROKER_URL = "{0}://{1}:{2}@{3}/{4}".format(CELERY_BROKER_TRANSPORT,
CELERY_BROKER_USER,
CELERY_BROKER_PASSWORD,
CELERY_BROKER_HOSTNAME,
CELERY_BROKER_VHOST)
BROKER_USE_SSL = ENV_TOKENS.get('CELERY_BROKER_USE_SSL', False)
# Allow CELERY_QUEUES to be overwritten by ENV_TOKENS,
ENV_CELERY_QUEUES = ENV_TOKENS.get('CELERY_QUEUES', None)
if ENV_CELERY_QUEUES:
CELERY_QUEUES = {queue: {} for queue in ENV_CELERY_QUEUES}
# Then add alternate environment queues
ALTERNATE_QUEUE_ENVS = ENV_TOKENS.get('ALTERNATE_WORKER_QUEUES', '').split()
ALTERNATE_QUEUES = [
DEFAULT_PRIORITY_QUEUE.replace(QUEUE_VARIANT, alternate + '.')
for alternate in ALTERNATE_QUEUE_ENVS
]
CELERY_QUEUES.update(
{
alternate: {}
for alternate in ALTERNATE_QUEUES
if alternate not in CELERY_QUEUES.keys()
}
)
# Event tracking
TRACKING_BACKENDS.update(AUTH_TOKENS.get("TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS['tracking_logs']['OPTIONS']['backends'].update(AUTH_TOKENS.get("EVENT_TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS['segmentio']['OPTIONS']['processors'][0]['OPTIONS']['whitelist'].extend(
AUTH_TOKENS.get("EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST", []))
VIRTUAL_UNIVERSITIES = ENV_TOKENS.get('VIRTUAL_UNIVERSITIES', [])
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED", 5)
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS", 15 * 60)
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = ENV_TOKENS.get("PASSWORD_MIN_LENGTH")
PASSWORD_MAX_LENGTH = ENV_TOKENS.get("PASSWORD_MAX_LENGTH")
PASSWORD_COMPLEXITY = ENV_TOKENS.get("PASSWORD_COMPLEXITY", {})
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = ENV_TOKENS.get("PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD")
PASSWORD_DICTIONARY = ENV_TOKENS.get("PASSWORD_DICTIONARY", [])
### INACTIVITY SETTINGS ####
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = AUTH_TOKENS.get("SESSION_INACTIVITY_TIMEOUT_IN_SECONDS")
##### X-Frame-Options response header settings #####
X_FRAME_OPTIONS = ENV_TOKENS.get('X_FRAME_OPTIONS', X_FRAME_OPTIONS)
##### ADVANCED_SECURITY_CONFIG #####
ADVANCED_SECURITY_CONFIG = ENV_TOKENS.get('ADVANCED_SECURITY_CONFIG', {})
################ ADVANCED COMPONENT/PROBLEM TYPES ###############
ADVANCED_PROBLEM_TYPES = ENV_TOKENS.get('ADVANCED_PROBLEM_TYPES', ADVANCED_PROBLEM_TYPES)
################ VIDEO UPLOAD PIPELINE ###############
VIDEO_UPLOAD_PIPELINE = ENV_TOKENS.get('VIDEO_UPLOAD_PIPELINE', VIDEO_UPLOAD_PIPELINE)
################ PUSH NOTIFICATIONS ###############
PARSE_KEYS = AUTH_TOKENS.get("PARSE_KEYS", {})
# Video Caching. Pairing country codes with CDN URLs.
# Example: {'CN': 'http://api.xuetangx.com/edx/video?s3_url='}
VIDEO_CDN_URL = ENV_TOKENS.get('VIDEO_CDN_URL', {})
if FEATURES['ENABLE_COURSEWARE_INDEX'] or FEATURES['ENABLE_LIBRARY_INDEX']:
# Use ElasticSearch for the search engine
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
ELASTIC_SEARCH_CONFIG = ENV_TOKENS.get('ELASTIC_SEARCH_CONFIG', [{}])
XBLOCK_SETTINGS = ENV_TOKENS.get('XBLOCK_SETTINGS', {})
XBLOCK_SETTINGS.setdefault("VideoDescriptor", {})["licensing_enabled"] = FEATURES.get("LICENSING", False)
XBLOCK_SETTINGS.setdefault("VideoModule", {})['YOUTUBE_API_KEY'] = AUTH_TOKENS.get('YOUTUBE_API_KEY', YOUTUBE_API_KEY)
################# PROCTORING CONFIGURATION ##################
PROCTORING_BACKEND_PROVIDER = AUTH_TOKENS.get("PROCTORING_BACKEND_PROVIDER", PROCTORING_BACKEND_PROVIDER)
PROCTORING_SETTINGS = ENV_TOKENS.get("PROCTORING_SETTINGS", PROCTORING_SETTINGS)
################# MICROSITE ####################
# microsite specific configurations.
MICROSITE_CONFIGURATION = ENV_TOKENS.get('MICROSITE_CONFIGURATION', {})
MICROSITE_ROOT_DIR = path(ENV_TOKENS.get('MICROSITE_ROOT_DIR', ''))
# this setting specify which backend to be used when pulling microsite specific configuration
MICROSITE_BACKEND = ENV_TOKENS.get("MICROSITE_BACKEND", MICROSITE_BACKEND)
# this setting specify which backend to be used when loading microsite specific templates
MICROSITE_TEMPLATE_BACKEND = ENV_TOKENS.get("MICROSITE_TEMPLATE_BACKEND", MICROSITE_TEMPLATE_BACKEND)
# TTL for microsite database template cache
MICROSITE_DATABASE_TEMPLATE_CACHE_TTL = ENV_TOKENS.get(
"MICROSITE_DATABASE_TEMPLATE_CACHE_TTL", MICROSITE_DATABASE_TEMPLATE_CACHE_TTL
)
############################ OAUTH2 Provider ###################################
# OpenID Connect issuer ID. Normally the URL of the authentication endpoint.
OAUTH_OIDC_ISSUER = ENV_TOKENS['OAUTH_OIDC_ISSUER']
#### JWT configuration ####
JWT_AUTH.update(ENV_TOKENS.get('JWT_AUTH', {}))
######################## CUSTOM COURSES for EDX CONNECTOR ######################
if FEATURES.get('CUSTOM_COURSES_EDX'):
INSTALLED_APPS += ('openedx.core.djangoapps.ccxcon',)
# Partner support link for CMS footer
PARTNER_SUPPORT_EMAIL = ENV_TOKENS.get('PARTNER_SUPPORT_EMAIL', PARTNER_SUPPORT_EMAIL)
# Affiliate cookie tracking
AFFILIATE_COOKIE_NAME = ENV_TOKENS.get('AFFILIATE_COOKIE_NAME', AFFILIATE_COOKIE_NAME)
############## Settings for Studio Context Sensitive Help ##############
DOC_LINK_BASE_URL = ENV_TOKENS.get('DOC_LINK_BASE_URL', DOC_LINK_BASE_URL)
# Get MKTG_URL_LINK_MAP from ENV_TOKENS in CMS
"""
This is the default template for our main set of AWS servers.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
import json
from openedx.stanford.cms.envs.common import *
from openedx.core.lib.logsettings import get_logger_config
import os
from path import Path as path
from xmodule.modulestore.modulestore_settings import convert_module_store_setting_if_needed
# SERVICE_VARIANT specifies name of the variant used, which decides what JSON
# configuration files are read during startup.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# CONFIG_ROOT specifies the directory where the JSON configuration
# files are expected to be found. If not specified, use the project
# directory.
CONFIG_ROOT = path(os.environ.get('CONFIG_ROOT', ENV_ROOT))
# CONFIG_PREFIX specifies the prefix of the JSON configuration files,
# based on the service variant. If no variant is use, don't use a
# prefix.
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
############### ALWAYS THE SAME ################################
DEBUG = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
# IMPORTANT: With this enabled, the server must always be behind a proxy that
# strips the header HTTP_X_FORWARDED_PROTO from client requests. Otherwise,
# a user can fool our server into thinking it was an https connection.
# See
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
# for other warnings.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
###################################### CELERY ################################
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 10.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Rename the exchange and queues for each variant
QUEUE_VARIANT = CONFIG_PREFIX.lower()
CELERY_DEFAULT_EXCHANGE = 'edx.{0}core'.format(QUEUE_VARIANT)
HIGH_PRIORITY_QUEUE = 'edx.{0}core.high'.format(QUEUE_VARIANT)
DEFAULT_PRIORITY_QUEUE = 'edx.{0}core.default'.format(QUEUE_VARIANT)
LOW_PRIORITY_QUEUE = 'edx.{0}core.low'.format(QUEUE_VARIANT)
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
CELERY_ROUTES = "{}celery.Router".format(QUEUE_VARIANT)
############# NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "env.json") as env_file:
ENV_TOKENS = json.load(env_file)
# STATIC_URL_BASE specifies the base url to use for static files
STATIC_URL_BASE = ENV_TOKENS.get('STATIC_URL_BASE', None)
if STATIC_URL_BASE:
# collectstatic will fail if STATIC_URL is a unicode string
STATIC_URL = STATIC_URL_BASE.encode('ascii')
if not STATIC_URL.endswith("/"):
STATIC_URL += "/"
STATIC_URL += EDX_PLATFORM_REVISION + "/"
# DEFAULT_COURSE_ABOUT_IMAGE_URL specifies the default image to show for courses that don't provide one
DEFAULT_COURSE_ABOUT_IMAGE_URL = ENV_TOKENS.get('DEFAULT_COURSE_ABOUT_IMAGE_URL', DEFAULT_COURSE_ABOUT_IMAGE_URL)
# GITHUB_REPO_ROOT is the base directory
# for course data
GITHUB_REPO_ROOT = ENV_TOKENS.get('GITHUB_REPO_ROOT', GITHUB_REPO_ROOT)
# STATIC_ROOT specifies the directory where static files are
# collected
STATIC_ROOT_BASE = ENV_TOKENS.get('STATIC_ROOT_BASE', None)
if STATIC_ROOT_BASE:
STATIC_ROOT = path(STATIC_ROOT_BASE) / EDX_PLATFORM_REVISION
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
EMAIL_FILE_PATH = ENV_TOKENS.get('EMAIL_FILE_PATH', None)
EMAIL_HOST = ENV_TOKENS.get('EMAIL_HOST', EMAIL_HOST)
EMAIL_PORT = ENV_TOKENS.get('EMAIL_PORT', EMAIL_PORT)
EMAIL_USE_TLS = ENV_TOKENS.get('EMAIL_USE_TLS', EMAIL_USE_TLS)
LMS_BASE = ENV_TOKENS.get('LMS_BASE')
LMS_ROOT_URL = ENV_TOKENS.get('LMS_ROOT_URL')
# Note that FEATURES['PREVIEW_LMS_BASE'] gets read in from the environment file.
SITE_NAME = ENV_TOKENS['SITE_NAME']
ALLOWED_HOSTS = [
# TODO: bbeggs remove this before prod, temp fix to get load testing running
"*",
ENV_TOKENS.get('CMS_BASE')
]
LOG_DIR = ENV_TOKENS['LOG_DIR']
# -------------------------------------------------------------------------
# ENV_TOKENS overrides: caches, sessions, cookies, email, code-jail,
# theming, i18n, logging, tracking and CAS auth.  Order matters here:
# many lines re-read the current setting value as the fallback default.
# -------------------------------------------------------------------------
CACHES = ENV_TOKENS['CACHES']
# Cache used for location mapping -- called many times with the same key/value
# in a given request.
if 'loc_cache' not in CACHES:
    CACHES['loc_cache'] = {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'edx_location_mem_cache',
    }

# Session/cookie configuration; each falls back to the value already set.
SESSION_COOKIE_DOMAIN = ENV_TOKENS.get('SESSION_COOKIE_DOMAIN')
SESSION_COOKIE_HTTPONLY = ENV_TOKENS.get('SESSION_COOKIE_HTTPONLY', True)
SESSION_ENGINE = ENV_TOKENS.get('SESSION_ENGINE', SESSION_ENGINE)
SESSION_COOKIE_SECURE = ENV_TOKENS.get('SESSION_COOKIE_SECURE', SESSION_COOKIE_SECURE)
SESSION_SAVE_EVERY_REQUEST = ENV_TOKENS.get('SESSION_SAVE_EVERY_REQUEST', SESSION_SAVE_EVERY_REQUEST)

# social sharing settings
SOCIAL_SHARING_SETTINGS = ENV_TOKENS.get('SOCIAL_SHARING_SETTINGS', SOCIAL_SHARING_SETTINGS)

# No default: None disables the pattern-based registration restriction.
REGISTRATION_EMAIL_PATTERNS_ALLOWED = ENV_TOKENS.get('REGISTRATION_EMAIL_PATTERNS_ALLOWED')

# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if ENV_TOKENS.get('SESSION_COOKIE_NAME', None):
    # NOTE: http://bugs.python.org/issue18012 (a Python cookie-handling bug
    # surfaced through Django) necessitates this being a str()
    SESSION_COOKIE_NAME = str(ENV_TOKENS.get('SESSION_COOKIE_NAME'))

# Set the names of cookies shared with the marketing site
# These have the same cookie domain as the session, which in production
# usually includes subdomains.
EDXMKTG_LOGGED_IN_COOKIE_NAME = ENV_TOKENS.get('EDXMKTG_LOGGED_IN_COOKIE_NAME', EDXMKTG_LOGGED_IN_COOKIE_NAME)
EDXMKTG_USER_INFO_COOKIE_NAME = ENV_TOKENS.get('EDXMKTG_USER_INFO_COOKIE_NAME', EDXMKTG_USER_INFO_COOKIE_NAME)

# Determines whether the CSRF token can be transported on
# unencrypted channels. It is set to False here for backward compatibility,
# but it is highly recommended that this is True for environments accessed
# by end users.
CSRF_COOKIE_SECURE = ENV_TOKENS.get('CSRF_COOKIE_SECURE', False)

# Email overrides
DEFAULT_FROM_EMAIL = ENV_TOKENS.get('DEFAULT_FROM_EMAIL', DEFAULT_FROM_EMAIL)
DEFAULT_FEEDBACK_EMAIL = ENV_TOKENS.get('DEFAULT_FEEDBACK_EMAIL', DEFAULT_FEEDBACK_EMAIL)
ADMINS = ENV_TOKENS.get('ADMINS', ADMINS)
SERVER_EMAIL = ENV_TOKENS.get('SERVER_EMAIL', SERVER_EMAIL)
MKTG_URLS = ENV_TOKENS.get('MKTG_URLS', MKTG_URLS)
MKTG_URL_LINK_MAP.update(ENV_TOKENS.get('MKTG_URL_LINK_MAP', {}))
TECH_SUPPORT_EMAIL = ENV_TOKENS.get('TECH_SUPPORT_EMAIL', TECH_SUPPORT_EMAIL)

# Merge CODE_JAIL overrides one level deep: dict-valued entries are merged
# key-by-key, anything else replaces the old value wholesale.
for name, value in ENV_TOKENS.get("CODE_JAIL", {}).items():
    oldvalue = CODE_JAIL.get(name)
    if isinstance(oldvalue, dict):
        for subname, subvalue in value.items():
            oldvalue[subname] = subvalue
    else:
        CODE_JAIL[name] = value

COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])
ASSET_IGNORE_REGEX = ENV_TOKENS.get('ASSET_IGNORE_REGEX', ASSET_IGNORE_REGEX)

# following setting is for backward compatibility
if ENV_TOKENS.get('COMPREHENSIVE_THEME_DIR', None):
    COMPREHENSIVE_THEME_DIR = ENV_TOKENS.get('COMPREHENSIVE_THEME_DIR')

COMPREHENSIVE_THEME_DIRS = ENV_TOKENS.get('COMPREHENSIVE_THEME_DIRS', COMPREHENSIVE_THEME_DIRS) or []

# COMPREHENSIVE_THEME_LOCALE_PATHS contain the paths to themes locale directories e.g.
# "COMPREHENSIVE_THEME_LOCALE_PATHS" : [
#     "/edx/src/edx-themes/conf/locale"
# ],
COMPREHENSIVE_THEME_LOCALE_PATHS = ENV_TOKENS.get('COMPREHENSIVE_THEME_LOCALE_PATHS', [])

DEFAULT_SITE_THEME = ENV_TOKENS.get('DEFAULT_SITE_THEME', DEFAULT_SITE_THEME)
ENABLE_COMPREHENSIVE_THEMING = ENV_TOKENS.get('ENABLE_COMPREHENSIVE_THEMING', ENABLE_COMPREHENSIVE_THEMING)

# Timezone overrides
TIME_ZONE = ENV_TOKENS.get('TIME_ZONE', TIME_ZONE)

# Push to LMS overrides
GIT_REPO_EXPORT_DIR = ENV_TOKENS.get('GIT_REPO_EXPORT_DIR', '/edx/var/edxapp/export_course_repos')

# Translation overrides
LANGUAGES = ENV_TOKENS.get('LANGUAGES', LANGUAGES)
LANGUAGE_CODE = ENV_TOKENS.get('LANGUAGE_CODE', LANGUAGE_CODE)
USE_I18N = ENV_TOKENS.get('USE_I18N', USE_I18N)

# Feature flags from the environment override the in-code defaults.
ENV_FEATURES = ENV_TOKENS.get('FEATURES', {})
for feature, value in ENV_FEATURES.items():
    FEATURES[feature] = value

# Additional installed apps
for app in ENV_TOKENS.get('ADDL_INSTALLED_APPS', []):
    INSTALLED_APPS += (app,)

WIKI_ENABLED = ENV_TOKENS.get('WIKI_ENABLED', WIKI_ENABLED)

# LOGGING_ENV is required -- fail loudly at startup if it is missing.
LOGGING = get_logger_config(LOG_DIR,
                            logging_env=ENV_TOKENS['LOGGING_ENV'],
                            debug=False,
                            service_variant=SERVICE_VARIANT)

# theming start:
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', 'edX')
STUDIO_NAME = ENV_TOKENS.get('STUDIO_NAME', 'edX Studio')
STUDIO_SHORT_NAME = ENV_TOKENS.get('STUDIO_SHORT_NAME', 'Studio')

# Event Tracking
if "TRACKING_IGNORE_URL_PATTERNS" in ENV_TOKENS:
    TRACKING_IGNORE_URL_PATTERNS = ENV_TOKENS.get("TRACKING_IGNORE_URL_PATTERNS")

# Django CAS external authentication settings
CAS_EXTRA_LOGIN_PARAMS = ENV_TOKENS.get("CAS_EXTRA_LOGIN_PARAMS", None)
if FEATURES.get('AUTH_USE_CAS'):
    CAS_SERVER_URL = ENV_TOKENS.get("CAS_SERVER_URL", None)
    AUTHENTICATION_BACKENDS = (
        'django.contrib.auth.backends.ModelBackend',
        'django_cas.backends.CASBackend',
    )
    INSTALLED_APPS += ('django_cas',)
    MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
    CAS_ATTRIBUTE_CALLBACK = ENV_TOKENS.get('CAS_ATTRIBUTE_CALLBACK', None)
    if CAS_ATTRIBUTE_CALLBACK:
        import importlib
        # Resolve the {'module': ..., 'function': ...} spec to the callable.
        CAS_USER_DETAILS_RESOLVER = getattr(
            importlib.import_module(CAS_ATTRIBUTE_CALLBACK['module']),
            CAS_ATTRIBUTE_CALLBACK['function']
        )

# Specific setting for the File Upload Service to store media in a bucket.
FILE_UPLOAD_STORAGE_BUCKET_NAME = ENV_TOKENS.get('FILE_UPLOAD_STORAGE_BUCKET_NAME', FILE_UPLOAD_STORAGE_BUCKET_NAME)
FILE_UPLOAD_STORAGE_PREFIX = ENV_TOKENS.get('FILE_UPLOAD_STORAGE_PREFIX', FILE_UPLOAD_STORAGE_PREFIX)
################ SECURE AUTH ITEMS ###############
# Secret things: passwords, access keys, etc.
# Everything below this point may also pull from the secrets file.
with open(CONFIG_ROOT / CONFIG_PREFIX + "auth.json") as auth_file:
    AUTH_TOKENS = json.load(auth_file)

############### XBlock filesystem field config ##########
if 'DJFS' in AUTH_TOKENS and AUTH_TOKENS['DJFS'] is not None:
    DJFS = AUTH_TOKENS['DJFS']
    if 'url_root' in DJFS:
        # Substitute the platform revision into the asset URL template.
        DJFS['url_root'] = DJFS['url_root'].format(platform_revision=EDX_PLATFORM_REVISION)

EMAIL_HOST_USER = AUTH_TOKENS.get('EMAIL_HOST_USER', EMAIL_HOST_USER)
EMAIL_HOST_PASSWORD = AUTH_TOKENS.get('EMAIL_HOST_PASSWORD', EMAIL_HOST_PASSWORD)

AWS_SES_REGION_NAME = ENV_TOKENS.get('AWS_SES_REGION_NAME', 'us-east-1')
AWS_SES_REGION_ENDPOINT = ENV_TOKENS.get('AWS_SES_REGION_ENDPOINT', 'email.us-east-1.amazonaws.com')

# Note that this is the Studio key for Segment. There is a separate key for the LMS.
CMS_SEGMENT_KEY = AUTH_TOKENS.get('SEGMENT_KEY')

# SECRET_KEY is required -- fail loudly at startup if it is missing.
SECRET_KEY = AUTH_TOKENS['SECRET_KEY']

# Normalize empty-string AWS credentials to None so boto treats them as absent.
AWS_ACCESS_KEY_ID = AUTH_TOKENS["AWS_ACCESS_KEY_ID"]
if AWS_ACCESS_KEY_ID == "":
    AWS_ACCESS_KEY_ID = None

AWS_SECRET_ACCESS_KEY = AUTH_TOKENS["AWS_SECRET_ACCESS_KEY"]
if AWS_SECRET_ACCESS_KEY == "":
    AWS_SECRET_ACCESS_KEY = None

# Disabling querystring auth instructs Boto to exclude the querystring parameters (e.g. signature, access key) it
# normally appends to every returned URL.
AWS_QUERYSTRING_AUTH = AUTH_TOKENS.get('AWS_QUERYSTRING_AUTH', True)

# Storage backend: explicit setting wins, else S3 when credentials exist,
# else the local filesystem.
if AUTH_TOKENS.get('DEFAULT_FILE_STORAGE'):
    DEFAULT_FILE_STORAGE = AUTH_TOKENS.get('DEFAULT_FILE_STORAGE')
elif AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY:
    DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
else:
    DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'

DATABASES = AUTH_TOKENS['DATABASES']

# The normal database user does not have enough permissions to run migrations.
# Migrations are run with separate credentials, given as DB_MIGRATION_*
# environment variables
for name, database in DATABASES.items():
    if name != 'read_replica':
        database.update({
            'ENGINE': os.environ.get('DB_MIGRATION_ENGINE', database['ENGINE']),
            'USER': os.environ.get('DB_MIGRATION_USER', database['USER']),
            'PASSWORD': os.environ.get('DB_MIGRATION_PASS', database['PASSWORD']),
            'NAME': os.environ.get('DB_MIGRATION_NAME', database['NAME']),
            'HOST': os.environ.get('DB_MIGRATION_HOST', database['HOST']),
            'PORT': os.environ.get('DB_MIGRATION_PORT', database['PORT']),
        })

MODULESTORE = convert_module_store_setting_if_needed(AUTH_TOKENS.get('MODULESTORE', MODULESTORE))
MODULESTORE_FIELD_OVERRIDE_PROVIDERS = ENV_TOKENS.get(
    'MODULESTORE_FIELD_OVERRIDE_PROVIDERS',
    MODULESTORE_FIELD_OVERRIDE_PROVIDERS
)

XBLOCK_FIELD_DATA_WRAPPERS = ENV_TOKENS.get(
    'XBLOCK_FIELD_DATA_WRAPPERS',
    XBLOCK_FIELD_DATA_WRAPPERS
)

CONTENTSTORE = AUTH_TOKENS['CONTENTSTORE']
DOC_STORE_CONFIG = AUTH_TOKENS['DOC_STORE_CONFIG']

# Datadog for events!
DATADOG = AUTH_TOKENS.get("DATADOG", {})
DATADOG.update(ENV_TOKENS.get("DATADOG", {}))

# TODO: deprecated (compatibility with previous settings)
if 'DATADOG_API' in AUTH_TOKENS:
    DATADOG['api_key'] = AUTH_TOKENS['DATADOG_API']

# Celery Broker
CELERY_ALWAYS_EAGER = ENV_TOKENS.get("CELERY_ALWAYS_EAGER", False)
CELERY_BROKER_TRANSPORT = ENV_TOKENS.get("CELERY_BROKER_TRANSPORT", "")
CELERY_BROKER_HOSTNAME = ENV_TOKENS.get("CELERY_BROKER_HOSTNAME", "")
CELERY_BROKER_VHOST = ENV_TOKENS.get("CELERY_BROKER_VHOST", "")
# Broker credentials come from the secrets file, not the env file.
CELERY_BROKER_USER = AUTH_TOKENS.get("CELERY_BROKER_USER", "")
CELERY_BROKER_PASSWORD = AUTH_TOKENS.get("CELERY_BROKER_PASSWORD", "")

BROKER_URL = "{0}://{1}:{2}@{3}/{4}".format(CELERY_BROKER_TRANSPORT,
                                            CELERY_BROKER_USER,
                                            CELERY_BROKER_PASSWORD,
                                            CELERY_BROKER_HOSTNAME,
                                            CELERY_BROKER_VHOST)
BROKER_USE_SSL = ENV_TOKENS.get('CELERY_BROKER_USE_SSL', False)

# Allow CELERY_QUEUES to be overwritten by ENV_TOKENS,
ENV_CELERY_QUEUES = ENV_TOKENS.get('CELERY_QUEUES', None)
if ENV_CELERY_QUEUES:
    CELERY_QUEUES = {queue: {} for queue in ENV_CELERY_QUEUES}

# Then add alternate environment queues
ALTERNATE_QUEUE_ENVS = ENV_TOKENS.get('ALTERNATE_WORKER_QUEUES', '').split()
ALTERNATE_QUEUES = [
    DEFAULT_PRIORITY_QUEUE.replace(QUEUE_VARIANT, alternate + '.')
    for alternate in ALTERNATE_QUEUE_ENVS
]
CELERY_QUEUES.update(
    {
        alternate: {}
        for alternate in ALTERNATE_QUEUES
        if alternate not in CELERY_QUEUES.keys()
    }
)

# Event tracking
TRACKING_BACKENDS.update(AUTH_TOKENS.get("TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS['tracking_logs']['OPTIONS']['backends'].update(AUTH_TOKENS.get("EVENT_TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS['segmentio']['OPTIONS']['processors'][0]['OPTIONS']['whitelist'].extend(
    AUTH_TOKENS.get("EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST", []))
VIRTUAL_UNIVERSITIES = ENV_TOKENS.get('VIRTUAL_UNIVERSITIES', [])

##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED", 5)
# Lockout window defaults to 15 minutes.
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS", 15 * 60)

#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = ENV_TOKENS.get("PASSWORD_MIN_LENGTH")
PASSWORD_MAX_LENGTH = ENV_TOKENS.get("PASSWORD_MAX_LENGTH")
PASSWORD_COMPLEXITY = ENV_TOKENS.get("PASSWORD_COMPLEXITY", {})
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = ENV_TOKENS.get("PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD")
PASSWORD_DICTIONARY = ENV_TOKENS.get("PASSWORD_DICTIONARY", [])

### INACTIVITY SETTINGS ####
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = AUTH_TOKENS.get("SESSION_INACTIVITY_TIMEOUT_IN_SECONDS")

##### X-Frame-Options response header settings #####
X_FRAME_OPTIONS = ENV_TOKENS.get('X_FRAME_OPTIONS', X_FRAME_OPTIONS)

##### ADVANCED_SECURITY_CONFIG #####
ADVANCED_SECURITY_CONFIG = ENV_TOKENS.get('ADVANCED_SECURITY_CONFIG', {})

################ ADVANCED COMPONENT/PROBLEM TYPES ###############
ADVANCED_PROBLEM_TYPES = ENV_TOKENS.get('ADVANCED_PROBLEM_TYPES', ADVANCED_PROBLEM_TYPES)

################ VIDEO UPLOAD PIPELINE ###############
VIDEO_UPLOAD_PIPELINE = ENV_TOKENS.get('VIDEO_UPLOAD_PIPELINE', VIDEO_UPLOAD_PIPELINE)

################ PUSH NOTIFICATIONS ###############
PARSE_KEYS = AUTH_TOKENS.get("PARSE_KEYS", {})

# Video Caching. Pairing country codes with CDN URLs.
# Example: {'CN': 'http://api.xuetangx.com/edx/video?s3_url='}
VIDEO_CDN_URL = ENV_TOKENS.get('VIDEO_CDN_URL', {})

if FEATURES['ENABLE_COURSEWARE_INDEX'] or FEATURES['ENABLE_LIBRARY_INDEX']:
    # Use ElasticSearch for the search engine
    SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"

ELASTIC_SEARCH_CONFIG = ENV_TOKENS.get('ELASTIC_SEARCH_CONFIG', [{}])

XBLOCK_SETTINGS = ENV_TOKENS.get('XBLOCK_SETTINGS', {})
XBLOCK_SETTINGS.setdefault("VideoDescriptor", {})["licensing_enabled"] = FEATURES.get("LICENSING", False)
XBLOCK_SETTINGS.setdefault("VideoModule", {})['YOUTUBE_API_KEY'] = AUTH_TOKENS.get('YOUTUBE_API_KEY', YOUTUBE_API_KEY)

################# PROCTORING CONFIGURATION ##################
PROCTORING_BACKEND_PROVIDER = AUTH_TOKENS.get("PROCTORING_BACKEND_PROVIDER", PROCTORING_BACKEND_PROVIDER)
PROCTORING_SETTINGS = ENV_TOKENS.get("PROCTORING_SETTINGS", PROCTORING_SETTINGS)

################# MICROSITE ####################
# microsite specific configurations.
MICROSITE_CONFIGURATION = ENV_TOKENS.get('MICROSITE_CONFIGURATION', {})
MICROSITE_ROOT_DIR = path(ENV_TOKENS.get('MICROSITE_ROOT_DIR', ''))
# this setting specify which backend to be used when pulling microsite specific configuration
MICROSITE_BACKEND = ENV_TOKENS.get("MICROSITE_BACKEND", MICROSITE_BACKEND)
# this setting specify which backend to be used when loading microsite specific templates
MICROSITE_TEMPLATE_BACKEND = ENV_TOKENS.get("MICROSITE_TEMPLATE_BACKEND", MICROSITE_TEMPLATE_BACKEND)
# TTL for microsite database template cache
MICROSITE_DATABASE_TEMPLATE_CACHE_TTL = ENV_TOKENS.get(
    "MICROSITE_DATABASE_TEMPLATE_CACHE_TTL", MICROSITE_DATABASE_TEMPLATE_CACHE_TTL
)

############################ OAUTH2 Provider ###################################
# OpenID Connect issuer ID. Normally the URL of the authentication endpoint.
# Required -- fail loudly at startup if it is missing.
OAUTH_OIDC_ISSUER = ENV_TOKENS['OAUTH_OIDC_ISSUER']

#### JWT configuration ####
JWT_AUTH.update(ENV_TOKENS.get('JWT_AUTH', {}))

######################## CUSTOM COURSES for EDX CONNECTOR ######################
if FEATURES.get('CUSTOM_COURSES_EDX'):
    INSTALLED_APPS += ('openedx.core.djangoapps.ccxcon',)

# Partner support link for CMS footer
PARTNER_SUPPORT_EMAIL = ENV_TOKENS.get('PARTNER_SUPPORT_EMAIL', PARTNER_SUPPORT_EMAIL)

# Affiliate cookie tracking
AFFILIATE_COOKIE_NAME = ENV_TOKENS.get('AFFILIATE_COOKIE_NAME', AFFILIATE_COOKIE_NAME)

############## Settings for Studio Context Sensitive Help ##############
DOC_LINK_BASE_URL = ENV_TOKENS.get('DOC_LINK_BASE_URL', DOC_LINK_BASE_URL)
|
"""Module containing element dict, species and reaction classes, and constants.
"""
# Python 2 compatibility
from __future__ import division
# Standard libraries
import math
import numpy as np
__all__ = ['RU', 'RUC', 'RU_JOUL', 'PA', 'get_elem_wt',
'ReacInfo', 'SpecInfo', 'calc_spec_smh']
# universal gas constants, SI units
RU = 8314.4621  # J/(kmole * K)
RU_JOUL = 8.3144621  # J/(mole * K)
RUC = (RU / 4.18400)  # cal/(mole * K); 4.18400 J = 1 thermochemical calorie
# Avogadro's number [1/mole]
AVAG = 6.0221367e23
# pressure of one standard atmosphere [Pa]
PA = 101325.0
class CommonEqualityMixin(object):
    """Mixin providing attribute-based ``__eq__``/``__ne__``.

    Compares instances attribute-by-attribute.  ndarray-valued attributes
    (e.g. ``SpecInfo.hi``/``SpecInfo.lo``) are compared with
    ``numpy.array_equal``, because plain ``==`` on arrays returns an
    elementwise array whose truth value raises ``ValueError`` -- the bug
    in the original ``self.__dict__ == other.__dict__`` comparison.
    """

    def __eq__(self, other):
        # Different (sub)classes never compare equal.
        if not isinstance(other, self.__class__):
            return False
        # Attribute names must match exactly (catches extras on either side).
        if set(self.__dict__) != set(other.__dict__):
            return False
        for key, value in self.__dict__.items():
            other_value = other.__dict__[key]
            if isinstance(value, np.ndarray):
                # Elementwise comparison collapsed to a single bool.
                if not np.array_equal(value, other_value):
                    return False
            elif value != other_value:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)
def get_elem_wt():
    """Return a dict of built-in element names and atomic weights [kg/kmol].

    Returns
    -------
    elem_wt : dict
        Dictionary with lowercase element name keys and atomic weight
        [kg/kmol] values; includes deuterium ('d') and the electron ('e').
    """
    # Plain dict literal (insertion into dict() from a pair list is
    # equivalent but noisier); values are unchanged.
    elem_wt = {
        'h': 1.00794, 'he': 4.00260, 'li': 6.93900,
        'be': 9.01220, 'b': 10.81100, 'c': 12.0110,
        'n': 14.00674, 'o': 15.99940, 'f': 18.99840,
        'ne': 20.18300, 'na': 22.98980, 'mg': 24.31200,
        'al': 26.98150, 'si': 28.08600, 'p': 30.97380,
        's': 32.06400, 'cl': 35.45300, 'ar': 39.94800,
        'k': 39.10200, 'ca': 40.08000, 'sc': 44.95600,
        'ti': 47.90000, 'v': 50.94200, 'cr': 51.99600,
        'mn': 54.93800, 'fe': 55.84700, 'co': 58.93320,
        'ni': 58.71000, 'cu': 63.54000, 'zn': 65.37000,
        'ga': 69.72000, 'ge': 72.59000, 'as': 74.92160,
        'se': 78.96000, 'br': 79.90090, 'kr': 83.80000,
        'rb': 85.47000, 'sr': 87.62000, 'y': 88.90500,
        'zr': 91.22000, 'nb': 92.90600, 'mo': 95.94000,
        'tc': 99.00000, 'ru': 101.07000, 'rh': 102.90500,
        'pd': 106.40000, 'ag': 107.87000, 'cd': 112.40000,
        'in': 114.82000, 'sn': 118.69000, 'sb': 121.75000,
        'te': 127.60000, 'i': 126.90440, 'xe': 131.30000,
        'cs': 132.90500, 'ba': 137.34000, 'la': 138.91000,
        'ce': 140.12000, 'pr': 140.90700, 'nd': 144.24000,
        'pm': 145.00000, 'sm': 150.35000, 'eu': 151.96000,
        'gd': 157.25000, 'tb': 158.92400, 'dy': 162.50000,
        'ho': 164.93000, 'er': 167.26000, 'tm': 168.93400,
        'yb': 173.04000, 'lu': 174.99700, 'hf': 178.49000,
        'ta': 180.94800, 'w': 183.85000, 're': 186.20000,
        'os': 190.20000, 'ir': 192.20000, 'pt': 195.09000,
        'au': 196.96700, 'hg': 200.59000, 'tl': 204.37000,
        'pb': 207.19000, 'bi': 208.98000, 'po': 210.00000,
        'at': 210.00000, 'rn': 222.00000, 'fr': 223.00000,
        'ra': 226.00000, 'ac': 227.00000, 'th': 232.03800,
        'pa': 231.00000, 'u': 238.03000, 'np': 237.00000,
        'pu': 242.00000, 'am': 243.00000, 'cm': 247.00000,
        'bk': 249.00000, 'cf': 251.00000, 'es': 254.00000,
        'fm': 253.00000, 'd': 2.01410, 'e': 5.48578e-4,
    }
    return elem_wt
class ReacInfo(CommonEqualityMixin):
    """Reaction class.

    Contains all information about a single reaction.

    Attributes
    ----------
    rev : bool
        True if reversible reaction, False if irreversible.
    reactants : list of str
        List of reactant species names (stored as ``reac``).
    reac_nu : list of int/float
        List of reactant stoichiometric coefficients, either int or float.
    products : list of str
        List of product species names (stored as ``prod``).
    prod_nu : list of int/float
        List of product stoichiometric coefficients, either int or float.
    A : float
        Arrhenius pre-exponential coefficient.
    b : float
        Arrhenius temperature exponent.
    E : float
        Arrhenius activation energy.
    rev_par : list of float, optional
        List of reverse Arrhenius coefficients (default empty).
    dup : bool, optional
        Duplicate reaction flag (default False).
    thd_body : bool, optional
        Third-body reaction flag (default False).
    thd_body_eff : list of list of [str, float], optional
        List of third body names and efficiencies (default empty).
    pdep : bool, optional
        Pressure-dependence flag (default False).
    pdep_sp : str, optional
        Name of specific third-body or 'M' (default '').
    low : list of float, optional
        List of low-pressure-limit Arrhenius coefficients (default empty).
    high : list of float, optional
        List of high-pressure-limit Arrhenius coefficients (default empty).
    troe : bool, optional
        Troe pressure-dependence formulation flag (default False).
    troe_par : list of float, optional
        List of Troe formulation constants (default empty).
    sri : bool, optional
        SRI pressure-dependence formulation flag (default False).
    sri_par : list of float, optional
        List of SRI formulation constants (default empty).

    Notes
    -----
    `rev` does not require `rev_par`; if no explicit coefficients, the
    reverse reaction rate will be calculated through the equilibrium
    constant.
    Only one of [`low`,`high`] can be defined.
    If `troe` and `sri` are both False, then the Lindemann is assumed.
    Chebyshev (`cheb*`) and PLOG (`plog*`) attributes are initialized here
    and filled in later by the mechanism parser.
    """

    def __init__(self, rev, reactants, reac_nu, products, prod_nu, A, b, E):
        # NOTE: stored under the shorter names `reac`/`prod`, not the
        # parameter names `reactants`/`products`.
        self.reac = reactants
        self.reac_nu = reac_nu
        self.prod = products
        self.prod_nu = prod_nu

        ## Arrhenius coefficients
        # pre-exponential factor [m, kmol, s]
        self.A = A
        # Temperature exponent [-]
        self.b = b
        # Activation energy, stored as activation temperature [K]
        self.E = E

        # reversible reaction properties
        self.rev = rev
        self.rev_par = []  # reverse A, b, E

        # duplicate reaction
        self.dup = False

        # third-body efficiencies
        self.thd_body = False
        self.thd_body_eff = []  # in pairs with species and efficiency

        # pressure dependence
        self.pdep = False
        self.pdep_sp = ''
        self.low = []
        self.high = []
        self.troe = False
        self.troe_par = []
        self.sri = False
        self.sri_par = []

        # Parameters for pressure-dependent reaction parameterized by
        # bivariate Chebyshev polynomial in temperature and pressure.
        self.cheb = False
        # Number of temperature values over which fit computed.
        self.cheb_n_temp = 0
        # Number of pressure values over which fit computed.
        self.cheb_n_pres = 0
        # Pressure limits for Chebyshev fit [Pa]
        self.cheb_plim = [0.001 * PA, 100. * PA]
        # Temperature limits for Chebyshev fit [K]
        self.cheb_tlim = [300., 2500.]
        # 2D array of Chebyshev fit coefficients
        self.cheb_par = None

        # Parameters for pressure-dependent reaction parameterized by
        # logarithmically interpolating between Arrhenius rate expressions at
        # various pressures.
        self.plog = False
        # List of arrays with [pressure [Pa], A, b, E]
        self.plog_par = None
class SpecInfo(CommonEqualityMixin):
    """Container for a single chemical species.

    Holds the species name, elemental makeup, molecular weight, and the
    NASA polynomial thermodynamic data: low- and high-temperature
    coefficient sets plus the temperature breakpoints separating them.

    Attributes
    ----------
    name : str
        Name of species.
    elem : list of list of [str, float]
        Elemental composition in [element, number] pairs.
    mw : float
        Molecular weight [kg/kmol].
    hi : numpy.ndarray
        High-temperature range NASA thermodynamic coefficients (7 values).
    lo : numpy.ndarray
        Low-temperature range NASA thermodynamic coefficients (7 values).
    Trange : list of float
        Temperatures [K] defining the fit ranges (low, middle, high);
        default [300, 1000, 5000].
    """

    def __init__(self, name):
        # Species name as it appears in the mechanism file.
        self.name = name
        # Elemental composition, filled in by the parser.
        self.elem = []
        # Molecular weight [kg/kmol]; computed once composition is known.
        self.mw = 0.0
        # NASA-7 polynomial coefficients for the two temperature ranges.
        self.lo = np.zeros(7)
        self.hi = np.zeros(7)
        # Fit temperature breakpoints [K]: (low, common/middle, high).
        self.Trange = [300.0, 1000.0, 5000.0]
def calc_spec_smh(T, specs):
    """Calculate standard-state entropies minus enthalpies for all species.

    Evaluates the NASA-7 polynomial form of (S/R - H/(R*T)) for each
    species, choosing the low- or high-temperature coefficient set based
    on the species' middle-range temperature breakpoint.

    Parameters
    ----------
    T : float
        Temperature of gas mixture.
    specs : list of SpecInfo
        List of species.

    Returns
    -------
    spec_smh : list of float
        List of species' standard-state entropies minus enthalpies.
    """
    log_t = math.log(T)
    # Pre-scale the temperature powers shared by every species; divisors
    # come from integrating the NASA-7 cp/R polynomial.
    t_half = T / 2.0
    t_2 = T * T / 6.0
    t_3 = T * T * T / 12.0
    t_4 = T * T * T * T / 20.0

    spec_smh = []
    for sp in specs:
        # Low-range coefficients apply at or below the middle breakpoint.
        a = sp.lo if T <= sp.Trange[1] else sp.hi
        spec_smh.append(
            a[0] * (log_t - 1.0) + a[1] * t_half + a[2] * t_2
            + a[3] * t_3 + a[4] * t_4 - (a[5] / T) + a[6]
        )
    return spec_smh
Fixed the class equality tester (CommonEqualityMixin.__eq__) so that it works with numpy-array attributes.
"""Module containing element dict, species and reaction classes, and constants.
"""
# Python 2 compatibility
from __future__ import division
# Standard libraries
import math
import numpy as np
__all__ = ['RU', 'RUC', 'RU_JOUL', 'PA', 'get_elem_wt',
'ReacInfo', 'SpecInfo', 'calc_spec_smh']
# universal gas constants, SI units
RU = 8314.4621  # J/(kmole * K)
RU_JOUL = 8.3144621  # J/(mole * K)
RUC = (RU / 4.18400)  # cal/(mole * K); 4.18400 J = 1 thermochemical calorie
# Avogadro's number [1/mole]
AVAG = 6.0221367e23
# pressure of one standard atmosphere [Pa]
PA = 101325.0
class CommonEqualityMixin(object):
    """Mixin providing attribute-based ``__eq__``/``__ne__``.

    Compares instances attribute-by-attribute, using
    ``numpy.array_equal`` for ndarray-valued attributes.
    """

    def __eq__(self, other):
        # BUGFIX: the previous version fell through to
        # ``elif value != other.__dict__[key]`` when an ndarray attribute
        # WAS equal; ``!=`` on arrays yields an elementwise array whose
        # truth value raises ValueError.  The isinstance branch now fully
        # owns ndarray comparison.  Also: comparing against a non-instance
        # no longer raises AttributeError, the key check is symmetric
        # (extra attributes on `other` now break equality too), and
        # py2-only ``iteritems`` is replaced by ``items``.
        if not isinstance(other, self.__class__):
            return False
        if set(self.__dict__) != set(other.__dict__):
            return False
        for key, value in self.__dict__.items():
            other_value = other.__dict__[key]
            if isinstance(value, np.ndarray):
                if not np.array_equal(value, other_value):
                    return False
            elif value != other_value:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)
def get_elem_wt():
    """Return a dict of built-in element names and atomic weights [kg/kmol].

    Returns
    -------
    elem_wt : dict
        Dictionary with lowercase element name keys and atomic weight
        [kg/kmol] values; includes deuterium ('d') and the electron ('e').
    """
    # Plain dict literal (insertion into dict() from a pair list is
    # equivalent but noisier); values are unchanged.
    elem_wt = {
        'h': 1.00794, 'he': 4.00260, 'li': 6.93900,
        'be': 9.01220, 'b': 10.81100, 'c': 12.0110,
        'n': 14.00674, 'o': 15.99940, 'f': 18.99840,
        'ne': 20.18300, 'na': 22.98980, 'mg': 24.31200,
        'al': 26.98150, 'si': 28.08600, 'p': 30.97380,
        's': 32.06400, 'cl': 35.45300, 'ar': 39.94800,
        'k': 39.10200, 'ca': 40.08000, 'sc': 44.95600,
        'ti': 47.90000, 'v': 50.94200, 'cr': 51.99600,
        'mn': 54.93800, 'fe': 55.84700, 'co': 58.93320,
        'ni': 58.71000, 'cu': 63.54000, 'zn': 65.37000,
        'ga': 69.72000, 'ge': 72.59000, 'as': 74.92160,
        'se': 78.96000, 'br': 79.90090, 'kr': 83.80000,
        'rb': 85.47000, 'sr': 87.62000, 'y': 88.90500,
        'zr': 91.22000, 'nb': 92.90600, 'mo': 95.94000,
        'tc': 99.00000, 'ru': 101.07000, 'rh': 102.90500,
        'pd': 106.40000, 'ag': 107.87000, 'cd': 112.40000,
        'in': 114.82000, 'sn': 118.69000, 'sb': 121.75000,
        'te': 127.60000, 'i': 126.90440, 'xe': 131.30000,
        'cs': 132.90500, 'ba': 137.34000, 'la': 138.91000,
        'ce': 140.12000, 'pr': 140.90700, 'nd': 144.24000,
        'pm': 145.00000, 'sm': 150.35000, 'eu': 151.96000,
        'gd': 157.25000, 'tb': 158.92400, 'dy': 162.50000,
        'ho': 164.93000, 'er': 167.26000, 'tm': 168.93400,
        'yb': 173.04000, 'lu': 174.99700, 'hf': 178.49000,
        'ta': 180.94800, 'w': 183.85000, 're': 186.20000,
        'os': 190.20000, 'ir': 192.20000, 'pt': 195.09000,
        'au': 196.96700, 'hg': 200.59000, 'tl': 204.37000,
        'pb': 207.19000, 'bi': 208.98000, 'po': 210.00000,
        'at': 210.00000, 'rn': 222.00000, 'fr': 223.00000,
        'ra': 226.00000, 'ac': 227.00000, 'th': 232.03800,
        'pa': 231.00000, 'u': 238.03000, 'np': 237.00000,
        'pu': 242.00000, 'am': 243.00000, 'cm': 247.00000,
        'bk': 249.00000, 'cf': 251.00000, 'es': 254.00000,
        'fm': 253.00000, 'd': 2.01410, 'e': 5.48578e-4,
    }
    return elem_wt
class ReacInfo(CommonEqualityMixin):
    """Reaction class.

    Contains all information about a single reaction.

    Attributes
    ----------
    rev : bool
        True if reversible reaction, False if irreversible.
    reactants : list of str
        List of reactant species names (stored as ``reac``).
    reac_nu : list of int/float
        List of reactant stoichiometric coefficients, either int or float.
    products : list of str
        List of product species names (stored as ``prod``).
    prod_nu : list of int/float
        List of product stoichiometric coefficients, either int or float.
    A : float
        Arrhenius pre-exponential coefficient.
    b : float
        Arrhenius temperature exponent.
    E : float
        Arrhenius activation energy.
    rev_par : list of float, optional
        List of reverse Arrhenius coefficients (default empty).
    dup : bool, optional
        Duplicate reaction flag (default False).
    thd_body : bool, optional
        Third-body reaction flag (default False).
    thd_body_eff : list of list of [str, float], optional
        List of third body names and efficiencies (default empty).
    pdep : bool, optional
        Pressure-dependence flag (default False).
    pdep_sp : str, optional
        Name of specific third-body or 'M' (default '').
    low : list of float, optional
        List of low-pressure-limit Arrhenius coefficients (default empty).
    high : list of float, optional
        List of high-pressure-limit Arrhenius coefficients (default empty).
    troe : bool, optional
        Troe pressure-dependence formulation flag (default False).
    troe_par : list of float, optional
        List of Troe formulation constants (default empty).
    sri : bool, optional
        SRI pressure-dependence formulation flag (default False).
    sri_par : list of float, optional
        List of SRI formulation constants (default empty).

    Notes
    -----
    `rev` does not require `rev_par`; if no explicit coefficients, the
    reverse reaction rate will be calculated through the equilibrium
    constant.
    Only one of [`low`,`high`] can be defined.
    If `troe` and `sri` are both False, then the Lindemann is assumed.
    Chebyshev (`cheb*`) and PLOG (`plog*`) attributes are initialized here
    and filled in later by the mechanism parser.
    """

    def __init__(self, rev, reactants, reac_nu, products, prod_nu, A, b, E):
        # NOTE: stored under the shorter names `reac`/`prod`, not the
        # parameter names `reactants`/`products`.
        self.reac = reactants
        self.reac_nu = reac_nu
        self.prod = products
        self.prod_nu = prod_nu

        ## Arrhenius coefficients
        # pre-exponential factor [m, kmol, s]
        self.A = A
        # Temperature exponent [-]
        self.b = b
        # Activation energy, stored as activation temperature [K]
        self.E = E

        # reversible reaction properties
        self.rev = rev
        self.rev_par = []  # reverse A, b, E

        # duplicate reaction
        self.dup = False

        # third-body efficiencies
        self.thd_body = False
        self.thd_body_eff = []  # in pairs with species and efficiency

        # pressure dependence
        self.pdep = False
        self.pdep_sp = ''
        self.low = []
        self.high = []
        self.troe = False
        self.troe_par = []
        self.sri = False
        self.sri_par = []

        # Parameters for pressure-dependent reaction parameterized by
        # bivariate Chebyshev polynomial in temperature and pressure.
        self.cheb = False
        # Number of temperature values over which fit computed.
        self.cheb_n_temp = 0
        # Number of pressure values over which fit computed.
        self.cheb_n_pres = 0
        # Pressure limits for Chebyshev fit [Pa]
        self.cheb_plim = [0.001 * PA, 100. * PA]
        # Temperature limits for Chebyshev fit [K]
        self.cheb_tlim = [300., 2500.]
        # 2D array of Chebyshev fit coefficients
        self.cheb_par = None

        # Parameters for pressure-dependent reaction parameterized by
        # logarithmically interpolating between Arrhenius rate expressions at
        # various pressures.
        self.plog = False
        # List of arrays with [pressure [Pa], A, b, E]
        self.plog_par = None
class SpecInfo(CommonEqualityMixin):
    """Container for a single chemical species.

    Holds the species name, elemental makeup, molecular weight, and the
    NASA polynomial thermodynamic data: low- and high-temperature
    coefficient sets plus the temperature breakpoints separating them.

    Attributes
    ----------
    name : str
        Name of species.
    elem : list of list of [str, float]
        Elemental composition in [element, number] pairs.
    mw : float
        Molecular weight [kg/kmol].
    hi : numpy.ndarray
        High-temperature range NASA thermodynamic coefficients (7 values).
    lo : numpy.ndarray
        Low-temperature range NASA thermodynamic coefficients (7 values).
    Trange : list of float
        Temperatures [K] defining the fit ranges (low, middle, high);
        default [300, 1000, 5000].
    """

    def __init__(self, name):
        # Species name as it appears in the mechanism file.
        self.name = name
        # Elemental composition, filled in by the parser.
        self.elem = []
        # Molecular weight [kg/kmol]; computed once composition is known.
        self.mw = 0.0
        # NASA-7 polynomial coefficients for the two temperature ranges.
        self.lo = np.zeros(7)
        self.hi = np.zeros(7)
        # Fit temperature breakpoints [K]: (low, common/middle, high).
        self.Trange = [300.0, 1000.0, 5000.0]
def calc_spec_smh(T, specs):
    """Calculate standard-state entropies minus enthalpies for all species.

    Evaluates the NASA-7 polynomial form of (S/R - H/(R*T)) for each
    species, choosing the low- or high-temperature coefficient set based
    on the species' middle-range temperature breakpoint.

    Parameters
    ----------
    T : float
        Temperature of gas mixture.
    specs : list of SpecInfo
        List of species.

    Returns
    -------
    spec_smh : list of float
        List of species' standard-state entropies minus enthalpies.
    """
    log_t = math.log(T)
    # Pre-scale the temperature powers shared by every species; divisors
    # come from integrating the NASA-7 cp/R polynomial.
    t_half = T / 2.0
    t_2 = T * T / 6.0
    t_3 = T * T * T / 12.0
    t_4 = T * T * T * T / 20.0

    spec_smh = []
    for sp in specs:
        # Low-range coefficients apply at or below the middle breakpoint.
        a = sp.lo if T <= sp.Trange[1] else sp.hi
        spec_smh.append(
            a[0] * (log_t - 1.0) + a[1] * t_half + a[2] * t_2
            + a[3] * t_3 + a[4] * t_4 - (a[5] / T) + a[6]
        )
    return spec_smh
|
import pickle

# BUGFIX: OpenSslCipherSuitesPlugin is instantiated throughout this module
# but was never imported; add it to the existing sslyze import.
from sslyze.plugins.openssl_cipher_suites_plugin import OpenSslCipherSuitesPlugin, Sslv20ScanCommand, \
    Sslv30ScanCommand, Tlsv10ScanCommand, Tlsv11ScanCommand, Tlsv12ScanCommand, Tlsv13ScanCommand
from sslyze.server_connectivity_tester import ServerConnectivityTester
from sslyze.ssl_settings import TlsWrappedProtocolEnum

from tests.markers import can_only_run_on_linux_64
from tests.openssl_server import LegacyOpenSslServer, ModernOpenSslServer, ClientAuthConfigEnum
class TestOpenSslCipherSuitesPlugin:
@can_only_run_on_linux_64
def test_sslv2_enabled(self):
    """SSL 2.0 scan against a local legacy server accepts the full legacy suite list."""
    # Spin up a local OpenSSL server old enough to still speak SSL 2.0.
    with LegacyOpenSslServer() as server:
        server_test = ServerConnectivityTester(
            hostname=server.hostname,
            ip_address=server.ip_address,
            port=server.port
        )
        server_info = server_test.perform()

        plugin = OpenSslCipherSuitesPlugin()
        plugin_result = plugin.process_task(server_info, Sslv20ScanCommand())

        # The embedded server does not have a preference
        assert not plugin_result.preferred_cipher

        accepted_cipher_name_list = [cipher.name for cipher in plugin_result.accepted_cipher_list]
        # NOTE(review): 'SSL_CK_DES_192_EDE3_CBC_WITH_MD5' appears twice below;
        # the duplicate is harmless inside a set literal but could be removed.
        assert {
            'SSL_CK_RC4_128_EXPORT40_WITH_MD5', 'SSL_CK_IDEA_128_CBC_WITH_MD5',
            'SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5', 'SSL_CK_DES_192_EDE3_CBC_WITH_MD5',
            'SSL_CK_DES_192_EDE3_CBC_WITH_MD5', 'SSL_CK_RC4_128_WITH_MD5',
            'SSL_CK_RC2_128_CBC_WITH_MD5', 'SSL_CK_DES_64_CBC_WITH_MD5'
        } == set(accepted_cipher_name_list)

        # Every probed cipher was accepted; none rejected or errored.
        assert plugin_result.accepted_cipher_list
        assert not plugin_result.rejected_cipher_list
        assert not plugin_result.errored_cipher_list

        # Human-readable renderings must not crash.
        assert plugin_result.as_text()
        assert plugin_result.as_xml()

        # Ensure the results are pickable so the ConcurrentScanner can receive them via a Queue
        assert pickle.dumps(plugin_result)
def test_sslv2_disabled(self):
    """A modern host (google.com) must reject every SSL 2.0 cipher suite."""
    server_info = ServerConnectivityTester(hostname='www.google.com').perform()

    result = OpenSslCipherSuitesPlugin().process_task(server_info, Sslv20ScanCommand())

    # Nothing accepted -> no preferred cipher, everything in the rejected list.
    assert result.preferred_cipher is None
    assert not result.accepted_cipher_list
    assert result.rejected_cipher_list
    assert not result.errored_cipher_list

    # Rendering must still work for an all-rejected scan.
    assert result.as_text()
    assert result.as_xml()

    # Ensure the results are pickable so the ConcurrentScanner can receive them via a Queue
    assert pickle.dumps(result)
@can_only_run_on_linux_64
def test_sslv3_enabled(self):
    """SSL 3.0 scan against a local legacy server accepts the expected suite list."""
    # Spin up a local OpenSSL server that still accepts SSL 3.0.
    with LegacyOpenSslServer() as server:
        server_test = ServerConnectivityTester(
            hostname=server.hostname,
            ip_address=server.ip_address,
            port=server.port)
        server_info = server_test.perform()

        plugin = OpenSslCipherSuitesPlugin()
        plugin_result = plugin.process_task(server_info, Sslv30ScanCommand())

        # The embedded server does not have a preference
        assert not plugin_result.preferred_cipher
        expected_ciphers = {
            'TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA', 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
            'TLS_DH_anon_WITH_AES_128_CBC_SHA', 'TLS_ECDH_anon_WITH_AES_128_CBC_SHA',
            'TLS_DH_anon_WITH_SEED_CBC_SHA', 'TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5',
            'TLS_ECDHE_RSA_WITH_NULL_SHA', 'TLS_ECDHE_RSA_WITH_RC4_128_SHA',
            'TLS_DH_anon_WITH_AES_256_CBC_SHA',
            'TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA', 'TLS_ECDH_anon_WITH_RC4_128_SHA',
            'TLS_DH_anon_WITH_3DES_EDE_CBC_SHA', 'TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA',
            'TLS_DH_anon_EXPORT_WITH_RC4_40_MD5', 'TLS_RSA_EXPORT_WITH_DES40_CBC_SHA',
            'TLS_ECDH_anon_WITH_NULL_SHA',
            'TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA', 'TLS_RSA_WITH_RC4_128_SHA',
            'TLS_RSA_EXPORT_WITH_RC4_40_MD5',
            'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA', 'TLS_RSA_WITH_NULL_MD5',
            'TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA', 'TLS_DH_anon_WITH_DES_CBC_SHA',
            'TLS_RSA_WITH_SEED_CBC_SHA', 'TLS_RSA_WITH_DES_CBC_SHA',
            'TLS_ECDH_anon_WITH_AES_256_CBC_SHA', 'TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA',
            'TLS_RSA_WITH_CAMELLIA_256_CBC_SHA', 'TLS_RSA_WITH_AES_256_CBC_SHA',
            'TLS_RSA_WITH_RC4_128_MD5', 'TLS_RSA_WITH_CAMELLIA_128_CBC_SHA',
            'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA', 'TLS_RSA_WITH_NULL_SHA',
            'TLS_RSA_WITH_IDEA_CBC_SHA', 'TLS_RSA_WITH_AES_128_CBC_SHA', 'TLS_DH_anon_WITH_RC4_128_MD5'
        }
        assert expected_ciphers == set([cipher.name for cipher in plugin_result.accepted_cipher_list])

        # Unlike SSL 2.0, some probed SSL 3.0 suites are expected to be rejected.
        assert plugin_result.accepted_cipher_list
        assert plugin_result.rejected_cipher_list
        assert not plugin_result.errored_cipher_list

        # Human-readable renderings must not crash.
        assert plugin_result.as_text()
        assert plugin_result.as_xml()

        # Ensure the results are pickable so the ConcurrentScanner can receive them via a Queue
        assert pickle.dumps(plugin_result)
def test_sslv3_disabled(self):
server_test = ServerConnectivityTester(hostname='www.google.com')
server_info = server_test.perform()
plugin = OpenSslCipherSuitesPlugin()
plugin_result = plugin.process_task(server_info, Sslv30ScanCommand())
assert plugin_result.preferred_cipher is None
assert not plugin_result.accepted_cipher_list
assert plugin_result.rejected_cipher_list
assert not plugin_result.errored_cipher_list
assert plugin_result.as_text()
assert plugin_result.as_xml()
# Ensure the results are pickable so the ConcurrentScanner can receive them via a Queue
assert pickle.dumps(plugin_result)
    def test_tlsv1_0_enabled(self):
        """www.google.com must accept the expected TLS 1.0 cipher suites."""
        server_test = ServerConnectivityTester(hostname='www.google.com')
        server_info = server_test.perform()
        plugin = OpenSslCipherSuitesPlugin()
        plugin_result = plugin.process_task(server_info, Tlsv10ScanCommand())
        # A server with the protocol enabled reports a preferred cipher.
        assert plugin_result.preferred_cipher
        # NOTE(review): this list tracks the live server's configuration and
        # may drift over time -- re-verify against the server when it breaks.
        expected_ciphers = {
            'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA', 'TLS_RSA_WITH_AES_256_CBC_SHA',
            'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA', 'TLS_RSA_WITH_AES_128_CBC_SHA',
            'TLS_RSA_WITH_3DES_EDE_CBC_SHA'
        }
        assert expected_ciphers == set([cipher.name for cipher in plugin_result.accepted_cipher_list])
        assert plugin_result.accepted_cipher_list
        assert plugin_result.rejected_cipher_list
        assert not plugin_result.errored_cipher_list
        assert plugin_result.as_text()
        assert plugin_result.as_xml()
        # Ensure the results are pickable so the ConcurrentScanner can receive them via a Queue
        assert pickle.dumps(plugin_result)
def test_tlsv1_0_disabled(self):
server_test = ServerConnectivityTester(hostname='success.trendmicro.com')
server_info = server_test.perform()
plugin = OpenSslCipherSuitesPlugin()
plugin_result = plugin.process_task(server_info, Tlsv10ScanCommand())
assert plugin_result.preferred_cipher is None
assert not plugin_result.accepted_cipher_list
assert plugin_result.rejected_cipher_list
assert not plugin_result.errored_cipher_list
assert plugin_result.as_text()
assert plugin_result.as_xml()
# Ensure the results are pickable so the ConcurrentScanner can receive them via a Queue
assert pickle.dumps(plugin_result)
    def test_tlsv1_1_enabled(self):
        """www.google.com must accept the expected TLS 1.1 cipher suites."""
        server_test = ServerConnectivityTester(hostname='www.google.com')
        server_info = server_test.perform()
        plugin = OpenSslCipherSuitesPlugin()
        plugin_result = plugin.process_task(server_info, Tlsv11ScanCommand())
        assert plugin_result.preferred_cipher
        assert plugin_result.accepted_cipher_list
        # NOTE(review): tied to the live server's configuration; may drift.
        expected_ciphers = {
            'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA', 'TLS_RSA_WITH_AES_256_CBC_SHA',
            'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA', 'TLS_RSA_WITH_AES_128_CBC_SHA',
            'TLS_RSA_WITH_3DES_EDE_CBC_SHA'
        }
        assert expected_ciphers == set([cipher.name for cipher in plugin_result.accepted_cipher_list])
        assert plugin_result.rejected_cipher_list
        assert not plugin_result.errored_cipher_list
        assert plugin_result.as_text()
        assert plugin_result.as_xml()
        # Ensure the results are pickable so the ConcurrentScanner can receive them via a Queue
        assert pickle.dumps(plugin_result)
    def test_tlsv1_2_enabled(self):
        """www.google.com must accept the expected TLS 1.2 cipher suites."""
        server_test = ServerConnectivityTester(hostname='www.google.com')
        server_info = server_test.perform()
        plugin = OpenSslCipherSuitesPlugin()
        # Also do full HTTP connections
        plugin_result = plugin.process_task(server_info, Tlsv12ScanCommand(http_get=True))
        assert plugin_result.preferred_cipher
        assert plugin_result.accepted_cipher_list
        # NOTE(review): tied to the live server's configuration; may drift.
        expected_ciphers = {
            'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA',
            'TLS_RSA_WITH_AES_256_GCM_SHA384', 'TLS_RSA_WITH_AES_256_CBC_SHA',
            'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256',
            'TLS_RSA_WITH_AES_128_GCM_SHA256', 'TLS_RSA_WITH_AES_128_CBC_SHA',
            'TLS_RSA_WITH_3DES_EDE_CBC_SHA', 'TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256',
            'TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256'
        }
        assert expected_ciphers == set([cipher.name for cipher in plugin_result.accepted_cipher_list])
        assert plugin_result.rejected_cipher_list
        assert not plugin_result.errored_cipher_list
        assert plugin_result.as_text()
        assert plugin_result.as_xml()
        # Ensure the results are pickable so the ConcurrentScanner can receive them via a Queue
        assert pickle.dumps(plugin_result)
    def test_null_cipher_suites(self):
        """null.badssl.com must accept the expected NULL/anon cipher suites."""
        server_test = ServerConnectivityTester(hostname='null.badssl.com')
        server_info = server_test.perform()
        plugin = OpenSslCipherSuitesPlugin()
        plugin_result = plugin.process_task(server_info, Tlsv12ScanCommand())
        # badssl.com hosts deliberately insecure endpoints for exactly this test.
        expected_ciphers = {
            'TLS_ECDH_anon_WITH_AES_256_CBC_SHA', 'TLS_DH_anon_WITH_AES_256_CBC_SHA256',
            'TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA', 'TLS_DH_anon_WITH_AES_256_GCM_SHA384',
            'TLS_DH_anon_WITH_AES_256_CBC_SHA', 'TLS_ECDH_anon_WITH_AES_128_CBC_SHA',
            'TLS_DH_anon_WITH_AES_128_CBC_SHA256', 'TLS_DH_anon_WITH_AES_128_CBC_SHA',
            'TLS_DH_anon_WITH_AES_128_GCM_SHA256', 'TLS_DH_anon_WITH_SEED_CBC_SHA',
            'TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA', 'TLS_ECDHE_RSA_WITH_NULL_SHA',
            'TLS_ECDH_anon_WITH_NULL_SHA', 'TLS_RSA_WITH_NULL_SHA256', 'TLS_RSA_WITH_NULL_SHA'
        }
        assert expected_ciphers == set([cipher.name for cipher in plugin_result.accepted_cipher_list])
        assert plugin_result.as_text()
        assert plugin_result.as_xml()
        # Ensure the results are pickable so the ConcurrentScanner can receive them via a Queue
        assert pickle.dumps(plugin_result)
def test_rc4_cipher_suites(self):
server_test = ServerConnectivityTester(hostname='rc4.badssl.com')
server_info = server_test.perform()
plugin = OpenSslCipherSuitesPlugin()
plugin_result = plugin.process_task(server_info, Tlsv12ScanCommand())
accepted_cipher_name_list = [cipher.name for cipher in plugin_result.accepted_cipher_list]
assert {'TLS_ECDHE_RSA_WITH_RC4_128_SHA', 'TLS_RSA_WITH_RC4_128_SHA'} == set(accepted_cipher_name_list)
assert plugin_result.as_text()
assert plugin_result.as_xml()
# Ensure the results are pickable so the ConcurrentScanner can receive them via a Queue
assert pickle.dumps(plugin_result)
def test_rc4_md5_cipher_suites(self):
server_test = ServerConnectivityTester(hostname='rc4-md5.badssl.com')
server_info = server_test.perform()
plugin = OpenSslCipherSuitesPlugin()
plugin_result = plugin.process_task(server_info, Tlsv12ScanCommand())
accepted_cipher_name_list = [cipher.name for cipher in plugin_result.accepted_cipher_list]
assert {'TLS_RSA_WITH_RC4_128_MD5'} == set(accepted_cipher_name_list)
assert plugin_result.as_text()
assert plugin_result.as_xml()
# Ensure the results are pickable so the ConcurrentScanner can receive them via a Queue
assert pickle.dumps(plugin_result)
    def test_follows_client_cipher_suite_preference(self):
        """Contrast a server that ignores client preference with one that honors it."""
        # Google.com does not follow client cipher suite preference
        server_test = ServerConnectivityTester(hostname='www.google.com')
        server_info = server_test.perform()
        plugin = OpenSslCipherSuitesPlugin()
        plugin_result = plugin.process_task(server_info, Tlsv12ScanCommand())
        # Server-enforced ordering shows up as a truthy preferred_cipher.
        assert plugin_result.preferred_cipher
        assert plugin_result.accepted_cipher_list
        # Sogou.com follows client cipher suite preference
        server_test = ServerConnectivityTester(hostname='www.sogou.com')
        server_info = server_test.perform()
        plugin = OpenSslCipherSuitesPlugin()
        plugin_result = plugin.process_task(server_info, Tlsv12ScanCommand())
        # Client-honoring servers report no preferred cipher of their own.
        assert plugin_result.preferred_cipher is None
        assert plugin_result.accepted_cipher_list
        # Ensure the results are pickable so the ConcurrentScanner can receive them via a Queue
        assert pickle.dumps(plugin_result)
def test_smtp_post_handshake_response(self):
server_test = ServerConnectivityTester(
hostname='smtp.gmail.com',
port=587,
tls_wrapped_protocol=TlsWrappedProtocolEnum.STARTTLS_SMTP
)
server_info = server_test.perform()
plugin = OpenSslCipherSuitesPlugin()
plugin_result = plugin.process_task(server_info, Tlsv12ScanCommand())
assert plugin_result.as_text()
assert plugin_result.as_xml()
def test_tls_1_3_cipher_suites(self):
server_test = ServerConnectivityTester(hostname='www.cloudflare.com')
server_info = server_test.perform()
plugin = OpenSslCipherSuitesPlugin()
plugin_result = plugin.process_task(server_info, Tlsv13ScanCommand())
accepted_cipher_name_list = [cipher.name for cipher in plugin_result.accepted_cipher_list]
assert {'TLS_CHACHA20_POLY1305_SHA256', 'TLS_AES_256_GCM_SHA384', 'TLS_AES_128_GCM_SHA256'} == \
set(accepted_cipher_name_list)
    @can_only_run_on_linux_64
    def test_succeeds_when_client_auth_failed_tls_1_2(self):
        """The scan must still work when TLS 1.2 client auth is required but absent."""
        # Given a TLS 1.2 server that requires client authentication
        with LegacyOpenSslServer(client_auth_config=ClientAuthConfigEnum.REQUIRED) as server:
            # And the client does NOT provide a client certificate
            server_test = ServerConnectivityTester(
                hostname=server.hostname,
                ip_address=server.ip_address,
                port=server.port
            )
            server_info = server_test.perform()
            # OpenSslCipherSuitesPlugin works even when a client cert was not supplied
            plugin = OpenSslCipherSuitesPlugin()
            plugin_result = plugin.process_task(server_info, Tlsv12ScanCommand())
        assert plugin_result.accepted_cipher_list
        assert plugin_result.as_text()
        assert plugin_result.as_xml()
    @can_only_run_on_linux_64
    def test_succeeds_when_client_auth_failed_tls_1_3(self):
        """The scan must still work when TLS 1.3 client auth is required but absent."""
        # Given a TLS 1.3 server that requires client authentication
        with ModernOpenSslServer(client_auth_config=ClientAuthConfigEnum.REQUIRED) as server:
            # And the client does NOT provide a client certificate
            server_test = ServerConnectivityTester(
                hostname=server.hostname,
                ip_address=server.ip_address,
                port=server.port
            )
            server_info = server_test.perform()
            # OpenSslCipherSuitesPlugin works even when a client cert was not supplied
            plugin = OpenSslCipherSuitesPlugin()
            plugin_result = plugin.process_task(server_info, Tlsv13ScanCommand())
        assert plugin_result.accepted_cipher_list
        assert plugin_result.as_text()
        assert plugin_result.as_xml()
Start updating tests for cipher suites
from sslyze.plugins.openssl_cipher_suites.scan_commands import Sslv20ScanImplementation, CipherSuitesScanResult, \
Sslv30ScanImplementation, Tlsv10ScanImplementation, Tlsv11ScanImplementation, Tlsv12ScanImplementation
from sslyze.server_connectivity_tester import ServerConnectivityTester
from sslyze.server_setting import ServerNetworkLocationViaDirectConnection
from tests.markers import can_only_run_on_linux_64
from tests.openssl_server import LegacyOpenSslServer, ModernOpenSslServer, ClientAuthConfigEnum
class TestCipherSuitesPluginWithOnlineServer:
    """Cipher suite ScanImplementations exercised against public internet servers.

    These tests need outbound network access, and the expected cipher lists
    track the live servers' configuration, so they may drift over time.
    """

    def test_sslv2_disabled(self):
        # Given a server to scan that does not support SSL 2.0
        server_location = ServerNetworkLocationViaDirectConnection.with_ip_address_lookup(
            "www.google.com", 443
        )
        server_info = ServerConnectivityTester().perform(server_location)
        # When scanning for cipher suites, it succeeds
        result: CipherSuitesScanResult = Sslv20ScanImplementation.perform(server_info)
        # And the result confirms that SSL 2.0 is not supported
        assert result.preferred_cipher_suite is None
        assert not result.accepted_cipher_suites
        assert result.rejected_cipher_suites

    def test_sslv3_disabled(self):
        # Given a server to scan that does not support SSL 3.0
        server_location = ServerNetworkLocationViaDirectConnection.with_ip_address_lookup(
            "www.google.com", 443
        )
        server_info = ServerConnectivityTester().perform(server_location)
        # When scanning for cipher suites, it succeeds
        result: CipherSuitesScanResult = Sslv30ScanImplementation.perform(server_info)
        # And the result confirms that SSL 3.0 is not supported
        assert result.preferred_cipher_suite is None
        assert not result.accepted_cipher_suites
        assert result.rejected_cipher_suites

    def test_tlsv1_0_enabled(self):
        # Given a server to scan that supports TLS 1.0
        server_location = ServerNetworkLocationViaDirectConnection.with_ip_address_lookup(
            "www.google.com", 443
        )
        server_info = ServerConnectivityTester().perform(server_location)
        # When scanning for cipher suites, it succeeds
        result: CipherSuitesScanResult = Tlsv10ScanImplementation.perform(server_info)
        # And the result confirms that TLS 1.0 is supported
        assert result.preferred_cipher_suite
        expected_ciphers = {
            'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA', 'TLS_RSA_WITH_AES_256_CBC_SHA',
            'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA', 'TLS_RSA_WITH_AES_128_CBC_SHA',
            'TLS_RSA_WITH_3DES_EDE_CBC_SHA'
        }
        assert expected_ciphers == {cipher.name for cipher in result.accepted_cipher_suites}
        assert result.rejected_cipher_suites

    def test_tlsv1_0_disabled(self):
        # Given a server to scan that does NOT support TLS 1.0
        server_location = ServerNetworkLocationViaDirectConnection.with_ip_address_lookup(
            "success.trendmicro.com", 443
        )
        server_info = ServerConnectivityTester().perform(server_location)
        # When scanning for cipher suites, it succeeds
        result: CipherSuitesScanResult = Tlsv10ScanImplementation.perform(server_info)
        # And the result confirms that TLS 1.0 is not supported
        assert result.preferred_cipher_suite is None
        assert not result.accepted_cipher_suites
        assert result.rejected_cipher_suites

    def test_tlsv1_1_enabled(self):
        # Given a server to scan that supports TLS 1.1
        server_location = ServerNetworkLocationViaDirectConnection.with_ip_address_lookup(
            "www.google.com", 443
        )
        server_info = ServerConnectivityTester().perform(server_location)
        # When scanning for cipher suites, it succeeds
        result: CipherSuitesScanResult = Tlsv11ScanImplementation.perform(server_info)
        # And the result confirms that TLS 1.1 is supported
        # (the original comment said "not supported", contradicting the asserts below)
        assert result.preferred_cipher_suite
        expected_ciphers = {
            'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA', 'TLS_RSA_WITH_AES_256_CBC_SHA',
            'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA', 'TLS_RSA_WITH_AES_128_CBC_SHA',
            'TLS_RSA_WITH_3DES_EDE_CBC_SHA'
        }
        assert expected_ciphers == {cipher.name for cipher in result.accepted_cipher_suites}
        assert result.rejected_cipher_suites

    def test_tlsv1_2_enabled(self):
        # Given a server to scan that supports TLS 1.2
        server_location = ServerNetworkLocationViaDirectConnection.with_ip_address_lookup(
            "www.google.com", 443
        )
        server_info = ServerConnectivityTester().perform(server_location)
        # When scanning for cipher suites, it succeeds
        result: CipherSuitesScanResult = Tlsv12ScanImplementation.perform(server_info)
        # And the result confirms that TLS 1.2 is supported
        # (the original comment said "not supported", contradicting the asserts below)
        assert result.preferred_cipher_suite
        expected_ciphers = {
            'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA',
            'TLS_RSA_WITH_AES_256_GCM_SHA384', 'TLS_RSA_WITH_AES_256_CBC_SHA',
            'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256',
            'TLS_RSA_WITH_AES_128_GCM_SHA256', 'TLS_RSA_WITH_AES_128_CBC_SHA',
            'TLS_RSA_WITH_3DES_EDE_CBC_SHA', 'TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256',
            'TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256'
        }
        assert expected_ciphers == {cipher.name for cipher in result.accepted_cipher_suites}

    def test_null_cipher_suites(self):
        # Given a server to scan that supports NULL cipher suites
        server_location = ServerNetworkLocationViaDirectConnection.with_ip_address_lookup(
            "null.badssl.com", 443
        )
        server_info = ServerConnectivityTester().perform(server_location)
        # When scanning for cipher suites, it succeeds
        result: CipherSuitesScanResult = Tlsv12ScanImplementation.perform(server_info)
        # And the NULL/Anon cipher suites were detected
        expected_ciphers = {
            'TLS_ECDH_anon_WITH_AES_256_CBC_SHA', 'TLS_DH_anon_WITH_AES_256_CBC_SHA256',
            'TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA', 'TLS_DH_anon_WITH_AES_256_GCM_SHA384',
            'TLS_DH_anon_WITH_AES_256_CBC_SHA', 'TLS_ECDH_anon_WITH_AES_128_CBC_SHA',
            'TLS_DH_anon_WITH_AES_128_CBC_SHA256', 'TLS_DH_anon_WITH_AES_128_CBC_SHA',
            'TLS_DH_anon_WITH_AES_128_GCM_SHA256', 'TLS_DH_anon_WITH_SEED_CBC_SHA',
            'TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA', 'TLS_ECDHE_RSA_WITH_NULL_SHA',
            'TLS_ECDH_anon_WITH_NULL_SHA', 'TLS_RSA_WITH_NULL_SHA256', 'TLS_RSA_WITH_NULL_SHA'
        }
        assert expected_ciphers == {cipher.name for cipher in result.accepted_cipher_suites}

    def test_rc4_cipher_suites(self):
        # Given a server to scan that supports RC4 cipher suites
        server_location = ServerNetworkLocationViaDirectConnection.with_ip_address_lookup(
            "rc4.badssl.com", 443
        )
        server_info = ServerConnectivityTester().perform(server_location)
        # When scanning for cipher suites, it succeeds
        result: CipherSuitesScanResult = Tlsv12ScanImplementation.perform(server_info)
        # And the RC4 cipher suites were detected
        assert {'TLS_ECDHE_RSA_WITH_RC4_128_SHA', 'TLS_RSA_WITH_RC4_128_SHA'} == {
            cipher.name for cipher in result.accepted_cipher_suites
        }

    def test_does_not_follow_client_cipher_suite_preference(self):
        # Given a server to scan that does not follow client cipher suite preference
        server_location = ServerNetworkLocationViaDirectConnection.with_ip_address_lookup(
            "www.google.com", 443
        )
        server_info = ServerConnectivityTester().perform(server_location)
        # When scanning for cipher suites, it succeeds
        result: CipherSuitesScanResult = Tlsv12ScanImplementation.perform(server_info)
        # And the server is detected as not following the client's preference
        assert not result.follows_cipher_suite_preference_from_client

    def test_follows_client_cipher_suite_preference(self):
        # Given a server to scan that follows client cipher suite preference
        server_location = ServerNetworkLocationViaDirectConnection.with_ip_address_lookup(
            "www.sogou.com", 443
        )
        server_info = ServerConnectivityTester().perform(server_location)
        # When scanning for cipher suites, it succeeds
        result: CipherSuitesScanResult = Tlsv12ScanImplementation.perform(server_info)
        # And the server is detected as following the client's preference
        assert result.follows_cipher_suite_preference_from_client

    def test_smtp_post_handshake_response(self):
        # NOTE(review): this method was not ported to the new scan-command API.
        # ServerConnectivityTester no longer takes these keyword arguments, and
        # TlsWrappedProtocolEnum, OpenSslCipherSuitesPlugin and Tlsv12ScanCommand
        # are not imported in this module -- it will fail at runtime. TODO port.
        server_test = ServerConnectivityTester(
            hostname='smtp.gmail.com',
            port=587,
            tls_wrapped_protocol=TlsWrappedProtocolEnum.STARTTLS_SMTP
        )
        server_info = server_test.perform()
        plugin = OpenSslCipherSuitesPlugin()
        plugin_result = plugin.process_task(server_info, Tlsv12ScanCommand())
        assert plugin_result.as_text()
        assert plugin_result.as_xml()

    def test_tls_1_3_cipher_suites(self):
        # NOTE(review): this method was not ported to the new scan-command API
        # either; OpenSslCipherSuitesPlugin and Tlsv13ScanCommand are not
        # imported in this module -- it will fail at runtime. TODO port.
        server_test = ServerConnectivityTester(hostname='www.cloudflare.com')
        server_info = server_test.perform()
        plugin = OpenSslCipherSuitesPlugin()
        plugin_result = plugin.process_task(server_info, Tlsv13ScanCommand())
        accepted_cipher_name_list = [cipher.name for cipher in plugin_result.accepted_cipher_list]
        assert {'TLS_CHACHA20_POLY1305_SHA256', 'TLS_AES_256_GCM_SHA384', 'TLS_AES_128_GCM_SHA256'} == \
            set(accepted_cipher_name_list)
@can_only_run_on_linux_64
class TestCipherSuitesPluginWithLocalServer:
    """Cipher suite scans against locally-spawned OpenSSL test servers.

    NOTE(review): these methods still use the old plugin API
    (OpenSslCipherSuitesPlugin / *ScanCommand), which is not imported in this
    module after the migration started above -- they will fail at runtime
    until ported. TODO port.
    """

    def test_sslv2_enabled(self):
        with LegacyOpenSslServer() as server:
            server_test = ServerConnectivityTester(
                hostname=server.hostname,
                ip_address=server.ip_address,
                port=server.port
            )
            server_info = server_test.perform()
            plugin = OpenSslCipherSuitesPlugin()
            plugin_result = plugin.process_task(server_info, Sslv20ScanCommand())
        # The embedded server does not have a preference
        assert not plugin_result.preferred_cipher
        accepted_cipher_name_list = [cipher.name for cipher in plugin_result.accepted_cipher_list]
        # NOTE(review): 'SSL_CK_DES_192_EDE3_CBC_WITH_MD5' appears twice in this
        # set literal (harmless, sets deduplicate) -- possibly a typo for a
        # different cipher; verify against the embedded server's suite list.
        assert {
            'SSL_CK_RC4_128_EXPORT40_WITH_MD5', 'SSL_CK_IDEA_128_CBC_WITH_MD5',
            'SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5', 'SSL_CK_DES_192_EDE3_CBC_WITH_MD5',
            'SSL_CK_DES_192_EDE3_CBC_WITH_MD5', 'SSL_CK_RC4_128_WITH_MD5',
            'SSL_CK_RC2_128_CBC_WITH_MD5', 'SSL_CK_DES_64_CBC_WITH_MD5'
        } == set(accepted_cipher_name_list)
        assert plugin_result.accepted_cipher_list
        assert not plugin_result.rejected_cipher_list
        assert not plugin_result.errored_cipher_list
        assert plugin_result.as_text()
        assert plugin_result.as_xml()
        # Ensure the results are pickable so the ConcurrentScanner can receive them via a Queue
        assert pickle.dumps(plugin_result)

    def test_sslv3_enabled(self):
        with LegacyOpenSslServer() as server:
            server_test = ServerConnectivityTester(
                hostname=server.hostname,
                ip_address=server.ip_address,
                port=server.port)
            server_info = server_test.perform()
            plugin = OpenSslCipherSuitesPlugin()
            plugin_result = plugin.process_task(server_info, Sslv30ScanCommand())
        # The embedded server does not have a preference
        assert not plugin_result.preferred_cipher
        expected_ciphers = {
            'TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA', 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
            'TLS_DH_anon_WITH_AES_128_CBC_SHA', 'TLS_ECDH_anon_WITH_AES_128_CBC_SHA',
            'TLS_DH_anon_WITH_SEED_CBC_SHA', 'TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5',
            'TLS_ECDHE_RSA_WITH_NULL_SHA', 'TLS_ECDHE_RSA_WITH_RC4_128_SHA',
            'TLS_DH_anon_WITH_AES_256_CBC_SHA',
            'TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA', 'TLS_ECDH_anon_WITH_RC4_128_SHA',
            'TLS_DH_anon_WITH_3DES_EDE_CBC_SHA', 'TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA',
            'TLS_DH_anon_EXPORT_WITH_RC4_40_MD5', 'TLS_RSA_EXPORT_WITH_DES40_CBC_SHA',
            'TLS_ECDH_anon_WITH_NULL_SHA',
            'TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA', 'TLS_RSA_WITH_RC4_128_SHA',
            'TLS_RSA_EXPORT_WITH_RC4_40_MD5',
            'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA', 'TLS_RSA_WITH_NULL_MD5',
            'TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA', 'TLS_DH_anon_WITH_DES_CBC_SHA',
            'TLS_RSA_WITH_SEED_CBC_SHA', 'TLS_RSA_WITH_DES_CBC_SHA',
            'TLS_ECDH_anon_WITH_AES_256_CBC_SHA', 'TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA',
            'TLS_RSA_WITH_CAMELLIA_256_CBC_SHA', 'TLS_RSA_WITH_AES_256_CBC_SHA',
            'TLS_RSA_WITH_RC4_128_MD5', 'TLS_RSA_WITH_CAMELLIA_128_CBC_SHA',
            'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA', 'TLS_RSA_WITH_NULL_SHA',
            'TLS_RSA_WITH_IDEA_CBC_SHA', 'TLS_RSA_WITH_AES_128_CBC_SHA', 'TLS_DH_anon_WITH_RC4_128_MD5'
        }
        assert expected_ciphers == set([cipher.name for cipher in plugin_result.accepted_cipher_list])
        assert plugin_result.accepted_cipher_list
        assert plugin_result.rejected_cipher_list
        assert not plugin_result.errored_cipher_list
        assert plugin_result.as_text()
        assert plugin_result.as_xml()
        # Ensure the results are pickable so the ConcurrentScanner can receive them via a Queue
        assert pickle.dumps(plugin_result)

    def test_succeeds_when_client_auth_failed_tls_1_2(self):
        # Given a TLS 1.2 server that requires client authentication
        with LegacyOpenSslServer(client_auth_config=ClientAuthConfigEnum.REQUIRED) as server:
            # And the client does NOT provide a client certificate
            server_test = ServerConnectivityTester(
                hostname=server.hostname,
                ip_address=server.ip_address,
                port=server.port
            )
            server_info = server_test.perform()
            # OpenSslCipherSuitesPlugin works even when a client cert was not supplied
            plugin = OpenSslCipherSuitesPlugin()
            plugin_result = plugin.process_task(server_info, Tlsv12ScanCommand())
        assert plugin_result.accepted_cipher_list
        assert plugin_result.as_text()
        assert plugin_result.as_xml()

    def test_succeeds_when_client_auth_failed_tls_1_3(self):
        # Given a TLS 1.3 server that requires client authentication
        with ModernOpenSslServer(client_auth_config=ClientAuthConfigEnum.REQUIRED) as server:
            # And the client does NOT provide a client certificate
            server_test = ServerConnectivityTester(
                hostname=server.hostname,
                ip_address=server.ip_address,
                port=server.port
            )
            server_info = server_test.perform()
            # OpenSslCipherSuitesPlugin works even when a client cert was not supplied
            plugin = OpenSslCipherSuitesPlugin()
            plugin_result = plugin.process_task(server_info, Tlsv13ScanCommand())
        assert plugin_result.accepted_cipher_list
        assert plugin_result.as_text()
        assert plugin_result.as_xml()
|
# -*- coding: utf-8 -*-
import asyncio
from functools import partial
import importlib
import logging
import boto3
from cached_property import cached_property
from .conf import settings
logger = logging.getLogger(__name__)
class Route(object):
    """Binds an SQS queue to the callable that consumes its messages.

    queue: name of the SQS queue to poll.
    handler: dotted path ('package.module.callable') of the handler.
    loop: optional asyncio event loop; defaults to the current one.
    """

    def __init__(self, queue, handler, loop=None):
        self._loop = loop or asyncio.get_event_loop()
        self._client = boto3.client('sqs')
        self.queue_name = queue
        self._handler = handler

    def __str__(self):
        return '<Router(queue={} handler={})>'.format(self.queue_name, self._handler)

    @cached_property
    def queue_url(self):
        # Resolved lazily and cached: one SQS API round-trip per Route.
        response = self._client.get_queue_url(QueueName=self.queue_name)
        return response['QueueUrl']

    @cached_property
    def handler(self):
        """Import and return the handler callable from its dotted path.

        Raises ImportError (instead of importlib's bare ValueError) when the
        path is malformed, e.g. has no package part, and when the resolved
        attribute is not callable -- so misconfiguration fails fast with a
        message naming the offending setting.
        """
        package = '.'.join(self._handler.split('.')[:-1])
        name = self._handler.split('.')[-1]
        try:
            module = importlib.import_module(package)
        except ValueError as exc:
            raise ImportError('Error trying to import {!r}'.format(self._handler)) from exc
        handler = getattr(module, name)
        if not callable(handler):
            raise ImportError('{!r} should be callable'.format(self._handler))
        return handler

    async def handle_message(self, message):
        """Dispatch one message: await coroutine handlers, run plain
        callables in the default executor so they don't block the loop."""
        if asyncio.iscoroutinefunction(self.handler):
            logger.info('Handler is coroutine! {!r}'.format(self.handler))
            return await self.handler(message)
        else:
            logger.info('Handler will run in a separate thread: {!r}'.format(self.handler))
            return await self._loop.run_in_executor(None, self.handler, message)

    async def fetch_messages(self):
        """Long-poll SQS via the executor; returns [] when no messages."""
        fn = partial(self._client.receive_message,
                     QueueUrl=self.queue_url,
                     WaitTimeSeconds=settings.SQS_WAIT_TIME_SECONDS,
                     MaxNumberOfMessages=settings.SQS_MAX_MESSAGES)
        response = await self._loop.run_in_executor(None, fn)
        return response.get('Messages', [])
Enhance routine that imports the route handler callable
# -*- coding: utf-8 -*-
import asyncio
from functools import partial
import importlib
import logging
import boto3
from cached_property import cached_property
from .conf import settings
logger = logging.getLogger(__name__)
class Route(object):
    """Couples an SQS queue with the handler callable that processes it."""

    def __init__(self, queue, handler, loop=None):
        self._loop = loop or asyncio.get_event_loop()
        self._client = boto3.client('sqs')
        self.queue_name = queue
        self._handler = handler

    def __str__(self):
        return '<Router(queue={} handler={})>'.format(self.queue_name, self._handler)

    @cached_property
    def queue_url(self):
        """Look up the queue URL once and cache it for this instance."""
        return self._client.get_queue_url(QueueName=self.queue_name)['QueueUrl']

    @cached_property
    def handler(self):
        """Resolve the dotted handler path into a callable.

        Malformed paths and non-callable attributes are both surfaced as
        ImportError with a message naming the configured handler.
        """
        parts = self._handler.split('.')
        package, name = '.'.join(parts[:-1]), parts[-1]
        try:
            module = importlib.import_module(package)
        except ValueError as exc:
            raise ImportError('Error trying to import {!r}'.format(self._handler)) from exc
        handler = getattr(module, name)
        if not callable(handler):
            raise ImportError('{!r} should be callable'.format(self._handler))
        return handler

    async def handle_message(self, message):
        """Await coroutine handlers; off-load plain callables to a thread."""
        if asyncio.iscoroutinefunction(self.handler):
            logger.info('Handler is coroutine! {!r}'.format(self.handler))
            return await self.handler(message)
        logger.info('Handler will run in a separate thread: {!r}'.format(self.handler))
        return await self._loop.run_in_executor(None, self.handler, message)

    async def fetch_messages(self):
        """Long-poll SQS without blocking the event loop; [] when empty."""
        receive = partial(
            self._client.receive_message,
            QueueUrl=self.queue_url,
            WaitTimeSeconds=settings.SQS_WAIT_TIME_SECONDS,
            MaxNumberOfMessages=settings.SQS_MAX_MESSAGES,
        )
        response = await self._loop.run_in_executor(None, receive)
        return response.get('Messages', [])
|
#
# Copyright (c) 2004 Specifix, Inc.
# All rights reserved
"""
Handles all updates to the file system; files should never get changed
on the filesystem except by this module!
@var MERGE: Flag constant value. If set, merge is attempted,
otherwise the changes from the changeset are used (this is for
rollbacks)
@var REPLACEFILES: Flag constant value. If set, a file that is in
the way of a newly created file will be overwritten. Otherwise an error
is produced.
"""
from repository import changeset
import errno
from repository import filecontents
import files
import log
import os
import package
import patch
import stat
import sys
import tempfile
import util
import versions
from build import tags
MERGE = 1 << 0
REPLACEFILES = 1 << 1
IGNOREUGIDS = 1 << 2
class FilesystemJob:
"""
Represents a set of actions which need to be applied to the filesystem.
This is kept very simple to minimize the chance of mistakes or errors.
"""
    def _rename(self, oldPath, newPath, msg):
        # Queue a filesystem rename (performed and logged during apply()).
        self.renames.append((oldPath, newPath, msg))
    def _restore(self, fileObj, target, msg, contentsOverride = ""):
        # Queue fileObj to be restored at path target during apply().
        # contentsOverride semantics (see apply()): "" means "take the
        # contents from the change set", None means "don't restore contents".
        self.restores.append((fileObj.id(), fileObj, target, contentsOverride,
                              msg))
        # Record the target under each of the file's tags so the matching
        # tag handlers run after the restore.
        for tag in fileObj.tags:
            if self.tagUpdates.has_key(tag):
                self.tagUpdates[tag].append(target)
            else:
                self.tagUpdates[tag] = [ target ]
    def _remove(self, fileObj, target, msg):
        # Queue removal of target during apply(). Directories are only
        # counted (removed elsewhere once empty); regular entries are queued
        # for deletion and their parent directory's removal count is bumped.
        if isinstance(fileObj, files.Directory):
            if not self.directorySet.has_key(target):
                self.directorySet[target] = 0
        else:
            self.removes[target] = (fileObj, msg)
            dir = os.path.dirname(target)
            if self.directorySet.has_key(dir):
                self.directorySet[dir] += 1
            else:
                self.directorySet[dir] = 1
        # Record the target under each tag so "files preremove"/"files
        # remove" handlers can be invoked from apply().
        for tag in fileObj.tags:
            if self.tagRemoves.has_key(tag):
                self.tagRemoves[tag].append(target)
            else:
                self.tagRemoves[tag] = [ target ]
def userRemoval(self, troveName, troveVersion, troveFlavor, fileId):
if not self.userRemovals.has_key((troveName, troveVersion, troveFlavor)):
self.userRemovals[(troveName, troveVersion, troveFlavor)] = [ fileId ]
else:
self.userRemovals.append(fileId)
    def iterUserRemovals(self):
        # Yield (troveName, troveVersion, troveFlavor, fileIdList) for every
        # trove that has user-requested file removals recorded.
        for ((troveName, troveVersion, troveFlavor), fileIdList) in \
                self.userRemovals.iteritems():
            yield (troveName, troveVersion, troveFlavor, fileIdList)
    def _createFile(self, target, str, msg):
        # Queue creation of a plain file at target with the given literal
        # contents. NOTE(review): parameter name 'str' shadows the builtin;
        # kept unchanged for interface compatibility.
        self.newFiles.append((target, str, msg))
    def apply(self, tagSet = {}, tagScript = None):
        """
        Execute the queued job: run preremove tag handlers, perform renames,
        restores, removals and new-file creation, then run the update/remove
        tag handlers. If tagScript is given, tag commands are appended to
        that script instead of being executed.
        """
        tagCommands = []
        runLdconfig = False
        rootLen = len(self.root)

        # Removed tag *descriptions*: drop them from tagSet and queue their
        # "self preremove" action before anything is deleted.
        if self.tagRemoves.has_key('tagdescription'):
            for path in self.tagRemoves['tagdescription']:
                path = path[rootLen:]
                tagInfo = None
                for ti in tagSet.itervalues():
                    if ti.tagFile[:rootLen] == self.root and \
                       ti.tagFile[rootLen:] == path:
                        tagInfo = ti
                        break
                if tagInfo:
                    del tagSet[tagInfo.tag]
                    if "self preremove" in tagInfo.implements:
                        tagCommands.append([ path, "self", "preremove" ])
            del self.tagRemoves['tagdescription']

        # "files preremove" handlers must run while the files still exist.
        for tag, l in self.tagRemoves.iteritems():
            if not tagSet.has_key(tag): continue
            tagInfo = tagSet[tag]
            if "files preremove" in tagInfo.implements:
                l.sort()
                cmd = [ tagInfo.file, "files", "preremove"] + \
                    [ x[rootLen:] for x in l ]
                tagCommands.append(cmd)

        # Flush the preremove commands now (script or direct execution).
        if tagCommands:
            if tagScript:
                f = open(tagScript, "a")
                for cmd in tagCommands:
                    f.write("# %s\n" % " ".join(cmd))
                f.close()
            else:
                runTagCommands(self.root, tagCommands)
            tagCommands = []

        for (oldPath, newPath, msg) in self.renames:
            os.rename(oldPath, newPath)
            log.debug(msg)

        contents = None
        # restore in the same order files appear in the change set
        self.restores.sort()
        for (fileId, fileObj, target, override, msg) in self.restores:
            # None means "don't restore contents"; "" means "take the
            # contents from the change set"
            if override != "":
                contents = override
            elif fileObj.hasContents:
                contents = self.changeSet.getFileContents(fileId)[1]

            fileObj.restore(contents, self.root, target, contents != None)
            log.debug(msg)

        # Remove deepest paths first so files go before their directories.
        paths = self.removes.keys()
        paths.sort()
        paths.reverse()
        for target in paths:
            (fileObj, msg) = self.removes[target]

            # don't worry about files which don't exist
            try:
                os.lstat(target)
            except OSError:
                pass
            else:
                fileObj.remove(target)

            log.debug(msg)

        # Create brand-new files, replacing anything already at the path.
        for (target, str, msg) in self.newFiles:
            try:
                os.unlink(target)
            except OSError, e:
                if e.errno != errno.ENOENT:
                    raise
            f = open(target, "w")
            f.write(str)
            f.close()
            log.warning(msg)

        # Shared libraries changed: run ldconfig-style handling.
        if self.tagUpdates.has_key('shlib'):
            shlibAction(self.root, self.tagUpdates['shlib'])
            del self.tagUpdates['shlib']
        elif runLdconfig:
            # override to force ldconfig to run on shlib removal
            shlibAction(self.root, [])

        # Newly installed tag description files: run them for the first time
        # against everything in the database carrying their tag.
        if self.tagUpdates.has_key('tagdescription'):
            for path in self.tagUpdates['tagdescription']:
                # these are new tag action files which we need to run for
                # the first time. we run them against everything in the database
                # which has this tag, which includes the files we've just
                # installed
                tagInfo = tags.TagFile(path, {})
                path = path[len(self.root):]

                # don't run these twice
                if self.tagUpdates.has_key(tagInfo.tag):
                    del self.tagUpdates[tagInfo.tag]

                if "self update" in tagInfo.implements:
                    cmd = [ path, "self", "update" ] + \
                        [x for x in self.repos.iterFilesWithTag(tagInfo.tag)]
                    tagCommands.append(cmd)
                elif "files update" in tagInfo.implements:
                    cmd = [ path, "files", "update" ] + \
                        [x for x in self.repos.iterFilesWithTag(tagInfo.tag)]
                    # only queue when there is at least one tagged file
                    if len(cmd) > 3:
                        tagCommands.append(cmd)

                tagSet[tagInfo.tag] = tagInfo

            del self.tagUpdates['tagdescription']

        # Per-tag "files update" handlers for everything just restored.
        for (tag, l) in self.tagUpdates.iteritems():
            tagInfo = tagSet.get(tag, None)
            if tagInfo is None: continue

            if "files update" in tagInfo.implements:
                l.sort()
                cmd = [ tagInfo.file, "files", "update" ] + \
                    [ x[rootLen:] for x in l ]
                tagCommands.append(cmd)

        # Per-tag "files remove" handlers for everything just removed.
        for tag, l in self.tagRemoves.iteritems():
            if not tagSet.has_key(tag): continue
            tagInfo = tagSet[tag]

            if "files remove" in tagInfo.implements:
                l.sort()
                cmd = [ tagInfo.file, "files", "remove"] + \
                    [ x[rootLen:] for x in l ]
                tagCommands.append(cmd)

        # Flush the post-apply commands (script or direct execution).
        if tagCommands:
            if tagScript:
                f = open(tagScript, "a")
                f.write("\n".join([" ".join(x) for x in tagCommands]))
                f.write("\n")
                f.close()
            else:
                runTagCommands(self.root, tagCommands)
def getErrorList(self):
return self.errors
    def iterNewPackageList(self):
        """Iterate over the package objects this job leaves installed."""
        return iter(self.newPackages)
    def getOldPackageList(self):
        """Return (name, version, flavor) tuples for the packages this
        job replaces or removes."""
        return self.oldPackages
    def getDirectoryCountSet(self):
        """Return the mapping of directory path -> count of entries this
        job removes from that directory (built up by _remove)."""
        return self.directorySet
def _singlePackage(self, repos, pkgCs, changeSet, basePkg, fsPkg, root,
flags):
"""
Build up the todo list for applying a single package to the
filesystem. Returns a package object which represents what will
end up in the filsystem after this object's apply() method is
called.
@param repos: the repository the files for basePkg are stored in
@type repos: repository.Repository
@param pkgCs: the package changeset to apply to the filesystem
@type pkgCs: package.PackageChangeSet
@param changeSet: the changeset pkgCs is part of
@type changeSet: changeset.ChangeSet
@param basePkg: the package the stuff in the filesystem came from
@type basePkg: package.Package
@param fsPkg: the package representing what's in the filesystem now
@type fsPkg: package.Package
@param root: root directory to apply changes to (this is ignored for
source management, which uses the cwd)
@type root: str
@param flags: flags which modify update behavior. See L{update}
module variable summary for flags definitions.
@type flags: int bitfield
@rtype: package.Package
"""
if basePkg:
assert(pkgCs.getOldVersion() == basePkg.getVersion())
fullyUpdated = 1
cwd = os.getcwd()
if (flags & IGNOREUGIDS) or os.getuid():
noIds = True
else:
noIds = False
if fsPkg:
fsPkg = fsPkg.copy()
else:
fsPkg = package.Trove(pkgCs.getName(), versions.NewVersion(),
pkgCs.getFlavor(), pkgCs.getChangeLog())
fsPkg.mergeTroveListChanges(pkgCs.iterChangedTroves(),
redundantOkay = True)
for (fileId, headPath, headFileVersion) in pkgCs.getNewFileList():
if headPath[0] == '/':
headRealPath = root + headPath
else:
headRealPath = cwd + "/" + headPath
headFile = files.ThawFile(changeSet.getFileChange(fileId), fileId)
try:
s = os.lstat(headRealPath)
# if this file is a directory and the file on the file
# system is a directory, we're OK
if (isinstance(headFile, files.Directory)
and stat.S_ISDIR(s.st_mode)):
# FIXME: this isn't the right directory handling
# we will want to set ownership/permissions if
# they don't conflict with any already-installed package
continue
elif not flags & REPLACEFILES:
self.errors.append("%s is in the way of a newly "
"created file" % headRealPath)
fullyUpdated = 0
continue
except OSError:
# the path doesn't exist, carry on with the restore
pass
self._restore(headFile, headRealPath, "creating %s" % headRealPath)
fsPkg.addFile(fileId, headPath, headFileVersion)
for fileId in pkgCs.getOldFileList():
(path, version) = basePkg.getFile(fileId)
if not fsPkg.hasFile(fileId):
log.debug("%s has already been removed" % path)
continue
if path[0] == '/':
realPath = root + path
else:
realPath = cwd + "/" + path
if flags & MERGE:
try:
# don't remove files if they've been changed locally
localFile = files.FileFromFilesystem(realPath, fileId)
except OSError, exc:
# it's okay if the file is missing, it means we all agree
if exc.errno == errno.ENOENT:
fsPkg.removeFile(fileId)
continue
else:
raise
oldFile = repos.getFileVersion(fileId, version)
# XXX mask out any flag that isn't the config flag.
# There are some flags that the localFile won't have
# such as SHLIB or INITSCRIPT
oldFile.flags.set(oldFile.flags.value() & files._FILE_FLAG_CONFIG)
# don't worry about metadata changes, just content changes
if oldFile.hasContents and localFile.hasContents and \
oldFile.contents != localFile.contents:
self.errors.append("%s has changed but has been removed "
"on head" % path)
continue
self._remove(oldFile, realPath, "removing %s" % path)
fsPkg.removeFile(fileId)
for (fileId, headPath, headFileVersion) in pkgCs.getChangedFileList():
if not fsPkg.hasFile(fileId):
# the file was removed from the local system; this change
# wins
self.userRemoval(pkgCs.getName(), pkgCs.getNewVersion(),
pkgCs.getFlavor(), fileId)
continue
(fsPath, fsVersion) = fsPkg.getFile(fileId)
if fsPath[0] == "/":
rootFixup = root
else:
rootFixup = cwd + "/"
pathOkay = 1
contentsOkay = 1
finalPath = fsPath
# if headPath is none, the name hasn't changed in the repository
if headPath and headPath != fsPath:
# the paths are different; if one of them matches the one
# from the old package, take the other one as it is the one
# which changed
if basePkg.hasFile(fileId):
basePath = basePkg.getFile(fileId)[0]
else:
basePath = None
if (not flags & MERGE) or fsPath == basePath :
# the path changed in the repository, propage that change
self._rename(rootFixup + fsPath, rootFixup + headPath,
"renaming %s to %s" % (fsPath, headPath))
fsPkg.addFile(fileId, headPath, fsVersion)
finalPath = headPath
else:
pathOkay = 0
finalPath = fsPath # let updates work still
self.errors.append("path conflict for %s (%s on head)" %
(fsPath, headPath))
realPath = rootFixup + finalPath
# headFileVersion is None for renames
if headFileVersion:
# FIXME we should be able to inspect headChanges directly
# to see if we need to go into the if statement which follows
# this rather then having to look up the file from the old
# package for every file which has changed
fsFile = files.FileFromFilesystem(realPath, fileId)
if not basePkg.hasFile(fileId):
# a file which was not in the base package was created
# on both the head of the branch and in the filesystem;
# this can happen during source management
self.errors.append("new file %s conflicts with file on "
"head of branch" % realPath)
contentsOkay = 0
else:
(baseFilePath, baseFileVersion) = basePkg.getFile(fileId)
baseFile = repos.getFileVersion(fileId, baseFileVersion)
headChanges = changeSet.getFileChange(fileId)
headFile = baseFile.copy()
headFile.twm(headChanges, headFile)
fsFile.flags.isConfig(headFile.flags.isConfig())
fsChanges = fsFile.diff(baseFile)
attributesChanged = False
if basePkg and headFileVersion and \
not fsFile.metadataEqual(headFile, ignoreOwnerGroup = noIds):
# something has changed for the file
if flags & MERGE:
if noIds:
# we don't want to merge owner/group ids in
# this case (something other than owner/group
# # changed, such as size). simply take the
# head values
baseFile.inode.setOwner(headFile.inode.owner())
baseFile.inode.setGroup(headFile.inode.group())
conflicts = fsFile.twm(headChanges, baseFile,
skip = "contents")
if not conflicts:
attributesChanged = True
else:
contentsOkay = False
self.errors.append("file attributes conflict for %s"
% realPath)
else:
# this forces the change to apply
fsFile.twm(headChanges, fsFile, skip = "contents")
attributesChanged = True
else:
conflicts = True
mergedChanges = None
beenRestored = False
if headFileVersion and headFile.hasContents and \
fsFile.hasContents and \
fsFile.contents.sha1() != headFile.contents.sha1():
# the contents have changed... let's see what to do
# get the contents if the version on head has contents, and
# either
# 1. the version from the base package doesn't have
# contents, or
# 2. the file changed between head and base
# (if both are false, no contents would have been saved for
# this file)
if (headFile.hasContents
and (not baseFile.hasContents
or headFile.contents.sha1() !=
baseFile.contents.sha1())):
headFileContType = changeSet.getFileContentsType(fileId)
else:
headFileContType = None
if (flags & REPLACEFILES) or (not flags & MERGE) or \
fsFile.contents == baseFile.contents:
# the contents changed in just the repository, so take
# those changes
if headFileContType == changeset.ChangedFileTypes.diff:
sha1 = baseFile.contents.sha1()
baseLineF = repos.getFileContents(pkgCs.getName(),
pkgCs.getOldVersion(), pkgCs.getFlavor(),
basePkg.getFile(fileId)[0])
baseLines = baseLineF.readlines()
del baseLineF
headFileContents = changeSet.getFileContents(fileId)[1]
diff = headFileContents.get().readlines()
(newLines, failedHunks) = patch.patch(baseLines, diff)
assert(not failedHunks)
headFileContents = \
filecontents.FromString("".join(newLines))
self._restore(fsFile, realPath,
"replacing %s with contents "
"from repository" % realPath,
contentsOverride = headFileContents)
else:
self._restore(fsFile, realPath,
"replacing %s with contents "
"from repository" % realPath)
beenRestored = True
elif headFile.contents == baseFile.contents:
# it changed in just the filesystem, so leave that change
log.debug("preserving new contents of %s" % realPath)
elif fsFile.flags.isConfig() or headFile.flags.isConfig():
# it changed in both the filesystem and the repository; our
# only hope is to generate a patch for what changed in the
# repository and try and apply it here
if headFileContType != changeset.ChangedFileTypes.diff:
self.errors.append("unexpected content type for %s" %
realPath)
contentsOkay = 0
else:
cur = open(realPath, "r").readlines()
headFileContents = changeSet.getFileContents(fileId)[1]
diff = headFileContents.get().readlines()
(newLines, failedHunks) = patch.patch(cur, diff)
cont = filecontents.FromString("".join(newLines))
self._restore(fsFile, realPath,
"merging changes from repository into %s" %
realPath,
contentsOverride = cont)
beenRestored = True
if failedHunks:
self._createFile(
realPath + ".conflicts",
"".join([x.asString() for x in failedHunks]),
"conflicts from merging changes from "
"head into %s saved as %s.conflicts" %
(realPath, realPath))
contentsOkay = 1
else:
self.errors.append("file contents conflict for %s" % realPath)
contentsOkay = 0
if attributesChanged and not beenRestored:
self._restore(fsFile, realPath,
"merging changes from repository into %s" % realPath,
contentsOverride = None)
if pathOkay and contentsOkay:
# XXX this doesn't even attempt to merge file permissions
# and such; the good part of that is differing owners don't
# break things
if not headFileVersion:
headFileVersion = fsPkg.getFile(fileId)[1]
fsPkg.addFile(fileId, finalPath, headFileVersion)
else:
fullyUpdated = 0
if fullyUpdated:
fsPkg.changeVersion(pkgCs.getNewVersion())
return fsPkg
    def __init__(self, repos, changeSet, fsPkgDict, root, flags = MERGE):
        """
        Constructs the job for applying a change set to the filesystem.

        @param repos: the repository the current package and file information
        is in
        @type repos: repository.Repository
        @param changeSet: the changeset to apply to the filesystem
        @type changeSet: changeset.ChangeSet
        @param fsPkgDict: dictionary mapping a package name to the package
        object representing what's currently stored in the filesystem
        @type fsPkgDict: dict of package.Package
        @param root: root directory to apply changes to (this is ignored for
        source management, which uses the cwd)
        @type root: str
        @param flags: flags which modify update behavior. See L{update}
        module variable summary for flags definitions.
        @type flags: int bitfield
        """
        # work queues consumed later by apply()
        self.renames = []
        self.restores = []
        self.removes = {}
        self.newPackages = []
        self.oldPackages = []
        self.errors = []
        self.newFiles = []
        self.root = root
        self.changeSet = changeSet
        # directory -> count of entries removed from it (see _remove)
        self.directorySet = {}
        self.userRemovals = {}
        # tag name -> paths whose tag handlers must run after apply()
        self.tagUpdates = {}
        self.tagRemoves = {}
        self.repos = repos

        for pkgCs in changeSet.iterNewPackageList():
            name = pkgCs.getName()
            old = pkgCs.getOldVersion()

            if old:
                # updating an installed package: the filesystem state is
                # keyed by the local branch of the old version
                localVer = old.fork(versions.LocalBranch(), sameVerRel = 1)
                basePkg = repos.getTrove(name, old, pkgCs.getFlavor())
                pkg = self._singlePackage(repos, pkgCs, changeSet, basePkg,
                                          fsPkgDict[(name, localVer)], root,
                                          flags)
                self.oldPackages.append((basePkg.getName(),
                                         basePkg.getVersion(),
                                         basePkg.getFlavor()))
            else:
                # brand new package; nothing on disk to merge against
                pkg = self._singlePackage(repos, pkgCs, changeSet, None,
                                          None, root, flags)

            self.newPackages.append(pkg)

        for (name, oldVersion, oldFlavor) in changeSet.getOldPackageList():
            self.oldPackages.append((name, oldVersion, oldFlavor))

            # queue removal of every file owned by a disappearing package
            oldPkg = repos.getTrove(name, oldVersion, oldFlavor)
            for (fileId, path, version) in oldPkg.iterFileList():
                fileObj = repos.getFileVersion(fileId, version)
                self._remove(fileObj, root + path,
                             "removing %s" % root + path)
def _localChanges(repos, changeSet, curPkg, srcPkg, newVersion, root, flags):
    """
    Populates a change set against the files in the filesystem and builds
    a package object which describes the files installed.  The return
    is a tuple with a boolean saying if anything changed and a package
    reflecting what's in the filesystem; the changeSet is updated as a
    side effect.

    @param repos: Repository this directory is against.
    @type repos: repository.Repository
    @param changeSet: Changeset to update with information for this package
    @type changeSet: changeset.ChangeSet
    @param curPkg: Package which is installed
    @type curPkg: package.Package
    @param srcPkg: Package to generate the change set against
    @type srcPkg: package.Package
    @param newVersion: version to use for the newly created package
    @type newVersion: versions.NewVersion
    @param root: root directory the files are in (ignored for sources, which
    are assumed to be in the current directory)
    @type root: str
    @param flags: IGNOREUGIDS or zero
    @type flags: int
    @return: (foundDifference, newPkg) tuple, or None when a tracked file
    is missing from the filesystem
    """
    noIds = ((flags & IGNOREUGIDS) != 0)

    newPkg = curPkg.copy()
    newPkg.changeVersion(newVersion)

    # set of file ids still present in the new package; entries are
    # deleted as they are matched against srcPkg, leaving the new files
    fileIds = {}
    for (fileId, path, version) in newPkg.iterFileList():
        fileIds[fileId] = True

    """
    Iterating over the files in newPkg would be much more natural then
    iterating over the ones in the old package, and then going through
    newPkg to find what we missed. However, doing it the hard way lets
    us iterate right over the changeset we get from the repository.
    """
    if srcPkg:
        fileList = [ x for x in srcPkg.iterFileList() ]
        # need to walk changesets in order of fileid
        fileList.sort()
    else:
        fileList = []

    # Used in the loops to determine whether to mark files as config
    # would be nice to have a better list...
    nonCfgExt = ('ps', 'eps', 'gif', 'png', 'tiff', 'jpeg', 'jpg',
                 'ico', 'rpm', 'ccs', 'gz', 'bz2', 'tgz', 'tbz', 'tbz2')
    isSrcPkg = curPkg.getName().endswith(':source')

    for (fileId, srcPath, srcFileVersion) in fileList:
        # file disappeared
        if not fileIds.has_key(fileId): continue

        (path, version) = newPkg.getFile(fileId)

        del fileIds[fileId]

        if path[0] == '/':
            realPath = root + path
        else:
            realPath = os.getcwd() + "/" + path

        try:
            os.lstat(realPath)
        except OSError:
            log.error("%s is missing (use remove if this is intentional)"
                      % path)
            return None

        srcFile = repos.getFileVersion(fileId, srcFileVersion)

        f = files.FileFromFilesystem(realPath, fileId,
                                     possibleMatch = srcFile)

        # source packages treat everything except known-binary
        # extensions as config files
        extension = path.split(".")[-1]
        if isSrcPkg and extension not in nonCfgExt:
            f.flags.isConfig(set = True)
            # warn about source files missing a trailing newline, which
            # breaks diff-based merging of config files
            sb = os.stat(realPath)
            if sb.st_size > 0 and stat.S_ISREG(sb.st_mode):
                fd = os.open(realPath, os.O_RDONLY)
                os.lseek(fd, -1, 2)
                term = os.read(fd, 1)
                if term != '\n':
                    log.warning("%s does not end with a trailing new line",
                                srcPath)
                os.close(fd)

        if not f.metadataEqual(srcFile, ignoreOwnerGroup = noIds):
            newPkg.addFile(fileId, path, newVersion)

            (filecs, hash) = changeset.fileChangeSet(fileId, srcFile, f)
            changeSet.addFile(fileId, srcFileVersion, newVersion, filecs)

            if hash:
                newCont = filecontents.FromFilesystem(realPath)

                if srcFile.hasContents:
                    theFile = repos.getFileContents(srcPkg.getName(),
                                srcPkg.getVersion(), srcPkg.getFlavor(),
                                srcPath)
                    srcCont = filecontents.FromFile(theFile)

                # NOTE(review): if srcFile has no contents, srcCont is
                # unbound here -- confirm fileContentsDiff is never
                # reached in that case
                (contType, cont) = changeset.fileContentsDiff(srcFile,
                                        srcCont, f, newCont)

                changeSet.addFileContents(fileId, contType, cont,
                                          f.flags.isConfig())

    # whatever is left in fileIds was not in srcPkg: brand new files
    for fileId in fileIds.iterkeys():
        (path, version) = newPkg.getFile(fileId)

        if path[0] == '/':
            realPath = root + path
        else:
            realPath = os.getcwd() + "/" + path

        # if we're committing against head, this better be a new file.
        # if we're generating a diff against someplace else, it might not
        # be.
        assert(srcPkg or isinstance(version, versions.NewVersion))

        f = files.FileFromFilesystem(realPath, fileId)

        extension = path.split(".")[-1]
        if isSrcPkg and extension not in nonCfgExt:
            f.flags.isConfig(set = True)

        # new file, so this part is easy
        changeSet.addFile(fileId, None, newVersion, f.freeze())
        newPkg.addFile(fileId, path, newVersion)

        if f.hasContents:
            newCont = filecontents.FromFilesystem(realPath)
            changeSet.addFileContents(fileId,
                                      changeset.ChangedFileTypes.file,
                                      newCont, f.flags.isConfig())

    (csPkg, filesNeeded, pkgsNeeded) = newPkg.diff(srcPkg)
    assert(not pkgsNeeded)
    changeSet.newPackage(csPkg)

    if (csPkg.getOldFileList() or csPkg.getChangedFileList()
        or csPkg.getNewFileList()):
        foundDifference = 1
    else:
        foundDifference = 0

    return (foundDifference, newPkg)
def buildLocalChanges(repos, pkgList, root = "", flags = 0):
    """
    Run _localChanges() over every requested package, accumulating the
    results into a single change set.

    @param repos: Repository this directory is against.
    @type repos: repository.Repository
    @param pkgList: list of (curPkg, srcPkg, newVersion) tuples, exactly
    as taken by _localChanges()
    @param root: root directory the files are in (ignored for sources,
    which are assumed to be in the current directory)
    @type root: str
    @param flags: IGNOREUGIDS or zero
    @type flags: int
    @return: (changeSet, resultList) with one (changed, newPkg) entry per
    input tuple, or None if any package could not be processed
    """
    cs = changeset.ChangeSet()
    collected = []

    for (curPkg, srcPkg, newVersion) in pkgList:
        outcome = _localChanges(repos, cs, curPkg, srcPkg, newVersion,
                                root, flags)
        if outcome is None:
            # _localChanges has already logged the problem
            return None
        collected.append(outcome)

    return (cs, collected)
def shlibAction(root, shlibList):
    """
    Add any new directories holding the libraries in shlibList to
    root's /etc/ld.so.conf, then run ldconfig chrooted into root.

    @param root: install root; ld.so.conf lives under it and ldconfig is
    executed chrooted into it
    @type root: str
    @param shlibList: full paths (root included) of installed shared
    libraries carrying the 'shlib' tag
    @type shlibList: list of str
    """
    p = "/sbin/ldconfig"

    # NOTE(review): this same warning is issued again just before
    # ldconfig would run below, so unprivileged users see it twice --
    # confirm whether an early return was intended here.
    if os.getuid():
        log.warning("ldconfig skipped (insufficient permissions)")

    # write any needed entries in ld.so.conf before running ldconfig
    sysetc = util.joinPaths(root, '/etc')
    if not os.path.isdir(sysetc):
        # normally happens only during testing, but why not be safe?
        util.mkdirChain(sysetc)

    ldsopath = util.joinPaths(root, '/etc/ld.so.conf')
    try:
        ldso = file(ldsopath, 'r+')
        ldsolines = ldso.readlines()
        ldso.close()
    except:
        # bootstrap (no ld.so.conf yet); NOTE(review): bare except also
        # hides genuine I/O errors -- original behavior kept
        ldsolines = []

    newlines = []
    rootlen = len(root)
    for path in shlibList:
        # strip the install root so the entry is valid inside the chroot
        dirname = os.path.dirname(path)[rootlen:]
        dirline = dirname+'\n'
        if dirline not in ldsolines:
            ldsolines.append(dirline)
            newlines.append(dirname)

    if newlines:
        log.debug("adding ld.so.conf entries: %s",
                  " ".join(newlines))
        # write to a temp file in the same directory, then rename over
        # the original so readers never see a partial ld.so.conf
        ldsofd, ldsotmpname = tempfile.mkstemp(
            'ld.so.conf', '.ct', sysetc)
        try:
            ldso = os.fdopen(ldsofd, 'w')
            os.chmod(ldsotmpname, 0644)
            ldso.writelines(ldsolines)
            ldso.close()
            os.rename(ldsotmpname, ldsopath)
        except:
            os.unlink(ldsotmpname)
            raise

    if os.getuid():
        log.warning("ldconfig skipped (insufficient permissions)")
    elif os.access(util.joinPaths(root, p), os.X_OK) != True:
        log.error("/sbin/ldconfig is not available")
    else:
        log.debug("running ldconfig")
        pid = os.fork()
        if not pid:
            # child: chroot into the install root so ldconfig updates
            # that root's cache, not the host's
            os.chdir(root)
            os.chroot(root)
            try:
                # XXX add a test case for an invalid ldconfig binary
                os.execl(p, p)
            except:
                pass
            os._exit(1)
        (id, status) = os.waitpid(pid, 0)
        if not os.WIFEXITED(status) or os.WEXITSTATUS(status):
            log.error("ldconfig failed")
def runTagCommands(root, cmdList):
    """
    Execute each tag handler command, chrooted into the install root.

    Handlers are skipped (after logging) when root is not '/' and we are
    not running as root, since chroot() would fail.

    @param root: install root; handlers run chrooted here unless it is '/'
    @type root: str
    @param cmdList: argument vectors, one per handler invocation
    @type cmdList: list of lists of str
    """
    uid = os.getuid()
    for cmd in cmdList:
        log.debug("running %s", " ".join(cmd))
        # can't chroot without privileges; skip rather than fail
        if root != '/' and uid:
            continue

        pid = os.fork()
        if not pid:
            os.environ['PATH'] = "/sbin:/bin:/usr/sbin:/usr/bin"
            if root != '/':
                os.chdir(root)
                os.chroot(root)

            try:
                # execv, not execl: cmd is already a full argument vector.
                # (execl would pass the list as a single bogus argument.)
                os.execv(cmd[0], cmd)
            except:
                # report why the exec failed instead of dying silently;
                # the child still exits with failure below
                import traceback
                traceback.print_exc()
            os._exit(1)

        (waitedPid, status) = os.waitpid(pid, 0)
        if not os.WIFEXITED(status) or os.WEXITSTATUS(status):
            log.error("%s failed", cmd[0])
# use execv, not execl. Print the exception to aid in debugging
#
# Copyright (c) 2004 Specifix, Inc.
# All rights reserved
"""
Handles all updates to the file system; files should never get changed
on the filesystem except by this module!
@var MERGE: Flag constant value. If set, merge is attempted,
otherwise the changes from the changeset are used (this is for
rollbacks)
@var REPLACEFILES: Flag constant value. If set, a file that is in
the way of a newly created file will be overwritten. Otherwise an error
is produced.
"""
from repository import changeset
import errno
from repository import filecontents
import files
import log
import os
import package
import patch
import stat
import sys
import tempfile
import util
import versions
from build import tags
# Attempt to merge repository changes with local changes; when unset the
# changeset contents are applied as-is (used for rollbacks) -- see the
# module docstring.
MERGE = 1 << 0
# Overwrite files already in the way of newly created ones instead of
# reporting an error -- see the module docstring.
REPLACEFILES = 1 << 1
# Skip owner/group comparisons when diffing files (used when running
# without sufficient privileges).
IGNOREUGIDS = 1 << 2
class FilesystemJob:
"""
Represents a set of actions which need to be applied to the filesystem.
This is kept very simple to minimize the chance of mistakes or errors.
"""
    def _rename(self, oldPath, newPath, msg):
        """Queue a rename of oldPath to newPath; msg is logged when
        apply() performs the rename."""
        self.renames.append((oldPath, newPath, msg))
def _restore(self, fileObj, target, msg, contentsOverride = ""):
self.restores.append((fileObj.id(), fileObj, target, contentsOverride,
msg))
for tag in fileObj.tags:
if self.tagUpdates.has_key(tag):
self.tagUpdates[tag].append(target)
else:
self.tagUpdates[tag] = [ target ]
    def _remove(self, fileObj, target, msg):
        """
        Queue removal of target.  Directories only get a counter entry
        (they are removed later, once emptied); regular files are queued
        for removal and counted against their parent directory.  Removed
        targets are also recorded against the file's tags.
        """
        if isinstance(fileObj, files.Directory):
            # just make sure the directory has a counter entry
            if not self.directorySet.has_key(target):
                self.directorySet[target] = 0
        else:
            self.removes[target] = (fileObj, msg)

        # every removal counts as one entry leaving its parent directory,
        # letting callers spot directories that become empty
        dir = os.path.dirname(target)
        if self.directorySet.has_key(dir):
            self.directorySet[dir] += 1
        else:
            self.directorySet[dir] = 1

        for tag in fileObj.tags:
            if self.tagRemoves.has_key(tag):
                self.tagRemoves[tag].append(target)
            else:
                self.tagRemoves[tag] = [ target ]
def userRemoval(self, troveName, troveVersion, troveFlavor, fileId):
if not self.userRemovals.has_key((troveName, troveVersion, troveFlavor)):
self.userRemovals[(troveName, troveVersion, troveFlavor)] = [ fileId ]
else:
self.userRemovals.append(fileId)
def iterUserRemovals(self):
for ((troveName, troveVersion, troveFlavor), fileIdList) in \
self.userRemovals.iteritems():
yield (troveName, troveVersion, troveFlavor, fileIdList)
    def _createFile(self, target, str, msg):
        # Queue creation of a brand-new file at target with literal
        # contents str; msg is logged when apply() writes it.
        self.newFiles.append((target, str, msg))
def apply(self, tagSet = {}, tagScript = None):
tagCommands = []
runLdconfig = False
rootLen = len(self.root)
if self.tagRemoves.has_key('tagdescription'):
for path in self.tagRemoves['tagdescription']:
path = path[rootLen:]
tagInfo = None
for ti in tagSet.itervalues():
if ti.tagFile[:rootLen] == self.root and \
ti.tagFile[rootLen:] == path:
tagInfo = ti
break
if tagInfo:
del tagSet[tagInfo.tag]
if "self preremove" in tagInfo.implements:
tagCommands.append([ path, "self", "preremove" ])
del self.tagRemoves['tagdescription']
for tag, l in self.tagRemoves.iteritems():
if not tagSet.has_key(tag): continue
tagInfo = tagSet[tag]
if "files preremove" in tagInfo.implements:
l.sort()
cmd = [ tagInfo.file, "files", "preremove"] + \
[ x[rootLen:] for x in l ]
tagCommands.append(cmd)
if tagCommands:
if tagScript:
f = open(tagScript, "a")
for cmd in tagCommands:
f.write("# %s\n" % " ".join(cmd))
f.close()
else:
runTagCommands(self.root, tagCommands)
tagCommands = []
for (oldPath, newPath, msg) in self.renames:
os.rename(oldPath, newPath)
log.debug(msg)
contents = None
# restore in the same order files appear in the change set
self.restores.sort()
for (fileId, fileObj, target, override, msg) in self.restores:
# None means "don't restore contents"; "" means "take the
# contents from the change set"
if override != "":
contents = override
elif fileObj.hasContents:
contents = self.changeSet.getFileContents(fileId)[1]
fileObj.restore(contents, self.root, target, contents != None)
log.debug(msg)
paths = self.removes.keys()
paths.sort()
paths.reverse()
for target in paths:
(fileObj, msg) = self.removes[target]
# don't worry about files which don't exist
try:
os.lstat(target)
except OSError:
pass
else:
fileObj.remove(target)
log.debug(msg)
for (target, str, msg) in self.newFiles:
try:
os.unlink(target)
except OSError, e:
if e.errno != errno.ENOENT:
raise
f = open(target, "w")
f.write(str)
f.close()
log.warning(msg)
if self.tagUpdates.has_key('shlib'):
shlibAction(self.root, self.tagUpdates['shlib'])
del self.tagUpdates['shlib']
elif runLdconfig:
# override to force ldconfig to run on shlib removal
shlibAction(self.root, [])
if self.tagUpdates.has_key('tagdescription'):
for path in self.tagUpdates['tagdescription']:
# these are new tag action files which we need to run for
# the first time. we run them against everything in the database
# which has this tag, which includes the files we've just
# installed
tagInfo = tags.TagFile(path, {})
path = path[len(self.root):]
# don't run these twice
if self.tagUpdates.has_key(tagInfo.tag):
del self.tagUpdates[tagInfo.tag]
if "self update" in tagInfo.implements:
cmd = [ path, "self", "update" ] + \
[x for x in self.repos.iterFilesWithTag(tagInfo.tag)]
tagCommands.append(cmd)
elif "files update" in tagInfo.implements:
cmd = [ path, "files", "update" ] + \
[x for x in self.repos.iterFilesWithTag(tagInfo.tag)]
if len(cmd) > 3:
tagCommands.append(cmd)
tagSet[tagInfo.tag] = tagInfo
del self.tagUpdates['tagdescription']
for (tag, l) in self.tagUpdates.iteritems():
tagInfo = tagSet.get(tag, None)
if tagInfo is None: continue
if "files update" in tagInfo.implements:
l.sort()
cmd = [ tagInfo.file, "files", "update" ] + \
[ x[rootLen:] for x in l ]
tagCommands.append(cmd)
for tag, l in self.tagRemoves.iteritems():
if not tagSet.has_key(tag): continue
tagInfo = tagSet[tag]
if "files remove" in tagInfo.implements:
l.sort()
cmd = [ tagInfo.file, "files", "remove"] + \
[ x[rootLen:] for x in l ]
tagCommands.append(cmd)
if tagCommands:
if tagScript:
f = open(tagScript, "a")
f.write("\n".join([" ".join(x) for x in tagCommands]))
f.write("\n")
f.close()
else:
runTagCommands(self.root, tagCommands)
    def getErrorList(self):
        """Return the list of error strings accumulated while building
        this job (path and content conflicts found by _singlePackage)."""
        return self.errors
    def iterNewPackageList(self):
        """Iterate over the package objects this job leaves installed."""
        return iter(self.newPackages)
    def getOldPackageList(self):
        """Return (name, version, flavor) tuples for the packages this
        job replaces or removes."""
        return self.oldPackages
    def getDirectoryCountSet(self):
        """Return the mapping of directory path -> count of entries this
        job removes from that directory (built up by _remove)."""
        return self.directorySet
def _singlePackage(self, repos, pkgCs, changeSet, basePkg, fsPkg, root,
flags):
"""
Build up the todo list for applying a single package to the
filesystem. Returns a package object which represents what will
end up in the filsystem after this object's apply() method is
called.
@param repos: the repository the files for basePkg are stored in
@type repos: repository.Repository
@param pkgCs: the package changeset to apply to the filesystem
@type pkgCs: package.PackageChangeSet
@param changeSet: the changeset pkgCs is part of
@type changeSet: changeset.ChangeSet
@param basePkg: the package the stuff in the filesystem came from
@type basePkg: package.Package
@param fsPkg: the package representing what's in the filesystem now
@type fsPkg: package.Package
@param root: root directory to apply changes to (this is ignored for
source management, which uses the cwd)
@type root: str
@param flags: flags which modify update behavior. See L{update}
module variable summary for flags definitions.
@type flags: int bitfield
@rtype: package.Package
"""
if basePkg:
assert(pkgCs.getOldVersion() == basePkg.getVersion())
fullyUpdated = 1
cwd = os.getcwd()
if (flags & IGNOREUGIDS) or os.getuid():
noIds = True
else:
noIds = False
if fsPkg:
fsPkg = fsPkg.copy()
else:
fsPkg = package.Trove(pkgCs.getName(), versions.NewVersion(),
pkgCs.getFlavor(), pkgCs.getChangeLog())
fsPkg.mergeTroveListChanges(pkgCs.iterChangedTroves(),
redundantOkay = True)
for (fileId, headPath, headFileVersion) in pkgCs.getNewFileList():
if headPath[0] == '/':
headRealPath = root + headPath
else:
headRealPath = cwd + "/" + headPath
headFile = files.ThawFile(changeSet.getFileChange(fileId), fileId)
try:
s = os.lstat(headRealPath)
# if this file is a directory and the file on the file
# system is a directory, we're OK
if (isinstance(headFile, files.Directory)
and stat.S_ISDIR(s.st_mode)):
# FIXME: this isn't the right directory handling
# we will want to set ownership/permissions if
# they don't conflict with any already-installed package
continue
elif not flags & REPLACEFILES:
self.errors.append("%s is in the way of a newly "
"created file" % headRealPath)
fullyUpdated = 0
continue
except OSError:
# the path doesn't exist, carry on with the restore
pass
self._restore(headFile, headRealPath, "creating %s" % headRealPath)
fsPkg.addFile(fileId, headPath, headFileVersion)
for fileId in pkgCs.getOldFileList():
(path, version) = basePkg.getFile(fileId)
if not fsPkg.hasFile(fileId):
log.debug("%s has already been removed" % path)
continue
if path[0] == '/':
realPath = root + path
else:
realPath = cwd + "/" + path
if flags & MERGE:
try:
# don't remove files if they've been changed locally
localFile = files.FileFromFilesystem(realPath, fileId)
except OSError, exc:
# it's okay if the file is missing, it means we all agree
if exc.errno == errno.ENOENT:
fsPkg.removeFile(fileId)
continue
else:
raise
oldFile = repos.getFileVersion(fileId, version)
# XXX mask out any flag that isn't the config flag.
# There are some flags that the localFile won't have
# such as SHLIB or INITSCRIPT
oldFile.flags.set(oldFile.flags.value() & files._FILE_FLAG_CONFIG)
# don't worry about metadata changes, just content changes
if oldFile.hasContents and localFile.hasContents and \
oldFile.contents != localFile.contents:
self.errors.append("%s has changed but has been removed "
"on head" % path)
continue
self._remove(oldFile, realPath, "removing %s" % path)
fsPkg.removeFile(fileId)
for (fileId, headPath, headFileVersion) in pkgCs.getChangedFileList():
if not fsPkg.hasFile(fileId):
# the file was removed from the local system; this change
# wins
self.userRemoval(pkgCs.getName(), pkgCs.getNewVersion(),
pkgCs.getFlavor(), fileId)
continue
(fsPath, fsVersion) = fsPkg.getFile(fileId)
if fsPath[0] == "/":
rootFixup = root
else:
rootFixup = cwd + "/"
pathOkay = 1
contentsOkay = 1
finalPath = fsPath
# if headPath is none, the name hasn't changed in the repository
if headPath and headPath != fsPath:
# the paths are different; if one of them matches the one
# from the old package, take the other one as it is the one
# which changed
if basePkg.hasFile(fileId):
basePath = basePkg.getFile(fileId)[0]
else:
basePath = None
if (not flags & MERGE) or fsPath == basePath :
# the path changed in the repository, propage that change
self._rename(rootFixup + fsPath, rootFixup + headPath,
"renaming %s to %s" % (fsPath, headPath))
fsPkg.addFile(fileId, headPath, fsVersion)
finalPath = headPath
else:
pathOkay = 0
finalPath = fsPath # let updates work still
self.errors.append("path conflict for %s (%s on head)" %
(fsPath, headPath))
realPath = rootFixup + finalPath
# headFileVersion is None for renames
if headFileVersion:
# FIXME we should be able to inspect headChanges directly
# to see if we need to go into the if statement which follows
# this rather then having to look up the file from the old
# package for every file which has changed
fsFile = files.FileFromFilesystem(realPath, fileId)
if not basePkg.hasFile(fileId):
# a file which was not in the base package was created
# on both the head of the branch and in the filesystem;
# this can happen during source management
self.errors.append("new file %s conflicts with file on "
"head of branch" % realPath)
contentsOkay = 0
else:
(baseFilePath, baseFileVersion) = basePkg.getFile(fileId)
baseFile = repos.getFileVersion(fileId, baseFileVersion)
headChanges = changeSet.getFileChange(fileId)
headFile = baseFile.copy()
headFile.twm(headChanges, headFile)
fsFile.flags.isConfig(headFile.flags.isConfig())
fsChanges = fsFile.diff(baseFile)
attributesChanged = False
if basePkg and headFileVersion and \
not fsFile.metadataEqual(headFile, ignoreOwnerGroup = noIds):
# something has changed for the file
if flags & MERGE:
if noIds:
# we don't want to merge owner/group ids in
# this case (something other than owner/group
# # changed, such as size). simply take the
# head values
baseFile.inode.setOwner(headFile.inode.owner())
baseFile.inode.setGroup(headFile.inode.group())
conflicts = fsFile.twm(headChanges, baseFile,
skip = "contents")
if not conflicts:
attributesChanged = True
else:
contentsOkay = False
self.errors.append("file attributes conflict for %s"
% realPath)
else:
# this forces the change to apply
fsFile.twm(headChanges, fsFile, skip = "contents")
attributesChanged = True
else:
conflicts = True
mergedChanges = None
beenRestored = False
if headFileVersion and headFile.hasContents and \
fsFile.hasContents and \
fsFile.contents.sha1() != headFile.contents.sha1():
# the contents have changed... let's see what to do
# get the contents if the version on head has contents, and
# either
# 1. the version from the base package doesn't have
# contents, or
# 2. the file changed between head and base
# (if both are false, no contents would have been saved for
# this file)
if (headFile.hasContents
and (not baseFile.hasContents
or headFile.contents.sha1() !=
baseFile.contents.sha1())):
headFileContType = changeSet.getFileContentsType(fileId)
else:
headFileContType = None
if (flags & REPLACEFILES) or (not flags & MERGE) or \
fsFile.contents == baseFile.contents:
# the contents changed in just the repository, so take
# those changes
if headFileContType == changeset.ChangedFileTypes.diff:
sha1 = baseFile.contents.sha1()
baseLineF = repos.getFileContents(pkgCs.getName(),
pkgCs.getOldVersion(), pkgCs.getFlavor(),
basePkg.getFile(fileId)[0])
baseLines = baseLineF.readlines()
del baseLineF
headFileContents = changeSet.getFileContents(fileId)[1]
diff = headFileContents.get().readlines()
(newLines, failedHunks) = patch.patch(baseLines, diff)
assert(not failedHunks)
headFileContents = \
filecontents.FromString("".join(newLines))
self._restore(fsFile, realPath,
"replacing %s with contents "
"from repository" % realPath,
contentsOverride = headFileContents)
else:
self._restore(fsFile, realPath,
"replacing %s with contents "
"from repository" % realPath)
beenRestored = True
elif headFile.contents == baseFile.contents:
# it changed in just the filesystem, so leave that change
log.debug("preserving new contents of %s" % realPath)
elif fsFile.flags.isConfig() or headFile.flags.isConfig():
# it changed in both the filesystem and the repository; our
# only hope is to generate a patch for what changed in the
# repository and try and apply it here
if headFileContType != changeset.ChangedFileTypes.diff:
self.errors.append("unexpected content type for %s" %
realPath)
contentsOkay = 0
else:
cur = open(realPath, "r").readlines()
headFileContents = changeSet.getFileContents(fileId)[1]
diff = headFileContents.get().readlines()
(newLines, failedHunks) = patch.patch(cur, diff)
cont = filecontents.FromString("".join(newLines))
self._restore(fsFile, realPath,
"merging changes from repository into %s" %
realPath,
contentsOverride = cont)
beenRestored = True
if failedHunks:
self._createFile(
realPath + ".conflicts",
"".join([x.asString() for x in failedHunks]),
"conflicts from merging changes from "
"head into %s saved as %s.conflicts" %
(realPath, realPath))
contentsOkay = 1
else:
self.errors.append("file contents conflict for %s" % realPath)
contentsOkay = 0
if attributesChanged and not beenRestored:
self._restore(fsFile, realPath,
"merging changes from repository into %s" % realPath,
contentsOverride = None)
if pathOkay and contentsOkay:
# XXX this doesn't even attempt to merge file permissions
# and such; the good part of that is differing owners don't
# break things
if not headFileVersion:
headFileVersion = fsPkg.getFile(fileId)[1]
fsPkg.addFile(fileId, finalPath, headFileVersion)
else:
fullyUpdated = 0
if fullyUpdated:
fsPkg.changeVersion(pkgCs.getNewVersion())
return fsPkg
def __init__(self, repos, changeSet, fsPkgDict, root, flags = MERGE):
    """
    Constructs the job for applying a change set to the filesystem.

    @param repos: the repository the current package and file information
    is in
    @type repos: repository.Repository
    @param changeSet: the changeset to apply to the filesystem
    @type changeSet: changeset.ChangeSet
    @param fsPkgDict: dictionary mapping a package name to the package
    object representing what's currently stored in the filesystem
    @type fsPkgDict: dict of package.Package
    @param root: root directory to apply changes to (this is ignored for
    source management, which uses the cwd)
    @type root: str
    @param flags: flags which modify update behavior. See L{update}
    module variable summary for flags definitions.
    @type flags: int bitfield
    """
    # Work queues populated by _singlePackage()/_remove() below and
    # consumed when the job is applied.
    self.renames = []
    self.restores = []
    self.removes = {}
    self.newPackages = []
    self.oldPackages = []
    self.errors = []
    self.newFiles = []
    self.root = root
    self.changeSet = changeSet
    self.directorySet = {}
    self.userRemovals = {}
    self.tagUpdates = {}
    self.tagRemoves = {}
    self.repos = repos
    # Build a per-package sub-job for every package the changeset
    # creates or modifies.
    for pkgCs in changeSet.iterNewPackageList():
        name = pkgCs.getName()
        old = pkgCs.getOldVersion()
        if old:
            # Updating an installed package: merge against what's on the
            # local branch of the filesystem for the old version.
            localVer = old.fork(versions.LocalBranch(), sameVerRel = 1)
            basePkg = repos.getTrove(name, old, pkgCs.getFlavor())
            pkg = self._singlePackage(repos, pkgCs, changeSet, basePkg,
                                      fsPkgDict[(name, localVer)], root,
                                      flags)
            self.oldPackages.append((basePkg.getName(),
                                     basePkg.getVersion(),
                                     basePkg.getFlavor()))
        else:
            # Brand-new package; nothing on disk to merge with.
            pkg = self._singlePackage(repos, pkgCs, changeSet, None,
                                      None, root, flags)
        self.newPackages.append(pkg)
    # Schedule removal of every file belonging to packages the changeset
    # erases outright.
    for (name, oldVersion, oldFlavor) in changeSet.getOldPackageList():
        self.oldPackages.append((name, oldVersion, oldFlavor))
        oldPkg = repos.getTrove(name, oldVersion, oldFlavor)
        for (fileId, path, version) in oldPkg.iterFileList():
            fileObj = repos.getFileVersion(fileId, version)
            # NOTE: "%" binds tighter than "+", so this formats root and
            # then appends path -- which yields the intended message.
            self._remove(fileObj, root + path,
                         "removing %s" % root + path)
def _localChanges(repos, changeSet, curPkg, srcPkg, newVersion, root, flags):
    """
    Populates a change set against the files in the filesystem and builds
    a package object which describes the files installed. The return
    is a tuple with a boolean saying if anything changed and a package
    reflecting what's in the filesystem; the changeSet is updated as a
    side effect.

    @param repos: Repository this directory is against.
    @type repos: repository.Repository
    @param changeSet: Changeset to update with information for this package
    @type changeSet: changeset.ChangeSet
    @param curPkg: Package which is installed
    @type curPkg: package.Package
    @param srcPkg: Package to generate the change set against
    @type srcPkg: package.Package
    @param newVersion: version to use for the newly created package
    @type newVersion: versions.NewVersion
    @param root: root directory the files are in (ignored for sources, which
    are assumed to be in the current directory)
    @type root: str
    @param flags: IGNOREUGIDS or zero
    @type flags: int
    """
    noIds = ((flags & IGNOREUGIDS) != 0)
    newPkg = curPkg.copy()
    newPkg.changeVersion(newVersion)
    # fileIds currently in the package; entries are consumed by the
    # srcPkg loop below, leaving only brand-new files for the second loop.
    fileIds = {}
    for (fileId, path, version) in newPkg.iterFileList():
        fileIds[fileId] = True
    """
    Iterating over the files in newPkg would be much more natural then
    iterating over the ones in the old package, and then going through
    newPkg to find what we missed. However, doing it the hard way lets
    us iterate right over the changeset we get from the repository.
    """
    if srcPkg:
        fileList = [ x for x in srcPkg.iterFileList() ]
        # need to walk changesets in order of fileid
        fileList.sort()
    else:
        fileList = []
    # Used in the loops to determine whether to mark files as config
    # would be nice to have a better list...
    nonCfgExt = ('ps', 'eps', 'gif', 'png', 'tiff', 'jpeg', 'jpg',
                 'ico', 'rpm', 'ccs', 'gz', 'bz2', 'tgz', 'tbz', 'tbz2')
    isSrcPkg = curPkg.getName().endswith(':source')
    # Pass 1: files that exist in srcPkg -- diff the filesystem state
    # against the repository version.
    for (fileId, srcPath, srcFileVersion) in fileList:
        # file disappeared
        if not fileIds.has_key(fileId): continue
        (path, version) = newPkg.getFile(fileId)
        del fileIds[fileId]
        if path[0] == '/':
            realPath = root + path
        else:
            # source packages keep paths relative to the cwd
            realPath = os.getcwd() + "/" + path
        try:
            os.lstat(realPath)
        except OSError:
            log.error("%s is missing (use remove if this is intentional)"
                      % path)
            return None
        srcFile = repos.getFileVersion(fileId, srcFileVersion)
        f = files.FileFromFilesystem(realPath, fileId,
                                     possibleMatch = srcFile)
        # In source packages anything without a known binary extension is
        # treated as a config (mergeable) file.
        extension = path.split(".")[-1]
        if isSrcPkg and extension not in nonCfgExt:
            f.flags.isConfig(set = True)
            sb = os.stat(realPath)
            if sb.st_size > 0 and stat.S_ISREG(sb.st_mode):
                # Config files should end in a newline so diffs apply
                # cleanly; warn, but don't fail.
                fd = os.open(realPath, os.O_RDONLY)
                os.lseek(fd, -1, 2)
                term = os.read(fd, 1)
                if term != '\n':
                    log.warning("%s does not end with a trailing new line",
                                srcPath)
                os.close(fd)
        if not f.metadataEqual(srcFile, ignoreOwnerGroup = noIds):
            newPkg.addFile(fileId, path, newVersion)
            (filecs, hash) = changeset.fileChangeSet(fileId, srcFile, f)
            changeSet.addFile(fileId, srcFileVersion, newVersion, filecs)
            if hash:
                # contents changed as well; store a (possibly diffed) copy
                newCont = filecontents.FromFilesystem(realPath)
                if srcFile.hasContents:
                    theFile = repos.getFileContents(srcPkg.getName(),
                        srcPkg.getVersion(), srcPkg.getFlavor(), srcPath)
                    srcCont = filecontents.FromFile(theFile)
                (contType, cont) = changeset.fileContentsDiff(srcFile, srcCont,
                                                              f, newCont)
                changeSet.addFileContents(fileId, contType, cont,
                                          f.flags.isConfig())
    # Pass 2: whatever is left in fileIds was not in srcPkg -- new files.
    for fileId in fileIds.iterkeys():
        (path, version) = newPkg.getFile(fileId)
        if path[0] == '/':
            realPath = root + path
        else:
            realPath = os.getcwd() + "/" + path
        # if we're committing against head, this better be a new file.
        # if we're generating a diff against someplace else, it might not
        # be.
        assert(srcPkg or isinstance(version, versions.NewVersion))
        f = files.FileFromFilesystem(realPath, fileId)
        extension = path.split(".")[-1]
        if isSrcPkg and extension not in nonCfgExt:
            f.flags.isConfig(set = True)
        # new file, so this part is easy
        changeSet.addFile(fileId, None, newVersion, f.freeze())
        newPkg.addFile(fileId, path, newVersion)
        if f.hasContents:
            newCont = filecontents.FromFilesystem(realPath)
            changeSet.addFileContents(fileId,
                                      changeset.ChangedFileTypes.file,
                                      newCont, f.flags.isConfig())
    # Diff the rebuilt package against srcPkg and record the result.
    (csPkg, filesNeeded, pkgsNeeded) = newPkg.diff(srcPkg)
    assert(not pkgsNeeded)
    changeSet.newPackage(csPkg)
    if (csPkg.getOldFileList() or csPkg.getChangedFileList()
            or csPkg.getNewFileList()):
        foundDifference = 1
    else:
        foundDifference = 0
    return (foundDifference, newPkg)
def buildLocalChanges(repos, pkgList, root = "", flags = 0):
    """
    Builds a change set against a set of files currently installed and
    builds a package object which describes the files installed. The
    return is a changeset and a list of tuples, each with a boolean
    saying if anything changed for a package reflecting what's in the
    filesystem for that package.

    @param repos: Repository this directory is against.
    @type repos: repository.Repository
    @param pkgList: Specifies which package to work on, and is a list
    of (curPkg, srcPkg, newVersion) tuples as defined in the parameter
    list for _localChanges()
    @param root: root directory the files are in (ignored for sources, which
    are assumed to be in the current directory)
    @type root: str
    @param flags: IGNOREUGIDS or zero
    @type flags: int
    """
    changeSet = changeset.ChangeSet()
    resultList = []
    for (curPkg, srcPkg, newVersion) in pkgList:
        outcome = _localChanges(repos, changeSet, curPkg, srcPkg,
                                newVersion, root, flags)
        # _localChanges() signals an error (e.g. a missing file) with
        # None; propagate that to the caller untouched.
        if outcome is None:
            return None
        resultList.append(outcome)
    return (changeSet, resultList)
def shlibAction(root, shlibList):
    """Update ld.so.conf under *root* with the directories of the shared
    libraries in *shlibList*, then run ldconfig chrooted into *root*.
    """
    p = "/sbin/ldconfig"
    # NOTE(review): this warning is repeated again just before ldconfig
    # runs below; the function still rewrites ld.so.conf without root.
    if os.getuid():
        log.warning("ldconfig skipped (insufficient permissions)")
    # write any needed entries in ld.so.conf before running ldconfig
    sysetc = util.joinPaths(root, '/etc')
    if not os.path.isdir(sysetc):
        # normally happens only during testing, but why not be safe?
        util.mkdirChain(sysetc)
    ldsopath = util.joinPaths(root, '/etc/ld.so.conf')
    try:
        ldso = file(ldsopath, 'r+')
        ldsolines = ldso.readlines()
        ldso.close()
    except:
        # bootstrap: no ld.so.conf exists yet
        ldsolines = []
    newlines = []
    rootlen = len(root)
    # Collect library directories (install root stripped) that are not
    # already listed in ld.so.conf.
    for path in shlibList:
        dirname = os.path.dirname(path)[rootlen:]
        dirline = dirname+'\n'
        if dirline not in ldsolines:
            ldsolines.append(dirline)
            newlines.append(dirname)
    if newlines:
        log.debug("adding ld.so.conf entries: %s",
                  " ".join(newlines))
        # Write a temp file in the same directory and rename over the
        # original so the update is atomic.
        ldsofd, ldsotmpname = tempfile.mkstemp(
            'ld.so.conf', '.ct', sysetc)
        try:
            ldso = os.fdopen(ldsofd, 'w')
            os.chmod(ldsotmpname, 0644)
            ldso.writelines(ldsolines)
            ldso.close()
            os.rename(ldsotmpname, ldsopath)
        except:
            # clean up the temp file on any failure, then re-raise
            os.unlink(ldsotmpname)
            raise
    if os.getuid():
        log.warning("ldconfig skipped (insufficient permissions)")
    elif os.access(util.joinPaths(root, p), os.X_OK) != True:
        log.error("/sbin/ldconfig is not available")
    else:
        log.debug("running ldconfig")
        pid = os.fork()
        if not pid:
            # child: chroot into the install root so ldconfig sees the
            # target filesystem layout rather than the host's
            os.chdir(root)
            os.chroot(root)
            try:
                # XXX add a test case for an invalid ldconfig binary
                os.execl(p, p)
            except:
                pass
            os._exit(1)
        (id, status) = os.waitpid(pid, 0)
        if not os.WIFEXITED(status) or os.WEXITSTATUS(status):
            log.error("ldconfig failed")
def runTagCommands(root, cmdList):
    """Run each tag-handler command in *cmdList* chrooted into *root*.

    Commands are skipped when we are not root and *root* is not '/',
    since the chroot would fail.
    """
    uid = os.getuid()
    for cmd in cmdList:
        log.debug("running %s", " ".join(cmd))
        # can't chroot without root privileges; skip rather than fail
        if root != '/' and uid:
            continue
        pid = os.fork()
        if not pid:
            # child: restricted PATH, chrooted into the install root
            os.environ['PATH'] = "/sbin:/bin:/usr/sbin:/usr/bin"
            if root != '/':
                os.chdir(root)
                os.chroot(root)
            try:
                os.execv(cmd[0], cmd)
            except Exception, e:
                sys.stderr.write('%s\n' %e)
                pass
            # exec failed; exit the child with a failure status
            os._exit(1)
        (id, status) = os.waitpid(pid, 0)
        if not os.WIFEXITED(status) or os.WEXITSTATUS(status):
            log.error("%s failed", cmd[0])
|
from saluttest import exec_test
from avahitest import AvahiAnnouncer, AvahiRecordAnnouncer, AvahiListener
from avahitest import get_host_name, get_domain_name
import avahi
from xmppstream import setup_stream_listener, connect_to_stream
from servicetest import make_channel_proxy, format_event
from twisted.words.xish import xpath, domish
import time
import dbus
import socket
# Telepathy channel-type / handle-type constants (spec values).
CHANNEL_TYPE_TEXT = "org.freedesktop.Telepathy.Channel.Type.Text"
HT_CONTACT = 1
HT_ROOM = 2
HT_CONTACT_LIST = 3
# mDNS name of the fake contact and of the test suite's own salut.
PUBLISHED_NAME = "acttest"
TESTSUITE_PUBLISHED_NAME = "salutacttest"
# Unique per-run activity id so reruns don't collide with stale records.
ACTIVITY_ID = str(time.time())
# Set once we deliberately join the activity; see activity_listener_hook.
joined_activity = False
def compare_handle(name, conn, handle):
    """Return True if *handle* inspects to the contact name *name*."""
    resolved = conn.InspectHandles(HT_CONTACT, [handle])
    return resolved[0] == name
def wait_for_handle(name, q, conn):
    """Return the contact handle for *name*, waiting for it to appear in
    the 'publish' contact list if it isn't there yet."""
    publish_handle = conn.RequestHandles(HT_CONTACT_LIST, ["publish"])[0]
    publish = conn.RequestChannel(
        "org.freedesktop.Telepathy.Channel.Type.ContactList",
        HT_CONTACT_LIST, publish_handle, False)
    proxy = make_channel_proxy(conn, publish, "Channel.Interface.Group")
    # The contact may already be a member.
    for h in proxy.GetMembers():
        if compare_handle(name, conn, h):
            return h
    # Wait until the record shows up in publish
    while True:
        e = q.expect('dbus-signal', signal='MembersChanged', path=publish)
        # e.args[1] is the list of members just added
        for h in e.args[1]:
            if compare_handle(name, conn, h):
                return h
def activity_listener_hook(q, event):
    # Assert that the testsuite doesn't announce the activity
    # before we have explicitly joined it; once joined_activity is set
    # the announcement is expected and the check is skipped.
    global joined_activity
    if joined_activity:
        return
    assert event.name != \
        ACTIVITY_ID + ":" + TESTSUITE_PUBLISHED_NAME + "@" + get_host_name(), \
        "salut announced the activity while it shouldn't"
def announce_address(hostname, address):
    """Announce an IN A record; *address* is assumed to be dotted-quad IPv4."""
    # Pack the four octets into a single 32-bit value, big-endian.
    packed = 0
    for octet in address.split("."):
        packed = (packed << 8) + int(octet)
    ndata = socket.htonl(packed)
    # Split back into four bytes, most significant first.
    rdata = [(ndata >> shift) & 0xff for shift in (24, 16, 8, 0)]
    AvahiRecordAnnouncer(hostname, 0x1, 0x01, rdata)
def test(q, bus, conn):
    """End-to-end check of OLPC activity announcements.

    Publishes a fake contact and its activity over Avahi, waits for salut
    to notice, joins the activity, and verifies salut announces (and later
    retracts) our membership via mDNS.
    """
    global joined_activity
    conn.Connect()
    q.expect('dbus-signal', signal='StatusChanged', args=[0L, 0L])
    # TXT record describing the fake activity
    activity_txt = { "type": "org.laptop.HelloMesh",
        "name": "HelloMesh",
        "color": "#7b83c1,#260993",
        "txtvers": "0",
        "activity-id": ACTIVITY_ID,
        "room": ACTIVITY_ID
    }
    # Listen for announcements
    l = AvahiListener(q).listen_for_service("_olpc-activity1._udp")
    # Fail the test if salut announces the activity before we join it.
    q.hook(activity_listener_hook, 'service-added')
    contact_name = PUBLISHED_NAME + "@" + get_host_name()
    activity_name = ACTIVITY_ID + ":" + PUBLISHED_NAME + "@" + get_host_name()
    # Announce the fake contact's presence record.
    AvahiAnnouncer(contact_name, "_presence._tcp", 1234, {})
    act_hostname = ACTIVITY_ID + ":" + PUBLISHED_NAME + \
        "._clique._udp." + get_domain_name()
    act_address = "239.253.70.70"
    announce_address(act_hostname, act_address)
    # FIXME, if we use the same name as the running salut will MembersChanged
    # isn't signalled later on, needs to be fixed.
    AvahiAnnouncer(ACTIVITY_ID + ":" + PUBLISHED_NAME,
        "_clique._udp", 12345, {}, hostname = act_hostname)
    AvahiAnnouncer(activity_name, "_olpc-activity1._udp",
        0, activity_txt)
    # Publish a contact, now get it's handle
    handle = wait_for_handle (contact_name, q, conn)
    # Assert that the remote handles signals it joined the activity
    while True:
        e = q.expect('dbus-signal', signal = 'ActivitiesChanged')
        if e.args[0] == handle and e.args[1] != []:
            assert len(e.args[1]) == 1
            assert e.args[1][0][0] == ACTIVITY_ID
            activity_handle = e.args[1][0][1]
            break
    # Check the properties salut resolved from the TXT record.
    act_properties = conn.ActivityProperties.GetProperties(activity_handle)
    assert act_properties['private'] == False
    assert act_properties['color'] == activity_txt['color']
    assert act_properties['name'] == activity_txt['name']
    assert act_properties['type'] == activity_txt['type']
    # Join the activity's text (clique) channel.
    room_channel = conn.RequestChannel(CHANNEL_TYPE_TEXT,
        HT_ROOM, activity_handle, True)
    q.expect('dbus-signal', signal='MembersChanged', path=room_channel,
        args = [u'', [1L], [], [], [], 1L, 0L])
    # Make it public that we joined the activity
    joined_activity = True
    conn.BuddyInfo.SetActivities([(ACTIVITY_ID, activity_handle)])
    q.expect('service-added',
        name = ACTIVITY_ID + ":" + TESTSUITE_PUBLISHED_NAME +
        "@" + get_host_name())
    # Leaving the activity must retract the mDNS announcement.
    conn.BuddyInfo.SetActivities([])
    q.expect('service-removed',
        name = ACTIVITY_ID + ":" + TESTSUITE_PUBLISHED_NAME +
        "@" + get_host_name())
if __name__ == '__main__':
    # Run under a salut published with the test suite's own name so the
    # hook can tell our announcements apart from the fake contact's.
    exec_test(test, { "published-name": TESTSUITE_PUBLISHED_NAME}, timeout=15)
test-olpc-activity-announcements.py: port to new test API
from saluttest import exec_test
from avahitest import AvahiAnnouncer, AvahiRecordAnnouncer, AvahiListener
from avahitest import get_host_name, get_domain_name
import avahi
from xmppstream import setup_stream_listener, connect_to_stream
from servicetest import make_channel_proxy, format_event, EventPattern
from twisted.words.xish import xpath, domish
import constants as cs
import time
import dbus
import socket
# Telepathy channel-type / handle-type constants (spec values).
CHANNEL_TYPE_TEXT = "org.freedesktop.Telepathy.Channel.Type.Text"
HT_CONTACT = 1
HT_ROOM = 2
HT_CONTACT_LIST = 3
# mDNS name of the fake contact and of the test suite's own salut.
PUBLISHED_NAME = "acttest"
TESTSUITE_PUBLISHED_NAME = "salutacttest"
# Unique per-run activity id so reruns don't collide with stale records.
ACTIVITY_ID = str(time.time())
def compare_handle(name, conn, handle):
    """Return True if *handle* inspects to the contact name *name*."""
    inspected = conn.InspectHandles(HT_CONTACT, [handle])
    return name == inspected[0]
def wait_for_handle(name, q, conn):
    """Return the contact handle for *name*, waiting for it to appear in
    the 'publish' contact list if it isn't there yet."""
    publish_handle = conn.RequestHandles(HT_CONTACT_LIST, ["publish"])[0]
    publish = conn.RequestChannel(
        "org.freedesktop.Telepathy.Channel.Type.ContactList",
        HT_CONTACT_LIST, publish_handle, False)
    proxy = make_channel_proxy(conn, publish, "Channel.Interface.Group")
    # The contact may already be a member.
    for h in proxy.GetMembers():
        if compare_handle(name, conn, h):
            return h
    # Wait until the record shows up in publish
    while True:
        e = q.expect('dbus-signal', signal='MembersChanged', path=publish)
        # e.args[1] is the list of members just added
        for h in e.args[1]:
            if compare_handle(name, conn, h):
                return h
def announce_address(hostname, address):
    """Announce an IN A record; *address* is assumed to be dotted-quad IPv4."""
    # Fold the four octets into one 32-bit value, big-endian.
    value = 0
    for part in address.split("."):
        value = (value << 8) + int(part)
    network_order = socket.htonl(value)
    # Split back into four bytes, most significant first.
    rdata = [(network_order >> shift) & 0xff for shift in (24, 16, 8, 0)]
    AvahiRecordAnnouncer(hostname, 0x1, 0x01, rdata)
def test(q, bus, conn):
    """End-to-end check of OLPC activity announcements (new test API).

    Publishes a fake contact and its activity over Avahi, waits for salut
    to notice, joins the activity, and verifies salut announces (and later
    retracts) our membership via mDNS.
    """
    conn.Connect()
    q.expect('dbus-signal', signal='StatusChanged', args=[0L, 0L])
    # TXT record describing the fake activity
    activity_txt = { "type": "org.laptop.HelloMesh",
        "name": "HelloMesh",
        "color": "#7b83c1,#260993",
        "txtvers": "0",
        "activity-id": ACTIVITY_ID,
        "room": ACTIVITY_ID
    }
    # Listen for announcements
    l = AvahiListener(q).listen_for_service("_olpc-activity1._udp")
    # Assert that the testsuite doesn't announce the activity
    service_name = ACTIVITY_ID + ":" + TESTSUITE_PUBLISHED_NAME + "@" + get_host_name()
    forbiden_event = EventPattern('service-added', name=service_name)
    q.forbid_events([forbiden_event])
    contact_name = PUBLISHED_NAME + "@" + get_host_name()
    activity_name = ACTIVITY_ID + ":" + PUBLISHED_NAME + "@" + get_host_name()
    # Announce the fake contact's presence record.
    AvahiAnnouncer(contact_name, "_presence._tcp", 1234, {})
    act_hostname = ACTIVITY_ID + ":" + PUBLISHED_NAME + \
        "._clique._udp." + get_domain_name()
    act_address = "239.253.70.70"
    announce_address(act_hostname, act_address)
    # FIXME, if we use the same name as the running salut will MembersChanged
    # isn't signalled later on, needs to be fixed.
    AvahiAnnouncer(ACTIVITY_ID + ":" + PUBLISHED_NAME,
        "_clique._udp", 12345, {}, hostname = act_hostname)
    AvahiAnnouncer(activity_name, "_olpc-activity1._udp",
        0, activity_txt)
    # Publish a contact, now get it's handle
    handle = wait_for_handle (contact_name, q, conn)
    # Assert that the remote handles signals it joined the activity
    while True:
        e = q.expect('dbus-signal', signal = 'ActivitiesChanged')
        if e.args[0] == handle and e.args[1] != []:
            assert len(e.args[1]) == 1
            assert e.args[1][0][0] == ACTIVITY_ID
            activity_handle = e.args[1][0][1]
            break
    # Check the properties salut resolved from the TXT record.
    act_prop_iface = dbus.Interface(conn, cs.ACTIVITY_PROPERTIES)
    act_properties = act_prop_iface.GetProperties(activity_handle)
    assert act_properties['private'] == False
    assert act_properties['color'] == activity_txt['color']
    assert act_properties['name'] == activity_txt['name']
    assert act_properties['type'] == activity_txt['type']
    # Join the activity's text (clique) channel.
    room_channel = conn.RequestChannel(CHANNEL_TYPE_TEXT,
        HT_ROOM, activity_handle, True)
    q.expect('dbus-signal', signal='MembersChanged', path=room_channel,
        args = [u'', [1L], [], [], [], 1L, 0L])
    # Make it public that we joined the activity
    q.unforbid_events([forbiden_event])
    buddy_info_iface = dbus.Interface(conn, cs.BUDDY_INFO)
    buddy_info_iface.SetActivities([(ACTIVITY_ID, activity_handle)])
    q.expect('service-added',
        name = ACTIVITY_ID + ":" + TESTSUITE_PUBLISHED_NAME +
        "@" + get_host_name())
    # Leaving the activity must retract the mDNS announcement.
    buddy_info_iface.SetActivities([])
    q.expect('service-removed',
        name = ACTIVITY_ID + ":" + TESTSUITE_PUBLISHED_NAME +
        "@" + get_host_name())
if __name__ == '__main__':
    # Run under a salut published with the test suite's own name so the
    # forbidden-event pattern only matches our own announcements.
    exec_test(test, { "published-name": TESTSUITE_PUBLISHED_NAME}, timeout=15)
|
Update point.py
class point():
    """A point with up to three coordinates.

    Attributes:
        L: list of the three coordinate values (missing args default to 0).
        Dim: number of coordinates actually passed to the constructor.
        X, Y, Z: mirrors of L[0], L[1], L[2], refreshed by the _av* methods.
    """

    def __init__(self, *args):
        self.L = [0, 0, 0]
        self.Dim = 0
        for axis, value in enumerate(args):
            self.L[axis] = value
            self.Dim = axis + 1
        self.X, self.Y, self.Z = self.L

    def _avx(self, value):
        """Shift the x coordinate by *value*."""
        self.L[0] = self.L[0] + value
        self.X = self.L[0]

    def _avy(self, value):
        """Shift the y coordinate by *value*."""
        self.L[1] = self.L[1] + value
        self.Y = self.L[1]

    def _avz(self, value):
        """Shift the z coordinate by *value*."""
        self.L[2] = self.L[2] + value
        self.Z = self.L[2]

    def _xyz(self):
        """Return the coordinates as an (x, y, z) tuple."""
        return tuple(self.L)

    def _dim(self):
        """Return the number of dimensions given at construction."""
        return self.Dim

    def _avi(self, i, value):
        """Shift coordinate *i* by *value* and refresh X/Y/Z mirrors."""
        self.L[i] = self.L[i] + value
        self.X, self.Y, self.Z = self.L
|
##
# Copyright (C) 2013, 2014, 2015, 2016, 2017 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from datetime import datetime, timedelta
from importlib import import_module
import gc
import logging
import urllib
from django.apps import apps
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core import mail
from django.core.cache import cache
from django.db import IntegrityError, transaction
from django.db.models import Avg, Case, Count, F, Max, Min, StdDev, Sum, When, IntegerField
from pytz import utc
from watson import search as watson_search
from inboxen import models
from inboxen.celery import app
log = logging.getLogger(__name__)
# Lifetime of cached search results, in seconds (30 minutes).
SEARCH_TIMEOUT = 60 * 30
@app.task(ignore_result=True)
@transaction.atomic()
def statistics():
    """Gather statistics about users and their inboxes.

    Saves a models.Statistic row with three JSON-ish dicts (users, emails,
    inboxes) and keeps a monotonically increasing "running_total" of emails
    seen across runs.
    """
    try:
        last_stat = models.Statistic.objects.latest("date")
    except models.Statistic.DoesNotExist:
        last_stat = None

    # the keys of these dictionaries have awful names for historical reasons
    # don't change them unless you want to do a data migration
    one_day_ago = datetime.now(utc) - timedelta(days=1)
    user_aggregate = {
        "count": Count("id"),
        "new": Sum(Case(When(date_joined__gte=one_day_ago, then=1), output_field=IntegerField())),
        # BUG FIX: this previously counted rows where inbox__isnull=True,
        # i.e. users *without* inboxes, under the "with_inboxes" key.
        # Count rows where an inbox actually exists instead.
        "with_inboxes": Sum(Case(When(inbox__isnull=False, then=1), output_field=IntegerField())),
        "oldest_user": Min("date_joined"),
        "inbox_count__avg": Avg("inbox_count"),
        "inbox_count__sum": Sum("inbox_count"),
        "inbox_count__min": Min("inbox_count"),
        "inbox_count__max": Max("inbox_count"),
    }
    inbox_aggregate = {
        "disowned": Sum(Case(When(user__isnull=True, then=1), output_field=IntegerField())),
        "email_count__avg": Avg("email_count"),
        "email_count__sum": Sum("email_count"),
        "email_count__min": Min("email_count"),
        "email_count__max": Max("email_count"),
    }

    # collect user and inbox stats
    users = get_user_model().objects.annotate(inbox_count=Count("inbox__id")).aggregate(**user_aggregate)
    # split off the inbox_count__* keys into their own dict
    inboxes = {}
    for key in list(users.keys()):
        if key.startswith("inbox"):
            inboxes[key] = users[key]
            del users[key]

    domain_count = models.Domain.objects.available(None).count()
    inboxes_possible = len(settings.INBOX_CHOICES) ** settings.INBOX_LENGTH
    inboxes["total_possible"] = inboxes_possible * domain_count

    # collect email state
    inbox_qs = models.Inbox.objects.exclude(flags=models.Inbox.flags.deleted).annotate(email_count=Count("email__id"))
    emails = inbox_qs.aggregate(**inbox_aggregate)
    inboxes["with_emails"] = inbox_qs.exclude(email_count=0).count()
    emails["emails_read"] = models.Email.objects.filter(flags=models.Email.flags.read).count()

    if last_stat:
        # only ever add to the running total; a negative diff means
        # emails were deleted, which shouldn't shrink the total
        email_diff = (emails["email_count__sum"] or 0) - (last_stat.emails["email_count__sum"] or 0)
        emails["running_total"] = last_stat.emails["running_total"] + max(email_diff, 0)
    else:
        emails["running_total"] = emails["email_count__sum"] or 0

    stat = models.Statistic(
        users=users,
        emails=emails,
        inboxes=inboxes,
    )
    stat.save()
    log.info("Saved statistics (%s)", stat.date)
@app.task(ignore_result=True)
def clean_expired_session():
    """Clear expired sessions"""
    engine = import_module(settings.SESSION_ENGINE)
    store = engine.SessionStore
    try:
        store.clear_expired()
    except NotImplementedError:
        # some session backends have no notion of expiry
        log.info("%s does not implement clear_expired", settings.SESSION_ENGINE)
@app.task(ignore_result=True)
@transaction.atomic()
def inbox_new_flag(user_id, inbox_id=None):
    """Clear the "new messages" flag once the first page of emails is seen.

    With inbox_id set this clears that inbox's "new" flag; without it,
    it clears the user's unified-inbox flag.
    """
    emails = models.Email.objects.order_by("-received_date")
    emails = emails.filter(inbox__user__id=user_id, inbox__flags=~models.Inbox.flags.exclude_from_unified)
    if inbox_id is not None:
        emails = emails.filter(inbox__id=inbox_id)
    emails = list(emails.values_list("id", flat=True)[:100])  # number of emails on page
    emails = models.Email.objects.filter(id__in=emails, flags=~models.Email.flags.seen)
    if emails.count() > 0:
        # if some emails haven't been seen yet, we have nothing else to do
        return
    elif inbox_id is None:
        profile = models.UserProfile.objects.get_or_create(user_id=user_id)[0]
        profile.flags.unified_has_new_messages = False
        profile.save(update_fields=["flags"])
    else:
        # a flag-only change shouldn't trigger a search re-index
        with watson_search.skip_index_update():
            inbox = models.Inbox.objects.get(user__id=user_id, id=inbox_id)
            inbox.flags.new = False
            inbox.save(update_fields=["flags"])
@app.task(ignore_result=True)
def deal_with_flags(email_id_list, user_id, inbox_id=None):
    """Set seen flags on a list of email IDs and then send off tasks to update
    "new" flags on affected Inbox objects
    """
    with transaction.atomic():
        with watson_search.skip_index_update():
            # update seen flags
            models.Email.objects.filter(id__in=email_id_list).update(flags=F('flags').bitor(models.Email.flags.seen))

    if inbox_id is None:
        # grab affected inboxes
        inbox_list = models.Inbox.objects.filter(user__id=user_id, email__id__in=email_id_list)
        inbox_list = inbox_list.distinct()

        for inbox in inbox_list:
            inbox_new_flag.delay(user_id, inbox.id)
    else:
        # we only need to update
        # NOTE(review): called without inbox_id, so this refreshes only the
        # unified-inbox flag; confirm the specific inbox is handled elsewhere
        inbox_new_flag.delay(user_id)
@app.task(ignore_result=True)
def requests():
    """Check for unresolved Inbox allocation requests"""
    requests = models.Request.objects.filter(succeeded__isnull=True)
    requests = requests.select_related("requester").order_by("-date")
    requests = requests.values("id", "amount", "date", "requester__username", "requester__inboxenprofile__pool_amount")

    if len(requests) == 0:
        # nothing outstanding, don't email the admins
        return

    # One formatted paragraph per pending request.
    output = []
    item_format = "User: {username}\n Date: {date}\n Amount: {amount}\n Current: {current}\n"
    for request in requests:
        item = item_format.format(
            username=request["requester__username"],
            date=request["date"],
            amount=request["amount"],
            current=request["requester__inboxenprofile__pool_amount"]
        )
        output.append(item)

    output = "\n\n".join(output)
    mail.mail_admins("Inbox Allocation Requests", output)
@app.task(rate_limit="100/s")
def search(user_id, search_term):
    """Offload the expensive part of search to avoid blocking the web interface"""
    email_subquery = models.Email.objects.viewable(user_id)
    inbox_subquery = models.Inbox.objects.viewable(user_id)
    results = {
        "emails": list(watson_search.search(search_term, models=(email_subquery,)).values_list("id", flat=True)),
        "inboxes": list(watson_search.search(search_term, models=(inbox_subquery,)).values_list("id", flat=True)),
    }
    # Cache under a key the web view can recompute from (user, term).
    # NOTE(review): urllib.quote is Python 2 only (urllib.parse.quote on 3).
    key = u"{0}-{1}".format(user_id, search_term)
    key = urllib.quote(key.encode("utf-8"))
    cache.set(key, results, SEARCH_TIMEOUT)

    return results
@app.task(ignore_result=True)
def force_garbage_collection():
    """Call the garbage collector.
    This task expects to be sent to a broadcast queue
    """
    freed = gc.collect()
    log.info("GC collected {0} objects.".format(freed))
@app.task(rate_limit=500)
@transaction.atomic()
def delete_inboxen_item(model, item_pk):
    """Delete a single Inboxen object by pk, silently ignoring rows that
    have already vanished or that still have protecting references."""
    model_cls = apps.get_app_config("inboxen").get_model(model)
    try:
        instance = model_cls.objects.only('pk').get(pk=item_pk)
        instance.delete()
    except (IntegrityError, model_cls.DoesNotExist):
        pass
@app.task(rate_limit="1/m")
@transaction.atomic()
def batch_delete_items(model, args=None, kwargs=None, batch_number=500):
    """If something goes wrong and you've got a lot of orphaned entries in the
    database, then this is the task you want.

    Be aware: this task pulls a list of PKs from the database which may cause
    increased memory use in the short term.

    * model is a string
    * args and kwargs should be obvious
    * batch_number is the number of delete tasks that get sent off in one go
    """
    _model = apps.get_app_config("inboxen").get_model(model)

    if args is None and kwargs is None:
        raise Exception("You need to specify some filter options!")
    elif args is None:
        args = []
    elif kwargs is None:
        kwargs = {}

    items = _model.objects.only('pk').filter(*args, **kwargs)
    items = [(model, item.pk) for item in items.iterator()]
    if len(items) == 0:
        return

    # Fan the deletes out as chunked sub-tasks, staggered over time
    # (skew) so they don't all hit the database at once.
    items = delete_inboxen_item.chunks(items, batch_number).group()
    items.skew(step=batch_number/10.0)
    items.apply_async()
@app.task(rate_limit="1/h")
def clean_orphan_models():
    """Queue batch deletes for rows no longer referenced by anything."""
    orphan_filters = (
        # Body
        ("body", {"partlist__isnull": True}),
        # HeaderName
        ("headername", {"header__isnull": True}),
        # HeaderData
        ("headerdata", {"header__isnull": True}),
    )
    for model_name, lookup in orphan_filters:
        batch_delete_items.delay(model_name, kwargs=lookup)
Correct query for with inboxes stat
##
# Copyright (C) 2013, 2014, 2015, 2016, 2017 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from datetime import datetime, timedelta
from importlib import import_module
import gc
import logging
import urllib
from django.apps import apps
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core import mail
from django.core.cache import cache
from django.db import IntegrityError, transaction
from django.db.models import Avg, Case, Count, F, Max, Min, StdDev, Sum, When, IntegerField
from pytz import utc
from watson import search as watson_search
from inboxen import models
from inboxen.celery import app
log = logging.getLogger(__name__)
# Lifetime of cached search results, in seconds (30 minutes).
SEARCH_TIMEOUT = 60 * 30
@app.task(ignore_result=True)
@transaction.atomic()
def statistics():
    """Gather statistics about users and their inboxes.

    Saves a models.Statistic row with three dicts (users, emails, inboxes)
    and keeps a monotonically increasing running total of emails seen.
    """
    try:
        last_stat = models.Statistic.objects.latest("date")
    except models.Statistic.DoesNotExist:
        last_stat = None

    # the keys of these dictionaries have awful names for historical reasons
    # don't change them unless you want to do a data migration
    one_day_ago = datetime.now(utc) - timedelta(days=1)
    user_aggregate = {
        "count": Count("id"),
        "new": Sum(Case(When(date_joined__gte=one_day_ago, then=1), output_field=IntegerField())),
        # counts users joined to at least one inbox row
        "with_inboxes": Sum(Case(When(inbox__isnull=False, then=1), output_field=IntegerField())),
        "oldest_user": Min("date_joined"),
        "inbox_count__avg": Avg("inbox_count"),
        "inbox_count__sum": Sum("inbox_count"),
        "inbox_count__min": Min("inbox_count"),
        "inbox_count__max": Max("inbox_count"),
    }
    inbox_aggregate = {
        "disowned": Sum(Case(When(user__isnull=True, then=1), output_field=IntegerField())),
        "email_count__avg": Avg("email_count"),
        "email_count__sum": Sum("email_count"),
        "email_count__min": Min("email_count"),
        "email_count__max": Max("email_count"),
    }

    # collect user and inbox stats
    users = get_user_model().objects.annotate(inbox_count=Count("inbox__id")).aggregate(**user_aggregate)
    # split the inbox_count__* keys out into their own dict
    inboxes = {}
    for key in list(users.keys()):
        if key.startswith("inbox"):
            inboxes[key] = users[key]
            del users[key]

    domain_count = models.Domain.objects.available(None).count()
    inboxes_possible = len(settings.INBOX_CHOICES) ** settings.INBOX_LENGTH
    inboxes["total_possible"] = inboxes_possible * domain_count

    # collect email state
    inbox_qs = models.Inbox.objects.exclude(flags=models.Inbox.flags.deleted).annotate(email_count=Count("email__id"))
    emails = inbox_qs.aggregate(**inbox_aggregate)
    inboxes["with_emails"] = inbox_qs.exclude(email_count=0).count()
    emails["emails_read"] = models.Email.objects.filter(flags=models.Email.flags.read).count()

    if last_stat:
        # only ever add to the running total; deletions must not shrink it
        email_diff = (emails["email_count__sum"] or 0) - (last_stat.emails["email_count__sum"] or 0)
        emails["running_total"] = last_stat.emails["running_total"] + max(email_diff, 0)
    else:
        emails["running_total"] = emails["email_count__sum"] or 0

    stat = models.Statistic(
        users=users,
        emails=emails,
        inboxes=inboxes,
    )
    stat.save()
    log.info("Saved statistics (%s)", stat.date)
@app.task(ignore_result=True)
def clean_expired_session():
    """Clear expired sessions"""
    engine = import_module(settings.SESSION_ENGINE)
    try:
        engine.SessionStore.clear_expired()
    except NotImplementedError:
        # some session backends have no notion of expiry
        log.info("%s does not implement clear_expired", settings.SESSION_ENGINE)
@app.task(ignore_result=True)
@transaction.atomic()
def inbox_new_flag(user_id, inbox_id=None):
    """Clear the "new messages" flag once the first page of emails is seen.

    With inbox_id set this clears that inbox's "new" flag; without it,
    it clears the user's unified-inbox flag.
    """
    emails = models.Email.objects.order_by("-received_date")
    emails = emails.filter(inbox__user__id=user_id, inbox__flags=~models.Inbox.flags.exclude_from_unified)
    if inbox_id is not None:
        emails = emails.filter(inbox__id=inbox_id)
    emails = list(emails.values_list("id", flat=True)[:100])  # number of emails on page
    emails = models.Email.objects.filter(id__in=emails, flags=~models.Email.flags.seen)
    if emails.count() > 0:
        # if some emails haven't been seen yet, we have nothing else to do
        return
    elif inbox_id is None:
        profile = models.UserProfile.objects.get_or_create(user_id=user_id)[0]
        profile.flags.unified_has_new_messages = False
        profile.save(update_fields=["flags"])
    else:
        # a flag-only change shouldn't trigger a search re-index
        with watson_search.skip_index_update():
            inbox = models.Inbox.objects.get(user__id=user_id, id=inbox_id)
            inbox.flags.new = False
            inbox.save(update_fields=["flags"])
@app.task(ignore_result=True)
def deal_with_flags(email_id_list, user_id, inbox_id=None):
    """Set seen flags on a list of email IDs and then send off tasks to update
    "new" flags on affected Inbox objects

    :param email_id_list: iterable of Email primary keys to mark as seen
    :param user_id: primary key of the owning user
    :param inbox_id: when given, the caller was viewing a single inbox, so
                     only the unified view's flag needs recalculating
    """
    with transaction.atomic():
        # skip the search index update: only flag bits change, not indexed text
        with watson_search.skip_index_update():
            # update seen flags
            models.Email.objects.filter(id__in=email_id_list).update(flags=F('flags').bitor(models.Email.flags.seen))

            if inbox_id is None:
                # grab affected inboxes
                inbox_list = models.Inbox.objects.filter(user__id=user_id, email__id__in=email_id_list)
                inbox_list = inbox_list.distinct()

                # queue a recalculation per affected inbox
                for inbox in inbox_list:
                    inbox_new_flag.delay(user_id, inbox.id)
            else:
                # we only need to update
                inbox_new_flag.delay(user_id)
@app.task(ignore_result=True)
def requests():
    """Check for unresolved Inbox allocation requests

    Collects every Request whose ``succeeded`` field is still NULL and mails
    a summary (requester, date, amount, current pool) to the site admins.
    Does nothing when there are no outstanding requests.
    """
    requests = models.Request.objects.filter(succeeded__isnull=True)
    requests = requests.select_related("requester").order_by("-date")
    requests = requests.values("id", "amount", "date", "requester__username", "requester__inboxenprofile__pool_amount")

    # truthiness check evaluates the queryset once; no separate COUNT needed
    if not requests:
        return

    item_format = "User: {username}\n Date: {date}\n Amount: {amount}\n Current: {current}\n"
    output = [
        item_format.format(
            username=request["requester__username"],
            date=request["date"],
            amount=request["amount"],
            current=request["requester__inboxenprofile__pool_amount"]
        )
        for request in requests
    ]

    mail.mail_admins("Inbox Allocation Requests", "\n\n".join(output))
@app.task(rate_limit="100/s")
def search(user_id, search_term):
"""Offload the expensive part of search to avoid blocking the web interface"""
email_subquery = models.Email.objects.viewable(user_id)
inbox_subquery = models.Inbox.objects.viewable(user_id)
results = {
"emails": list(watson_search.search(search_term, models=(email_subquery,)).values_list("id", flat=True)),
"inboxes": list(watson_search.search(search_term, models=(inbox_subquery,)).values_list("id", flat=True)),
}
key = u"{0}-{1}".format(user_id, search_term)
key = urllib.quote(key.encode("utf-8"))
cache.set(key, results, SEARCH_TIMEOUT)
return results
@app.task(ignore_result=True)
def force_garbage_collection():
    """Call the garbage collector.

    This task expects to be sent to a broadcast queue
    """
    object_count = gc.collect()
    log.info("GC collected {0} objects.".format(object_count))
@app.task(rate_limit=500)
@transaction.atomic()
def delete_inboxen_item(model, item_pk):
    """Delete one instance of *model* (a model name string) by primary key.

    Rows that are already gone or still referenced elsewhere are skipped
    silently, so the task is safe to retry or to run against stale PK lists.
    """
    model_class = apps.get_app_config("inboxen").get_model(model)
    try:
        model_class.objects.only('pk').get(pk=item_pk).delete()
    except (IntegrityError, model_class.DoesNotExist):
        # row vanished or is still referenced; nothing to do
        pass
@app.task(rate_limit="1/m")
@transaction.atomic()
def batch_delete_items(model, args=None, kwargs=None, batch_number=500):
"""If something goes wrong and you've got a lot of orphaned entries in the
database, then this is the task you want.
Be aware: this task pulls a list of PKs from the database which may cause
increased memory use in the short term.
* model is a string
* args and kwargs should be obvious
* batch_number is the number of delete tasks that get sent off in one go
"""
_model = apps.get_app_config("inboxen").get_model(model)
if args is None and kwargs is None:
raise Exception("You need to specify some filter options!")
elif args is None:
args = []
elif kwargs is None:
kwargs = {}
items = _model.objects.only('pk').filter(*args, **kwargs)
items = [(model, item.pk) for item in items.iterator()]
if len(items) == 0:
return
items = delete_inboxen_item.chunks(items, batch_number).group()
items.skew(step=batch_number/10.0)
items.apply_async()
@app.task(rate_limit="1/h")
def clean_orphan_models():
# Body
batch_delete_items.delay("body", kwargs={"partlist__isnull": True})
# HeaderName
batch_delete_items.delay("headername", kwargs={"header__isnull": True})
# HeaderData
batch_delete_items.delay("headerdata", kwargs={"header__isnull": True})
|
"""
Based on: https://cloud.google.com/compute/docs/tutorials/python-guide#before-you-begin
"""
import time
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestercore.plugin_base import PluginBase
# from requests.exceptions import SSLError
from pandaharvester.harvestercloud.googlecloud import compute, GoogleVM, ZONE, PROJECT
# setup base logger
base_logger = core_utils.setup_logger('google_submitter')
def wait_for_operation(project, zone, operation_name):
    """
    Waits for an operation to complete.
    TODO: decide whether we want to block or just move on and list the instance status later
    :param project: GCE project name
    :param zone: GCE zone the operation runs in
    :param operation_name: name of the zone operation to poll
    :return: the final operation resource once its status is 'DONE'
    """
    tmp_log = core_utils.make_logger(base_logger, method_name='wait_for_operation')
    tmp_log.debug('Waiting for operation to finish...')

    # poll once a second until GCE reports the operation as done
    while True:
        result = compute.zoneOperations().get(project=project, zone=zone, operation=operation_name).execute()

        if result['status'] != 'DONE':
            time.sleep(1)
            continue

        if 'error' in result:
            raise Exception(result['error'])

        tmp_log.debug('Operation finished...')
        return result
def create_vm(work_spec):
    """
    Boots up a VM in GCE based on the worker specifications

    :param work_spec: worker specifications
    :return: ((submission succeeded, message), dict of changed work_spec attributes)
    """
    work_spec.reset_changed_list()

    tmp_log = core_utils.make_logger(base_logger, 'workerID={0}'.format(work_spec.workerID),
                                     method_name='submit_a_worker')
    # BUG FIX: the walltime placeholder was {0}, which logged nCore a second
    # time instead of maxWalltime
    tmp_log.debug('nCore={0} minRamCount={1} maxDiskCount={2} maxWalltime={3}'.format(work_spec.nCore,
                                                                                     work_spec.minRamCount,
                                                                                     work_spec.maxDiskCount,
                                                                                     work_spec.maxWalltime))

    try:
        vm = GoogleVM(work_spec)
        work_spec.batchID = vm.name
        tmp_log.debug('Going to submit VM {0}'.format(vm.name))
        operation = compute.instances().insert(project=PROJECT, zone=ZONE, body=vm.config).execute()
        # wait_for_operation(PROJECT, ZONE, operation['name'])
        tmp_log.debug('Submitted VM {0}'.format(vm.name))
        return (True, 'OK'), work_spec.get_changed_attributes()
    except Exception as e:
        # submission failed: report the error so the worker can be marked accordingly
        return (False, str(e)), work_spec.get_changed_attributes()
class GoogleSubmitter(PluginBase):
    """
    Plug-in for Google Cloud Engine VM submission. In this case the worker will abstract a VM running a job
    """

    def __init__(self, **kwarg):
        self.logBaseURL = 'http://localhost/test'
        PluginBase.__init__(self, **kwarg)

    def submit_workers(self, work_spec_list):
        """
        Submit one VM per worker and propagate attributes changed during submission.

        :param work_spec_list: list of workers to submit
        :return: list of (status, message) tuples, one per worker
        """
        tmp_log = self.make_logger(base_logger, method_name='submit_workers')
        tmp_log.debug('start nWorkers={0}'.format(len(work_spec_list)))

        # VMs are created serially; the Cloud API client has authentication
        # issues when driven from multiple processes
        ret_val_list = [create_vm(work_spec) for work_spec in work_spec_list]

        # Propagate changed attributes and wire up the log file locations
        ret_list = []
        for work_spec, (ret_val, changed_attributes) in zip(work_spec_list, ret_val_list):
            work_spec.set_attributes_with_dict(changed_attributes)
            work_spec.set_log_file('batch_log', '{0}/{1}.log'.format(self.logBaseURL, work_spec.batchID))
            work_spec.set_log_file('stdout', '{0}/{1}.out'.format(self.logBaseURL, work_spec.batchID))
            work_spec.set_log_file('stderr', '{0}/{1}.err'.format(self.logBaseURL, work_spec.batchID))
            ret_list.append(ret_val)

        tmp_log.debug('done')
        return ret_list
GCE: put VM to submitted state despite API exception
"""
Based on: https://cloud.google.com/compute/docs/tutorials/python-guide#before-you-begin
"""
import time
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestercore.plugin_base import PluginBase
# from requests.exceptions import SSLError
from pandaharvester.harvestercloud.googlecloud import compute, GoogleVM, ZONE, PROJECT
# setup base logger
base_logger = core_utils.setup_logger('google_submitter')
def wait_for_operation(project, zone, operation_name):
    """
    Waits for an operation to complete.
    TODO: decide whether we want to block or just move on and list the instance status later
    :param project: GCE project name
    :param zone: GCE zone the operation runs in
    :param operation_name: name of the zone operation to poll
    :return: the final operation resource once its status is 'DONE'
    :raises Exception: if the finished operation carries an 'error' entry
    """
    tmp_log = core_utils.make_logger(base_logger, method_name='wait_for_operation')
    tmp_log.debug('Waiting for operation to finish...')

    # poll the zone operation once a second until GCE reports it as done
    while True:
        result = compute.zoneOperations().get(project=project, zone=zone, operation=operation_name).execute()

        if result['status'] == 'DONE':
            if 'error' in result:
                raise Exception(result['error'])

            tmp_log.debug('Operation finished...')
            return result

        time.sleep(1)
def create_vm(work_spec):
    """
    Boots up a VM in GCE based on the worker specifications

    :param work_spec: worker specifications
    :return: ((status, message), dict of changed work_spec attributes);
             status False marks the worker "missed", True marks it "submitted"
    """
    work_spec.reset_changed_list()

    tmp_log = core_utils.make_logger(base_logger, 'workerID={0}'.format(work_spec.workerID),
                                     method_name='submit_a_worker')
    # BUG FIX: the walltime placeholder was {0}, which logged nCore a second
    # time instead of maxWalltime
    tmp_log.debug('nCore={0} minRamCount={1} maxDiskCount={2} maxWalltime={3}'.format(work_spec.nCore,
                                                                                     work_spec.minRamCount,
                                                                                     work_spec.maxDiskCount,
                                                                                     work_spec.maxWalltime))

    try:
        vm = GoogleVM(work_spec)
    except Exception as e:
        tmp_log.debug('VM preparation failed with: {0}'.format(e))
        # there was some problem preparing the VM, usually related to interaction with GCE
        # since the VM was not submitted yet, we mark the worker as "missed"
        return (False, str(e)), work_spec.get_changed_attributes()

    try:
        tmp_log.debug('Going to submit VM {0}'.format(vm.name))
        work_spec.batchID = vm.name
        operation = compute.instances().insert(project=PROJECT, zone=ZONE, body=vm.config).execute()
        # wait_for_operation(PROJECT, ZONE, operation['name'])
        tmp_log.debug('Submitted VM {0}'.format(vm.name))
        return (True, 'OK'), work_spec.get_changed_attributes()
    except Exception as e:
        tmp_log.debug('GCE API exception: {0}'.format(e))
        # Despite the exception we will consider the submission successful to set the worker as "submitted".
        # This is related to the GCE API reliability. We have observed that despite failures (time outs, SSL errors,
        # etc) in many cases the VMs still start and we don't want VMs that are not inventorized. If the VM
        # submission failed the harvester monitor will see when listing the running VMs
        return (True, str(e)), work_spec.get_changed_attributes()
class GoogleSubmitter(PluginBase):
    """
    Plug-in for Google Cloud Engine VM submission. In this case the worker will abstract a VM running a job
    """

    def __init__(self, **kwarg):
        self.logBaseURL = 'http://localhost/test'
        PluginBase.__init__(self, **kwarg)

    def submit_workers(self, work_spec_list):
        """
        Submit one VM per worker, then propagate changed attributes and log locations.

        :param work_spec_list: list of workers to submit
        :return: list of (status, message) tuples, one per worker
        """
        tmp_log = self.make_logger(base_logger, method_name='submit_workers')
        tmp_log.debug('start nWorkers={0}'.format(len(work_spec_list)))

        # Submission is serial: the Cloud API client runs into authentication
        # issues when used from multiple processes
        ret_val_list = [create_vm(work_spec) for work_spec in work_spec_list]

        # Propagate changed attributes back onto the work specs
        ret_list = []
        for work_spec, (ret_val, changed_attributes) in zip(work_spec_list, ret_val_list):
            work_spec.set_attributes_with_dict(changed_attributes)
            work_spec.set_log_file('batch_log', '{0}/{1}.log'.format(self.logBaseURL, work_spec.batchID))
            work_spec.set_log_file('stdout', '{0}/{1}.out'.format(self.logBaseURL, work_spec.batchID))
            work_spec.set_log_file('stderr', '{0}/{1}.err'.format(self.logBaseURL, work_spec.batchID))
            ret_list.append(ret_val)

        tmp_log.debug('done')
        return ret_list
|
from tests.unit.dataactcore.factories.staging import AwardFinancialFactory, CertifiedAwardFinancialFactory
from tests.unit.dataactcore.factories.job import SubmissionFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns, populate_publish_status
from dataactcore.models.lookups import PUBLISH_STATUS_DICT
_FILE = 'c27_award_financial'
def test_column_headers(database):
    """The rule's SQL must expose exactly the expected reporting columns."""
    required = {'row_number', 'tas', 'disaster_emergency_fund_code', 'fain', 'uri', 'piid', 'parent_award_id',
                'gross_outlay_amount_by_awa_cpe', 'uniqueid_TAS', 'uniqueid_DisasterEmergencyFundCode',
                'uniqueid_FAIN', 'uniqueid_URI', 'uniqueid_PIID', 'uniqueid_ParentAwardId'}
    reported = set(query_columns(_FILE, database))
    assert reported == required
def test_success(database):
    """ Test File C GrossOutlayByAward_CPE balance for a TAS/DEFC/Award combination should continue to be reported in
        subsequent periods during the FY, once it has been submitted to DATA Act, unless the most recently reported
        outlay balance for this award breakdown was zero.
    """
    populate_publish_status(database)

    # Base submission: one certified row per award-identifier type (FAIN, URI,
    # PIID, PIID+parent), plus a zero-outlay row, a NULL-outlay row, a
    # different-TAS row and an all-identifier DEFC-9 row. Mixed-case values
    # exercise case-insensitive matching against the lowercase follow-ups.
    sub_1 = SubmissionFactory(submission_id=1, cgac_code='test', reporting_fiscal_year=2020, reporting_fiscal_period=3,
                              frec_code=None, publish_status_id=PUBLISH_STATUS_DICT['published'], d2_submission=False)
    caf_fain = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='aBcD', uri=None,
                                              piid=None, parent_award_id=None, disaster_emergency_fund_code='N',
                                              gross_outlay_amount_by_awa_cpe=5)
    caf_uri = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain=None, uri='eFgH',
                                             piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                             gross_outlay_amount_by_awa_cpe=5)
    caf_piid = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain=None, uri=None,
                                              piid='iJkL', parent_award_id=None, disaster_emergency_fund_code='n',
                                              gross_outlay_amount_by_awa_cpe=5)
    caf_paid = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_TAS', fain=None, uri=None,
                                              piid='mNoP', parent_award_id='qRsT', disaster_emergency_fund_code='N',
                                              gross_outlay_amount_by_awa_cpe=5)
    caf_zero = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='xYz', uri=None,
                                              piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                              gross_outlay_amount_by_awa_cpe=0)
    caf_null = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='xyZ', uri=None,
                                              piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                              gross_outlay_amount_by_awa_cpe=None)
    caf_tas = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='different_tas', fain='hiJK',
                                             uri=None, piid=None, parent_award_id=None,
                                             disaster_emergency_fund_code='n', gross_outlay_amount_by_awa_cpe=5)
    caf_all_9 = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='aBcD',
                                               uri='eFgH', piid='mNoP', parent_award_id='qRsT',
                                               disaster_emergency_fund_code='9', gross_outlay_amount_by_awa_cpe=5)
    database.session.add_all([sub_1, caf_fain, caf_uri, caf_piid, caf_paid, caf_zero, caf_null, caf_tas, caf_all_9])
    database.session.commit()

    # quarterly submission with each of the previous values (one of them is 0 now)
    sub_q = SubmissionFactory(submission_id=2, reporting_fiscal_year=2020, reporting_fiscal_period=6, cgac_code='test',
                              frec_code=None, is_quarter_format=True, d2_submission=False)
    af_fain = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain='abcd', uri=None, piid=None,
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=0)
    af_uri = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain=None, uri='efgh', piid=None,
                                   parent_award_id=None, disaster_emergency_fund_code='n',
                                   gross_outlay_amount_by_awa_cpe=3)
    af_piid = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain=None, uri=None, piid='ijkl',
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=7)
    af_paid = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain=None, uri=None, piid='mnop',
                                    parent_award_id='qrst', disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=2)
    af_zero = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain='xyz', uri=None, piid=None,
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=6)
    af_null = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain='xyz', uri=None, piid=None,
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=3)
    af_tas = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='different_tas', fain='hijk', uri=None,
                                   piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                   gross_outlay_amount_by_awa_cpe=2)
    # matches the DEFC of 9 with a different DEFC
    af_9_match = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain='aBcD', uri='eFgH',
                                       piid='mNoP', parent_award_id='qRsT', disaster_emergency_fund_code='n',
                                       gross_outlay_amount_by_awa_cpe=5)
    # Additional line doesn't mess anything up
    af_bonus = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='something_different')
    errors = number_of_errors(_FILE, database, models=[af_fain, af_uri, af_piid, af_paid, af_zero, af_null, af_tas,
                                                       af_9_match, af_bonus],
                              submission=sub_q)
    assert errors == 0

    # period submission with each of the previous values
    sub_p = SubmissionFactory(submission_id=3, reporting_fiscal_year=2020, reporting_fiscal_period=4, cgac_code='test',
                              frec_code=None, is_quarter_format=True, d2_submission=False)
    af_fain = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain='abcd', uri=None, piid=None,
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=9)
    af_uri = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain=None, uri='efgh', piid=None,
                                   parent_award_id=None, disaster_emergency_fund_code='n',
                                   gross_outlay_amount_by_awa_cpe=3)
    af_piid = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain=None, uri=None, piid='ijkl',
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=7)
    af_paid = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain=None, uri=None, piid='mnop',
                                    parent_award_id='qrst', disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=2)
    af_zero = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain='xyz', uri=None, piid=None,
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=6)
    af_null = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain='xyz', uri=None, piid=None,
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=3)
    af_tas = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='different_tas', fain='hijk', uri=None,
                                   piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                   gross_outlay_amount_by_awa_cpe=2)
    # matches the DEFC of 9 with a different DEFC
    af_9_match = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain='aBcD', uri='eFgH',
                                       piid='mNoP', parent_award_id='qRsT', disaster_emergency_fund_code='n',
                                       gross_outlay_amount_by_awa_cpe=5)
    # Additional line doesn't mess anything up
    af_bonus = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='something_different')
    errors = number_of_errors(_FILE, database, models=[af_fain, af_uri, af_piid, af_paid, af_zero, af_null, af_tas,
                                                       af_9_match, af_bonus],
                              submission=sub_p)
    assert errors == 0

    # submission missing the values that were 0 and NULL the previous quarter does not throw errors
    sub_4 = SubmissionFactory(submission_id=4, reporting_fiscal_year=2020, reporting_fiscal_period=6, cgac_code='test',
                              frec_code=None, is_quarter_format=True, d2_submission=False)
    af_fain = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='abcd', uri=None, piid=None,
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=9)
    af_uri = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain=None, uri='efgh', piid=None,
                                   parent_award_id=None, disaster_emergency_fund_code='n',
                                   gross_outlay_amount_by_awa_cpe=3)
    af_piid = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain=None, uri=None, piid='ijkl',
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=7)
    af_paid = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain=None, uri=None, piid='mnop',
                                    parent_award_id='qrst', disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=2)
    af_tas = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='different_tas', fain='hijk', uri=None,
                                   piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                   gross_outlay_amount_by_awa_cpe=2)
    af_9_match = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='aBcD', uri='eFgH',
                                       piid='mNoP', parent_award_id='qRsT', disaster_emergency_fund_code='n',
                                       gross_outlay_amount_by_awa_cpe=5)
    errors = number_of_errors(_FILE, database, models=[af_fain, af_uri, af_piid, af_paid, af_tas, af_9_match],
                              submission=sub_4)
    assert errors == 0

    # submission that doesn't have a "previous period"
    sub_5 = SubmissionFactory(submission_id=5, reporting_fiscal_year=2020, reporting_fiscal_period=5, cgac_code='test',
                              frec_code=None, is_quarter_format=True)
    errors = number_of_errors(_FILE, database, models=[], submission=sub_5)
    assert errors == 0
def test_failure(database):
    """ Test fail File C GrossOutlayByAward_CPE balance for a TAS/DEFC/Award combination should continue to be reported
        in subsequent periods during the FY, once it has been submitted to DATA Act, unless the most recently reported
        outlay balance for this award breakdown was zero.
    """
    populate_publish_status(database)

    # Base submission: two published rows (DEFC 'N' and DEFC '9') that later
    # submissions are expected to keep reporting
    sub_1 = SubmissionFactory(submission_id=1, cgac_code='test', reporting_fiscal_year=2020, reporting_fiscal_period=3,
                              frec_code=None, publish_status_id=PUBLISH_STATUS_DICT['published'], d2_submission=False)
    caf_fain = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='abcd', uri=None,
                                              piid=None, parent_award_id=None, disaster_emergency_fund_code='N',
                                              gross_outlay_amount_by_awa_cpe=5)
    caf_defc_9 = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='abcd',
                                                uri=None, piid=None, parent_award_id=None,
                                                disaster_emergency_fund_code='9', gross_outlay_amount_by_awa_cpe=5)
    database.session.add_all([sub_1, caf_fain, caf_defc_9])
    database.session.commit()

    # submission missing previous period value, missing value of 9 still registers an error
    sub_2 = SubmissionFactory(submission_id=2, reporting_fiscal_year=2020, reporting_fiscal_period=4, cgac_code='test',
                              frec_code=None, is_quarter_format=False, d2_submission=False)
    errors = number_of_errors(_FILE, database, models=[], submission=sub_2)
    assert errors == 2

    # submission with a row that has similar but not exact values (has a uri when the original didn't)
    sub_3 = SubmissionFactory(submission_id=3, reporting_fiscal_year=2020, reporting_fiscal_period=4, cgac_code='test',
                              frec_code=None, is_quarter_format=False, d2_submission=False)
    af_other = AwardFinancialFactory(submission_id=sub_3.submission_id, tas='test_tas', fain='abcd', uri='efgh',
                                     piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                     gross_outlay_amount_by_awa_cpe=5)
    af_defc_9 = AwardFinancialFactory(submission_id=sub_3.submission_id, tas='test_tas', fain='abcd', uri=None,
                                      piid=None, parent_award_id=None, disaster_emergency_fund_code='9',
                                      gross_outlay_amount_by_awa_cpe=5)
    errors = number_of_errors(_FILE, database, models=[af_other, af_defc_9], submission=sub_3)
    assert errors == 1

    # submission with a row that matches but has gross outlay of NULL
    sub_4 = SubmissionFactory(submission_id=4, reporting_fiscal_year=2020, reporting_fiscal_period=4, cgac_code='test',
                              frec_code=None, is_quarter_format=False, d2_submission=False)
    af_null = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='abcd', uri=None,
                                    piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=None)
    af_defc_9 = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='abcd', uri=None,
                                      piid=None, parent_award_id=None, disaster_emergency_fund_code='9',
                                      gross_outlay_amount_by_awa_cpe=5)
    errors = number_of_errors(_FILE, database, models=[af_null, af_defc_9], submission=sub_4)
    assert errors == 1
Fixing tests for failure conditions
from tests.unit.dataactcore.factories.staging import AwardFinancialFactory, CertifiedAwardFinancialFactory
from tests.unit.dataactcore.factories.job import SubmissionFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns, populate_publish_status
from dataactcore.models.lookups import PUBLISH_STATUS_DICT
_FILE = 'c27_award_financial'
def test_column_headers(database):
    """The rule's SQL must expose exactly the expected reporting columns."""
    reported = set(query_columns(_FILE, database))
    required = {'row_number', 'tas', 'disaster_emergency_fund_code', 'fain', 'uri', 'piid', 'parent_award_id',
                'gross_outlay_amount_by_awa_cpe', 'uniqueid_TAS', 'uniqueid_DisasterEmergencyFundCode',
                'uniqueid_FAIN', 'uniqueid_URI', 'uniqueid_PIID', 'uniqueid_ParentAwardId'}
    assert required == reported
def test_success(database):
    """ Test File C GrossOutlayByAward_CPE balance for a TAS/DEFC/Award combination should continue to be reported in
        subsequent periods during the FY, once it has been submitted to DATA Act, unless the most recently reported
        outlay balance for this award breakdown was zero.
    """
    populate_publish_status(database)

    # Base submission: one certified row per award-identifier type (FAIN, URI,
    # PIID, PIID+parent), plus a zero-outlay row, a NULL-outlay row, a
    # different-TAS row and an all-identifier DEFC-9 row. Mixed-case values
    # exercise case-insensitive matching against the lowercase follow-ups.
    sub_1 = SubmissionFactory(submission_id=1, cgac_code='test', reporting_fiscal_year=2020, reporting_fiscal_period=3,
                              frec_code=None, publish_status_id=PUBLISH_STATUS_DICT['published'], d2_submission=False)
    caf_fain = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='aBcD', uri=None,
                                              piid=None, parent_award_id=None, disaster_emergency_fund_code='N',
                                              gross_outlay_amount_by_awa_cpe=5)
    caf_uri = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain=None, uri='eFgH',
                                             piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                             gross_outlay_amount_by_awa_cpe=5)
    caf_piid = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain=None, uri=None,
                                              piid='iJkL', parent_award_id=None, disaster_emergency_fund_code='n',
                                              gross_outlay_amount_by_awa_cpe=5)
    caf_paid = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_TAS', fain=None, uri=None,
                                              piid='mNoP', parent_award_id='qRsT', disaster_emergency_fund_code='N',
                                              gross_outlay_amount_by_awa_cpe=5)
    caf_zero = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='xYz', uri=None,
                                              piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                              gross_outlay_amount_by_awa_cpe=0)
    caf_null = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='xyZ', uri=None,
                                              piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                              gross_outlay_amount_by_awa_cpe=None)
    caf_tas = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='different_tas', fain='hiJK',
                                             uri=None, piid=None, parent_award_id=None,
                                             disaster_emergency_fund_code='n', gross_outlay_amount_by_awa_cpe=5)
    caf_all_9 = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='aBcD',
                                               uri='eFgH', piid='mNoP', parent_award_id='qRsT',
                                               disaster_emergency_fund_code='9', gross_outlay_amount_by_awa_cpe=5)
    database.session.add_all([sub_1, caf_fain, caf_uri, caf_piid, caf_paid, caf_zero, caf_null, caf_tas, caf_all_9])
    database.session.commit()

    # quarterly submission with each of the previous values (one of them is 0 now)
    sub_q = SubmissionFactory(submission_id=2, reporting_fiscal_year=2020, reporting_fiscal_period=6, cgac_code='test',
                              frec_code=None, is_quarter_format=True, d2_submission=False)
    af_fain = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain='abcd', uri=None, piid=None,
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=0)
    af_uri = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain=None, uri='efgh', piid=None,
                                   parent_award_id=None, disaster_emergency_fund_code='n',
                                   gross_outlay_amount_by_awa_cpe=3)
    af_piid = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain=None, uri=None, piid='ijkl',
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=7)
    af_paid = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain=None, uri=None, piid='mnop',
                                    parent_award_id='qrst', disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=2)
    af_zero = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain='xyz', uri=None, piid=None,
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=6)
    af_null = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain='xyz', uri=None, piid=None,
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=3)
    af_tas = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='different_tas', fain='hijk', uri=None,
                                   piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                   gross_outlay_amount_by_awa_cpe=2)
    # matches the DEFC of 9 with a different DEFC
    af_9_match = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='test_tas', fain='aBcD', uri='eFgH',
                                       piid='mNoP', parent_award_id='qRsT', disaster_emergency_fund_code='n',
                                       gross_outlay_amount_by_awa_cpe=5)
    # Additional line doesn't mess anything up
    af_bonus = AwardFinancialFactory(submission_id=sub_q.submission_id, tas='something_different')
    errors = number_of_errors(_FILE, database, models=[af_fain, af_uri, af_piid, af_paid, af_zero, af_null, af_tas,
                                                       af_9_match, af_bonus],
                              submission=sub_q)
    assert errors == 0

    # period submission with each of the previous values
    sub_p = SubmissionFactory(submission_id=3, reporting_fiscal_year=2020, reporting_fiscal_period=4, cgac_code='test',
                              frec_code=None, is_quarter_format=True, d2_submission=False)
    af_fain = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain='abcd', uri=None, piid=None,
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=9)
    af_uri = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain=None, uri='efgh', piid=None,
                                   parent_award_id=None, disaster_emergency_fund_code='n',
                                   gross_outlay_amount_by_awa_cpe=3)
    af_piid = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain=None, uri=None, piid='ijkl',
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=7)
    af_paid = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain=None, uri=None, piid='mnop',
                                    parent_award_id='qrst', disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=2)
    af_zero = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain='xyz', uri=None, piid=None,
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=6)
    af_null = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain='xyz', uri=None, piid=None,
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=3)
    af_tas = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='different_tas', fain='hijk', uri=None,
                                   piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                   gross_outlay_amount_by_awa_cpe=2)
    # matches the DEFC of 9 with a different DEFC
    af_9_match = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='test_tas', fain='aBcD', uri='eFgH',
                                       piid='mNoP', parent_award_id='qRsT', disaster_emergency_fund_code='n',
                                       gross_outlay_amount_by_awa_cpe=5)
    # Additional line doesn't mess anything up
    af_bonus = AwardFinancialFactory(submission_id=sub_p.submission_id, tas='something_different')
    errors = number_of_errors(_FILE, database, models=[af_fain, af_uri, af_piid, af_paid, af_zero, af_null, af_tas,
                                                       af_9_match, af_bonus],
                              submission=sub_p)
    assert errors == 0

    # submission missing the values that were 0 and NULL the previous quarter does not throw errors
    sub_4 = SubmissionFactory(submission_id=4, reporting_fiscal_year=2020, reporting_fiscal_period=6, cgac_code='test',
                              frec_code=None, is_quarter_format=True, d2_submission=False)
    af_fain = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='abcd', uri=None, piid=None,
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=9)
    af_uri = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain=None, uri='efgh', piid=None,
                                   parent_award_id=None, disaster_emergency_fund_code='n',
                                   gross_outlay_amount_by_awa_cpe=3)
    af_piid = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain=None, uri=None, piid='ijkl',
                                    parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=7)
    af_paid = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain=None, uri=None, piid='mnop',
                                    parent_award_id='qrst', disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=2)
    af_tas = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='different_tas', fain='hijk', uri=None,
                                   piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                   gross_outlay_amount_by_awa_cpe=2)
    af_9_match = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='aBcD', uri='eFgH',
                                       piid='mNoP', parent_award_id='qRsT', disaster_emergency_fund_code='n',
                                       gross_outlay_amount_by_awa_cpe=5)
    errors = number_of_errors(_FILE, database, models=[af_fain, af_uri, af_piid, af_paid, af_tas, af_9_match],
                              submission=sub_4)
    assert errors == 0

    # submission that doesn't have a "previous period"
    sub_5 = SubmissionFactory(submission_id=5, reporting_fiscal_year=2020, reporting_fiscal_period=5, cgac_code='test',
                              frec_code=None, is_quarter_format=True)
    errors = number_of_errors(_FILE, database, models=[], submission=sub_5)
    assert errors == 0
def test_failure(database):
    """ Test fail File C GrossOutlayByAward_CPE balance for a TAS/DEFC/Award combination should continue to be reported
        in subsequent periods during the FY, once it has been submitted to DATA Act, unless the most recently reported
        outlay balance for this award breakdown was zero.

        Each scenario builds a submission plus File C rows via factories and
        asserts the number of rule violations reported by number_of_errors().
    """
    populate_publish_status(database)
    # Base submission: published period-3 data containing two award
    # breakdowns (DEFC 'N' and DEFC '9') that later periods must keep reporting.
    sub_1 = SubmissionFactory(submission_id=1, cgac_code='test', reporting_fiscal_year=2020, reporting_fiscal_period=3,
                              frec_code=None, publish_status_id=PUBLISH_STATUS_DICT['published'], d2_submission=False)
    caf_fain = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='abcd', uri=None,
                                              piid=None, parent_award_id=None, disaster_emergency_fund_code='N',
                                              gross_outlay_amount_by_awa_cpe=5)
    caf_defc_9 = CertifiedAwardFinancialFactory(submission_id=sub_1.submission_id, tas='test_tas', fain='abcd',
                                                uri=None, piid=None, parent_award_id='testingHere',
                                                disaster_emergency_fund_code='9', gross_outlay_amount_by_awa_cpe=5)
    database.session.add_all([sub_1, caf_fain, caf_defc_9])
    database.session.commit()
    # submission missing previous period value, missing value of 9 still registers an error
    # (both previously-reported breakdowns are absent -> 2 errors)
    sub_2 = SubmissionFactory(submission_id=2, reporting_fiscal_year=2020, reporting_fiscal_period=4, cgac_code='test',
                              frec_code=None, is_quarter_format=False, d2_submission=False)
    errors = number_of_errors(_FILE, database, models=[], submission=sub_2)
    assert errors == 2
    # submission with a row that has similar but not exact values (has a uri when the original didn't)
    # -> does not count as continuing the original breakdown, so 1 error remains
    sub_3 = SubmissionFactory(submission_id=3, reporting_fiscal_year=2020, reporting_fiscal_period=4, cgac_code='test',
                              frec_code=None, is_quarter_format=False, d2_submission=False)
    af_other = AwardFinancialFactory(submission_id=sub_3.submission_id, tas='test_tas', fain='abcd', uri='efgh',
                                     piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                     gross_outlay_amount_by_awa_cpe=5)
    af_defc_9 = AwardFinancialFactory(submission_id=sub_3.submission_id, tas='test_tas', fain='abcd', uri=None,
                                      piid=None, parent_award_id='testingHere', disaster_emergency_fund_code='9',
                                      gross_outlay_amount_by_awa_cpe=5)
    errors = number_of_errors(_FILE, database, models=[af_other, af_defc_9], submission=sub_3)
    assert errors == 1
    # submission with a row that matches but has gross outlay of NULL
    # NOTE(review): af_defc_9 here uses disaster_emergency_fund_code='n'
    # (not '9' as in the base data) -- presumably intentional; verify.
    sub_4 = SubmissionFactory(submission_id=4, reporting_fiscal_year=2020, reporting_fiscal_period=4, cgac_code='test',
                              frec_code=None, is_quarter_format=False, d2_submission=False)
    af_null = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='abcd', uri=None,
                                    piid=None, parent_award_id=None, disaster_emergency_fund_code='n',
                                    gross_outlay_amount_by_awa_cpe=None)
    af_defc_9 = AwardFinancialFactory(submission_id=sub_4.submission_id, tas='test_tas', fain='abcd', uri=None,
                                      piid=None, parent_award_id='testingHere', disaster_emergency_fund_code='n',
                                      gross_outlay_amount_by_awa_cpe=5)
    errors = number_of_errors(_FILE, database, models=[af_null, af_defc_9], submission=sub_4)
    assert errors == 1
|
# -*- coding: utf-8 -*-
'''
This is the most important file for the web app. It contains the various
routes that end users can use.
For instance
@app.route('/about/', methods=['GET'])
def about():
return render_template("info/about.html")
Where /about/ is the link.
'''
from __future__ import absolute_import
import os
import pandas as pd
from flask import Flask, render_template, url_for, redirect, request
from werkzeug import secure_filename
from flask.ext.admin.contrib import sqla
from flask_admin import Admin, BaseView, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask.ext.admin.contrib.sqla.view import func
from flask_admin.form import BaseForm
from flask_admin.contrib.sqla import tools
from flask_mail import Mail
from flask_security import Security, SQLAlchemyUserDatastore, \
UserMixin, RoleMixin, login_required, current_user
from wtforms.fields import SelectField, StringField
from sqlalchemy import or_
from clic.wordlists import Cheshire3WordList
from clic.keywords import extract_keywords
from clic.web.api import api
from clic.chapter_repository import ChapterRepository
from clic.kwicgrouper import KWICgrouper, concordance_for_line_by_line_file
from clic.web.forms import BOOKS, SUBSETS
from clic.web.models import db, Annotation, Category, Role, User, List, Tag, Note, Subset
# Application object; the JSON API lives in its own blueprint under /api.
app = Flask(__name__, static_url_path='')
app.register_blueprint(api, url_prefix='/api')
# Configuration (secret key, DB URI, mail settings) comes from config.py.
app.config.from_pyfile('config.py')
mail = Mail(app)
db.init_app(app)
# Setup Flask-Security
# add a custom form:
# https://pythonhosted.org/Flask-Security/customizing.html#forms
from flask_security.forms import RegisterForm
from wtforms import TextField
from wtforms.validators import Required
class ExtendedRegisterForm(RegisterForm):
    '''Registration form extended with a mandatory "Name" field.'''
    name = TextField('Name', [Required()])
# Wire Flask-Security to the User/Role models and the extended form.
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore, register_form=ExtendedRegisterForm)
'''
Application routes
'''
#==============================================================================
# Home, about, docs, 404
#==============================================================================
@app.route('/', methods=['GET'])
def index():
    '''Landing page.'''
    return render_template("info/home.html")

@app.route('/about/', methods=['GET'])
def about():
    '''Static "about" page.'''
    return render_template("info/about.html")

@app.route('/documentation/', methods=['GET'])
def documentation():
    '''Static documentation page.'''
    return render_template("info/documentation.html")

@app.route('/releases/', methods=['GET'])
def releases():
    '''Static release-notes page.'''
    return render_template("info/releases.html")

@app.route('/blog/', methods=['GET'])
def blog():
    '''Static blog page.'''
    return render_template("info/blog.html")
#==============================================================================
# Concordances
#==============================================================================
@app.route('/concordances/', methods=['GET'])
def concordances():
    '''Concordance search: show the form, or results when 'terms' was sent.'''
    if 'terms' in request.args.keys(): # form was submitted
        # The results page itself fetches its data via the /api blueprint.
        return render_template("concordance-results.html")
    else:
        return render_template("concordance-form.html")
#==============================================================================
# Clusters
#==============================================================================
def construct_index_name(subset, cluster_length):
    '''
    Build the Cheshire3 index name for a subset / cluster-length pair.

    subset: '' for whole chapters, or e.g. 'quote', 'non-quote',
        'shortsus', 'longsus'
    cluster_length: the n-gram length as a *string* (e.g. '1', '3')

    Examples (corrected to match the actual output):
    subset = 'quote', cluster_length = '3'  -> 'quote-3gram-idx'
    subset = '',      cluster_length = '1'  -> 'chapter-idx'
    subset = '',      cluster_length = '3'  -> '3gram-idx'
    subset = 'quote', cluster_length = '1'  -> 'quote-idx'
    '''
    if int(cluster_length) == 1:
        # Single tokens: chapter-idx for the whole text, otherwise
        # quote-idx, non-quote-idx, shortsus-idx, longsus-idx.
        if not subset:
            return 'chapter-idx'
        return subset + '-idx'
    # n-grams: 3gram-idx, quote-3gram-idx, non-quote-3gram-idx, longsus-3gram-idx
    index_name = subset + '-' + cluster_length + 'gram' + '-idx'
    # An empty subset leaves a leading '-' (e.g. '-3gram-idx'); remove it.
    # lstrip is more precise than the previous strip(): only a *leading*
    # dash can ever occur here, and strip would also eat trailing dashes.
    return index_name.lstrip('-')
#TODO cache
@app.route('/clusters/', methods=['GET'])
def clusters():
    '''
    Cluster (n-gram) wordlist view.
    Used to be:
    /clusters/?testIdxGroup=3gram-idx&testCollection=dickens&testIdxMod=quote
    Now:
    /clusters/?cluster_length=1&subcorpora=dickens&subset=
    '''
    #TODO sorting of df
    #FIXME variables names etc. in js, etc.
    #TODO check: number of tokens is different when changing the 3-4-5grams
    # This is because it respects text unit boundaries
    #TODO optional: let the user select the number of items he/she wants, with an option: all or complete
    #TODO form validation
    # form was submitted
    if 'subset' in request.args.keys():
        subset = request.args.get('subset')
        # args is a multiDictionary: use .getlist() to access individual books
        #TODO rename
        subcorpora = request.args.getlist('subcorpus')
        if not isinstance(subcorpora, list):
            # Defensive: fall back to whitespace-splitting a plain string.
            subcorpora = subcorpora.split()
        cluster_length = request.args.get('cluster_length')
        index_name = construct_index_name(subset, cluster_length)
        clusters = Cheshire3WordList()
        clusters.build_wordlist(index_name, subcorpora)
        # links are still reminisicent of earlier naming scheme
        #FIXME delete linking if not always working
        return render_template("clusters-results.html",
                               cluster_length=cluster_length,
                               subcorpora=subcorpora,
                               subset=subset,
                               selectWords="whole",
                               # only the first 1000 rows are rendered
                               clusters=clusters.wordlist.iloc[:1000],
                               total=clusters.total)
    else:
        return render_template("clusters-form.html")
#==============================================================================
# Keywords
#==============================================================================
#TODO cache
#TODO number of tokens
@app.route('/keywords/', methods=['GET'])
def keywords():
    '''
    Keyword extraction view: builds a wordlist for an analysis corpus and a
    reference corpus, then renders the top keywords computed by
    extract_keywords().

    GET parameters: cluster_length, subset_analysis, subcorpus_analysis
    (repeatable), subset_reference, subcorpus_reference (repeatable).
    '''
    if 'subset_analysis' in request.args.keys(): # form was submitted
        cluster_length = request.args.get('cluster_length')
        subset_analysis = request.args.get('subset_analysis')
        subcorpora_analysis = request.args.getlist('subcorpus_analysis')
        if not isinstance(subcorpora_analysis, list):
            subcorpora_analysis = subcorpora_analysis.split()
        index_name_analysis = construct_index_name(subset_analysis, cluster_length)
        subset_reference = request.args.get('subset_reference')
        subcorpora_reference = request.args.getlist('subcorpus_reference')
        if not isinstance(subcorpora_reference, list):
            subcorpora_reference = subcorpora_reference.split()
        index_name_reference = construct_index_name(subset_reference, cluster_length)
        wordlist_analysis = Cheshire3WordList()
        wordlist_analysis.build_wordlist(index_name_analysis, subcorpora_analysis)
        wordlist_analysis = wordlist_analysis.wordlist
        wordlist_reference = Cheshire3WordList()
        wordlist_reference.build_wordlist(index_name_reference, subcorpora_reference)
        wordlist_reference = wordlist_reference.wordlist
        #FIXME why would reference frequency be a float?
        #TODO check whether the wordlists are not truncated in the process
        #TODO change which columns are added: display: expected*2, underused/overused, etc.
        #TODO RENAME column headers
        #FIXME click to search
        #TODO plug p_value in
        keywords = extract_keywords(wordlist_analysis,
                                    wordlist_reference,
                                    wordlist_analysis.Count.sum(),
                                    wordlist_reference.Count.sum(),
                                    limit_rows=10)
        # BUG FIX: previously passed the undefined name `subset` here, which
        # raised a NameError on every successful submit; the analysis subset
        # is the one the results page describes.
        return render_template("keywords-results.html",
                               subset=subset_analysis,
                               selectWords="whole",
                               subcorpora_analysis=subcorpora_analysis,
                               keywords=keywords)
    else:
        return render_template("keywords-form.html")
#==============================================================================
# Chapters
#==============================================================================
@app.route('/chapter/<book>/<int:number>/')
@app.route('/chapter/<book>/<int:number>/<int:word_index>/<search_term>/')
def chapterView(number, book, word_index=None, search_term=None):
    '''
    Render one chapter of *book*. When *word_index* and *search_term* are
    supplied (second route), the repository returns the chapter with that
    occurrence of the search term highlighted.
    '''
    repository = ChapterRepository()
    if word_index is not None:
        chapter, book_title = repository.get_chapter_with_highlighted_search_term(number, book, word_index, search_term)
    else:
        chapter, book_title = repository.get_chapter(number, book)
    return render_template("chapter-view.html", content=chapter, book_title=book_title)
#==============================================================================
# Subsets
#==============================================================================
@app.route('/subsets/', methods=["GET"])
def subsets():
    '''
    This is a quick and dirty method to display the subsets in our db.
    It now uses GET parameters, but should probably use POST parameters
    ideally.
    The basic design for POST parameters was almost ready but there were a
    few issues.
    '''
    book = request.args.get('book')
    subset = request.args.get('subset')
    if book and subset:
        # Both chosen: hand off to the display view (which validates them).
        return redirect(url_for('subsets_display',
                                book=book,
                                subset=subset))
    return render_template("subsets-form.html")
@app.route('/subsets/<book>/<subset>/', methods=["GET", "POST"])
def subsets_display(book=None, subset=None):
    '''
    Show the lines of one subset text file for one book, read from
    ../textfiles/<subset>/<book>_<subset>.txt relative to this module.
    Unknown book/subset names redirect to the 404 page.
    '''
    if book and subset:
        # make sure they are not malicious names
        book = secure_filename(book)
        subset = secure_filename(subset)
        if book not in BOOKS:
            return redirect(url_for('page_not_found'))
        if subset not in SUBSETS:
            return redirect(url_for('page_not_found'))
        BASE_DIR = os.path.dirname(__file__)
        filename = "../textfiles/{0}/{1}_{0}.txt".format(subset, book)
        with open(os.path.join(BASE_DIR, filename), 'r') as the_file:
            result = the_file.readlines()
        return render_template("subsets-results.html",
                               book=book,
                               subset=subset,
                               result=result,
                               )
    else:
        return redirect(url_for('subsets'))
#==============================================================================
# 404
#==============================================================================
@app.errorhandler(404)
def page_not_found(error):
    # App-wide 404 handler; also targeted directly via
    # url_for('page_not_found') from the subset and pattern views.
    return render_template('page-not-found.html'), 404
#==============================================================================
# KWICgrouper
#==============================================================================
@app.route('/patterns/', methods=["GET"])
def patterns():
    '''
    KWICgrouper view: builds a concordance for *term* within one book's
    subset file, filters it on collocate positions (L5..L1, R1..R5 GET
    parameters with comma-separated word lists), and renders both the
    concordance lines and a collocation-count table with clickable cells.
    '''
    if not 'term' in request.args.keys():
        return render_template("patterns-form.html")
    else:
        # MAKE DRY
        book = request.args.get('book')
        subset = request.args.get('subset')
        term = request.args.get('term').strip()
        local_args = dict(request.args)
        kwic_filter = {}
        # Turn every non-book/subset/term parameter with a value into a
        # position -> [words] filter entry.
        for key,value in local_args.iteritems():
            if key == "subset" or key == "book" or key == "term":
                pass
            elif value[0]:
                # the values are in the first el of the list
                # 'L2': [u'a']
                values = value[0]
                values = values.split(",")
                values = [value.strip() for value in values]
                kwic_filter[key] = values
        if book and subset:
            # make sure they are not malicious names
            book = secure_filename(book)
            subset = secure_filename(subset)
            if book not in BOOKS:
                return redirect(url_for('page_not_found'))
            if subset not in SUBSETS:
                return redirect(url_for('page_not_found'))
        BASE_DIR = os.path.dirname(__file__)
        filename = "../textfiles/{0}/{1}_{0}.txt".format(subset, book)
        concordance = concordance_for_line_by_line_file(os.path.join(BASE_DIR, filename), term)
        # should not be done here
        if not concordance:
            return render_template("patterns-noresults.html")
        kwicgrouper = KWICgrouper(concordance)
        textframe = kwicgrouper.filter_textframe(kwic_filter)
        # Per-column value counts: how often each word occurs at each position.
        collocation_table = textframe.apply(pd.Series.value_counts, axis=0)
        collocation_table["Sum"] = collocation_table.sum(axis=1)
        collocation_table["Left Sum"] = collocation_table[["L5","L4","L3","L2","L1"]].sum(axis=1)
        collocation_table["Right Sum"] = collocation_table[["R5","R4","R3","R2","R1"]].sum(axis=1)
        pd.set_option('display.max_colwidth', 1000)
        # replicate the index so that it is accessible from a row-level apply function
        # http://stackoverflow.com/questions/20035518/insert-a-link-inside-a-pandas-table
        collocation_table["collocate"] = collocation_table.index
        # function that can be applied
        def linkify(row, position, term=None, book=None, subset=None):
            '''
            The purpose is to make links in the dataframe.to_html() output clickable.
            # http://stackoverflow.com/a/26614921
            '''
            if pd.notnull(row[position]):
                return """<a href="/patterns/?{0}={1}&term={2}&book={4}&subset={5}">{3}</a>""".format(position,
                                                                                                      row["collocate"],
                                                                                                      term,
                                                                                                      int(row[position]),
                                                                                                      book,
                                                                                                      subset
                                                                                                      )
        # http://localhost:5000/patterns/?L5=&L4=&L3=&L2=&L1=&term=voice&R1=&R2=&R3=&R4=&R5=&subset=long_suspensions&book=BH
        def linkify_process(df, term, book, subset):
            '''
            Linkifies every column from L5-R5
            '''
            for itm in "L5 L4 L3 L2 L1 R1 R2 R3 R4 R5".split():
                df[itm] = df.apply(linkify, args=([itm, term, book, subset]), axis=1)
            return df
        linkify_process(collocation_table, term, book, subset)
        del collocation_table["collocate"]
        # Drop the empty-string row produced by padding cells.
        collocation_table = collocation_table[collocation_table.index != ""]
        collocation_table = collocation_table.fillna("").to_html(classes=["table", "table-striped", "table-hover", "dataTable", "no-footer", "uonDatatable", 'my_class" id = "dataTableCollocation'],
                                                                 bold_rows=False,
                                                                 ).replace("<", "<").replace(">", ">")
        bookname = book
        subsetname = subset.replace("_", " ").capitalize()
        # this bit is a hack:
        # classes = 'my_class" id = "my_id'
        # http://stackoverflow.com/questions/15079118/js-datatables-from-pandas
        return render_template("patterns-results.html",
                               textframe=textframe,
                               # local_args=kwic_filter,
                               collocation_table=collocation_table,
                               bookname=bookname,
                               subsetname=subsetname)
#==============================================================================
# User annotation of subsets using Flask_admin
#==============================================================================
class SubsetModelView(ModelView):
    '''
    Flask-Admin view for annotating Subset rows with tags and notes.
    The subset content itself is read-only: rows cannot be created or
    deleted, and the text columns are excluded from the edit form.
    '''
    # 'notes.owner.name' works, but cannot be distinguished
    # column_filters = ('book', 'abbr', 'kind', 'corpus', 'text', 'notes', 'tags', 'tags.owner.name', 'tags.owner.email', )
    column_filters = ('book', 'abbr', 'kind', 'text', 'notes', 'tags', 'tags.owner.name', 'tags.owner.email', )
    column_searchable_list = ('abbr', 'text',)
    column_list = ('book', 'kind', 'text', 'tags', 'notes')
    # column_list = ('book', 'text',)
    # column_exclude_list = ['abbr','corpus']
    # column_editable_list could work with the above code included, but not great
    # column_editable_list = ['tags', 'notes']
    column_hide_backrefs = False
    named_filter_urls = True
    # editing
    edit_modal = True
    form_excluded_columns = ['book', 'abbr', 'kind', 'corpus', 'text',]
    # nice but not what we are looking for:
    # inline_models = (Tag, Note)
    # can_view_details = True
    can_create = False
    can_delete = False # disable model deletion
    can_edit = True # TODO disable editable fields
    can_export = True # FIXME
    export_max_rows = 10000
    page_size = 50 # the number of entries to display on the list view

    def is_accessible(self):
        # Any active (logged-in) user may annotate; role check kept for reference.
        # return current_user.has_role('can_annotate')
        return current_user.is_active()
    # def edit_form(self, obj):
    #     return self._use_filtered_tags(super(SubsetModelView, self).edit_form(obj))
    #
    # def _use_filtered_tags(self, form):
    #     form.tags.query_factory = self._get_tags_list
    #     return form
    #
    # def _get_tags_list(self):
    #     return self.session.query(Tag).filter_by(owner=current_user).all()
class TagModelView(ModelView):
    '''
    Admin view over Tag rows, scoped to the logged-in user: both the list
    query and its count are filtered on owner, and the owner field on the
    create/edit forms only offers the current user.
    '''
    action_disallowed_list = ['delete',]
    form_excluded_columns = ['subset',]
    column_editable_list = ['tag_name',]
    named_filter_urls = True
    # column_filters = ['owner.name', 'tag_name']
    column_filters = ['tag_name']

    def is_accessible(self):
        # return current_user.has_role('can_annotate')
        return current_user.is_active()
    # http://stackoverflow.com/a/30741433/2115409
    def get_query(self):
        # Only the current user's tags are listed.
        return self.session.query(self.model).filter(self.model.owner == current_user)
    # http://stackoverflow.com/a/26351005/2115409
    def get_count_query(self):
        # Keep pagination consistent with the filtered get_query above.
        return self.session.query(func.count('*')).filter(self.model.owner==current_user)
    def create_form(self):
        return self._use_filtered_owner(super(TagModelView, self).create_form())
    def edit_form(self, obj):
        return self._use_filtered_owner(super(TagModelView, self).edit_form(obj))
    def _use_filtered_owner(self, form):
        # Restrict the owner dropdown to the current user.
        form.owner.query_factory = self._get_owner_list
        return form
    def _get_owner_list(self):
        return self.session.query(User).filter_by(id=current_user.id).all()
class NoteModelView(ModelView):
    '''
    Admin view over Note rows; same per-user scoping pattern as
    TagModelView (filtered query/count and owner-restricted forms).
    '''
    action_disallowed_list = ['delete',]
    column_editable_list = ['note',]
    form_excluded_columns = ['subset',]
    column_list = ('owner','note',)
    named_filter_urls = True
    column_filters = ('owner.name', 'note',)

    def is_accessible(self):
        # return current_user.has_role('can_annotate')
        return current_user.is_active()
    # http://stackoverflow.com/a/30741433/2115409
    def get_query(self):
        # Only the current user's notes are listed.
        return self.session.query(self.model).filter(self.model.owner == current_user)
    # http://stackoverflow.com/a/26351005/2115409
    def get_count_query(self):
        # Keep pagination consistent with the filtered get_query above.
        return self.session.query(func.count('*')).filter(self.model.owner==current_user)
    def create_form(self):
        return self._use_filtered_owner(super(NoteModelView, self).create_form())
    def edit_form(self, obj):
        return self._use_filtered_owner(super(NoteModelView, self).edit_form(obj))
    def _use_filtered_owner(self, form):
        # Restrict the owner dropdown to the current user.
        form.owner.query_factory = self._get_owner_list
        return form
    def _get_owner_list(self):
        return self.session.query(User).filter_by(id=current_user.id).all()
class UserAdmin(sqla.ModelView):
    '''Admin CRUD over User rows; restricted to the "superman" role.'''
    # Prevent administration of Users unless the currently logged-in user has the "superman" role
    def is_accessible(self):
        return current_user.has_role('superman')
class RoleAdmin(sqla.ModelView):
    '''Admin CRUD over Role rows; restricted to the "superman" role.'''
    # Prevent administration of Roles unless the currently logged-in user has the "superman" role
    def is_accessible(self):
        return current_user.has_role('superman')
# Flask-Admin instance; the index view doubles as the annotation
# documentation page and the whole interface is mounted under /annotation.
admin = Admin(
    app,
    template_mode='bootstrap3',
    index_view=AdminIndexView(
        name='Documentation',
        url='/annotation',
        template="user-annotation.html",
    )
)
admin.add_view(SubsetModelView(Subset, db.session))
admin.add_view(TagModelView(Tag, db.session))
admin.add_view(NoteModelView(Note, db.session))
admin.add_view(UserAdmin(User, db.session))
admin.add_view(RoleAdmin(Role, db.session))
if __name__ == '__main__':
    # Development entry point only; a WSGI container serving the app
    # does not execute this block.
    @app.before_first_request
    def initialize_database():
        # Create any missing tables before handling the first request.
        db.create_all()
    from flask_debugtoolbar import DebugToolbarExtension
    app.debug = True
    toolbar = DebugToolbarExtension(app)
    app.run(host='0.0.0.0', port=5000, debug=True)
Add enforce_list method to make code DRY
# -*- coding: utf-8 -*-
'''
This is the most important file for the web app. It contains the various
routes that end users can use.
For instance
@app.route('/about/', methods=['GET'])
def about():
return render_template("info/about.html")
Where /about/ is the link.
'''
from __future__ import absolute_import
import os
import pandas as pd
from flask import Flask, render_template, url_for, redirect, request
from werkzeug import secure_filename
from flask.ext.admin.contrib import sqla
from flask_admin import Admin, BaseView, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask.ext.admin.contrib.sqla.view import func
from flask_admin.form import BaseForm
from flask_admin.contrib.sqla import tools
from flask_mail import Mail
from flask_security import Security, SQLAlchemyUserDatastore, \
UserMixin, RoleMixin, login_required, current_user
from wtforms.fields import SelectField, StringField
from sqlalchemy import or_
from clic.wordlists import Cheshire3WordList
from clic.keywords import extract_keywords
from clic.web.api import api
from clic.chapter_repository import ChapterRepository
from clic.kwicgrouper import KWICgrouper, concordance_for_line_by_line_file
from clic.web.forms import BOOKS, SUBSETS
from clic.web.models import db, Annotation, Category, Role, User, List, Tag, Note, Subset
# Application object; the JSON API lives in its own blueprint under /api.
app = Flask(__name__, static_url_path='')
app.register_blueprint(api, url_prefix='/api')
# Configuration (secret key, DB URI, mail settings) comes from config.py.
app.config.from_pyfile('config.py')
mail = Mail(app)
db.init_app(app)
# Setup Flask-Security
# add a custom form:
# https://pythonhosted.org/Flask-Security/customizing.html#forms
from flask_security.forms import RegisterForm
from wtforms import TextField
from wtforms.validators import Required

class ExtendedRegisterForm(RegisterForm):
    '''Registration form extended with a mandatory "Name" field.'''
    name = TextField('Name', [Required()])

# Wire Flask-Security to the User/Role models and the extended form.
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore, register_form=ExtendedRegisterForm)
'''
Application routes
'''
#==============================================================================
# Home, about, docs, 404
#==============================================================================
@app.route('/', methods=['GET'])
def index():
    '''Landing page.'''
    return render_template("info/home.html")

@app.route('/about/', methods=['GET'])
def about():
    '''Static "about" page.'''
    return render_template("info/about.html")

@app.route('/documentation/', methods=['GET'])
def documentation():
    '''Static documentation page.'''
    return render_template("info/documentation.html")

@app.route('/releases/', methods=['GET'])
def releases():
    '''Static release-notes page.'''
    return render_template("info/releases.html")

@app.route('/blog/', methods=['GET'])
def blog():
    '''Static blog page.'''
    return render_template("info/blog.html")
#==============================================================================
# Concordances
#==============================================================================
@app.route('/concordances/', methods=['GET'])
def concordances():
    '''Concordance search: show the form, or results when 'terms' was sent.'''
    if 'terms' in request.args.keys(): # form was submitted
        # The results page itself fetches its data via the /api blueprint.
        return render_template("concordance-results.html")
    else:
        return render_template("concordance-form.html")
#==============================================================================
# Clusters
#==============================================================================
def construct_index_name(subset, cluster_length):
    '''
    Build the Cheshire3 index name for a subset / cluster-length pair.

    subset: '' for whole chapters, or e.g. 'quote', 'non-quote',
        'shortsus', 'longsus'
    cluster_length: the n-gram length as a *string* (e.g. '1', '3')

    Examples (corrected to match the actual output):
    subset = 'quote', cluster_length = '3'  -> 'quote-3gram-idx'
    subset = '',      cluster_length = '1'  -> 'chapter-idx'
    subset = '',      cluster_length = '3'  -> '3gram-idx'
    subset = 'quote', cluster_length = '1'  -> 'quote-idx'
    '''
    if int(cluster_length) == 1:
        # Single tokens: chapter-idx for the whole text, otherwise
        # quote-idx, non-quote-idx, shortsus-idx, longsus-idx.
        if not subset:
            return 'chapter-idx'
        return subset + '-idx'
    # n-grams: 3gram-idx, quote-3gram-idx, non-quote-3gram-idx, longsus-3gram-idx
    index_name = subset + '-' + cluster_length + 'gram' + '-idx'
    # An empty subset leaves a leading '-' (e.g. '-3gram-idx'); remove it.
    # lstrip is more precise than the previous strip(): only a *leading*
    # dash can ever occur here, and strip would also eat trailing dashes.
    return index_name.lstrip('-')
def enforce_list(sequence):
    '''
    Ensure the input is a list.

    If *sequence* is already a list it is returned unchanged. Otherwise it
    is assumed to be a string and is split on whitespace; a string without
    whitespace therefore yields a single-item list.

    (Docstring fixed: previously read "splitting is on whitespace".)
    '''
    if isinstance(sequence, list):
        return sequence
    return sequence.split()
#TODO cache
@app.route('/clusters/', methods=['GET'])
def clusters():
    '''
    Cluster (n-gram) wordlist view, e.g.:
    /clusters/?cluster_length=1&subcorpus=dickens&subset=quote
    Number of tokens is different when changing the 3-4-5-grams.
    This is because it respects text unit boundaries.
    #TODO optional: let the user select the number of items he/she wants, with an option: all or complete
    #TODO form validation
    '''
    # form was submitted
    if 'subset' in request.args.keys():
        subset = request.args.get('subset')
        # args is a multiDictionary: use .getlist() to access individual books
        subcorpora = enforce_list(request.args.getlist('subcorpus'))
        cluster_length = request.args.get('cluster_length')
        index_name = construct_index_name(subset, cluster_length)
        clusters = Cheshire3WordList()
        clusters.build_wordlist(index_name, subcorpora)
        #FIXME delete linking if not always working
        return render_template("clusters-results.html",
                               cluster_length=cluster_length,
                               subcorpora=subcorpora,
                               subset=subset,
                               selectWords="whole",
                               # limit results to 1000 rows
                               clusters=clusters.wordlist.iloc[:1000],
                               total=clusters.total)
    # no form was submitted, return form
    return render_template("clusters-form.html")
#==============================================================================
# Keywords
#==============================================================================
#TODO cache
#TODO number of tokens
@app.route('/keywords/', methods=['GET'])
def keywords():
    '''
    Keyword extraction view: builds a wordlist for an analysis corpus and
    a reference corpus, then renders the top keywords computed by
    extract_keywords().
    '''
    if 'subset_analysis' in request.args.keys(): # form was submitted
        cluster_length = request.args.get('cluster_length')
        # Analysis corpus wordlist.
        subset_analysis = request.args.get('subset_analysis')
        subcorpora_analysis = enforce_list(request.args.getlist('subcorpus_analysis'))
        index_name_analysis = construct_index_name(subset_analysis, cluster_length)
        wordlist_analysis = Cheshire3WordList()
        wordlist_analysis.build_wordlist(index_name_analysis, subcorpora_analysis)
        wordlist_analysis = wordlist_analysis.wordlist
        # Reference corpus wordlist.
        subset_reference = request.args.get('subset_reference')
        subcorpora_reference = enforce_list(request.args.getlist('subcorpus_reference'))
        index_name_reference = construct_index_name(subset_reference, cluster_length)
        wordlist_reference = Cheshire3WordList()
        wordlist_reference.build_wordlist(index_name_reference, subcorpora_reference)
        wordlist_reference = wordlist_reference.wordlist
        #FIXME why would reference frequency be a float?
        #TODO check whether the wordlists are not truncated in the process
        #TODO change which columns are added: display: expected*2, underused/overused, etc.
        #TODO RENAME column headers
        #FIXME click to search
        #TODO plug p_value in
        keywords = extract_keywords(wordlist_analysis,
                                    wordlist_reference,
                                    wordlist_analysis.Count.sum(),
                                    wordlist_reference.Count.sum(),
                                    limit_rows=10)
        return render_template("keywords-results.html",
                               subset=subset_analysis,
                               selectWords="whole",
                               subcorpora_analysis=subcorpora_analysis,
                               keywords=keywords)
    else:
        return render_template("keywords-form.html")
#==============================================================================
# Chapters
#==============================================================================
@app.route('/chapter/<book>/<int:number>/')
@app.route('/chapter/<book>/<int:number>/<int:word_index>/<search_term>/')
def chapterView(number, book, word_index=None, search_term=None):
    '''
    Render one chapter of *book*. When *word_index* and *search_term* are
    supplied (second route), the repository highlights that occurrence of
    the search term in the chapter text.
    '''
    chapter_repository = ChapterRepository()
    if word_index is None:
        chapter, book_title = chapter_repository.get_chapter(number, book)
    else:
        chapter, book_title = chapter_repository.get_chapter_with_highlighted_search_term(number, book, word_index, search_term)
    return render_template("chapter-view.html", content=chapter, book_title=book_title)
#==============================================================================
# Subsets
#==============================================================================
@app.route('/subsets/', methods=["GET"])
def subsets():
    '''
    This is a quick and dirty method to display the subsets in our db.
    It now uses GET parameters, but should probably use POST parameters
    ideally.
    The basic design for POST parameters was almost ready but there were a
    few issues.
    '''
    book = request.args.get('book')
    subset = request.args.get('subset')
    if book and subset:
        # Both chosen: hand off to the display view (which validates them).
        return redirect(url_for('subsets_display',
                                book=book,
                                subset=subset))
    return render_template("subsets-form.html")
@app.route('/subsets/<book>/<subset>/', methods=["GET", "POST"])
def subsets_display(book=None, subset=None):
    '''
    Show the lines of one subset text file for one book, read from
    ../textfiles/<subset>/<book>_<subset>.txt relative to this module.
    Unknown book/subset names redirect to the 404 page.
    '''
    if book and subset:
        # make sure they are not malicious names
        book = secure_filename(book)
        subset = secure_filename(subset)
        if book not in BOOKS:
            return redirect(url_for('page_not_found'))
        if subset not in SUBSETS:
            return redirect(url_for('page_not_found'))
        BASE_DIR = os.path.dirname(__file__)
        filename = "../textfiles/{0}/{1}_{0}.txt".format(subset, book)
        with open(os.path.join(BASE_DIR, filename), 'r') as the_file:
            result = the_file.readlines()
        return render_template("subsets-results.html",
                               book=book,
                               subset=subset,
                               result=result,
                               )
    else:
        return redirect(url_for('subsets'))
#==============================================================================
# 404
#==============================================================================
@app.errorhandler(404)
def page_not_found(error):
    # App-wide 404 handler; also targeted directly via
    # url_for('page_not_found') from the subset and pattern views.
    return render_template('page-not-found.html'), 404
#==============================================================================
# KWICgrouper
#==============================================================================
@app.route('/patterns/', methods=["GET"])
def patterns():
    '''
    KWICgrouper view: builds a concordance for *term* within one book's
    subset file, filters it on collocate positions (L5..L1, R1..R5 GET
    parameters with comma-separated word lists), and renders both the
    concordance lines and a collocation-count table with clickable cells.
    '''
    if not 'term' in request.args.keys():
        return render_template("patterns-form.html")
    else:
        # MAKE DRY
        book = request.args.get('book')
        subset = request.args.get('subset')
        term = request.args.get('term').strip()
        local_args = dict(request.args)
        kwic_filter = {}
        # Turn every non-book/subset/term parameter with a value into a
        # position -> [words] filter entry.
        for key,value in local_args.iteritems():
            if key == "subset" or key == "book" or key == "term":
                pass
            elif value[0]:
                # the values are in the first el of the list
                # 'L2': [u'a']
                values = value[0]
                values = values.split(",")
                values = [value.strip() for value in values]
                kwic_filter[key] = values
        if book and subset:
            # make sure they are not malicious names
            book = secure_filename(book)
            subset = secure_filename(subset)
            if book not in BOOKS:
                return redirect(url_for('page_not_found'))
            if subset not in SUBSETS:
                return redirect(url_for('page_not_found'))
        BASE_DIR = os.path.dirname(__file__)
        filename = "../textfiles/{0}/{1}_{0}.txt".format(subset, book)
        concordance = concordance_for_line_by_line_file(os.path.join(BASE_DIR, filename), term)
        # should not be done here
        if not concordance:
            return render_template("patterns-noresults.html")
        kwicgrouper = KWICgrouper(concordance)
        textframe = kwicgrouper.filter_textframe(kwic_filter)
        # Per-column value counts: how often each word occurs at each position.
        collocation_table = textframe.apply(pd.Series.value_counts, axis=0)
        collocation_table["Sum"] = collocation_table.sum(axis=1)
        collocation_table["Left Sum"] = collocation_table[["L5","L4","L3","L2","L1"]].sum(axis=1)
        collocation_table["Right Sum"] = collocation_table[["R5","R4","R3","R2","R1"]].sum(axis=1)
        pd.set_option('display.max_colwidth', 1000)
        # replicate the index so that it is accessible from a row-level apply function
        # http://stackoverflow.com/questions/20035518/insert-a-link-inside-a-pandas-table
        collocation_table["collocate"] = collocation_table.index
        # function that can be applied
        def linkify(row, position, term=None, book=None, subset=None):
            '''
            The purpose is to make links in the dataframe.to_html() output clickable.
            # http://stackoverflow.com/a/26614921
            '''
            if pd.notnull(row[position]):
                return """<a href="/patterns/?{0}={1}&term={2}&book={4}&subset={5}">{3}</a>""".format(position,
                                                                                                      row["collocate"],
                                                                                                      term,
                                                                                                      int(row[position]),
                                                                                                      book,
                                                                                                      subset
                                                                                                      )
        # http://localhost:5000/patterns/?L5=&L4=&L3=&L2=&L1=&term=voice&R1=&R2=&R3=&R4=&R5=&subset=long_suspensions&book=BH
        def linkify_process(df, term, book, subset):
            '''
            Linkifies every column from L5-R5
            '''
            for itm in "L5 L4 L3 L2 L1 R1 R2 R3 R4 R5".split():
                df[itm] = df.apply(linkify, args=([itm, term, book, subset]), axis=1)
            return df
        linkify_process(collocation_table, term, book, subset)
        del collocation_table["collocate"]
        # Drop the empty-string row produced by padding cells.
        collocation_table = collocation_table[collocation_table.index != ""]
        collocation_table = collocation_table.fillna("").to_html(classes=["table", "table-striped", "table-hover", "dataTable", "no-footer", "uonDatatable", 'my_class" id = "dataTableCollocation'],
                                                                 bold_rows=False,
                                                                 ).replace("<", "<").replace(">", ">")
        bookname = book
        subsetname = subset.replace("_", " ").capitalize()
        # this bit is a hack:
        # classes = 'my_class" id = "my_id'
        # http://stackoverflow.com/questions/15079118/js-datatables-from-pandas
        return render_template("patterns-results.html",
                               textframe=textframe,
                               # local_args=kwic_filter,
                               collocation_table=collocation_table,
                               bookname=bookname,
                               subsetname=subsetname)
#==============================================================================
# User annotation of subsets using Flask_admin
#==============================================================================
class SubsetModelView(ModelView):
    """Flask-Admin view for Subset records: read-mostly, annotate via tags/notes."""
    # 'notes.owner.name' works, but cannot be distinguished
    # column_filters = ('book', 'abbr', 'kind', 'corpus', 'text', 'notes', 'tags', 'tags.owner.name', 'tags.owner.email', )
    column_filters = ('book', 'abbr', 'kind', 'text', 'notes', 'tags', 'tags.owner.name', 'tags.owner.email', )
    column_searchable_list = ('abbr', 'text',)
    column_list = ('book', 'kind', 'text', 'tags', 'notes')
    # column_list = ('book', 'text',)
    # column_exclude_list = ['abbr','corpus']
    # column_editable_list could work with the above code included, but not great
    # column_editable_list = ['tags', 'notes']
    column_hide_backrefs = False
    named_filter_urls = True
    # editing
    edit_modal = True
    # Source text fields are immutable; only annotations may be edited.
    form_excluded_columns = ['book', 'abbr', 'kind', 'corpus', 'text',]
    # nice but not what we are looking for:
    # inline_models = (Tag, Note)
    # can_view_details = True
    can_create = False
    can_delete = False # disable model deletion
    can_edit = True # TODO disable editable fields
    can_export = True # FIXME
    export_max_rows = 10000
    page_size = 50  # the number of entries to display on the list view
    def is_accessible(self):
        # Any active (logged-in, not deactivated) user may see this view.
        # return current_user.has_role('can_annotate')
        return current_user.is_active()
    # def edit_form(self, obj):
    #     return self._use_filtered_tags(super(SubsetModelView, self).edit_form(obj))
    #
    # def _use_filtered_tags(self, form):
    #     form.tags.query_factory = self._get_tags_list
    #     return form
    #
    # def _get_tags_list(self):
    #     return self.session.query(Tag).filter_by(owner=current_user).all()
class TagModelView(ModelView):
    """Flask-Admin view for Tag records, scoped to the logged-in user."""
    action_disallowed_list = ['delete',]
    form_excluded_columns = ['subset',]
    column_editable_list = ['tag_name',]
    named_filter_urls = True
    # column_filters = ['owner.name', 'tag_name']
    column_filters = ['tag_name']
    def is_accessible(self):
        # return current_user.has_role('can_annotate')
        return current_user.is_active()
    # Restrict listing to the current user's own tags.
    # http://stackoverflow.com/a/30741433/2115409
    def get_query(self):
        return self.session.query(self.model).filter(self.model.owner == current_user)
    # Keep the pagination count consistent with the filtered query above.
    # http://stackoverflow.com/a/26351005/2115409
    def get_count_query(self):
        return self.session.query(func.count('*')).filter(self.model.owner==current_user)
    def create_form(self):
        return self._use_filtered_owner(super(TagModelView, self).create_form())
    def edit_form(self, obj):
        return self._use_filtered_owner(super(TagModelView, self).edit_form(obj))
    def _use_filtered_owner(self, form):
        # Limit the owner dropdown so a user can only assign themselves.
        form.owner.query_factory = self._get_owner_list
        return form
    def _get_owner_list(self):
        return self.session.query(User).filter_by(id=current_user.id).all()
class NoteModelView(ModelView):
    """Flask-Admin view for Note records, scoped to the logged-in user."""
    action_disallowed_list = ['delete',]
    column_editable_list = ['note',]
    form_excluded_columns = ['subset',]
    column_list = ('owner','note',)
    named_filter_urls = True
    column_filters = ('owner.name', 'note',)
    def is_accessible(self):
        # return current_user.has_role('can_annotate')
        return current_user.is_active()
    # Restrict listing to the current user's own notes.
    # http://stackoverflow.com/a/30741433/2115409
    def get_query(self):
        return self.session.query(self.model).filter(self.model.owner == current_user)
    # Keep the pagination count consistent with the filtered query above.
    # http://stackoverflow.com/a/26351005/2115409
    def get_count_query(self):
        return self.session.query(func.count('*')).filter(self.model.owner==current_user)
    def create_form(self):
        return self._use_filtered_owner(super(NoteModelView, self).create_form())
    def edit_form(self, obj):
        return self._use_filtered_owner(super(NoteModelView, self).edit_form(obj))
    def _use_filtered_owner(self, form):
        # Limit the owner dropdown so a user can only assign themselves.
        form.owner.query_factory = self._get_owner_list
        return form
    def _get_owner_list(self):
        return self.session.query(User).filter_by(id=current_user.id).all()
class UserAdmin(sqla.ModelView):
    """Admin view for User records; visible only to 'superman' role holders."""
    # Prevent administration of Users unless the currently logged-in user has the "superman" role
    def is_accessible(self):
        return current_user.has_role('superman')
class RoleAdmin(sqla.ModelView):
    """Admin view for Role records; visible only to 'superman' role holders."""
    # Prevent administration of Roles unless the currently logged-in user has the "superman" role
    def is_accessible(self):
        return current_user.has_role('superman')
# Wire the annotation admin interface: a documentation landing page at
# /annotation plus one view per annotatable model.
admin = Admin(
    app,
    template_mode='bootstrap3',
    index_view=AdminIndexView(
        name='Documentation',
        url='/annotation',
        template="user-annotation.html",
    )
)
admin.add_view(SubsetModelView(Subset, db.session))
admin.add_view(TagModelView(Tag, db.session))
admin.add_view(NoteModelView(Note, db.session))
admin.add_view(UserAdmin(User, db.session))
admin.add_view(RoleAdmin(Role, db.session))
if __name__ == '__main__':
    # Create tables lazily on the first request (development convenience).
    @app.before_first_request
    def initialize_database():
        db.create_all()
    # Debug toolbar only when run directly — not under a production WSGI server.
    from flask_debugtoolbar import DebugToolbarExtension
    app.debug = True
    toolbar = DebugToolbarExtension(app)
    # NOTE(review): 0.0.0.0 with debug=True exposes the Werkzeug debugger to
    # the network — development use only.
    app.run(host='0.0.0.0', port=5000, debug=True)
# ---- file boundary (concatenation artifact) ----
# Tai Sakuma <tai.sakuma@gmail.com>
import sys
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl.selection.factories.factory import AllFactory
from alphatwirl.selection.factories.factory import AnyFactory
from alphatwirl.selection.factories.factory import NotFactory
##__________________________________________________________________||
@pytest.fixture()
def mock_call_factory(monkeypatch):
    """Replace ``call_factory`` in the factory module with a Mock and return it."""
    patched = mock.Mock()
    target_module = sys.modules['alphatwirl.selection.factories.factory']
    monkeypatch.setattr(target_module, 'call_factory', patched)
    return patched
@pytest.mark.parametrize('kwargs, expected_kwargs', [
    pytest.param(
        dict(
            path_cfg_list = ("ev : ev.nJet[0] >= 2", "ev : ev.nMET[0] >= 200"),
            name='test_all',
        ),
        dict(
            name='test_all',
        ),
        id='simple',
    ),
])
def test_AllFactory(kwargs, expected_kwargs, mock_call_factory):
    """AllFactory builds AllClass(name=...), routes each path_cfg through
    call_factory, and add()s each resulting selection to the instance."""
    MockClass = mock.Mock()
    obj = AllFactory(AllClass=MockClass, **kwargs)
    # The container class is constructed once, without the path_cfg_list.
    assert [mock.call(**expected_kwargs)] == MockClass.call_args_list
    assert obj == MockClass()
    # Each path config is delegated to call_factory with the class forwarded.
    assert [
        mock.call('ev : ev.nJet[0] >= 2', AllClass=MockClass),
        mock.call('ev : ev.nMET[0] >= 200', AllClass=MockClass),
    ] == mock_call_factory.call_args_list
    # The factory results are added to the container, in order.
    assert [
        mock.call(mock_call_factory()),
        mock.call(mock_call_factory()),
    ] == MockClass().add.call_args_list
@pytest.mark.parametrize('kwargs, expected_kwargs', [
    pytest.param(
        dict(
            path_cfg_list = ("ev : ev.nJet[0] >= 2", "ev : ev.nMET[0] >= 200"),
            name='test_any',
        ),
        dict(
            name='test_any',
        ),
        id='simple',
    ),
])
def test_AnyFactory(kwargs, expected_kwargs, mock_call_factory):
    """AnyFactory builds AnyClass(name=...), routes each path_cfg through
    call_factory, and add()s each resulting selection to the instance."""
    MockClass = mock.Mock()
    obj = AnyFactory(AnyClass=MockClass, **kwargs)
    # The container class is constructed once, without the path_cfg_list.
    assert [mock.call(**expected_kwargs)] == MockClass.call_args_list
    assert obj == MockClass()
    # Each path config is delegated to call_factory with the class forwarded.
    assert [
        mock.call('ev : ev.nJet[0] >= 2', AnyClass=MockClass),
        mock.call('ev : ev.nMET[0] >= 200', AnyClass=MockClass),
    ] == mock_call_factory.call_args_list
    # The factory results are added to the container, in order.
    assert [
        mock.call(mock_call_factory()),
        mock.call(mock_call_factory()),
    ] == MockClass().add.call_args_list
@pytest.mark.parametrize('kwargs, expected_kwargs', [
    pytest.param(
        dict(
            path_cfg = "ev : ev.nJet[0] >= 2",
            name='test_not',
        ),
        dict(
            name='test_not',
        ),
        id='simple',
    ),
])
def test_NotFactory(kwargs, expected_kwargs, mock_call_factory):
    """NotFactory routes its single path_cfg through call_factory and passes
    the result as the ``selection`` keyword when constructing NotClass."""
    MockClass = mock.Mock()
    obj = NotFactory(NotClass=MockClass, **kwargs)
    # The wrapped selection is produced by call_factory.
    assert [
        mock.call('ev : ev.nJet[0] >= 2', NotClass=MockClass),
    ] == mock_call_factory.call_args_list
    # NotClass is constructed with that selection plus the remaining kwargs.
    expected_kwargs['selection'] = mock_call_factory()
    assert [mock.call(**expected_kwargs)] == MockClass.call_args_list
    assert obj == MockClass()
##__________________________________________________________________||
# update test_AllAnyNotFactory.py  (stray commit message from file concatenation; not Python code)
# Tai Sakuma <tai.sakuma@gmail.com>
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl.selection.factories.factory import AllFactory
from alphatwirl.selection.factories.factory import AnyFactory
from alphatwirl.selection.factories.factory import NotFactory
##__________________________________________________________________||
def test_AllFactory():
    """AllFactory constructs AllClass(name=...) and add()s each component in order."""
    FakeAllClass = mock.Mock()
    first = mock.sentinel.component1
    second = mock.sentinel.component1
    label = mock.sentinel.name
    obj = AllFactory(components=(first, second), AllClass=FakeAllClass, name=label)
    assert FakeAllClass.call_args_list == [mock.call(name=label)]
    assert FakeAllClass().add.call_args_list == [
        mock.call(first),
        mock.call(second),
    ]
def test_AnyFactory():
    """AnyFactory constructs AnyClass(name=...) and add()s each component in order."""
    FakeAnyClass = mock.Mock()
    first = mock.sentinel.component1
    second = mock.sentinel.component1
    label = mock.sentinel.name
    obj = AnyFactory(components=(first, second), AnyClass=FakeAnyClass, name=label)
    assert FakeAnyClass.call_args_list == [mock.call(name=label)]
    assert FakeAnyClass().add.call_args_list == [
        mock.call(first),
        mock.call(second),
    ]
def test_NotFactory():
    """NotFactory constructs NotClass(selection=<component>, name=...) directly."""
    FakeNotClass = mock.Mock()
    only = mock.sentinel.component1
    label = mock.sentinel.name
    obj = NotFactory(components=(only, ), NotClass=FakeNotClass, name=label)
    assert FakeNotClass.call_args_list == [mock.call(selection=only, name=label)]
    assert obj == FakeNotClass()
##__________________________________________________________________||
# ---- file boundary (concatenation artifact) ----
import os
import sys
import numpy as np
import io
#==============================================================================
# General-purpose MCMC diagnostic and summarization functions
#==============================================================================
def effective_sample_sizes(**kwargs):
    '''
    Estimate effective sample size for each input using AR(1) approximation.
    Each input should be a 1- or 2-dimensional ndarray. 2-dimensional inputs
    should have one variable per column, one iteration per row.
    Parameters
    ----------
    - **kwargs
        Names and arrays of MCMC draws.
    Returns
    -------
    - If only one array of draws is provided, a single array containing the
      effective sample size(s) for those variables.
    - If multiple arrays are provided, a dictionary with keys identical to
      those provided as parameters and one array per input containing
      effective sample size(s).
    Raises
    ------
    - ValueError if no arrays of draws are provided.
    '''
    # Ensure that at least one input was provided.
    # Bug fix: the error was previously *returned* instead of raised, so
    # callers silently received a ValueError instance as the "result".
    if len(kwargs) < 1:
        raise ValueError('Must provide at least one array of draws.')
    # Allocate empty dictionary for results
    ess = {}
    # Iterate over arrays of draws (items() is valid on Python 2 and 3,
    # unlike the previous iteritems())
    for var, draws in kwargs.items():
        # Add dimension to 1d arrays
        if len(np.shape(draws)) < 2:
            draws = draws[:,np.newaxis]
        # Demean the draws
        draws = draws - draws.mean(axis=0)
        # Compute lag-1 autocorrelation by column
        acf = np.mean(draws[1:]*draws[:-1], axis=0) / np.var(draws, axis=0)
        # ESS under an AR(1) model: n * (1 - acf) / (1 + acf)
        ess[var] = np.shape(draws)[0]*(1.-acf)/(1.+acf)
    if len(kwargs) > 1:
        return ess
    else:
        # Single input: return its array directly (next(iter(...)) works on
        # both Python 2 and 3, unlike keys()[0])
        return ess[next(iter(kwargs))]
def posterior_means(**kwargs):
    '''
    Estimate posterior means from inputs.
    Each input should be a 1- or 2-dimensional ndarray. 2-dimensional inputs
    should have one variable per column, one iteration per row.
    Parameters
    ----------
    - **kwargs
        Names and arrays of MCMC draws.
    Returns
    -------
    - If only one array of draws is provided, a single array containing the
      posterior mean estimate(s) for those variables.
    - If multiple arrays are provided, a dictionary with keys identical to
      those provided as parameters and one array per input containing
      posterior mean estimate(s).
    Raises
    ------
    - ValueError if no arrays of draws are provided.
    '''
    # Ensure that at least one input was provided.
    # Bug fix: the error was previously *returned* instead of raised.
    if len(kwargs) < 1:
        raise ValueError('Must provide at least one array of draws.')
    # Allocate empty dictionary for results
    means = {}
    # Iterate over arrays of draws (items() works on Python 2 and 3)
    for var, draws in kwargs.items():
        # Add dimension to 1d arrays
        if len(np.shape(draws)) < 2:
            draws = draws[:,np.newaxis]
        # Estimate posterior means (column-wise)
        means[var] = np.mean(draws, 0)
    if len(kwargs) > 1:
        return means
    else:
        # Single input: return its array directly
        return means[next(iter(kwargs))]
def posterior_variances(**kwargs):
    '''
    Estimate posterior variances from inputs.
    Each input should be a 1- or 2-dimensional ndarray. 2-dimensional inputs
    should have one variable per column, one iteration per row.
    Parameters
    ----------
    - **kwargs
        Names and arrays of MCMC draws.
    Returns
    -------
    - If only one array of draws is provided, a single array containing the
      posterior variance estimate(s) for those variables.
    - If multiple arrays are provided, a dictionary with keys identical to
      those provided as parameters and one array per input containing
      posterior variance estimate(s).
    Raises
    ------
    - ValueError if no arrays of draws are provided.
    '''
    # Ensure that at least one input was provided.
    # Bug fix: the error was previously *returned* instead of raised.
    if len(kwargs) < 1:
        raise ValueError('Must provide at least one array of draws.')
    # Allocate empty dictionary for results
    variances = {}
    # Iterate over arrays of draws (items() works on Python 2 and 3)
    for var, draws in kwargs.items():
        # Add dimension to 1d arrays
        if len(np.shape(draws)) < 2:
            draws = draws[:,np.newaxis]
        # Estimate posterior variances (column-wise)
        variances[var] = np.var(draws, 0)
    if len(kwargs) > 1:
        return variances
    else:
        # Single input: return its array directly
        return variances[next(iter(kwargs))]
def posterior_stderrors(**kwargs):
    '''
    Estimate posterior standard errors from inputs.
    Each input should be a 1- or 2-dimensional ndarray. 2-dimensional inputs
    should have one variable per column, one iteration per row.
    Parameters
    ----------
    - **kwargs
        Names and arrays of MCMC draws.
    Returns
    -------
    - If only one array of draws is provided, a single array containing the
      posterior standard error estimate(s) for those variables.
    - If multiple arrays are provided, a dictionary with keys identical to
      those provided as parameters and one array per input containing
      posterior standard error estimate(s).
    Raises
    ------
    - ValueError if no arrays of draws are provided.
    '''
    # Ensure that at least one input was provided.
    # Bug fix: the error was previously *returned* instead of raised.
    if len(kwargs) < 1:
        raise ValueError('Must provide at least one array of draws.')
    # Allocate empty dictionary for results
    stderrors = {}
    # Iterate over arrays of draws (items() works on Python 2 and 3)
    for var, draws in kwargs.items():
        # Add dimension to 1d arrays
        if len(np.shape(draws)) < 2:
            draws = draws[:,np.newaxis]
        # Estimate posterior standard errors (column-wise)
        stderrors[var] = np.std(draws, 0)
    if len(kwargs) > 1:
        return stderrors
    else:
        # Single input: return its array directly
        return stderrors[next(iter(kwargs))]
def find_maxima(x, boundary=False):
    '''
    Finds local maxima in sequence x, defining local maxima simply by
    low-high-low triplets.
    Parameters
    ----------
    - x : ndarray
        Sequence of values to search for local maxima
    - boundary : bool
        If True, include boundaries as possible maxima
    Returns
    -------
    - maxima : ndarray
        Boolean array of the same size as x with local maxima True
    '''
    # Intialization: both indicator arrays start all-ones.
    up, down = np.ones((2, x.size), dtype=int)
    # Central cases: up[i] = rising into i, down[i] = falling out of i.
    up[1:-1] = (x[1:-1]>x[:-2])
    down[1:-1] = (x[2:]<x[1:-1])
    # NOTE(review): only interior entries are overwritten above, so with
    # boundary=False both endpoints remain flagged True unconditionally;
    # callers such as greedy_maxima_search strip them via remove_boundary.
    # Confirm this is intended before changing the boundary semantics.
    if boundary:
        # Boundary cases: apply the one-sided test at each end.
        down[0] = (x[1]<x[0])
        up[-1] = (x[-1]>x[-2])
    # Logical and (elementwise product of 0/1 indicators)
    maxima = up*down
    return maxima
def local_relative_occupancy(b_t, window_small, window_local):
    '''
    Compute local relative occupancy from vector of coefficients.
    Parameters
    ----------
    - b_t : ndarray
        Array of coefficients from a single draw
    - window_small : ndarray
        Array containing small window for local relative occupancy
    - window_local : ndarray
        Array containing larger window for local relative occupancy
    Returns
    -------
    - l : ndarray
        Array of same size as b_t containing local relative occupancies
    '''
    # Ratio of coefficient mass in the small window to mass in the larger
    # window, computed positionwise via same-length convolutions.
    small_mass = np.convolve(b_t, window_small, 'same')
    local_mass = np.convolve(b_t, window_local, 'same')
    return small_mass / local_mass
def condense_detections(detections):
    '''
    Condense adjacent detections (from smoothed local occupancy) into centers
    and number of adjacent detections.
    Parameters
    ----------
    - detections : ndarray
        1d array of detected positions
    Returns
    -------
    - detections : ndarray
        1d array of detected centers
    - n : integer ndarray
        Number of detections per center
    '''
    # Work on a float copy so merged centers can be fractional.
    centers = detections.astype(float)
    counts = np.ones_like(centers)
    # Repeatedly merge the first pair of centers closer than 2 apart,
    # replacing them with their count-weighted average.
    while True:
        gaps = np.diff(centers)
        close = np.where(gaps < 2)[0]
        if close.size == 0:
            break
        i = np.min(close)
        totals = centers * counts
        totals = np.r_[totals[:i], totals[i] + totals[i+1], totals[i+2:]]
        counts = np.r_[counts[:i], counts[i] + counts[i+1], counts[i+2:]]
        centers = totals / counts
    return centers, counts
def greedy_maxima_search(x, min_spacing=100, remove_boundary=1, verbose=0):
    '''
    Greedily search for local maxima in sequence subject to minimum spacing
    constraint.
    Parameters
    ----------
    - x : ndarray
        1d sequence of values to search for local maxima
    - min_spacing : int
        Minimum spacing of positions. Greedy search continues until this
        constraint is met.
    - remove_boundary : int
        Length of region to exclude at each end of the sequence.
    - verbose : int
        Level of verbosity in output
    Returns
    -------
    - out : ndarray
        Integer array of same shape as x containing ones at positions found in
        greedy search and zeros everywhere else.
    '''
    # Find local maxima in sequence; need indices of maxima, not binary
    # indicators
    positions = np.where(find_maxima(x))[0]
    if remove_boundary > 0:
        # Exclude boundary positions
        positions = positions[positions>=remove_boundary]
        positions = positions[positions<x.size-remove_boundary]
    # Get spacing
    spacing = np.diff(positions)
    # Check for bad overlaps
    while spacing.size > 0 and spacing.min() < min_spacing:
        # Save positions from previous iterations
        positions_last = positions.copy()
        # Find bad positions
        bad = np.where(spacing < min_spacing)[0]
        # Find first bad position
        first_bad = np.min(bad)
        # Find which positions overlap with given position
        # First, get where overlaps below threshold are located
        good = np.where(spacing >= min_spacing)[0]
        # Get number of positions from top bad one to good ones
        dist = first_bad - good
        # Find limits of bad cluster
        if np.any(dist<0):
            last_in_cluster = good[dist<0][np.argmax(dist[dist<0])]
            last_in_cluster = min(last_in_cluster+1, spacing.size+1)
        else:
            last_in_cluster = spacing.size+1
        if np.any(dist>0):
            first_in_cluster = good[dist>0][np.argmin(dist[dist>0])]
            first_in_cluster = max(0,first_in_cluster+1)
        else:
            first_in_cluster = 0
        # Check coefficients of positions in cluster for maximum
        top_in_cluster = np.argmax(x[positions[first_in_cluster:
                                               last_in_cluster]])
        top_in_cluster = first_in_cluster + top_in_cluster
        # Handle non-uniqueness
        top_in_cluster = np.min(top_in_cluster)
        # Eliminate bad neighbors from positions
        keep = np.ones(positions.size, dtype=bool)
        if top_in_cluster > 0:
            space = (positions[top_in_cluster] - positions[top_in_cluster-1])
            if space < min_spacing:
                keep[top_in_cluster-1] = False
        if top_in_cluster < positions.size-1:
            space = (positions[top_in_cluster+1] - positions[top_in_cluster])
            if space < min_spacing:
                keep[top_in_cluster+1] = False
        positions = positions[keep]
        if positions.size == positions_last.size:
            # Fix: replaced Python 2-only ``print >>`` statements with
            # version-portable stderr writes.
            sys.stderr.write('Error --- greedy search is stuck\n')
            sys.stderr.write('%s %s\n' % (positions, spacing))
            break
        if verbose:
            sys.stderr.write('%s %s\n' % (positions, spacing))
        # Update spacing
        spacing = np.diff(positions)
    # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int gives the identical dtype.
    out = np.zeros(np.size(x), dtype=int)
    out[positions] = 1
    return out
def summarise(cfg, chrom=1, null=False):
    '''
    Coordinate summarisation of MCMC results.
    Loads draws for one chromosome from an npz archive, computes ESS,
    exceedance probabilities, local relative occupancy summaries, and
    posterior moments, writes a summary table (and optional detections),
    then removes the extracted scratch files.
    Parameters
    ----------
    - cfg : dictionary
        Dictionary of parameters containing at least those relevant MCMC
        draw and summary output paths and parameters for summarization.
    - chrom : int
        Index of chromosome to analyze
    - null : bool
        Summarise null results?
    Returns
    -------
    - status : int
        Integer status for summarisation. 0 for success, > 0 for failure.
    '''
    # Reference useful information in local namespace
    n_burnin = cfg['mcmc_params']['n_burnin']
    scratch = cfg['mcmc_summaries']['path_scratch']
    width_local = cfg['mcmc_summaries']['width_local']
    concentration_pm = cfg['mcmc_summaries']['concentration_pm']
    p_detect = cfg['mcmc_summaries']['p_detect']
    # Check for existence and writeability of scratch directory
    if os.access(scratch, os.F_OK):
        # It exists, check for read-write
        if not os.access(scratch, os.R_OK | os.W_OK):
            # NOTE: Python 2-only print syntax; this module predates Python 3.
            print >> sys.stderr, ("Error --- Cannot read and write to %s" %
                                  scratch)
            return 1
    else:
        # Otherwise, try to make the directory
        os.makedirs(scratch)
    # Extract results to scratch directory
    if null:
        pattern_results = cfg['mcmc_output']['null_out_pattern']
    else:
        pattern_results = cfg['mcmc_output']['out_pattern']
    pattern_results = pattern_results.strip()
    # Pattern first substitutes cfg keys, then a %-placeholder for chrom.
    path_results = pattern_results.format(**cfg) % chrom
    f = np.load(path_results)
    f.zip.extractall(scratch)
    names_npy = f.zip.namelist()
    f.close()
    # Load results of interest
    theta = np.load(scratch + '/theta.npy')
    mu = np.load(scratch + '/mu.npy')
    # Load region type information: the chrom-th line of the regions file.
    with open(cfg['data']['regions_path'].format(**cfg), 'rb') as f:
        lines_read = 0
        for line in f:
            lines_read += 1
            if lines_read == chrom:
                region_types = np.fromstring(line.strip(), sep=' ', dtype=int)
                break
    # Remove burnin
    if n_burnin > 0:
        mu = mu[n_burnin:]
        theta = theta[n_burnin:]
    # Compute effective sample sizes
    n_eff = effective_sample_sizes(theta=theta)
    # Estimate P(theta_i > mu), using each position's region-type mean
    p_theta_gt_mu = np.mean(theta - mu[:,region_types] > 0, 0)
    # Compute local relative occupancy per draw (single-basepair numerator)
    window_local = np.ones(width_local)
    local_occupancy_draws = np.empty_like(theta)
    for t in xrange(theta.shape[0]):
        local_occupancy_draws[t] = local_relative_occupancy(np.exp(theta[t]),
                                                            np.ones(1),
                                                            window_local)
    # Posterior probability of single-basepair concentrations
    # (baseline = occupancy ratio expected under a flat profile)
    baseline = (1. / np.convolve(np.ones_like(theta[0]), window_local, 'same'))
    p_local_concentration_exact = np.mean(local_occupancy_draws > baseline, 0)
    # Posterior probability of +/-(concentration_pm) concentrations
    window_pm = np.ones(1 + 2*concentration_pm)
    local_occupancy_smoothed = np.empty_like(theta)
    for t in xrange(theta.shape[0]):
        local_occupancy_smoothed[t] = local_relative_occupancy(np.exp(theta[t]),
                                                               window_pm,
                                                               window_local)
    baseline_smoothed = (np.convolve(np.ones_like(theta[0]), window_pm, 'same')
                         / np.convolve(np.ones_like(theta[0]), window_local,
                                       'same'))
    p_local_concentration_pm = np.mean(local_occupancy_smoothed >
                                       baseline_smoothed, 0)
    # Compute posterior means
    theta_postmean = np.mean(theta, 0)
    b_postmean = np.mean(np.exp(theta), 0)
    # Compute standard errors
    theta_se = np.std(theta, 0)
    b_se = np.std(np.exp(theta), 0)
    # Compute posterior medians
    theta_postmed = np.median(theta, 0)
    b_postmed = np.exp(theta_postmed)
    # Provide nicely-formatted table of output for analyses and plotting
    if null:
        pattern_summaries = cfg['mcmc_output']['null_summary_pattern']
    else:
        pattern_summaries = cfg['mcmc_output']['summary_pattern']
    pattern_summaries = pattern_summaries.strip()
    path_summaries = pattern_summaries.format(**cfg) % chrom
    summaries = np.rec.fromarrays([theta_postmean, theta_postmed, theta_se,
                                   b_postmean, b_postmed, b_se, n_eff,
                                   p_theta_gt_mu, p_local_concentration_exact,
                                   p_local_concentration_pm],
                                  names=('theta', 'theta_med', 'se_theta', 'b',
                                         'b_med', 'se_b', 'n_eff',
                                         'p_theta_gt_mu',
                                         'p_local_concentration_pm0',
                                         'p_local_concentration_pm%d' %
                                         concentration_pm))
    # NOTE(review): ``io`` here appears to be a project-local module that
    # provides write_recarray_to_file, not the stdlib io — confirm import.
    io.write_recarray_to_file(fname=path_summaries, data=summaries,
                              header=True, sep=' ')
    # Run detection, if requested
    if p_detect is not None and not null:
        # Find detected positions
        detected = np.where(p_local_concentration_pm > p_detect)[0]
        # Condense regions
        detected, n = condense_detections(detected)
        # Write detections to text file
        pattern_detections = cfg['mcmc_output']['detections_pattern']
        pattern_detections = pattern_detections.strip()
        path_detections = pattern_detections.format(**cfg) % chrom
        detections = np.rec.fromarrays([detected, n],
                                       names=('pos', 'n'))
        io.write_recarray_to_file(fname=path_detections,
                                  data=detections, header=True,
                                  sep=' ')
    # Clean-up scratch directory
    for name in names_npy:
        os.remove(scratch + '/' + name)
    return 0
# Add manual GC and deletion of local occupancies; mmap theta as read-only
# (stray commit message from file concatenation; not Python code)
import gc
import os
import sys
import numpy as np
import io
#==============================================================================
# General-purpose MCMC diagnostic and summarization functions
#==============================================================================
def effective_sample_sizes(**kwargs):
    '''
    Estimate effective sample size for each input using AR(1) approximation.
    Each input should be a 1- or 2-dimensional ndarray. 2-dimensional inputs
    should have one variable per column, one iteration per row.
    Parameters
    ----------
    - **kwargs
        Names and arrays of MCMC draws.
    Returns
    -------
    - If only one array of draws is provided, a single array containing the
      effective sample size(s) for those variables.
    - If multiple arrays are provided, a dictionary with keys identical to
      those provided as parameters and one array per input containing
      effective sample size(s).
    Raises
    ------
    - ValueError if no arrays of draws are provided.
    '''
    # Ensure that at least one input was provided.
    # Bug fix: the error was previously *returned* instead of raised.
    if len(kwargs) < 1:
        raise ValueError('Must provide at least one array of draws.')
    # Allocate empty dictionary for results
    ess = {}
    # Iterate over arrays of draws (items() works on Python 2 and 3)
    for var, draws in kwargs.items():
        # Add dimension to 1d arrays
        if len(np.shape(draws)) < 2:
            draws = draws[:,np.newaxis]
        # Demean the draws
        draws = draws - draws.mean(axis=0)
        # Compute lag-1 autocorrelation by column
        acf = np.mean(draws[1:]*draws[:-1], axis=0) / np.var(draws, axis=0)
        # ESS under an AR(1) model: n * (1 - acf) / (1 + acf)
        ess[var] = np.shape(draws)[0]*(1.-acf)/(1.+acf)
    if len(kwargs) > 1:
        return ess
    else:
        # Single input: return its array directly
        return ess[next(iter(kwargs))]
def posterior_means(**kwargs):
    '''
    Estimate posterior means from inputs.
    Each input should be a 1- or 2-dimensional ndarray. 2-dimensional inputs
    should have one variable per column, one iteration per row.
    Parameters
    ----------
    - **kwargs
        Names and arrays of MCMC draws.
    Returns
    -------
    - If only one array of draws is provided, a single array containing the
      posterior mean estimate(s) for those variables.
    - If multiple arrays are provided, a dictionary with keys identical to
      those provided as parameters and one array per input containing
      posterior mean estimate(s).
    Raises
    ------
    - ValueError if no arrays of draws are provided.
    '''
    # Ensure that at least one input was provided.
    # Bug fix: the error was previously *returned* instead of raised.
    if len(kwargs) < 1:
        raise ValueError('Must provide at least one array of draws.')
    # Allocate empty dictionary for results
    means = {}
    # Iterate over arrays of draws (items() works on Python 2 and 3)
    for var, draws in kwargs.items():
        # Add dimension to 1d arrays
        if len(np.shape(draws)) < 2:
            draws = draws[:,np.newaxis]
        # Estimate posterior means (column-wise)
        means[var] = np.mean(draws, 0)
    if len(kwargs) > 1:
        return means
    else:
        # Single input: return its array directly
        return means[next(iter(kwargs))]
def posterior_variances(**kwargs):
    '''
    Estimate posterior variances from inputs.
    Each input should be a 1- or 2-dimensional ndarray. 2-dimensional inputs
    should have one variable per column, one iteration per row.
    Parameters
    ----------
    - **kwargs
        Names and arrays of MCMC draws.
    Returns
    -------
    - If only one array of draws is provided, a single array containing the
      posterior variance estimate(s) for those variables.
    - If multiple arrays are provided, a dictionary with keys identical to
      those provided as parameters and one array per input containing
      posterior variance estimate(s).
    Raises
    ------
    - ValueError if no arrays of draws are provided.
    '''
    # Ensure that at least one input was provided.
    # Bug fix: the error was previously *returned* instead of raised.
    if len(kwargs) < 1:
        raise ValueError('Must provide at least one array of draws.')
    # Allocate empty dictionary for results
    variances = {}
    # Iterate over arrays of draws (items() works on Python 2 and 3)
    for var, draws in kwargs.items():
        # Add dimension to 1d arrays
        if len(np.shape(draws)) < 2:
            draws = draws[:,np.newaxis]
        # Estimate posterior variances (column-wise)
        variances[var] = np.var(draws, 0)
    if len(kwargs) > 1:
        return variances
    else:
        # Single input: return its array directly
        return variances[next(iter(kwargs))]
def posterior_stderrors(**kwargs):
    '''
    Estimate posterior standard errors from inputs.
    Each input should be a 1- or 2-dimensional ndarray. 2-dimensional inputs
    should have one variable per column, one iteration per row.
    Parameters
    ----------
    - **kwargs
        Names and arrays of MCMC draws.
    Returns
    -------
    - If only one array of draws is provided, a single array containing the
      posterior standard error estimate(s) for those variables.
    - If multiple arrays are provided, a dictionary with keys identical to
      those provided as parameters and one array per input containing
      posterior standard error estimate(s).
    Raises
    ------
    - ValueError if no arrays of draws are provided.
    '''
    # Ensure that at least one input was provided.
    # Bug fix: the error was previously *returned* instead of raised.
    if len(kwargs) < 1:
        raise ValueError('Must provide at least one array of draws.')
    # Allocate empty dictionary for results
    stderrors = {}
    # Iterate over arrays of draws (items() works on Python 2 and 3)
    for var, draws in kwargs.items():
        # Add dimension to 1d arrays
        if len(np.shape(draws)) < 2:
            draws = draws[:,np.newaxis]
        # Estimate posterior standard errors (column-wise)
        stderrors[var] = np.std(draws, 0)
    if len(kwargs) > 1:
        return stderrors
    else:
        # Single input: return its array directly
        return stderrors[next(iter(kwargs))]
def find_maxima(x, boundary=False):
    '''
    Finds local maxima in sequence x, defining local maxima simply by
    low-high-low triplets.
    Parameters
    ----------
    - x : ndarray
        Sequence of values to search for local maxima
    - boundary : bool
        If True, include boundaries as possible maxima
    Returns
    -------
    - maxima : ndarray
        Boolean array of the same size as x with local maxima True
    '''
    # Intialization: both indicator arrays start all-ones.
    up, down = np.ones((2, x.size), dtype=int)
    # Central cases: up[i] = rising into i, down[i] = falling out of i.
    up[1:-1] = (x[1:-1]>x[:-2])
    down[1:-1] = (x[2:]<x[1:-1])
    # NOTE(review): only interior entries are overwritten above, so with
    # boundary=False both endpoints remain flagged True unconditionally;
    # callers such as greedy_maxima_search strip them via remove_boundary.
    # Confirm this is intended before changing the boundary semantics.
    if boundary:
        # Boundary cases: apply the one-sided test at each end.
        down[0] = (x[1]<x[0])
        up[-1] = (x[-1]>x[-2])
    # Logical and (elementwise product of 0/1 indicators)
    maxima = up*down
    return maxima
def local_relative_occupancy(b_t, window_small, window_local):
    '''
    Compute local relative occupancy from vector of coefficients.
    Parameters
    ----------
    - b_t : ndarray
        Array of coefficients from a single draw
    - window_small : ndarray
        Array containing small window for local relative occupancy
    - window_local : ndarray
        Array containing larger window for local relative occupancy
    Returns
    -------
    - l : ndarray
        Array of same size as b_t containing local relative occupancies
    '''
    # Ratio of coefficient mass in the small window to mass in the larger
    # window, computed positionwise via same-length convolutions.
    small_mass = np.convolve(b_t, window_small, 'same')
    local_mass = np.convolve(b_t, window_local, 'same')
    return small_mass / local_mass
def condense_detections(detections):
    '''
    Condense adjacent detections (from smoothed local occupancy) into centers
    and number of adjacent detections.
    Parameters
    ----------
    - detections : ndarray
        1d array of detected positions
    Returns
    -------
    - detections : ndarray
        1d array of detected centers
    - n : integer ndarray
        Number of detections per center
    '''
    # Work on a float copy so merged centers can be fractional.
    centers = detections.astype(float)
    counts = np.ones_like(centers)
    # Repeatedly merge the first pair of centers closer than 2 apart,
    # replacing them with their count-weighted average.
    while True:
        gaps = np.diff(centers)
        close = np.where(gaps < 2)[0]
        if close.size == 0:
            break
        i = np.min(close)
        totals = centers * counts
        totals = np.r_[totals[:i], totals[i] + totals[i+1], totals[i+2:]]
        counts = np.r_[counts[:i], counts[i] + counts[i+1], counts[i+2:]]
        centers = totals / counts
    return centers, counts
def greedy_maxima_search(x, min_spacing=100, remove_boundary=1, verbose=0):
    '''
    Greedily search for local maxima in sequence subject to minimum spacing
    constraint.
    Parameters
    ----------
    - x : ndarray
        1d sequence of values to search for local maxima
    - min_spacing : int
        Minimum spacing of positions. Greedy search continues until this
        constraint is met.
    - remove_boundary : int
        Length of region to exclude at each end of the sequence.
    - verbose : int
        Level of verbosity in output
    Returns
    -------
    - out : ndarray
        Integer array of same shape as x containing ones at positions found in
        greedy search and zeros everywhere else.
    '''
    # NOTE(review): Python 2 module (print-statement syntax below);
    # np.int at the end is a deprecated alias for the builtin int under
    # NumPy >= 1.20 -- TODO migrate when porting.
    # Find local maxima in sequence; need indices of maxima, not binary
    # indicators
    positions = np.where(find_maxima(x))[0]
    if remove_boundary > 0:
        # Exclude boundary positions
        positions = positions[positions>=remove_boundary]
        positions = positions[positions<x.size-remove_boundary]
    # Get spacing
    # spacing[i] is the gap between positions[i] and positions[i+1].
    spacing = np.diff(positions)
    # Check for bad overlaps
    # Each iteration removes at most one neighbor of the locally-best maximum,
    # so the loop shrinks `positions` until the spacing constraint holds.
    while spacing.size > 0 and spacing.min() < min_spacing:
        # Save positions from previous iterations
        positions_last = positions.copy()
        # Find bad positions
        bad = np.where(spacing < min_spacing)[0]
        # Find first bad position
        first_bad = np.min(bad)
        # Find which positions overlap with given position
        # First, get where overlaps below threshold are located
        good = np.where(spacing >= min_spacing)[0]
        # Get number of positions from top bad one to good ones
        dist = first_bad - good
        # Find limits of bad cluster
        # The cluster is the maximal run of too-close positions containing
        # first_bad; it is bracketed by the nearest well-spaced gaps (`good`)
        # on either side.
        if np.any(dist<0):
            last_in_cluster = good[dist<0][np.argmax(dist[dist<0])]
            last_in_cluster = min(last_in_cluster+1, spacing.size+1)
        else:
            last_in_cluster = spacing.size+1
        if np.any(dist>0):
            first_in_cluster = good[dist>0][np.argmin(dist[dist>0])]
            first_in_cluster = max(0,first_in_cluster+1)
        else:
            first_in_cluster = 0
        # Check coefficients of positions in cluster for maximum
        top_in_cluster = np.argmax(x[positions[first_in_cluster:
                                               last_in_cluster]])
        top_in_cluster = first_in_cluster + top_in_cluster
        # Handle non-uniqueness
        top_in_cluster = np.min(top_in_cluster)
        # Eliminate bad neighbors from positions
        # Only the immediate neighbors of the cluster's best position are
        # dropped this iteration (greedy step).
        keep = np.ones(positions.size, dtype=bool)
        if top_in_cluster > 0:
            space = (positions[top_in_cluster] - positions[top_in_cluster-1])
            if space < min_spacing:
                keep[top_in_cluster-1] = False
        if top_in_cluster < positions.size-1:
            space = (positions[top_in_cluster+1] - positions[top_in_cluster])
            if space < min_spacing:
                keep[top_in_cluster+1] = False
        positions = positions[keep]
        # Guard against an infinite loop: if nothing was removed, bail out.
        if positions.size == positions_last.size:
            print >> sys.stderr, 'Error --- greedy search is stuck'
            print >> sys.stderr, positions, spacing
            break
        if verbose:
            print >> sys.stderr, positions, spacing
        # Update spacing
        spacing = np.diff(positions)
    # Convert surviving indices back to a 0/1 indicator array over x.
    out = np.zeros(np.size(x), dtype=np.int)
    out[positions] = 1
    return out
def summarise(cfg, chrom=1, null=False):
    '''
    Coordinate summarisation of MCMC results.
    Parameters
    ----------
    - cfg : dictionary
        Dictionary of parameters containing at least those relevant MCMC
        draw and summary output paths and parameters for summarization.
    - chrom : int
        Index of chromosome to analyze
    - null : bool
        Summarise null results?
    Returns
    -------
    - status : int
        Integer status for summarisation. 0 for success, > 0 for failure.
    '''
    # NOTE(review): Python 2 function (xrange, print-statement syntax).
    # Reference useful information in local namespace
    n_burnin = cfg['mcmc_params']['n_burnin']
    scratch = cfg['mcmc_summaries']['path_scratch']
    width_local = cfg['mcmc_summaries']['width_local']
    concentration_pm = cfg['mcmc_summaries']['concentration_pm']
    p_detect = cfg['mcmc_summaries']['p_detect']
    # Check for existence and writeability of scratch directory
    if os.access(scratch, os.F_OK):
        # It exists, check for read-write
        if not os.access(scratch, os.R_OK | os.W_OK):
            print >> sys.stderr, ("Error --- Cannot read and write to %s" %
                                  scratch)
            return 1
    else:
        # Otherwise, try to make the directory
        os.makedirs(scratch)
    # Extract results to scratch directory
    if null:
        pattern_results = cfg['mcmc_output']['null_out_pattern']
    else:
        pattern_results = cfg['mcmc_output']['out_pattern']
    pattern_results = pattern_results.strip()
    # Patterns are first str.format-expanded with cfg, then %-substituted
    # with the chromosome index.
    path_results = pattern_results.format(**cfg) % chrom
    # NOTE(review): relies on np.load returning an NpzFile and on its
    # (private) `zip` attribute to extract members -- fragile across NumPy
    # versions; TODO confirm.
    f = np.load(path_results)
    f.zip.extractall(scratch)
    names_npy = f.zip.namelist()
    f.close()
    # Load results of interest
    # theta is memory-mapped: draws-by-position matrix can be large.
    theta = np.load(scratch + '/theta.npy', mmap_mode='r')
    mu = np.load(scratch + '/mu.npy')
    # Load region type information
    # The regions file has one whitespace-separated line per chromosome;
    # read up to line `chrom` (1-indexed).
    with open(cfg['data']['regions_path'].format(**cfg), 'rb') as f:
        lines_read = 0
        for line in f:
            lines_read += 1
            if lines_read == chrom:
                # NOTE(review): np.fromstring with sep is deprecated in
                # favor of np.fromstring -> np.fromiter/np.loadtxt in
                # modern NumPy.
                region_types = np.fromstring(line.strip(), sep=' ', dtype=int)
                break
    # Remove burnin
    if n_burnin > 0:
        mu = mu[n_burnin:]
        theta = theta[n_burnin:]
    # Compute effective sample sizes
    n_eff = effective_sample_sizes(theta=theta)
    # Estimate P(theta_i > mu)
    # mu is indexed per draw by each position's region type.
    p_theta_gt_mu = np.mean(theta - mu[:,region_types] > 0, 0)
    # Compute local relative occupancy
    window_local = np.ones(width_local)
    local_occupancy_draws = np.empty_like(theta)
    for t in xrange(theta.shape[0]):
        local_occupancy_draws[t] = local_relative_occupancy(np.exp(theta[t]),
                                                            np.ones(1),
                                                            window_local)
    # Posterior probability of single-basepair concentrations
    # Baseline is the occupancy expected if mass were spread uniformly.
    baseline = (1. / np.convolve(np.ones_like(theta[0]), window_local, 'same'))
    p_local_concentration_exact = np.mean(local_occupancy_draws > baseline, 0)
    # Clean-up
    del local_occupancy_draws
    gc.collect()
    # Posterior probability of +/-(concentration_pm) concentrations
    window_pm = np.ones(1 + 2*concentration_pm)
    local_occupancy_smoothed = np.empty_like(theta)
    for t in xrange(theta.shape[0]):
        local_occupancy_smoothed[t] = local_relative_occupancy(np.exp(theta[t]),
                                                               window_pm,
                                                               window_local)
    baseline_smoothed = (np.convolve(np.ones_like(theta[0]), window_pm, 'same')
                         / np.convolve(np.ones_like(theta[0]), window_local,
                                       'same'))
    p_local_concentration_pm = np.mean(local_occupancy_smoothed >
                                       baseline_smoothed, 0)
    # Clean-up
    del local_occupancy_smoothed
    gc.collect()
    # Compute posterior means
    theta_postmean = np.mean(theta, 0)
    b_postmean = np.mean(np.exp(theta), 0)
    # Compute standard errors
    theta_se = np.std(theta, 0)
    b_se = np.std(np.exp(theta), 0)
    # Compute posterior medians
    theta_postmed = np.median(theta, 0)
    b_postmed = np.exp(theta_postmed)
    # Provide nicely-formatted table of output for analyses and plotting
    if null:
        pattern_summaries = cfg['mcmc_output']['null_summary_pattern']
    else:
        pattern_summaries = cfg['mcmc_output']['summary_pattern']
    pattern_summaries = pattern_summaries.strip()
    path_summaries = pattern_summaries.format(**cfg) % chrom
    summaries = np.rec.fromarrays([theta_postmean, theta_postmed, theta_se,
                                   b_postmean, b_postmed, b_se, n_eff,
                                   p_theta_gt_mu, p_local_concentration_exact,
                                   p_local_concentration_pm],
                                  names=('theta', 'theta_med', 'se_theta', 'b',
                                         'b_med', 'se_b', 'n_eff',
                                         'p_theta_gt_mu',
                                         'p_local_concentration_pm0',
                                         'p_local_concentration_pm%d' %
                                         concentration_pm))
    io.write_recarray_to_file(fname=path_summaries, data=summaries,
                              header=True, sep=' ')
    # Run detection, if requested
    # Detection is skipped for null runs even when p_detect is set.
    if p_detect is not None and not null:
        # Find detected positions
        detected = np.where(p_local_concentration_pm > p_detect)[0]
        # Condense regions
        detected, n = condense_detections(detected)
        # Write detections to text file
        pattern_detections = cfg['mcmc_output']['detections_pattern']
        pattern_detections = pattern_detections.strip()
        path_detections = pattern_detections.format(**cfg) % chrom
        detections = np.rec.fromarrays([detected, n],
                                       names=('pos', 'n'))
        io.write_recarray_to_file(fname=path_detections,
                                  data=detections, header=True,
                                  sep=' ')
    # Clean-up scratch directory
    for name in names_npy:
        os.remove(scratch + '/' + name)
    return 0
|
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic TFX example gen base executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import bisect
import hashlib
import os
from typing import Any, Dict, List, Text, Union
from absl import logging
import apache_beam as beam
from six import with_metaclass
import tensorflow as tf
from google.protobuf import json_format
from tfx import types
from tfx.components.base import base_executor
from tfx.components.example_gen import utils
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
# Default file name for TFRecord output file prefix. Used by _WriteSplit when
# joining the output split path; the writer appends the '.gz' suffix (and,
# presumably, shard numbering -- standard Beam behavior, confirm if relied on).
DEFAULT_FILE_NAME = 'data_tfrecord'
def _ExamplePartitionKey(record: tf.train.Example,
                         split_config: example_gen_pb2.SplitConfig) -> bytes:
  """Generates key for partition for tf.train.Example."""
  if not split_config.HasField('partition_feature_name'):
    # No partition feature configured: key on the whole serialized example.
    return record.SerializeToString(deterministic=True)

  # A specific feature was requested as the partition key; validate it.
  key_feature = split_config.partition_feature_name
  if key_feature not in record.features.feature:
    raise RuntimeError('Feature name `{}` does not exist.'.format(key_feature))
  feature = record.features.feature[key_feature]
  if not feature.HasField('kind'):
    raise RuntimeError('Partition feature does not contain any value.')
  is_supported = feature.HasField('bytes_list') or feature.HasField(
      'int64_list')
  if not is_supported:
    raise RuntimeError('Only `bytes_list` and `int64_list` features are '
                       'supported for partition.')
  # Deterministic serialization keeps the partition key stable across runs.
  return feature.SerializeToString(deterministic=True)
def _PartitionFn(
    record: Union[tf.train.Example, bytes],
    num_partitions: int,
    buckets: List[int],
    split_config: example_gen_pb2.SplitConfig,
) -> int:
  """Partition function for the ExampleGen's output splits."""
  assert num_partitions == len(
      buckets), 'Partitions do not match bucket number.'
  if isinstance(record, tf.train.Example):
    # tf.train.Example records may partition on a designated feature.
    partition_str = _ExamplePartitionKey(record, split_config)
  elif split_config.HasField('partition_feature_name'):
    raise RuntimeError('Split by `partition_feature_name` is only supported '
                       'for FORMAT_TF_EXAMPLE payload format.')
  else:
    # Serialized bytes: the raw payload itself serves as the partition key.
    partition_str = record
  digest = hashlib.sha256(partition_str).hexdigest()
  bucket = int(digest, 16) % buckets[-1]
  # buckets holds cumulative sizes; e.g. [10, 50, 80] yields three splits:
  #   0 <= bucket < 10 -> 0, 10 <= bucket < 50 -> 1, 50 <= bucket < 80 -> 2.
  return bisect.bisect(buckets, bucket)
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[tf.train.Example, bytes])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def _WriteSplit(example_split: beam.pvalue.PCollection,
                output_split_path: Text) -> beam.pvalue.PDone:
  """Shuffles and writes output split as serialized records in TFRecord."""

  def _EnsureBytes(x):
    # tf.train.Example -> serialized bytes; raw bytes pass through untouched.
    if isinstance(x, tf.train.Example):
      return x.SerializeToString()
    return x

  output_prefix = os.path.join(output_split_path, DEFAULT_FILE_NAME)
  serialized = example_split | 'MaybeSerialize' >> beam.Map(_EnsureBytes)
  # TODO(jyzhao): make shuffle optional.
  shuffled = serialized | 'Shuffle' >> beam.transforms.Reshuffle()
  # TODO(jyzhao): multiple output format.
  return shuffled | 'Write' >> beam.io.WriteToTFRecord(
      output_prefix, file_name_suffix='.gz')
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(Union[tf.train.Example, bytes])
def _InputToExampleOrBytes(
    pipeline: beam.Pipeline,
    input_to_example: beam.PTransform,
    exec_properties: Dict[Text, Any],
    split_pattern: Text,
) -> beam.pvalue.PCollection:
  """Converts input into a tf.train.Example, or a bytes (serialized proto)."""
  # Instantiate the subclass-supplied transform for this split's pattern,
  # then apply it to the pipeline root.
  read_transform = input_to_example(exec_properties, split_pattern)
  return pipeline | 'InputSourceToExampleOrBytes' >> read_transform
class BaseExampleGenExecutor(
    with_metaclass(abc.ABCMeta, base_executor.BaseExecutor)):
  """Generic TFX example gen base executor.

  The base ExampleGen executor takes a configuration and converts external data
  sources to TensorFlow Examples (tf.train.Example), or any other protocol
  buffer as subclass defines.

  The common configuration (defined in
  https://github.com/tensorflow/tfx/blob/master/tfx/proto/example_gen.proto#L44.)
  describes the general properties of input data and shared instructions when
  producing output data.

  The conversion is done in `GenerateExamplesByBeam` as a Beam pipeline, which
  validates the configuration, reads the external data sources, converts the
  record in the input source to tf.Example if needed, and splits the examples if
  the output split config is given. Then the executor's `Do` writes the results
  in splits to the output path.

  For simple custom ExampleGens, the details of transforming input data
  record(s) to a tf.Example is expected to be given in
  `GetInputSourceToExamplePTransform`, which returns a Beam PTransform with the
  actual implementation. For complex use cases, such as joining multiple data
  sources and different interpretations of the configurations, the custom
  ExampleGen can override `GenerateExamplesByBeam`.
  """

  @abc.abstractmethod
  def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
    """Returns PTransform for converting input source to records.

    The record is by default assumed to be tf.train.Example protos, subclassses
    can serialize any protocol buffer into bytes as output PCollection,
    so long as the downstream component can consume it.

    Note that each input split will be transformed by this function separately.
    For complex use case, consider override 'GenerateExamplesByBeam' instead.

    Here is an example PTransform:
      @beam.ptransform_fn
      @beam.typehints.with_input_types(beam.Pipeline)
      @beam.typehints.with_output_types(Union[tf.train.Example, bytes])
      def ExamplePTransform(
          pipeline: beam.Pipeline,
          exec_properties: Dict[Text, Any],
          split_pattern: Text) -> beam.pvalue.PCollection
    """
    pass

  def GenerateExamplesByBeam(
      self,
      pipeline: beam.Pipeline,
      exec_properties: Dict[Text, Any],
  ) -> Dict[Text, beam.pvalue.PCollection]:
    """Converts input source to TF example splits based on configs.

    Custom ExampleGen executor should provide GetInputSourceToExamplePTransform
    for converting input split to TF Examples. Overriding this
    'GenerateExamplesByBeam' method instead if complex logic is need, e.g.,
    custom spliting logic.

    Args:
      pipeline: beam pipeline.
      exec_properties: A dict of execution properties. Depends on detailed
        example gen implementation.
        - input_base: an external directory containing the data files.
        - input_config: JSON string of example_gen_pb2.Input instance,
          providing input configuration.
        - output_config: JSON string of example_gen_pb2.Output instance,
          providing output configuration.
        - output_data_format: Payload format of generated data in output
          artifact, one of example_gen_pb2.PayloadFormat enum.

    Returns:
      Dict of beam PCollection with split name as key, each PCollection is a
      single output split that contains serialized TF Examples.
    """
    # Get input split information.
    input_config = example_gen_pb2.Input()
    json_format.Parse(exec_properties[utils.INPUT_CONFIG_KEY], input_config)
    # Get output split information.
    output_config = example_gen_pb2.Output()
    json_format.Parse(exec_properties[utils.OUTPUT_CONFIG_KEY], output_config)
    # Get output split names.
    split_names = utils.generate_output_split_names(input_config, output_config)
    # Make beam_pipeline_args available in exec_properties since certain
    # example_gen executors need this information.
    # TODO(b/155441037): Revisit necessity of this when BigQueryExampleGen
    # does not branch on project or runner anymore.
    exec_properties['_beam_pipeline_args'] = self._beam_pipeline_args or []

    example_splits = []
    input_to_example = self.GetInputSourceToExamplePTransform()
    if output_config.split_config.splits:
      # Use output splits, input must have only one split.
      assert len(
          input_config.splits
      ) == 1, 'input must have only one split when output split is specified.'
      # Calculate split buckets as cumulative sums of hash_buckets, so
      # _PartitionFn can map a hash into a split with bisect.
      buckets = []
      total_buckets = 0
      for split in output_config.split_config.splits:
        total_buckets += split.hash_buckets
        buckets.append(total_buckets)
      example_splits = (
          pipeline
          | 'InputToExampleOrBytes' >>
          # pylint: disable=no-value-for-parameter
          _InputToExampleOrBytes(input_to_example, exec_properties,
                                 input_config.splits[0].pattern)
          | 'SplitData' >> beam.Partition(_PartitionFn, len(buckets), buckets,
                                          output_config.split_config))
    else:
      # Use input splits.
      for split in input_config.splits:
        examples = (
            pipeline
            | 'InputToExampleOrBytes[{}]'.format(split.name) >>
            # pylint: disable=no-value-for-parameter
            _InputToExampleOrBytes(input_to_example, exec_properties,
                                   split.pattern))
        example_splits.append(examples)

    result = {}
    for index, example_split in enumerate(example_splits):
      result[split_names[index]] = example_split
    return result

  def Do(
      self,
      input_dict: Dict[Text, List[types.Artifact]],
      output_dict: Dict[Text, List[types.Artifact]],
      exec_properties: Dict[Text, Any],
  ) -> None:
    """Take input data source and generates serialized data splits.

    The output is intended to be serialized tf.train.Examples protocol buffer
    in gzipped TFRecord format, but subclasses can choose to override to write
    to any serialized records payload into gzipped TFRecord as specified,
    so long as downstream component can consume it. The format of payload is
    added to `payload_format` custom property of the output Example artifact.

    Args:
      input_dict: Input dict from input key to a list of Artifacts. Depends on
        detailed example gen implementation.
      output_dict: Output dict from output key to a list of Artifacts.
        - examples: splits of tf examples.
      exec_properties: A dict of execution properties. Depends on detailed
        example gen implementation.
        - input_base: an external directory containing the data files.
        - input_config: JSON string of example_gen_pb2.Input instance,
          providing input configuration.
        - output_config: JSON string of example_gen_pb2.Output instance,
          providing output configuration.
        - output_data_format: Payload format of generated data in output
          artifact, one of example_gen_pb2.PayloadFormat enum.

    Returns:
      None
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    logging.info('Generating examples.')
    with self._make_beam_pipeline() as pipeline:
      example_splits = self.GenerateExamplesByBeam(pipeline, exec_properties)
      # pylint: disable=expression-not-assigned, no-value-for-parameter
      for split_name, example_split in example_splits.items():
        # Bug fix: the original had a stray close-paren after `% split_name`
        # (unbalanced parentheses, a SyntaxError) and mixed `{}`/`%`
        # formatting in the label ('WriteSplit[{%s}]'), which would leave
        # literal braces in the Beam step name.
        (example_split
         | 'WriteSplit[%s]' % split_name >> _WriteSplit(
             artifact_utils.get_split_uri(output_dict[utils.EXAMPLES_KEY],
                                          split_name)))
      # pylint: enable=expression-not-assigned, no-value-for-parameter

    output_payload_format = exec_properties.get(utils.OUTPUT_DATA_FORMAT_KEY)
    if output_payload_format:
      for output_examples_artifact in output_dict[utils.EXAMPLES_KEY]:
        output_examples_artifact.set_string_custom_property(
            utils.PAYLOAD_FORMAT_PROPERTY_NAME,
            example_gen_pb2.PayloadFormat.Name(output_payload_format))

    logging.info('Examples generated.')
fixed typo: corrected the 'WriteSplit[...]' pipeline label format string (mixed `{}`/`%` styles) in BaseExampleGenExecutor.Do
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic TFX example gen base executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import bisect
import hashlib
import os
from typing import Any, Dict, List, Text, Union
from absl import logging
import apache_beam as beam
from six import with_metaclass
import tensorflow as tf
from google.protobuf import json_format
from tfx import types
from tfx.components.base import base_executor
from tfx.components.example_gen import utils
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
# Default file name for TFRecord output file prefix. Used by _WriteSplit when
# joining the output split path; the writer appends the '.gz' suffix (and,
# presumably, shard numbering -- standard Beam behavior, confirm if relied on).
DEFAULT_FILE_NAME = 'data_tfrecord'
def _ExamplePartitionKey(record: tf.train.Example,
                         split_config: example_gen_pb2.SplitConfig) -> bytes:
  """Generates key for partition for tf.train.Example.

  Returns the serialized partition feature when `partition_feature_name` is
  configured in `split_config`, otherwise the deterministic serialization of
  the whole example.

  Raises:
    RuntimeError: if the configured feature is missing from the record, holds
      no value, or is not a `bytes_list`/`int64_list` feature.
  """
  if not split_config.HasField('partition_feature_name'):
    # No partition feature configured: key on the whole serialized example.
    return record.SerializeToString(deterministic=True)
  # Use a feature for partitioning the examples.
  feature_name = split_config.partition_feature_name
  if feature_name not in record.features.feature:
    raise RuntimeError('Feature name `{}` does not exist.'.format(feature_name))
  feature = record.features.feature[feature_name]
  if not feature.HasField('kind'):
    raise RuntimeError('Partition feature does not contain any value.')
  if (not feature.HasField('bytes_list') and
      not feature.HasField('int64_list')):
    raise RuntimeError('Only `bytes_list` and `int64_list` features are '
                       'supported for partition.')
  # Deterministic serialization keeps the partition key stable across runs.
  return feature.SerializeToString(deterministic=True)
def _PartitionFn(
    record: Union[tf.train.Example, bytes],
    num_partitions: int,
    buckets: List[int],
    split_config: example_gen_pb2.SplitConfig,
) -> int:
  """Partition function for the ExampleGen's output splits.

  Hashes a partition key derived from `record` and maps the hash into the
  cumulative bucket boundaries in `buckets`, returning the output split index.
  """
  assert num_partitions == len(
      buckets), 'Partitions do not match bucket number.'
  if isinstance(record, tf.train.Example):
    # tf.train.Example records may partition on a designated feature.
    partition_str = _ExamplePartitionKey(record, split_config)
  elif split_config.HasField('partition_feature_name'):
    raise RuntimeError('Split by `partition_feature_name` is only supported '
                       'for FORMAT_TF_EXAMPLE payload format.')
  else:
    # Serialized bytes: the raw payload itself serves as the partition key.
    partition_str = record
  bucket = int(hashlib.sha256(partition_str).hexdigest(), 16) % buckets[-1]
  # For example, if buckets is [10,50,80], there will be 3 splits:
  #   bucket >=0 && < 10, returns 0
  #   bucket >=10 && < 50, returns 1
  #   bucket >=50 && < 80, returns 2
  return bisect.bisect(buckets, bucket)
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[tf.train.Example, bytes])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def _WriteSplit(example_split: beam.pvalue.PCollection,
                output_split_path: Text) -> beam.pvalue.PDone:
  """Shuffles and writes output split as serialized records in TFRecord."""
  def _MaybeSerialize(x):
    # tf.train.Example -> serialized bytes; raw bytes pass through untouched.
    return x.SerializeToString() if isinstance(x, tf.train.Example) else x
  return (example_split
          # TODO(jyzhao): make shuffle optional.
          | 'MaybeSerialize' >> beam.Map(_MaybeSerialize)
          | 'Shuffle' >> beam.transforms.Reshuffle()
          # TODO(jyzhao): multiple output format.
          | 'Write' >> beam.io.WriteToTFRecord(
              os.path.join(output_split_path, DEFAULT_FILE_NAME),
              file_name_suffix='.gz'))
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(Union[tf.train.Example, bytes])
def _InputToExampleOrBytes(
    pipeline: beam.Pipeline,
    input_to_example: beam.PTransform,
    exec_properties: Dict[Text, Any],
    split_pattern: Text,
) -> beam.pvalue.PCollection:
  """Converts input into a tf.train.Example, or a bytes (serialized proto)."""
  # `input_to_example` is the subclass-supplied PTransform factory; it is
  # instantiated per split with the executor properties and file pattern.
  return (pipeline
          | 'InputSourceToExampleOrBytes' >> input_to_example(
              exec_properties, split_pattern))
class BaseExampleGenExecutor(
    with_metaclass(abc.ABCMeta, base_executor.BaseExecutor)):
  """Generic TFX example gen base executor.

  The base ExampleGen executor takes a configuration and converts external data
  sources to TensorFlow Examples (tf.train.Example), or any other protocol
  buffer as subclass defines.

  The common configuration (defined in
  https://github.com/tensorflow/tfx/blob/master/tfx/proto/example_gen.proto#L44.)
  describes the general properties of input data and shared instructions when
  producing output data.

  The conversion is done in `GenerateExamplesByBeam` as a Beam pipeline, which
  validates the configuration, reads the external data sources, converts the
  record in the input source to tf.Example if needed, and splits the examples if
  the output split config is given. Then the executor's `Do` writes the results
  in splits to the output path.

  For simple custom ExampleGens, the details of transforming input data
  record(s) to a tf.Example is expected to be given in
  `GetInputSourceToExamplePTransform`, which returns a Beam PTransform with the
  actual implementation. For complex use cases, such as joining multiple data
  sources and different interpretations of the configurations, the custom
  ExampleGen can override `GenerateExamplesByBeam`.
  """

  @abc.abstractmethod
  def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
    """Returns PTransform for converting input source to records.

    The record is by default assumed to be tf.train.Example protos, subclassses
    can serialize any protocol buffer into bytes as output PCollection,
    so long as the downstream component can consume it.

    Note that each input split will be transformed by this function separately.
    For complex use case, consider override 'GenerateExamplesByBeam' instead.

    Here is an example PTransform:
      @beam.ptransform_fn
      @beam.typehints.with_input_types(beam.Pipeline)
      @beam.typehints.with_output_types(Union[tf.train.Example, bytes])
      def ExamplePTransform(
          pipeline: beam.Pipeline,
          exec_properties: Dict[Text, Any],
          split_pattern: Text) -> beam.pvalue.PCollection
    """
    pass

  def GenerateExamplesByBeam(
      self,
      pipeline: beam.Pipeline,
      exec_properties: Dict[Text, Any],
  ) -> Dict[Text, beam.pvalue.PCollection]:
    """Converts input source to TF example splits based on configs.

    Custom ExampleGen executor should provide GetInputSourceToExamplePTransform
    for converting input split to TF Examples. Overriding this
    'GenerateExamplesByBeam' method instead if complex logic is need, e.g.,
    custom spliting logic.

    Args:
      pipeline: beam pipeline.
      exec_properties: A dict of execution properties. Depends on detailed
        example gen implementation.
        - input_base: an external directory containing the data files.
        - input_config: JSON string of example_gen_pb2.Input instance,
          providing input configuration.
        - output_config: JSON string of example_gen_pb2.Output instance,
          providing output configuration.
        - output_data_format: Payload format of generated data in output
          artifact, one of example_gen_pb2.PayloadFormat enum.

    Returns:
      Dict of beam PCollection with split name as key, each PCollection is a
      single output split that contains serialized TF Examples.
    """
    # Get input split information.
    input_config = example_gen_pb2.Input()
    json_format.Parse(exec_properties[utils.INPUT_CONFIG_KEY], input_config)
    # Get output split information.
    output_config = example_gen_pb2.Output()
    json_format.Parse(exec_properties[utils.OUTPUT_CONFIG_KEY], output_config)
    # Get output split names.
    split_names = utils.generate_output_split_names(input_config, output_config)
    # Make beam_pipeline_args available in exec_properties since certain
    # example_gen executors need this information.
    # TODO(b/155441037): Revisit necessity of this when BigQueryExampleGen
    # does not branch on project or runner anymore.
    exec_properties['_beam_pipeline_args'] = self._beam_pipeline_args or []

    example_splits = []
    input_to_example = self.GetInputSourceToExamplePTransform()
    if output_config.split_config.splits:
      # Use output splits, input must have only one split.
      assert len(
          input_config.splits
      ) == 1, 'input must have only one split when output split is specified.'
      # Calculate split buckets as cumulative sums of hash_buckets, so
      # _PartitionFn can map a hash into a split with bisect.
      buckets = []
      total_buckets = 0
      for split in output_config.split_config.splits:
        total_buckets += split.hash_buckets
        buckets.append(total_buckets)
      example_splits = (
          pipeline
          | 'InputToExampleOrBytes' >>
          # pylint: disable=no-value-for-parameter
          _InputToExampleOrBytes(input_to_example, exec_properties,
                                 input_config.splits[0].pattern)
          | 'SplitData' >> beam.Partition(_PartitionFn, len(buckets), buckets,
                                          output_config.split_config))
    else:
      # Use input splits.
      for split in input_config.splits:
        examples = (
            pipeline
            | 'InputToExampleOrBytes[{}]'.format(split.name) >>
            # pylint: disable=no-value-for-parameter
            _InputToExampleOrBytes(input_to_example, exec_properties,
                                   split.pattern))
        example_splits.append(examples)

    result = {}
    for index, example_split in enumerate(example_splits):
      result[split_names[index]] = example_split
    return result

  def Do(
      self,
      input_dict: Dict[Text, List[types.Artifact]],
      output_dict: Dict[Text, List[types.Artifact]],
      exec_properties: Dict[Text, Any],
  ) -> None:
    """Take input data source and generates serialized data splits.

    The output is intended to be serialized tf.train.Examples protocol buffer
    in gzipped TFRecord format, but subclasses can choose to override to write
    to any serialized records payload into gzipped TFRecord as specified,
    so long as downstream component can consume it. The format of payload is
    added to `payload_format` custom property of the output Example artifact.

    Args:
      input_dict: Input dict from input key to a list of Artifacts. Depends on
        detailed example gen implementation.
      output_dict: Output dict from output key to a list of Artifacts.
        - examples: splits of tf examples.
      exec_properties: A dict of execution properties. Depends on detailed
        example gen implementation.
        - input_base: an external directory containing the data files.
        - input_config: JSON string of example_gen_pb2.Input instance,
          providing input configuration.
        - output_config: JSON string of example_gen_pb2.Output instance,
          providing output configuration.
        - output_data_format: Payload format of generated data in output
          artifact, one of example_gen_pb2.PayloadFormat enum.

    Returns:
      None
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    logging.info('Generating examples.')
    with self._make_beam_pipeline() as pipeline:
      example_splits = self.GenerateExamplesByBeam(pipeline, exec_properties)
      # pylint: disable=expression-not-assigned, no-value-for-parameter
      for split_name, example_split in example_splits.items():
        # Bug fix: removed a stray close-paren after `% split_name` that left
        # the expression's parentheses unbalanced (a SyntaxError).
        (example_split
         | 'WriteSplit[%s]' % split_name >> _WriteSplit(
             artifact_utils.get_split_uri(output_dict[utils.EXAMPLES_KEY],
                                          split_name)))
      # pylint: enable=expression-not-assigned, no-value-for-parameter

    output_payload_format = exec_properties.get(utils.OUTPUT_DATA_FORMAT_KEY)
    if output_payload_format:
      for output_examples_artifact in output_dict[utils.EXAMPLES_KEY]:
        output_examples_artifact.set_string_custom_property(
            utils.PAYLOAD_FORMAT_PROPERTY_NAME,
            example_gen_pb2.PayloadFormat.Name(output_payload_format))

    logging.info('Examples generated.')
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs cassandra.
Cassandra homepage: http://cassandra.apache.org
cassandra-stress tool page:
http://docs.datastax.com/en/cassandra/2.1/cassandra/tools/toolsCStress_t.html
"""
import collections
import functools
import logging
import math
import os
import posixpath
import re
import time
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.packages import cassandra
NUM_KEYS_PER_CORE = 2000000
flags.DEFINE_integer('num_keys', 0,
'Number of keys used in cassandra-stress tool. '
'If unset, this benchmark will use %s * num_cpus '
'on data nodes as the value.' % NUM_KEYS_PER_CORE)
flags.DEFINE_integer('num_cassandra_stress_threads', 50,
'Number of threads used in cassandra-stress tool '
'on each loader node.')
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'cassandra_stress'
BENCHMARK_CONFIG = """
cassandra_stress:
description: Benchmark Cassandra using cassandra-stress
vm_groups:
cassandra_nodes:
vm_spec: *default_single_core
disk_spec: *default_500_gb
vm_count: 3
stress_client:
vm_spec: *default_single_core
"""
CASSANDRA_GROUP = 'cassandra_nodes'
CLIENT_GROUP = 'stress_client'
PROPAGATION_WAIT_TIME = 30
SLEEP_BETWEEN_CHECK_IN_SECONDS = 5
# Stress test options.
CONSISTENCY_LEVEL = 'QUORUM'
REPLICATION_FACTOR = 3
RETRIES = 1000
CASSANDRA_STRESS = posixpath.join(cassandra.CASSANDRA_DIR, 'tools', 'bin',
'cassandra-stress')
# Results documentation:
# http://docs.datastax.com/en/cassandra/2.1/cassandra/tools/toolsCStressOutput_c.html
RESULTS_METRICS = (
'op rate', # Number of operations per second performed during the run.
'partition rate', # Number of partition operations per second performed
# during the run.
'row rate', # Number of row operations per second performed during the run.
'latency mean', # Average latency in milliseconds for each operation during
# that run.
'latency median', # Median latency in milliseconds for each operation
# during that run.
'latency 95th percentile', # 95% of the time the latency was less than
# the number displayed in the column.
'latency 99th percentile', # 99% of the time the latency was less than
# the number displayed in the column.
'latency 99.9th percentile', # 99.9% of the time the latency was less than
# the number displayed in the column.
'latency max', # Maximum latency in milliseconds.
'Total partitions', # Number of partitions.
'Total errors', # Number of errors.
'Total operation time') # Total operation time.
# Metrics are aggregated between client vms.
AGGREGATED_METRICS = {'op rate', 'partition rate', 'row rate',
'Total partitions', 'Total errors'}
# Maximum value will be chosen between client vms.
MAXIMUM_METRICS = {'latency max'}
def GetConfig(user_config):
  """Return the benchmark config dict, merged with user overrides."""
  return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def CheckPrerequisites():
  """Verifies that the required resources are present.

  Raises:
    perfkitbenchmarker.data.ResourceNotFound: On missing resource.
  """
  # Delegates entirely to the cassandra package's own prerequisite check.
  cassandra.CheckPrerequisites()
def Prepare(benchmark_spec):
  """Install Cassandra and Java on target vms.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  vm_dict = benchmark_spec.vm_groups
  cassandra_vms = vm_dict[CASSANDRA_GROUP]
  logging.info('VM dictionary %s', vm_dict)
  logging.info('Authorizing loader[0] permission to access all other vms.')
  vm_dict[CLIENT_GROUP][0].AuthenticateVm()
  logging.info('Preparing data files and Java on all vms.')
  # Install on data nodes AND loader clients (benchmark_spec.vms is all vms).
  vm_util.RunThreaded(lambda vm: vm.Install('cassandra'), benchmark_spec.vms)
  # The first data node acts as the cluster seed; the others join it.
  seed_vm = cassandra_vms[0]
  configure = functools.partial(cassandra.Configure, seed_vms=[seed_vm])
  vm_util.RunThreaded(configure, cassandra_vms)
  cassandra.StartCluster(seed_vm, cassandra_vms[1:])
def _ResultFilePath(vm):
  """Return the remote path of the cassandra-stress log file for *vm*."""
  filename = '%s.stress_results.txt' % vm.hostname
  return posixpath.join(vm_util.VM_TMP_DIR, filename)
def RunTestOnLoader(vm, loader_index, keys_per_vm, data_node_ips):
  """Run Cassandra-stress test on loader node.

  Args:
    vm: The target vm.
    loader_index: The index of target vm in loader vms.
    keys_per_vm: The number of keys per loader vm need to insert.
    data_node_ips: List of IP addresses for all data nodes.
  """
  # Bug fix: the old positional template passed RETRIES ({8}) to
  # '-rate threads=' and the thread count ({9}) to '-errors retries='
  # (swapped); named fields make the mapping explicit and correct.
  # '\\(' emits a literal backslash so the parentheses are escaped for the
  # remote shell without relying on the invalid '\(' escape sequence.
  vm.RobustRemoteCommand(
      '{cassandra} write cl={cl} n={num_keys} '
      '-node {nodes} -schema replication\\(factor={rf}\\) '
      '-pop seq={start}..{end} '
      '-log file={result_file} -rate threads={threads} '
      '-errors retries={retries}'.format(
          cassandra=CASSANDRA_STRESS,
          cl=CONSISTENCY_LEVEL,
          num_keys=keys_per_vm,
          nodes=','.join(data_node_ips),
          rf=REPLICATION_FACTOR,
          start=loader_index * keys_per_vm + 1,
          end=(loader_index + 1) * keys_per_vm,
          result_file=_ResultFilePath(vm),
          threads=FLAGS.num_cassandra_stress_threads,
          retries=RETRIES))
def RunCassandraStress(benchmark_spec):
  """Start Cassandra test.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.
  """
  logging.info('Creating Keyspace.')
  loader_vms = benchmark_spec.vm_groups[CLIENT_GROUP]
  cassandra_vms = benchmark_spec.vm_groups[CASSANDRA_GROUP]
  data_node_ips = [vm.internal_ip for vm in cassandra_vms]
  # One throwaway write creates the keyspace/schema. '\\(' emits a literal
  # backslash so the parentheses are escaped for the remote shell.
  loader_vms[0].RemoteCommand(
      '{0} write n=1 cl={1} '
      '-node {2} -schema replication\\(factor={3}\\) > /dev/null'.format(
          CASSANDRA_STRESS, CONSISTENCY_LEVEL,
          ','.join(data_node_ips), REPLICATION_FACTOR))
  logging.info('Waiting %s for keyspace to propagate.', PROPAGATION_WAIT_TIME)
  time.sleep(PROPAGATION_WAIT_TIME)
  if not FLAGS.num_keys:
    FLAGS.num_keys = NUM_KEYS_PER_CORE * cassandra_vms[0].num_cpus
    logging.info('Num keys not set, using %s in cassandra-stress test.',
                 FLAGS.num_keys)
  logging.info('Executing the benchmark.')
  num_loaders = len(loader_vms)
  # Floor division: keys_per_vm must stay an int ('//' is identical under
  # Python 2 and avoids a float under Python 3).
  keys_per_vm = FLAGS.num_keys // num_loaders
  if FLAGS.num_keys % num_loaders:
    # logging.warn is a deprecated alias of logging.warning.
    logging.warning(
        'Total number of keys rounded to %s (%s keys per loader vm).',
        keys_per_vm * num_loaders, keys_per_vm)
  args = [((loader_vms[i], i, keys_per_vm, data_node_ips), {})
          for i in xrange(0, num_loaders)]
  vm_util.RunThreaded(RunTestOnLoader, args)
def WaitForLoaderToFinish(vm):
  """Watch loader node and wait for it to finish test.

  Polls the last line of the remote result file until it contains END
  (success) or FAILURE (raises).

  Args:
    vm: The target vm.

  Raises:
    errors.Benchmarks.RunError: If cassandra-stress reported FAILURE.
  """
  result_path = _ResultFilePath(vm)
  while True:
    resp, _ = vm.RemoteCommand('tail -n 1 ' + result_path)
    # Plain substring checks; re.findall on a fixed literal was overkill.
    if 'END' in resp:
      break
    if 'FAILURE' in resp:
      # Preserve the log locally so the failure can be diagnosed.
      vm.PullFile(vm_util.GetTempDir(), result_path)
      raise errors.Benchmarks.RunError(
          'cassandra-stress tool failed, check %s for details.'
          % posixpath.join(vm_util.GetTempDir(),
                           os.path.basename(result_path)))
    time.sleep(SLEEP_BETWEEN_CHECK_IN_SECONDS)
def CollectResultFile(vm, results):
  """Collect result file on vm.

  Args:
    vm: The target vm.
    results: A dictionary of lists. Each list contains results of a field
        defined in RESULTS_METRICS collected from each loader machines.
  """
  result_path = _ResultFilePath(vm)
  vm.PullFile(vm_util.GetTempDir(), result_path)
  # The summary table is printed at the end of the stress log.
  resp, _ = vm.RemoteCommand('tail -n 20 ' + result_path)
  for metric in RESULTS_METRICS:
    # Each summary row looks like '<metric> : <number or H:MM:SS>'.
    value = regex_util.ExtractGroup(r'%s[\t ]+: ([\d\.:]+)' % metric, resp)
    if metric == RESULTS_METRICS[-1]:  # Total operation time
      # Convert the 'H:MM:SS' duration into seconds.
      value = value.split(':')
      results[metric].append(
          int(value[0]) * 3600 + int(value[1]) * 60 + int(value[2]))
    else:
      results[metric].append(float(value))
def RunCassandraStressTest(benchmark_spec):
  """Start all loader nodes as Cassandra clients and run stress test.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.
  """
  try:
    RunCassandraStress(benchmark_spec)
  finally:
    # Runs even if kicking off the stress test raised, so any loaders that
    # did start are still watched to completion.
    logging.info('Tests running. Watching progress.')
    vm_util.RunThreaded(WaitForLoaderToFinish,
                        benchmark_spec.vm_groups[CLIENT_GROUP])
def CollectResults(benchmark_spec):
  """Collect and parse test results.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  logging.info('Gathering results.')
  vm_dict = benchmark_spec.vm_groups
  loader_vms = vm_dict[CLIENT_GROUP]
  raw_results = collections.defaultdict(list)
  args = [((vm, raw_results), {}) for vm in loader_vms]
  vm_util.RunThreaded(CollectResultFile, args)
  metadata = {'num_keys': FLAGS.num_keys,
              'num_data_nodes': len(vm_dict[CASSANDRA_GROUP]),
              'num_loader_nodes': len(loader_vms),
              'num_cassandra_stress_threads':
              FLAGS.num_cassandra_stress_threads}
  results = []
  for metric in RESULTS_METRICS:
    if metric in MAXIMUM_METRICS:
      value = max(raw_results[metric])
    else:
      value = math.fsum(raw_results[metric])
      if metric not in AGGREGATED_METRICS:
        # Non-aggregated metrics (latencies) are averaged across loaders.
        value = value / len(loader_vms)
    # Bug fix: 'Total partitions' and 'Total errors' matched none of the
    # branches below, so `unit` leaked from the previous iteration
    # ('latency max' -> 'ms'). They are plain counts; use an empty unit.
    if metric.startswith('latency'):
      unit = 'ms'
    elif metric.endswith('rate'):
      unit = 'operations per second'
    elif metric == 'Total operation time':
      unit = 'seconds'
    else:
      unit = ''
    results.append(sample.Sample(metric, value, unit, metadata))
  logging.info('Cassandra results:\n%s', results)
  return results
def Run(benchmark_spec):
  """Run Cassandra on target vms.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  # Fire the stress test on all loaders, then parse the pulled logs.
  RunCassandraStressTest(benchmark_spec)
  return CollectResults(benchmark_spec)
def Cleanup(benchmark_spec):
  """Cleanup function.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.
  """
  vm_dict = benchmark_spec.vm_groups
  cassandra_vms = vm_dict[CASSANDRA_GROUP]
  # Stop the daemons first, then wipe the data directories.
  vm_util.RunThreaded(cassandra.Stop, cassandra_vms)
  vm_util.RunThreaded(cassandra.CleanNode, cassandra_vms)
Improve cassandra commands.
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs cassandra.
Cassandra homepage: http://cassandra.apache.org
cassandra-stress tool page:
http://docs.datastax.com/en/cassandra/2.1/cassandra/tools/toolsCStress_t.html
"""
import collections
import functools
import logging
import math
import os
import posixpath
import re
import time
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.packages import cassandra
NUM_KEYS_PER_CORE = 2000000
flags.DEFINE_integer('num_keys', 0,
'Number of keys used in cassandra-stress tool. '
'If unset, this benchmark will use %s * num_cpus '
'on data nodes as the value.' % NUM_KEYS_PER_CORE)
flags.DEFINE_integer('num_cassandra_stress_threads', 50,
'Number of threads used in cassandra-stress tool '
'on each loader node.')
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'cassandra_stress'
BENCHMARK_CONFIG = """
cassandra_stress:
description: Benchmark Cassandra using cassandra-stress
vm_groups:
cassandra_nodes:
vm_spec: *default_single_core
disk_spec: *default_500_gb
vm_count: 3
stress_client:
vm_spec: *default_single_core
"""
CASSANDRA_GROUP = 'cassandra_nodes'
CLIENT_GROUP = 'stress_client'
PROPAGATION_WAIT_TIME = 30
SLEEP_BETWEEN_CHECK_IN_SECONDS = 5
# Stress test options.
CONSISTENCY_LEVEL = 'QUORUM'
REPLICATION_FACTOR = 3
RETRIES = 1000
CASSANDRA_STRESS = posixpath.join(cassandra.CASSANDRA_DIR, 'tools', 'bin',
'cassandra-stress')
# Results documentation:
# http://docs.datastax.com/en/cassandra/2.1/cassandra/tools/toolsCStressOutput_c.html
RESULTS_METRICS = (
'op rate', # Number of operations per second performed during the run.
'partition rate', # Number of partition operations per second performed
# during the run.
'row rate', # Number of row operations per second performed during the run.
'latency mean', # Average latency in milliseconds for each operation during
# that run.
'latency median', # Median latency in milliseconds for each operation
# during that run.
'latency 95th percentile', # 95% of the time the latency was less than
# the number displayed in the column.
'latency 99th percentile', # 99% of the time the latency was less than
# the number displayed in the column.
'latency 99.9th percentile', # 99.9% of the time the latency was less than
# the number displayed in the column.
'latency max', # Maximum latency in milliseconds.
'Total partitions', # Number of partitions.
'Total errors', # Number of errors.
'Total operation time') # Total operation time.
# Metrics are aggregated between client vms.
AGGREGATED_METRICS = {'op rate', 'partition rate', 'row rate',
'Total partitions', 'Total errors'}
# Maximum value will be chosen between client vms.
MAXIMUM_METRICS = {'latency max'}
def GetConfig(user_config):
  """Return the benchmark config dict, merged with user overrides."""
  return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def CheckPrerequisites():
  """Verifies that the required resources are present.

  Raises:
    perfkitbenchmarker.data.ResourceNotFound: On missing resource.
  """
  # Delegates entirely to the cassandra package's own prerequisite check.
  cassandra.CheckPrerequisites()
def Prepare(benchmark_spec):
  """Install Cassandra and Java on target vms.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  vm_dict = benchmark_spec.vm_groups
  cassandra_vms = vm_dict[CASSANDRA_GROUP]
  logging.info('VM dictionary %s', vm_dict)
  logging.info('Authorizing loader[0] permission to access all other vms.')
  vm_dict[CLIENT_GROUP][0].AuthenticateVm()
  logging.info('Preparing data files and Java on all vms.')
  # Install on data nodes AND loader clients (benchmark_spec.vms is all vms).
  vm_util.RunThreaded(lambda vm: vm.Install('cassandra'), benchmark_spec.vms)
  # The first data node acts as the cluster seed; the others join it.
  seed_vm = cassandra_vms[0]
  configure = functools.partial(cassandra.Configure, seed_vms=[seed_vm])
  vm_util.RunThreaded(configure, cassandra_vms)
  cassandra.StartCluster(seed_vm, cassandra_vms[1:])
def _ResultFilePath(vm):
  # Remote path of the per-vm cassandra-stress log / result file.
  return posixpath.join(vm_util.VM_TMP_DIR,
                        vm.hostname + '.stress_results.txt')
def RunTestOnLoader(vm, loader_index, keys_per_vm, data_node_ips):
  """Run Cassandra-stress test on loader node.

  Args:
    vm: The target vm.
    loader_index: The index of target vm in loader vms.
    keys_per_vm: The number of keys per loader vm need to insert.
    data_node_ips: List of IP addresses for all data nodes.
  """
  # '\\(' emits a literal backslash so the parentheses are escaped for the
  # remote shell without relying on the invalid '\(' escape sequence.
  vm.RobustRemoteCommand(
      '{cassandra} write cl={consistency_level} n={num_keys} '
      '-node {nodes} -schema replication\\(factor={replication_factor}\\) '
      '-pop seq={start_index}..{end_index} '
      '-log file={result_file} -rate threads={threads} '
      '-errors retries={retries}'.format(
          cassandra=CASSANDRA_STRESS,
          consistency_level=CONSISTENCY_LEVEL,
          num_keys=keys_per_vm,
          nodes=','.join(data_node_ips),
          replication_factor=REPLICATION_FACTOR,
          start_index=loader_index * keys_per_vm + 1,
          end_index=(loader_index + 1) * keys_per_vm,
          result_file=_ResultFilePath(vm),
          threads=FLAGS.num_cassandra_stress_threads,
          retries=RETRIES))
def RunCassandraStress(benchmark_spec):
  """Start Cassandra test.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.
  """
  logging.info('Creating Keyspace.')
  loader_vms = benchmark_spec.vm_groups[CLIENT_GROUP]
  cassandra_vms = benchmark_spec.vm_groups[CASSANDRA_GROUP]
  data_node_ips = [vm.internal_ip for vm in cassandra_vms]
  # One throwaway write creates the keyspace/schema. '\\(' emits a literal
  # backslash so the parentheses are escaped for the remote shell.
  loader_vms[0].RemoteCommand(
      '{cassandra} write n=1 cl={consistency_level} -node {nodes} '
      '-schema replication\\(factor={replication_factor}\\) '
      '> /dev/null'.format(
          cassandra=CASSANDRA_STRESS, consistency_level=CONSISTENCY_LEVEL,
          nodes=','.join(data_node_ips),
          replication_factor=REPLICATION_FACTOR))
  logging.info('Waiting %s for keyspace to propagate.', PROPAGATION_WAIT_TIME)
  time.sleep(PROPAGATION_WAIT_TIME)
  if not FLAGS.num_keys:
    FLAGS.num_keys = NUM_KEYS_PER_CORE * cassandra_vms[0].num_cpus
    logging.info('Num keys not set, using %s in cassandra-stress test.',
                 FLAGS.num_keys)
  logging.info('Executing the benchmark.')
  num_loaders = len(loader_vms)
  # Floor division: keys_per_vm must stay an int ('//' is identical under
  # Python 2 and avoids a float under Python 3).
  keys_per_vm = FLAGS.num_keys // num_loaders
  if FLAGS.num_keys % num_loaders:
    # logging.warn is a deprecated alias of logging.warning.
    logging.warning(
        'Total number of keys rounded to %s (%s keys per loader vm).',
        keys_per_vm * num_loaders, keys_per_vm)
  args = [((loader_vms[i], i, keys_per_vm, data_node_ips), {})
          for i in xrange(0, num_loaders)]
  vm_util.RunThreaded(RunTestOnLoader, args)
def WaitForLoaderToFinish(vm):
  """Watch loader node and wait for it to finish test.

  Polls the last line of the remote result file until it contains END
  (success) or FAILURE (raises).

  Args:
    vm: The target vm.

  Raises:
    errors.Benchmarks.RunError: If cassandra-stress reported FAILURE.
  """
  result_path = _ResultFilePath(vm)
  while True:
    resp, _ = vm.RemoteCommand('tail -n 1 ' + result_path)
    # Plain substring checks; re.findall on a fixed literal was overkill.
    if 'END' in resp:
      break
    if 'FAILURE' in resp:
      # Preserve the log locally so the failure can be diagnosed.
      vm.PullFile(vm_util.GetTempDir(), result_path)
      raise errors.Benchmarks.RunError(
          'cassandra-stress tool failed, check %s for details.'
          % posixpath.join(vm_util.GetTempDir(),
                           os.path.basename(result_path)))
    time.sleep(SLEEP_BETWEEN_CHECK_IN_SECONDS)
def CollectResultFile(vm, results):
  """Collect result file on vm.

  Args:
    vm: The target vm.
    results: A dictionary of lists. Each list contains results of a field
        defined in RESULTS_METRICS collected from each loader machines.
  """
  result_path = _ResultFilePath(vm)
  vm.PullFile(vm_util.GetTempDir(), result_path)
  # The summary table is printed at the end of the stress log.
  resp, _ = vm.RemoteCommand('tail -n 20 ' + result_path)
  for metric in RESULTS_METRICS:
    # Each summary row looks like '<metric> : <number or H:MM:SS>'.
    value = regex_util.ExtractGroup(r'%s[\t ]+: ([\d\.:]+)' % metric, resp)
    if metric == RESULTS_METRICS[-1]:  # Total operation time
      # Convert the 'H:MM:SS' duration into seconds.
      value = value.split(':')
      results[metric].append(
          int(value[0]) * 3600 + int(value[1]) * 60 + int(value[2]))
    else:
      results[metric].append(float(value))
def RunCassandraStressTest(benchmark_spec):
  """Start all loader nodes as Cassandra clients and run stress test.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.
  """
  try:
    RunCassandraStress(benchmark_spec)
  finally:
    # Runs even if kicking off the stress test raised, so any loaders that
    # did start are still watched to completion.
    logging.info('Tests running. Watching progress.')
    vm_util.RunThreaded(WaitForLoaderToFinish,
                        benchmark_spec.vm_groups[CLIENT_GROUP])
def CollectResults(benchmark_spec):
  """Collect and parse test results.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  logging.info('Gathering results.')
  vm_dict = benchmark_spec.vm_groups
  loader_vms = vm_dict[CLIENT_GROUP]
  raw_results = collections.defaultdict(list)
  args = [((vm, raw_results), {}) for vm in loader_vms]
  vm_util.RunThreaded(CollectResultFile, args)
  metadata = {'num_keys': FLAGS.num_keys,
              'num_data_nodes': len(vm_dict[CASSANDRA_GROUP]),
              'num_loader_nodes': len(loader_vms),
              'num_cassandra_stress_threads':
              FLAGS.num_cassandra_stress_threads}
  results = []
  for metric in RESULTS_METRICS:
    if metric in MAXIMUM_METRICS:
      value = max(raw_results[metric])
    else:
      value = math.fsum(raw_results[metric])
      if metric not in AGGREGATED_METRICS:
        # Non-aggregated metrics (latencies) are averaged across loaders.
        value = value / len(loader_vms)
    # Bug fix: 'Total partitions' and 'Total errors' matched none of the
    # branches below, so `unit` leaked from the previous iteration
    # ('latency max' -> 'ms'). They are plain counts; use an empty unit.
    if metric.startswith('latency'):
      unit = 'ms'
    elif metric.endswith('rate'):
      unit = 'operations per second'
    elif metric == 'Total operation time':
      unit = 'seconds'
    else:
      unit = ''
    results.append(sample.Sample(metric, value, unit, metadata))
  logging.info('Cassandra results:\n%s', results)
  return results
def Run(benchmark_spec):
  """Run Cassandra on target vms.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  # Fire the stress test on all loaders, then parse the pulled logs.
  RunCassandraStressTest(benchmark_spec)
  return CollectResults(benchmark_spec)
def Cleanup(benchmark_spec):
  """Cleanup function.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.
  """
  vm_dict = benchmark_spec.vm_groups
  cassandra_vms = vm_dict[CASSANDRA_GROUP]
  # Stop the daemons first, then wipe the data directories.
  vm_util.RunThreaded(cassandra.Stop, cassandra_vms)
  vm_util.RunThreaded(cassandra.CleanNode, cassandra_vms)
|
#!/usr/bin/env python
from strips import *
def trans_star(p, s, a):
    """Yield every (program, state, actions) configuration reachable from
    (p, s) via zero or more transitions; (p, s, a) itself is yielded first
    whenever p may legally terminate in s."""
    if p.final(s):
        yield (p, s, a)
    for next_p, next_s, step in p.trans(s):
        for reached in trans_star(next_p, next_s, a + step):
            yield reached
def indigolog(p, s, a, exog_occurs=lambda s: None):
    """Online execution of program p from state s with action history a.

    exog_occurs is polled each step; if it returns a GroundAction, that
    action is applied before the program makes its own move.
    """
    # at each step ask for an exogenous action:
    exog = exog_occurs(s)
    if exog:
        if not isinstance(exog, GroundAction):
            raise TypeError('exogenous actions must be GroundAction instances')
        s1 = exog.apply(s)
        a1 = a + [exog]
        return indigolog(p, s1, a1, exog_occurs)
    for p1, s1, a1 in p.trans(s):
        # commit to the first step, since we are executing in an online fashion:
        return indigolog(p1, s1, a + a1, exog_occurs)
    else:
        # for/else: only reached when p.trans(s) yielded nothing (the return
        # above exits before the loop can complete otherwise).
        if p.final(s):
            for action in a: print(action)
            print('%d actions.' % len(a))
        else:
            print('execution failed; %d actions.' % len(a))
        return
class Program:
    """Abstract base for Golog-style programs. Subclasses provide
    trans(s) -> iterator of (program, state, actions) single steps, and
    final(s) -> bool (may the program legally terminate in state s)."""
    pass
class Choose(Program):
    """Nondeterministic choice between two or more programs."""

    def __init__(self, p1, p2, *ps):
        self.p1 = p1
        # Any extra alternatives fold into a right-nested Choose chain.
        if ps:
            self.p2 = Choose(p2, *ps)
        else:
            self.p2 = p2

    def trans(self, s):
        for step in self.p1.trans(s):
            yield step
        for step in self.p2.trans(s):
            yield step

    def final(self, s):
        return self.p1.final(s) or self.p2.final(s)

    def __repr__(self):
        return '(%s|%s)' % (self.p1, self.p2)
class Empty(Program):
    """The terminated program nil: makes no moves, always final."""

    def trans(self, s):
        # Generator that terminates immediately without yielding.
        return
        yield

    def final(self, s):
        return True

    def __repr__(self):
        return 'nil'
class Exec(Program):
    """Execute a single ground action."""
    def __init__(self, ground_action):
        self.ground_action = ground_action
    def trans(self, s):
        # One step to nil if the action applies; no step at all (the program
        # blocks) when its preconditions are unsatisfied in s.
        try: yield (Empty(), self.ground_action.apply(s), [self.ground_action])
        except UnsatisfiedPreconditions: pass
    def final(self, s):
        return False
    def __repr__(self): return '%s' % (self.ground_action)
class If(Program):
    """Synchronized conditional: behave as p1 or p2 depending on
    condition(s) evaluated in the current state."""

    def __init__(self, condition, p1, p2):
        self.condition = condition
        self.p1 = p1
        self.p2 = p2

    def _branch(self, s):
        # Select the live branch for state s.
        return self.p1 if self.condition(s) else self.p2

    def trans(self, s):
        yield from self._branch(s).trans(s)

    def final(self, s):
        return self._branch(s).final(s)

    def __repr__(self):
        return 'if %s then %s else %s endIf' % ('<cond>', self.p1, self.p2)
class Pick(Program):
    """Nondeterministically pick an object of the given domain type and run
    p1 (a callable mapping the picked object to a program) on it."""
    def __init__(self, domain, p1):
        self.domain = domain
        self.p1 = p1
    def trans(self, s):
        # Try every known object of the domain type in turn.
        for obj in Object.get_objects_of_type(self.domain):
            yield from self.p1(obj).trans(s)
    def final(self, s):
        # Final if any instantiation may terminate.
        for obj in Object.get_objects_of_type(self.domain):
            if self.p1(obj).final(s): return True
        return False
    def __repr__(self): return 'pick from %s and (%s)' % (self.domain.__name__, self.p1)
class Search(Program):
    """Offline lookahead over p1 using trans_star, which enumerates every
    configuration reachable through repeated transitions."""
    def __init__(self, p1):
        self.p1 = p1
    def trans(self, s):
        yield from trans_star(self.p1, s, [])
    def final(self, s):
        # Final iff at least one configuration is reachable (trans_star
        # yields the initial configuration only when p1 is final in s).
        return any(trans_star(self.p1, s, []))
    def __repr__(self): return 'search { %s }' % self.p1
class Sequence(Program):
    """Run p1 to completion, then p2; extra programs nest to the right."""

    def __init__(self, p1, p2, *ps):
        self.p1 = p1
        self.p2 = Sequence(p2, *ps) if ps else p2

    def trans(self, s):
        first, second = self.p1, self.p2
        if not isinstance(first, Empty):
            # Step inside the first program, keeping the rest pending.
            for remaining, state, actions in first.trans(s):
                yield (Sequence(remaining, second), state, actions)
        if first.final(s) or isinstance(first, Empty):
            # The first program may finish here: step into the second.
            yield from second.trans(s)

    def final(self, s):
        return self.p1.final(s) and self.p2.final(s)

    def __repr__(self):
        return '(%s;%s)' % (self.p1, self.p2)
class Star(Program):
    """Zero or more repetitions of p1."""

    def __init__(self, p1):
        self.p1 = p1

    def trans(self, s):
        # One unfolding: a step of p1 followed by Star(p1) again.
        for remaining, state, actions in self.p1.trans(s):
            yield (Sequence(remaining, self), state, actions)

    def final(self, s):
        # Zero iterations are always permitted.
        return True

    def __repr__(self):
        return '(%s)*' % (self.p1,)
class Test(Program):
    """Test construct ?(c): a single silent step iff the condition holds."""

    def __init__(self, condition):
        self.condition = condition

    def trans(self, s):
        if not self.condition(s):
            return
        # Succeeds without changing the state or recording an action.
        yield (Empty(), s, [])

    def final(self, s):
        return False

    def __repr__(self):
        return '?(%s)' % ('<cond>',)
class While(Program):
    """Repeat p1 as long as condition holds in the current state."""

    def __init__(self, condition, p1):
        self.condition = condition
        self.p1 = p1

    def trans(self, s):
        if not self.condition(s):
            return
        # One unfolding: a step of the body, then the loop again.
        for remaining, state, actions in self.p1.trans(s):
            yield (Sequence(remaining, self), state, actions)

    def final(self, s):
        if not self.condition(s):
            return True
        return self.p1.final(s)

    def __repr__(self):
        return 'while %s do %s endWhile' % ('<cond>', self.p1)
switch from exogenous actions to exogenous functions (of state)
#!/usr/bin/env python
from strips import *
def trans_star(p, s, a):
    """Yield every (program, state, actions) configuration reachable from
    (p, s) via zero or more transitions; (p, s, a) itself is yielded first
    whenever p may legally terminate in s."""
    if p.final(s):
        yield (p, s, a)
    for next_p, next_s, step in p.trans(s):
        for reached in trans_star(next_p, next_s, a + step):
            yield reached
def indigolog(p, s, a, exog=lambda s: s, verbose=True):
    """Online execution of program p from state s with action history a.

    exog is a state-to-state function applied before each step; the default
    is the identity (no exogenous change).
    """
    # at each step apply exogenous events if any:
    s = exog(s)
    for p1, s1, a1 in p.trans(s):
        if verbose: print(a1)
        # commit to the first step, since we are executing in an online fashion:
        return indigolog(p1, s1, a + a1, exog, verbose)
    else:
        # for/else: only reached when p.trans(s) yielded nothing (the return
        # above exits before the loop can complete otherwise).
        if not verbose: return
        if p.final(s): print('%d actions.' % len(a))
        else: print('execution failed; %d actions.' % len(a))
        return
class Program:
    """Abstract base for Golog-style programs. Subclasses provide
    trans(s) -> iterator of (program, state, actions) single steps, and
    final(s) -> bool (may the program legally terminate in state s)."""
    pass
class Choose(Program):
    """Nondeterministic choice: behave as p1 or p2 (extra programs nest
    into further Choose instances)."""
    def __init__(self, p1, p2, *ps):
        self.p1 = p1
        self.p2 = Choose(p2, ps[0], *ps[1:]) if ps else p2
    def trans(self, s):
        yield from self.p1.trans(s)
        yield from self.p2.trans(s)
    def final(self, s):
        return self.p1.final(s) or self.p2.final(s)
    def __repr__(self): return '(%s|%s)' % (self.p1, self.p2)
class Empty(Program):
    """The terminated program nil: makes no moves, always final."""
    def trans(self, s):
        yield from ()  # yield nothing
    def final(self, s):
        return True
    def __repr__(self): return 'nil'
class Exec(Program):
    """Execute a single ground action."""
    def __init__(self, ground_action):
        self.ground_action = ground_action
    def trans(self, s):
        # One step to nil if the action applies; no step at all (the program
        # blocks) when its preconditions are unsatisfied in s.
        try: yield (Empty(), self.ground_action.apply(s), [self.ground_action])
        except UnsatisfiedPreconditions: pass
    def final(self, s):
        return False
    def __repr__(self): return '%s' % (self.ground_action)
class If(Program):
    """Synchronized conditional: behave as p1 or p2 depending on
    condition(s) evaluated in the current state."""
    def __init__(self, condition, p1, p2):
        self.condition = condition
        self.p1 = p1
        self.p2 = p2
    def trans(self, s):
        if self.condition(s): yield from self.p1.trans(s)
        else: yield from self.p2.trans(s)
    def final(self, s):
        if self.condition(s): return self.p1.final(s)
        else: return self.p2.final(s)
    def __repr__(self): return 'if %s then %s else %s endIf' % ('<cond>', self.p1, self.p2)
class Pick(Program):
    """Nondeterministically pick an object of the given domain type and run
    p1 (a callable mapping the picked object to a program) on it."""
    def __init__(self, domain, p1):
        self.domain = domain
        self.p1 = p1
    def trans(self, s):
        # Try every known object of the domain type in turn.
        for obj in Object.get_objects_of_type(self.domain):
            yield from self.p1(obj).trans(s)
    def final(self, s):
        # Final if any instantiation may terminate.
        for obj in Object.get_objects_of_type(self.domain):
            if self.p1(obj).final(s): return True
        return False
    def __repr__(self): return 'pick from %s and (%s)' % (self.domain.__name__, self.p1)
class Search(Program):
    """Offline lookahead over p1 using trans_star, which enumerates every
    configuration reachable through repeated transitions."""
    def __init__(self, p1):
        self.p1 = p1
    def trans(self, s):
        yield from trans_star(self.p1, s, [])
    def final(self, s):
        # Final iff at least one configuration is reachable.
        return any(trans_star(self.p1, s, []))
    def __repr__(self): return 'search { %s }' % self.p1
class Sequence(Program):
    """Run p1 to completion, then p2; extra programs nest to the right."""
    def __init__(self, p1, p2, *ps):
        self.p1 = p1
        self.p2 = Sequence(p2, ps[0], *ps[1:]) if ps else p2
    def trans(self, s):
        if not isinstance(self.p1, Empty):
            # Step inside the first program, keeping the rest pending.
            for pn, sn, an in self.p1.trans(s):
                yield (Sequence(pn, self.p2), sn, an)
        if self.p1.final(s) or isinstance(self.p1, Empty):
            # The first program may finish here: step into the second.
            yield from self.p2.trans(s)
    def final(self, s):
        return self.p1.final(s) and self.p2.final(s)
    def __repr__(self): return '(%s;%s)' % (self.p1, self.p2)
class Star(Program):
    """Zero or more repetitions of p1."""
    def __init__(self, p1):
        self.p1 = p1
    def trans(self, s):
        # One unfolding: a step of p1 followed by Star(p1) again.
        for pn, sn, an in self.p1.trans(s):
            yield (Sequence(pn, self), sn, an)
    def final(self, s):
        # Zero iterations are always permitted.
        return True
    def __repr__(self): return '(%s)*' % (self.p1)
class Test(Program):
    """Test construct ?(c): a single silent step iff the condition holds."""
    def __init__(self, condition):
        self.condition = condition
    def trans(self, s):
        if self.condition(s):
            # Succeeds without changing the state or recording an action.
            yield (Empty(), s, [])
    def final(self, s):
        return False
    def __repr__(self): return '?(%s)' % ('<cond>')
class While(Program):
    """Repeat p1 as long as condition holds in the current state."""
    def __init__(self, condition, p1):
        self.condition = condition
        self.p1 = p1
    def trans(self, s):
        if self.condition(s):
            # One unfolding: a step of the body, then the loop again.
            for pn, sn, an in self.p1.trans(s):
                yield (Sequence(pn, self), sn, an)
    def final(self, s):
        return not self.condition(s) or self.p1.final(s)
    def __repr__(self): return 'while %s do %s endWhile' % ('<cond>', self.p1)
|
import re
import requests
import xmltodict
from xml.parsers.expat import ExpatError
from googleapiclient.discovery import build
from settings import (GOODREADS_API_KEY, GOOGLE_DEV_API_KEY,
GOOGLE_CUSTOM_SEARCH_CX)
class BookNotFound(Exception):
    """Raised when no Goodreads book page can be found for a search term."""
    pass
def get_top_google_goodreads_search(search_term):
    """Search Google Custom Search for *search_term* and return the result
    links that point at a Goodreads book page.

    Bug fix: results.get('items') is None when the search returns no items,
    which made the list comprehension raise TypeError; default to [].
    """
    service = build("customsearch", "v1", developerKey=GOOGLE_DEV_API_KEY)
    results = service.cse().list(q=search_term, cx=GOOGLE_CUSTOM_SEARCH_CX,
                                 ).execute()
    return [r['link'] for r in results.get('items', [])
            if 'goodreads.com/book/show/' in r['link']]
def get_goodreads_id(url):
    """Extract the numeric Goodreads book id from a book-page URL.

    Returns the id as a string, or False when the URL contains no
    Goodreads book path.
    """
    # Escape the dot so it only matches a literal '.'; previously '.' was a
    # wildcard and could match e.g. 'goodreadsXcom/book/show/1'.
    regex = r'goodreads\.com/book/show/(\d+)'
    match = re.search(regex, url)
    if match:
        return match.group(1)
    return False
def get_book_details_by_id(goodreads_id):
    """Fetch book metadata from the Goodreads XML API.

    Returns a dict of selected fields, or False when the response cannot
    be parsed as a Goodreads book document.
    """
    api_url = 'http://goodreads.com/book/show/{0}?format=xml&key={1}'
    r = requests.get(api_url.format(goodreads_id, GOODREADS_API_KEY))
    try:
        book_data = xmltodict.parse(r.content)['GoodreadsResponse']['book']
    except (TypeError, KeyError, ExpatError):
        return False
    keys = ['title', 'average_rating', 'ratings_count', 'description', 'url',
            'num_pages']
    book = {k: book_data.get(k) for k in keys}
    try:
        # Prefer the original publication year recorded on the 'work' node.
        work = book_data['work']
        book['publication_year'] = work['original_publication_year']['#text']
    except KeyError:
        book['publication_year'] = book_data.get('publication_year')
    # xmltodict yields a list for repeated <author> elements, a single dict
    # otherwise; isinstance replaces the non-idiomatic type(...) == list.
    author_node = book_data['authors']['author']
    if isinstance(author_node, list):
        book['authors'] = ', '.join(a['name'] for a in author_node)
    else:
        book['authors'] = author_node['name']
    return book
def get_book_details_by_name(book_name):
    """Resolve *book_name* to a Goodreads book via web search and return
    its details dict.

    Raises:
        BookNotFound: when no Goodreads result exists for the name.
    """
    candidate_urls = get_top_google_goodreads_search(search_term=book_name)
    if not candidate_urls:
        raise BookNotFound
    goodreads_id = get_goodreads_id(url=candidate_urls[0])
    return get_book_details_by_id(goodreads_id=goodreads_id)
PEP8 fix
import re
import requests
import xmltodict
from xml.parsers.expat import ExpatError
from googleapiclient.discovery import build
from settings import (GOODREADS_API_KEY, GOOGLE_DEV_API_KEY,
GOOGLE_CUSTOM_SEARCH_CX)
class BookNotFound(Exception):
    """Raised when no Goodreads book page can be found for a search term."""
    pass
def get_top_google_goodreads_search(search_term):
    """Search Google Custom Search for *search_term* and return the result
    links that point at a Goodreads book page.

    Bug fix: results.get('items') is None when the search returns no items,
    which made the list comprehension raise TypeError; default to [].
    """
    service = build("customsearch", "v1", developerKey=GOOGLE_DEV_API_KEY)
    results = service.cse().list(q=search_term, cx=GOOGLE_CUSTOM_SEARCH_CX,
                                 ).execute()
    return [r['link'] for r in results.get('items', [])
            if 'goodreads.com/book/show/' in r['link']]
def get_goodreads_id(url):
    """Extract the numeric Goodreads book id from a book-page URL.

    Returns the id as a string, or False when the URL contains no
    Goodreads book path.
    """
    # Escape the dot so it only matches a literal '.'; previously '.' was a
    # wildcard and could match e.g. 'goodreadsXcom/book/show/1'.
    regex = r'goodreads\.com/book/show/(\d+)'
    match = re.search(regex, url)
    if match:
        return match.group(1)
    return False
def get_book_details_by_id(goodreads_id):
    """Fetch book metadata from the Goodreads XML API.

    Returns a dict of selected fields, or False when the response cannot
    be parsed as a Goodreads book document.
    """
    api_url = 'http://goodreads.com/book/show/{0}?format=xml&key={1}'
    r = requests.get(api_url.format(goodreads_id, GOODREADS_API_KEY))
    try:
        book_data = xmltodict.parse(r.content)['GoodreadsResponse']['book']
    except (TypeError, KeyError, ExpatError):
        return False
    keys = ['title', 'average_rating', 'ratings_count', 'description', 'url',
            'num_pages']
    book = {k: book_data.get(k) for k in keys}
    try:
        # Prefer the original publication year recorded on the 'work' node.
        work = book_data['work']
        book['publication_year'] = work['original_publication_year']['#text']
    except KeyError:
        book['publication_year'] = book_data.get('publication_year')
    # xmltodict yields a list for repeated <author> elements, a single dict
    # otherwise; isinstance replaces the non-idiomatic type(...) == list.
    author_node = book_data['authors']['author']
    if isinstance(author_node, list):
        book['authors'] = ', '.join(a['name'] for a in author_node)
    else:
        book['authors'] = author_node['name']
    return book
def get_book_details_by_name(book_name):
    """Resolve *book_name* to a Goodreads book via web search and return
    its details dict.

    Raises:
        BookNotFound: when no Goodreads result exists for the name.
    """
    urls = get_top_google_goodreads_search(search_term=book_name)
    if not urls:
        raise BookNotFound
    top_search_url = urls[0]
    goodreads_id = get_goodreads_id(url=top_search_url)
    return get_book_details_by_id(goodreads_id=goodreads_id)
|
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
else:
notification = None
from newprojects.models import Project, ProjectMember
from newprojects.forms import ProjectForm, ProjectUpdateForm
TOPIC_COUNT_SQL = """
SELECT COUNT(*)
FROM topics_topic
WHERE
topics_topic.object_id = newprojects_project.id AND
topics_topic.content_type_id = %s
"""
MEMBER_COUNT_SQL = """
SELECT COUNT(*)
FROM newprojects_projectmember
WHERE newprojects_projectmember.project_id = newprojects_project.id
"""
@login_required
def create(request, form_class=ProjectForm, template_name="newprojects/create.html"):
    """Create a new project owned by the requesting user.

    GET (or invalid POST) re-renders the form; a valid POST saves the
    project, adds the creator as its first member, optionally notifies
    all users, and redirects to the project's page.
    """
    project_form = form_class(request.POST or None)
    if project_form.is_valid():
        project = project_form.save(commit=False)
        project.creator = request.user
        project.save()
        project_member = ProjectMember(project=project, user=request.user)
        # NOTE(review): an unsaved ProjectMember is added to project.members
        # before project_member.save() -- confirm the members relation
        # accepts ProjectMember instances (your_projects() below filters
        # members by User, which looks inconsistent).
        project.members.add(project_member)
        project_member.save()
        if notification:
            # @@@ might be worth having a shortcut for sending to all users
            notification.send(User.objects.all(), "projects_new_project",
                {"project": project}, queue=True)
        return HttpResponseRedirect(project.get_absolute_url())
    return render_to_response(template_name, {
        "project_form": project_form,
    }, context_instance=RequestContext(request))
def projects(request, template_name="newprojects/projects.html"):
    """List all projects, optionally filtered by a search string.

    Each project is annotated with member and topic counts through raw SQL
    subqueries (MEMBER_COUNT_SQL / TOPIC_COUNT_SQL).
    """
    search_terms = request.GET.get('search', '')
    qs = Project.objects.all()
    if search_terms:
        by_name = qs.filter(name__icontains=search_terms)
        by_description = qs.filter(description__icontains=search_terms)
        qs = by_name | by_description
    content_type = ContentType.objects.get_for_model(Project)
    count_columns = SortedDict([
        ('member_count', MEMBER_COUNT_SQL),
        ('topic_count', TOPIC_COUNT_SQL),
    ])
    qs = qs.extra(select=count_columns, select_params=(content_type.id,))
    return render_to_response(template_name, {
        'projects': qs,
        'search_terms': search_terms,
    }, context_instance=RequestContext(request))
def delete(request, group_slug=None, redirect_url=None):
    """Delete a project, then redirect.

    Deletion only happens on a POST issued by the project's creator while
    the creator is the sole member; in every other case the view simply
    redirects without touching the project.
    """
    project = get_object_or_404(Project, slug=group_slug)
    target = redirect_url or reverse('project_list')
    # @@@ eventually, we'll remove restriction that project.creator can't leave project but we'll still require project.members.all().count() == 1
    allowed = (request.user.is_authenticated() and
        request.method == "POST" and
        request.user == project.creator and
        project.members.all().count() == 1)
    if allowed:
        project.delete()
        request.user.message_set.create(
            message=_("Project %s deleted.") % (project,))
        # no notification required as the deleter must be the only member
    return HttpResponseRedirect(target)
@login_required
def your_projects(request, template_name="newprojects/your_projects.html"):
    """Render the projects the logged-in user belongs to, ordered by name."""
    member_projects = Project.objects.filter(members=request.user).order_by("name")
    return render_to_response(template_name,
        {"projects": member_projects},
        context_instance=RequestContext(request))
def project(request, group_slug=None, form_class=ProjectUpdateForm,
        template_name="newprojects/project.html"):
    """Display a single project and handle POSTed actions.

    POST 'action' values:
      * 'update' -- save edits to the project via form_class (if valid).
      * 'join'   -- add the requesting user as a member and notify.
      * 'leave'  -- remove the requesting user from the members.

    Any other (or no) action just renders the project page.
    """
    project = get_object_or_404(Project, slug=group_slug)
    project_form = form_class(request.POST or None, instance=project)
    action = request.POST.get('action')
    if action == 'update' and project_form.is_valid():
        project = project_form.save()
    elif action == 'join':
        project_member = ProjectMember(project=project, user=request.user)
        project.members.add(project_member)
        project_member.save()
        request.user.message_set.create(
            message=_("You have joined the project %s") % (project.name,))
        if notification:
            notification.send([project.creator], "projects_created_new_member", {"user": request.user, "project": project})
            # Use member_users so notification.send receives User instances,
            # not ProjectMember rows.
            notification.send(project.member_users.all(), "projects_new_member", {"user": request.user, "project": project})
    elif action == 'leave':
        project.members.remove(request.user)
        request.user.message_set.create(message="You have left the project %s" % project.name)
        if notification:
            pass # @@@ no notification on departure yet
    if not request.user.is_authenticated():
        is_member = False
    else:
        is_member = project.user_is_member(request.user)
    # TODO: Shouldn't have to do this in the view. Should write new "groupurl" templatetag :(
    new_topic_url = reverse('topic_list', kwargs=project.get_url_kwargs())
    return render_to_response(template_name, {
        "project_form": project_form,
        "project": project,
        "is_member": is_member,
        "new_topic_url": new_topic_url,
    }, context_instance=RequestContext(request))
Use member_users to get User instances to pass into notification.send.
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
else:
notification = None
from newprojects.models import Project, ProjectMember
from newprojects.forms import ProjectForm, ProjectUpdateForm
TOPIC_COUNT_SQL = """
SELECT COUNT(*)
FROM topics_topic
WHERE
topics_topic.object_id = newprojects_project.id AND
topics_topic.content_type_id = %s
"""
MEMBER_COUNT_SQL = """
SELECT COUNT(*)
FROM newprojects_projectmember
WHERE newprojects_projectmember.project_id = newprojects_project.id
"""
@login_required
def create(request, form_class=ProjectForm, template_name="newprojects/create.html"):
    """Create a new project owned by the requesting user.

    On a valid POST the project is saved, the creator is enrolled as its
    first member, every user is (optionally) notified, and the client is
    redirected to the new project's page; otherwise the creation form is
    rendered.
    """
    form = form_class(request.POST or None)
    if not form.is_valid():
        return render_to_response(template_name, {
            "project_form": form,
        }, context_instance=RequestContext(request))
    new_project = form.save(commit=False)
    new_project.creator = request.user
    new_project.save()
    membership = ProjectMember(project=new_project, user=request.user)
    new_project.members.add(membership)
    membership.save()
    if notification:
        # @@@ might be worth having a shortcut for sending to all users
        notification.send(User.objects.all(), "projects_new_project",
            {"project": new_project}, queue=True)
    return HttpResponseRedirect(new_project.get_absolute_url())
def projects(request, template_name="newprojects/projects.html"):
    """List all projects, optionally filtered by a search string.

    Each project is annotated with member and topic counts through raw SQL
    subqueries (MEMBER_COUNT_SQL / TOPIC_COUNT_SQL).
    """
    search_terms = request.GET.get('search', '')
    qs = Project.objects.all()
    if search_terms:
        by_name = qs.filter(name__icontains=search_terms)
        by_description = qs.filter(description__icontains=search_terms)
        qs = by_name | by_description
    content_type = ContentType.objects.get_for_model(Project)
    count_columns = SortedDict([
        ('member_count', MEMBER_COUNT_SQL),
        ('topic_count', TOPIC_COUNT_SQL),
    ])
    qs = qs.extra(select=count_columns, select_params=(content_type.id,))
    return render_to_response(template_name, {
        'projects': qs,
        'search_terms': search_terms,
    }, context_instance=RequestContext(request))
def delete(request, group_slug=None, redirect_url=None):
    """Delete a project, then redirect.

    Deletion only happens on a POST issued by the project's creator while
    the creator is the sole member; in every other case the view simply
    redirects without touching the project.
    """
    project = get_object_or_404(Project, slug=group_slug)
    target = redirect_url or reverse('project_list')
    # @@@ eventually, we'll remove restriction that project.creator can't leave project but we'll still require project.members.all().count() == 1
    allowed = (request.user.is_authenticated() and
        request.method == "POST" and
        request.user == project.creator and
        project.members.all().count() == 1)
    if allowed:
        project.delete()
        request.user.message_set.create(
            message=_("Project %s deleted.") % (project,))
        # no notification required as the deleter must be the only member
    return HttpResponseRedirect(target)
@login_required
def your_projects(request, template_name="newprojects/your_projects.html"):
    """Render the projects the logged-in user belongs to, ordered by name."""
    member_projects = Project.objects.filter(members=request.user).order_by("name")
    return render_to_response(template_name,
        {"projects": member_projects},
        context_instance=RequestContext(request))
def project(request, group_slug=None, form_class=ProjectUpdateForm,
        template_name="newprojects/project.html"):
    """Show a single project and handle 'update', 'join' and 'leave' POSTs."""
    project = get_object_or_404(Project, slug=group_slug)
    project_form = form_class(request.POST or None, instance=project)
    action = request.POST.get('action')
    if action == 'update':
        # Only persist when the submitted form validates.
        if project_form.is_valid():
            project = project_form.save()
    elif action == 'join':
        membership = ProjectMember(project=project, user=request.user)
        project.members.add(membership)
        membership.save()
        request.user.message_set.create(
            message=_("You have joined the project %s") % (project.name,))
        if notification:
            # member_users yields User instances for notification.send.
            notification.send([project.creator], "projects_created_new_member", {"user": request.user, "project": project})
            notification.send(project.member_users.all(), "projects_new_member", {"user": request.user, "project": project})
    elif action == 'leave':
        project.members.remove(request.user)
        request.user.message_set.create(message="You have left the project %s" % project.name)
        if notification:
            pass # @@@ no notification on departure yet
    is_member = (request.user.is_authenticated() and
        project.user_is_member(request.user))
    # TODO: Shouldn't have to do this in the view. Should write new "groupurl" templatetag :(
    new_topic_url = reverse('topic_list', kwargs=project.get_url_kwargs())
    return render_to_response(template_name, {
        "project_form": project_form,
        "project": project,
        "is_member": is_member,
        "new_topic_url": new_topic_url,
    }, context_instance=RequestContext(request))
|
#!/usr/bin/env python
'''
Created on Jan 29, 2014
Modified to take ICE biases into account
@author: ferhat ay
'''
### import statements ###
import sys
import os
import math
import time
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
from pylab import *
from scipy.stats.mstats import mquantiles
from scipy import *
from scipy.interpolate import Rbf, UnivariateSpline
from scipy import optimize
from optparse import OptionParser
import scipy.special as scsp
import bisect
from random import *
import myStats
import myUtils
import gzip
#from numba import jit
## R dependencies
import rpy2.robjects as ro
from rpy2.robjects.packages import importr
## Install the fdrtool package from R shell using
## install.packages("fdrtool")
fdrtool = importr('fdrtool')
#########################
#### Define utility functions
## function for calculating a power-law fit
# powerlaw(x, amp, index) = amp * x**index
powerlaw = lambda x, amp, index: amp * (x**index)
#########################
##### global variables shared by functions ######
# intra-chromosomal contacts in-range
possibleIntraInRangeCount=0 # count of all possible intra-chr fragment pairs within the desired distance range
observedIntraInRangeCount=0 # number of observed intra-chr pairs in range
observedIntraInRangeSum=0 # total contact count over in-range intra-chr pairs
# intra-chromosomal contacts
possibleIntraAllCount=0 # count of all possible intra-chr fragment pairs
observedIntraAllCount=0 # number of observed intra-chr pairs (any distance)
observedIntraAllSum=0 # total contact count over all intra-chr pairs
# inter-chromosomal contacts
possibleInterAllCount=0 # count of all possible inter-chr fragment pairs
observedInterAllCount=0 # number of observed inter-chr pairs
observedInterAllSum=0 # total contact count over all inter-chr pairs
baselineIntraChrProb=0 # 1.0/possibleIntraAllCount
interChrProb=0 # 1.0/possibleInterAllCount
minObservedGenomicDist=500000000 # some number bigger than the biggest chromosome length
maxObservedGenomicDist=0 # largest intra-chr distance seen in the interaction data
maxPossibleGenomicDist=0 # largest distance derivable from the fragment list
#distScaling just avoids overflow - but is necessary for large genomes
distScaling=10000.0
toKb=10**-3 # distance scale factor: bp -> kb
toMb=10**-6 # distance scale factor: bp -> Mb
toProb=10**5 # probability scale factor used in the plots
#########################
def main():
    """Entry point: parse command-line options, load the fragment list,
    optional ICE biases and the contact counts, then run the first
    spline-fit pass of fit-hic.

    Side effects: copies parsed options into the module-level globals
    that the other functions read, and writes the pass-1 output files.
    """
    ### parse the command line arguments
    usage = "usage: %prog [options]"
    parser = OptionParser(usage=usage, version="%prog 1.0 (NGSANE adaption)")
    parser.add_option("-f", "--fragments", dest="fragsfile",
                      help="File containing the list of midpoints (or start indices) of the fragments/windows/meta-fragments for the reference genome." )
    parser.add_option("-i", "--interactions", dest="intersfile",
                      help="File containing the list of contact counts between fragment/window/meta-fragment pairs.")
    parser.add_option("-t", "--biases", dest="biasfile",
                      help="OPTIONAL: biases calculated by ICE for each locus are read from BIASFILE")
    parser.add_option("-r", "--resolution", dest="resolution",type="int",
                      help="Length of fixed-size genomic windows used to process the data. E.x. 10000")
    parser.add_option("-l", "--lib", dest="libname",
                      help="OPTIONAL: A prefix (generally the name of the library) that is going to be used for output file names.")
    parser.add_option("-b", "--noOfBins", dest="noOfBins", type="int",
                      help="OPTIONAL: Number of equal-occupancy bins to process the data. Default is 100")
    parser.add_option("-p", "--passes", dest="noOfPasses",type="int",
                      help="OPTIONAL: Number of passes after the initial spline fit. DEFAULT is 2 (spline-1 and spline-2).")
    parser.add_option("-m", "--mappabilityThres", dest="mappabilityThreshold", type="int",
                      help="OPTIONAL: Minimum number of contacts per locus that has to exist to call it mappable. DEFAULT is 1.")
    parser.add_option("-U", "--upperbound", dest="distUpThres", type="int",
                      help="OPTIONAL: Upper bound on the mid-range distances. DEFAULT no limit.")
    parser.add_option("-L", "--lowerbound", dest="distLowThres", type="int",
                      help="OPTIONAL: Lower bound on the mid-range distances. DEFAULT no limit.")
    parser.add_option("-y", "--usebinning",
                      action="store_true", dest="useBinning", help="OPTIONAL: use equal-occupancy binning. DEFAULT.")
    parser.add_option("-n", "--nobinning",
                      action="store_false", dest="useBinning", help="OPTIONAL: do not use binning and fit to raw data points. This can be infeasible for large data sets.")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose")
    parser.add_option("-q", "--quiet",
                      action="store_false", dest="verbose")
    parser.add_option("-P", "--plotImages",
                      action="store_true", dest="plotimages")
    parser.set_defaults(verbose=True, useBinning=True, noOfBins=100, distLowThres=-1, distUpThres=-1, mappabilityThreshold=1,noOfPasses=2,libname="",biasfile='none', plotimages=False)
    (options, args) = parser.parse_args()
    if len(args) != 0:
        parser.error("incorrect number of arguments")
    # Copy parsed options into the module-level globals shared with the
    # other functions of this script.
    global noOfBins
    global distUpThres
    global distLowThres
    global useBinning
    global libname
    global mappabilityThreshold
    global noOfPasses
    global resolution
    noOfBins=options.noOfBins # 100 by default
    distUpThres=options.distUpThres # -1 by default, means no upper bound
    distLowThres=options.distLowThres # -1 by default, means no lower bound
    mappabilityThreshold=options.mappabilityThreshold # 1 by default, means consider any fragment that participates in one contact
    useBinning=options.useBinning
    libname=options.libname
    noOfPasses=options.noOfPasses
    resolution=options.resolution
    mainDic={} # given a distance this dictionary will return [Npairs,TotalContactCount]
    # read the mandatory input files -f and -i
    (mainDic,noOfFrags)=generate_FragPairs(mainDic,options.fragsfile)
    #for i in range(0,maxPossibleGenomicDist+1,resolution):
    #	print str(i)+"\t"+str(mainDic[i][0])
    biasDic={}
    if options.biasfile!='none':
        biasDic=read_ICE_biases(options.biasfile)
    # read contacts in sparse form
    mainDic=read_All_Interactions(mainDic,options.intersfile,noOfFrags)
    #(mainDic,Xvals,Xindices)=read_All_Interactions(mainDic,options.intersfile,allFragsDic,noOfFrags)
    #t=[0,0]
    #for i in range(len(Xvals)):
    #	t[0]+=len(Xvals[i])
    #	t[1]+=sum(Xvals[i])
    #print t
    #for i in range(0,maxPossibleGenomicDist+1,resolution):
    #	print str(i)+"\t"+str(mainDic[i][0])+"\t"+str(mainDic[i][1])
    ### DO THE FIRST PASS ###
    # calculate priors using original fit-hic and plot with standard errors
    print("\n\t\tSPLINE FIT PASS 1 (spline-1) \n"),
    x,y,yerr=calculate_Probabilities(mainDic,libname+".fithic_pass1")
    # now fit spline to the data using power-law residual by improving it <residualFactor> times
    splineXinit,splineYinit,splineResidual=fit_Spline(mainDic,x,y,yerr,options.intersfile,libname+".spline_pass1",biasDic, options.plotimages)
    ### DO THE REFINEMENT ON THE NULL AS MANY STEPS AS DESIRED ###
    #for i in range(2,1+noOfPasses):
    #	print("\n\t\tSPLINE FIT PASS " + str(i)+" (spline-" +str(i) +") \n"),
    #	x,y,yerr=calculate_Probabilities(mainDic,libname+".fithic_pass"+repr(i))
    #	splineX,splineY,splineResidual,isOutlier=fit_Spline(x,y,yerr,options.intersfile,mainDic,libname+".spline_pass"+repr(i))
    print("\nExecution of fit-hic completed successfully. \n\n"),
    return # from main
def read_ICE_biases(infilename):
    """Read per-locus ICE bias values from a gzipped whitespace-separated
    file with lines of the form: <chr> <midpoint> <bias>.

    The file is read twice: the first pass collects the raw bias
    distribution to report its 5th/50th/95th quantiles on stderr, the
    second pass builds the lookup dictionary.

    Returns biasDic: {chromosome: {midpoint: bias}}. Loci whose bias falls
    outside [0.5, 2] are counted and reported as discarded, but their bias
    value is still stored unchanged (the re-assignment to -1 is commented
    out).
    """
    sys.stderr.write("\n\nReading ICE biases. \n")
    biasDic={}
    rawBiases=[]
    infile=gzip.open(infilename, 'r')
    for line in infile:
        words=line.rstrip().split()
        chr=words[0]; midPoint=int(words[1]); bias=float(words[2])
        # a bias of exactly 1.0 is excluded from the quantile statistics
        if bias!=1.0:
            rawBiases.append(bias)
    infile.close()
    #sys.stderr.write("\n\nReading ICE biases. \n")
    botQ,med,topQ=mquantiles(rawBiases,prob=[0.05,0.5,0.95])
    sys.stderr.write("5th quantile of biases: "+str(botQ)+"\n")
    sys.stderr.write("50th quantile of biases: "+str(med)+"\n")
    sys.stderr.write("95th quantile of biases: "+str(topQ)+"\n")
    #m,v=myStats.meanAndVariance(rawBiases)
    #sd=math.sqrt(v)
    #sys.stderr.write(str(m)+"\t"+str(v)+"\t"+str(sd)+"\n")
    #normFactor=sum(rawBiases)/len(rawBiases)
    infile=gzip.open(infilename, 'r')
    totalC=0
    discardC=0
    for line in infile:
        words=line.rstrip().split()
        chr=words[0]; midPoint=int(words[1]); bias=float(words[2])
        # extra conditions
        #if bias<(botQ/2.0):
        if bias<0.5:
        #	bias=-1 #botQ
            discardC+=1
        elif bias>2.:
        #	bias=-1 #topQ
            discardC+=1
        #
        totalC+=1
        # first-seen value wins for duplicate (chr, midpoint) entries
        if not biasDic.has_key(chr):
            biasDic[chr]={}
        if not biasDic[chr].has_key(midPoint):
            biasDic[chr][midPoint]=bias
    infile.close()
    sys.stderr.write("Out of " + str(totalC) + " loci " +str(discardC) +" were discarded with biases not in range [0.5 2]\n\n" )
    return biasDic # from read_ICE_biases
def calculate_Probabilities(mainDic,outfilename):
    """Compute contact probability vs. genomic distance by equal-occupancy
    binning of the in-range intra-chromosomal contact counts.

    mainDic maps distance -> [number of possible pairs, total contact
    count]. Distances are walked in increasing order and accumulated into
    bins of roughly observedIntraInRangeSum/noOfBins contacts each; the
    per-bin target is re-derived after each bin closes so the remaining
    contacts spread evenly over the remaining bins.

    Writes a per-bin summary to <outfilename>.res<resolution>.txt and
    returns [x, y, yerr]: per-bin average distances, mean contact
    probabilities and standard errors (currently always 0).
    """
    print("\nCalculating probability means and standard deviations by equal-occupancy binning of contact counts\n"),
    print("------------------------------------------------------------------------------------\n"),
    outfile=open(outfilename+'.res'+str(resolution)+'.txt', 'w')
    ## total interaction count to put on top of the plot
    #totalInteractionCountForPlot=0
    #for i in range(0,maxPossibleGenomicDist+1,resolution):
    #	totalInteractionCountForPlot += mainDic[i][1]
    #totalInteractionCountForPlot/=2
    desiredPerBin=(observedIntraInRangeSum)/noOfBins
    print("observed intra-chr read counts in range\t"+repr(observedIntraInRangeSum)+ ",\tdesired number of contacts per bin\t" +repr(desiredPerBin)+",\tnumber of bins\t"+repr(noOfBins)+"\n"),
    # the following five lists will be the print outputs
    x=[] # avg genomic distances of bins
    y=[] # avg interaction probabilities of bins
    yerr=[] # stderrs of bins
    pairCounts=[] # number of pairs in bins
    interactionTotals=[] # number of interactions (reads) in bins
    interactionTotalForBinTermination=0
    n=0 # bin counter so far
    totalInteractionCountSoFar=0
    #observedIntraInRangeSum
    distsToGoInAbin=[]
    binFull=0
    for i in range(0,maxPossibleGenomicDist+1,resolution):
        totalInteractionCountSoFar+=mainDic[i][1]
        # skip distances outside [distLowThres, distUpThres]
        if myUtils.in_range_check(i,distLowThres,distUpThres)==False:
            continue
        # if one distance has more than necessary counts to fill a bin
        if mainDic[i][1]>=desiredPerBin:
            distsToGoInAbin.append(i)
            interactionTotalForBinTermination=0
            binFull=1
        # if adding the next distance's counts will fill the bin
        elif interactionTotalForBinTermination+mainDic[i][1] >= desiredPerBin:
            distsToGoInAbin.append(i)
            interactionTotalForBinTermination=0
            binFull=1
        # otherwise keep accumulating into the current bin
        else:
            distsToGoInAbin.append(i)
            interactionTotalForBinTermination+=mainDic[i][1]
        #
        if binFull==1:
            noOfPairsForBin=0
            interactionTotalForBin=0
            avgDistance=0
            # dynamically update the desiredPerBin after each bin is full
            n+=1
            if n<noOfBins:
                desiredPerBin=1.0*(observedIntraInRangeSum-totalInteractionCountSoFar)/(noOfBins-n)
            se_p=0 # for now I'm not worrying about error etc.
            for b in distsToGoInAbin:
                noOfPairsForBin+=mainDic[b][0]
                interactionTotalForBin+=mainDic[b][1]
                # distances are divided by distScaling to avoid overflow
                avgDistance+=1.0*mainDic[b][0]*(b/distScaling)
            #
            meanProbabilityObsv=(1.0*interactionTotalForBin/noOfPairsForBin)/observedIntraInRangeSum
            avgDistance=distScaling*(avgDistance/noOfPairsForBin)
            # append this bin
            x.append(float(avgDistance))
            y.append(float(meanProbabilityObsv))
            yerr.append(float(se_p))
            pairCounts.append(noOfPairsForBin)
            interactionTotals.append(interactionTotalForBin)
            print "%d" % n+ "\t" + "%f" % avgDistance + "\t"+"%.2e" % meanProbabilityObsv + "\t"\
                + "%.2e" % se_p +"\t" +"%d" % noOfPairsForBin +"\t" +"%d" % interactionTotalForBin
            # reset counts
            interactionTotalForBinTermination=0
            binFull=0
            distsToGoInAbin=[]
        # END if
    # END for
    print("Writing equal-occupancy binning results to %s" % outfilename + ".txt\n"),
    outfile.write("avgGenomicDist\tcontactProbability\tstandardError\tnoOfLocusPairs\ttotalOfContactCounts\n")
    for i in range(len(x)):
        outfile.write("%d" % x[i] + "\t"+"%.2e" % y[i]+ "\t" + "%.2e" % yerr[i] + "\t" +"%d" % pairCounts[i] + "\t" +"%d" % interactionTotals[i]+"\n")
    outfile.close()
    return [x,y,yerr] # from calculate_Probabilities
def read_All_Interactions(mainDic,contactCountsFile,noOfFrags):
    """Read the gzipped sparse contact-counts file (lines of the form
    <chr1> <mid1> <chr2> <mid2> <count>) and accumulate the observed
    inter-/intra-chromosomal totals into the module-level global counters.

    For intra-chromosomal pairs within the desired distance range the
    contact count is also added into mainDic[distance][1], and the
    min/max observed genomic distances are updated.

    Returns mainDic (mutated in place).
    """
    print("\nReading all the contact counts\n"),
    print("------------------------------------------------------------------------------------\n"),
    global observedInterAllSum
    global observedInterAllCount
    global observedIntraAllSum
    global observedIntraAllCount
    global observedIntraInRangeSum
    global observedIntraInRangeCount
    global minObservedGenomicDist
    global maxObservedGenomicDist
    #Xvals=[]
    #Xindices=[]
    #for i in range(noOfFrags):
    #	Xvals.append([])
    #	Xindices.append([])
    ##
    infile=gzip.open(contactCountsFile,'r')
    count=0
    for line in infile:
        ch1,mid1,ch2,mid2,contactCount=line.split()
        ### FIXME: this part will need to be fixed for human etc
        #ch1='chr'+ch1
        #ch2='chr'+ch2
        contactCount=float(contactCount)
        interxn=myUtils.Interaction([ch1, int(mid1), ch2, int(mid2)])
        interxn.setCount(contactCount)
        count+=1
        # progress report every million lines
        if count%1000000==0:
            print count
        if interxn.type=='inter':
            observedInterAllSum +=interxn.hitCount
            observedInterAllCount +=1
        else: # any type of intra
            observedIntraAllSum +=interxn.hitCount
            observedIntraAllCount +=1
            if interxn.getType(distLowThres,distUpThres)=='intraInRange':
                minObservedGenomicDist=min(minObservedGenomicDist,interxn.distance)
                maxObservedGenomicDist=max(maxObservedGenomicDist,interxn.distance)
                if interxn.distance in mainDic:
                    mainDic[interxn.distance][1]+=contactCount
                observedIntraInRangeSum +=interxn.hitCount
                observedIntraInRangeCount +=1
        # END else
        #	indx1=allFragsDic[ch1][mid1]
        #	indx2=allFragsDic[ch2][mid2]
        #print str(indx1)+"\t"+str(indx2)
        #	Xvals[indx1].append(contactCount)
        #	Xindices[indx1].append(indx2)
        #	Xvals[indx2].append(contactCount)
        #	Xindices[indx2].append(indx1)
    # END for
    infile.close()
    print("Observed, Intra-chr in range: pairs= "+str(observedIntraInRangeCount) +"\t totalCount= "+str(observedIntraInRangeSum))
    print("Observed, Intra-chr all: pairs= "+str(observedIntraAllCount) +"\t totalCount= "+str(observedIntraAllSum))
    print("Observed, Inter-chr all: pairs= "+str(observedInterAllCount) +"\t totalCount= "+str(observedInterAllSum))
    print("Range of observed genomic distances [%d %d]" % (minObservedGenomicDist,maxObservedGenomicDist) + "\n"),
    #return (mainDic,Xvals,Xindices) # from read_All_Interactions
    return mainDic # from read_All_Interactions
def generate_FragPairs(mainDic,infilename): # lowMappThres
    """Enumerate all possible fragment pairs from the gzipped fragment
    file (lines: <chr> <midpoint> ... <mappability>, whitespace-separated)
    and populate the 'possible pair' counters.

    For every distance i (multiples of resolution) mainDic[i] is set to
    [number of possible intra-chr pairs at that distance, 0]; the second
    slot is filled later by read_All_Interactions. Also sets the global
    possible-pair counts and the baseline inter-/intra-chromosomal
    probabilities.

    Returns (mainDic, total number of fragments).
    """
    print("\nEnumerating all possible intra-chromosomal fragment pairs in-range\n"),
    print("------------------------------------------------------------------------------------\n"),
    global maxPossibleGenomicDist
    global possibleIntraAllCount
    global possibleInterAllCount
    global possibleIntraInRangeCount
    global interChrProb
    global baselineIntraChrProb
    #badFrags=[]
    allFragsDic={}
    #allFragsDicReverse={}
    infile=gzip.open(infilename,'r')
    indx=0
    for line in infile:
        words=line.split()
        currChr=words[0]; currMid=words[1]; mapp=float(words[3]);
        if currChr not in allFragsDic:
            allFragsDic[currChr]={}
        allFragsDic[currChr][currMid]=indx
        #	allFragsDicReverse[indx]=[currChr,currMid]
        #if mapp<=lowMappThres:
        #	badFrags.append(indx)
        indx+=1
    #END
    infile.close()
    noOfFrags=0
    maxFrags={}
    for ch in allFragsDic:
        # largest start coordinate per chromosome (midpoint - resolution/2)
        maxFrags[ch]=max([int(i)-resolution/2 for i in allFragsDic[ch]])
        noOfFrags+=len(allFragsDic[ch])
        maxPossibleGenomicDist=max(maxPossibleGenomicDist,maxFrags[ch])
    #print badFrags
    for i in range(0,maxPossibleGenomicDist+1,resolution):
        mainDic[i]=[0,0]
    for ch in allFragsDic:
        maxFrag=maxFrags[ch]
        n=len(allFragsDic[ch])
        d=0
        # a chromosome with n fragments contributes n-d pairs at the d-th
        # distance step (0, resolution, 2*resolution, ...)
        for i in range(0,maxFrag+1,resolution):
            mainDic[i][0]+=n-d
            d+=1
        #
        possibleInterAllCount+=n*(noOfFrags-n)
        possibleIntraAllCount+=(n*(n+1))/2 # n(n-1) if excluding self
    #
    # each inter-chr pair was counted once from each side above
    possibleInterAllCount/=2
    interChrProb=1.0
    if possibleInterAllCount>0:
        interChrProb=1.0/possibleInterAllCount
    baselineIntraChrProb=1.0/possibleIntraAllCount
    for i in range(0,maxPossibleGenomicDist+1,resolution):
        if myUtils.in_range_check(i,distLowThres,distUpThres):
            possibleIntraInRangeCount+=mainDic[i][0]
        #print str(i)+"\t"+str(mainDic[i][0])
    print("Number of all fragments= "+str(noOfFrags)+"\t resolution= "+ str(resolution))
    print("Possible, Intra-chr in range: pairs= "+str(possibleIntraInRangeCount))
    print("Possible, Intra-chr all: pairs= "+str(possibleIntraAllCount))
    print("Possible, Inter-chr all: pairs= "+str(possibleInterAllCount))
    print("Desired genomic distance range [%d %d]" % (distLowThres,distUpThres) + "\n"),
    print("Range of possible genomic distances [0 %d]" % (maxPossibleGenomicDist) + "\n"),
    return (mainDic,noOfFrags) # return from generate_FragPairs
#@jit
def call_bdtrc(hitCount, observedSum, prior_p):
    """Binomial survival function P(X > hitCount) for X ~ Bin(observedSum, prior_p).

    scipy.special.bdtrc can return NaN or raise for very large counts; in
    either case retry recursively with both counts halved, using the
    p-value of the scaled-down test as an approximation.
    """
    try:
        p_val = scsp.bdtrc(int(hitCount), int(observedSum), prior_p)
        if np.isnan(p_val):
            p_val = call_bdtrc(int(hitCount / 2), int(observedSum / 2), prior_p)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; this catches the case when the interaction
        # count is too big for bdtrc.
        p_val = call_bdtrc(int(hitCount / 2), int(observedSum / 2), prior_p)
    return p_val
def fit_Spline(mainDic,x,y,yerr,infilename,outfilename,biasDic,plotimages):
    """Fit a monotonic univariate spline to the binned contact
    probabilities, then compute a p-value and BH-corrected q-value for
    every interaction in infilename.

    Arguments:
        mainDic     -- {distance: [pairCount, contactCount]}
        x, y, yerr  -- per-bin distances, mean probabilities, std errors
                       from calculate_Probabilities
        infilename  -- gzipped contact-counts file (read twice: once to
                       compute p-values, once to write the output rows)
        outfilename -- prefix for <prefix>.res<resolution>.significances.txt.gz
        biasDic     -- {chr: {midpoint: ICE bias}} (may be empty)
        plotimages  -- when True, also save a PNG of the fit

    Returns [splineX, newSplineY, residual] describing the monotonic
    spline (distances, probabilities, sum of squared fit residuals).
    """
    print("\nFit a univariate spline to the probability means\n"),
    print("------------------------------------------------------------------------------------\n"),
    #print("baseline intra-chr probability: " + repr(baselineIntraChrProb)+ "\n"),
    # maximum residual allowed for spline is set to min(y)^2
    splineError=min(y)*min(y)
    # use fitpack2 method -fit on the real x and y from equal occupancy binning
    ius = UnivariateSpline(x, y, s=splineError)
    #### POST-PROCESS THE SPLINE TO MAKE SURE IT'S NON-INCREASING
    ### NOW I DO THIS BY CALLING AN R function CALLED MONOREG
    ### This does the isotonic regression using option antitonic to make sure
    ### I get monotonically decreasing probabilites with increasion genomic distance
    tempMaxX=max(x)
    tempMinX=min(x)
    tempList=sorted([dis for dis in mainDic])
    splineX=[]
    ### The below for loop will make sure nothing is out of range of [min(x) max(x)]
    ### Therefore everything will be within the range where the spline is defined
    for i in tempList:
        if tempMinX<=i and i<=tempMaxX:
            splineX.append(i)
    # END for
    splineY=ius(splineX)
    # R vector format
    rSplineX=ro.FloatVector(splineX)
    rSplineY=ro.FloatVector(splineY)
    rMonoReg=ro.r['monoreg']
    # do the antitonic regression
    allRres=rMonoReg(rSplineX,rSplineY,type="antitonic")
    rNewSplineY=allRres[3]
    # convert data back to Python format
    newSplineY=[]
    diff=[]
    diffX=[]
    for i in range(len(rNewSplineY)):
        newSplineY.append(rNewSplineY[i])
        if (splineY[i]-newSplineY[i]) > 0:
            diff.append(splineY[i]-newSplineY[i])
            diffX.append(splineX[i])
        # END if
    # END for
    ### Now newSplineY holds the monotonic contact probabilities
    residual =sum([i*i for i in (y - ius(x))])
    if (plotimages):
        ### Now plot the results
        plt.clf()
        fig = plt.figure()
        ax = fig.add_subplot(2,1,1)
        plt.title('Univariate spline fit to the output of equal occupancy binning. \n Residual= %e' % (residual),size='small')
        plt.plot([i/1000.0 for i in x], [i*100000 for i in y], 'ro', label="Means")
        #plt.plot([i/1000.0 for i in xi], [i*100000 for i in yi],'g-',label="Spline fit")
        plt.plot([i/1000.0 for i in splineX], [i*100000 for i in newSplineY],'g-',label="Spline fit")
        #plt.plot([i/1000.0 for i in x], [normalizedInterChrProb*100000 for i in x],'k-',label="Random intra-chromosomal")
        #plt.plot([i/1000.0 for i in x], [interChrProb*100000 for i in x],'b-',label="Inter-chromosomal")
        plt.ylabel('Probability (1e-5)')
        plt.xlabel('Genomic distance (kb)')
        plt.xlim([min(x)/1000.0,max(x)/1000.0])
        ax.legend(loc="upper right")
        ax = fig.add_subplot(2,1,2)
        plt.loglog(splineX,newSplineY,'g-')
        #plt.loglog(xi, yi, 'g-')
        plt.loglog(x, y, 'r.')  # Data
        #plt.loglog(x, [normalizedInterChrProb for i in x],'k-')
        #plt.loglog(x, [interChrProb for i in x],'b-')
        plt.ylabel('Probability (log scale)')
        plt.xlabel('Genomic distance (log scale)')
        #plt.xlim([20000,100000])
        plt.xlim([min(x),max(x)])
        plt.savefig(outfilename+'.res'+str(resolution)+'.png')
        sys.stderr.write("Plotting %s" % outfilename + ".png\n")
    # NOW write the calculated pvalues and corrected pvalues in a file
    infile =gzip.open(infilename, 'r')
    intraInRangeCount=0
    intraOutOfRangeCount=0
    intraVeryProximalCount=0
    interCount=0
    discardCount=0
    print("lower bound on mid-range distances "+ repr(distLowThres) + ", upper bound on mid-range distances " + repr(distUpThres) +"\n"),
    p_vals=[]
    q_vals=[]
    for line in infile:
        words=line.rstrip().split()
        interxn=myUtils.Interaction([words[0], int(words[1]), words[2], int(words[3])])
        interxn.setCount(float(words[4]))
        chr1=words[0]
        chr2=words[2]
        midPoint1=int(words[1])
        midPoint2=int(words[3])
        bias1=1.0; bias2=1.0;  # assumes there is no bias to begin with
        # if the biasDic is not null sets the real bias values
        if len(biasDic)>0:
            if biasDic.has_key(chr1) and biasDic[chr1].has_key(midPoint1):
                bias1=biasDic[chr1][midPoint1]
            if biasDic.has_key(chr2) and biasDic[chr2].has_key(midPoint2):
                bias2=biasDic[chr2][midPoint2]
        if (bias1 < 0.5 or bias1 > 2. or bias2 < 0.5 or bias2 > 2.):
        # if bias1==-1 or bias2==-1:
            p_val=1.0
            discardCount+=1
        elif interxn.type=='intra':
            if interxn.getType(distLowThres,distUpThres)=='intraInRange':
                # make sure the interaction distance is covered by the probability bins
                distToLookUp=max(interxn.distance,min(x))
                distToLookUp=min(distToLookUp,max(x))
                i=min(bisect.bisect_left(splineX, distToLookUp),len(splineX)-1)
                prior_p=newSplineY[i]*(bias1*bias2) # biases added in the picture
                p_val = call_bdtrc(interxn.hitCount-1,observedIntraInRangeSum,prior_p)
                intraInRangeCount +=1
            elif interxn.getType(distLowThres,distUpThres)=='intraShort':
                prior_p=1.0
                p_val=1.0
                intraVeryProximalCount +=1
            elif interxn.getType(distLowThres,distUpThres)=='intraLong':
                ## out of range distance
                ## use the prior of the baseline intra-chr interaction probability
                prior_p=baselineIntraChrProb*(bias1*bias2)  # biases added in the picture
                p_val = call_bdtrc(interxn.hitCount-1,observedIntraAllSum,prior_p)
                intraOutOfRangeCount +=1
            # END if
        else: # inter
            #prior_p=normalizedInterChrProb
            prior_p=interChrProb*(bias1*bias2) # biases added in the picture
            ############# THIS HAS TO BE interactionCount-1 ##################
            p_val = call_bdtrc(interxn.hitCount-1,observedInterAllSum,prior_p)
            interCount +=1
        #
        p_vals.append(p_val)
    # END for
    infile.close()
    print("CHECK: sum of p-values %.2f" % (sum(p_vals)))
    # Do the BH FDR correction
    q_vals=myStats.benjamini_hochberg_correction(p_vals, possibleInterAllCount+possibleIntraAllCount)
    #q_vals=myStats.benjamini_hochberg_correction(p_vals, possibleIntraInRangeCount)
    #print("possibleIntraInRangeCount " + repr(possibleIntraInRangeCount)+"\n"),
    infile =gzip.open(infilename, 'r')
    outfile =gzip.open(outfilename+'.res'+str(resolution)+'.significances.txt.gz', 'w')
    print("Writing p-values and q-values to file %s" % outfilename + ".significances.txt\n"),
    print("Number of pairs discarded due to bias not in range [0.5 2]\n"),
    if len(biasDic)>0:
        outfile.write("chr1\tfragmentMid1\tchr2\tfragmentMid2\tcontactCount\tp-value\tq-value\tbias1\tbias2\tbiasCorrectedContactCount\n")
    else:
        outfile.write("chr1\tfragmentMid1\tchr2\tfragmentMid2\tcontactCount\tp-value\tq-value\n")
    count=0
    for line in infile:
        words=line.rstrip().split()
        chrNo1=str(words[0])
        midPoint1=int(words[1])
        chrNo2=str(words[2])
        midPoint2=int(words[3])
        interactionCount=int(words[4])
        p_val=p_vals[count]
        q_val=q_vals[count]
        bias1=1.0; bias2=1.0;  # assumes there is no bias to begin with
        if len(biasDic)>0:
            # FIX: look up with this line's chrNo1/chrNo2. The previous code
            # used chr1/chr2, stale variables left over from the p-value loop
            # above, so the reported biases belonged to the wrong loci.
            if biasDic.has_key(chrNo1) and biasDic[chrNo1].has_key(midPoint1):
                bias1=biasDic[chrNo1][midPoint1]
            if biasDic.has_key(chrNo2) and biasDic[chrNo2].has_key(midPoint2):
                bias2=biasDic[chrNo2][midPoint2]
        bcCount=bias1*bias2*interactionCount
        #if chrNo1==chrNo2: # intra
        #	interactionDistance=abs(midPoint1-midPoint2) # dist
        #	if myUtils.in_range_check(interactionDistance,distLowThres,distUpThres):
        #		outfile.write("%s\t%d\t%s\t%d\t%d\t%e\t%e\n" % (str(chrNo1),midPoint1,str(chrNo2),midPoint2,interactionCount,p_val,q_val))
        #else:
        #	outfile.write("%s\t%d\t%s\t%d\t%d\t%e\t%e\n" % (str(chrNo1),midPoint1,str(chrNo2),midPoint2,interactionCount,p_val,q_val))
        if len(biasDic)>0:
            outfile.write("%s\t%d\t%s\t%d\t%.2f\t%e\t%e\t%.3f\t%.3f\t%.2f\n" % (chrNo1,midPoint1,chrNo2,midPoint2,interactionCount,p_val,q_val, bias1, bias2, bcCount))
        else:
            outfile.write("%s\t%d\t%s\t%d\t%.2f\t%e\t%e\n" % (chrNo1,midPoint1,chrNo2,midPoint2,interactionCount,p_val,q_val))
        count+=1
    # END for - printing pvals and qvals for all the interactions
    outfile.close()
    infile.close()
    return [splineX, newSplineY, residual] # from fit_Spline
# Script entry point.
if __name__ == "__main__":
    main()
Catch NaN values returned by the statistical test.
#!/usr/bin/env python
'''
Created on Jan 29, 2014
Modified to take ICE biases into account
@author: ferhat ay
'''
### import statements ###
import sys
import os
import math
import time
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
from pylab import *
from scipy.stats.mstats import mquantiles
from scipy import *
from scipy.interpolate import Rbf, UnivariateSpline
from scipy import optimize
from optparse import OptionParser
import scipy.special as scsp
import bisect
from random import *
import myStats
import myUtils
import gzip
## R dependencies
import rpy2.robjects as ro
from rpy2.robjects.packages import importr
## Install the fdrtool package from R shell using
## install.packages("fdrtool")
fdrtool = importr('fdrtool')
#########################
#### Define utility functions
## power-law model amp * x^index, kept as a reference fit for the decay curve
powerlaw = lambda x, amp, index: amp * (x**index)
#########################
##### global variables shared by functions ######
# intra-chromosomal contacts in-range
possibleIntraInRangeCount=0 # count of all possible intra-chr fragment pairs within [distLowThres, distUpThres]
observedIntraInRangeCount=0
observedIntraInRangeSum=0
# intra-chromosomal contacts
possibleIntraAllCount=0 # count of all possible intra-chr fragment pairs
observedIntraAllCount=0
observedIntraAllSum=0
# inter-chromosomal contacts
possibleInterAllCount=0 # count of all possible inter-chr fragment pairs
observedInterAllCount=0
observedInterAllSum=0
baselineIntraChrProb=0 # 1.0/possibleIntraAllCount
interChrProb=0 # 1.0/possibleInterAllCount
minObservedGenomicDist=500000000 # some number bigger than the biggest chromosome length
maxObservedGenomicDist=0
maxPossibleGenomicDist=0
#distScaling just avoids overflow - but is necessary for large genomes
distScaling=10000.0
# unit-conversion helpers (bp -> kb / Mb, probability -> 1e-5 units)
toKb=10**-3
toMb=10**-6
toProb=10**5
#########################
def main():
    """
    Parse command-line options and run the fit-hic pipeline:
    enumerate all possible fragment pairs, read the observed contacts,
    bin them by genomic distance, fit a spline null model and assign
    p-values/q-values to every interaction.
    """
    ### parse the command line arguments
    usage = "usage: %prog [options]"
    parser = OptionParser(usage=usage, version="%prog 1.0 (NGSANE adaption)")
    parser.add_option("-f", "--fragments", dest="fragsfile",
                      help="File containing the list of midpoints (or start indices) of the fragments/windows/meta-fragments for the reference genome." )
    parser.add_option("-i", "--interactions", dest="intersfile",
                      help="File containing the list of contact counts between fragment/window/meta-fragment pairs.")
    parser.add_option("-t", "--biases", dest="biasfile",
                      help="OPTIONAL: biases calculated by ICE for each locus are read from BIASFILE")
    parser.add_option("-r", "--resolution", dest="resolution",type="int",
                      help="Length of fixed-size genomic windows used to process the data. E.x. 10000")
    parser.add_option("-l", "--lib", dest="libname",
                      help="OPTIONAL: A prefix (generally the name of the library) that is going to be used for output file names.")
    parser.add_option("-b", "--noOfBins", dest="noOfBins", type="int",
                      help="OPTIONAL: Number of equal-occupancy bins to process the data. Default is 100")
    parser.add_option("-p", "--passes", dest="noOfPasses",type="int",
                      help="OPTIONAL: Number of passes after the initial spline fit. DEFAULT is 2 (spline-1 and spline-2).")
    parser.add_option("-m", "--mappabilityThres", dest="mappabilityThreshold", type="int",
                      help="OPTIONAL: Minimum number of contacts per locus that has to exist to call it mappable. DEFAULT is 1.")
    parser.add_option("-U", "--upperbound", dest="distUpThres", type="int",
                      help="OPTIONAL: Upper bound on the mid-range distances. DEFAULT no limit.")
    parser.add_option("-L", "--lowerbound", dest="distLowThres", type="int",
                      help="OPTIONAL: Lower bound on the mid-range distances. DEFAULT no limit.")
    parser.add_option("-y", "--usebinning",
                      action="store_true", dest="useBinning", help="OPTIONAL: use equal-occupancy binning. DEFAULT.")
    parser.add_option("-n", "--nobinning",
                      action="store_false", dest="useBinning", help="OPTIONAL: do not use binning and fit to raw data points. This can be infeasible for large data sets.")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose")
    parser.add_option("-q", "--quiet",
                      action="store_false", dest="verbose")
    parser.add_option("-P", "--plotImages",
                      action="store_true", dest="plotimages")
    parser.set_defaults(verbose=True, useBinning=True, noOfBins=100, distLowThres=-1, distUpThres=-1, mappabilityThreshold=1,noOfPasses=2,libname="",biasfile='none', plotimages=False)
    (options, args) = parser.parse_args()
    if len(args) != 0:
        parser.error("incorrect number of arguments")
    # publish the parsed options as the module-level globals that the
    # worker functions below read
    global noOfBins
    global distUpThres
    global distLowThres
    global useBinning
    global libname
    global mappabilityThreshold
    global noOfPasses
    global resolution
    noOfBins=options.noOfBins # 100 by default
    distUpThres=options.distUpThres # -1 by default, means no upper bound
    distLowThres=options.distLowThres # -1 by default, means no lower bound
    mappabilityThreshold=options.mappabilityThreshold # 1 by default, means consider any fragment that participates in one contact
    useBinning=options.useBinning
    libname=options.libname
    noOfPasses=options.noOfPasses
    resolution=options.resolution
    mainDic={} # given a distance this dictionary will return [Npairs,TotalContactCount]
    # read the mandatory input files -f and -i
    (mainDic,noOfFrags)=generate_FragPairs(mainDic,options.fragsfile)
    #for i in range(0,maxPossibleGenomicDist+1,resolution):
    #	print str(i)+"\t"+str(mainDic[i][0])
    biasDic={}
    if options.biasfile!='none':
        biasDic=read_ICE_biases(options.biasfile)
    # read contacts in sparse form
    mainDic=read_All_Interactions(mainDic,options.intersfile,noOfFrags)
    #(mainDic,Xvals,Xindices)=read_All_Interactions(mainDic,options.intersfile,allFragsDic,noOfFrags)
    #t=[0,0]
    #for i in range(len(Xvals)):
    #	t[0]+=len(Xvals[i])
    #	t[1]+=sum(Xvals[i])
    #print t
    #for i in range(0,maxPossibleGenomicDist+1,resolution):
    #	print str(i)+"\t"+str(mainDic[i][0])+"\t"+str(mainDic[i][1])
    ### DO THE FIRST PASS ###
    # calculate priors using original fit-hic and plot with standard errors
    print("\n\t\tSPLINE FIT PASS 1 (spline-1) \n"),
    x,y,yerr=calculate_Probabilities(mainDic,libname+".fithic_pass1")
    # now fit spline to the data using power-law residual by improving it <residualFactor> times
    splineXinit,splineYinit,splineResidual=fit_Spline(mainDic,x,y,yerr,options.intersfile,libname+".spline_pass1",biasDic, options.plotimages)
    ### DO THE REFINEMENT ON THE NULL AS MANY STEPS AS DESIRED ###
    #for i in range(2,1+noOfPasses):
    #	print("\n\t\tSPLINE FIT PASS " + str(i)+" (spline-" +str(i) +") \n"),
    #	x,y,yerr=calculate_Probabilities(mainDic,libname+".fithic_pass"+repr(i))
    #	splineX,splineY,splineResidual,isOutlier=fit_Spline(x,y,yerr,options.intersfile,mainDic,libname+".spline_pass"+repr(i))
    print("\nExecution of fit-hic completed successfully. \n\n"),
    return # from main
def read_ICE_biases(infilename):
    """
    Read per-locus ICE bias values from a gzipped file.

    Each input line has three whitespace-separated fields:
    chromosome, fragment midpoint, bias value.

    Returns a nested dictionary: biasDic[chromosome][midpoint] = bias.
    Loci with bias outside [0.5, 2] are only counted and reported on
    stderr here; their values are still stored and filtered out later
    when p-values are assigned.
    """
    sys.stderr.write("\n\nReading ICE biases. \n")
    biasDic={}
    rawBiases=[]
    # First pass: collect all non-trivial (!=1.0) biases to report quantiles.
    infile=gzip.open(infilename, 'r')
    for line in infile:
        words=line.rstrip().split()
        # renamed from 'chr' to avoid shadowing the builtin
        chrom=words[0]; midPoint=int(words[1]); bias=float(words[2])
        if bias!=1.0:
            rawBiases.append(bias)
    infile.close()
    botQ,med,topQ=mquantiles(rawBiases,prob=[0.05,0.5,0.95])
    sys.stderr.write("5th quantile of biases: "+str(botQ)+"\n")
    sys.stderr.write("50th quantile of biases: "+str(med)+"\n")
    sys.stderr.write("95th quantile of biases: "+str(topQ)+"\n")
    # Second pass: store every bias and count those outside [0.5, 2].
    infile=gzip.open(infilename, 'r')
    totalC=0
    discardC=0
    for line in infile:
        words=line.rstrip().split()
        chrom=words[0]; midPoint=int(words[1]); bias=float(words[2])
        # out-of-range biases are counted here but discarded downstream
        if bias<0.5:
            discardC+=1
        elif bias>2.:
            discardC+=1
        totalC+=1
        # 'in'/'not in' replaces dict.has_key(), which Python 3 removed
        if chrom not in biasDic:
            biasDic[chrom]={}
        if midPoint not in biasDic[chrom]:
            biasDic[chrom][midPoint]=bias
    infile.close()
    sys.stderr.write("Out of " + str(totalC) + " loci " +str(discardC) +" were discarded with biases not in range [0.5 2]\n\n" )
    return biasDic # from read_ICE_biases
def calculate_Probabilities(mainDic,outfilename):
    """
    Group genomic distances into ~noOfBins equal-occupancy bins and
    compute the mean contact probability per bin.

    mainDic -- distance -> [number of possible pairs, total contact count]
    outfilename -- prefix of the text file the binning results go to

    Returns [x, y, yerr]: per-bin average genomic distance, average
    contact probability and standard error (currently always 0).
    """
    print("\nCalculating probability means and standard deviations by equal-occupancy binning of contact counts\n"),
    print("------------------------------------------------------------------------------------\n"),
    outfile=open(outfilename+'.res'+str(resolution)+'.txt', 'w')
    ## total interaction count to put on top of the plot
    #totalInteractionCountForPlot=0
    #for i in range(0,maxPossibleGenomicDist+1,resolution):
    #	totalInteractionCountForPlot += mainDic[i][1]
    #totalInteractionCountForPlot/=2
    desiredPerBin=(observedIntraInRangeSum)/noOfBins
    print("observed intra-chr read counts in range\t"+repr(observedIntraInRangeSum)+ ",\tdesired number of contacts per bin\t" +repr(desiredPerBin)+",\tnumber of bins\t"+repr(noOfBins)+"\n"),
    # the following five lists will be the print outputs
    x=[] # avg genomic distances of bins
    y=[] # avg interaction probabilities of bins
    yerr=[] # stderrs of bins
    pairCounts=[] # number of pairs in bins
    interactionTotals=[] # number of interactions (reads) in bins
    interactionTotalForBinTermination=0
    n=0 # bin counter so far
    totalInteractionCountSoFar=0
    #observedIntraInRangeSum
    distsToGoInAbin=[]
    binFull=0
    for i in range(0,maxPossibleGenomicDist+1,resolution):
        totalInteractionCountSoFar+=mainDic[i][1]
        # only bin distances within the user-specified range
        if myUtils.in_range_check(i,distLowThres,distUpThres)==False:
            continue
        # if one distance has more than necessary counts to fill a bin
        if mainDic[i][1]>=desiredPerBin:
            distsToGoInAbin.append(i)
            interactionTotalForBinTermination=0
            binFull=1
        # if adding the next bin will fill the bin
        elif interactionTotalForBinTermination+mainDic[i][1] >= desiredPerBin:
            distsToGoInAbin.append(i)
            interactionTotalForBinTermination=0
            binFull=1
        # otherwise keep accumulating distances into the current bin
        else:
            distsToGoInAbin.append(i)
            interactionTotalForBinTermination+=mainDic[i][1]
        #
        if binFull==1:
            noOfPairsForBin=0
            interactionTotalForBin=0
            avgDistance=0
            # dynamically update the desiredPerBin after each bin is full
            n+=1
            if n<noOfBins:
                desiredPerBin=1.0*(observedIntraInRangeSum-totalInteractionCountSoFar)/(noOfBins-n)
            se_p=0 # for now I'm not worrying about error etc.
            for b in distsToGoInAbin:
                noOfPairsForBin+=mainDic[b][0]
                interactionTotalForBin+=mainDic[b][1]
                # weighted by pair count; distScaling avoids overflow on large genomes
                avgDistance+=1.0*mainDic[b][0]*(b/distScaling)
            #
            meanProbabilityObsv=(1.0*interactionTotalForBin/noOfPairsForBin)/observedIntraInRangeSum
            avgDistance=distScaling*(avgDistance/noOfPairsForBin)
            # append this bin
            x.append(float(avgDistance))
            y.append(float(meanProbabilityObsv))
            yerr.append(float(se_p))
            pairCounts.append(noOfPairsForBin)
            interactionTotals.append(interactionTotalForBin)
            print "%d" % n+ "\t" + "%f" % avgDistance + "\t"+"%.2e" % meanProbabilityObsv + "\t"\
                + "%.2e" % se_p +"\t" +"%d" % noOfPairsForBin +"\t" +"%d" % interactionTotalForBin
            # reset counts
            interactionTotalForBinTermination=0
            binFull=0
            distsToGoInAbin=[]
        # END if
    # END for
    print("Writing equal-occupancy binning results to %s" % outfilename + ".txt\n"),
    outfile.write("avgGenomicDist\tcontactProbability\tstandardError\tnoOfLocusPairs\ttotalOfContactCounts\n")
    for i in range(len(x)):
        outfile.write("%d" % x[i] + "\t"+"%.2e" % y[i]+ "\t" + "%.2e" % yerr[i] + "\t" +"%d" % pairCounts[i] + "\t" +"%d" % interactionTotals[i]+"\n")
    outfile.close()
    return [x,y,yerr] # from calculate_Probabilities
def read_All_Interactions(mainDic,contactCountsFile,noOfFrags):
    """
    Read the gzipped sparse contact file and tally observed contacts.

    Each line has five fields: chr1 mid1 chr2 mid2 contactCount.
    Updates the global observed* counters and the min/max observed
    genomic distance, and adds each in-range intra-chromosomal count to
    mainDic[distance][1].

    Returns the updated mainDic.
    """
    print("\nReading all the contact counts\n"),
    print("------------------------------------------------------------------------------------\n"),
    global observedInterAllSum
    global observedInterAllCount
    global observedIntraAllSum
    global observedIntraAllCount
    global observedIntraInRangeSum
    global observedIntraInRangeCount
    global minObservedGenomicDist
    global maxObservedGenomicDist
    #Xvals=[]
    #Xindices=[]
    #for i in range(noOfFrags):
    #	Xvals.append([])
    #	Xindices.append([])
    ##
    infile=gzip.open(contactCountsFile,'r')
    count=0
    for line in infile:
        ch1,mid1,ch2,mid2,contactCount=line.split()
        ### FIXME: this part will need to be fixed for human etc
        #ch1='chr'+ch1
        #ch2='chr'+ch2
        contactCount=float(contactCount)
        interxn=myUtils.Interaction([ch1, int(mid1), ch2, int(mid2)])
        interxn.setCount(contactCount)
        count+=1
        # progress indicator for very large input files
        if count%1000000==0:
            print count
        if interxn.type=='inter':
            observedInterAllSum +=interxn.hitCount
            observedInterAllCount +=1
        else: # any type of intra
            observedIntraAllSum +=interxn.hitCount
            observedIntraAllCount +=1
            if interxn.getType(distLowThres,distUpThres)=='intraInRange':
                minObservedGenomicDist=min(minObservedGenomicDist,interxn.distance)
                maxObservedGenomicDist=max(maxObservedGenomicDist,interxn.distance)
                if interxn.distance in mainDic:
                    mainDic[interxn.distance][1]+=contactCount
                observedIntraInRangeSum +=interxn.hitCount
                observedIntraInRangeCount +=1
        # END else
        #	indx1=allFragsDic[ch1][mid1]
        #	indx2=allFragsDic[ch2][mid2]
        #print str(indx1)+"\t"+str(indx2)
        #	Xvals[indx1].append(contactCount)
        #	Xindices[indx1].append(indx2)
        #	Xvals[indx2].append(contactCount)
        #	Xindices[indx2].append(indx1)
    # END for
    infile.close()
    print("Observed, Intra-chr in range: pairs= "+str(observedIntraInRangeCount) +"\t totalCount= "+str(observedIntraInRangeSum))
    print("Observed, Intra-chr all: pairs= "+str(observedIntraAllCount) +"\t totalCount= "+str(observedIntraAllSum))
    print("Observed, Inter-chr all: pairs= "+str(observedInterAllCount) +"\t totalCount= "+str(observedInterAllSum))
    print("Range of observed genomic distances [%d %d]" % (minObservedGenomicDist,maxObservedGenomicDist) + "\n"),
    #return (mainDic,Xvals,Xindices) # from read_All_Interactions
    return mainDic # from read_All_Interactions
def generate_FragPairs(mainDic,infilename): # lowMappThres
    """
    Enumerate all possible intra-chromosomal fragment pairs.

    Reads the gzipped fragment list (columns: chromosome, midpoint, ...,
    mappability) and, for every genomic distance d (multiples of
    `resolution`), stores in mainDic[d][0] the number of possible
    intra-chromosomal pairs at that distance.  Also fills in the global
    possible-pair counters and the baseline probabilities.

    Returns (mainDic, noOfFrags) where noOfFrags is the total number of
    fragments read.
    """
    print("\nEnumerating all possible intra-chromosomal fragment pairs in-range\n"),
    print("------------------------------------------------------------------------------------\n"),
    global maxPossibleGenomicDist
    global possibleIntraAllCount
    global possibleInterAllCount
    global possibleIntraInRangeCount
    global interChrProb
    global baselineIntraChrProb
    allFragsDic={}
    infile=gzip.open(infilename,'r')
    indx=0
    for line in infile:
        words=line.split()
        # mapp (column 4) is parsed but currently unused (low-mappability
        # filtering is disabled)
        currChr=words[0]; currMid=words[1]; mapp=float(words[3]);
        if currChr not in allFragsDic:
            allFragsDic[currChr]={}
        allFragsDic[currChr][currMid]=indx
        indx+=1
    infile.close()
    noOfFrags=0
    maxFrags={}
    for ch in allFragsDic:
        # largest fragment start (midpoint minus half a window) per chromosome;
        # '//' keeps the arithmetic integral under both Python 2 and 3
        maxFrags[ch]=max([int(i)-resolution//2 for i in allFragsDic[ch]])
        noOfFrags+=len(allFragsDic[ch])
        maxPossibleGenomicDist=max(maxPossibleGenomicDist,maxFrags[ch])
    for i in range(0,maxPossibleGenomicDist+1,resolution):
        mainDic[i]=[0,0]
    for ch in allFragsDic:
        maxFrag=maxFrags[ch]
        n=len(allFragsDic[ch])
        d=0
        # at the d-th distance step there are n-d possible pairs on this chromosome
        for i in range(0,maxFrag+1,resolution):
            mainDic[i][0]+=n-d
            d+=1
        #
        possibleInterAllCount+=n*(noOfFrags-n)
        possibleIntraAllCount+=(n*(n+1))//2 # n(n-1) if excluding self
    #
    possibleInterAllCount//=2 # each inter-chr pair was counted from both sides
    interChrProb=1.0
    if possibleInterAllCount>0:
        interChrProb=1.0/possibleInterAllCount
    # guard against an empty fragment list, mirroring interChrProb above
    baselineIntraChrProb=1.0
    if possibleIntraAllCount>0:
        baselineIntraChrProb=1.0/possibleIntraAllCount
    for i in range(0,maxPossibleGenomicDist+1,resolution):
        if myUtils.in_range_check(i,distLowThres,distUpThres):
            possibleIntraInRangeCount+=mainDic[i][0]
    print("Number of all fragments= "+str(noOfFrags)+"\t resolution= "+ str(resolution))
    print("Possible, Intra-chr in range: pairs= "+str(possibleIntraInRangeCount))
    print("Possible, Intra-chr all: pairs= "+str(possibleIntraAllCount))
    print("Possible, Inter-chr all: pairs= "+str(possibleInterAllCount))
    print("Desired genomic distance range [%d %d]" % (distLowThres,distUpThres) + "\n"),
    print("Range of possible genomic distances [0 %d]" % (maxPossibleGenomicDist) + "\n"),
    return (mainDic,noOfFrags) # return from generate_FragPairs
def call_bdtrc(hitCount, observedSum, prior_p, recursion=0):
    """
    Binomial survival function P(X > hitCount) with a NaN fallback.

    scsp.bdtrc can return NaN for very large counts; when that happens,
    retry with both counts halved (which preserves the success ratio),
    giving up after 10 levels and reporting a non-significant 1.0.
    """
    if recursion >= 10:
        # too many halvings already; treat as non-significant
        return 1.0
    result = scsp.bdtrc(int(hitCount), int(observedSum), prior_p)
    if not np.isnan(result):
        return result
    # NaN from the stats test: halve the counts and try again
    return call_bdtrc(int(hitCount / 2), int(observedSum / 2), prior_p, recursion + 1)
def fit_Spline(mainDic,x,y,yerr,infilename,outfilename,biasDic,plotimages):
    """
    Fit a univariate spline to the binned contact probabilities, force it
    to be non-increasing, and use it as the null model to assign a
    p-value and a Benjamini-Hochberg q-value to every contact.

    mainDic     -- distance -> [possible pair count, contact count]
    x, y, yerr  -- per-bin average distances, probabilities and stderrs
    infilename  -- gzipped contact file (chr1 mid1 chr2 mid2 count)
    outfilename -- prefix for the gzipped significances output file
    biasDic     -- biasDic[chr][mid] = ICE bias; empty dict disables biases
    plotimages  -- if True, save a PNG of the fitted spline

    Returns [splineX, newSplineY, residual].
    """
    print("\nFit a univariate spline to the probability means\n"),
    print("------------------------------------------------------------------------------------\n"),
    # maximum residual allowed for spline is set to min(y)^2
    splineError=min(y)*min(y)
    # use fitpack2 method -fit on the real x and y from equal occupancy binning
    ius = UnivariateSpline(x, y, s=splineError)
    #### POST-PROCESS THE SPLINE TO MAKE SURE IT'S NON-INCREASING
    ### done via isotonic regression (R function monoreg, type="antitonic")
    ### so probabilities decrease monotonically with genomic distance
    tempMaxX=max(x)
    tempMinX=min(x)
    tempList=sorted([dis for dis in mainDic])
    splineX=[]
    ### keep only distances within [min(x), max(x)], where the spline is defined
    for i in tempList:
        if tempMinX<=i and i<=tempMaxX:
            splineX.append(i)
    # END for
    splineY=ius(splineX)
    # R vector format
    rSplineX=ro.FloatVector(splineX)
    rSplineY=ro.FloatVector(splineY)
    rMonoReg=ro.r['monoreg']
    # do the antitonic regression
    allRres=rMonoReg(rSplineX,rSplineY,type="antitonic")
    rNewSplineY=allRres[3]
    # convert data back to Python format
    newSplineY=[]
    diff=[]
    diffX=[]
    for i in range(len(rNewSplineY)):
        newSplineY.append(rNewSplineY[i])
        if (splineY[i]-newSplineY[i]) > 0:
            diff.append(splineY[i]-newSplineY[i])
            diffX.append(splineX[i])
        # END if
    # END for
    ### Now newSplineY holds the monotonic contact probabilities
    residual =sum([i*i for i in (y - ius(x))])
    if (plotimages):
        ### Now plot the results
        plt.clf()
        fig = plt.figure()
        ax = fig.add_subplot(2,1,1)
        plt.title('Univariate spline fit to the output of equal occupancy binning. \n Residual= %e' % (residual),size='small')
        plt.plot([i/1000.0 for i in x], [i*100000 for i in y], 'ro', label="Means")
        plt.plot([i/1000.0 for i in splineX], [i*100000 for i in newSplineY],'g-',label="Spline fit")
        plt.ylabel('Probability (1e-5)')
        plt.xlabel('Genomic distance (kb)')
        plt.xlim([min(x)/1000.0,max(x)/1000.0])
        ax.legend(loc="upper right")
        ax = fig.add_subplot(2,1,2)
        plt.loglog(splineX,newSplineY,'g-')
        plt.loglog(x, y, 'r.') # Data
        plt.ylabel('Probability (log scale)')
        plt.xlabel('Genomic distance (log scale)')
        plt.xlim([min(x),max(x)])
        plt.savefig(outfilename+'.res'+str(resolution)+'.png')
        sys.stderr.write("Plotting %s" % outfilename + ".png\n")
    # NOW write the calculated pvalues and corrected pvalues in a file
    infile =gzip.open(infilename, 'r')
    intraInRangeCount=0
    intraOutOfRangeCount=0
    intraVeryProximalCount=0
    interCount=0
    discardCount=0
    print("lower bound on mid-range distances "+ repr(distLowThres) + ", upper bound on mid-range distances " + repr(distUpThres) +"\n"),
    p_vals=[]
    q_vals=[]
    for line in infile:
        words=line.rstrip().split()
        interxn=myUtils.Interaction([words[0], int(words[1]), words[2], int(words[3])])
        interxn.setCount(float(words[4]))
        chr1=words[0]
        chr2=words[2]
        midPoint1=int(words[1])
        midPoint2=int(words[3])
        bias1=1.0; bias2=1.0; # assumes there is no bias to begin with
        # if the biasDic is not null sets the real bias values
        # ('in' replaces dict.has_key(), which Python 3 removed)
        if len(biasDic)>0:
            if chr1 in biasDic and midPoint1 in biasDic[chr1]:
                bias1=biasDic[chr1][midPoint1]
            if chr2 in biasDic and midPoint2 in biasDic[chr2]:
                bias2=biasDic[chr2][midPoint2]
        if (bias1 < 0.5 or bias1 > 2. or bias2 < 0.5 or bias2 > 2.):
            # loci with extreme ICE biases are not tested
            p_val=1.0
            discardCount+=1
        elif interxn.type=='intra':
            if interxn.getType(distLowThres,distUpThres)=='intraInRange':
                # make sure the interaction distance is covered by the probability bins
                distToLookUp=max(interxn.distance,min(x))
                distToLookUp=min(distToLookUp,max(x))
                i=min(bisect.bisect_left(splineX, distToLookUp),len(splineX)-1)
                prior_p=newSplineY[i]*(bias1*bias2) # biases added in the picture
                p_val = call_bdtrc(interxn.hitCount-1,observedIntraInRangeSum,prior_p)
                intraInRangeCount +=1
            elif interxn.getType(distLowThres,distUpThres)=='intraShort':
                # too proximal to test; always non-significant
                prior_p=1.0
                p_val=1.0
                intraVeryProximalCount +=1
            elif interxn.getType(distLowThres,distUpThres)=='intraLong':
                ## out of range distance
                ## use the prior of the baseline intra-chr interaction probability
                prior_p=baselineIntraChrProb*(bias1*bias2) # biases added in the picture
                p_val = call_bdtrc(interxn.hitCount-1,observedIntraAllSum,prior_p)
                intraOutOfRangeCount +=1
            # END if
        else: # inter
            prior_p=interChrProb*(bias1*bias2) # biases added in the picture
            ############# THIS HAS TO BE interactionCount-1 ##################
            p_val = call_bdtrc(interxn.hitCount-1,observedInterAllSum,prior_p)
            interCount +=1
        #
        p_vals.append(p_val)
    # END for
    infile.close()
    print("CHECK: sum of p-values %.2f" % (sum(p_vals)))
    # Do the BH FDR correction
    q_vals=myStats.benjamini_hochberg_correction(p_vals, possibleInterAllCount+possibleIntraAllCount)
    infile =gzip.open(infilename, 'r')
    outfile =gzip.open(outfilename+'.res'+str(resolution)+'.significances.txt.gz', 'w')
    print("Writing p-values and q-values to file %s" % outfilename + ".significances.txt\n"),
    print("Number of pairs discarded due to bias not in range [0.5 2]\n"),
    if len(biasDic)>0:
        outfile.write("chr1\tfragmentMid1\tchr2\tfragmentMid2\tcontactCount\tp-value\tq-value\tbias1\tbias2\tbiasCorrectedContactCount\n")
    else:
        outfile.write("chr1\tfragmentMid1\tchr2\tfragmentMid2\tcontactCount\tp-value\tq-value\n")
    count=0
    for line in infile:
        words=line.rstrip().split()
        chrNo1=str(words[0])
        midPoint1=int(words[1])
        chrNo2=str(words[2])
        midPoint2=int(words[3])
        # BUGFIX: parse as float; the p-value pass above reads this column as
        # float and the output is formatted with %.2f, so int() would crash on
        # fractional (e.g. already bias-corrected) counts
        interactionCount=float(words[4])
        p_val=p_vals[count]
        q_val=q_vals[count]
        bias1=1.0; bias2=1.0; # assumes there is no bias to begin with
        if len(biasDic)>0:
            # BUGFIX: look the biases up with THIS line's chromosomes
            # (chrNo1/chrNo2); the old code reused the stale chr1/chr2
            # variables left over from the p-value loop above, so every
            # output line got the biases of the file's last record
            if chrNo1 in biasDic and midPoint1 in biasDic[chrNo1]:
                bias1=biasDic[chrNo1][midPoint1]
            if chrNo2 in biasDic and midPoint2 in biasDic[chrNo2]:
                bias2=biasDic[chrNo2][midPoint2]
        bcCount=bias1*bias2*interactionCount
        if len(biasDic)>0:
            outfile.write("%s\t%d\t%s\t%d\t%.2f\t%e\t%e\t%.3f\t%.3f\t%.2f\n" % (chrNo1,midPoint1,chrNo2,midPoint2,interactionCount,p_val,q_val, bias1, bias2, bcCount))
        else:
            outfile.write("%s\t%d\t%s\t%d\t%.2f\t%e\t%e\n" % (chrNo1,midPoint1,chrNo2,midPoint2,interactionCount,p_val,q_val))
        count+=1
    # END for - printing pvals and qvals for all the interactions
    outfile.close()
    infile.close()
    return [splineX, newSplineY, residual] # from fit_Spline
# Script entry point: run the full fit-hic pipeline when executed directly.
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import sys
import re
from contextlib import contextmanager
from .util import memoize
from . import buffering
from .exceptions import * # @UnusedWildImport
from .ast import AST
class Parser(object):
    """
    Base class for generated recursive-descent (PEG-style) parsers.

    Grammar rules are methods named ``_<rule>_`` on subclasses; an
    optional semantic method with the plain rule name post-processes the
    node a rule produced.  Whitespace and comments are skipped between
    tokens, rule invocations are memoized by (rule, position), and AST /
    concrete-syntax nodes are collected on internal stacks.
    """

    def __init__(self, text,
                 whitespace=None,
                 comments_re=None,
                 ignorecase=False,
                 verbose=False,
                 bufferClass=buffering.Buffer):
        self.text = text
        self.whitespace = set(whitespace if whitespace else '\t\v\n\r ')
        self.comments_re = comments_re
        self.ignorecase = ignorecase
        self.verbose = verbose
        self._bufferClass = bufferClass
        self._buffer = None
        self._ast_stack = []
        self._concrete_stack = []
        self._rule_stack = []

    def parse(self, rule_name):
        """Parse self.text starting at rule_name and return the AST."""
        try:
            self._buffer = self._bufferClass(self.text, self.whitespace)
            self._push_ast()
            self._concrete_stack.append([])
            self._call(rule_name, rule_name)
            return self.ast
        finally:
            # drop everything but the root AST so the parser can be reused
            del self._ast_stack[1:]

    @property
    def ast(self):
        # the AST currently under construction
        return self._ast_stack[-1]

    def result(self):
        return self.ast

    def rulestack(self):
        # dotted trail of the active rules, used in trace output
        return '.'.join(self._rule_stack)

    @property
    def _pos(self):
        return self._buffer.pos

    def _goto(self, pos):
        self._buffer.goto(pos)

    def _eatwhitespace(self):
        self._buffer.eatwhitespace()

    def _eatcomments(self):
        if self.comments_re is not None:
            opts = re.MULTILINE if '\n' in self.comments_re else 0
            while self._buffer.matchre(self.comments_re, opts):
                pass

    def _next_token(self):
        """Skip interleaved whitespace and comments until none remain."""
        p = None
        while self._pos != p:
            p = self._pos
            self._eatwhitespace()
            self._eatcomments()

    def _call(self, name, node_name=None, force_list=False):
        """Invoke rule `name` and add its result to the AST as node_name."""
        self._rule_stack.append(name)
        # lowercase rules skip leading whitespace before matching
        if name[0].islower():
            self._next_token()
        pos = self._pos
        try:
            self.trace('%s <<\n\t->%s', self.rulestack(), self._buffer.lookahead())
            result, newpos = self._invoke_rule(name, pos)
            self.trace('SUCCESS %s', self.rulestack())
            self._add_ast_node(node_name, result, force_list)
            self._goto(newpos)
            return result
        except FailedParse:
            self.trace('FAILED %s', self.rulestack())
            # backtrack to where the rule started before propagating
            self._goto(pos)
            raise
        finally:
            self._rule_stack.pop()

    @memoize
    def _invoke_rule(self, name, pos):
        """Run a rule body; memoized on (name, pos) for packrat parsing."""
        rule = self._find_rule(name)
        self._push_ast()
        self._concrete_stack.append([])
        try:
            rule()
            node = self.ast
            if not node:
                # no named nodes: fall back to the concrete elements
                node = self._concrete_stack[-1]
                if len(node) == 1:
                    node = node[0]
        finally:
            self._concrete_stack.pop()
            self._pop_ast()
        semantic_rule = self._find_semantic_rule(name)
        if semantic_rule:
            node = semantic_rule(node)
        return (node, self._pos)

    def _token(self, token, node_name=None, force_list=False):
        """Match a literal token or raise FailedToken."""
        self._next_token()
        self.trace('match <%s> \n\t->%s', token, self._buffer.lookahead())
        if self._buffer.match(token, self.ignorecase) is None:
            self.trace('failed <%s>', token)
            raise FailedToken(self._buffer, token)
        self._add_ast_node(node_name, token, force_list)
        return token

    def _try(self, token, node_name=None, force_list=False):
        """Match a literal token if present; return True on success."""
        self._next_token()
        self.trace('try <%s> \n\t->%s', token, self._buffer.lookahead())
        if self._buffer.match(token, self.ignorecase) is not None:
            self._add_ast_node(node_name, token, force_list)
            return True

    def _pattern(self, pattern, node_name=None, force_list=False):
        """Match a regular expression or raise FailedPattern."""
        self.trace('match %s\n\t->%s', pattern, self._buffer.lookahead())
        token = self._buffer.matchre(pattern, self.ignorecase)
        if token is None:
            self.trace('failed %s', pattern)
            raise FailedPattern(self._buffer, pattern)
        self.trace('matched %s', token)
        self._add_ast_node(node_name, token, force_list)
        return token

    def _try_pattern(self, pattern, node_name=None, force_list=False):
        """Match a regular expression if possible; never raises."""
        self.trace('match %s\n\t->%s', pattern, self._buffer.lookahead())
        token = self._buffer.matchre(pattern, self.ignorecase)
        if token is None:
            self.trace('failed %s', pattern)
        self._add_ast_node(node_name, token, force_list)
        return token

    def _find_rule(self, name):
        rule = getattr(self, '_%s_' % name, None)
        # must be a bound method, not an arbitrary attribute
        if rule is None or not isinstance(rule, type(self._find_rule)):
            raise FailedRef(self._buffer, name)
        return rule

    def _find_semantic_rule(self, name):
        result = getattr(self, name, None)
        if result is None or not isinstance(result, type(self._find_rule)):
            return None
        return result

    def _push_ast(self):
        self._ast_stack.append(AST())

    def _pop_ast(self):
        return self._ast_stack.pop()

    def _add_ast_node(self, name, node, force_list=False):
        if name is not None:  # and node:
            self.ast.add(name, node, force_list)
        self._concrete_stack[-1].append(node)
        return node

    def error(self, item, etype=FailedParse):
        raise etype(self._buffer, item)

    def trace(self, msg, *params):
        if self.verbose:
            print(msg % params, file=sys.stderr)

    @contextmanager
    def _choice_context(self):
        """Backtrack on FailedParse; let cut failures propagate."""
        p = self._pos
        try:
            yield
        except FailedCut:
            raise
        except FailedParse:
            self._goto(p)

    @contextmanager
    def _repeat_context(self):
        """Backtrack a failed repeat attempt, then re-raise."""
        p = self._pos
        try:
            yield
        except FailedParse:
            self._goto(p)
            raise

    def _repeat_iterator(self, f):
        """Yield the results of f() until an attempt fails to parse.

        BUGFIX: restore the buffer position recorded before the failing
        attempt.  The old code converted FailedParse to StopIteration
        *inside* the ``with`` block, which bypassed _repeat_context's
        position restore and left the buffer past the last repeat.
        Finishing with ``return`` (instead of ``raise StopIteration``)
        also stays correct under PEP 479 (Python 3.7+).
        """
        while 1:
            with self._repeat_context():
                p = self._pos
                try:
                    value = f()
                    if value is not None:
                        yield value
                except FailedCut as e:
                    # a cut failure must abort the enclosing choice
                    raise e.nested
                except FailedParse:
                    self._goto(p)
                    return

    def _repeat(self, f):
        return list(self._repeat_iterator(f))

    def _eof(self):
        return self._buffer.atend()

    def _eol(self):
        return self._buffer.ateol()

    def _check_eof(self):
        self._next_token()
        if not self._buffer.atend():
            raise FailedParse(self._buffer, 'expecting end of file')
BUG in repeat iterator! Must return to last position after last repeat.
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import sys
import re
from contextlib import contextmanager
from .util import memoize
from . import buffering
from .exceptions import * # @UnusedWildImport
from .ast import AST
class Parser(object):
def __init__(self, text,
whitespace=None,
comments_re=None,
ignorecase=False,
verbose=False,
bufferClass=buffering.Buffer):
self.text = text
self.whitespace = set(whitespace if whitespace else '\t\v\n\r ')
self.comments_re = comments_re
self.ignorecase = ignorecase
self.verbose = verbose
self._bufferClass = bufferClass
self._buffer = None
self._ast_stack = []
self._concrete_stack = []
self._rule_stack = []
def parse(self, rule_name):
try:
self._buffer = self._bufferClass(self.text, self.whitespace)
self._push_ast()
self._concrete_stack.append([])
self._call(rule_name, rule_name)
return self.ast
finally:
del self._ast_stack[1:]
@property
def ast(self):
return self._ast_stack[-1]
def result(self):
return self.ast
def rulestack(self):
return '.'.join(self._rule_stack)
@property
def _pos(self):
return self._buffer.pos
def _goto(self, pos):
self._buffer.goto(pos)
def _eatwhitespace(self):
self._buffer.eatwhitespace()
def _eatcomments(self):
if self.comments_re is not None:
opts = re.MULTILINE if '\n' in self.comments_re else 0
while self._buffer.matchre(self.comments_re, opts):
pass
def _next_token(self):
p = None
while self._pos != p:
p = self._pos
self._eatwhitespace()
self._eatcomments()
def _call(self, name, node_name=None, force_list=False):
self._rule_stack.append(name)
if name[0].islower():
self._next_token()
pos = self._pos
try:
self.trace('%s <<\n\t->%s', self.rulestack(), self._buffer.lookahead())
result, newpos = self._invoke_rule(name, pos)
self.trace('SUCCESS %s', self.rulestack())
self._add_ast_node(node_name, result, force_list)
self._goto(newpos)
return result
except FailedParse:
self.trace('FAILED %s', self.rulestack())
self._goto(pos)
raise
finally:
self._rule_stack.pop()
@memoize
def _invoke_rule(self, name, pos):
rule = self._find_rule(name)
self._push_ast()
self._concrete_stack.append([])
try:
rule()
node = self.ast
if not node:
node = self._concrete_stack[-1]
if len(node) == 1:
node = node[0]
finally:
self._concrete_stack.pop()
self._pop_ast()
semantic_rule = self._find_semantic_rule(name)
if semantic_rule:
node = semantic_rule(node)
return (node, self._pos)
def _token(self, token, node_name=None, force_list=False):
self._next_token()
self.trace('match <%s> \n\t->%s', token, self._buffer.lookahead())
if self._buffer.match(token, self.ignorecase) is None:
self.trace('failed <%s>', token)
raise FailedToken(self._buffer, token)
self._add_ast_node(node_name, token, force_list)
return token
def _try(self, token, node_name=None, force_list=False):
self._next_token()
self.trace('try <%s> \n\t->%s', token, self._buffer.lookahead())
if self._buffer.match(token, self.ignorecase) is not None:
self._add_ast_node(node_name, token, force_list)
return True
def _pattern(self, pattern, node_name=None, force_list=False):
self.trace('match %s\n\t->%s', pattern, self._buffer.lookahead())
token = self._buffer.matchre(pattern, self.ignorecase)
if token is None:
self.trace('failed %s', pattern)
raise FailedPattern(self._buffer, pattern)
self.trace('matched %s', token)
self._add_ast_node(node_name, token, force_list)
return token
def _try_pattern(self, pattern, node_name=None, force_list=False):
self.trace('match %s\n\t->%s', pattern, self._buffer.lookahead())
token = self._buffer.matchre(pattern, self.ignorecase)
if token is None:
self.trace('failed %s', pattern)
self._add_ast_node(node_name, token, force_list)
return token
    def _find_rule(self, name):
        """Return the bound method implementing rule *name* (``_<name>_``).

        Raises FailedRef when the attribute is missing or is not a method,
        i.e. the grammar references an undefined rule.
        """
        rule = getattr(self, '_%s_' % name, None)
        # type(self._find_rule) is the bound-method type; this rejects
        # non-callable attributes that merely share the naming pattern.
        if rule is None or not isinstance(rule, type(self._find_rule)):
            raise FailedRef(self._buffer, name)
        return rule
    def _find_semantic_rule(self, name):
        """Return the semantic-action method named *name*, or None.

        Unlike _find_rule, a missing or non-method attribute is not an
        error: semantic actions are optional.
        """
        result = getattr(self, name, None)
        if result is None or not isinstance(result, type(self._find_rule)):
            return None
        return result
    def _push_ast(self):
        """Open a fresh AST scope for the rule about to run."""
        self._ast_stack.append(AST())
    def _pop_ast(self):
        """Close the current AST scope and return it."""
        return self._ast_stack.pop()
def _add_ast_node(self, name, node, force_list=False):
if name is not None: # and node:
self.ast.add(name, node, force_list)
self._concrete_stack[-1].append(node)
return node
    def error(self, item, etype=FailedParse):
        """Raise a parse failure of class *etype* for *item* at the buffer."""
        raise etype(self._buffer, item)
def trace(self, msg, *params):
if self.verbose:
print(msg % params, file=sys.stderr)
@contextmanager
def _choice_context(self):
p = self._pos
try:
yield
except FailedCut:
raise
except FailedParse:
self._goto(p)
@contextmanager
def _repeat_context(self):
p = self._pos
try:
yield
except FailedParse:
self._goto(p)
raise
def _repeat_iterator(self, f):
while 1:
with self._repeat_context():
p = self._pos
try:
value = f()
if value is not None:
yield value
except FailedParse:
self._goto(p)
raise StopIteration()
    def _repeat(self, f):
        """Return the list of all successive matches of *f*."""
        return list(self._repeat_iterator(f))
    def _eof(self):
        """True when the buffer is at end of input."""
        return self._buffer.atend()
    def _eol(self):
        """True when the buffer is at end of line."""
        return self._buffer.ateol()
def _check_eof(self):
self._next_token()
if not self._buffer.atend():
raise FailedParse(self._buffer, 'expecting end of file')
|
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.db.models import Q
from django.db.models.fields import NullBooleanField
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.utils.translation import override
from collections import OrderedDict
import csv
import operator
import re
import xml.etree.ElementTree as ET
from xml.dom import minidom
import datetime as DT
from signbank.dictionary.models import *
from signbank.dictionary.forms import *
from signbank.feedback.models import *
from signbank.video.forms import VideoUploadForGlossForm
from tagging.models import Tag, TaggedItem
from signbank.settings.base import ECV_FILE,EARLIEST_GLOSS_CREATION_DATE, OTHER_MEDIA_DIRECTORY, FIELDS, SEPARATE_ENGLISH_IDGLOSS_FIELD, LANGUAGE_CODE, ECV_SETTINGS
def order_queryset_by_sort_order(get, qs):
    """Change the sort-order of the query set, depending on the form field [sortOrder]

    This function is used both by GlossListView as well as by MorphemeListView.
    The value of [sortOrder] is 'idgloss' by default.
    [sortOrder] is a hidden field inside the "adminsearch" html form in the template admin_gloss_list.html
    Its value is changed by clicking the up/down buttons in the second row of the search result table

    :param get: the request's GET dict (may carry 'sortOrder')
    :param qs: the queryset (or list) of glosses/morphemes to order
    :return: the ordered queryset (a plain list when tuple-list ordering was used)
    """
    def get_string_from_tuple_list(lstTuples, number):
        """Get the string value corresponding to a number in a list of number-string tuples"""
        sBack = [tup[1] for tup in lstTuples if tup[0] == number]
        return sBack
    # Helper: order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]
    def order_queryset_by_tuple_list(qs, sOrder, sListName):
        """Order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]"""
        # Get a list of tuples for this sort-order
        tpList = build_choice_list(sListName)
        # Determine sort order: ascending is default
        bReversed = False
        if (sOrder[0:1] == '-'):
            # A starting '-' sign means: descending order
            sOrder = sOrder[1:]
            bReversed = True
        # Order the list of tuples alphabetically
        # (NOTE: they are alphabetical from 'build_choice_list()', except for the values 0,1)
        tpList = sorted(tpList, key=operator.itemgetter(1))
        # Order by the string-values in the tuple list
        return sorted(qs, key=lambda x: get_string_from_tuple_list(tpList, getattr(x, sOrder)), reverse=bReversed)
    # Set the default sort order
    sOrder = 'idgloss'  # Default sort order if nothing is specified
    # See if the form contains any sort-order information
    # ('in' replaces the Python-2-only dict.has_key, so this also runs on Python 3)
    if ('sortOrder' in get and get['sortOrder'] != ''):
        # Take the user-indicated sort order
        sOrder = get['sortOrder']
    # The ordering method depends on the kind of field:
    # (1) text fields are ordered straightforwardly
    # (2) fields made from a choice_list need special treatment
    if (sOrder.endswith('handedness')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "Handedness")
    elif (sOrder.endswith('domhndsh') or sOrder.endswith('subhndsh')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "Handshape")
    elif (sOrder.endswith('locprim')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "Location")
    else:
        # Use straightforward ordering on field [sOrder]
        ordered = qs.order_by(sOrder)
    # return the ordered list
    return ordered
class GlossListView(ListView):
    """Admin list view over Gloss: searching/filtering, CSV export and ECV export."""
    model = Gloss
    template_name = 'dictionary/admin_gloss_list.html'
    # Default page size; overridable via ?paginate_by= (see get_paginate_by).
    paginate_by = 500
    only_export_ecv = False #Used to call the 'export ecv' functionality of this view without the need for an extra GET parameter
    search_type = 'sign'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(GlossListView, self).get_context_data(**kwargs)
# Add in a QuerySet of all the books
# Retrieve the search_type,so that we know whether the search should be restricted to Gloss or not
if 'search_type' in self.request.GET:
self.search_type = self.request.GET['search_type']
search_form = GlossSearchForm(self.request.GET)
context['searchform'] = search_form
context['search_type'] = self.search_type
if self.search_type == 'sign':
context['glosscount'] = Gloss.none_morpheme_objects().count() # Only count the none-morpheme glosses
else:
context['glosscount'] = Gloss.objects.count() # Count the glosses + morphemes
context['add_gloss_form'] = GlossCreateForm()
context['ADMIN_RESULT_FIELDS'] = settings.ADMIN_RESULT_FIELDS
context['input_names_fields_and_labels'] = {}
for topic in ['main','phonology','semantics']:
context['input_names_fields_and_labels'][topic] = []
for fieldname in settings.FIELDS[topic]:
field = search_form[fieldname]
label = field.label
context['input_names_fields_and_labels'][topic].append((fieldname,field,label))
return context
def get_paginate_by(self, queryset):
"""
Paginate by specified value in querystring, or use default class property value.
"""
return self.request.GET.get('paginate_by', self.paginate_by)
def render_to_response(self, context):
# Look for a 'format=json' GET argument
if self.request.GET.get('format') == 'CSV':
return self.render_to_csv_response(context)
elif self.request.GET.get('export_ecv') == 'ECV' or self.only_export_ecv:
return self.render_to_ecv_export_response(context)
else:
return super(GlossListView, self).render_to_response(context)
    def render_to_ecv_export_response(self, context):
        """Write all none-morpheme glosses as an ELAN external controlled
        vocabulary (ECV) XML file to ECV_FILE and return a plain 'OK'.

        Layout follows the EAFv2.8 schema: one LANGUAGE element per
        configured language, then one CONTROLLED_VOCABULARY holding a
        CV_ENTRY_ML per gloss with a CVE_VALUE per language.
        """
        # Element/attribute names used in the generated XML.
        description = 'DESCRIPTION'
        language = 'LANGUAGE'
        lang_ref = 'LANG_REF'
        cv_entry_ml = 'CV_ENTRY_ML'
        cve_id = 'CVE_ID'
        cve_value = 'CVE_VALUE'
        topattributes = {'xmlns:xsi':"http://www.w3.org/2001/XMLSchema-instance",
                    'DATE':str(DT.date.today())+ 'T'+str(DT.datetime.now().time()),
                    'AUTHOR':'',
                    'VERSION':'0.2',
                    'xsi:noNamespaceSchemaLocation':"http://www.mpi.nl/tools/elan/EAFv2.8.xsd"}
        top = ET.Element('CV_RESOURCE', topattributes)
        for lang in ECV_SETTINGS['languages']:
            ET.SubElement(top, language, lang['attributes'])
        cv_element = ET.SubElement(top, 'CONTROLLED_VOCABULARY', {'CV_ID':ECV_SETTINGS['CV_ID']})
        # One description element per language for the vocabulary itself.
        for lang in ECV_SETTINGS['languages']:
            myattributes = {lang_ref: lang['id']}
            desc_element = ET.SubElement(cv_element, description, myattributes)
            desc_element.text = lang['description']
        # Make sure we iterate only over the none-Morpheme glosses
        for gloss in Gloss.none_morpheme_objects():
            glossid = str(gloss.pk)
            myattributes = {cve_id: glossid}
            cve_entry_element = ET.SubElement(cv_element, cv_entry_ml, myattributes)
            desc = self.get_ecv_descripion_for_gloss(gloss, ECV_SETTINGS['include_phonology_and_frequencies'])
            for lang in ECV_SETTINGS['languages']:
                cve_value_element = ET.SubElement(cve_entry_element, cve_value, {description:desc, lang_ref:lang['id']})
                cve_value_element.text = self.get_value_for_ecv(gloss, lang['annotation_idgloss_fieldname'])
        # Pretty-print the tree and write it out UTF-8 encoded.
        xmlstr = minidom.parseString(ET.tostring(top,'utf-8')).toprettyxml(indent="   ")
        with open(ECV_FILE, "w") as f:
            f.write(xmlstr.encode('utf-8'))
        # tree = ET.ElementTree(top)
        # tree.write(open(ECV_FILE, 'w'), encoding ="utf-8",xml_declaration=True, method="xml")
        return HttpResponse('OK')
    def get_ecv_descripion_for_gloss(self, gloss, include_phonology_and_frequencies=False):
        """Build the DESCRIPTION string for *gloss* in the ECV export.

        Optionally prefixes the gloss's keyword translations with a compact
        phonology/frequency summary shaped like
        ``handedness, (domhndsh,subhndsh; handCh), ... [tokNo/tokNoSgnr]``.
        (The misspelled method name is kept for caller compatibility.)
        """
        desc = ""
        if include_phonology_and_frequencies:
            description_fields = ['handedness','domhndsh', 'subhndsh', 'handCh', 'locprim', 'relOriMov', 'movDir','movSh', 'tokNo',
                              'tokNoSgnr'];
            # Each field contributes its own separator/bracket to the summary.
            for f in description_fields:
                value = self.get_value_for_ecv(gloss,f)
                if f == 'handedness':
                    desc = value
                elif f == 'domhndsh':
                    desc = desc+ ', ('+ value
                elif f == 'subhndsh':
                    desc = desc+','+value
                elif f == 'handCh':
                    desc = desc+'; '+value+')'
                elif f == 'tokNo':
                    desc = desc+' ['+value
                elif f == 'tokNoSgnr':
                    desc = desc+'/'+value+']'
                else:
                    desc = desc+', '+value
        if desc:
            desc += ", "
        # Append all keyword translations, comma separated.
        trans = [t.translation.text for t in gloss.translation_set.all()]
        desc += ", ".join(
            # The next line was adapted from an older version of this code,
            # that happened to do nothing. I left this for future usage.
            #map(lambda t: str(t.encode('ascii','xmlcharrefreplace')) if isinstance(t, unicode) else t, trans)
            trans
        )
        return desc
    def get_value_for_ecv(self, gloss, fieldname):
        """Return the display value of *fieldname* on *gloss* as a plain,
        ASCII-safe, non-empty string suitable for an ECV XML attribute."""
        try:
            # Prefer the human-readable value of Django choice fields.
            value = getattr(gloss, 'get_'+fieldname+'_display')()
        except AttributeError:
            value = getattr(gloss,fieldname)
        # NOTE(review): 'unicode' makes this branch Python-2 only — verify
        # before running under Python 3.
        if isinstance(value,unicode):
            value = str(value.encode('ascii','xmlcharrefreplace'))
        elif value is None:
            value = " "
        elif not isinstance(value,str):
            value = str(value)
        # '-' is the "no value" marker; export it as a single space.
        if value == '-':
            value = ' '
        return value
    # noinspection PyInterpreter,PyInterpreter
    def render_to_csv_response(self, context):
        """Stream the current queryset as a CSV attachment.

        Requires the 'dictionary.export_csv' permission.  One row per gloss:
        the fixed field list below plus languages, dialects, keywords,
        morphology and relation columns.
        """
        if not self.request.user.has_perm('dictionary.export_csv'):
            raise PermissionDenied
        # Create the HttpResponse object with the appropriate CSV header.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="dictionary-export.csv"'
        # fields = [f.name for f in Gloss._meta.fields]
        #We want to manually set which fields to export here
        fieldnames = ['idgloss', 'annotation_idgloss', 'annotation_idgloss_en', 'useInstr', 'sense', 'StemSN', 'rmrks', 'handedness',
                      'domhndsh', 'subhndsh', 'handCh', 'relatArtic', 'locprim', 'locVirtObj', 'relOriMov', 'relOriLoc', 'oriCh', 'contType',
                      'movSh', 'movDir', 'repeat', 'altern', 'phonOth', 'mouthG', 'mouthing', 'phonetVar',
                      'domSF', 'domFlex', 'oriChAbd', 'oriChFlex', 'iconImg', 'iconType',
                      'namEnt', 'semField', 'valence', 'lexCatNotes', 'tokNo', 'tokNoSgnr', 'tokNoA', 'tokNoV', 'tokNoR', 'tokNoGe',
                      'tokNoGr', 'tokNoO', 'tokNoSgnrA', 'tokNoSgnrV', 'tokNoSgnrR', 'tokNoSgnrGe',
                      'tokNoSgnrGr', 'tokNoSgnrO', 'inWeb', 'isNew']
        fields = [Gloss._meta.get_field(fieldname) for fieldname in fieldnames]
        writer = csv.writer(response)
        # Header titles (and everything below) are rendered in the configured
        # interface language.
        with override(LANGUAGE_CODE):
            header = ['Signbank ID'] + [f.verbose_name.title().encode('ascii','ignore') for f in fields]
            for extra_column in ['Languages','Dialects','Keywords','Morphology','Relations to other signs','Relations to foreign signs',]:
                header.append(extra_column);
            writer.writerow(header)
            for gloss in self.get_queryset():
                row = [str(gloss.pk)]
                for f in fields:
                    #Try the value of the choicelist
                    try:
                        row.append(getattr(gloss, 'get_'+f.name+'_display')())
                    #If it's not there, try the raw value
                    except AttributeError:
                        value = getattr(gloss,f.name)
                        # NOTE(review): 'unicode' makes this Python-2 only.
                        if isinstance(value,unicode):
                            value = str(value.encode('ascii','xmlcharrefreplace'));
                        elif not isinstance(value,str):
                            value = str(value);
                        row.append(value)
                # get languages
                languages = [language.name for language in gloss.language.all()]
                row.append(", ".join(languages));
                # get dialects
                dialects = [dialect.name for dialect in gloss.dialect.all()]
                row.append(", ".join(dialects));
                # get translations
                trans = [t.translation.text for t in gloss.translation_set.all()]
                row.append(", ".join(trans))
                # get morphology
                morphemes = [morpheme.morpheme.annotation_idgloss for morpheme in MorphologyDefinition.objects.filter(parent_gloss=gloss)]
                row.append(", ".join(morphemes))
                # get relations to other signs
                relations = [relation.target.idgloss for relation in Relation.objects.filter(source=gloss)]
                row.append(", ".join(relations))
                # get relations to foreign signs
                relations = [relation.other_lang_gloss for relation in RelationToForeignSign.objects.filter(gloss=gloss)]
                row.append(", ".join(relations))
                #Make it safe for weird chars
                safe_row = [];
                for column in row:
                    try:
                        safe_row.append(column.encode('utf-8'))
                    except AttributeError:
                        # Non-string columns (e.g. None) go out as empty cells.
                        safe_row.append(None);
                writer.writerow(safe_row)
        return response
def get_queryset(self):
get = self.request.GET
#First check whether we want to show everything or a subset
try:
if self.kwargs['show_all']:
show_all = True
except (KeyError,TypeError):
show_all = False
#Then check what kind of stuff we want
if 'search_type' in get:
self.search_type = get['search_type']
else:
self.search_type = 'sign'
setattr(self.request, 'search_type', self.search_type)
#Get the initial selection
if len(get) > 0 or show_all:
if self.search_type == 'sign':
# Get all the GLOSS items that are not member of the sub-class Morpheme
qs = Gloss.none_morpheme_objects()
else:
qs = Gloss.objects.all()
#No filters or 'show_all' specified? show nothing
else:
qs = Gloss.objects.none()
#If we wanted to get everything, we're done now
if show_all:
return qs
#If not, we will go trhough a long list of filters
if get.has_key('search') and get['search'] != '':
val = get['search']
query = Q(idgloss__istartswith=val) | \
Q(annotation_idgloss__istartswith=val)
if re.match('^\d+$', val):
query = query | Q(sn__exact=val)
qs = qs.filter(query)
#print "A: ", len(qs)
if get.has_key('englishGloss') and get['englishGloss'] != '':
val = get['englishGloss']
qs = qs.filter(annotation_idgloss_en__istartswith=val)
if get.has_key('keyword') and get['keyword'] != '':
val = get['keyword']
qs = qs.filter(translation__translation__text__istartswith=val)
if get.has_key('inWeb') and get['inWeb'] != '0':
# Don't apply 'inWeb' filter, if it is unspecified ('0' according to the NULLBOOLEANCHOICES)
val = get['inWeb'] == 'yes'
qs = qs.filter(inWeb__exact=val)
#print "B :", len(qs)
if not self.request.user.has_perm('dictionary.search_gloss'):
qs = qs.filter(inWeb__exact=True)
if get.has_key('hasvideo') and get['hasvideo'] != 'unspecified':
val = get['hasvideo'] == 'no'
qs = qs.filter(glossvideo__isnull=val)
if get.has_key('defspublished') and get['defspublished'] != 'unspecified':
val = get['defspublished'] == 'yes'
qs = qs.filter(definition__published=val)
fieldnames = ['idgloss', 'annotation_idgloss', 'annotation_idgloss_en', 'useInstr', 'sense', 'morph', 'StemSN', 'compound', 'rmrks', 'handedness',
'domhndsh', 'subhndsh', 'locprim', 'locVirtObj', 'relatArtic', 'relOriMov', 'relOriLoc', 'oriCh', 'handCh', 'repeat', 'altern',
'movSh', 'movDir', 'contType', 'phonOth', 'mouthG', 'mouthing', 'phonetVar',
'domSF', 'domFlex', 'oriChAbd', 'oriChFlex', 'iconImg', 'iconType', 'namEnt', 'semField', 'valence',
'lexCatNotes','tokNo', 'tokNoSgnr','tokNoA', 'tokNoV', 'tokNoR', 'tokNoGe', 'tokNoGr', 'tokNoO', 'tokNoSgnrA',
'tokNoSgnrV', 'tokNoSgnrR', 'tokNoSgnrGe', 'tokNoSgnrGr', 'tokNoSgnrO', 'inWeb', 'isNew'];
#Language and basic property filters
vals = get.getlist('dialect', [])
if vals != []:
qs = qs.filter(dialect__in=vals)
vals = get.getlist('language', [])
if vals != []:
qs = qs.filter(language__in=vals)
if get.has_key('useInstr') and get['useInstr'] != '':
qs = qs.filter(useInstr__icontains=get['useInstr'])
## phonology and semantics field filters
for fieldname in fieldnames:
if get.has_key(fieldname):
key = fieldname+'__exact';
val = get[fieldname];
if isinstance(Gloss._meta.get_field(fieldname),NullBooleanField):
val = {'0':'','1': None, '2': True, '3': False}[val];
if val != '':
kwargs = {key:val};
qs = qs.filter(**kwargs);
if get.has_key('initial_relative_orientation') and get['initial_relative_orientation'] != '':
val = get['initial_relative_orientation']
qs = qs.filter(initial_relative_orientation__exact=val)
if get.has_key('final_relative_orientation') and get['final_relative_orientation'] != '':
val = get['final_relative_orientation']
qs = qs.filter(final_relative_orientation__exact=val)
if get.has_key('initial_palm_orientation') and get['initial_palm_orientation'] != '':
val = get['initial_palm_orientation']
qs = qs.filter(initial_palm_orientation__exact=val)
if get.has_key('final_palm_orientation') and get['final_palm_orientation'] != '':
val = get['final_palm_orientation']
qs = qs.filter(final_palm_orientation__exact=val)
if get.has_key('initial_secondary_loc') and get['initial_secondary_loc'] != '':
val = get['initial_secondary_loc']
qs = qs.filter(initial_secondary_loc__exact=val)
if get.has_key('final_secondary_loc') and get['final_secondary_loc'] != '':
val = get['final_secondary_loc']
qs = qs.filter(final_secondary_loc__exact=val)
if get.has_key('final_secondary_loc') and get['final_secondary_loc'] != '':
val = get['final_secondary_loc']
qs = qs.filter(final_secondary_loc__exact=val)
if get.has_key('defsearch') and get['defsearch'] != '':
val = get['defsearch']
if get.has_key('defrole'):
role = get['defrole']
else:
role = 'all'
if role == 'all':
qs = qs.filter(definition__text__icontains=val)
else:
qs = qs.filter(definition__text__icontains=val, definition__role__exact=role)
if get.has_key('tags') and get['tags'] != '':
vals = get.getlist('tags')
tags = []
for t in vals:
tags.extend(Tag.objects.filter(name=t))
# search is an implicit AND so intersection
tqs = TaggedItem.objects.get_intersection_by_model(Gloss, tags)
# intersection
qs = qs & tqs
#print "J :", len(qs)
qs = qs.distinct()
if get.has_key('nottags') and get['nottags'] != '':
vals = get.getlist('nottags')
# print "NOT TAGS: ", vals
tags = []
for t in vals:
tags.extend(Tag.objects.filter(name=t))
# search is an implicit AND so intersection
tqs = TaggedItem.objects.get_intersection_by_model(Gloss, tags)
# print "NOT", tags, len(tqs)
# exclude all of tqs from qs
qs = [q for q in qs if q not in tqs]
# print "K :", len(qs)
if get.has_key('relationToForeignSign') and get['relationToForeignSign'] != '':
relations = RelationToForeignSign.objects.filter(other_lang_gloss__icontains=get['relationToForeignSign'])
potential_pks = [relation.gloss.pk for relation in relations]
qs = qs.filter(pk__in=potential_pks)
if get.has_key('hasRelationToForeignSign') and get['hasRelationToForeignSign'] != '0':
pks_for_glosses_with_relations = [relation.gloss.pk for relation in RelationToForeignSign.objects.all()];
print('pks_for_glosses',pks_for_glosses_with_relations)
if get['hasRelationToForeignSign'] == '1': #We only want glosses with a relation to a foreign sign
qs = qs.filter(pk__in=pks_for_glosses_with_relations)
elif get['hasRelationToForeignSign'] == '2': #We only want glosses without a relation to a foreign sign
qs = qs.exclude(pk__in=pks_for_glosses_with_relations)
if get.has_key('relation') and get['relation'] != '':
potential_targets = Gloss.objects.filter(idgloss__icontains=get['relation'])
relations = Relation.objects.filter(target__in=potential_targets)
potential_pks = [relation.source.pk for relation in relations]
qs = qs.filter(pk__in=potential_pks)
if get.has_key('hasRelation') and get['hasRelation'] != '':
#Find all relations with this role
if get['hasRelation'] == 'all':
relations_with_this_role = Relation.objects.all();
else:
relations_with_this_role = Relation.objects.filter(role__exact=get['hasRelation']);
#Remember the pk of all glosses that take part in the collected relations
pks_for_glosses_with_correct_relation = [relation.source.pk for relation in relations_with_this_role];
qs = qs.filter(pk__in=pks_for_glosses_with_correct_relation)
if get.has_key('id_morpheme') and get['id_morpheme'] != '':
# Filter all glosses that contain a morpheme with the indicated text in its gloss
# Step 1: get all morphemes containing the indicated text
potential_morphemes = Morpheme.objects.filter(idgloss__exact=get['id_morpheme']);
if (potential_morphemes.count() > 0):
# At least one has been found: take the first one
selected_morpheme = potential_morphemes[0];
# Step 2: get all Glosses containing the above morphemes
potential_pks = [appears.pk for appears in Gloss.objects.filter(morphemePart=selected_morpheme)];
qs = qs.filter(pk__in=potential_pks)
if get.has_key('hasComponentOfType') and get['hasComponentOfType'] != '':
# Look for "compound-components" of the indicated type. Compound Components are defined in class[MorphologyDefinition]
morphdefs_with_correct_role = MorphologyDefinition.objects.filter(role__exact=get['hasComponentOfType']);
pks_for_glosses_with_morphdefs_with_correct_role = [morphdef.parent_gloss.pk for morphdef in morphdefs_with_correct_role];
qs = qs.filter(pk__in=pks_for_glosses_with_morphdefs_with_correct_role)
if get.has_key('hasMorphemeOfType') and get['hasMorphemeOfType'] != '':
# Get all Morphemes of the indicated mrpType
target_morphemes = Morpheme.objects.filter(mrpType__exact=get['hasMorphemeOfType'])
# Get all glosses that have one of the morphemes in this set
glosses_with_correct_mrpType = Gloss.objects.filter(morphemePart__in=target_morphemes)
# Turn this into a list with pks
pks_for_glosses_with_correct_mrpType = [glossdef.pk for glossdef in glosses_with_correct_mrpType];
qs = qs.filter(pk__in=pks_for_glosses_with_correct_mrpType)
if get.has_key('definitionRole') and get['definitionRole'] != '':
#Find all definitions with this role
if get['definitionRole'] == 'all':
definitions_with_this_role = Definition.objects.all();
else:
definitions_with_this_role = Definition.objects.filter(role__exact=get['definitionRole']);
#Remember the pk of all glosses that are referenced in the collection definitions
pks_for_glosses_with_these_definitions = [definition.gloss.pk for definition in definitions_with_this_role];
qs = qs.filter(pk__in=pks_for_glosses_with_these_definitions)
if get.has_key('definitionContains') and get['definitionContains'] != '':
definitions_with_this_text = Definition.objects.filter(text__icontains=get['definitionContains']);
#Remember the pk of all glosses that are referenced in the collection definitions
pks_for_glosses_with_these_definitions = [definition.gloss.pk for definition in definitions_with_this_text];
qs = qs.filter(pk__in=pks_for_glosses_with_these_definitions)
if get.has_key('createdBefore') and get['createdBefore'] != '':
created_before_date = DT.datetime.strptime(get['createdBefore'], "%m/%d/%Y").date()
qs = qs.filter(creationDate__range=(EARLIEST_GLOSS_CREATION_DATE,created_before_date))
if get.has_key('createdAfter') and get['createdAfter'] != '':
created_after_date = DT.datetime.strptime(get['createdAfter'], "%m/%d/%Y").date()
qs = qs.filter(creationDate__range=(created_after_date,DT.datetime.now()))
# Saving querysets results to sessions, these results can then be used elsewhere (like in gloss_detail)
# Flush the previous queryset (just in case)
self.request.session['search_results'] = None
# Make sure that the QuerySet has filters applied (user is searching for something instead of showing all results [objects.all()])
if hasattr(qs.query.where, 'children') and len(qs.query.where.children) > 0:
items = []
for item in qs:
items.append(dict(id = item.id, gloss = item.annotation_idgloss))
self.request.session['search_results'] = items
# print "Final :", len(qs)
# Sort the queryset by the parameters given
qs = order_queryset_by_sort_order(self.request.GET, qs)
# Return the resulting filtered and sorted queryset
return qs
class GlossDetailView(DetailView):
    """Editable admin detail page for a single Gloss."""
    model = Gloss
    context_object_name = 'gloss'
    #Overriding the get method get permissions right
    def get(self, request, *args, **kwargs):
        """Serve the gloss detail page, enforcing view permissions.

        Without the 'dictionary.search_gloss' permission the user is
        redirected to the public gloss page (published glosses) or gets an
        empty response; anonymous users are sent to the public page or to
        the login page instead.
        """
        # NOTE(review): Http404 and render are not in the visible import
        # block; they presumably come in via a star import — verify.
        try:
            self.object = self.get_object()
        except Http404:
            # return custom template
            return render(request, 'no_object.html', status=404)
        if request.user.is_authenticated():
            if not request.user.has_perm('dictionary.search_gloss'):
                if self.object.inWeb:
                    return HttpResponseRedirect(reverse('dictionary:public_gloss',kwargs={'idgloss':self.object.idgloss}))
                else:
                    return HttpResponse('')
        else:
            if self.object.inWeb:
                return HttpResponseRedirect(reverse('dictionary:public_gloss', kwargs={'idgloss': self.object.idgloss}))
            else:
                return HttpResponseRedirect(reverse('registration:auth_login'))
        # Only authenticated users with search permission reach this point.
        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)
    def get_context_data(self, **kwargs):
        """Assemble everything the gloss detail template needs: edit forms,
        navigation info, translated choice lists, per-topic field values and
        the gloss's other-media entries."""
        # Call the base implementation first to get a context
        context = super(GlossDetailView, self).get_context_data(**kwargs)
        # Forms backing the various in-page edit actions.
        context['tagform'] = TagUpdateForm()
        context['videoform'] = VideoUploadForGlossForm()
        context['imageform'] = ImageUploadForGlossForm()
        context['definitionform'] = DefinitionForm()
        context['relationform'] = RelationForm()
        context['morphologyform'] = GlossMorphologyForm()
        context['morphemeform'] = GlossMorphemeForm()
        context['othermediaform'] = OtherMediaForm()
        context['navigation'] = context['gloss'].navigation(True)
        context['interpform'] = InterpreterFeedbackForm()
        context['SIGN_NAVIGATION'] = settings.SIGN_NAVIGATION
        # Id of the next gloss for prev/next navigation; fall back to the
        # current gloss when there is no next one.
        next_gloss = Gloss.objects.get(pk=context['gloss'].pk).admin_next_gloss()
        if next_gloss == None:
            context['nextglossid'] = context['gloss'].pk #context['gloss']
        else:
            context['nextglossid'] = next_gloss.pk
        if settings.SIGN_NAVIGATION:
            context['glosscount'] = Gloss.objects.count()
            context['glossposn'] = Gloss.objects.filter(sn__lt=context['gloss'].sn).count()+1
        #Pass info about which fields we want to see
        gl = context['gloss'];
        labels = gl.field_labels();
        context['choice_lists'] = {}
        #Translate the machine values to human values in the correct language, and save the choice lists along the way
        for topic in ['main','phonology','semantics','frequency']:
            context[topic+'_fields'] = [];
            for field in FIELDS[topic]:
                #Get and save the choice list for this field
                field_category = fieldname_to_category(field)
                choice_list = FieldChoice.objects.filter(field__iexact=field_category)
                if len(choice_list) > 0:
                    context['choice_lists'][field] = choicelist_queryset_to_translated_ordered_dict (choice_list,self.request.LANGUAGE_CODE)
                #Take the human value in the language we are using
                machine_value = getattr(gl,field);
                # '0' and '1' are reserved "no value" / "N/A" machine values.
                if machine_value == '0':
                    human_value = '-'
                elif machine_value == '1':
                    human_value = 'N/A'
                else:
                    try:
                        selected_field_choice = choice_list.filter(machine_value=machine_value)[0]
                        if self.request.LANGUAGE_CODE == 'nl':
                            human_value = selected_field_choice.dutch_name
                        else:
                            human_value = selected_field_choice.english_name
                    except (IndexError, ValueError):
                        # Not a choice field (or unknown value): show it raw.
                        human_value = machine_value
                #And add the kind of field
                if field in ['useInstr','phonOth','mouthG','mouthing','phonetVar','iconImg','locVirtObj']:
                    kind = 'text';
                elif field in ['repeat','altern','oriChAbd','oriChFlex']:
                    kind = 'check';
                else:
                    kind = 'list';
                context[topic+'_fields'].append([human_value,field,labels[field],kind]);
        #Gather the OtherMedia
        context['other_media'] = []
        other_media_type_choice_list = FieldChoice.objects.filter(field__iexact='OthermediaType')
        for other_media in gl.othermedia_set.all():
            # Same '0'/'1' reserved machine values as above.
            if int(other_media.type) == 0:
                human_value_media_type = '-'
            elif int(other_media.type) == 1:
                human_value_media_type = 'N/A'
            else:
                selected_field_choice = other_media_type_choice_list.filter(machine_value=other_media.type)[0]
                codes_to_adjectives = dict(settings.LANGUAGES)
                if self.request.LANGUAGE_CODE not in codes_to_adjectives.keys():
                    adjective = 'english'
                else:
                    adjective = codes_to_adjectives[self.request.LANGUAGE_CODE].lower()
                try:
                    human_value_media_type = getattr(selected_field_choice,adjective+'_name')
                except AttributeError:
                    human_value_media_type = getattr(selected_field_choice,'english_name')
            path = settings.STATIC_URL+'othermedia/'+other_media.path
            context['other_media'].append([other_media.pk, path, human_value_media_type, other_media.alternative_gloss])
            #Save the other_media_type choices (same for every other_media, but necessary because they all have other ids)
            context['choice_lists']['other-media-type_'+str(other_media.pk)] = choicelist_queryset_to_translated_ordered_dict(other_media_type_choice_list,self.request.LANGUAGE_CODE)
        #context['choice_lists'] = gl.get_choice_lists()
        context['choice_lists'] = json.dumps(context['choice_lists'])
        context['separate_english_idgloss_field'] = SEPARATE_ENGLISH_IDGLOSS_FIELD
        return context
class MorphemeListView(ListView):
    """The morpheme list view basically copies the gloss list view"""
    model = Morpheme
    template_name = 'dictionary/admin_morpheme_list.html'
    # Default page size; overridable via ?paginate_by= (see get_paginate_by).
    paginate_by = 500
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(MorphemeListView, self).get_context_data(**kwargs)
# Add in a QuerySet of all the books
search_form = MorphemeSearchForm(self.request.GET)
context['searchform'] = search_form
context['glosscount'] = Morpheme.objects.all().count()
context['add_morpheme_form'] = MorphemeCreateForm()
context['ADMIN_RESULT_FIELDS'] = settings.ADMIN_RESULT_FIELDS
# make sure that the morpheme-type options are available to the listview
oChoiceLists = {}
choice_list = FieldChoice.objects.filter(field__iexact = fieldname_to_category('mrpType'))
if (len(choice_list) > 0):
ordered_dict = choicelist_queryset_to_translated_ordered_dict(choice_list, self.request.LANGUAGE_CODE)
oChoiceLists['mrpType'] = ordered_dict
# Make all choice lists available in the context (currently only mrpType)
context['choice_lists'] = json.dumps(oChoiceLists)
context['input_names_fields_and_labels'] = {}
for topic in ['phonology', 'semantics']:
context['input_names_fields_and_labels'][topic] = []
for fieldname in settings.FIELDS[topic]:
field = search_form[fieldname]
label = field.label
context['input_names_fields_and_labels'][topic].append((fieldname, field, label))
return context
def get_paginate_by(self, queryset):
"""
Paginate by specified value in querystring, or use default class property value.
"""
return self.request.GET.get('paginate_by', self.paginate_by)
def get_queryset(self):
# get query terms from self.request
qs = Morpheme.objects.all()
# print "QS:", len(qs)
get = self.request.GET
if get.has_key('search') and get['search'] != '':
val = get['search']
query = Q(idgloss__istartswith=val) | \
Q(annotation_idgloss__istartswith=val)
if re.match('^\d+$', val):
query = query | Q(sn__exact=val)
qs = qs.filter(query)
# print "A: ", len(qs)
if get.has_key('englishGloss') and get['englishGloss'] != '':
val = get['englishGloss']
qs = qs.filter(annotation_idgloss_en__istartswith=val)
if get.has_key('keyword') and get['keyword'] != '':
val = get['keyword']
qs = qs.filter(translation__translation__text__istartswith=val)
if get.has_key('inWeb') and get['inWeb'] != '0':
# Don't apply 'inWeb' filter, if it is unspecified ('0' according to the NULLBOOLEANCHOICES)
val = get['inWeb'] == 'yes'
qs = qs.filter(inWeb__exact=val)
# print "B :", len(qs)
if get.has_key('hasvideo') and get['hasvideo'] != 'unspecified':
val = get['hasvideo'] == 'no'
qs = qs.filter(glossvideo__isnull=val)
if get.has_key('defspublished') and get['defspublished'] != 'unspecified':
val = get['defspublished'] == 'yes'
qs = qs.filter(definition__published=val)
fieldnames = ['idgloss', 'annotation_idgloss', 'annotation_idgloss_en', 'useInstr', 'sense', 'morph', 'StemSN',
'compound', 'rmrks', 'handedness',
'domhndsh', 'subhndsh', 'locprim', 'locVirtObj', 'relatArtic', 'relOriMov', 'relOriLoc', 'oriCh',
'handCh', 'repeat', 'altern',
'movSh', 'movDir', 'contType', 'phonOth', 'mouthG', 'mouthing', 'phonetVar', 'iconImg', 'iconType',
'namEnt', 'semField', 'valence',
'lexCatNotes', 'tokNo', 'tokNoSgnr', 'tokNoA', 'tokNoV', 'tokNoR', 'tokNoGe', 'tokNoGr', 'tokNoO',
'tokNoSgnrA',
'tokNoSgnrV', 'tokNoSgnrR', 'tokNoSgnrGe', 'tokNoSgnrGr', 'tokNoSgnrO', 'inWeb', 'isNew'];
# Language and basic property filters
vals = get.getlist('dialect', [])
if vals != []:
qs = qs.filter(dialect__in=vals)
vals = get.getlist('language', [])
if vals != []:
qs = qs.filter(language__in=vals)
if get.has_key('useInstr') and get['useInstr'] != '':
qs = qs.filter(useInstr__icontains=get['useInstr'])
## phonology and semantics field filters
for fieldname in fieldnames:
if get.has_key(fieldname):
key = fieldname + '__exact';
val = get[fieldname];
if isinstance(Gloss._meta.get_field(fieldname), NullBooleanField):
val = {'0': '', '1': None, '2': True, '3': False}[val];
if val != '':
kwargs = {key: val};
qs = qs.filter(**kwargs);
if get.has_key('initial_relative_orientation') and get['initial_relative_orientation'] != '':
val = get['initial_relative_orientation']
qs = qs.filter(initial_relative_orientation__exact=val)
if get.has_key('final_relative_orientation') and get['final_relative_orientation'] != '':
val = get['final_relative_orientation']
qs = qs.filter(final_relative_orientation__exact=val)
if get.has_key('initial_palm_orientation') and get['initial_palm_orientation'] != '':
val = get['initial_palm_orientation']
qs = qs.filter(initial_palm_orientation__exact=val)
if get.has_key('final_palm_orientation') and get['final_palm_orientation'] != '':
val = get['final_palm_orientation']
qs = qs.filter(final_palm_orientation__exact=val)
if get.has_key('initial_secondary_loc') and get['initial_secondary_loc'] != '':
val = get['initial_secondary_loc']
qs = qs.filter(initial_secondary_loc__exact=val)
if get.has_key('final_secondary_loc') and get['final_secondary_loc'] != '':
val = get['final_secondary_loc']
qs = qs.filter(final_secondary_loc__exact=val)
if get.has_key('final_secondary_loc') and get['final_secondary_loc'] != '':
val = get['final_secondary_loc']
qs = qs.filter(final_secondary_loc__exact=val)
if get.has_key('defsearch') and get['defsearch'] != '':
val = get['defsearch']
if get.has_key('defrole'):
role = get['defrole']
else:
role = 'all'
if role == 'all':
qs = qs.filter(definition__text__icontains=val)
else:
qs = qs.filter(definition__text__icontains=val, definition__role__exact=role)
if get.has_key('tags') and get['tags'] != '':
vals = get.getlist('tags')
tags = []
for t in vals:
tags.extend(Tag.objects.filter(name=t))
# search is an implicit AND so intersection
tqs = TaggedItem.objects.get_intersection_by_model(Gloss, tags)
# intersection
qs = qs & tqs
# print "J :", len(qs)
qs = qs.distinct()
if get.has_key('nottags') and get['nottags'] != '':
vals = get.getlist('nottags')
# print "NOT TAGS: ", vals
tags = []
for t in vals:
tags.extend(Tag.objects.filter(name=t))
# search is an implicit AND so intersection
tqs = TaggedItem.objects.get_intersection_by_model(Gloss, tags)
# print "NOT", tags, len(tqs)
# exclude all of tqs from qs
qs = [q for q in qs if q not in tqs]
# print "K :", len(qs)
if get.has_key('relationToForeignSign') and get['relationToForeignSign'] != '':
relations = RelationToForeignSign.objects.filter(other_lang_gloss__icontains=get['relationToForeignSign'])
potential_pks = [relation.gloss.pk for relation in relations]
qs = qs.filter(pk__in=potential_pks)
if get.has_key('hasRelationToForeignSign') and get['hasRelationToForeignSign'] != '0':
pks_for_glosses_with_relations = [relation.gloss.pk for relation in RelationToForeignSign.objects.all()];
print('pks_for_glosses', pks_for_glosses_with_relations)
if get['hasRelationToForeignSign'] == '1': # We only want glosses with a relation to a foreign sign
qs = qs.filter(pk__in=pks_for_glosses_with_relations)
elif get['hasRelationToForeignSign'] == '2': # We only want glosses without a relation to a foreign sign
qs = qs.exclude(pk__in=pks_for_glosses_with_relations)
if get.has_key('relation') and get['relation'] != '':
potential_targets = Gloss.objects.filter(idgloss__icontains=get['relation'])
relations = Relation.objects.filter(target__in=potential_targets)
potential_pks = [relation.source.pk for relation in relations]
qs = qs.filter(pk__in=potential_pks)
if get.has_key('hasRelation') and get['hasRelation'] != '':
# Find all relations with this role
if get['hasRelation'] == 'all':
relations_with_this_role = Relation.objects.all();
else:
relations_with_this_role = Relation.objects.filter(role__exact=get['hasRelation']);
# Remember the pk of all glosses that take part in the collected relations
pks_for_glosses_with_correct_relation = [relation.source.pk for relation in relations_with_this_role];
qs = qs.filter(pk__in=pks_for_glosses_with_correct_relation)
if get.has_key('morpheme') and get['morpheme'] != '':
potential_morphemes = Gloss.objects.filter(idgloss__icontains=get['morpheme']);
potential_morphdefs = MorphologyDefinition.objects.filter(
morpheme__in=[morpheme.pk for morpheme in potential_morphemes])
potential_pks = [morphdef.parent_gloss.pk for morphdef in potential_morphdefs];
qs = qs.filter(pk__in=potential_pks)
if get.has_key('hasMorphemeOfType') and get['hasMorphemeOfType'] != '':
# Get all Morphemes of the indicated mrpType
target_morphemes = Morpheme.objects.filter(mrpType__exact=get['hasMorphemeOfType'])
# Turn this into a list with pks
pks_for_glosses_with_correct_mrpType = [glossdef.pk for glossdef in target_morphemes];
qs = qs.filter(pk__in=pks_for_glosses_with_correct_mrpType)
# if get.has_key('hasMorphemeOfType') and get['hasMorphemeOfType'] != '':
# morphdefs_with_correct_role = MorphologyDefinition.objects.filter(role__exact=get['hasMorphemeOfType']);
# pks_for_glosses_with_morphdefs_with_correct_role = [morphdef.parent_gloss.pk for morphdef in
# morphdefs_with_correct_role];
# qs = qs.filter(pk__in=pks_for_glosses_with_morphdefs_with_correct_role)
if get.has_key('definitionRole') and get['definitionRole'] != '':
# Find all definitions with this role
if get['definitionRole'] == 'all':
definitions_with_this_role = Definition.objects.all();
else:
definitions_with_this_role = Definition.objects.filter(role__exact=get['definitionRole']);
# Remember the pk of all glosses that are referenced in the collection definitions
pks_for_glosses_with_these_definitions = [definition.gloss.pk for definition in definitions_with_this_role];
qs = qs.filter(pk__in=pks_for_glosses_with_these_definitions)
if get.has_key('definitionContains') and get['definitionContains'] != '':
definitions_with_this_text = Definition.objects.filter(text__icontains=get['definitionContains']);
# Remember the pk of all glosses that are referenced in the collection definitions
pks_for_glosses_with_these_definitions = [definition.gloss.pk for definition in definitions_with_this_text];
qs = qs.filter(pk__in=pks_for_glosses_with_these_definitions)
if get.has_key('createdBefore') and get['createdBefore'] != '':
created_before_date = DT.datetime.strptime(get['createdBefore'], "%m/%d/%Y").date()
qs = qs.filter(creationDate__range=(EARLIEST_GLOSS_CREATION_DATE, created_before_date))
if get.has_key('createdAfter') and get['createdAfter'] != '':
created_after_date = DT.datetime.strptime(get['createdAfter'], "%m/%d/%Y").date()
qs = qs.filter(creationDate__range=(created_after_date, DT.datetime.now()))
# Saving querysets results to sessions, these results can then be used elsewhere (like in gloss_detail)
# Flush the previous queryset (just in case)
self.request.session['search_results'] = None
# Make sure that the QuerySet has filters applied (user is searching for something instead of showing all results [objects.all()])
if hasattr(qs.query.where, 'children') and len(qs.query.where.children) > 0:
items = []
for item in qs:
items.append(dict(id=item.id, gloss=item.idgloss))
self.request.session['search_results'] = items
# print "Final :", len(qs)
# Sort the queryset by the parameters given
qs = order_queryset_by_sort_order(self.request.GET, qs)
# Return the resulting filtered and sorted queryset
return qs
def render_to_response(self, context):
# Look for a 'format=json' GET argument
if self.request.GET.get('format') == 'CSV':
return self.render_to_csv_response(context)
else:
return super(MorphemeListView, self).render_to_response(context)
# noinspection PyInterpreter,PyInterpreter
def render_to_csv_response(self, context):
"""Convert all Morphemes into a CSV
This function is derived from and similar to the one used in class GlossListView
Differences:
1 - this one adds the field [mrpType]
2 - the filename is different"""
if not self.request.user.has_perm('dictionary.export_csv'):
raise PermissionDenied
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="dictionary-morph-export.csv"'
# fields = [f.name for f in Gloss._meta.fields]
# We want to manually set which fields to export here
fieldnames = ['idgloss', 'annotation_idgloss', 'annotation_idgloss_en',
'mrpType',
'useInstr', 'sense', 'StemSN', 'rmrks',
'handedness',
'domhndsh', 'subhndsh', 'handCh', 'relatArtic', 'locprim', 'locVirtObj', 'relOriMov', 'relOriLoc',
'oriCh', 'contType',
'movSh', 'movDir', 'repeat', 'altern', 'phonOth', 'mouthG', 'mouthing', 'phonetVar',
'domSF', 'domFlex', 'oriChAbd', 'oriChFlex', 'iconImg', 'iconType',
'namEnt', 'semField', 'valence', 'lexCatNotes', 'tokNo', 'tokNoSgnr', 'tokNoA', 'tokNoV',
'tokNoR', 'tokNoGe',
'tokNoGr', 'tokNoO', 'tokNoSgnrA', 'tokNoSgnrV', 'tokNoSgnrR', 'tokNoSgnrGe',
'tokNoSgnrGr', 'tokNoSgnrO', 'inWeb', 'isNew']
# Different from Gloss: we use Morpheme here
fields = [Morpheme._meta.get_field(fieldname) for fieldname in fieldnames]
writer = csv.writer(response)
with override(LANGUAGE_CODE):
header = ['Signbank ID'] + [f.verbose_name.title().encode('ascii', 'ignore') for f in fields]
for extra_column in ['Languages', 'Dialects', 'Keywords', 'Morphology', 'Relations to other signs',
'Relations to foreign signs', 'Appears in signs', ]:
header.append(extra_column);
writer.writerow(header)
for gloss in self.get_queryset():
row = [str(gloss.pk)]
for f in fields:
# Try the value of the choicelist
try:
row.append(getattr(gloss, 'get_' + f.name + '_display')())
# If it's not there, try the raw value
except AttributeError:
value = getattr(gloss, f.name)
if isinstance(value, unicode):
value = str(value.encode('ascii', 'xmlcharrefreplace'));
elif not isinstance(value, str):
value = str(value);
row.append(value)
# get languages
languages = [language.name for language in gloss.language.all()]
row.append(", ".join(languages));
# get dialects
dialects = [dialect.name for dialect in gloss.dialect.all()]
row.append(", ".join(dialects));
# get translations
trans = [t.translation.text for t in gloss.translation_set.all()]
row.append(", ".join(trans))
# get compound's component type
morphemes = [morpheme.role for morpheme in MorphologyDefinition.objects.filter(parent_gloss=gloss)]
row.append(", ".join(morphemes))
# get relations to other signs
relations = [relation.target.idgloss for relation in Relation.objects.filter(source=gloss)]
row.append(", ".join(relations))
# get relations to foreign signs
relations = [relation.other_lang_gloss for relation in RelationToForeignSign.objects.filter(gloss=gloss)]
row.append(", ".join(relations))
# Got all the glosses (=signs) this morpheme appears in
appearsin = [appears.idgloss for appears in Gloss.objects.filter(morphemePart=gloss)]
row.append(", ".join(appearsin))
# Make it safe for weird chars
safe_row = [];
for column in row:
try:
safe_row.append(column.encode('utf-8'))
except AttributeError:
safe_row.append(None);
writer.writerow(safe_row)
return response
class MorphemeDetailView(DetailView):
    """Detail page for a single Morpheme."""

    model = Morpheme
    context_object_name = 'morpheme'

    def get_context_data(self, **kwargs):
        """Assemble forms, navigation info, choice lists and other media for the template."""
        # Call the base implementation first to get a context
        context = super(MorphemeDetailView, self).get_context_data(**kwargs)
        context['tagform'] = TagUpdateForm()
        context['videoform'] = VideoUploadForGlossForm()
        context['imageform'] = ImageUploadForGlossForm()
        context['definitionform'] = DefinitionForm()
        context['relationform'] = RelationForm()
        context['morphologyform'] = MorphemeMorphologyForm()
        context['othermediaform'] = OtherMediaForm()
        context['navigation'] = context['morpheme'].navigation(True)
        context['interpform'] = InterpreterFeedbackForm()
        context['SIGN_NAVIGATION'] = settings.SIGN_NAVIGATION
        # Get the set of all the Gloss signs that point to me
        context['glosslinks'] = Gloss.objects.filter(morphemePart__id=context['morpheme'].id)
        try:
            # Note: setting idgloss to context['morpheme'] is not enough; the ".idgloss" needs to be specified
            next_morpheme = Morpheme.objects.get(idgloss=context['morpheme'].idgloss).admin_next_morpheme()
        except Exception:
            # Bug fix: was a bare 'except:'; any lookup problem simply means
            # there is no next morpheme, but KeyboardInterrupt/SystemExit are
            # no longer swallowed.
            next_morpheme = None
        if next_morpheme is None:
            context['nextmorphemeid'] = context['morpheme'].pk
        else:
            context['nextmorphemeid'] = next_morpheme.pk
        if settings.SIGN_NAVIGATION:
            context['glosscount'] = Morpheme.objects.count()
            context['glossposn'] = Morpheme.objects.filter(sn__lt=context['morpheme'].sn).count() + 1
        # Pass info about which fields we want to see
        gl = context['morpheme']
        labels = gl.field_labels()
        context['choice_lists'] = {}
        # Translate the machine values to human values in the correct language,
        # and save the choice lists along the way
        for topic in ['phonology', 'semantics', 'frequency']:
            context[topic + '_fields'] = []
            for field in FIELDS[topic]:
                # Get and save the choice list for this field
                field_category = fieldname_to_category(field)
                choice_list = FieldChoice.objects.filter(field__iexact=field_category)
                if len(choice_list) > 0:
                    context['choice_lists'][field] = choicelist_queryset_to_translated_ordered_dict(choice_list,
                                                                                                    self.request.LANGUAGE_CODE)
                # Take the human value in the language we are using
                machine_value = getattr(gl, field)
                if machine_value == '0':
                    human_value = '-'
                elif machine_value == '1':
                    human_value = 'N/A'
                else:
                    try:
                        selected_field_choice = choice_list.filter(machine_value=machine_value)[0]
                        if self.request.LANGUAGE_CODE == 'nl':
                            human_value = selected_field_choice.dutch_name
                        else:
                            human_value = selected_field_choice.english_name
                    except (IndexError, ValueError):
                        # No matching choice: fall back to the raw machine value
                        human_value = machine_value
                # And add the kind of field
                if field in ['phonOth', 'mouthG', 'mouthing', 'phonetVar', 'iconImg', 'locVirtObj']:
                    kind = 'text'
                elif field in ['repeat', 'altern']:
                    kind = 'check'
                else:
                    kind = 'list'
                context[topic + '_fields'].append([human_value, field, labels[field], kind])
        # Gather the OtherMedia
        context['other_media'] = []
        other_media_type_choice_list = FieldChoice.objects.filter(field__iexact='OthermediaType')
        for other_media in gl.othermedia_set.all():
            # Machine values 0 and 1 are the reserved '-' / 'N/A' entries
            if int(other_media.type) == 0:
                human_value_media_type = '-'
            elif int(other_media.type) == 1:
                human_value_media_type = 'N/A'
            else:
                selected_field_choice = other_media_type_choice_list.filter(machine_value=other_media.type)[0]
                codes_to_adjectives = dict(settings.LANGUAGES)
                if self.request.LANGUAGE_CODE not in codes_to_adjectives.keys():
                    adjective = 'english'
                else:
                    adjective = codes_to_adjectives[self.request.LANGUAGE_CODE].lower()
                try:
                    human_value_media_type = getattr(selected_field_choice, adjective + '_name')
                except AttributeError:
                    human_value_media_type = getattr(selected_field_choice, 'english_name')
            path = settings.STATIC_URL + 'othermedia/' + other_media.path
            context['other_media'].append([other_media.pk, path, human_value_media_type, other_media.alternative_gloss])
            # Save the other_media_type choices (same for every other_media, but necessary because they all have other ids)
            context['choice_lists'][
                'other-media-type_' + str(other_media.pk)] = choicelist_queryset_to_translated_ordered_dict(
                other_media_type_choice_list, self.request.LANGUAGE_CODE)
        context['choice_lists'] = json.dumps(context['choice_lists'])
        context['separate_english_idgloss_field'] = SEPARATE_ENGLISH_IDGLOSS_FIELD
        return context
def gloss_ajax_search_results(request):
    """Return the results of the previous search, stored in the session, as JSON."""
    stored_results = request.session['search_results']
    payload = json.dumps(stored_results)
    return HttpResponse(payload)
def gloss_ajax_complete(request, prefix):
    """Return a list of glosses matching the search term
    as a JSON structure suitable for typeahead."""
    query = Q(idgloss__istartswith=prefix) | \
            Q(annotation_idgloss__istartswith=prefix) | \
            Q(sn__startswith=prefix)
    # TODO: possibly reduce the possibilities of [Gloss.objects] to exclude Morphemes??
    # Suggestion: qs = Gloss.none_morpheme_objects.filter(query) -- if that works
    qs = Gloss.objects.filter(query)
    # NOTE(review): 'pk' deliberately carries the idgloss string, not the numeric
    # pk -- presumably what the typeahead consumer expects; confirm before changing.
    result = [{'idgloss': g.idgloss, 'annotation_idgloss': g.annotation_idgloss,
               'sn': g.sn, 'pk': "%s" % (g.idgloss)}
              for g in qs]
    # Bug fix: the second positional argument of HttpResponse is content_type and
    # must be a string; the old code passed a dict there, producing an invalid
    # Content-Type header.
    return HttpResponse(json.dumps(result), content_type='application/json')
def morph_ajax_complete(request, prefix):
    """Return a list of morphs matching the search term
    as a JSON structure suitable for typeahead."""
    query = Q(idgloss__istartswith=prefix) | \
            Q(annotation_idgloss__istartswith=prefix) | \
            Q(sn__startswith=prefix)
    qs = Morpheme.objects.filter(query)
    # NOTE(review): 'pk' deliberately carries the idgloss string, not the numeric
    # pk -- presumably what the typeahead consumer expects; confirm before changing.
    result = [{'idgloss': g.idgloss, 'annotation_idgloss': g.annotation_idgloss,
               'sn': g.sn, 'pk': "%s" % (g.idgloss)}
              for g in qs]
    # Bug fix: the second positional argument of HttpResponse is content_type and
    # must be a string; the old code passed a dict there, producing an invalid
    # Content-Type header.
    return HttpResponse(json.dumps(result), content_type='application/json')
def choicelist_queryset_to_translated_ordered_dict(queryset, language_code):
    """Turn a FieldChoice queryset into an OrderedDict keyed on '_<machine_value>'.

    The values are the choice names translated for [language_code]; unknown
    language codes fall back to English. The reserved entries '-' and 'N/A'
    always head the dict, the rest is sorted alphabetically on the translation.
    """
    adjectives_by_code = dict(settings.LANGUAGES)
    if language_code in adjectives_by_code:
        adjective = adjectives_by_code[language_code].lower()
    else:
        adjective = 'english'
    attr_name = adjective + '_name'
    try:
        translated_pairs = [('_' + str(choice.machine_value), unicode(getattr(choice, attr_name)))
                            for choice in queryset]
    except AttributeError:
        # The translated attribute does not exist on this model: redo in English
        translated_pairs = [('_' + str(choice.machine_value), unicode(choice.english_name))
                            for choice in queryset]
    ordered_pairs = [('_0', '-'), ('_1', 'N/A')] + sorted(translated_pairs, key=lambda pair: pair[1])
    return OrderedDict(ordered_pairs)
# No longer show all morphemes before searching (#241)
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.db.models import Q
from django.db.models.fields import NullBooleanField
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.utils.translation import override
from collections import OrderedDict
import csv
import operator
import re
import xml.etree.ElementTree as ET
from xml.dom import minidom
import datetime as DT
from signbank.dictionary.models import *
from signbank.dictionary.forms import *
from signbank.feedback.models import *
from signbank.video.forms import VideoUploadForGlossForm
from tagging.models import Tag, TaggedItem
from signbank.settings.base import ECV_FILE,EARLIEST_GLOSS_CREATION_DATE, OTHER_MEDIA_DIRECTORY, FIELDS, SEPARATE_ENGLISH_IDGLOSS_FIELD, LANGUAGE_CODE, ECV_SETTINGS
def order_queryset_by_sort_order(get, qs):
    """Change the sort-order of the query set, depending on the form field [sortOrder]
    This function is used both by GlossListView as well as by MorphemeListView.
    The value of [sortOrder] is 'idgloss' by default.
    [sortOrder] is a hidden field inside the "adminsearch" html form in the template admin_gloss_list.html
    Its value is changed by clicking the up/down buttons in the second row of the search result table
    """
    def get_string_from_tuple_list(lstTuples, number):
        """Get the string value corresponding to a number in a list of number-string tuples"""
        # NOTE(review): despite the name this returns a (possibly empty) LIST of
        # matching strings, not a string; the caller only uses it as a sort key,
        # where list comparison happens to give the intended ordering.
        sBack = [tup[1] for tup in lstTuples if tup[0] == number]
        return sBack
    # Helper: order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]
    def order_queryset_by_tuple_list(qs, sOrder, sListName):
        """Order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]"""
        # Get a list of tuples for this sort-order
        tpList = build_choice_list(sListName)
        # Determine sort order: ascending is default
        bReversed = False
        if (sOrder[0:1] == '-'):
            # A starting '-' sign means: descending order
            sOrder = sOrder[1:]
            bReversed = True
        # Order the list of tuples alphabetically
        # (NOTE: they are alphabetical from 'build_choice_list()', except for the values 0,1)
        tpList = sorted(tpList, key=operator.itemgetter(1))
        # Order by the string-values in the tuple list
        # (returns a plain sorted list, not a QuerySet)
        return sorted(qs, key=lambda x: get_string_from_tuple_list(tpList, getattr(x, sOrder)), reverse=bReversed)
    # Set the default sort order
    sOrder = 'idgloss'  # Default sort order if nothing is specified
    # See if the form contains any sort-order information
    if (get.has_key('sortOrder') and get['sortOrder'] != ''):
        # Take the user-indicated sort order
        sOrder = get['sortOrder']
    # The ordering method depends on the kind of field:
    # (1) text fields are ordered straightforwardly
    # (2) fields made from a choice_list need special treatment
    if (sOrder.endswith('handedness')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "Handedness")
    elif (sOrder.endswith('domhndsh') or sOrder.endswith('subhndsh')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "Handshape")
    elif (sOrder.endswith('locprim')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "Location")
    else:
        # Use straightforward ordering on field [sOrder]
        ordered = qs.order_by(sOrder)
    # return the ordered list
    return ordered
class GlossListView(ListView):
    """Searchable admin list of Glosses with CSV and ECV export."""

    model = Gloss
    template_name = 'dictionary/admin_gloss_list.html'
    paginate_by = 500
    only_export_ecv = False #Used to call the 'export ecv' functionality of this view without the need for an extra GET parameter
    # 'sign' restricts searches to non-morpheme glosses; overridden by the
    # 'search_type' GET parameter in get_context_data/get_queryset.
    search_type = 'sign'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(GlossListView, self).get_context_data(**kwargs)
# Add in a QuerySet of all the books
# Retrieve the search_type,so that we know whether the search should be restricted to Gloss or not
if 'search_type' in self.request.GET:
self.search_type = self.request.GET['search_type']
search_form = GlossSearchForm(self.request.GET)
context['searchform'] = search_form
context['search_type'] = self.search_type
if self.search_type == 'sign':
context['glosscount'] = Gloss.none_morpheme_objects().count() # Only count the none-morpheme glosses
else:
context['glosscount'] = Gloss.objects.count() # Count the glosses + morphemes
context['add_gloss_form'] = GlossCreateForm()
context['ADMIN_RESULT_FIELDS'] = settings.ADMIN_RESULT_FIELDS
context['input_names_fields_and_labels'] = {}
for topic in ['main','phonology','semantics']:
context['input_names_fields_and_labels'][topic] = []
for fieldname in settings.FIELDS[topic]:
field = search_form[fieldname]
label = field.label
context['input_names_fields_and_labels'][topic].append((fieldname,field,label))
return context
def get_paginate_by(self, queryset):
"""
Paginate by specified value in querystring, or use default class property value.
"""
return self.request.GET.get('paginate_by', self.paginate_by)
def render_to_response(self, context):
# Look for a 'format=json' GET argument
if self.request.GET.get('format') == 'CSV':
return self.render_to_csv_response(context)
elif self.request.GET.get('export_ecv') == 'ECV' or self.only_export_ecv:
return self.render_to_ecv_export_response(context)
else:
return super(GlossListView, self).render_to_response(context)
    def render_to_ecv_export_response(self, context):
        """Write all non-morpheme glosses to the ECV (ELAN controlled vocabulary)
        XML file at settings ECV_FILE, then answer the request with a plain 'OK'.

        Note: the response is not the ECV itself; the file is written server-side.
        """
        # ECV element/attribute tag names
        description = 'DESCRIPTION'
        language = 'LANGUAGE'
        lang_ref = 'LANG_REF'
        cv_entry_ml = 'CV_ENTRY_ML'
        cve_id = 'CVE_ID'
        cve_value = 'CVE_VALUE'
        topattributes = {'xmlns:xsi':"http://www.w3.org/2001/XMLSchema-instance",
                    'DATE':str(DT.date.today())+ 'T'+str(DT.datetime.now().time()),
                    'AUTHOR':'',
                    'VERSION':'0.2',
                    'xsi:noNamespaceSchemaLocation':"http://www.mpi.nl/tools/elan/EAFv2.8.xsd"}
        top = ET.Element('CV_RESOURCE', topattributes)
        for lang in ECV_SETTINGS['languages']:
            ET.SubElement(top, language, lang['attributes'])
        cv_element = ET.SubElement(top, 'CONTROLLED_VOCABULARY', {'CV_ID':ECV_SETTINGS['CV_ID']})
        # description for cv_element, one per configured language
        for lang in ECV_SETTINGS['languages']:
            myattributes = {lang_ref: lang['id']}
            desc_element = ET.SubElement(cv_element, description, myattributes)
            desc_element.text = lang['description']
        # Make sure we iterate only over the none-Morpheme glosses
        for gloss in Gloss.none_morpheme_objects():
            glossid = str(gloss.pk)
            myattributes = {cve_id: glossid}
            cve_entry_element = ET.SubElement(cv_element, cv_entry_ml, myattributes)
            desc = self.get_ecv_descripion_for_gloss(gloss, ECV_SETTINGS['include_phonology_and_frequencies'])
            # One CVE_VALUE per configured language, annotated with the description
            for lang in ECV_SETTINGS['languages']:
                cve_value_element = ET.SubElement(cve_entry_element, cve_value, {description:desc, lang_ref:lang['id']})
                cve_value_element.text = self.get_value_for_ecv(gloss, lang['annotation_idgloss_fieldname'])
        xmlstr = minidom.parseString(ET.tostring(top,'utf-8')).toprettyxml(indent="   ")
        with open(ECV_FILE, "w") as f:
            f.write(xmlstr.encode('utf-8'))
        # tree = ET.ElementTree(top)
        # tree.write(open(ECV_FILE, 'w'), encoding ="utf-8",xml_declaration=True, method="xml")
        return HttpResponse('OK')
    def get_ecv_descripion_for_gloss(self, gloss, include_phonology_and_frequencies=False):
        """Build the ECV description string for [gloss]: optionally a compact
        phonology/frequency summary, followed by the gloss's translations.

        NOTE(review): the misspelled name ('descripion') is kept because callers
        use it; renaming would break them.
        """
        desc = ""
        if include_phonology_and_frequencies:
            description_fields = ['handedness','domhndsh', 'subhndsh', 'handCh', 'locprim', 'relOriMov', 'movDir','movSh', 'tokNo',
                                  'tokNoSgnr'];
            # The separators below produce e.g.:
            #   <handedness>, (<domhndsh>,<subhndsh>; <handCh>), ... [<tokNo>/<tokNoSgnr>]
            for f in description_fields:
                value = self.get_value_for_ecv(gloss,f)
                if f == 'handedness':
                    desc = value
                elif f == 'domhndsh':
                    desc = desc+ ', ('+ value
                elif f == 'subhndsh':
                    desc = desc+','+value
                elif f == 'handCh':
                    desc = desc+'; '+value+')'
                elif f == 'tokNo':
                    desc = desc+' ['+value
                elif f == 'tokNoSgnr':
                    desc = desc+'/'+value+']'
                else:
                    desc = desc+', '+value
        if desc:
            desc += ", "
        # Append the comma-separated keyword translations
        trans = [t.translation.text for t in gloss.translation_set.all()]
        desc += ", ".join(
            # The next line was adapted from an older version of this code,
            # that happened to do nothing. I left this for future usage.
            #map(lambda t: str(t.encode('ascii','xmlcharrefreplace')) if isinstance(t, unicode) else t, trans)
            trans
        )
        return desc
def get_value_for_ecv(self, gloss, fieldname):
try:
value = getattr(gloss, 'get_'+fieldname+'_display')()
except AttributeError:
value = getattr(gloss,fieldname)
if isinstance(value,unicode):
value = str(value.encode('ascii','xmlcharrefreplace'))
elif value is None:
value = " "
elif not isinstance(value,str):
value = str(value)
if value == '-':
value = ' '
return value
    # noinspection PyInterpreter,PyInterpreter
    def render_to_csv_response(self, context):
        """Export the current (filtered) gloss queryset as a CSV attachment.

        Requires the 'dictionary.export_csv' permission; raises PermissionDenied
        otherwise.
        """
        if not self.request.user.has_perm('dictionary.export_csv'):
            raise PermissionDenied
        # Create the HttpResponse object with the appropriate CSV header.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="dictionary-export.csv"'
        # fields = [f.name for f in Gloss._meta.fields]
        #We want to manually set which fields to export here
        fieldnames = ['idgloss', 'annotation_idgloss', 'annotation_idgloss_en', 'useInstr', 'sense', 'StemSN', 'rmrks', 'handedness',
                      'domhndsh', 'subhndsh', 'handCh', 'relatArtic', 'locprim', 'locVirtObj', 'relOriMov', 'relOriLoc', 'oriCh', 'contType',
                      'movSh', 'movDir', 'repeat', 'altern', 'phonOth', 'mouthG', 'mouthing', 'phonetVar',
                      'domSF', 'domFlex', 'oriChAbd', 'oriChFlex', 'iconImg', 'iconType',
                      'namEnt', 'semField', 'valence', 'lexCatNotes', 'tokNo', 'tokNoSgnr', 'tokNoA', 'tokNoV', 'tokNoR', 'tokNoGe',
                      'tokNoGr', 'tokNoO', 'tokNoSgnrA', 'tokNoSgnrV', 'tokNoSgnrR', 'tokNoSgnrGe',
                      'tokNoSgnrGr', 'tokNoSgnrO', 'inWeb', 'isNew']
        fields = [Gloss._meta.get_field(fieldname) for fieldname in fieldnames]
        writer = csv.writer(response)
        # Render the header row with verbose names translated to LANGUAGE_CODE
        with override(LANGUAGE_CODE):
            header = ['Signbank ID'] + [f.verbose_name.title().encode('ascii','ignore') for f in fields]
        for extra_column in ['Languages','Dialects','Keywords','Morphology','Relations to other signs','Relations to foreign signs',]:
            header.append(extra_column);
        writer.writerow(header)
        for gloss in self.get_queryset():
            row = [str(gloss.pk)]
            for f in fields:
                #Try the value of the choicelist
                try:
                    row.append(getattr(gloss, 'get_'+f.name+'_display')())
                #If it's not there, try the raw value
                except AttributeError:
                    value = getattr(gloss,f.name)
                    # Py2: escape non-ASCII text, stringify everything else
                    if isinstance(value,unicode):
                        value = str(value.encode('ascii','xmlcharrefreplace'));
                    elif not isinstance(value,str):
                        value = str(value);
                    row.append(value)
            # get languages
            languages = [language.name for language in gloss.language.all()]
            row.append(", ".join(languages));
            # get dialects
            dialects = [dialect.name for dialect in gloss.dialect.all()]
            row.append(", ".join(dialects));
            # get translations
            trans = [t.translation.text for t in gloss.translation_set.all()]
            row.append(", ".join(trans))
            # get morphology
            morphemes = [morpheme.morpheme.annotation_idgloss for morpheme in MorphologyDefinition.objects.filter(parent_gloss=gloss)]
            row.append(", ".join(morphemes))
            # get relations to other signs
            relations = [relation.target.idgloss for relation in Relation.objects.filter(source=gloss)]
            row.append(", ".join(relations))
            # get relations to foreign signs
            relations = [relation.other_lang_gloss for relation in RelationToForeignSign.objects.filter(gloss=gloss)]
            row.append(", ".join(relations))
            #Make it safe for weird chars
            # (non-strings have no .encode and become None in the output)
            safe_row = [];
            for column in row:
                try:
                    safe_row.append(column.encode('utf-8'))
                except AttributeError:
                    safe_row.append(None);
            writer.writerow(safe_row)
        return response
def get_queryset(self):
    """Build the filtered Gloss queryset for the gloss list view.

    Reads the search parameters from ``self.request.GET``, applies a long
    chain of optional filters (text search, phonology/semantics fields,
    tags, relations, morphology, creation dates), stores the resulting
    ids in ``request.session['search_results']`` for reuse elsewhere
    (e.g. gloss_detail), and returns the sorted queryset.

    NOTE: ``get.has_key(...)`` is Python 2 only.
    """
    get = self.request.GET
    #First check whether we want to show everything or a subset
    # NOTE(review): if kwargs['show_all'] exists but is falsy, show_all is
    # never bound and its use below raises NameError — TODO confirm the URL
    # conf only ever passes a truthy value or omits the kwarg.
    try:
        if self.kwargs['show_all']:
            show_all = True
    except (KeyError,TypeError):
        show_all = False
    #Then check what kind of stuff we want
    if 'search_type' in get:
        self.search_type = get['search_type']
    else:
        self.search_type = 'sign'
    # Expose the search type on the request so templates/other code can see it.
    setattr(self.request, 'search_type', self.search_type)
    #Get the initial selection
    if len(get) > 0 or show_all:
        if self.search_type == 'sign':
            # Get all the GLOSS items that are not member of the sub-class Morpheme
            qs = Gloss.none_morpheme_objects()
        else:
            qs = Gloss.objects.all()
    #No filters or 'show_all' specified? show nothing
    else:
        qs = Gloss.objects.none()
    #If we wanted to get everything, we're done now
    if show_all:
        return qs
    #If not, we will go trhough a long list of filters
    if get.has_key('search') and get['search'] != '':
        val = get['search']
        query = Q(idgloss__istartswith=val) | \
                Q(annotation_idgloss__istartswith=val)
        # Purely numeric searches also match the sign number.
        if re.match('^\d+$', val):
            query = query | Q(sn__exact=val)
        qs = qs.filter(query)
        #print "A: ", len(qs)
    if get.has_key('englishGloss') and get['englishGloss'] != '':
        val = get['englishGloss']
        qs = qs.filter(annotation_idgloss_en__istartswith=val)
    if get.has_key('keyword') and get['keyword'] != '':
        val = get['keyword']
        qs = qs.filter(translation__translation__text__istartswith=val)
    if get.has_key('inWeb') and get['inWeb'] != '0':
        # Don't apply 'inWeb' filter, if it is unspecified ('0' according to the NULLBOOLEANCHOICES)
        val = get['inWeb'] == 'yes'
        qs = qs.filter(inWeb__exact=val)
        #print "B :", len(qs)
    # Users without the search permission only ever see published glosses.
    if not self.request.user.has_perm('dictionary.search_gloss'):
        qs = qs.filter(inWeb__exact=True)
    if get.has_key('hasvideo') and get['hasvideo'] != 'unspecified':
        val = get['hasvideo'] == 'no'
        qs = qs.filter(glossvideo__isnull=val)
    if get.has_key('defspublished') and get['defspublished'] != 'unspecified':
        val = get['defspublished'] == 'yes'
        qs = qs.filter(definition__published=val)
    # Fields searchable via the generic exact-match loop below.
    fieldnames = ['idgloss', 'annotation_idgloss', 'annotation_idgloss_en', 'useInstr', 'sense', 'morph', 'StemSN', 'compound', 'rmrks', 'handedness',
                  'domhndsh', 'subhndsh', 'locprim', 'locVirtObj', 'relatArtic', 'relOriMov', 'relOriLoc', 'oriCh', 'handCh', 'repeat', 'altern',
                  'movSh', 'movDir', 'contType', 'phonOth', 'mouthG', 'mouthing', 'phonetVar',
                  'domSF', 'domFlex', 'oriChAbd', 'oriChFlex', 'iconImg', 'iconType', 'namEnt', 'semField', 'valence',
                  'lexCatNotes','tokNo', 'tokNoSgnr','tokNoA', 'tokNoV', 'tokNoR', 'tokNoGe', 'tokNoGr', 'tokNoO', 'tokNoSgnrA',
                  'tokNoSgnrV', 'tokNoSgnrR', 'tokNoSgnrGe', 'tokNoSgnrGr', 'tokNoSgnrO', 'inWeb', 'isNew'];
    #Language and basic property filters
    vals = get.getlist('dialect', [])
    if vals != []:
        qs = qs.filter(dialect__in=vals)
    vals = get.getlist('language', [])
    if vals != []:
        qs = qs.filter(language__in=vals)
    if get.has_key('useInstr') and get['useInstr'] != '':
        qs = qs.filter(useInstr__icontains=get['useInstr'])
    ## phonology and semantics field filters
    for fieldname in fieldnames:
        if get.has_key(fieldname):
            key = fieldname+'__exact';
            val = get[fieldname];
            # NullBoolean fields arrive as '0'..'3' and are mapped to
            # ''/None/True/False; '' means "no filter".
            if isinstance(Gloss._meta.get_field(fieldname),NullBooleanField):
                val = {'0':'','1': None, '2': True, '3': False}[val];
            if val != '':
                kwargs = {key:val};
                qs = qs.filter(**kwargs);
    if get.has_key('initial_relative_orientation') and get['initial_relative_orientation'] != '':
        val = get['initial_relative_orientation']
        qs = qs.filter(initial_relative_orientation__exact=val)
    if get.has_key('final_relative_orientation') and get['final_relative_orientation'] != '':
        val = get['final_relative_orientation']
        qs = qs.filter(final_relative_orientation__exact=val)
    if get.has_key('initial_palm_orientation') and get['initial_palm_orientation'] != '':
        val = get['initial_palm_orientation']
        qs = qs.filter(initial_palm_orientation__exact=val)
    if get.has_key('final_palm_orientation') and get['final_palm_orientation'] != '':
        val = get['final_palm_orientation']
        qs = qs.filter(final_palm_orientation__exact=val)
    if get.has_key('initial_secondary_loc') and get['initial_secondary_loc'] != '':
        val = get['initial_secondary_loc']
        qs = qs.filter(initial_secondary_loc__exact=val)
    if get.has_key('final_secondary_loc') and get['final_secondary_loc'] != '':
        val = get['final_secondary_loc']
        qs = qs.filter(final_secondary_loc__exact=val)
    # NOTE(review): the next block duplicates the 'final_secondary_loc'
    # filter just above (harmless — the filter is idempotent — but dead code).
    if get.has_key('final_secondary_loc') and get['final_secondary_loc'] != '':
        val = get['final_secondary_loc']
        qs = qs.filter(final_secondary_loc__exact=val)
    if get.has_key('defsearch') and get['defsearch'] != '':
        val = get['defsearch']
        if get.has_key('defrole'):
            role = get['defrole']
        else:
            role = 'all'
        if role == 'all':
            qs = qs.filter(definition__text__icontains=val)
        else:
            qs = qs.filter(definition__text__icontains=val, definition__role__exact=role)
    if get.has_key('tags') and get['tags'] != '':
        vals = get.getlist('tags')
        tags = []
        for t in vals:
            tags.extend(Tag.objects.filter(name=t))
        # search is an implicit AND so intersection
        tqs = TaggedItem.objects.get_intersection_by_model(Gloss, tags)
        # intersection
        qs = qs & tqs
        #print "J :", len(qs)
        qs = qs.distinct()
    if get.has_key('nottags') and get['nottags'] != '':
        vals = get.getlist('nottags')
        # print "NOT TAGS: ", vals
        tags = []
        for t in vals:
            tags.extend(Tag.objects.filter(name=t))
        # search is an implicit AND so intersection
        tqs = TaggedItem.objects.get_intersection_by_model(Gloss, tags)
        # print "NOT", tags, len(tqs)
        # exclude all of tqs from qs
        # NOTE(review): this turns qs into a plain *list*; any later
        # qs.filter(...) / qs.query access below will raise AttributeError
        # when 'nottags' is combined with another filter — TODO verify and
        # consider qs.exclude(pk__in=[g.pk for g in tqs]) instead.
        qs = [q for q in qs if q not in tqs]
        # print "K :", len(qs)
    if get.has_key('relationToForeignSign') and get['relationToForeignSign'] != '':
        relations = RelationToForeignSign.objects.filter(other_lang_gloss__icontains=get['relationToForeignSign'])
        potential_pks = [relation.gloss.pk for relation in relations]
        qs = qs.filter(pk__in=potential_pks)
    if get.has_key('hasRelationToForeignSign') and get['hasRelationToForeignSign'] != '0':
        pks_for_glosses_with_relations = [relation.gloss.pk for relation in RelationToForeignSign.objects.all()];
        print('pks_for_glosses',pks_for_glosses_with_relations)
        if get['hasRelationToForeignSign'] == '1': #We only want glosses with a relation to a foreign sign
            qs = qs.filter(pk__in=pks_for_glosses_with_relations)
        elif get['hasRelationToForeignSign'] == '2': #We only want glosses without a relation to a foreign sign
            qs = qs.exclude(pk__in=pks_for_glosses_with_relations)
    if get.has_key('relation') and get['relation'] != '':
        potential_targets = Gloss.objects.filter(idgloss__icontains=get['relation'])
        relations = Relation.objects.filter(target__in=potential_targets)
        potential_pks = [relation.source.pk for relation in relations]
        qs = qs.filter(pk__in=potential_pks)
    if get.has_key('hasRelation') and get['hasRelation'] != '':
        #Find all relations with this role
        if get['hasRelation'] == 'all':
            relations_with_this_role = Relation.objects.all();
        else:
            relations_with_this_role = Relation.objects.filter(role__exact=get['hasRelation']);
        #Remember the pk of all glosses that take part in the collected relations
        pks_for_glosses_with_correct_relation = [relation.source.pk for relation in relations_with_this_role];
        qs = qs.filter(pk__in=pks_for_glosses_with_correct_relation)
    if get.has_key('id_morpheme') and get['id_morpheme'] != '':
        # Filter all glosses that contain a morpheme with the indicated text in its gloss
        # Step 1: get all morphemes containing the indicated text
        potential_morphemes = Morpheme.objects.filter(idgloss__exact=get['id_morpheme']);
        if (potential_morphemes.count() > 0):
            # At least one has been found: take the first one
            selected_morpheme = potential_morphemes[0];
            # Step 2: get all Glosses containing the above morphemes
            potential_pks = [appears.pk for appears in Gloss.objects.filter(morphemePart=selected_morpheme)];
            qs = qs.filter(pk__in=potential_pks)
    if get.has_key('hasComponentOfType') and get['hasComponentOfType'] != '':
        # Look for "compound-components" of the indicated type. Compound Components are defined in class[MorphologyDefinition]
        morphdefs_with_correct_role = MorphologyDefinition.objects.filter(role__exact=get['hasComponentOfType']);
        pks_for_glosses_with_morphdefs_with_correct_role = [morphdef.parent_gloss.pk for morphdef in morphdefs_with_correct_role];
        qs = qs.filter(pk__in=pks_for_glosses_with_morphdefs_with_correct_role)
    if get.has_key('hasMorphemeOfType') and get['hasMorphemeOfType'] != '':
        # Get all Morphemes of the indicated mrpType
        target_morphemes = Morpheme.objects.filter(mrpType__exact=get['hasMorphemeOfType'])
        # Get all glosses that have one of the morphemes in this set
        glosses_with_correct_mrpType = Gloss.objects.filter(morphemePart__in=target_morphemes)
        # Turn this into a list with pks
        pks_for_glosses_with_correct_mrpType = [glossdef.pk for glossdef in glosses_with_correct_mrpType];
        qs = qs.filter(pk__in=pks_for_glosses_with_correct_mrpType)
    if get.has_key('definitionRole') and get['definitionRole'] != '':
        #Find all definitions with this role
        if get['definitionRole'] == 'all':
            definitions_with_this_role = Definition.objects.all();
        else:
            definitions_with_this_role = Definition.objects.filter(role__exact=get['definitionRole']);
        #Remember the pk of all glosses that are referenced in the collection definitions
        pks_for_glosses_with_these_definitions = [definition.gloss.pk for definition in definitions_with_this_role];
        qs = qs.filter(pk__in=pks_for_glosses_with_these_definitions)
    if get.has_key('definitionContains') and get['definitionContains'] != '':
        definitions_with_this_text = Definition.objects.filter(text__icontains=get['definitionContains']);
        #Remember the pk of all glosses that are referenced in the collection definitions
        pks_for_glosses_with_these_definitions = [definition.gloss.pk for definition in definitions_with_this_text];
        qs = qs.filter(pk__in=pks_for_glosses_with_these_definitions)
    if get.has_key('createdBefore') and get['createdBefore'] != '':
        created_before_date = DT.datetime.strptime(get['createdBefore'], "%m/%d/%Y").date()
        qs = qs.filter(creationDate__range=(EARLIEST_GLOSS_CREATION_DATE,created_before_date))
    if get.has_key('createdAfter') and get['createdAfter'] != '':
        created_after_date = DT.datetime.strptime(get['createdAfter'], "%m/%d/%Y").date()
        qs = qs.filter(creationDate__range=(created_after_date,DT.datetime.now()))
    # Saving querysets results to sessions, these results can then be used elsewhere (like in gloss_detail)
    # Flush the previous queryset (just in case)
    self.request.session['search_results'] = None
    # Make sure that the QuerySet has filters applied (user is searching for something instead of showing all results [objects.all()])
    if hasattr(qs.query.where, 'children') and len(qs.query.where.children) > 0:
        items = []
        for item in qs:
            items.append(dict(id = item.id, gloss = item.annotation_idgloss))
        self.request.session['search_results'] = items
    # print "Final :", len(qs)
    # Sort the queryset by the parameters given
    qs = order_queryset_by_sort_order(self.request.GET, qs)
    # Return the resulting filtered and sorted queryset
    return qs
class GlossDetailView(DetailView):
    """Detail page for a single Gloss, with permission-aware redirects."""
    model = Gloss
    context_object_name = 'gloss'
    #Overriding the get method get permissions right
    def get(self, request, *args, **kwargs):
        """Serve the gloss detail page.

        Users lacking 'dictionary.search_gloss' are redirected to the
        public gloss page (if the gloss is in the web dictionary) or get
        an empty response; anonymous users are redirected to the public
        page or to the login page.
        """
        try:
            self.object = self.get_object()
        except Http404:
            # return custom template
            return render(request, 'no_object.html', status=404)
        # NOTE: is_authenticated() as a *call* is pre-Django-1.10 API.
        if request.user.is_authenticated():
            if not request.user.has_perm('dictionary.search_gloss'):
                if self.object.inWeb:
                    return HttpResponseRedirect(reverse('dictionary:public_gloss',kwargs={'idgloss':self.object.idgloss}))
                else:
                    return HttpResponse('')
        else:
            if self.object.inWeb:
                return HttpResponseRedirect(reverse('dictionary:public_gloss', kwargs={'idgloss': self.object.idgloss}))
            else:
                return HttpResponseRedirect(reverse('registration:auth_login'))
        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)
    def get_context_data(self, **kwargs):
        """Assemble the template context: edit forms, navigation info,
        translated field values grouped per topic, and other media."""
        # Call the base implementation first to get a context
        context = super(GlossDetailView, self).get_context_data(**kwargs)
        # Add in a QuerySet of all the books
        context['tagform'] = TagUpdateForm()
        context['videoform'] = VideoUploadForGlossForm()
        context['imageform'] = ImageUploadForGlossForm()
        context['definitionform'] = DefinitionForm()
        context['relationform'] = RelationForm()
        context['morphologyform'] = GlossMorphologyForm()
        context['morphemeform'] = GlossMorphemeForm()
        context['othermediaform'] = OtherMediaForm()
        context['navigation'] = context['gloss'].navigation(True)
        context['interpform'] = InterpreterFeedbackForm()
        context['SIGN_NAVIGATION'] = settings.SIGN_NAVIGATION
        # Fall back to the current gloss id when there is no next gloss.
        next_gloss = Gloss.objects.get(pk=context['gloss'].pk).admin_next_gloss()
        if next_gloss == None:
            context['nextglossid'] = context['gloss'].pk #context['gloss']
        else:
            context['nextglossid'] = next_gloss.pk
        if settings.SIGN_NAVIGATION:
            context['glosscount'] = Gloss.objects.count()
            context['glossposn'] = Gloss.objects.filter(sn__lt=context['gloss'].sn).count()+1
        #Pass info about which fields we want to see
        gl = context['gloss'];
        labels = gl.field_labels();
        context['choice_lists'] = {}
        #Translate the machine values to human values in the correct language, and save the choice lists along the way
        for topic in ['main','phonology','semantics','frequency']:
            context[topic+'_fields'] = [];
            for field in FIELDS[topic]:
                #Get and save the choice list for this field
                field_category = fieldname_to_category(field)
                choice_list = FieldChoice.objects.filter(field__iexact=field_category)
                if len(choice_list) > 0:
                    context['choice_lists'][field] = choicelist_queryset_to_translated_ordered_dict (choice_list,self.request.LANGUAGE_CODE)
                #Take the human value in the language we are using
                machine_value = getattr(gl,field);
                # Machine values '0' and '1' are the reserved '-' / 'N/A' choices.
                if machine_value == '0':
                    human_value = '-'
                elif machine_value == '1':
                    human_value = 'N/A'
                else:
                    try:
                        selected_field_choice = choice_list.filter(machine_value=machine_value)[0]
                        if self.request.LANGUAGE_CODE == 'nl':
                            human_value = selected_field_choice.dutch_name
                        else:
                            human_value = selected_field_choice.english_name
                    except (IndexError, ValueError):
                        human_value = machine_value
                #And add the kind of field
                if field in ['useInstr','phonOth','mouthG','mouthing','phonetVar','iconImg','locVirtObj']:
                    kind = 'text';
                elif field in ['repeat','altern','oriChAbd','oriChFlex']:
                    kind = 'check';
                else:
                    kind = 'list';
                context[topic+'_fields'].append([human_value,field,labels[field],kind]);
        #Gather the OtherMedia
        context['other_media'] = []
        other_media_type_choice_list = FieldChoice.objects.filter(field__iexact='OthermediaType')
        for other_media in gl.othermedia_set.all():
            if int(other_media.type) == 0:
                human_value_media_type = '-'
            elif int(other_media.type) == 1:
                human_value_media_type = 'N/A'
            else:
                selected_field_choice = other_media_type_choice_list.filter(machine_value=other_media.type)[0]
                codes_to_adjectives = dict(settings.LANGUAGES)
                if self.request.LANGUAGE_CODE not in codes_to_adjectives.keys():
                    adjective = 'english'
                else:
                    adjective = codes_to_adjectives[self.request.LANGUAGE_CODE].lower()
                try:
                    human_value_media_type = getattr(selected_field_choice,adjective+'_name')
                except AttributeError:
                    human_value_media_type = getattr(selected_field_choice,'english_name')
            path = settings.STATIC_URL+'othermedia/'+other_media.path
            context['other_media'].append([other_media.pk, path, human_value_media_type, other_media.alternative_gloss])
            #Save the other_media_type choices (same for every other_media, but necessary because they all have other ids)
            context['choice_lists']['other-media-type_'+str(other_media.pk)] = choicelist_queryset_to_translated_ordered_dict(other_media_type_choice_list,self.request.LANGUAGE_CODE)
        #context['choice_lists'] = gl.get_choice_lists()
        context['choice_lists'] = json.dumps(context['choice_lists'])
        context['separate_english_idgloss_field'] = SEPARATE_ENGLISH_IDGLOSS_FIELD
        return context
class MorphemeListView(ListView):
    """The morpheme list view basically copies the gloss list view."""
    model = Morpheme
    template_name = 'dictionary/admin_morpheme_list.html'
    paginate_by = 500
    def get_context_data(self, **kwargs):
        """Assemble the template context: search form, counts, choice
        lists (currently only mrpType), and per-topic field/label info."""
        # Call the base implementation first to get a context
        context = super(MorphemeListView, self).get_context_data(**kwargs)
        # Add in a QuerySet of all the books
        search_form = MorphemeSearchForm(self.request.GET)
        context['searchform'] = search_form
        context['glosscount'] = Morpheme.objects.all().count()
        context['add_morpheme_form'] = MorphemeCreateForm()
        context['ADMIN_RESULT_FIELDS'] = settings.ADMIN_RESULT_FIELDS
        # make sure that the morpheme-type options are available to the listview
        oChoiceLists = {}
        choice_list = FieldChoice.objects.filter(field__iexact = fieldname_to_category('mrpType'))
        if (len(choice_list) > 0):
            ordered_dict = choicelist_queryset_to_translated_ordered_dict(choice_list, self.request.LANGUAGE_CODE)
            oChoiceLists['mrpType'] = ordered_dict
        # Make all choice lists available in the context (currently only mrpType)
        context['choice_lists'] = json.dumps(oChoiceLists)
        context['input_names_fields_and_labels'] = {}
        for topic in ['phonology', 'semantics']:
            context['input_names_fields_and_labels'][topic] = []
            for fieldname in settings.FIELDS[topic]:
                field = search_form[fieldname]
                label = field.label
                context['input_names_fields_and_labels'][topic].append((fieldname, field, label))
        return context
    def get_paginate_by(self, queryset):
        """
        Paginate by specified value in querystring, or use default class property value.
        """
        return self.request.GET.get('paginate_by', self.paginate_by)
    def get_queryset(self):
        """Build the filtered Morpheme queryset from the GET parameters.

        Mirrors GlossListView.get_queryset; stores result ids in
        ``request.session['search_results']`` and returns the sorted
        queryset. NOTE: ``get.has_key(...)`` is Python 2 only.
        """
        # get query terms from self.request
        get = self.request.GET
        if len(get) > 0:
            qs = Morpheme.objects.all()
        #Don't show anything when we're not searching yet
        else:
            qs = Morpheme.objects.none()
        if get.has_key('search') and get['search'] != '':
            val = get['search']
            query = Q(idgloss__istartswith=val) | \
                    Q(annotation_idgloss__istartswith=val)
            # Purely numeric searches also match the sign number.
            if re.match('^\d+$', val):
                query = query | Q(sn__exact=val)
            qs = qs.filter(query)
            # print "A: ", len(qs)
        if get.has_key('englishGloss') and get['englishGloss'] != '':
            val = get['englishGloss']
            qs = qs.filter(annotation_idgloss_en__istartswith=val)
        if get.has_key('keyword') and get['keyword'] != '':
            val = get['keyword']
            qs = qs.filter(translation__translation__text__istartswith=val)
        if get.has_key('inWeb') and get['inWeb'] != '0':
            # Don't apply 'inWeb' filter, if it is unspecified ('0' according to the NULLBOOLEANCHOICES)
            val = get['inWeb'] == 'yes'
            qs = qs.filter(inWeb__exact=val)
            # print "B :", len(qs)
        if get.has_key('hasvideo') and get['hasvideo'] != 'unspecified':
            val = get['hasvideo'] == 'no'
            qs = qs.filter(glossvideo__isnull=val)
        if get.has_key('defspublished') and get['defspublished'] != 'unspecified':
            val = get['defspublished'] == 'yes'
            qs = qs.filter(definition__published=val)
        # Fields searchable via the generic exact-match loop below.
        fieldnames = ['idgloss', 'annotation_idgloss', 'annotation_idgloss_en', 'useInstr', 'sense', 'morph', 'StemSN',
                      'compound', 'rmrks', 'handedness',
                      'domhndsh', 'subhndsh', 'locprim', 'locVirtObj', 'relatArtic', 'relOriMov', 'relOriLoc', 'oriCh',
                      'handCh', 'repeat', 'altern',
                      'movSh', 'movDir', 'contType', 'phonOth', 'mouthG', 'mouthing', 'phonetVar', 'iconImg', 'iconType',
                      'namEnt', 'semField', 'valence',
                      'lexCatNotes', 'tokNo', 'tokNoSgnr', 'tokNoA', 'tokNoV', 'tokNoR', 'tokNoGe', 'tokNoGr', 'tokNoO',
                      'tokNoSgnrA',
                      'tokNoSgnrV', 'tokNoSgnrR', 'tokNoSgnrGe', 'tokNoSgnrGr', 'tokNoSgnrO', 'inWeb', 'isNew'];
        # Language and basic property filters
        vals = get.getlist('dialect', [])
        if vals != []:
            qs = qs.filter(dialect__in=vals)
        vals = get.getlist('language', [])
        if vals != []:
            qs = qs.filter(language__in=vals)
        if get.has_key('useInstr') and get['useInstr'] != '':
            qs = qs.filter(useInstr__icontains=get['useInstr'])
        ## phonology and semantics field filters
        for fieldname in fieldnames:
            if get.has_key(fieldname):
                key = fieldname + '__exact';
                val = get[fieldname];
                # NullBoolean fields arrive as '0'..'3'; '' means "no filter".
                # NOTE(review): field metadata is looked up on Gloss, not
                # Morpheme — presumably equivalent since Morpheme subclasses
                # Gloss; TODO confirm.
                if isinstance(Gloss._meta.get_field(fieldname), NullBooleanField):
                    val = {'0': '', '1': None, '2': True, '3': False}[val];
                if val != '':
                    kwargs = {key: val};
                    qs = qs.filter(**kwargs);
        if get.has_key('initial_relative_orientation') and get['initial_relative_orientation'] != '':
            val = get['initial_relative_orientation']
            qs = qs.filter(initial_relative_orientation__exact=val)
        if get.has_key('final_relative_orientation') and get['final_relative_orientation'] != '':
            val = get['final_relative_orientation']
            qs = qs.filter(final_relative_orientation__exact=val)
        if get.has_key('initial_palm_orientation') and get['initial_palm_orientation'] != '':
            val = get['initial_palm_orientation']
            qs = qs.filter(initial_palm_orientation__exact=val)
        if get.has_key('final_palm_orientation') and get['final_palm_orientation'] != '':
            val = get['final_palm_orientation']
            qs = qs.filter(final_palm_orientation__exact=val)
        if get.has_key('initial_secondary_loc') and get['initial_secondary_loc'] != '':
            val = get['initial_secondary_loc']
            qs = qs.filter(initial_secondary_loc__exact=val)
        if get.has_key('final_secondary_loc') and get['final_secondary_loc'] != '':
            val = get['final_secondary_loc']
            qs = qs.filter(final_secondary_loc__exact=val)
        # NOTE(review): the next block duplicates the 'final_secondary_loc'
        # filter just above (harmless but dead code).
        if get.has_key('final_secondary_loc') and get['final_secondary_loc'] != '':
            val = get['final_secondary_loc']
            qs = qs.filter(final_secondary_loc__exact=val)
        if get.has_key('defsearch') and get['defsearch'] != '':
            val = get['defsearch']
            if get.has_key('defrole'):
                role = get['defrole']
            else:
                role = 'all'
            if role == 'all':
                qs = qs.filter(definition__text__icontains=val)
            else:
                qs = qs.filter(definition__text__icontains=val, definition__role__exact=role)
        if get.has_key('tags') and get['tags'] != '':
            vals = get.getlist('tags')
            tags = []
            for t in vals:
                tags.extend(Tag.objects.filter(name=t))
            # search is an implicit AND so intersection
            tqs = TaggedItem.objects.get_intersection_by_model(Gloss, tags)
            # intersection
            qs = qs & tqs
            # print "J :", len(qs)
            qs = qs.distinct()
        if get.has_key('nottags') and get['nottags'] != '':
            vals = get.getlist('nottags')
            # print "NOT TAGS: ", vals
            tags = []
            for t in vals:
                tags.extend(Tag.objects.filter(name=t))
            # search is an implicit AND so intersection
            tqs = TaggedItem.objects.get_intersection_by_model(Gloss, tags)
            # print "NOT", tags, len(tqs)
            # exclude all of tqs from qs
            # NOTE(review): this turns qs into a plain *list*; any later
            # qs.filter(...) / qs.query access below will raise
            # AttributeError when combined with another filter — TODO verify.
            qs = [q for q in qs if q not in tqs]
            # print "K :", len(qs)
        if get.has_key('relationToForeignSign') and get['relationToForeignSign'] != '':
            relations = RelationToForeignSign.objects.filter(other_lang_gloss__icontains=get['relationToForeignSign'])
            potential_pks = [relation.gloss.pk for relation in relations]
            qs = qs.filter(pk__in=potential_pks)
        if get.has_key('hasRelationToForeignSign') and get['hasRelationToForeignSign'] != '0':
            pks_for_glosses_with_relations = [relation.gloss.pk for relation in RelationToForeignSign.objects.all()];
            print('pks_for_glosses', pks_for_glosses_with_relations)
            if get['hasRelationToForeignSign'] == '1': # We only want glosses with a relation to a foreign sign
                qs = qs.filter(pk__in=pks_for_glosses_with_relations)
            elif get['hasRelationToForeignSign'] == '2': # We only want glosses without a relation to a foreign sign
                qs = qs.exclude(pk__in=pks_for_glosses_with_relations)
        if get.has_key('relation') and get['relation'] != '':
            potential_targets = Gloss.objects.filter(idgloss__icontains=get['relation'])
            relations = Relation.objects.filter(target__in=potential_targets)
            potential_pks = [relation.source.pk for relation in relations]
            qs = qs.filter(pk__in=potential_pks)
        if get.has_key('hasRelation') and get['hasRelation'] != '':
            # Find all relations with this role
            if get['hasRelation'] == 'all':
                relations_with_this_role = Relation.objects.all();
            else:
                relations_with_this_role = Relation.objects.filter(role__exact=get['hasRelation']);
            # Remember the pk of all glosses that take part in the collected relations
            pks_for_glosses_with_correct_relation = [relation.source.pk for relation in relations_with_this_role];
            qs = qs.filter(pk__in=pks_for_glosses_with_correct_relation)
        if get.has_key('morpheme') and get['morpheme'] != '':
            potential_morphemes = Gloss.objects.filter(idgloss__icontains=get['morpheme']);
            potential_morphdefs = MorphologyDefinition.objects.filter(
                morpheme__in=[morpheme.pk for morpheme in potential_morphemes])
            potential_pks = [morphdef.parent_gloss.pk for morphdef in potential_morphdefs];
            qs = qs.filter(pk__in=potential_pks)
        if get.has_key('hasMorphemeOfType') and get['hasMorphemeOfType'] != '':
            # Get all Morphemes of the indicated mrpType
            target_morphemes = Morpheme.objects.filter(mrpType__exact=get['hasMorphemeOfType'])
            # Turn this into a list with pks
            pks_for_glosses_with_correct_mrpType = [glossdef.pk for glossdef in target_morphemes];
            qs = qs.filter(pk__in=pks_for_glosses_with_correct_mrpType)
        # if get.has_key('hasMorphemeOfType') and get['hasMorphemeOfType'] != '':
        #     morphdefs_with_correct_role = MorphologyDefinition.objects.filter(role__exact=get['hasMorphemeOfType']);
        #     pks_for_glosses_with_morphdefs_with_correct_role = [morphdef.parent_gloss.pk for morphdef in
        #                                                         morphdefs_with_correct_role];
        #     qs = qs.filter(pk__in=pks_for_glosses_with_morphdefs_with_correct_role)
        if get.has_key('definitionRole') and get['definitionRole'] != '':
            # Find all definitions with this role
            if get['definitionRole'] == 'all':
                definitions_with_this_role = Definition.objects.all();
            else:
                definitions_with_this_role = Definition.objects.filter(role__exact=get['definitionRole']);
            # Remember the pk of all glosses that are referenced in the collection definitions
            pks_for_glosses_with_these_definitions = [definition.gloss.pk for definition in definitions_with_this_role];
            qs = qs.filter(pk__in=pks_for_glosses_with_these_definitions)
        if get.has_key('definitionContains') and get['definitionContains'] != '':
            definitions_with_this_text = Definition.objects.filter(text__icontains=get['definitionContains']);
            # Remember the pk of all glosses that are referenced in the collection definitions
            pks_for_glosses_with_these_definitions = [definition.gloss.pk for definition in definitions_with_this_text];
            qs = qs.filter(pk__in=pks_for_glosses_with_these_definitions)
        if get.has_key('createdBefore') and get['createdBefore'] != '':
            created_before_date = DT.datetime.strptime(get['createdBefore'], "%m/%d/%Y").date()
            qs = qs.filter(creationDate__range=(EARLIEST_GLOSS_CREATION_DATE, created_before_date))
        if get.has_key('createdAfter') and get['createdAfter'] != '':
            created_after_date = DT.datetime.strptime(get['createdAfter'], "%m/%d/%Y").date()
            qs = qs.filter(creationDate__range=(created_after_date, DT.datetime.now()))
        # Saving querysets results to sessions, these results can then be used elsewhere (like in gloss_detail)
        # Flush the previous queryset (just in case)
        self.request.session['search_results'] = None
        # Make sure that the QuerySet has filters applied (user is searching for something instead of showing all results [objects.all()])
        if hasattr(qs.query.where, 'children') and len(qs.query.where.children) > 0:
            items = []
            for item in qs:
                items.append(dict(id=item.id, gloss=item.idgloss))
            self.request.session['search_results'] = items
        # print "Final :", len(qs)
        # Sort the queryset by the parameters given
        qs = order_queryset_by_sort_order(self.request.GET, qs)
        # Return the resulting filtered and sorted queryset
        return qs
    def render_to_response(self, context):
        """Dispatch to CSV export when '?format=CSV' is requested."""
        # Look for a 'format=json' GET argument
        if self.request.GET.get('format') == 'CSV':
            return self.render_to_csv_response(context)
        else:
            return super(MorphemeListView, self).render_to_response(context)
    # noinspection PyInterpreter,PyInterpreter
    def render_to_csv_response(self, context):
        """Convert all Morphemes into a CSV
        This function is derived from and similar to the one used in class GlossListView
        Differences:
        1 - this one adds the field [mrpType]
        2 - the filename is different

        Raises PermissionDenied unless the user has
        'dictionary.export_csv'.
        """
        if not self.request.user.has_perm('dictionary.export_csv'):
            raise PermissionDenied
        # Create the HttpResponse object with the appropriate CSV header.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="dictionary-morph-export.csv"'
        # fields = [f.name for f in Gloss._meta.fields]
        # We want to manually set which fields to export here
        fieldnames = ['idgloss', 'annotation_idgloss', 'annotation_idgloss_en',
                      'mrpType',
                      'useInstr', 'sense', 'StemSN', 'rmrks',
                      'handedness',
                      'domhndsh', 'subhndsh', 'handCh', 'relatArtic', 'locprim', 'locVirtObj', 'relOriMov', 'relOriLoc',
                      'oriCh', 'contType',
                      'movSh', 'movDir', 'repeat', 'altern', 'phonOth', 'mouthG', 'mouthing', 'phonetVar',
                      'domSF', 'domFlex', 'oriChAbd', 'oriChFlex', 'iconImg', 'iconType',
                      'namEnt', 'semField', 'valence', 'lexCatNotes', 'tokNo', 'tokNoSgnr', 'tokNoA', 'tokNoV',
                      'tokNoR', 'tokNoGe',
                      'tokNoGr', 'tokNoO', 'tokNoSgnrA', 'tokNoSgnrV', 'tokNoSgnrR', 'tokNoSgnrGe',
                      'tokNoSgnrGr', 'tokNoSgnrO', 'inWeb', 'isNew']
        # Different from Gloss: we use Morpheme here
        fields = [Morpheme._meta.get_field(fieldname) for fieldname in fieldnames]
        writer = csv.writer(response)
        # Render the column headers in the configured language.
        with override(LANGUAGE_CODE):
            header = ['Signbank ID'] + [f.verbose_name.title().encode('ascii', 'ignore') for f in fields]
        for extra_column in ['Languages', 'Dialects', 'Keywords', 'Morphology', 'Relations to other signs',
                             'Relations to foreign signs', 'Appears in signs', ]:
            header.append(extra_column);
        writer.writerow(header)
        for gloss in self.get_queryset():
            row = [str(gloss.pk)]
            for f in fields:
                # Try the value of the choicelist
                try:
                    row.append(getattr(gloss, 'get_' + f.name + '_display')())
                # If it's not there, try the raw value
                except AttributeError:
                    value = getattr(gloss, f.name)
                    # NOTE: 'unicode' is Python 2 only.
                    if isinstance(value, unicode):
                        value = str(value.encode('ascii', 'xmlcharrefreplace'));
                    elif not isinstance(value, str):
                        value = str(value);
                    row.append(value)
            # get languages
            languages = [language.name for language in gloss.language.all()]
            row.append(", ".join(languages));
            # get dialects
            dialects = [dialect.name for dialect in gloss.dialect.all()]
            row.append(", ".join(dialects));
            # get translations
            trans = [t.translation.text for t in gloss.translation_set.all()]
            row.append(", ".join(trans))
            # get compound's component type
            morphemes = [morpheme.role for morpheme in MorphologyDefinition.objects.filter(parent_gloss=gloss)]
            row.append(", ".join(morphemes))
            # get relations to other signs
            relations = [relation.target.idgloss for relation in Relation.objects.filter(source=gloss)]
            row.append(", ".join(relations))
            # get relations to foreign signs
            relations = [relation.other_lang_gloss for relation in RelationToForeignSign.objects.filter(gloss=gloss)]
            row.append(", ".join(relations))
            # Got all the glosses (=signs) this morpheme appears in
            appearsin = [appears.idgloss for appears in Gloss.objects.filter(morphemePart=gloss)]
            row.append(", ".join(appearsin))
            # Make it safe for weird chars
            safe_row = [];
            for column in row:
                try:
                    safe_row.append(column.encode('utf-8'))
                except AttributeError:
                    safe_row.append(None);
            writer.writerow(safe_row)
        return response
class MorphemeDetailView(DetailView):
    """Detail page for a single Morpheme; mirrors GlossDetailView."""
    model = Morpheme
    context_object_name = 'morpheme'
    def get_context_data(self, **kwargs):
        """Assemble the template context: edit forms, navigation info,
        the glosses this morpheme appears in, translated field values
        grouped per topic, and other media."""
        # Call the base implementation first to get a context
        context = super(MorphemeDetailView, self).get_context_data(**kwargs)
        # Add in a QuerySet of all the books
        context['tagform'] = TagUpdateForm()
        context['videoform'] = VideoUploadForGlossForm()
        context['imageform'] = ImageUploadForGlossForm()
        context['definitionform'] = DefinitionForm()
        context['relationform'] = RelationForm()
        context['morphologyform'] = MorphemeMorphologyForm()
        context['othermediaform'] = OtherMediaForm()
        context['navigation'] = context['morpheme'].navigation(True)
        context['interpform'] = InterpreterFeedbackForm()
        context['SIGN_NAVIGATION'] = settings.SIGN_NAVIGATION
        # Get the set of all the Gloss signs that point to me
        context['glosslinks'] = Gloss.objects.filter(morphemePart__id=context['morpheme'].id)
        # context['glosslinks'] = self.gloss_set.all()
        try:
            # Note: setting idgloss to context['morpheme'] is not enough; the ".idgloss" needs to be specified
            next_morpheme = Morpheme.objects.get(idgloss=context['morpheme'].idgloss).admin_next_morpheme()
        # NOTE(review): bare except — swallows *all* errors, not just a
        # missing/duplicate morpheme; consider narrowing.
        except:
            next_morpheme = None
        if next_morpheme == None:
            context['nextmorphemeid'] = context['morpheme'].pk
        else:
            context['nextmorphemeid'] = next_morpheme.pk
        if settings.SIGN_NAVIGATION:
            context['glosscount'] = Morpheme.objects.count()
            context['glossposn'] = Morpheme.objects.filter(sn__lt=context['morpheme'].sn).count() + 1
        # Pass info about which fields we want to see
        gl = context['morpheme'];
        labels = gl.field_labels();
        context['choice_lists'] = {}
        # Translate the machine values to human values in the correct language, and save the choice lists along the way
        for topic in ['phonology', 'semantics', 'frequency']:
            context[topic + '_fields'] = [];
            for field in FIELDS[topic]:
                # Get and save the choice list for this field
                field_category = fieldname_to_category(field)
                choice_list = FieldChoice.objects.filter(field__iexact=field_category)
                if len(choice_list) > 0:
                    context['choice_lists'][field] = choicelist_queryset_to_translated_ordered_dict(choice_list,
                                                                                                    self.request.LANGUAGE_CODE)
                # Take the human value in the language we are using
                machine_value = getattr(gl, field);
                # Machine values '0' and '1' are the reserved '-' / 'N/A' choices.
                if machine_value == '0':
                    human_value = '-'
                elif machine_value == '1':
                    human_value = 'N/A'
                else:
                    try:
                        selected_field_choice = choice_list.filter(machine_value=machine_value)[0]
                        if self.request.LANGUAGE_CODE == 'nl':
                            human_value = selected_field_choice.dutch_name
                        else:
                            human_value = selected_field_choice.english_name
                    except (IndexError, ValueError):
                        human_value = machine_value
                # And add the kind of field
                if field in ['phonOth', 'mouthG', 'mouthing', 'phonetVar', 'iconImg', 'locVirtObj']:
                    kind = 'text';
                elif field in ['repeat', 'altern']:
                    kind = 'check';
                else:
                    kind = 'list';
                context[topic + '_fields'].append([human_value, field, labels[field], kind]);
        # Gather the OtherMedia
        context['other_media'] = []
        other_media_type_choice_list = FieldChoice.objects.filter(field__iexact='OthermediaType')
        for other_media in gl.othermedia_set.all():
            if int(other_media.type) == 0:
                human_value_media_type = '-'
            elif int(other_media.type) == 1:
                human_value_media_type = 'N/A'
            else:
                selected_field_choice = other_media_type_choice_list.filter(machine_value=other_media.type)[0]
                codes_to_adjectives = dict(settings.LANGUAGES)
                if self.request.LANGUAGE_CODE not in codes_to_adjectives.keys():
                    adjective = 'english'
                else:
                    adjective = codes_to_adjectives[self.request.LANGUAGE_CODE].lower()
                try:
                    human_value_media_type = getattr(selected_field_choice, adjective + '_name')
                except AttributeError:
                    human_value_media_type = getattr(selected_field_choice, 'english_name')
            path = settings.STATIC_URL + 'othermedia/' + other_media.path
            context['other_media'].append([other_media.pk, path, human_value_media_type, other_media.alternative_gloss])
            # Save the other_media_type choices (same for every other_media, but necessary because they all have other ids)
            context['choice_lists'][
                'other-media-type_' + str(other_media.pk)] = choicelist_queryset_to_translated_ordered_dict(
                other_media_type_choice_list, self.request.LANGUAGE_CODE)
        # context['choice_lists'] = gl.get_choice_lists()
        context['choice_lists'] = json.dumps(context['choice_lists'])
        context['separate_english_idgloss_field'] = SEPARATE_ENGLISH_IDGLOSS_FIELD
        return context
def gloss_ajax_search_results(request):
    """Return, as a JSON HTTP response, the glosses matching the search
    previously performed by this user, as stored in the session."""
    # NOTE(review): raises KeyError if no search was stored in the session yet
    # — confirm callers always search first.
    stored_results = request.session['search_results']
    return HttpResponse(json.dumps(stored_results))
def gloss_ajax_complete(request, prefix):
    """Return a list of glosses matching the search term
    as a JSON structure suitable for typeahead.

    A gloss matches when its idgloss or annotation_idgloss starts with
    *prefix* (case-insensitive), or its sn starts with *prefix*.
    """
    query = Q(idgloss__istartswith=prefix) | \
            Q(annotation_idgloss__istartswith=prefix) | \
            Q(sn__startswith=prefix)
    # TODO: possibly reduce the possibilities of [Gloss.objects] to exclude Morphemes??
    # Suggestion: qs = Gloss.none_morpheme_objects.filter(query) -- if that works
    qs = Gloss.objects.filter(query)
    # NOTE(review): 'pk' is deliberately the idgloss, not the database pk —
    # presumably what the typeahead widget expects; confirm before changing.
    result = [{'idgloss': g.idgloss,
               'annotation_idgloss': g.annotation_idgloss,
               'sn': g.sn,
               'pk': "%s" % (g.idgloss)} for g in qs]
    # BUG FIX: HttpResponse's second positional argument is content_type
    # (mimetype in old Django); passing a dict there never set the header.
    return HttpResponse(json.dumps(result), content_type='application/json')
def morph_ajax_complete(request, prefix):
    """Return a list of morphs matching the search term
    as a JSON structure suitable for typeahead.

    A morpheme matches when its idgloss or annotation_idgloss starts with
    *prefix* (case-insensitive), or its sn starts with *prefix*.
    """
    query = Q(idgloss__istartswith=prefix) | \
            Q(annotation_idgloss__istartswith=prefix) | \
            Q(sn__startswith=prefix)
    qs = Morpheme.objects.filter(query)
    # NOTE(review): 'pk' is deliberately the idgloss, not the database pk,
    # mirroring gloss_ajax_complete — confirm before changing.
    result = [{'idgloss': g.idgloss,
               'annotation_idgloss': g.annotation_idgloss,
               'sn': g.sn,
               'pk': "%s" % (g.idgloss)} for g in qs]
    # BUG FIX: HttpResponse's second positional argument is content_type
    # (mimetype in old Django); passing a dict there never set the header.
    return HttpResponse(json.dumps(result), content_type='application/json')
def choicelist_queryset_to_translated_ordered_dict(queryset, language_code):
    """Turn a FieldChoice queryset into an OrderedDict keyed by
    '_<machine_value>', mapped to the choice name translated for
    *language_code*, sorted by that translated name; the special entries
    '_0' -> '-' and '_1' -> 'N/A' always come first."""
    adjectives_by_code = dict(settings.LANGUAGES)
    if language_code not in adjectives_by_code.keys():
        adjective = 'english'
    else:
        adjective = adjectives_by_code[language_code].lower()
    try:
        pairs = [('_' + str(c.machine_value), unicode(getattr(c, adjective + '_name')))
                 for c in queryset]
    except AttributeError:
        # no translated attribute for this language: fall back to English
        pairs = [('_' + str(c.machine_value), unicode(getattr(c, 'english_name')))
                 for c in queryset]
    ordered_pairs = [('_0', '-'), ('_1', 'N/A')] + sorted(pairs, key=lambda pair: pair[1])
    return OrderedDict(ordered_pairs)
|
from smbool import SMBool
from rom_patches import RomPatches
from helpers import Helpers, Bosses
from graph_access import getAccessPoint
from cache import Cache
from math import ceil
from parameters import Settings
class HelpersGraph(Helpers):
def __init__(self, smbm):
self.smbm = smbm
self.draygonConnection = None
def canEnterAndLeaveGauntletQty(self, nPB, nTanksSpark):
sm = self.smbm
# EXPLAINED: to access Gauntlet Entrance from Landing site we can either:
# -fly to it (infinite bomb jumps or space jump)
# -shinespark to it
# -wall jump with high jump boots
# -wall jump without high jump boots
# then inside it to break the bomb wals:
# -use screw attack (easy way)
# -use power bombs
# -use bombs
# -perform a simple short charge on the way in
# and use power bombs on the way out
return sm.wand(sm.wor(sm.canFly(),
sm.haveItem('SpeedBooster'),
sm.wand(sm.knowsHiJumpGauntletAccess(),
sm.haveItem('HiJump')),
sm.knowsHiJumpLessGauntletAccess()),
sm.wor(sm.haveItem('ScrewAttack'),
sm.wor(sm.wand(sm.energyReserveCountOkHardRoom('Gauntlet'),
sm.wand(sm.canUsePowerBombs(),
sm.wor(sm.itemCountOk('PowerBomb', nPB),
sm.wand(sm.haveItem('SpeedBooster'),
sm.energyReserveCountOk(nTanksSpark))))),
sm.wand(sm.energyReserveCountOkHardRoom('Gauntlet', 0.51),
sm.canUseBombs()))))
@Cache.decorator
def canEnterAndLeaveGauntlet(self):
sm = self.smbm
return sm.wor(sm.wand(sm.canShortCharge(),
sm.canEnterAndLeaveGauntletQty(2, 2)),
sm.canEnterAndLeaveGauntletQty(2, 3))
def canPassTerminatorBombWall(self, fromLandingSite=True):
sm = self.smbm
return sm.wand(sm.wor(sm.wand(sm.haveItem('SpeedBooster'),
sm.wor(SMBool(not fromLandingSite, 0), sm.knowsSimpleShortCharge(), sm.knowsShortCharge())),
sm.canDestroyBombWalls()),
sm.canPassCrateriaGreenPirates())
# mostly for going up but let's be noob friendly and add the condition for both ways
@Cache.decorator
def canPassCrateriaGreenPirates(self):
sm = self.smbm
return sm.wor(sm.canPassBombPassages(),
sm.canOpenRedDoors(),
sm.energyReserveCountOk(1),
sm.wor(sm.haveItem('Charge'),
sm.haveItem('Ice'),
sm.haveItem('Wave'),
sm.wor(sm.haveItem('Spazer'),
sm.haveItem('Plasma'),
sm.haveItem('ScrewAttack'))))
# from blue brin elevator
@Cache.decorator
def canAccessBillyMays(self):
sm = self.smbm
return sm.wand(sm.wor(RomPatches.has(RomPatches.BlueBrinstarBlueDoor),
sm.canOpenRedDoors()),
sm.canUsePowerBombs(),
sm.wor(sm.knowsBillyMays(),
sm.haveItem('Gravity'),
sm.haveItem('SpaceJump')))
@Cache.decorator
def canAccessKraidsLair(self):
sm = self.smbm
# EXPLAINED: access the upper right platform with either:
# -hijump boots (easy regular way)
# -fly (space jump or infinite bomb jump)
# -know how to wall jump on the platform without the hijump boots
return sm.wand(sm.canOpenGreenDoors(),
sm.wor(sm.haveItem('HiJump'),
sm.canFly(),
sm.knowsEarlyKraid()))
@Cache.decorator
def canPassMoat(self):
sm = self.smbm
# EXPLAINED: In the Moat we can either:
# -use grapple or space jump (easy way)
# -do a continuous wall jump (https://www.youtube.com/watch?v=4HVhTwwax6g)
# -do a diagonal bomb jump from the middle platform (https://www.youtube.com/watch?v=5NRqQ7RbK3A&t=10m58s)
# -do a short charge from the Keyhunter room (https://www.youtube.com/watch?v=kFAYji2gFok)
# -do a gravity jump from below the right platform
# -do a mock ball and a bounce ball (https://www.youtube.com/watch?v=WYxtRF--834)
# -with gravity, either hijump or IBJ
return sm.wor(sm.wor(sm.haveItem('Grapple'),
sm.haveItem('SpaceJump'),
sm.knowsContinuousWallJump()),
sm.wor(sm.wand(sm.knowsDiagonalBombJump(), sm.canUseBombs()),
sm.canSimpleShortCharge(),
sm.wand(sm.haveItem('Gravity'),
sm.wor(sm.knowsGravityJump(),
sm.haveItem('HiJump'),
sm.canInfiniteBombJump())),
sm.wand(sm.knowsMockballWs(), sm.canUseSpringBall())))
@Cache.decorator
def canPassMoatReverse(self):
sm = self.smbm
return sm.wor(sm.haveItem('Grapple'),
sm.haveItem('SpaceJump'),
sm.haveItem('Gravity'),
sm.wand(sm.haveItem('Morph'),
sm.wor(RomPatches.has(RomPatches.MoatShotBlock),
sm.canPassBombPassages())))
@Cache.decorator
def canPassSpongeBath(self):
sm = self.smbm
return sm.wor(sm.wand(sm.canPassBombPassages(),
sm.knowsSpongeBathBombJump()),
sm.wand(sm.haveItem('HiJump'),
sm.knowsSpongeBathHiJump()),
sm.wor(sm.haveItem('Gravity'),
sm.haveItem('SpaceJump'),
sm.wand(sm.haveItem('SpeedBooster'),
sm.knowsSpongeBathSpeed()),
sm.canSpringBallJump()))
@Cache.decorator
def canPassBowling(self):
sm = self.smbm
return sm.wand(Bosses.bossDead('Phantoon'),
sm.wor(sm.heatProof(),
sm.energyReserveCountOk(1),
sm.haveItem("SpaceJump"),
sm.haveItem("Grapple")))
@Cache.decorator
def canAccessEtecoons(self):
sm = self.smbm
return sm.wor(sm.canUsePowerBombs(),
sm.wand(sm.knowsMoondance(), sm.canUseBombs(), sm.canOpenRedDoors()))
# the water zone east of WS
def canPassForgottenHighway(self, fromWs):
sm = self.smbm
suitless = sm.wand(sm.haveItem('HiJump'), sm.knowsGravLessLevel1())
if fromWs is True and RomPatches.has(RomPatches.EastOceanPlatforms).bool is False:
suitless = sm.wand(suitless,
# to break water line and go through the door on the right
sm.haveItem('SpaceJump'))
return sm.wand(sm.wor(sm.haveItem('Gravity'),
suitless),
sm.haveItem('Morph')) # for crab maze
@Cache.decorator
def canExitCrabHole(self):
sm = self.smbm
return sm.wand(sm.haveItem('Morph'), # morph to exit the hole
sm.wor(sm.wand(sm.haveItem('Gravity'), # even with gravity you need some way to climb...
sm.wor(sm.haveItem('Ice'), # ...on crabs...
sm.haveItem('HiJump'), # ...or by jumping
sm.knowsGravityJump(),
sm.canFly())),
sm.wand(sm.haveItem('Ice'), sm.canDoSuitlessOuterMaridia()), # climbing crabs
sm.canDoubleSpringBallJump()))
# bottom sandpits with the evirs
@Cache.decorator
def canTraverseSandPits(self):
sm = self.smbm
return sm.wor(sm.haveItem('Gravity'),
sm.wand(sm.knowsGravLessLevel3(),
sm.haveItem('HiJump'),
sm.haveItem('Ice')))
@Cache.decorator
def canPassMaridiaToRedTowerNode(self):
sm = self.smbm
return sm.wand(sm.haveItem('Morph'),
sm.wor(RomPatches.has(RomPatches.AreaRandoGatesBase),
sm.canOpenGreenDoors()))
@Cache.decorator
def canPassRedTowerToMaridiaNode(self):
sm = self.smbm
return sm.wand(sm.haveItem('Morph'),
RomPatches.has(RomPatches.AreaRandoGatesBase))
def canEnterCathedral(self, mult=1.0):
sm = self.smbm
return sm.wand(sm.canOpenRedDoors(),
sm.wor(sm.wand(sm.canHellRun('MainUpperNorfair', mult),
sm.wor(sm.wor(RomPatches.has(RomPatches.CathedralEntranceWallJump),
sm.haveItem('HiJump'),
sm.canFly()),
sm.wor(sm.haveItem('SpeedBooster'), # spark
sm.canSpringBallJump()))),
sm.wand(sm.canHellRun('MainUpperNorfair', 0.5*mult),
sm.haveItem('Morph'),
sm.knowsNovaBoost())))
@Cache.decorator
def canClimbBubbleMountain(self):
sm = self.smbm
return sm.wor(sm.haveItem('HiJump'),
sm.canFly(),
sm.haveItem('Ice'),
sm.knowsBubbleMountainWallJump())
@Cache.decorator
def canHellRunToSpeedBooster(self):
sm = self.smbm
return sm.canHellRun(**Settings.hellRunsTable['MainUpperNorfair']['Bubble -> Speed Booster w/Speed' if sm.haveItem('SpeedBooster') else 'Bubble -> Speed Booster'])
@Cache.decorator
def canExitCathedral(self):
# from top: can use bomb/powerbomb jumps
# from bottom: can do a shinespark or use space jump
# can do it with highjump + wall jump
# can do it with only two wall jumps (the first one is delayed like on alcatraz)
# can do it with a spring ball jump from wall
sm = self.smbm
return sm.wand(sm.wor(sm.canHellRun(**Settings.hellRunsTable['MainUpperNorfair']['Bubble -> Norfair Entrance']),
sm.heatProof()),
sm.wor(sm.wor(sm.canPassBombPassages(),
sm.haveItem("SpeedBooster")),
sm.wor(sm.haveItem("SpaceJump"),
sm.haveItem("HiJump"),
sm.knowsWallJumpCathedralExit(),
sm.wand(sm.knowsSpringBallJumpFromWall(), sm.canUseSpringBall()))))
@Cache.decorator
def canGrappleEscape(self):
sm = self.smbm
return sm.wor(sm.wor(sm.haveItem('SpaceJump'),
sm.wand(sm.canInfiniteBombJump(), # IBJ from lava...either have grav or freeze the enemy there if hellrunning (otherwise single DBJ at the end)
sm.wor(sm.heatProof(),
sm.haveItem('Gravity'),
sm.haveItem('Ice')))),
sm.haveItem('Grapple'),
sm.wand(sm.haveItem('SpeedBooster'),
sm.wor(sm.haveItem('HiJump'), # jump from the blocks below
sm.knowsShortCharge())), # spark from across the grapple blocks
sm.wand(sm.haveItem('HiJump'), sm.canSpringBallJump())) # jump from the blocks below
@Cache.decorator
def canPassFrogSpeedwayRightToLeft(self):
sm = self.smbm
return sm.wor(sm.haveItem('SpeedBooster'),
sm.wand(sm.knowsFrogSpeedwayWithoutSpeed(),
sm.haveItem('Wave'),
sm.wor(sm.haveItem('Spazer'),
sm.haveItem('Plasma'))))
@Cache.decorator
def canEnterNorfairReserveAreaFromBubbleMoutain(self):
sm = self.smbm
return sm.wand(sm.canOpenGreenDoors(),
sm.wor(sm.canFly(),
sm.haveItem('Ice'),
sm.wand(sm.haveItem('HiJump'),
sm.knowsGetAroundWallJump()),
sm.wand(sm.canUseSpringBall(),
sm.knowsSpringBallJumpFromWall())))
@Cache.decorator
def canEnterNorfairReserveAreaFromBubbleMoutainTop(self):
sm = self.smbm
return sm.wand(sm.canOpenGreenDoors(),
sm.wor(sm.haveItem('Grapple'),
sm.haveItem('SpaceJump'),
sm.knowsNorfairReserveDBoost()))
    @Cache.decorator
    def canPassLavaPit(self):
        """Can Samus dive through the lava pit towards Lower Norfair?

        Power bombs are always required for the blocks on both sides of the
        LN entrance. Returns an SMBool.
        """
        sm = self.smbm
        # base energy cost of the dive, scaled down by suit damage reduction
        # NOTE(review): '/' is integer division if the reduction factor is an
        # int — presumably intended under Python 2 semantics; confirm on py3.
        nTanks4Dive = 8 / sm.getDmgReduction()[0]
        if sm.haveItem('HiJump').bool == False:
            # the dive takes longer without HiJump, so require 25% more energy
            nTanks4Dive = ceil(nTanks4Dive * 1.25)
        return sm.wand(sm.wor(sm.wand(sm.haveItem('Gravity'), sm.haveItem('SpaceJump')),
                              sm.wand(sm.knowsGravityJump(), sm.haveItem('Gravity'), sm.wor(sm.haveItem('HiJump'), sm.knowsLavaDive())),
                              sm.wand(sm.wor(sm.wand(sm.knowsLavaDive(), sm.haveItem('HiJump')),
                                             sm.knowsLavaDiveNoHiJump()),
                                      sm.energyReserveCountOk(nTanks4Dive))),
                       sm.canUsePowerBombs()) # power bomb blocks left and right of LN entrance without any items before
@Cache.decorator
def canPassLavaPitReverse(self):
sm = self.smbm
nTanks = 2
if sm.heatProof().bool == False:
nTanks = 6
return sm.energyReserveCountOk(nTanks)
@Cache.decorator
def canPassLowerNorfairChozo(self):
sm = self.smbm
# to require one more CF if no heat protection because of distance to cover, wait times, acid...
return sm.wand(sm.canHellRun(**Settings.hellRunsTable['LowerNorfair']['Entrance -> GT via Chozo']),
sm.canUsePowerBombs(),
sm.wor(RomPatches.has(RomPatches.LNChozoSJCheckDisabled), sm.haveItem('SpaceJump')))
@Cache.decorator
def canExitScrewAttackArea(self):
sm = self.smbm
return sm.wand(sm.canDestroyBombWalls(),
sm.wor(sm.canFly(),
sm.wand(sm.haveItem('HiJump'),
sm.haveItem('SpeedBooster'),
sm.wor(sm.wand(sm.haveItem('ScrewAttack'), sm.knowsScrewAttackExit()),
sm.knowsScrewAttackExitWithoutScrew())),
sm.wand(sm.canUseSpringBall(),
sm.knowsSpringBallJumpFromWall()),
sm.wand(sm.canSimpleShortCharge(), # fight GT and spark out
sm.enoughStuffGT())))
@Cache.decorator
def canPassWorstRoom(self):
sm = self.smbm
return sm.wand(sm.canDestroyBombWalls(),
sm.wor(sm.canFly(),
sm.wand(sm.knowsWorstRoomIceCharge(), sm.haveItem('Ice'), sm.canFireChargedShots()),
sm.wand(sm.knowsGetAroundWallJump(), sm.haveItem('HiJump')),
sm.wand(sm.knowsSpringBallJumpFromWall(), sm.canUseSpringBall())))
@Cache.decorator
def canPassThreeMuskateers(self):
sm = self.smbm
destroy = sm.wor(sm.haveItem('Plasma'),
sm.haveItem('ScrewAttack'),
sm.wand(sm.heatProof(), # this takes a loooong time ...
sm.wor(sm.haveItem('Spazer'),
sm.haveItem('Ice'))))
if destroy.bool == True:
return destroy
# if no adapted beams or screw attack, check if we can go both ways
# (no easy refill around) with supers and/or health
# - super only?
ki = 1800.0
sup = 300.0
nbKi = 6.0
if sm.itemCount('Super')*5*sup >= nbKi*ki:
return SMBool(True, 0, items=['Super'])
# - or with taking damage as well?
(dmgRed, redItems) = sm.getDmgReduction(envDmg=False)
dmgKi = 200.0 / dmgRed
if (sm.itemCount('Super')*5*sup)/ki + (sm.energyReserveCount()*100 - 2)/dmgKi >= nbKi:
# require heat proof as long as taking damage is necessary.
# display all the available energy in the solver.
return sm.wand(sm.heatProof(), SMBool(True, 0, items=redItems+['Super', '{}-ETank - {}-Reserve'.format(self.smbm.itemCount('ETank'), self.smbm.itemCount('Reserve'))]))
return sm.knowsDodgeThreeMuskateers()
# go though the pirates room filled with acid
@Cache.decorator
def canPassAmphitheaterReverse(self):
sm = self.smbm
dmgRed = sm.getDmgReduction()[0]
nTanksGrav = 4 * 4/dmgRed
nTanksNoGrav = 6 * 4/dmgRed
return sm.wor(sm.wand(sm.haveItem('Gravity'),
sm.energyReserveCountOk(nTanksGrav)),
sm.wand(sm.energyReserveCountOk(nTanksNoGrav),
sm.knowsLavaDive())) # should be a good enough skill filter for acid wall jumps with no grav...
@Cache.decorator
def canClimbRedTower(self):
sm = self.smbm
return sm.wor(sm.knowsRedTowerClimb(),
sm.haveItem('Ice'),
sm.haveItem('SpaceJump'))
@Cache.decorator
def canClimbBottomRedTower(self):
sm = self.smbm
return sm.wor(sm.wor(RomPatches.has(RomPatches.RedTowerLeftPassage),
sm.haveItem('HiJump'),
sm.haveItem('Ice'),
sm.canFly()),
sm.canShortCharge())
@Cache.decorator
def canGoUpMtEverest(self):
sm = self.smbm
return sm.wor(sm.wand(sm.haveItem('Gravity'),
sm.wor(sm.haveItem('Grapple'),
sm.haveItem('SpeedBooster'),
sm.canFly(),
sm.wand(sm.haveItem('HiJump'), sm.knowsGravityJump()))),
sm.wand(sm.canDoSuitlessOuterMaridia(),
sm.haveItem('Grapple')))
@Cache.decorator
def canPassMtEverest(self):
sm = self.smbm
return sm.wor(sm.wand(sm.haveItem('Gravity'),
sm.wor(sm.haveItem('Grapple'),
sm.haveItem('SpeedBooster'),
sm.canFly(),
sm.knowsGravityJump())),
sm.wand(sm.canDoSuitlessOuterMaridia(),
sm.wor(sm.haveItem('Grapple'),
sm.wand(sm.haveItem('Ice'), sm.knowsTediousMountEverest(), sm.haveItem('Super')),
sm.canDoubleSpringBallJump())))
@Cache.decorator
def canJumpUnderwater(self):
sm = self.smbm
return sm.wor(sm.haveItem('Gravity'),
sm.wand(sm.knowsGravLessLevel1(),
sm.haveItem('HiJump')))
@Cache.decorator
def canDoSuitlessOuterMaridia(self):
sm = self.smbm
return sm.wand(sm.knowsGravLessLevel1(),
sm.haveItem('HiJump'),
sm.wor(sm.haveItem('Ice'),
sm.canSpringBallJump()))
@Cache.decorator
def canPassBotwoonHallway(self):
sm = self.smbm
return sm.wor(sm.wand(sm.haveItem('SpeedBooster'),
sm.haveItem('Gravity')),
sm.wand(sm.knowsMochtroidClip(), sm.haveItem('Ice')),
sm.canCrystalFlashClip())
@Cache.decorator
def canDefeatBotwoon(self):
sm = self.smbm
return sm.wand(sm.enoughStuffBotwoon(),
sm.canPassBotwoonHallway())
# the sandpits from aqueduct
@Cache.decorator
def canAccessSandPits(self):
sm = self.smbm
return sm.wor(sm.haveItem('Gravity'),
sm.wand(sm.haveItem('HiJump'),
sm.knowsGravLessLevel3()))
@Cache.decorator
def canReachCacatacAlleyFromBotowoon(self):
sm = self.smbm
return sm.wor(sm.haveItem('Gravity'),
sm.wand(sm.knowsGravLessLevel2(),
sm.haveItem("HiJump"),
sm.wor(sm.haveItem('Grapple'),
sm.haveItem('Ice'),
sm.canDoubleSpringBallJump())))
@Cache.decorator
def canPassCacatacAlley(self):
sm = self.smbm
return sm.wand(Bosses.bossDead('Draygon'),
sm.wor(sm.haveItem('Gravity'),
sm.wand(sm.knowsGravLessLevel2(),
sm.haveItem('HiJump'),
sm.haveItem('SpaceJump'))))
@Cache.decorator
def canBotwoonExitToAndFromDraygon(self):
sm = self.smbm
return sm.wor(sm.haveItem('Gravity'),
sm.wand(sm.knowsGravLessLevel2(),
sm.haveItem("HiJump"),
# B -> D : get to top right door
# D -> B : climb to room top
sm.wor(sm.haveItem('Grapple'),
sm.haveItem('Ice')), # climb mochtroids
# go through Colosseum
sm.wor(sm.haveItem('Grapple'),
sm.haveItem('SpaceJump'),
sm.wand(sm.haveItem('Ice'),
sm.energyReserveCountOk(int(7.0/sm.getDmgReduction(False)[0])), # mochtroid dmg
sm.knowsBotwoonToDraygonWithIce()))))
    def getDraygonConnection(self):
        """Return which access point the Draygon room exit leads to,
        resolving it lazily and caching the result on the instance."""
        if self.draygonConnection is None:
            drayRoomOut = getAccessPoint('DraygonRoomOut')
            self.draygonConnection = drayRoomOut.ConnectedTo
        return self.draygonConnection
def isVanillaDraygon(self):
return self.getDraygonConnection() == 'DraygonRoomIn'
@Cache.decorator
def canFightDraygon(self):
sm = self.smbm
return sm.wor(sm.haveItem('Gravity'),
sm.wand(sm.haveItem('HiJump'),
sm.wor(sm.knowsGravLessLevel2(),
sm.knowsGravLessLevel3())))
@Cache.decorator
def canDraygonCrystalFlashSuit(self):
sm = self.smbm
return sm.wand(sm.canCrystalFlash(),
sm.knowsDraygonRoomCrystalFlash(),
# ask for 4 PB pack as an ugly workaround for
# a rando bug which can place a PB at space
# jump to "get you out" (this check is in
# PostAvailable condition of the Dray/Space
# Jump locs)
sm.itemCountOk('PowerBomb', 4))
@Cache.decorator
def canExitDraygonRoomWithGravity(self):
sm = self.smbm
return sm.wand(sm.haveItem('Gravity'),
sm.wor(sm.canFly(),
sm.knowsGravityJump(),
sm.wand(sm.haveItem('HiJump'),
sm.haveItem('SpeedBooster'))))
@Cache.decorator
def canExitDraygonVanilla(self):
sm = self.smbm
# to get out of draygon room:
# with gravity but without highjump/bomb/space jump: gravity jump
# to exit draygon room: grapple or crystal flash (for free shine spark)
# to exit precious room: spring ball jump, xray scope glitch or stored spark
return sm.wor(sm.canExitDraygonRoomWithGravity(),
sm.wand(sm.canDraygonCrystalFlashSuit(),
# use the spark either to exit draygon room or precious room
sm.wor(sm.wand(sm.haveItem('Grapple'),
sm.knowsDraygonRoomGrappleExit()),
sm.wand(sm.haveItem('XRayScope'),
sm.knowsPreciousRoomXRayExit()),
sm.canSpringBallJump())),
# spark-less exit (no CF)
sm.wand(sm.wand(sm.haveItem('Grapple'),
sm.knowsDraygonRoomGrappleExit()),
sm.wor(sm.wand(sm.haveItem('XRayScope'),
sm.knowsPreciousRoomXRayExit()),
sm.canSpringBallJump())),
sm.canDoubleSpringBallJump())
@Cache.decorator
def canExitDraygonRandomized(self):
sm = self.smbm
# disregard precious room
return sm.wor(sm.canExitDraygonRoomWithGravity(),
sm.canDraygonCrystalFlashSuit(),
sm.wand(sm.haveItem('Grapple'),
sm.knowsDraygonRoomGrappleExit()),
sm.canDoubleSpringBallJump())
def canExitDraygon(self):
if self.isVanillaDraygon():
return self.canExitDraygonVanilla()
else:
return self.canExitDraygonRandomized()
@Cache.decorator
def canExitPreciousRoomVanilla(self):
return SMBool(True) # handled by canExitDraygonVanilla
@Cache.decorator
def canExitPreciousRoomRandomized(self):
sm = self.smbm
suitlessRoomExit = sm.canSpringBallJump()
if suitlessRoomExit.bool == False:
if self.getDraygonConnection() == 'KraidRoomIn':
suitlessRoomExit = sm.canShortCharge() # charge spark in kraid's room
elif self.getDraygonConnection() == 'RidleyRoomIn':
suitlessRoomExit = sm.wand(sm.haveItem('XRayScope'), # get doorstuck in compatible transition
sm.knowsPreciousRoomXRayExit())
return sm.wor(sm.wand(sm.haveItem('Gravity'),
sm.wor(sm.canFly(),
sm.knowsGravityJump(),
sm.haveItem('HiJump'))),
suitlessRoomExit)
def canExitPreciousRoom(self):
if self.isVanillaDraygon():
return self.canExitPreciousRoomVanilla()
else:
return self.canExitPreciousRoomRandomized()
# logic: fix bug in canPassThreeMuskateers when no varia suit.
from smbool import SMBool
from rom_patches import RomPatches
from helpers import Helpers, Bosses
from graph_access import getAccessPoint
from cache import Cache
from math import ceil
from parameters import Settings
class HelpersGraph(Helpers):
def __init__(self, smbm):
self.smbm = smbm
self.draygonConnection = None
def canEnterAndLeaveGauntletQty(self, nPB, nTanksSpark):
sm = self.smbm
# EXPLAINED: to access Gauntlet Entrance from Landing site we can either:
# -fly to it (infinite bomb jumps or space jump)
# -shinespark to it
# -wall jump with high jump boots
# -wall jump without high jump boots
# then inside it to break the bomb wals:
# -use screw attack (easy way)
# -use power bombs
# -use bombs
# -perform a simple short charge on the way in
# and use power bombs on the way out
return sm.wand(sm.wor(sm.canFly(),
sm.haveItem('SpeedBooster'),
sm.wand(sm.knowsHiJumpGauntletAccess(),
sm.haveItem('HiJump')),
sm.knowsHiJumpLessGauntletAccess()),
sm.wor(sm.haveItem('ScrewAttack'),
sm.wor(sm.wand(sm.energyReserveCountOkHardRoom('Gauntlet'),
sm.wand(sm.canUsePowerBombs(),
sm.wor(sm.itemCountOk('PowerBomb', nPB),
sm.wand(sm.haveItem('SpeedBooster'),
sm.energyReserveCountOk(nTanksSpark))))),
sm.wand(sm.energyReserveCountOkHardRoom('Gauntlet', 0.51),
sm.canUseBombs()))))
@Cache.decorator
def canEnterAndLeaveGauntlet(self):
sm = self.smbm
return sm.wor(sm.wand(sm.canShortCharge(),
sm.canEnterAndLeaveGauntletQty(2, 2)),
sm.canEnterAndLeaveGauntletQty(2, 3))
def canPassTerminatorBombWall(self, fromLandingSite=True):
sm = self.smbm
return sm.wand(sm.wor(sm.wand(sm.haveItem('SpeedBooster'),
sm.wor(SMBool(not fromLandingSite, 0), sm.knowsSimpleShortCharge(), sm.knowsShortCharge())),
sm.canDestroyBombWalls()),
sm.canPassCrateriaGreenPirates())
# mostly for going up but let's be noob friendly and add the condition for both ways
@Cache.decorator
def canPassCrateriaGreenPirates(self):
sm = self.smbm
return sm.wor(sm.canPassBombPassages(),
sm.canOpenRedDoors(),
sm.energyReserveCountOk(1),
sm.wor(sm.haveItem('Charge'),
sm.haveItem('Ice'),
sm.haveItem('Wave'),
sm.wor(sm.haveItem('Spazer'),
sm.haveItem('Plasma'),
sm.haveItem('ScrewAttack'))))
# from blue brin elevator
@Cache.decorator
def canAccessBillyMays(self):
sm = self.smbm
return sm.wand(sm.wor(RomPatches.has(RomPatches.BlueBrinstarBlueDoor),
sm.canOpenRedDoors()),
sm.canUsePowerBombs(),
sm.wor(sm.knowsBillyMays(),
sm.haveItem('Gravity'),
sm.haveItem('SpaceJump')))
@Cache.decorator
def canAccessKraidsLair(self):
sm = self.smbm
# EXPLAINED: access the upper right platform with either:
# -hijump boots (easy regular way)
# -fly (space jump or infinite bomb jump)
# -know how to wall jump on the platform without the hijump boots
return sm.wand(sm.canOpenGreenDoors(),
sm.wor(sm.haveItem('HiJump'),
sm.canFly(),
sm.knowsEarlyKraid()))
@Cache.decorator
def canPassMoat(self):
sm = self.smbm
# EXPLAINED: In the Moat we can either:
# -use grapple or space jump (easy way)
# -do a continuous wall jump (https://www.youtube.com/watch?v=4HVhTwwax6g)
# -do a diagonal bomb jump from the middle platform (https://www.youtube.com/watch?v=5NRqQ7RbK3A&t=10m58s)
# -do a short charge from the Keyhunter room (https://www.youtube.com/watch?v=kFAYji2gFok)
# -do a gravity jump from below the right platform
# -do a mock ball and a bounce ball (https://www.youtube.com/watch?v=WYxtRF--834)
# -with gravity, either hijump or IBJ
return sm.wor(sm.wor(sm.haveItem('Grapple'),
sm.haveItem('SpaceJump'),
sm.knowsContinuousWallJump()),
sm.wor(sm.wand(sm.knowsDiagonalBombJump(), sm.canUseBombs()),
sm.canSimpleShortCharge(),
sm.wand(sm.haveItem('Gravity'),
sm.wor(sm.knowsGravityJump(),
sm.haveItem('HiJump'),
sm.canInfiniteBombJump())),
sm.wand(sm.knowsMockballWs(), sm.canUseSpringBall())))
@Cache.decorator
def canPassMoatReverse(self):
sm = self.smbm
return sm.wor(sm.haveItem('Grapple'),
sm.haveItem('SpaceJump'),
sm.haveItem('Gravity'),
sm.wand(sm.haveItem('Morph'),
sm.wor(RomPatches.has(RomPatches.MoatShotBlock),
sm.canPassBombPassages())))
@Cache.decorator
def canPassSpongeBath(self):
sm = self.smbm
return sm.wor(sm.wand(sm.canPassBombPassages(),
sm.knowsSpongeBathBombJump()),
sm.wand(sm.haveItem('HiJump'),
sm.knowsSpongeBathHiJump()),
sm.wor(sm.haveItem('Gravity'),
sm.haveItem('SpaceJump'),
sm.wand(sm.haveItem('SpeedBooster'),
sm.knowsSpongeBathSpeed()),
sm.canSpringBallJump()))
@Cache.decorator
def canPassBowling(self):
sm = self.smbm
return sm.wand(Bosses.bossDead('Phantoon'),
sm.wor(sm.heatProof(),
sm.energyReserveCountOk(1),
sm.haveItem("SpaceJump"),
sm.haveItem("Grapple")))
@Cache.decorator
def canAccessEtecoons(self):
sm = self.smbm
return sm.wor(sm.canUsePowerBombs(),
sm.wand(sm.knowsMoondance(), sm.canUseBombs(), sm.canOpenRedDoors()))
# the water zone east of WS
def canPassForgottenHighway(self, fromWs):
sm = self.smbm
suitless = sm.wand(sm.haveItem('HiJump'), sm.knowsGravLessLevel1())
if fromWs is True and RomPatches.has(RomPatches.EastOceanPlatforms).bool is False:
suitless = sm.wand(suitless,
# to break water line and go through the door on the right
sm.haveItem('SpaceJump'))
return sm.wand(sm.wor(sm.haveItem('Gravity'),
suitless),
sm.haveItem('Morph')) # for crab maze
@Cache.decorator
def canExitCrabHole(self):
sm = self.smbm
return sm.wand(sm.haveItem('Morph'), # morph to exit the hole
sm.wor(sm.wand(sm.haveItem('Gravity'), # even with gravity you need some way to climb...
sm.wor(sm.haveItem('Ice'), # ...on crabs...
sm.haveItem('HiJump'), # ...or by jumping
sm.knowsGravityJump(),
sm.canFly())),
sm.wand(sm.haveItem('Ice'), sm.canDoSuitlessOuterMaridia()), # climbing crabs
sm.canDoubleSpringBallJump()))
# bottom sandpits with the evirs
@Cache.decorator
def canTraverseSandPits(self):
sm = self.smbm
return sm.wor(sm.haveItem('Gravity'),
sm.wand(sm.knowsGravLessLevel3(),
sm.haveItem('HiJump'),
sm.haveItem('Ice')))
@Cache.decorator
def canPassMaridiaToRedTowerNode(self):
sm = self.smbm
return sm.wand(sm.haveItem('Morph'),
sm.wor(RomPatches.has(RomPatches.AreaRandoGatesBase),
sm.canOpenGreenDoors()))
@Cache.decorator
def canPassRedTowerToMaridiaNode(self):
sm = self.smbm
return sm.wand(sm.haveItem('Morph'),
RomPatches.has(RomPatches.AreaRandoGatesBase))
def canEnterCathedral(self, mult=1.0):
sm = self.smbm
return sm.wand(sm.canOpenRedDoors(),
sm.wor(sm.wand(sm.canHellRun('MainUpperNorfair', mult),
sm.wor(sm.wor(RomPatches.has(RomPatches.CathedralEntranceWallJump),
sm.haveItem('HiJump'),
sm.canFly()),
sm.wor(sm.haveItem('SpeedBooster'), # spark
sm.canSpringBallJump()))),
sm.wand(sm.canHellRun('MainUpperNorfair', 0.5*mult),
sm.haveItem('Morph'),
sm.knowsNovaBoost())))
@Cache.decorator
def canClimbBubbleMountain(self):
sm = self.smbm
return sm.wor(sm.haveItem('HiJump'),
sm.canFly(),
sm.haveItem('Ice'),
sm.knowsBubbleMountainWallJump())
@Cache.decorator
def canHellRunToSpeedBooster(self):
sm = self.smbm
return sm.canHellRun(**Settings.hellRunsTable['MainUpperNorfair']['Bubble -> Speed Booster w/Speed' if sm.haveItem('SpeedBooster') else 'Bubble -> Speed Booster'])
@Cache.decorator
def canExitCathedral(self):
# from top: can use bomb/powerbomb jumps
# from bottom: can do a shinespark or use space jump
# can do it with highjump + wall jump
# can do it with only two wall jumps (the first one is delayed like on alcatraz)
# can do it with a spring ball jump from wall
sm = self.smbm
return sm.wand(sm.wor(sm.canHellRun(**Settings.hellRunsTable['MainUpperNorfair']['Bubble -> Norfair Entrance']),
sm.heatProof()),
sm.wor(sm.wor(sm.canPassBombPassages(),
sm.haveItem("SpeedBooster")),
sm.wor(sm.haveItem("SpaceJump"),
sm.haveItem("HiJump"),
sm.knowsWallJumpCathedralExit(),
sm.wand(sm.knowsSpringBallJumpFromWall(), sm.canUseSpringBall()))))
@Cache.decorator
def canGrappleEscape(self):
sm = self.smbm
return sm.wor(sm.wor(sm.haveItem('SpaceJump'),
sm.wand(sm.canInfiniteBombJump(), # IBJ from lava...either have grav or freeze the enemy there if hellrunning (otherwise single DBJ at the end)
sm.wor(sm.heatProof(),
sm.haveItem('Gravity'),
sm.haveItem('Ice')))),
sm.haveItem('Grapple'),
sm.wand(sm.haveItem('SpeedBooster'),
sm.wor(sm.haveItem('HiJump'), # jump from the blocks below
sm.knowsShortCharge())), # spark from across the grapple blocks
sm.wand(sm.haveItem('HiJump'), sm.canSpringBallJump())) # jump from the blocks below
@Cache.decorator
def canPassFrogSpeedwayRightToLeft(self):
sm = self.smbm
return sm.wor(sm.haveItem('SpeedBooster'),
sm.wand(sm.knowsFrogSpeedwayWithoutSpeed(),
sm.haveItem('Wave'),
sm.wor(sm.haveItem('Spazer'),
sm.haveItem('Plasma'))))
@Cache.decorator
def canEnterNorfairReserveAreaFromBubbleMoutain(self):
sm = self.smbm
return sm.wand(sm.canOpenGreenDoors(),
sm.wor(sm.canFly(),
sm.haveItem('Ice'),
sm.wand(sm.haveItem('HiJump'),
sm.knowsGetAroundWallJump()),
sm.wand(sm.canUseSpringBall(),
sm.knowsSpringBallJumpFromWall())))
@Cache.decorator
def canEnterNorfairReserveAreaFromBubbleMoutainTop(self):
sm = self.smbm
return sm.wand(sm.canOpenGreenDoors(),
sm.wor(sm.haveItem('Grapple'),
sm.haveItem('SpaceJump'),
sm.knowsNorfairReserveDBoost()))
@Cache.decorator
def canPassLavaPit(self):
sm = self.smbm
nTanks4Dive = 8 / sm.getDmgReduction()[0]
if sm.haveItem('HiJump').bool == False:
nTanks4Dive = ceil(nTanks4Dive * 1.25)
return sm.wand(sm.wor(sm.wand(sm.haveItem('Gravity'), sm.haveItem('SpaceJump')),
sm.wand(sm.knowsGravityJump(), sm.haveItem('Gravity'), sm.wor(sm.haveItem('HiJump'), sm.knowsLavaDive())),
sm.wand(sm.wor(sm.wand(sm.knowsLavaDive(), sm.haveItem('HiJump')),
sm.knowsLavaDiveNoHiJump()),
sm.energyReserveCountOk(nTanks4Dive))),
sm.canUsePowerBombs()) # power bomb blocks left and right of LN entrance without any items before
@Cache.decorator
def canPassLavaPitReverse(self):
sm = self.smbm
nTanks = 2
if sm.heatProof().bool == False:
nTanks = 6
return sm.energyReserveCountOk(nTanks)
@Cache.decorator
def canPassLowerNorfairChozo(self):
sm = self.smbm
# to require one more CF if no heat protection because of distance to cover, wait times, acid...
return sm.wand(sm.canHellRun(**Settings.hellRunsTable['LowerNorfair']['Entrance -> GT via Chozo']),
sm.canUsePowerBombs(),
sm.wor(RomPatches.has(RomPatches.LNChozoSJCheckDisabled), sm.haveItem('SpaceJump')))
@Cache.decorator
def canExitScrewAttackArea(self):
sm = self.smbm
return sm.wand(sm.canDestroyBombWalls(),
sm.wor(sm.canFly(),
sm.wand(sm.haveItem('HiJump'),
sm.haveItem('SpeedBooster'),
sm.wor(sm.wand(sm.haveItem('ScrewAttack'), sm.knowsScrewAttackExit()),
sm.knowsScrewAttackExitWithoutScrew())),
sm.wand(sm.canUseSpringBall(),
sm.knowsSpringBallJumpFromWall()),
sm.wand(sm.canSimpleShortCharge(), # fight GT and spark out
sm.enoughStuffGT())))
@Cache.decorator
def canPassWorstRoom(self):
sm = self.smbm
return sm.wand(sm.canDestroyBombWalls(),
sm.wor(sm.canFly(),
sm.wand(sm.knowsWorstRoomIceCharge(), sm.haveItem('Ice'), sm.canFireChargedShots()),
sm.wand(sm.knowsGetAroundWallJump(), sm.haveItem('HiJump')),
sm.wand(sm.knowsSpringBallJumpFromWall(), sm.canUseSpringBall())))
@Cache.decorator
def canPassThreeMuskateers(self):
sm = self.smbm
destroy = sm.wor(sm.haveItem('Plasma'),
sm.haveItem('ScrewAttack'),
sm.wand(sm.heatProof(), # this takes a loooong time ...
sm.wor(sm.haveItem('Spazer'),
sm.haveItem('Ice'))))
if destroy.bool == True:
return destroy
# if no adapted beams or screw attack, check if we can go both ways
# (no easy refill around) with supers and/or health
# - super only?
ki = 1800.0
sup = 300.0
nbKi = 6.0
if sm.itemCount('Super')*5*sup >= nbKi*ki:
return SMBool(True, 0, items=['Super'])
# - or with taking damage as well?
(dmgRed, redItems) = sm.getDmgReduction(envDmg=False)
dmgKi = 200.0 / dmgRed
if sm.heatProof() and e(sm.itemCount('Super')*5*sup)/ki + (sm.energyReserveCount()*100 - 2)/dmgKi >= nbKi:
# require heat proof as long as taking damage is necessary.
# display all the available energy in the solver.
return sm.wand(sm.heatProof(), SMBool(True, 0, items=redItems+['Super', '{}-ETank - {}-Reserve'.format(self.smbm.itemCount('ETank'), self.smbm.itemCount('Reserve'))]))
return sm.knowsDodgeThreeMuskateers()
# go through the pirates room filled with acid
@Cache.decorator
def canPassAmphitheaterReverse(self):
    """Cross the Amphitheater in reverse; energy needs scale with damage reduction."""
    sm = self.smbm
    dmgRed = sm.getDmgReduction()[0]
    nTanksGrav = 4 * 4/dmgRed
    nTanksNoGrav = 6 * 4/dmgRed
    withGravity = sm.wand(sm.haveItem('Gravity'),
                          sm.energyReserveCountOk(nTanksGrav))
    # knowsLavaDive should be a good enough skill filter for acid wall jumps with no grav
    suitless = sm.wand(sm.energyReserveCountOk(nTanksNoGrav),
                       sm.knowsLavaDive())
    return sm.wor(withGravity, suitless)
@Cache.decorator
def canClimbRedTower(self):
    """Climb Red Tower: the dedicated technique, Ice, or Space Jump."""
    sm = self.smbm
    options = [sm.knowsRedTowerClimb(),
               sm.haveItem('Ice'),
               sm.haveItem('SpaceJump')]
    return sm.wor(*options)
@Cache.decorator
def canClimbBottomRedTower(self):
    """Climb the bottom of Red Tower via the patched passage, items, or a short charge."""
    sm = self.smbm
    regularWays = sm.wor(RomPatches.has(RomPatches.RedTowerLeftPassage),
                         sm.haveItem('HiJump'),
                         sm.haveItem('Ice'),
                         sm.canFly())
    return sm.wor(regularWays, sm.canShortCharge())
@Cache.decorator
def canGoUpMtEverest(self):
    """Go up Mt Everest with Gravity, or suitless with Grapple."""
    sm = self.smbm
    withGravity = sm.wand(sm.haveItem('Gravity'),
                          sm.wor(sm.haveItem('Grapple'),
                                 sm.haveItem('SpeedBooster'),
                                 sm.canFly(),
                                 sm.wand(sm.haveItem('HiJump'), sm.knowsGravityJump())))
    suitless = sm.wand(sm.canDoSuitlessOuterMaridia(),
                       sm.haveItem('Grapple'))
    return sm.wor(withGravity, suitless)
@Cache.decorator
def canPassMtEverest(self):
    """Pass Mt Everest with Gravity, or suitless with one of the harder options."""
    sm = self.smbm
    withGravity = sm.wand(sm.haveItem('Gravity'),
                          sm.wor(sm.haveItem('Grapple'),
                                 sm.haveItem('SpeedBooster'),
                                 sm.canFly(),
                                 sm.knowsGravityJump()))
    suitlessOptions = sm.wor(sm.haveItem('Grapple'),
                             sm.wand(sm.haveItem('Ice'), sm.knowsTediousMountEverest(), sm.haveItem('Super')),
                             sm.canDoubleSpringBallJump())
    suitless = sm.wand(sm.canDoSuitlessOuterMaridia(), suitlessOptions)
    return sm.wor(withGravity, suitless)
@Cache.decorator
def canJumpUnderwater(self):
    """Jump underwater: Gravity, or the gravless level-1 technique with HiJump."""
    sm = self.smbm
    gravity = sm.haveItem('Gravity')
    suitless = sm.wand(sm.knowsGravLessLevel1(), sm.haveItem('HiJump'))
    return sm.wor(gravity, suitless)
@Cache.decorator
def canDoSuitlessOuterMaridia(self):
    """Traverse outer Maridia suitless: the technique, HiJump, and Ice or a spring ball jump."""
    sm = self.smbm
    technique = sm.knowsGravLessLevel1()
    hiJump = sm.haveItem('HiJump')
    vertical = sm.wor(sm.haveItem('Ice'), sm.canSpringBallJump())
    return sm.wand(technique, hiJump, vertical)
@Cache.decorator
def canPassBotwoonHallway(self):
    """Pass Botwoon hallway: speed under water, a mochtroid clip, or a crystal flash clip."""
    sm = self.smbm
    speedway = sm.wand(sm.haveItem('SpeedBooster'), sm.haveItem('Gravity'))
    mochtroidClip = sm.wand(sm.knowsMochtroidClip(), sm.haveItem('Ice'))
    return sm.wor(speedway, mochtroidClip, sm.canCrystalFlashClip())
@Cache.decorator
def canDefeatBotwoon(self):
    """Defeat Botwoon: enough offense/defense plus access through the hallway."""
    sm = self.smbm
    fight = sm.enoughStuffBotwoon()
    access = sm.canPassBotwoonHallway()
    return sm.wand(fight, access)
# the sandpits from aqueduct
@Cache.decorator
def canAccessSandPits(self):
    """Access the sand pits: Gravity, or HiJump with the gravless level-3 technique."""
    sm = self.smbm
    gravity = sm.haveItem('Gravity')
    suitless = sm.wand(sm.haveItem('HiJump'), sm.knowsGravLessLevel3())
    return sm.wor(gravity, suitless)
@Cache.decorator
def canReachCacatacAlleyFromBotowoon(self):
    """Reach Cacatac Alley from Botwoon: Gravity, or gravless level-2 with extras."""
    sm = self.smbm
    gravity = sm.haveItem('Gravity')
    suitless = sm.wand(sm.knowsGravLessLevel2(),
                       sm.haveItem("HiJump"),
                       sm.wor(sm.haveItem('Grapple'),
                              sm.haveItem('Ice'),
                              sm.canDoubleSpringBallJump()))
    return sm.wor(gravity, suitless)
@Cache.decorator
def canPassCacatacAlley(self):
    """Pass Cacatac Alley: requires Draygon dead plus suitable movement."""
    sm = self.smbm
    draygonDead = Bosses.bossDead('Draygon')
    movement = sm.wor(sm.haveItem('Gravity'),
                      sm.wand(sm.knowsGravLessLevel2(),
                              sm.haveItem('HiJump'),
                              sm.haveItem('SpaceJump')))
    return sm.wand(draygonDead, movement)
@Cache.decorator
def canBotwoonExitToAndFromDraygon(self):
    """Travel between Botwoon's exit and Draygon, in both directions."""
    sm = self.smbm
    gravity = sm.haveItem('Gravity')
    suitless = sm.wand(
        sm.knowsGravLessLevel2(),
        sm.haveItem("HiJump"),
        # B -> D : get to top right door
        # D -> B : climb to room top
        sm.wor(sm.haveItem('Grapple'),
               sm.haveItem('Ice')),  # climb mochtroids
        # go through Colosseum
        sm.wor(sm.haveItem('Grapple'),
               sm.haveItem('SpaceJump'),
               sm.wand(sm.haveItem('Ice'),
                       sm.energyReserveCountOk(int(7.0/sm.getDmgReduction(False)[0])),  # mochtroid dmg
                       sm.knowsBotwoonToDraygonWithIce())))
    return sm.wor(gravity, suitless)
def getDraygonConnection(self):
    """Lazily resolve and cache the access point Draygon's exit connects to."""
    if self.draygonConnection is None:
        self.draygonConnection = getAccessPoint('DraygonRoomOut').ConnectedTo
    return self.draygonConnection
def isVanillaDraygon(self):
    """True when Draygon's exit leads to the vanilla Draygon room."""
    connection = self.getDraygonConnection()
    return connection == 'DraygonRoomIn'
@Cache.decorator
def canFightDraygon(self):
    """Fight Draygon: Gravity, or HiJump with a suitless-movement technique."""
    sm = self.smbm
    gravity = sm.haveItem('Gravity')
    suitless = sm.wand(sm.haveItem('HiJump'),
                       sm.wor(sm.knowsGravLessLevel2(),
                              sm.knowsGravLessLevel3()))
    return sm.wor(gravity, suitless)
@Cache.decorator
def canDraygonCrystalFlashSuit(self):
    """Perform the Draygon crystal-flash suit trick."""
    sm = self.smbm
    crystalFlash = sm.canCrystalFlash()
    technique = sm.knowsDraygonRoomCrystalFlash()
    # ask for 4 PB pack as an ugly workaround for a rando bug which can place
    # a PB at space jump to "get you out" (this check is in PostAvailable
    # condition of the Dray/Space Jump locs)
    enoughPBs = sm.itemCountOk('PowerBomb', 4)
    return sm.wand(crystalFlash, technique, enoughPBs)
@Cache.decorator
def canExitDraygonRoomWithGravity(self):
    """Exit Draygon's room with Gravity plus some way to get up."""
    sm = self.smbm
    gravity = sm.haveItem('Gravity')
    escape = sm.wor(sm.canFly(),
                    sm.knowsGravityJump(),
                    sm.wand(sm.haveItem('HiJump'),
                            sm.haveItem('SpeedBooster')))
    return sm.wand(gravity, escape)
@Cache.decorator
def canExitDraygonVanilla(self):
    """Exit the vanilla Draygon room and the adjacent precious room.

    To get out of draygon room: with gravity, but without highjump/bomb/space
    jump, it's a gravity jump. To exit draygon room: grapple or crystal flash
    (for a free shine spark). To exit precious room: spring ball jump, xray
    scope glitch or stored spark.
    """
    sm = self.smbm
    grappleExit = sm.wand(sm.haveItem('Grapple'),
                          sm.knowsDraygonRoomGrappleExit())
    xrayExit = sm.wand(sm.haveItem('XRayScope'),
                       sm.knowsPreciousRoomXRayExit())
    # use the spark either to exit draygon room or precious room
    withCrystalFlash = sm.wand(sm.canDraygonCrystalFlashSuit(),
                               sm.wor(grappleExit,
                                      xrayExit,
                                      sm.canSpringBallJump()))
    # spark-less exit (no CF)
    sparkless = sm.wand(grappleExit,
                        sm.wor(xrayExit,
                               sm.canSpringBallJump()))
    return sm.wor(sm.canExitDraygonRoomWithGravity(),
                  withCrystalFlash,
                  sparkless,
                  sm.canDoubleSpringBallJump())
@Cache.decorator
def canExitDraygonRandomized(self):
    """Exit Draygon's room when connections are randomized (precious room disregarded)."""
    sm = self.smbm
    grappleExit = sm.wand(sm.haveItem('Grapple'),
                          sm.knowsDraygonRoomGrappleExit())
    return sm.wor(sm.canExitDraygonRoomWithGravity(),
                  sm.canDraygonCrystalFlashSuit(),
                  grappleExit,
                  sm.canDoubleSpringBallJump())
def canExitDraygon(self):
    """Dispatch to the vanilla or randomized Draygon-exit check."""
    if self.isVanillaDraygon():
        return self.canExitDraygonVanilla()
    return self.canExitDraygonRandomized()
@Cache.decorator
def canExitPreciousRoomVanilla(self):
    # Always true: the vanilla precious-room exit is already covered by
    # canExitDraygonVanilla.
    return SMBool(True) # handled by canExitDraygonVanilla
@Cache.decorator
def canExitPreciousRoomRandomized(self):
    """Exit the precious room when Draygon connections are randomized."""
    sm = self.smbm
    suitlessRoomExit = sm.canSpringBallJump()
    if not suitlessRoomExit.bool:
        connection = self.getDraygonConnection()
        if connection == 'KraidRoomIn':
            # charge spark in kraid's room
            suitlessRoomExit = sm.canShortCharge()
        elif connection == 'RidleyRoomIn':
            # get doorstuck in compatible transition
            suitlessRoomExit = sm.wand(sm.haveItem('XRayScope'),
                                       sm.knowsPreciousRoomXRayExit())
    gravityExit = sm.wand(sm.haveItem('Gravity'),
                          sm.wor(sm.canFly(),
                                 sm.knowsGravityJump(),
                                 sm.haveItem('HiJump')))
    return sm.wor(gravityExit, suitlessRoomExit)
def canExitPreciousRoom(self):
    """Dispatch to the vanilla or randomized precious-room exit check."""
    if self.isVanillaDraygon():
        return self.canExitPreciousRoomVanilla()
    return self.canExitPreciousRoomRandomized()
|
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.db.models import Q, F, ExpressionWrapper, IntegerField, Count
from django.db.models import CharField, TextField, Value as V
from django.db.models import OuterRef, Subquery
from django.db.models.functions import Concat
from django.db.models.fields import NullBooleanField
from django.db.models.sql.where import NothingNode, WhereNode
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.core.urlresolvers import reverse_lazy
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.utils.translation import override, ugettext_lazy as _
from django.forms.fields import TypedChoiceField, ChoiceField
from django.shortcuts import *
from django.contrib import messages
from django.contrib.sites.models import Site
from django.template.loader import render_to_string
from signbank.dictionary.templatetags.field_choice import get_field_choice
import csv
import operator
import re
import xml.etree.ElementTree as ET
from xml.dom import minidom
import datetime as DT
from guardian.core import ObjectPermissionChecker
from guardian.shortcuts import get_objects_for_user
from signbank.dictionary.models import *
from signbank.dictionary.forms import *
from signbank.feedback.models import *
from signbank.video.forms import VideoUploadForGlossForm
from tagging.models import Tag, TaggedItem
from signbank.settings.base import ECV_FILE,EARLIEST_GLOSS_CREATION_DATE, FIELDS, SEPARATE_ENGLISH_IDGLOSS_FIELD, LANGUAGE_CODE, ECV_SETTINGS, URL, LANGUAGE_CODE_MAP
from signbank.settings import server_specific
from signbank.settings.server_specific import *
from signbank.dictionary.translate_choice_list import machine_value_to_translated_human_value, choicelist_queryset_to_translated_dict, choicelist_queryset_to_machine_value_dict
from signbank.dictionary.forms import GlossSearchForm, MorphemeSearchForm
from signbank.tools import get_selected_datasets_for_user, write_ecv_file_for_dataset, write_csv_for_handshapes
def order_queryset_by_sort_order(get, qs):
    """Change the sort-order of the query set, depending on the form field [sortOrder]
    This function is used both by GlossListView as well as by MorphemeListView.
    The value of [sortOrder] is 'lemma__lemmaidglosstranslation__text' by default.
    [sortOrder] is a hidden field inside the "adminsearch" html form in the template admin_gloss_list.html
    Its value is changed by clicking the up/down buttons in the second row of the search result table

    NOTE(review): the code below actually defaults sOrder to
    'annotationidglosstranslation__text', not the value stated above — confirm
    which is correct.
    Returns either a Django QuerySet or a plain list, depending on the branch taken.
    """
    def get_string_from_tuple_list(lstTuples, number):
        """Get the string value corresponding to a number in a list of number-string tuples"""
        sBack = [tup[1] for tup in lstTuples if tup[0] == number]
        return sBack
    # Helper: order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]
    def order_queryset_by_tuple_list(qs, sOrder, sListName):
        """Order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]"""
        # Get a list of tuples for this sort-order
        tpList = build_choice_list(sListName)
        # Determine sort order: ascending is default
        bReversed = False
        if (sOrder[0:1] == '-'):
            # A starting '-' sign means: descending order
            sOrder = sOrder[1:]
            bReversed = True
        # Order the list of tuples alphabetically
        # (NOTE: they are alphabetical from 'build_choice_list()', except for the values 0,1)
        tpList = sorted(tpList, key=operator.itemgetter(1))
        # Order by the string-values in the tuple list
        return sorted(qs, key=lambda x: get_string_from_tuple_list(tpList, getattr(x, sOrder)), reverse=bReversed)
    def order_queryset_by_annotationidglosstranslation(qs, sOrder):
        # The sort key ends in a two-character language code; annotate each gloss
        # with the matching annotation translation text and order by it.
        language_code_2char = sOrder[-2:]
        sOrderAsc = sOrder
        if (sOrder[0:1] == '-'):
            # A starting '-' sign means: descending order
            sOrderAsc = sOrder[1:]
        annotationidglosstranslation = AnnotationIdglossTranslation.objects.filter(gloss=OuterRef('pk'), language__language_code_2char__iexact=language_code_2char)
        qs = qs.annotate(**{sOrderAsc: Subquery(annotationidglosstranslation.values('text')[:1])}).order_by(sOrder)
        return qs
    def order_queryset_by_lemmaidglosstranslation(qs, sOrder):
        # Same pattern as above, but through the gloss's lemma translations.
        language_code_2char = sOrder[-2:]
        sOrderAsc = sOrder
        if (sOrder[0:1] == '-'):
            # A starting '-' sign means: descending order
            sOrderAsc = sOrder[1:]
        lemmaidglosstranslation = LemmaIdglossTranslation.objects.filter(lemma=OuterRef('lemma'), language__language_code_2char__iexact=language_code_2char)
        qs = qs.annotate(**{sOrderAsc: Subquery(lemmaidglosstranslation.values('text')[:1])}).order_by(sOrder)
        return qs
    # Set the default sort order
    default_sort_order = True
    sOrder = 'annotationidglosstranslation__text'  # Default sort order if nothing is specified
    # See if the form contains any sort-order information
    if ('sortOrder' in get and get['sortOrder'] != ''):
        # Take the user-indicated sort order
        sOrder = get['sortOrder']
        default_sort_order = False
    # The ordering method depends on the kind of field:
    # (1) text fields are ordered straightforwardly
    # (2) fields made from a choice_list need special treatment
    if (sOrder.endswith('handedness')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "Handedness")
    elif (sOrder.endswith('domhndsh') or sOrder.endswith('subhndsh')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "Handshape")
    elif (sOrder.endswith('locprim')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "Location")
    elif "annotationidglosstranslation_order_" in sOrder:
        ordered = order_queryset_by_annotationidglosstranslation(qs, sOrder)
    elif "lemmaidglosstranslation_order_" in sOrder:
        ordered = order_queryset_by_lemmaidglosstranslation(qs, sOrder)
    else:
        # Use straightforward ordering on field [sOrder]
        bReversed = False
        if (sOrder[0:1] == '-'):
            # A starting '-' sign means: descending order
            sOrder = sOrder[1:]
            bReversed = True
        if default_sort_order:
            # Default ordering: entries starting with a letter first, then the
            # rest, both restricted to the default keywords language.
            lang_attr_name = DEFAULT_KEYWORDS_LANGUAGE['language_code_2char']
            sort_language = 'annotationidglosstranslation__language__language_code_2char'
            qs_letters = qs.filter(**{sOrder+'__regex':r'^[a-zA-Z]'}, **{sort_language:lang_attr_name})
            qs_special = qs.filter(**{sOrder+'__regex':r'^[^a-zA-Z]'}, **{sort_language:lang_attr_name})
            sort_key = sOrder
            ordered = list(qs_letters.order_by(sort_key))
            ordered += list(qs_special.order_by(sort_key))
        else:
            # NOTE(review): in this branch 'ordered' stays an unsorted QuerySet;
            # [sOrder] is never applied — confirm this is intended.
            ordered = qs
        if bReversed:
            # NOTE(review): list.reverse() mutates in place, but
            # QuerySet.reverse() returns a new queryset without mutating, so
            # when 'ordered' is a QuerySet this call has no effect — confirm.
            ordered.reverse()
    # return the ordered list
    return ordered
class GlossListView(ListView):
    """List view over glosses with search, CSV export and ECV export."""
    model = Gloss
    template_name = 'dictionary/admin_gloss_list.html'
    # default page size; can be overridden per request via ?paginate_by=
    paginate_by = 500
    only_export_ecv = False #Used to call the 'export ecv' functionality of this view without the need for an extra GET parameter
    # defaults below are overwritten from request GET/session in get_context_data/get_queryset
    search_type = 'sign'
    view_type = 'gloss_list'
    web_search = False
    show_all = False
    dataset_name = DEFAULT_DATASET
    last_used_dataset = None
def get_context_data(self, **kwargs):
    """Build the template context: search form, dataset/language data and UI flags."""
    # Call the base implementation first to get a context
    context = super(GlossListView, self).get_context_data(**kwargs)
    # Retrieve the search_type,so that we know whether the search should be restricted to Gloss or not
    if 'search_type' in self.request.GET:
        self.search_type = self.request.GET['search_type']
    # self.request.session['search_type'] = self.search_type
    if 'view_type' in self.request.GET:
        # user is adjusting the view, leave the rest of the context alone
        self.view_type = self.request.GET['view_type']
        context['view_type'] = self.view_type
    if 'last_used_dataset' in self.request.session.keys():
        self.last_used_dataset = self.request.session['last_used_dataset']
    if 'inWeb' in self.request.GET:
        # user is searching for signs / morphemes visible to anonymous uers
        self.web_search = self.request.GET['inWeb'] == '2'
    elif not self.request.user.is_authenticated():
        self.web_search = True
    context['web_search'] = self.web_search
    selected_datasets = get_selected_datasets_for_user(self.request.user)
    dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
    context['dataset_languages'] = dataset_languages
    # build (id, name) choices for sign languages and dialects of the selected datasets
    selected_datasets_signlanguage = list(SignLanguage.objects.filter(dataset__in=selected_datasets))
    sign_languages = []
    for sl in selected_datasets_signlanguage:
        if not ((str(sl.id),sl.name) in sign_languages):
            sign_languages.append((str(sl.id), sl.name))
    selected_datasets_dialects = Dialect.objects.filter(signlanguage__in=selected_datasets_signlanguage)\
        .prefetch_related('signlanguage').distinct()
    dialects = []
    for dl in selected_datasets_dialects:
        dialect_name = dl.signlanguage.name + "/" + dl.name
        dialects.append((str(dl.id),dialect_name))
    search_form = GlossSearchForm(self.request.GET, languages=dataset_languages, sign_languages=sign_languages,
                                  dialects=dialects, language_code=self.request.LANGUAGE_CODE)
    #Translations for field choices dropdown menu
    fields_that_need_translated_options = ['hasComponentOfType','hasMorphemeOfType']
    for field_group in FIELDS.values():
        for field in field_group:
            fields_that_need_translated_options.append(field)
    # replace each TypedChoiceField with a ChoiceField whose options are
    # translated to the request language
    for field in fields_that_need_translated_options:
        try:
            if isinstance(search_form.fields[field], TypedChoiceField):
                choices = FieldChoice.objects.filter(field__iexact=fieldname_to_category(field))
                translated_choices = [('','---------')]+choicelist_queryset_to_translated_dict(choices,self.request.LANGUAGE_CODE,
                                                                                               ordered=False,id_prefix='')
                search_form.fields[field] = forms.ChoiceField(label=search_form.fields[field].label,
                                                              choices=translated_choices,
                                                              widget=forms.Select(attrs={'class':'form-control'}))
        except KeyError:
            continue
    context['searchform'] = search_form
    context['search_type'] = self.search_type
    context['view_type'] = self.view_type
    context['web_search'] = self.web_search
    # NOTE(review): the create form is bound to request GET data — confirm intended
    context['add_gloss_form'] = GlossCreateForm(self.request.GET, languages=dataset_languages, user=self.request.user, last_used_dataset=self.last_used_dataset)
    # feature flags: only exposed to authenticated users where relevant
    if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS') and self.request.user.is_authenticated():
        context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
    else:
        context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
    if hasattr(settings, 'SHOW_MORPHEME_SEARCH') and self.request.user.is_authenticated():
        context['SHOW_MORPHEME_SEARCH'] = settings.SHOW_MORPHEME_SEARCH
    else:
        context['SHOW_MORPHEME_SEARCH'] = False
    context['MULTIPLE_SELECT_GLOSS_FIELDS'] = settings.MULTIPLE_SELECT_GLOSS_FIELDS
    if hasattr(settings, 'DISABLE_MOVING_THUMBNAILS_ABOVE_NR_OF_GLOSSES'):
        context['DISABLE_MOVING_THUMBNAILS_ABOVE_NR_OF_GLOSSES'] = settings.DISABLE_MOVING_THUMBNAILS_ABOVE_NR_OF_GLOSSES
    else:
        context['DISABLE_MOVING_THUMBNAILS_ABOVE_NR_OF_GLOSSES'] = 0
    # group (fieldname, bound field, label) triples per topic for the template
    context['input_names_fields_and_labels'] = {}
    for topic in ['main','phonology','semantics']:
        context['input_names_fields_and_labels'][topic] = []
        for fieldname in settings.FIELDS[topic]:
            # exclude the dependent fields for Handedness, Strong Hand, and Weak Hand for purposes of nested dependencies in Search form
            if fieldname not in ['weakprop', 'weakdrop', 'domhndsh_letter', 'domhndsh_number', 'subhndsh_letter', 'subhndsh_number']:
                field = search_form[fieldname]
                label = field.label
                context['input_names_fields_and_labels'][topic].append((fieldname,field,label))
    # the excluded dependent fields are passed separately
    context['input_names_fields_labels_handedness'] = []
    field = search_form['weakdrop']
    label = field.label
    context['input_names_fields_labels_handedness'].append(('weakdrop', field, label))
    field = search_form['weakprop']
    label = field.label
    context['input_names_fields_labels_handedness'].append(('weakprop',field,label))
    context['input_names_fields_labels_domhndsh'] = []
    field = search_form['domhndsh_letter']
    label = field.label
    context['input_names_fields_labels_domhndsh'].append(('domhndsh_letter',field,label))
    field = search_form['domhndsh_number']
    label = field.label
    context['input_names_fields_labels_domhndsh'].append(('domhndsh_number',field,label))
    context['input_names_fields_labels_subhndsh'] = []
    field = search_form['subhndsh_letter']
    label = field.label
    context['input_names_fields_labels_subhndsh'].append(('subhndsh_letter',field,label))
    field = search_form['subhndsh_number']
    label = field.label
    context['input_names_fields_labels_subhndsh'].append(('subhndsh_number',field,label))
    # NOTE(review): when kwargs contain show_all with a falsy value,
    # context['show_all'] is never set — confirm intended.
    try:
        if self.kwargs['show_all']:
            context['show_all'] = True
    except KeyError:
        context['show_all'] = False
    context['paginate_by'] = self.request.GET.get('paginate_by', self.paginate_by)
    context['lemma_create_field_prefix'] = LemmaCreateForm.lemma_create_field_prefix
    context['generate_translated_choice_list_table'] = generate_translated_choice_list_table()
    if self.search_type == 'sign' or not self.request.user.is_authenticated():
        # Only count the none-morpheme glosses
        # this branch is slower than the other one
        context['glosscount'] = Gloss.none_morpheme_objects().select_related('lemma').select_related('dataset').filter(lemma__dataset__in=selected_datasets).count()
    else:
        context['glosscount'] = Gloss.objects.select_related('lemma').select_related('dataset').filter(lemma__dataset__in=selected_datasets).count() # Count the glosses + morphemes
    return context
def get_paginate_by(self, queryset):
    """
    Paginate by the value given in the querystring, falling back to the
    class default when the parameter is absent.
    """
    params = self.request.GET
    return params.get('paginate_by', self.paginate_by)
def render_to_response(self, context):
    """Dispatch to CSV export, ECV export, or the normal HTML rendering."""
    requested_format = self.request.GET.get('format')
    if requested_format == 'CSV':
        return self.render_to_csv_response(context)
    if self.only_export_ecv or self.request.GET.get('export_ecv') == 'ECV':
        return self.render_to_ecv_export_response(context)
    return super(GlossListView, self).render_to_response(context)
def render_to_ecv_export_response(self, context):
    """
    Regenerate the ECV file for the dataset named in the request, then
    redirect back to the search page, reporting success or the failure
    reason via django messages.
    """
    redirect_url = URL + settings.PREFIX_URL + '/signs/search/'
    # check that the user is logged in
    if not self.request.user.is_authenticated():
        messages.add_message(self.request, messages.ERROR, ('Please login to use this functionality.'))
        return HttpResponseRedirect(redirect_url)
    # if the dataset is specified in the url parameters, set the dataset_name variable
    get = self.request.GET
    if 'dataset_name' in get:
        self.dataset_name = get['dataset_name']
    if self.dataset_name == '':
        messages.add_message(self.request, messages.ERROR, ('Dataset name must be non-empty.'))
        return HttpResponseRedirect(redirect_url)
    try:
        dataset_object = Dataset.objects.get(name=self.dataset_name)
    except Exception:
        # was a bare 'except:', which also swallowed SystemExit/KeyboardInterrupt
        messages.add_message(self.request, messages.ERROR, ('No dataset with name '+self.dataset_name+' found.'))
        return HttpResponseRedirect(redirect_url)
    # make sure the user can write to this dataset
    import guardian
    user_change_datasets = guardian.shortcuts.get_objects_for_user(self.request.user, 'change_dataset', Dataset)
    if not user_change_datasets or dataset_object not in user_change_datasets:
        messages.add_message(self.request, messages.ERROR, ('No permission to export dataset.'))
        return HttpResponseRedirect(redirect_url)
    # if we get to here, the user is authenticated and has permission to export the dataset
    write_ecv_file_for_dataset(self.dataset_name)
    messages.add_message(self.request, messages.INFO, ('ECV ' + self.dataset_name + ' successfully updated.'))
    return HttpResponseRedirect(redirect_url)
# noinspection PyInterpreter,PyInterpreter
def render_to_csv_response(self, context):
    """Export the current result set as a CSV attachment.

    Requires the 'dictionary.export_csv' permission; raises PermissionDenied
    otherwise. One row per gloss in get_queryset().
    """
    if not self.request.user.has_perm('dictionary.export_csv'):
        raise PermissionDenied
    # Create the HttpResponse object with the appropriate CSV header.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="dictionary-export.csv"'
    # fields = [f.name for f in Gloss._meta.fields]
    #We want to manually set which fields to export here
    fieldnames = FIELDS['main']+FIELDS['phonology']+FIELDS['semantics']+FIELDS['frequency']+['inWeb', 'isNew']
    fields = [Gloss._meta.get_field(fieldname) for fieldname in fieldnames]
    selected_datasets = get_selected_datasets_for_user(self.request.user)
    dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
    lang_attr_name = 'name_' + DEFAULT_KEYWORDS_LANGUAGE['language_code_2char']
    # one lemma/annotation translation column per dataset language
    annotationidglosstranslation_fields = ["Annotation ID Gloss" + " (" + getattr(language, lang_attr_name) + ")"
                                           for language in dataset_languages]
    lemmaidglosstranslation_fields = ["Lemma ID Gloss" + " (" + getattr(language, lang_attr_name) + ")"
                                      for language in dataset_languages]
    writer = csv.writer(response)
    # render field verbose names under the configured language
    with override(LANGUAGE_CODE):
        header = ['Signbank ID', 'Dataset'] + lemmaidglosstranslation_fields + annotationidglosstranslation_fields + [f.verbose_name.encode('ascii','ignore').decode() for f in fields]
    for extra_column in ['SignLanguages','Dialects','Keywords','Sequential Morphology', 'Simultaneous Morphology', 'Blend Morphology',
                         'Relations to other signs','Relations to foreign signs', 'Tags', 'Notes']:
        header.append(extra_column)
    writer.writerow(header)
    for gloss in self.get_queryset():
        row = [str(gloss.pk), gloss.lemma.dataset.acronym]
        # lemma translations: emit the text only when exactly one exists
        for language in dataset_languages:
            lemmaidglosstranslations = gloss.lemma.lemmaidglosstranslation_set.filter(language=language)
            if lemmaidglosstranslations and len(lemmaidglosstranslations) == 1:
                row.append(lemmaidglosstranslations[0].text)
            else:
                row.append("")
        # annotation translations, same single-value rule
        for language in dataset_languages:
            annotationidglosstranslations = gloss.annotationidglosstranslation_set.filter(language=language)
            if annotationidglosstranslations and len(annotationidglosstranslations) == 1:
                row.append(annotationidglosstranslations[0].text)
            else:
                row.append("")
        for f in fields:
            #Try the value of the choicelist
            try:
                value = getattr(gloss, 'get_' + f.name + '_display')()
            #If it's not there, try the raw value
            except AttributeError:
                value = getattr(gloss,f.name)
            if f.name == 'weakdrop' or f.name == 'weakprop':
                if value == None:
                    value = 'Neutral'
            # This was disabled with the move to Python 3... might not be needed anymore?
            # if isinstance(value,unicode):
            #     value = str(value.encode('ascii','xmlcharrefreplace'))
            if not isinstance(value,str):
                value = str(value)
            # A handshape name can begin with =. To avoid Office thinking this is a formula, preface with '
            if value[:1] == '=':
                value = '\'' + value
            row.append(value)
        # get languages
        signlanguages = [signlanguage.name for signlanguage in gloss.signlanguage.all()]
        row.append(", ".join(signlanguages))
        # get dialects
        dialects = [dialect.name for dialect in gloss.dialect.all()]
        row.append(", ".join(dialects))
        # get translations (keywords)
        trans = [t.translation.text + ":" + t.language.language_code_2char for t in gloss.translation_set.all()]
        row.append(", ".join(trans))
        # get morphology
        # Sequential Morphology
        morphemes = [str(morpheme.morpheme.id) for morpheme in MorphologyDefinition.objects.filter(parent_gloss=gloss)]
        row.append(", ".join(morphemes))
        # Simultaneous Morphology, as "id:role" pairs
        morphemes = [(str(m.morpheme.id), m.role) for m in gloss.simultaneous_morphology.all()]
        sim_morphs = []
        for m in morphemes:
            sim_morphs.append(':'.join(m))
        simultaneous_morphemes = ', '.join(sim_morphs)
        row.append(simultaneous_morphemes)
        # Blend Morphology, as "id:role" pairs
        ble_morphemes = [(str(m.glosses.id), m.role) for m in gloss.blend_morphology.all()]
        ble_morphs = []
        for m in ble_morphemes:
            ble_morphs.append(':'.join(m))
        blend_morphemes = ', '.join(ble_morphs)
        row.append(blend_morphemes)
        # get relations to other signs, as "role:target_id"
        relations = [(relation.role, str(relation.target.id)) for relation in Relation.objects.filter(source=gloss)]
        relations_with_categories = []
        for rel_cat in relations:
            relations_with_categories.append(':'.join(rel_cat))
        relations_categories = ", ".join(relations_with_categories)
        row.append(relations_categories)
        # get relations to foreign signs, as "loan:lang:gloss"
        relations = [(str(relation.loan), relation.other_lang, relation.other_lang_gloss) for relation in RelationToForeignSign.objects.filter(gloss=gloss)]
        relations_with_categories = []
        for rel_cat in relations:
            relations_with_categories.append(':'.join(rel_cat))
        relations_categories = ", ".join(relations_with_categories)
        row.append(relations_categories)
        # export tags (underscores shown as spaces)
        tags_of_gloss = TaggedItem.objects.filter(object_id=gloss.id)
        tag_names_of_gloss = []
        for t_obj in tags_of_gloss:
            tag_id = t_obj.tag_id
            tag_name = Tag.objects.get(id=tag_id)
            tag_names_of_gloss += [str(tag_name).replace('_',' ')]
        tag_names = ", ".join(tag_names_of_gloss)
        row.append(tag_names)
        # export notes, sorted, as "role: (published,count,text)"
        note_role_choices = FieldChoice.objects.filter(field__iexact='NoteType')
        notes_of_gloss = gloss.definition_set.all()
        notes_list = []
        for note in notes_of_gloss:
            translated_note_role = machine_value_to_translated_human_value(note.role, note_role_choices, 'en')
            note_string = translated_note_role + ": (" + str(note.published) +","+ str(note.count) +","+ note.text + ")"
            notes_list.append(note_string)
        sorted_notes_list = sorted(notes_list)
        notes_display = ", ".join(sorted_notes_list)
        row.append(notes_display)
        #Make it safe for weird chars
        # NOTE(review): non-string cells (e.g. None) take the AttributeError
        # path and are written as None/empty — confirm intended.
        safe_row = []
        for column in row:
            try:
                safe_row.append(column.encode('utf-8').decode())
            except AttributeError:
                safe_row.append(None)
        writer.writerow(safe_row)
    return response
def get_queryset(self):
get = self.request.GET
#First check whether we want to show everything or a subset
try:
if self.kwargs['show_all']:
show_all = True
except (KeyError,TypeError):
show_all = False
#Then check what kind of stuff we want
if 'search_type' in get:
self.search_type = get['search_type']
else:
self.search_type = 'sign'
setattr(self.request, 'search_type', self.search_type)
if 'view_type' in get:
self.view_type = get['view_type']
# don't change query, just change display
# return self.request.session['search_results']
else:
# set to default
self.view_type = 'gloss_list'
setattr(self.request, 'view_type', self.view_type)
if 'inWeb' in self.request.GET:
# user is searching for signs / morphemes visible to anonymous uers
self.web_search = self.request.GET['inWeb'] == '2'
elif not self.request.user.is_authenticated():
self.web_search = True
setattr(self.request, 'web_search', self.web_search)
selected_datasets = get_selected_datasets_for_user(self.request.user)
#Get the initial selection
if len(get) > 0 or show_all:
# anonymous users can search signs, make sure no morphemes are in the results
if self.search_type == 'sign' or not self.request.user.is_authenticated():
# Get all the GLOSS items that are not member of the sub-class Morpheme
if SPEED_UP_RETRIEVING_ALL_SIGNS:
qs = Gloss.none_morpheme_objects().select_related('lemma').prefetch_related('parent_glosses').prefetch_related('simultaneous_morphology').prefetch_related('translation_set').filter(lemma__dataset__in=selected_datasets)
else:
qs = Gloss.none_morpheme_objects().filter(lemma__dataset__in=selected_datasets)
else:
if SPEED_UP_RETRIEVING_ALL_SIGNS:
qs = Gloss.objects.all().prefetch_related('lemma').prefetch_related('parent_glosses').prefetch_related('simultaneous_morphology').prefetch_related('translation_set').filter(lemma__dataset__in=selected_datasets)
else:
qs = Gloss.objects.all().filter(lemma__dataset__in=selected_datasets)
#No filters or 'show_all' specified? show nothing
else:
qs = Gloss.objects.none()
if not self.request.user.has_perm('dictionary.search_gloss'):
qs = qs.filter(inWeb__exact=True)
#If we wanted to get everything, we're done now
if show_all:
return order_queryset_by_sort_order(self.request.GET, qs)
# return qs
#If not, we will go trhough a long list of filters
if 'search' in get and get['search'] != '':
val = get['search']
query = Q(annotationidglosstranslation__text__iregex=val)
if re.match('^\d+$', val):
query = query | Q(sn__exact=val)
qs = qs.filter(query)
# Evaluate all gloss/language search fields
for get_key, get_value in get.items():
if get_key.startswith(GlossSearchForm.gloss_search_field_prefix) and get_value != '':
language_code_2char = get_key[len(GlossSearchForm.gloss_search_field_prefix):]
language = Language.objects.filter(language_code_2char=language_code_2char)
qs = qs.filter(annotationidglosstranslation__text__iregex=get_value,
annotationidglosstranslation__language=language)
elif get_key.startswith(GlossSearchForm.lemma_search_field_prefix) and get_value != '':
language_code_2char = get_key[len(GlossSearchForm.lemma_search_field_prefix):]
language = Language.objects.filter(language_code_2char=language_code_2char)
qs = qs.filter(lemma__lemmaidglosstranslation__text__iregex=get_value,
lemma__lemmaidglosstranslation__language=language)
elif get_key.startswith(GlossSearchForm.keyword_search_field_prefix) and get_value != '':
language_code_2char = get_key[len(GlossSearchForm.keyword_search_field_prefix):]
language = Language.objects.filter(language_code_2char=language_code_2char)
qs = qs.filter(translation__translation__text__iregex=get_value,
translation__language=language)
if 'keyword' in get and get['keyword'] != '':
val = get['keyword']
qs = qs.filter(translation__translation__text__iregex=val)
# NULLBOOLEANCHOICES = [(0, '---------'), (1, 'Unknown'), (2, 'True'), (3, 'False')]
if 'inWeb' in get and get['inWeb'] != '0':
# Don't apply 'inWeb' filter, if it is unspecified ('0' according to the NULLBOOLEANCHOICES)
val = get['inWeb'] == '2'
qs = qs.filter(inWeb__exact=val)
if 'hasvideo' in get and get['hasvideo'] != 'unspecified':
val = get['hasvideo'] == 'no'
qs = qs.filter(glossvideo__isnull=val)
if 'defspublished' in get and get['defspublished'] != 'unspecified':
val = get['defspublished'] == 'yes'
qs = qs.filter(definition__published=val)
fieldnames = FIELDS['main']+FIELDS['phonology']+FIELDS['semantics']+['inWeb', 'isNew']
# SignLanguage and basic property filters
# allows for multiselect
vals = get.getlist('dialect[]')
if '' in vals:
vals.remove('')
if vals != []:
qs = qs.filter(dialect__in=vals)
# allows for multiselect
vals = get.getlist('signlanguage[]')
if '' in vals:
vals.remove('')
if vals != []:
qs = qs.filter(signlanguage__in=vals)
if 'useInstr' in get and get['useInstr'] != '':
qs = qs.filter(useInstr__iregex=get['useInstr'])
for fieldnamemulti in settings.MULTIPLE_SELECT_GLOSS_FIELDS:
fieldnamemultiVarname = fieldnamemulti + '[]'
fieldnameQuery = fieldnamemulti + '__in'
vals = get.getlist(fieldnamemultiVarname)
if '' in vals:
vals.remove('')
if vals != []:
qs = qs.filter(**{ fieldnameQuery: vals })
## phonology and semantics field filters
fieldnames = [ f for f in fieldnames if f not in settings.MULTIPLE_SELECT_GLOSS_FIELDS ]
for fieldname in fieldnames:
if fieldname in get and get[fieldname] != '':
field_obj = Gloss._meta.get_field(fieldname)
if type(field_obj) in [CharField,TextField] and len(field_obj.choices) == 0:
key = fieldname + '__iregex'
else:
key = fieldname + '__exact'
val = get[fieldname]
if isinstance(field_obj,NullBooleanField):
val = {'0':'','1': None, '2': True, '3': False}[val]
if val != '':
kwargs = {key:val}
qs = qs.filter(**kwargs)
if 'defsearch' in get and get['defsearch'] != '':
val = get['defsearch']
if 'defrole' in get:
role = get['defrole']
else:
role = 'all'
if role == 'all':
qs = qs.filter(definition__text__icontains=val)
else:
qs = qs.filter(definition__text__icontains=val, definition__role__exact=role)
if 'tags' in get and get['tags'] != '':
vals = get.getlist('tags')
tags = []
for t in vals:
tags.extend(Tag.objects.filter(name=t))
# search is an implicit AND so intersection
tqs = TaggedItem.objects.get_intersection_by_model(Gloss, tags)
# intersection
qs = qs & tqs
qs = qs.distinct()
if 'nottags' in get and get['nottags'] != '':
vals = get.getlist('nottags')
tags = []
for t in vals:
tags.extend(Tag.objects.filter(name=t))
# search is an implicit AND so intersection
tqs = TaggedItem.objects.get_intersection_by_model(Gloss, tags)
# exclude all of tqs from qs
qs = [q for q in qs if q not in tqs]
if 'relationToForeignSign' in get and get['relationToForeignSign'] != '':
relations = RelationToForeignSign.objects.filter(other_lang_gloss__icontains=get['relationToForeignSign'])
potential_pks = [relation.gloss.pk for relation in relations]
qs = qs.filter(pk__in=potential_pks)
if 'hasRelationToForeignSign' in get and get['hasRelationToForeignSign'] != '0':
pks_for_glosses_with_relations = [relation.gloss.pk for relation in RelationToForeignSign.objects.all()]
if get['hasRelationToForeignSign'] == '1': #We only want glosses with a relation to a foreign sign
qs = qs.filter(pk__in=pks_for_glosses_with_relations)
elif get['hasRelationToForeignSign'] == '2': #We only want glosses without a relation to a foreign sign
qs = qs.exclude(pk__in=pks_for_glosses_with_relations)
if 'relation' in get and get['relation'] != '':
potential_targets = Gloss.objects.filter(idgloss__icontains=get['relation'])
relations = Relation.objects.filter(target__in=potential_targets)
potential_pks = [relation.source.pk for relation in relations]
qs = qs.filter(pk__in=potential_pks)
if 'hasRelation' in get and get['hasRelation'] != '':
#Find all relations with this role
if get['hasRelation'] == 'all':
relations_with_this_role = Relation.objects.all()
else:
relations_with_this_role = Relation.objects.filter(role__exact=get['hasRelation'])
#Remember the pk of all glosses that take part in the collected relations
pks_for_glosses_with_correct_relation = [relation.source.pk for relation in relations_with_this_role]
qs = qs.filter(pk__in=pks_for_glosses_with_correct_relation)
if 'morpheme' in get and get['morpheme'] != '':
# morpheme is an integer
input_morpheme = get['morpheme']
# Filter all glosses that contain this morpheme in their simultaneous morphology
try:
selected_morpheme = Morpheme.objects.get(pk=get['morpheme'])
potential_pks = [appears.parent_gloss.pk for appears in SimultaneousMorphologyDefinition.objects.filter(morpheme=selected_morpheme)]
qs = qs.filter(pk__in=potential_pks)
except:
# This error should not occur, the input search form requires the selection of a morpheme from a list
# If the user attempts to input a string, it is ignored by the gloss list search form
print("Morpheme not found: ", str(input_morpheme))
if 'hasComponentOfType' in get and get['hasComponentOfType'] != '':
# Look for "compound-components" of the indicated type. Compound Components are defined in class[MorphologyDefinition]
morphdefs_with_correct_role = MorphologyDefinition.objects.filter(role__exact=get['hasComponentOfType'])
pks_for_glosses_with_morphdefs_with_correct_role = [morphdef.parent_gloss.pk for morphdef in morphdefs_with_correct_role]
qs = qs.filter(pk__in=pks_for_glosses_with_morphdefs_with_correct_role)
if 'hasMorphemeOfType' in get and get['hasMorphemeOfType'] != '':
morpheme_type = get['hasMorphemeOfType']
# Get all Morphemes of the indicated mrpType
target_morphemes = Morpheme.objects.filter(mrpType__exact=morpheme_type)
sim_morphemes = SimultaneousMorphologyDefinition.objects.filter(morpheme_id__in=target_morphemes)
# Get all glosses that have one of the morphemes in this set
glosses_with_correct_mrpType = Gloss.objects.filter(simultaneous_morphology__in=sim_morphemes)
# Turn this into a list with pks
pks_for_glosses_with_correct_mrpType = [glossdef.pk for glossdef in glosses_with_correct_mrpType]
qs = qs.filter(pk__in=pks_for_glosses_with_correct_mrpType)
if 'definitionRole' in get and get['definitionRole'] != '':
#Find all definitions with this role
if get['definitionRole'] == 'all':
definitions_with_this_role = Definition.objects.all()
else:
definitions_with_this_role = Definition.objects.filter(role__exact=get['definitionRole'])
#Remember the pk of all glosses that are referenced in the collection definitions
pks_for_glosses_with_these_definitions = [definition.gloss.pk for definition in definitions_with_this_role]
qs = qs.filter(pk__in=pks_for_glosses_with_these_definitions)
if 'definitionContains' in get and get['definitionContains'] != '':
definitions_with_this_text = Definition.objects.filter(text__iregex=get['definitionContains'])
#Remember the pk of all glosses that are referenced in the collection definitions
pks_for_glosses_with_these_definitions = [definition.gloss.pk for definition in definitions_with_this_text]
qs = qs.filter(pk__in=pks_for_glosses_with_these_definitions)
if 'createdBefore' in get and get['createdBefore'] != '':
created_before_date = DT.datetime.strptime(get['createdBefore'], "%m/%d/%Y").date()
qs = qs.filter(creationDate__range=(EARLIEST_GLOSS_CREATION_DATE,created_before_date))
if 'createdAfter' in get and get['createdAfter'] != '':
created_after_date = DT.datetime.strptime(get['createdAfter'], "%m/%d/%Y").date()
qs = qs.filter(creationDate__range=(created_after_date,DT.datetime.now()))
if 'createdBy' in get and get['createdBy'] != '':
created_by_search_string = ' '.join(get['createdBy'].strip().split()) # remove redundant spaces
qs = qs.annotate(
created_by=Concat('creator__first_name', V(' '), 'creator__last_name', output_field=CharField())) \
.filter(created_by__icontains=created_by_search_string)
# Saving querysets results to sessions, these results can then be used elsewhere (like in gloss_detail)
# Flush the previous queryset (just in case)
# self.request.session['search_results'] = None
qs = qs.select_related('lemma')
try:
print('qs: ', qs.query.as_sql())
except:
pass
# Make sure that the QuerySet has filters applied (user is searching for something instead of showing all results [objects.all()])
if hasattr(qs.query.where, 'children') and len(qs.query.where.children) > 0:
# if not isinstance(qs.query.where.children, NothingNode):
items = []
for item in qs:
annotationidglosstranslations = item.annotationidglosstranslation_set.filter(
language__language_code_2char__exact=self.request.LANGUAGE_CODE
)
if annotationidglosstranslations and len(annotationidglosstranslations) > 0:
items.append(dict(id = item.id, gloss = annotationidglosstranslations[0].text))
else:
annotationidglosstranslations = item.annotationidglosstranslation_set.filter(
language__language_code_2char__exact='en'
)
if annotationidglosstranslations and len(annotationidglosstranslations) > 0:
items.append(dict(id=item.id, gloss=annotationidglosstranslations[0].text))
else:
items.append(dict(id=item.id, gloss=item.idgloss))
self.request.session['search_results'] = items
# Sort the queryset by the parameters given
qs = order_queryset_by_sort_order(self.request.GET, qs)
self.request.session['search_type'] = self.search_type
self.request.session['web_search'] = self.web_search
if not 'last_used_dataset' in self.request.session.keys():
self.request.session['last_used_dataset'] = self.last_used_dataset
# Return the resulting filtered and sorted queryset
return qs
class GlossDetailView(DetailView):
    """
    Detail page for a single Gloss.

    ``get()`` enforces dataset visibility rules (selected datasets, per-dataset
    view permission, anonymous access via ``inWeb``) before rendering.
    ``get_context_data()`` assembles the large template context: edit forms,
    machine values translated to the interface language, morphology,
    homonyms, relations, notes and other media.
    """
    model = Gloss
    context_object_name = 'gloss'
    # Acronym of the dataset of the gloss being viewed; written into the
    # session so later pages can default to the same dataset.
    last_used_dataset = None

    #Overriding the get method get permissions right
    def get(self, request, *args, **kwargs):
        """
        Fetch the gloss and apply access control before rendering.

        Authenticated users get a warning page when the gloss is outside
        their selected datasets, and are redirected to the public gloss page
        (if ``inWeb``) or shown a warning when they lack view permission on
        its dataset. Anonymous users are redirected to the public gloss page
        (if ``inWeb``) or to the login page. Raises ``Http404`` when the
        object lookup fails.
        """
        try:
            self.object = self.get_object()
        # except Http404:
        except:
            # NOTE(review): bare except maps *any* lookup failure (not just a
            # missing object) to a 404 — consider narrowing to Http404/DoesNotExist.
            # return custom template
            # return render(request, 'dictionary/warning.html', status=404)
            raise Http404()
        dataset_of_requested_gloss = self.object.dataset
        # Datasets this user has explicit 'view_dataset' permission for
        # (object-level only; global permissions deliberately not accepted).
        datasets_user_can_view = get_objects_for_user(request.user, 'view_dataset', Dataset, accept_global_perms=False)
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        if request.user.is_authenticated():
            if dataset_of_requested_gloss not in selected_datasets:
                return render(request, 'dictionary/warning.html',
                              {'warning': 'The gloss you are trying to view (' + str(
                                  self.object.id) + ') is not in your selected datasets.'})
            if dataset_of_requested_gloss not in datasets_user_can_view:
                # No view permission: fall back to the public page when the
                # gloss is published on the web, otherwise show a warning.
                if self.object.inWeb:
                    return HttpResponseRedirect(reverse('dictionary:public_gloss',kwargs={'glossid':self.object.pk}))
                else:
                    return render(request, 'dictionary/warning.html',
                                  {'warning': 'The gloss you are trying to view ('+str(self.object.id)+') is not assigned to a dataset.'})
        else:
            # Anonymous users may only see glosses published on the web.
            if self.object.inWeb:
                return HttpResponseRedirect(reverse('dictionary:public_gloss', kwargs={'glossid': self.object.pk}))
            else:
                return HttpResponseRedirect(reverse('registration:auth_login'))
        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        """
        Build the template context for the gloss detail page.

        Adds edit/upload forms, field values translated to the interface
        language, sequential and simultaneous morphology, blend morphology,
        homonyms, relations, regrouped notes, other media and lemma-group
        info. Also updates session keys ``datasetid`` and
        ``last_used_dataset`` as a side effect.
        """
        # reformat LANGUAGE_CODE for use in dictionary domain, accomodate multilingual codings
        from signbank.tools import convert_language_code_to_2char
        language_code = convert_language_code_to_2char(self.request.LANGUAGE_CODE)
        language = Language.objects.get(id=get_default_language_id())
        default_language_code = language.language_code_2char
        # Call the base implementation first to get a context
        context = super(GlossDetailView, self).get_context_data(**kwargs)
        # Add in a QuerySet of all the books
        context['tagform'] = TagUpdateForm()
        context['videoform'] = VideoUploadForGlossForm()
        context['imageform'] = ImageUploadForGlossForm()
        context['definitionform'] = DefinitionForm()
        context['relationform'] = RelationForm()
        context['morphologyform'] = GlossMorphologyForm()
        # Replace the 'role' field so its choices are translated to the
        # current interface language.
        context['morphologyform'].fields['role'] = forms.ChoiceField(label='Type', widget=forms.Select(attrs=ATTRS_FOR_FORMS),
                                                                     choices=choicelist_queryset_to_translated_dict(FieldChoice.objects.filter(field__iexact='MorphologyType'),
                                                                                                                    self.request.LANGUAGE_CODE,ordered=False,id_prefix=''), required=True)
        context['morphemeform'] = GlossMorphemeForm()
        context['blendform'] = GlossBlendForm()
        context['othermediaform'] = OtherMediaForm()
        context['navigation'] = context['gloss'].navigation(True)
        context['interpform'] = InterpreterFeedbackForm()
        context['lemma_create_field_prefix'] = LemmaCreateForm.lemma_create_field_prefix
        context['SIGN_NAVIGATION'] = settings.SIGN_NAVIGATION
        # Boolean-ish display flags: 0/False when the field is unset or at its
        # "empty" machine value.
        context['handedness'] = (int(self.object.handedness) > 1) if self.object.handedness else 0  # minimal machine value is 2
        context['domhndsh'] = (int(self.object.domhndsh) > 2) if self.object.domhndsh else 0  # minimal machine value -s 3
        context['tokNo'] = self.object.tokNo  # Number of occurrences of Sign, used to display Stars
        # check for existence of strong hand and weak hand shapes
        try:
            strong_hand_obj = Handshape.objects.get(machine_value = self.object.domhndsh)
        except Handshape.DoesNotExist:
            strong_hand_obj = None
        context['StrongHand'] = self.object.domhndsh if strong_hand_obj else 0
        context['WeakHand'] = self.object.subhndsh
        # context['NamedEntityDefined'] = (int(self.object.namEnt) > 1) if self.object.namEnt else 0 # minimal machine value is 2
        context['SemanticFieldDefined'] = (int(self.object.semField) > 1) if self.object.semField else 0  # minimal machine value is 2
        # context['ValenceDefined'] = (int(self.object.valence) > 1) if self.object.valence else 0 # minimal machine value is 2
        # context['IconicImageDefined'] = self.object.iconImage # exists if not emtpy
        # Id of the next gloss in admin order; fall back to this gloss's own
        # pk when there is no next gloss.
        next_gloss = Gloss.objects.get(pk=context['gloss'].pk).admin_next_gloss()
        if next_gloss == None:
            # NOTE(review): `== None` should idiomatically be `is None`.
            context['nextglossid'] = context['gloss'].pk  #context['gloss']
        else:
            context['nextglossid'] = next_gloss.pk
        if settings.SIGN_NAVIGATION:
            context['glosscount'] = Gloss.objects.count()
            # 1-based position of this gloss when ordered by sign number (sn).
            context['glossposn'] = Gloss.objects.filter(sn__lt=context['gloss'].sn).count()+1
        #Pass info about which fields we want to see
        gl = context['gloss']
        labels = gl.field_labels()
        # set a session variable to be able to pass the gloss's id to the ajax_complete method
        # the last_used_dataset name is updated to that of this gloss
        # if a sequesce of glosses are being created by hand, this keeps the dataset setting the same
        if gl.dataset:
            self.request.session['datasetid'] = gl.dataset.id
            self.last_used_dataset = gl.dataset.acronym
        else:
            # NOTE(review): stores a *language* id under 'datasetid' when the
            # gloss has no dataset — presumably intentional fallback; confirm.
            self.request.session['datasetid'] = get_default_language_id()
        self.request.session['last_used_dataset'] = self.last_used_dataset
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        # set up weak drop weak prop fields
        # Each entry: [value, field name, human label, widget kind].
        context['handedness_fields'] = []
        weak_drop = getattr(gl, 'weakdrop')
        weak_prop = getattr(gl, 'weakprop')
        context['handedness_fields'].append([weak_drop,'weakdrop',labels['weakdrop'],'list'])
        context['handedness_fields'].append([weak_prop,'weakprop',labels['weakprop'],'list'])
        context['etymology_fields_dom'] = []
        domhndsh_letter = getattr(gl, 'domhndsh_letter')
        domhndsh_number = getattr(gl, 'domhndsh_number')
        context['etymology_fields_sub'] = []
        subhndsh_letter = getattr(gl, 'subhndsh_letter')
        subhndsh_number = getattr(gl, 'subhndsh_number')
        context['etymology_fields_dom'].append([domhndsh_letter,'domhndsh_letter',labels['domhndsh_letter'],'check'])
        context['etymology_fields_dom'].append([domhndsh_number,'domhndsh_number',labels['domhndsh_number'],'check'])
        context['etymology_fields_sub'].append([subhndsh_letter,'subhndsh_letter',labels['subhndsh_letter'],'check'])
        context['etymology_fields_sub'].append([subhndsh_number,'subhndsh_number',labels['subhndsh_number'],'check'])
        #Translate the machine values to human values in the correct language, and save the choice lists along the way
        for topic in ['main','phonology','semantics','frequency']:
            context[topic+'_fields'] = []
            for field in FIELDS[topic]:
                # the following check will be used when querying is added, at the moment these don't appear in the phonology list
                if field not in ['weakprop', 'weakdrop', 'domhndsh_number', 'domhndsh_letter', 'subhndsh_number', 'subhndsh_letter']:
                    #Get and save the choice list for this field
                    fieldchoice_category = fieldname_to_category(field)
                    choice_list = FieldChoice.objects.filter(field__iexact=fieldchoice_category)
                    #Take the human value in the language we are using
                    machine_value = getattr(gl,field)
                    human_value = machine_value_to_translated_human_value(machine_value,choice_list,self.request.LANGUAGE_CODE)
                    #And add the kind of field
                    kind = fieldname_to_kind(field)
                    context[topic+'_fields'].append([human_value,field,labels[field],kind])
        #Collect all morphology definitions for th sequential morphology section, and make some translations in advance
        morphdef_roles = FieldChoice.objects.filter(field__iexact='MorphologyType')
        morphdefs = []
        for morphdef in context['gloss'].parent_glosses.all():
            translated_role = machine_value_to_translated_human_value(morphdef.role,morphdef_roles,self.request.LANGUAGE_CODE)
            # Fall back to the morpheme id when no annotation translation exists.
            sign_display = str(morphdef.morpheme.id)
            morph_texts = morphdef.morpheme.get_annotationidglosstranslation_texts()
            if morph_texts.keys():
                if language_code in morph_texts.keys():
                    sign_display = morph_texts[language_code]
                else:
                    sign_display = morph_texts[default_language_code]
            morphdefs.append((morphdef,translated_role,sign_display))
        # Sort by the translated role name for display.
        morphdefs = sorted(morphdefs, key=lambda tup: tup[1])
        context['morphdefs'] = morphdefs
        (homonyms_of_this_gloss, homonyms_not_saved, saved_but_not_homonyms) = gl.homonyms()
        # For each homonym candidate, pick a display text in the interface
        # language, falling back to the default language.
        homonyms_different_phonology = []
        for saved_gl in saved_but_not_homonyms:
            homo_trans = {}
            if saved_gl.dataset:
                for language in saved_gl.dataset.translation_languages.all():
                    homo_trans[language.language_code_2char] = saved_gl.annotationidglosstranslation_set.filter(language=language)
            else:
                language = Language.objects.get(id=get_default_language_id())
                homo_trans[language.language_code_2char] = saved_gl.annotationidglosstranslation_set.filter(language=language)
            if language_code in homo_trans:
                homo_display = homo_trans[language_code][0].text
            else:
                # This should be set to the default language if the interface language hasn't been set for this gloss
                homo_display = homo_trans[default_language_code][0].text
            homonyms_different_phonology.append((saved_gl,homo_display))
        context['homonyms_different_phonology'] = homonyms_different_phonology
        homonyms_but_not_saved = []
        for homonym in homonyms_not_saved:
            homo_trans = {}
            if homonym.dataset:
                for language in homonym.dataset.translation_languages.all():
                    homo_trans[language.language_code_2char] = homonym.annotationidglosstranslation_set.filter(language=language)
            else:
                language = Language.objects.get(id=get_default_language_id())
                homo_trans[language.language_code_2char] = homonym.annotationidglosstranslation_set.filter(language=language)
            if language_code in homo_trans:
                homo_display = homo_trans[language_code][0].text
            else:
                # This should be set to the default language if the interface language hasn't been set for this gloss
                homo_display = homo_trans[default_language_code][0].text
            homonyms_but_not_saved.append((homonym,homo_display))
        context['homonyms_but_not_saved'] = homonyms_but_not_saved
        # Regroup notes
        # Keys are (machine role, translated role); values are lists of notes.
        note_role_choices = FieldChoice.objects.filter(field__iexact='NoteType')
        notes = context['gloss'].definition_set.all()
        notes_groupedby_role = {}
        for note in notes:
            # print('note: ', note.id, ', ', note.role, ', ', note.published, ', ', note.text, ', ', note.count)
            translated_note_role = machine_value_to_translated_human_value(note.role,note_role_choices,self.request.LANGUAGE_CODE)
            role_id = (note.role, translated_note_role)
            if role_id not in notes_groupedby_role:
                notes_groupedby_role[role_id] = []
            notes_groupedby_role[role_id].append(note)
        context['notes_groupedby_role'] = notes_groupedby_role
        #Gather the OtherMedia
        context['other_media'] = []
        context['other_media_field_choices'] = {}
        other_media_type_choice_list = FieldChoice.objects.filter(field__iexact='OthermediaType')
        for other_media in gl.othermedia_set.all():
            human_value_media_type = machine_value_to_translated_human_value(other_media.type,other_media_type_choice_list,self.request.LANGUAGE_CODE)
            path = settings.URL+'dictionary/protected_media/othermedia/'+other_media.path
            if '/' in other_media.path:
                other_media_filename = other_media.path.split('/')[1]
            else:
                other_media_filename = other_media.path
            # Derive a MIME type from the extension; only mp4/png are mapped.
            if other_media_filename.split('.')[-1] == 'mp4':
                file_type = 'video/mp4'
            elif other_media_filename.split('.')[-1] == 'png':
                file_type = 'image/png'
            else:
                file_type = ''
            context['other_media'].append([other_media.pk, path, file_type, human_value_media_type, other_media.alternative_gloss, other_media_filename])
            # Save the other_media_type choices (same for every other_media, but necessary because they all have other ids)
            context['other_media_field_choices'][
                'other-media-type_' + str(other_media.pk)] = choicelist_queryset_to_translated_dict(
                other_media_type_choice_list, self.request.LANGUAGE_CODE)
        context['other_media_field_choices'] = json.dumps(context['other_media_field_choices'])
        context['separate_english_idgloss_field'] = SEPARATE_ENGLISH_IDGLOSS_FIELD
        # Lemma group: if more than one gloss shares this gloss's lemma, build
        # a search URL that lists the whole group.
        try:
            lemma_group_count = gl.lemma.gloss_set.count()
            if lemma_group_count > 1:
                context['lemma_group'] = True
                lemma_group_url_params = {'search_type': 'sign', 'view_type': 'lemma_groups'}
                for lemmaidglosstranslation in gl.lemma.lemmaidglosstranslation_set.prefetch_related('language'):
                    lang_code_2char = lemmaidglosstranslation.language.language_code_2char
                    # Anchored regex so the search matches the lemma text exactly.
                    lemma_group_url_params['lemma_'+lang_code_2char] = '^' + lemmaidglosstranslation.text + '$'
                from urllib.parse import urlencode
                url_query = urlencode(lemma_group_url_params)
                url_query = ("?" + url_query) if url_query else ''
                context['lemma_group_url'] = reverse_lazy('signs_search') + url_query
            else:
                context['lemma_group'] = False
                context['lemma_group_url'] = ''
        except:
            # NOTE(review): bare except; presumably guards against gl.lemma
            # being None — confirm and narrow.
            print("lemma_group_count: except")
            context['lemma_group'] = False
            context['lemma_group_url'] = ''
        # Put annotation_idgloss per language in the context
        context['annotation_idgloss'] = {}
        if gl.dataset:
            for language in gl.dataset.translation_languages.all():
                context['annotation_idgloss'][language] = gl.annotationidglosstranslation_set.filter(language=language)
        else:
            language = Language.objects.get(id=get_default_language_id())
            context['annotation_idgloss'][language] = gl.annotationidglosstranslation_set.filter(language=language)
        # Put translations (keywords) per language in the context
        context['translations_per_language'] = {}
        if gl.dataset:
            for language in gl.dataset.translation_languages.all():
                context['translations_per_language'][language] = gl.translation_set.filter(language=language)
        else:
            language = Language.objects.get(id=get_default_language_id())
            context['translations_per_language'][language] = gl.translation_set.filter(language=language)
        # Simultaneous morphology: (definition, display text, translated type)
        # tuples, with display text chosen like the homonym displays above.
        simultaneous_morphology = []
        sim_morph_typ_choices = FieldChoice.objects.filter(field__iexact='MorphemeType')
        if gl.simultaneous_morphology:
            for sim_morph in gl.simultaneous_morphology.all():
                translated_morph_type = machine_value_to_translated_human_value(sim_morph.morpheme.mrpType,sim_morph_typ_choices,self.request.LANGUAGE_CODE)
                morpheme_annotation_idgloss = {}
                if sim_morph.morpheme.dataset:
                    for language in sim_morph.morpheme.dataset.translation_languages.all():
                        morpheme_annotation_idgloss[language.language_code_2char] = sim_morph.morpheme.annotationidglosstranslation_set.filter(language=language)
                else:
                    language = Language.objects.get(id=get_default_language_id())
                    morpheme_annotation_idgloss[language.language_code_2char] = sim_morph.morpheme.annotationidglosstranslation_set.filter(language=language)
                if language_code in morpheme_annotation_idgloss.keys():
                    morpheme_display = morpheme_annotation_idgloss[language_code][0].text
                else:
                    # This should be set to the default language if the interface language hasn't been set for this gloss
                    morpheme_display = morpheme_annotation_idgloss[default_language_code][0].text
                simultaneous_morphology.append((sim_morph,morpheme_display,translated_morph_type))
        context['simultaneous_morphology'] = simultaneous_morphology
        # Obtain the number of morphemes in the dataset of this gloss
        # The template will not show the facility to add simultaneous morphology if there are no morphemes to choose from
        dataset_id_of_gloss = gl.dataset
        count_morphemes_in_dataset = Morpheme.objects.filter(lemma__dataset=dataset_id_of_gloss).count()
        context['count_morphemes_in_dataset'] = count_morphemes_in_dataset
        # Blend morphology: (definition, display text) pairs.
        blend_morphology = []
        if gl.blend_morphology:
            for ble_morph in gl.blend_morphology.all():
                glosses_annotation_idgloss = {}
                if ble_morph.glosses.dataset:
                    for language in ble_morph.glosses.dataset.translation_languages.all():
                        glosses_annotation_idgloss[language.language_code_2char] = ble_morph.glosses.annotationidglosstranslation_set.filter(language=language)
                else:
                    language = Language.objects.get(id=get_default_language_id())
                    glosses_annotation_idgloss[language.language_code_2char] = ble_morph.glosses.annotationidglosstranslation_set.filter(language=language)
                if language_code in glosses_annotation_idgloss.keys():
                    morpheme_display = glosses_annotation_idgloss[language_code][0].text
                else:
                    # This should be set to the default language if the interface language hasn't been set for this gloss
                    morpheme_display = glosses_annotation_idgloss[default_language_code][0].text
                blend_morphology.append((ble_morph,morpheme_display))
        context['blend_morphology'] = blend_morphology
        # Other relations: (relation, target display text) pairs.
        otherrelations = []
        if gl.relation_sources:
            for oth_rel in gl.relation_sources.all():
                other_relations_dict = {}
                if oth_rel.target.dataset:
                    for language in oth_rel.target.dataset.translation_languages.all():
                        other_relations_dict[language.language_code_2char] = oth_rel.target.annotationidglosstranslation_set.filter(language=language)
                else:
                    language = Language.objects.get(id=get_default_language_id())
                    other_relations_dict[language.language_code_2char] = oth_rel.target.annotationidglosstranslation_set.filter(language=language)
                if language_code in other_relations_dict.keys():
                    target_display = other_relations_dict[language_code][0].text
                else:
                    # This should be set to the default language if the interface language hasn't been set for this gloss
                    target_display = other_relations_dict[default_language_code][0].text
                otherrelations.append((oth_rel,target_display))
        context['otherrelations'] = otherrelations
        # Dataset choices for the dataset-change widget (authenticated users only).
        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS') and settings.SHOW_DATASET_INTERFACE_OPTIONS:
            context['dataset_choices'] = {}
            user = self.request.user
            if user.is_authenticated():
                qs = get_objects_for_user(user, 'view_dataset', Dataset, accept_global_perms=False)
                dataset_choices = {}
                for dataset in qs:
                    dataset_choices[dataset.acronym] = dataset.acronym
                context['dataset_choices'] = json.dumps(dataset_choices)
        # Expose optional settings flags to the template, defaulting to False.
        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS') and settings.SHOW_DATASET_INTERFACE_OPTIONS:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
        else:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
        if hasattr(settings, 'SHOW_LETTER_NUMBER_PHONOLOGY'):
            context['SHOW_LETTER_NUMBER_PHONOLOGY'] = settings.SHOW_LETTER_NUMBER_PHONOLOGY
        else:
            context['SHOW_LETTER_NUMBER_PHONOLOGY'] = False
        context['generate_translated_choice_list_table'] = generate_translated_choice_list_table()
        return context
class GlossRelationsDetailView(DetailView):
model = Gloss
template_name = 'dictionary/related_signs_detail_view.html'
context_object_name = 'gloss'
#Overriding the get method get permissions right
def get(self, request, *args, **kwargs):
try:
self.object = self.get_object()
except Http404:
# return custom template
return render(request, 'no_object.html', status=404)
if request.user.is_authenticated():
if self.object.dataset not in get_objects_for_user(request.user, 'view_dataset', Dataset, accept_global_perms=False):
if self.object.inWeb:
return HttpResponseRedirect(reverse('dictionary:public_gloss',kwargs={'glossid':self.object.pk}))
else:
return HttpResponse('')
else:
if self.object.inWeb:
return HttpResponseRedirect(reverse('dictionary:public_gloss', kwargs={'glossid': self.object.pk}))
else:
return HttpResponseRedirect(reverse('registration:auth_login'))
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
def get_context_data(self, **kwargs):
# reformat LANGUAGE_CODE for use in dictionary domain, accomodate multilingual codings
from signbank.tools import convert_language_code_to_2char
language_code = convert_language_code_to_2char(self.request.LANGUAGE_CODE)
language = Language.objects.get(id=get_default_language_id())
default_language_code = language.language_code_2char
# Call the base implementation first to get a context
context = super(GlossRelationsDetailView, self).get_context_data(**kwargs)
# Add in a QuerySet of all the books
context['tagform'] = TagUpdateForm()
context['videoform'] = VideoUploadForGlossForm()
context['imageform'] = ImageUploadForGlossForm()
context['definitionform'] = DefinitionForm()
context['relationform'] = RelationForm()
context['morphologyform'] = GlossMorphologyForm()
context['morphologyform'].fields['role'] = forms.ChoiceField(label='Type', widget=forms.Select(attrs=ATTRS_FOR_FORMS),
choices=choicelist_queryset_to_translated_dict(FieldChoice.objects.filter(field__iexact='MorphologyType'),
self.request.LANGUAGE_CODE,ordered=False,id_prefix=''), required=True)
context['morphemeform'] = GlossMorphemeForm()
context['blendform'] = GlossBlendForm()
context['othermediaform'] = OtherMediaForm()
context['navigation'] = context['gloss'].navigation(True)
context['interpform'] = InterpreterFeedbackForm()
context['SIGN_NAVIGATION'] = settings.SIGN_NAVIGATION
#Pass info about which fields we want to see
gl = context['gloss']
labels = gl.field_labels()
context['choice_lists'] = {}
#Translate the machine values to human values in the correct language, and save the choice lists along the way
for topic in ['main','phonology','semantics','frequency']:
context[topic+'_fields'] = []
for field in FIELDS[topic]:
#Get and save the choice list for this field
fieldchoice_category = fieldname_to_category(field)
choice_list = FieldChoice.objects.filter(field__iexact=fieldchoice_category)
if len(choice_list) > 0:
context['choice_lists'][field] = choicelist_queryset_to_translated_dict (choice_list,self.request.LANGUAGE_CODE)
#Take the human value in the language we are using
machine_value = getattr(gl,field)
human_value = machine_value_to_translated_human_value(machine_value,choice_list,self.request.LANGUAGE_CODE)
#And add the kind of field
kind = fieldname_to_kind(field)
context[topic+'_fields'].append([human_value,field,labels[field],kind])
#Add morphology to choice lists
context['choice_lists']['morphology_role'] = choicelist_queryset_to_translated_dict(FieldChoice.objects.filter(field__iexact='MorphologyType'),
self.request.LANGUAGE_CODE)
#Collect all morphology definitions for th sequential morphology section, and make some translations in advance
morphdef_roles = FieldChoice.objects.filter(field__iexact='MorphologyType')
morphdefs = []
for morphdef in context['gloss'].parent_glosses.all():
translated_role = machine_value_to_translated_human_value(morphdef.role,morphdef_roles,self.request.LANGUAGE_CODE)
sign_display = str(morphdef.morpheme.id)
morph_texts = morphdef.morpheme.get_annotationidglosstranslation_texts()
if morph_texts.keys():
if language_code in morph_texts.keys():
sign_display = morph_texts[language_code]
else:
sign_display = morph_texts[default_language_code]
morphdefs.append((morphdef,translated_role,sign_display))
context['morphdefs'] = morphdefs
context['separate_english_idgloss_field'] = SEPARATE_ENGLISH_IDGLOSS_FIELD
try:
lemma_group_count = gl.lemma.gloss_set.count()
if lemma_group_count > 1:
context['lemma_group'] = True
lemma_group_url_params = {'search_type': 'sign', 'view_type': 'lemma_groups'}
for lemmaidglosstranslation in gl.lemma.lemmaidglosstranslation_set.prefetch_related('language'):
lang_code_2char = lemmaidglosstranslation.language.language_code_2char
lemma_group_url_params['lemma_'+lang_code_2char] = '^' + lemmaidglosstranslation.text + '$'
from urllib.parse import urlencode
url_query = urlencode(lemma_group_url_params)
url_query = ("?" + url_query) if url_query else ''
context['lemma_group_url'] = reverse_lazy('signs_search') + url_query
else:
context['lemma_group'] = False
context['lemma_group_url'] = ''
except:
print("lemma_group_count: except")
context['lemma_group'] = False
context['lemma_group_url'] = ''
lemma_group_glosses = gl.lemma.gloss_set.all()
glosses_in_lemma_group = []
if lemma_group_glosses:
for gl_lem in lemma_group_glosses:
lemma_dict = {}
if gl_lem.dataset:
for language in gl_lem.dataset.translation_languages.all():
lemma_dict[language.language_code_2char] = gl_lem.annotationidglosstranslation_set.filter(language=language)
else:
language = Language.objects.get(id=get_default_language_id())
lemma_dict[language.language_code_2char] = gl_lem.annotationidglosstranslation_set.filter(language=language)
if language_code in lemma_dict.keys():
gl_lem_display = lemma_dict[language_code][0].text
else:
# This should be set to the default language if the interface language hasn't been set for this gloss
gl_lem_display = lemma_dict[default_language_code][0].text
glosses_in_lemma_group.append((gl_lem,gl_lem_display))
context['glosses_in_lemma_group'] = glosses_in_lemma_group
otherrelations = []
if gl.relation_sources:
for oth_rel in gl.relation_sources.all():
other_relations_dict = {}
if oth_rel.target.dataset:
for language in oth_rel.target.dataset.translation_languages.all():
other_relations_dict[language.language_code_2char] = oth_rel.target.annotationidglosstranslation_set.filter(language=language)
else:
language = Language.objects.get(id=get_default_language_id())
other_relations_dict[language.language_code_2char] = oth_rel.target.annotationidglosstranslation_set.filter(language=language)
if language_code in other_relations_dict.keys():
target_display = other_relations_dict[language_code][0].text
else:
# This should be set to the default language if the interface language hasn't been set for this gloss
target_display = other_relations_dict[default_language_code][0].text
otherrelations.append((oth_rel,target_display))
context['otherrelations'] = otherrelations
has_variants = gl.has_variants()
variants = []
if has_variants:
for gl_var in has_variants:
variants_dict = {}
if gl_var.dataset:
for language in gl_var.dataset.translation_languages.all():
variants_dict[language.language_code_2char] = gl_var.annotationidglosstranslation_set.filter(language=language)
else:
language = Language.objects.get(id=get_default_language_id())
variants_dict[language.language_code_2char] = gl_var.annotationidglosstranslation_set.filter(language=language)
if language_code in variants_dict.keys():
gl_var_display = variants_dict[language_code][0].text
else:
# This should be set to the default language if the interface language hasn't been set for this gloss
gl_var_display = variants_dict[default_language_code][0].text
variants.append((gl_var,gl_var_display))
context['variants'] = variants
minimal_pairs_dict = gl.minimal_pairs_dict()
minimalpairs = []
for mpg, dict in minimal_pairs_dict.items():
minimal_pairs_trans = {}
if mpg.dataset:
for language in mpg.dataset.translation_languages.all():
minimal_pairs_trans[language.language_code_2char] = mpg.annotationidglosstranslation_set.filter(language=language)
else:
language = Language.objects.get(id=get_default_language_id())
minimal_pairs_trans[language.language_code_2char] = mpg.annotationidglosstranslation_set.filter(language=language)
if language_code in minimal_pairs_trans.keys():
minpar_display = minimal_pairs_trans[language_code][0].text
else:
# This should be set to the default language if the interface language hasn't been set for this gloss
minpar_display = minimal_pairs_trans[default_language_code][0].text
minimalpairs.append((mpg,dict,minpar_display))
context['minimalpairs'] = minimalpairs
# Put annotation_idgloss per language in the context
context['annotation_idgloss'] = {}
if gl.dataset:
for language in gl.dataset.translation_languages.all():
context['annotation_idgloss'][language] = gl.annotationidglosstranslation_set.filter(language=language)
else:
language = Language.objects.get(id=get_default_language_id())
context['annotation_idgloss'][language] = gl.annotationidglosstranslation_set.filter(language=language)
selected_datasets = get_selected_datasets_for_user(self.request.user)
dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
context['dataset_languages'] = dataset_languages
if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
else:
context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
return context
class MorphemeListView(ListView):
    """The morpheme list view basically copies the gloss list view.

    Search/list page for Morpheme objects with optional CSV export
    (``?format=CSV``). The ids of the current search hits are mirrored into
    ``request.session['search_results']`` so detail views can page through
    the same result set.
    """
    model = Morpheme
    # Restricts the session-stored search results to morpheme searches.
    search_type = 'morpheme'
    dataset_name = DEFAULT_DATASET
    # Dataset last used by this user; refreshed from the session in get_context_data.
    last_used_dataset = None
    template_name = 'dictionary/admin_morpheme_list.html'
    paginate_by = 500

    def get_context_data(self, **kwargs):
        """Populate the template context with the search form, choice lists
        and dataset/interface bookkeeping."""
        # Call the base implementation first to get a context
        context = super(MorphemeListView, self).get_context_data(**kwargs)
        # Retrieve the search_type, so that we know whether the search should be restricted to Gloss or not
        if 'search_type' in self.request.GET:
            self.search_type = self.request.GET['search_type']
        if 'last_used_dataset' in self.request.session.keys():
            self.last_used_dataset = self.request.session['last_used_dataset']
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        # Build (id, name) choice pairs for the distinct sign languages of the
        # selected datasets, for the search form.
        selected_datasets_signlanguage = [ds.signlanguage for ds in selected_datasets]
        sign_languages = []
        for sl in selected_datasets_signlanguage:
            if not ((str(sl.id), sl.name) in sign_languages):
                sign_languages.append((str(sl.id), sl.name))
        selected_datasets_dialects = Dialect.objects.filter(signlanguage__in=selected_datasets_signlanguage).distinct()
        dialects = []
        for dl in selected_datasets_dialects:
            # Display dialects qualified by their sign language.
            dialect_name = dl.signlanguage.name + "/" + dl.name
            dialects.append((str(dl.id), dialect_name))
        search_form = MorphemeSearchForm(self.request.GET, languages=dataset_languages, sign_languages=sign_languages,
                                         dialects=dialects, language_code=self.request.LANGUAGE_CODE)
        context['searchform'] = search_form
        # NOTE(review): 'glosscount' is the total number of morphemes in the
        # database, not the number of search hits — confirm the template
        # expects a grand total here.
        context['glosscount'] = Morpheme.objects.all().count()
        context['search_type'] = self.search_type
        context['add_morpheme_form'] = MorphemeCreateForm(self.request.GET, languages=dataset_languages, user=self.request.user, last_used_dataset=self.last_used_dataset)
        # make sure that the morpheme-type options are available to the listview
        oChoiceLists = {}
        choice_list = FieldChoice.objects.filter(field__iexact=fieldname_to_category('mrpType'))
        if len(choice_list) > 0:
            ordered_dict = choicelist_queryset_to_translated_dict(choice_list, self.request.LANGUAGE_CODE)
            oChoiceLists['mrpType'] = ordered_dict
        # Make all choice lists available in the context (currently only mrpType)
        context['choice_lists'] = json.dumps(oChoiceLists)
        # Expose (fieldname, bound form field, label) triples per topic so the
        # template can render the search inputs grouped by section.
        context['input_names_fields_and_labels'] = {}
        for topic in ['main', 'phonology', 'semantics']:
            context['input_names_fields_and_labels'][topic] = []
            for fieldname in settings.FIELDS[topic]:
                # These fields are rendered specially elsewhere, not as plain search inputs.
                if fieldname not in ['weakprop', 'weakdrop', 'domhndsh_number', 'domhndsh_letter', 'subhndsh_number', 'subhndsh_letter']:
                    field = search_form[fieldname]
                    label = field.label
                    context['input_names_fields_and_labels'][topic].append((fieldname, field, label))
        context['paginate_by'] = self.request.GET.get('paginate_by', self.paginate_by)
        # (A duplicated recomputation of selected_datasets / dataset_languages,
        # identical to the queries at the top of this method, was removed here.)
        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
        else:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
        context['lemma_create_field_prefix'] = LemmaCreateForm.lemma_create_field_prefix
        context['MULTIPLE_SELECT_MORPHEME_FIELDS'] = settings.MULTIPLE_SELECT_MORPHEME_FIELDS
        return context

    def get_paginate_by(self, queryset):
        """
        Paginate by specified value in querystring, or use default class property value.
        """
        return self.request.GET.get('paginate_by', self.paginate_by)

    def get_queryset(self):
        """Build the filtered, sorted Morpheme queryset from the GET parameters.

        Side effects: stores the hit list in ``request.session['search_results']``
        and the search type in ``request.session['search_type']``.
        """
        # get query terms from self.request
        get = self.request.GET
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        if len(get) > 0:
            qs = Morpheme.objects.all().filter(lemma__dataset__in=selected_datasets)
        else:
            # Don't show anything when we're not searching yet
            qs = Morpheme.objects.none()

        # Evaluate all morpheme/language search fields (one field per dataset language).
        for get_key, get_value in get.items():
            if get_key.startswith(MorphemeSearchForm.morpheme_search_field_prefix) and get_value != '':
                language_code_2char = get_key[len(MorphemeSearchForm.morpheme_search_field_prefix):]
                language = Language.objects.filter(language_code_2char=language_code_2char)
                qs = qs.filter(annotationidglosstranslation__text__iregex=get_value,
                               annotationidglosstranslation__language=language)
            elif get_key.startswith(MorphemeSearchForm.keyword_search_field_prefix) and get_value != '':
                language_code_2char = get_key[len(MorphemeSearchForm.keyword_search_field_prefix):]
                language = Language.objects.filter(language_code_2char=language_code_2char)
                qs = qs.filter(translation__translation__text__iregex=get_value,
                               translation__language=language)

        if 'lemmaGloss' in get and get['lemmaGloss'] != '':
            val = get['lemmaGloss']
            qs = qs.filter(idgloss__iregex=val)

        if 'keyword' in get and get['keyword'] != '':
            val = get['keyword']
            qs = qs.filter(translation__translation__text__iregex=val)

        if 'inWeb' in get and get['inWeb'] != '0':
            # Don't apply 'inWeb' filter, if it is unspecified ('0' according to the NULLBOOLEANCHOICES)
            val = get['inWeb'] == '2'
            qs = qs.filter(inWeb__exact=val)

        if 'hasvideo' in get and get['hasvideo'] != 'unspecified':
            val = get['hasvideo'] == 'no'
            qs = qs.filter(glossvideo__isnull=val)

        if 'defspublished' in get and get['defspublished'] != 'unspecified':
            val = get['defspublished'] == 'yes'
            qs = qs.filter(definition__published=val)

        fieldnames = FIELDS['main'] + FIELDS['phonology'] + FIELDS['semantics'] + ['inWeb', 'isNew']

        # SignLanguage and basic property filters
        # allows for multiselect
        vals = get.getlist('dialect[]')
        if '' in vals:
            vals.remove('')
        if vals != []:
            qs = qs.filter(dialect__in=vals)

        # allows for multiselect
        vals = get.getlist('signlanguage[]')
        if '' in vals:
            vals.remove('')
        if vals != []:
            qs = qs.filter(signlanguage__in=vals)

        if 'useInstr' in get and get['useInstr'] != '':
            qs = qs.filter(useInstr__icontains=get['useInstr'])

        # Multi-select fields filter with an __in lookup on the '<field>[]' GET key.
        for fieldnamemulti in settings.MULTIPLE_SELECT_MORPHEME_FIELDS:
            fieldnamemultiVarname = fieldnamemulti + '[]'
            fieldnameQuery = fieldnamemulti + '__in'
            vals = get.getlist(fieldnamemultiVarname)
            if '' in vals:
                vals.remove('')
            if vals != []:
                qs = qs.filter(**{fieldnameQuery: vals})

        ## phonology and semantics field filters
        fieldnames = [f for f in fieldnames if f not in settings.MULTIPLE_SELECT_MORPHEME_FIELDS]
        for fieldname in fieldnames:
            if fieldname in get:
                key = fieldname + '__exact'
                val = get[fieldname]
                # NOTE(review): field metadata is looked up on Gloss, not Morpheme —
                # presumably Morpheme inherits these fields; verify.
                if isinstance(Gloss._meta.get_field(fieldname), NullBooleanField):
                    # Map the NULLBOOLEANCHOICES codes onto real values.
                    val = {'0': '', '1': None, '2': True, '3': False}[val]
                if val != '':
                    kwargs = {key: val}
                    qs = qs.filter(**kwargs)

        if 'defsearch' in get and get['defsearch'] != '':
            val = get['defsearch']
            if 'defrole' in get:
                role = get['defrole']
            else:
                role = 'all'
            if role == 'all':
                qs = qs.filter(definition__text__icontains=val)
            else:
                qs = qs.filter(definition__text__icontains=val, definition__role__exact=role)

        if 'tags' in get and get['tags'] != '':
            vals = get.getlist('tags')
            tags = []
            for t in vals:
                tags.extend(Tag.objects.filter(name=t))
            # search is an implicit AND so intersection
            tqs = TaggedItem.objects.get_intersection_by_model(Gloss, tags)
            # intersection
            qs = qs & tqs
            qs = qs.distinct()

        if 'nottags' in get and get['nottags'] != '':
            vals = get.getlist('nottags')
            tags = []
            for t in vals:
                tags.extend(Tag.objects.filter(name=t))
            # search is an implicit AND so intersection
            tqs = TaggedItem.objects.get_intersection_by_model(Gloss, tags)
            # BUG FIX: the original did ``qs = [q for q in qs if q not in tqs]``,
            # turning qs into a plain list; every later qs.filter(...) call and
            # the qs.query access below then raised AttributeError, so any
            # 'nottags' search crashed. Exclude by primary key instead so qs
            # stays a QuerySet with identical exclusion semantics.
            excluded_pks = [q.pk for q in tqs]
            qs = qs.exclude(pk__in=excluded_pks)

        if 'relationToForeignSign' in get and get['relationToForeignSign'] != '':
            relations = RelationToForeignSign.objects.filter(other_lang_gloss__icontains=get['relationToForeignSign'])
            potential_pks = [relation.gloss.pk for relation in relations]
            qs = qs.filter(pk__in=potential_pks)

        if 'hasRelationToForeignSign' in get and get['hasRelationToForeignSign'] != '0':
            pks_for_glosses_with_relations = [relation.gloss.pk for relation in RelationToForeignSign.objects.all()]
            if get['hasRelationToForeignSign'] == '1':  # We only want glosses with a relation to a foreign sign
                qs = qs.filter(pk__in=pks_for_glosses_with_relations)
            elif get['hasRelationToForeignSign'] == '2':  # We only want glosses without a relation to a foreign sign
                qs = qs.exclude(pk__in=pks_for_glosses_with_relations)

        if 'relation' in get and get['relation'] != '':
            potential_targets = Gloss.objects.filter(idgloss__icontains=get['relation'])
            relations = Relation.objects.filter(target__in=potential_targets)
            potential_pks = [relation.source.pk for relation in relations]
            qs = qs.filter(pk__in=potential_pks)

        if 'hasRelation' in get and get['hasRelation'] != '':
            # Find all relations with this role
            if get['hasRelation'] == 'all':
                relations_with_this_role = Relation.objects.all()
            else:
                relations_with_this_role = Relation.objects.filter(role__exact=get['hasRelation'])
            # Remember the pk of all glosses that take part in the collected relations
            pks_for_glosses_with_correct_relation = [relation.source.pk for relation in relations_with_this_role]
            qs = qs.filter(pk__in=pks_for_glosses_with_correct_relation)

        if 'morpheme' in get and get['morpheme'] != '':
            potential_morphemes = Gloss.objects.filter(idgloss__icontains=get['morpheme'])
            potential_morphdefs = MorphologyDefinition.objects.filter(
                morpheme__in=[morpheme.pk for morpheme in potential_morphemes])
            potential_pks = [morphdef.parent_gloss.pk for morphdef in potential_morphdefs]
            qs = qs.filter(pk__in=potential_pks)

        if 'definitionRole' in get and get['definitionRole'] != '':
            # Find all definitions with this role
            if get['definitionRole'] == 'all':
                definitions_with_this_role = Definition.objects.all()
            else:
                definitions_with_this_role = Definition.objects.filter(role__exact=get['definitionRole'])
            # Remember the pk of all glosses that are referenced in the collection definitions
            pks_for_glosses_with_these_definitions = [definition.gloss.pk for definition in definitions_with_this_role]
            qs = qs.filter(pk__in=pks_for_glosses_with_these_definitions)

        if 'definitionContains' in get and get['definitionContains'] != '':
            definitions_with_this_text = Definition.objects.filter(text__icontains=get['definitionContains'])
            # Remember the pk of all glosses that are referenced in the collection definitions
            pks_for_glosses_with_these_definitions = [definition.gloss.pk for definition in definitions_with_this_text]
            qs = qs.filter(pk__in=pks_for_glosses_with_these_definitions)

        if 'createdBefore' in get and get['createdBefore'] != '':
            created_before_date = DT.datetime.strptime(get['createdBefore'], "%m/%d/%Y").date()
            qs = qs.filter(creationDate__range=(EARLIEST_GLOSS_CREATION_DATE, created_before_date))

        if 'createdAfter' in get and get['createdAfter'] != '':
            created_after_date = DT.datetime.strptime(get['createdAfter'], "%m/%d/%Y").date()
            qs = qs.filter(creationDate__range=(created_after_date, DT.datetime.now()))

        if 'createdBy' in get and get['createdBy'] != '':
            created_by_search_string = ' '.join(get['createdBy'].strip().split())  # remove redundant spaces
            qs = qs.annotate(
                created_by=Concat('creator__first_name', V(' '), 'creator__last_name', output_field=CharField())) \
                .filter(created_by__iregex=created_by_search_string)

        # Saving querysets results to sessions, these results can then be used elsewhere (like in gloss_detail)
        # Flush the previous queryset (just in case)
        self.request.session['search_results'] = None

        # Make sure that the QuerySet has filters applied (user is searching for something instead of showing all results [objects.all()])
        if hasattr(qs.query.where, 'children') and len(qs.query.where.children) > 0:
            items = []
            for item in qs:
                items.append(dict(id=item.id, gloss=item.idgloss))
            self.request.session['search_results'] = items

        # Sort the queryset by the parameters given
        qs = order_queryset_by_sort_order(self.request.GET, qs)

        self.request.session['search_type'] = self.search_type
        if not ('last_used_dataset' in self.request.session.keys()):
            self.request.session['last_used_dataset'] = self.last_used_dataset

        # Return the resulting filtered and sorted queryset
        return qs

    def render_to_response(self, context):
        # Look for a 'format=CSV' GET argument and export instead of rendering.
        if self.request.GET.get('format') == 'CSV':
            return self.render_to_csv_response(context)
        else:
            return super(MorphemeListView, self).render_to_response(context)

    # noinspection PyInterpreter,PyInterpreter
    def render_to_csv_response(self, context):
        """Convert all Morphemes into a CSV
        This function is derived from and similar to the one used in class GlossListView
        Differences:
        1 - this one adds the field [mrpType]
        2 - the filename is different"""
        if not self.request.user.has_perm('dictionary.export_csv'):
            raise PermissionDenied

        # Create the HttpResponse object with the appropriate CSV header.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="dictionary-morph-export.csv"'

        # We want to manually set which fields to export here
        fieldnames = FIELDS['main'] + FIELDS['phonology'] + FIELDS['semantics'] + FIELDS['frequency'] + ['inWeb', 'isNew']
        # Different from Gloss: we use Morpheme here
        fields = [Morpheme._meta.get_field(fieldname) for fieldname in fieldnames]

        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        # One "Annotation ID Gloss (<language>)" column per dataset language.
        lang_attr_name = 'name_' + DEFAULT_KEYWORDS_LANGUAGE['language_code_2char']
        annotationidglosstranslation_fields = ["Annotation ID Gloss" + " (" + getattr(language, lang_attr_name) + ")" for language in
                                               dataset_languages]

        writer = csv.writer(response)

        with override(LANGUAGE_CODE):
            header = ['Signbank ID'] + annotationidglosstranslation_fields + [f.verbose_name.title().encode('ascii', 'ignore').decode() for f in fields]

        for extra_column in ['SignLanguages', 'Dialects', 'Keywords', 'Morphology', 'Relations to other signs',
                             'Relations to foreign signs', 'Appears in signs', ]:
            header.append(extra_column)

        writer.writerow(header)

        for gloss in self.get_queryset():
            row = [str(gloss.pk)]
            for language in dataset_languages:
                annotationidglosstranslations = gloss.annotationidglosstranslation_set.filter(language=language)
                # Only emit the text when there is exactly one translation.
                if annotationidglosstranslations and len(annotationidglosstranslations) == 1:
                    row.append(annotationidglosstranslations[0].text)
                else:
                    row.append("")

            for f in fields:
                # Try the value of the choicelist
                try:
                    row.append(getattr(gloss, 'get_' + f.name + '_display')())
                # If it's not there, try the raw value
                except AttributeError:
                    value = getattr(gloss, f.name)
                    # This was disabled with the move to Python 3... might not be needed anymore?
                    # if isinstance(value, unicode):
                    #     value = str(value.encode('ascii', 'xmlcharrefreplace'))
                    # elif not isinstance(value, str):
                    value = str(value)
                    row.append(value)

            # get languages
            signlanguages = [signlanguage.name for signlanguage in gloss.signlanguage.all()]
            row.append(", ".join(signlanguages))

            # get dialects
            dialects = [dialect.name for dialect in gloss.dialect.all()]
            row.append(", ".join(dialects))

            # get translations
            trans = [t.translation.text for t in gloss.translation_set.all()]
            row.append(", ".join(trans))

            # get compound's component type
            # NOTE(review): 'role' values are joined as text; if role is a
            # machine value (int) this join would raise TypeError — confirm.
            morphemes = [morpheme.role for morpheme in MorphologyDefinition.objects.filter(parent_gloss=gloss)]
            row.append(", ".join(morphemes))

            # get relations to other signs
            relations = [relation.target.idgloss for relation in Relation.objects.filter(source=gloss)]
            row.append(", ".join(relations))

            # get relations to foreign signs
            relations = [relation.other_lang_gloss for relation in RelationToForeignSign.objects.filter(gloss=gloss)]
            row.append(", ".join(relations))

            # Got all the glosses (=signs) this morpheme appears in
            # NOTE(review): this repeats the parent_gloss=gloss query above and
            # reads .idgloss off MorphologyDefinition; for a morpheme one would
            # expect a morpheme=gloss filter — verify against GlossListView.
            appearsin = [appears.idgloss for appears in MorphologyDefinition.objects.filter(parent_gloss=gloss)]
            row.append(", ".join(appearsin))

            # Make it safe for weird chars
            safe_row = []
            for column in row:
                try:
                    safe_row.append(column.encode('utf-8').decode())
                except AttributeError:
                    safe_row.append(None)

            writer.writerow(safe_row)

        return response
class HandshapeDetailView(DetailView):
    """Detail page for a single Handshape, looked up by machine value (pk)."""
    model = Handshape
    template_name = 'dictionary/handshape_detail.html'
    context_object_name = 'handshape'
    search_type = 'handshape'

    # NOTE(review): an inner Meta class has no effect on a DetailView (Meta is
    # a model/form convention); verbose_name_plural/ordering here look inert —
    # confirm nothing introspects this.
    class Meta:
        verbose_name_plural = "Handshapes"
        ordering = ['machine_value']

    # Overriding the get method to get permissions right
    def get(self, request, *args, **kwargs):
        """Fetch the handshape; if no Handshape row exists yet but the machine
        value appears in the 'Handshape' FieldChoice table, lazily create the
        Handshape from that choice and show it. Otherwise report an error page."""
        match_machine_value = int(kwargs['pk'])
        try:
            self.object = self.get_object()
        except Http404:
            # check to see if this handshape has been created but not yet viewed
            # if that is the case, create a new handshape object and view that,
            # otherwise return an error
            handshapes = FieldChoice.objects.filter(field__iexact='Handshape')
            # Flag: stays truthy when no matching field choice was found.
            handshape_not_created = 1
            for o in handshapes:
                if o.machine_value == match_machine_value: # only one match
                    new_id = o.machine_value
                    new_machine_value = o.machine_value
                    new_english_name = o.english_name
                    new_dutch_name = o.dutch_name
                    new_chinese_name = o.chinese_name
                    new_handshape = Handshape(machine_value=new_machine_value, english_name=new_english_name,
                                              dutch_name=new_dutch_name, chinese_name=new_chinese_name)
                    new_handshape.save()
                    handshape_not_created = 0
                    self.object = new_handshape
                    break
            if handshape_not_created:
                return HttpResponse('<p>Handshape not configured.</p>')

        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        """Group the handshape's fields into the finger-selection /
        finger-configuration sections used by the template, and keep handshape
        search results in the session for the navigation bar."""
        try:
            context = super(HandshapeDetailView, self).get_context_data(**kwargs)
        # NOTE(review): bare except, and get_context_data is expected to return
        # a dict — returning an HttpResponse here (with a dict passed where the
        # content_type string belongs) looks wrong; confirm the intended
        # error behavior before changing.
        except:
            # return custom template
            return HttpResponse('invalid', {'content-type': 'text/plain'})

        hs = context['handshape']
        # Record the search type on the request for downstream code.
        setattr(self.request, 'search_type', self.search_type)
        labels = hs.field_labels()
        context['imageform'] = ImageUploadForHandshapeForm()
        context['choice_lists'] = {}
        context['handshape_fields'] = []
        # oChoiceLists is initialised but never filled below — presumably vestigial.
        oChoiceLists = {}
        context['handshape_fields_FS1'] = []
        context['handshape_fields_FS2'] = []
        context['handshape_fields_FC1'] = []
        context['handshape_fields_FC2'] = []
        context['handshape_fields_UF'] = []

        for field in FIELDS['handshape']:
            # Get and save the choice list for this field
            fieldchoice_category = fieldname_to_category(field)
            choice_list = FieldChoice.objects.filter(field__iexact=fieldchoice_category).order_by('machine_value')
            if len(choice_list) > 0:
                context['choice_lists'][field] = choicelist_queryset_to_translated_dict (choice_list,self.request.LANGUAGE_CODE)
            # Take the human value in the language we are using
            machine_value = getattr(hs, field)
            human_value = machine_value_to_translated_human_value(machine_value,choice_list,self.request.LANGUAGE_CODE)
            # And add the kind of field
            kind = fieldname_to_kind(field)
            field_label = labels[field]
            # Route each field into the section list the template iterates over;
            # the section "header" labels themselves are skipped.
            if field_label in ['Finger selection', 'T', 'I', 'M', 'R', 'P']:
                if field_label != 'Finger selection':
                    context['handshape_fields_FS1'].append([human_value, field, field_label, kind])
            elif field_label in ['Finger selection 2', 'T2', 'I2', 'M2', 'R2', 'P2']:
                if field_label != 'Finger selection 2':
                    context['handshape_fields_FS2'].append([human_value, field, field_label, kind])
            elif field_label in ['Unselected fingers', 'Tu', 'Iu', 'Mu', 'Ru', 'Pu']:
                if field_label != 'Unselected fingers':
                    context['handshape_fields_UF'].append([human_value, field, field_label, kind])
            # elif field_label == 'Finger configuration 1':
            #     context['handshape_fields_FC1'].append([human_value, field, field_label, kind])
            elif field_label == 'Finger configuration 2':
                context['handshape_fields_FC2'].append([human_value, field, field_label, kind])
            else:
                context['handshape_fields'].append([human_value, field, field_label, kind])

        context['choice_lists'] = json.dumps(context['choice_lists'])

        # Check the type of the current search results
        # NOTE(review): direct session key access — raises KeyError when
        # 'search_results' / 'search_type' were never set; confirm upstream
        # views always set them before this page is reachable.
        if self.request.session['search_results'] and len(self.request.session['search_results']) > 0:
            if 'gloss' in self.request.session['search_results'][0].keys():
                # The stored results are gloss hits, not handshapes: discard them.
                self.request.session['search_results'] = None

        # if there are no current handshape search results in the current session, display all of them in the navigation bar
        if self.request.session['search_type'] != 'handshape' or self.request.session['search_results'] == None:
            self.request.session['search_type'] = self.search_type
            qs = Handshape.objects.all().order_by('machine_value')
            items = []
            for item in qs:
                # Pick the name column matching the interface language.
                if self.request.LANGUAGE_CODE == 'nl':
                    items.append(dict(id=item.machine_value, handshape=item.dutch_name))
                elif self.request.LANGUAGE_CODE == 'zh-hans':
                    items.append(dict(id=item.machine_value, handshape=item.chinese_name))
                else:
                    items.append(dict(id=item.machine_value, handshape=item.english_name))
            self.request.session['search_results'] = items

        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages

        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
        else:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = False

        return context
class HomonymListView(ListView):
    """Overview of potential homonyms: non-morpheme glosses whose handedness
    and dominant handshape are both set."""
    model = Gloss
    template_name = 'dictionary/admin_homonyms_list.html'

    def get_context_data(self, **kwargs):
        """Add interface language, dataset languages and the ids of all
        candidate glosses (used to set up one ajax call per table row)."""
        context = super(HomonymListView, self).get_context_data(**kwargs)

        # The Django code for simplified Chinese differs from the two-letter
        # code stored on Language rows.
        lookup_code = 'zh' if self.request.LANGUAGE_CODE == 'zh-hans' else self.request.LANGUAGE_CODE
        candidates = Language.objects.filter(language_code_2char=lookup_code)
        context['language'] = candidates[0] if candidates else Language.objects.get(id=get_default_language_id())

        chosen_datasets = get_selected_datasets_for_user(self.request.user)
        context['dataset_languages'] = Language.objects.filter(dataset__in=chosen_datasets).distinct()
        context['SHOW_DATASET_INTERFACE_OPTIONS'] = getattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS', False)

        # this is used to set up the ajax calls, one per each focus gloss in the table
        candidate_glosses = (Gloss.none_morpheme_objects()
                                  .select_related('lemma')
                                  .filter(lemma__dataset__in=chosen_datasets)
                                  .exclude(Q(handedness__isnull=True))
                                  .exclude(Q(domhndsh__isnull=True)))
        context['ids_of_all_glosses'] = [focus_gloss.id for focus_gloss in candidate_glosses]
        return context

    def get_queryset(self):
        """Return the glosses with phonology defined, restricted to the user's
        selected datasets."""
        # Get all existing saved Homonyms
        # relation_homonyms = Relation.objects.filter(role='homonym')
        chosen_datasets = get_selected_datasets_for_user(self.request.user)
        return (Gloss.none_morpheme_objects()
                     .select_related('lemma')
                     .filter(lemma__dataset__in=chosen_datasets)
                     .exclude(Q(handedness__isnull=True))
                     .exclude(Q(domhndsh__isnull=True)))
class MinimalPairsListView(ListView):
    """Paginated overview of glosses eligible for minimal-pair comparison."""
    model = Gloss
    template_name = 'dictionary/admin_minimalpairs_list.html'
    paginate_by = 10

    def get_context_data(self, **kwargs):
        """Expose phonology field labels and page bookkeeping to the template."""
        # reformat LANGUAGE_CODE for use in dictionary domain, accommodate multilingual codings
        from signbank.tools import convert_language_code_to_2char
        interface_code_2char = convert_language_code_to_2char(self.request.LANGUAGE_CODE)
        fallback_language = Language.objects.get(id=get_default_language_id())
        fallback_code_2char = fallback_language.language_code_2char
        # Refresh the "constant" translated choice lists table
        refreshed_choice_table = generate_translated_choice_list_table()

        context = super(MinimalPairsListView, self).get_context_data(**kwargs)

        interface_languages = Language.objects.filter(language_code_2char=self.request.LANGUAGE_CODE)
        context['language'] = interface_languages[0] if interface_languages else Language.objects.get(id=get_default_language_id())

        chosen_datasets = get_selected_datasets_for_user(self.request.user)
        context['dataset_languages'] = Language.objects.filter(dataset__in=chosen_datasets).distinct()
        context['SHOW_DATASET_INTERFACE_OPTIONS'] = getattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS', False)

        # the following fields are not considered for minimal pairs
        ignored = ['locVirtObj', 'phonOth', 'mouthG', 'mouthing', 'phonetVar']
        comparable_fields = [f for f in FIELDS['phonology'] if f not in ignored]
        # Verbose names of the comparable phonology fields, keyed by field name.
        context['field_labels'] = {
            f: Gloss._meta.get_field(f).verbose_name.encode('utf-8').decode()
            for f in comparable_fields
        }

        context['page_number'] = context['page_obj'].number
        context['objects_on_page'] = [focus_gloss.id for focus_gloss in context['page_obj'].object_list]
        context['paginate_by'] = self.request.GET.get('paginate_by', self.paginate_by)
        return context

    def get_paginate_by(self, queryset):
        """Paginate by the querystring value when given, else the class default."""
        return self.request.GET.get('paginate_by', self.paginate_by)

    def get_queryset(self):
        """Glosses with phonology in the selected datasets, excluding
        finger-spelling glosses (annotation text starting with '#')."""
        chosen_datasets = get_selected_datasets_for_user(self.request.user)
        # grab gloss ids for finger spelling glosses, identified by text #.
        finger_spelling_ids = [trans.gloss_id
                               for trans in AnnotationIdglossTranslation.objects.filter(text__startswith="#")]
        return (Gloss.none_morpheme_objects()
                     .select_related('lemma')
                     .filter(lemma__dataset__in=chosen_datasets)
                     .exclude(id__in=finger_spelling_ids)
                     .exclude(Q(handedness__isnull=True))
                     .exclude(Q(domhndsh__isnull=True)))
class FrequencyListView(ListView):
    """ListView presenting, per selected dataset, frequency data for the
    phonology and semantics fields of glosses.

    The context carries sorted label and choice lookup tables used by the
    template (and its javascript) to render the frequency table and its
    field-selection pull-down.
    """
    # not sure what model should be used here, it applies to all the glosses in a dataset
    model = Dataset
    template_name = 'dictionary/admin_frequency_list.html'

    def get_context_data(self, **kwargs):
        """Extend the base context with the interface language, the user's
        selected datasets, and label/choice lookup tables for the 'list'-kind
        phonology and semantics fields."""
        # Call the base implementation first to get a context
        context = super(FrequencyListView, self).get_context_data(**kwargs)
        language_code = self.request.LANGUAGE_CODE
        # 'zh-hans' is stored under the two-character code 'zh'
        if self.request.LANGUAGE_CODE == 'zh-hans':
            languages = Language.objects.filter(language_code_2char='zh')
            language_code = 'zh'
        else:
            languages = Language.objects.filter(language_code_2char=self.request.LANGUAGE_CODE)
        if languages:
            context['language'] = languages[0]
        else:
            # no Language matches the interface code; fall back to the site default
            context['language'] = Language.objects.get(id=get_default_language_id())
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        # settings.LANGUAGES maps language codes to adjectives ('english', ...)
        # used below as FieldChoice column-name prefixes (e.g. 'english_name')
        codes_to_adjectives = dict(settings.LANGUAGES)
        if language_code not in codes_to_adjectives.keys():
            adjective = 'english'
        else:
            adjective = codes_to_adjectives[language_code].lower()
        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
        else:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
        context['dataset_ids'] = [ ds.id for ds in selected_datasets]
        # sort the phonology fields based on field label in the designated language
        # this is used for display in the template, by lookup
        field_labels = dict()
        for field in FIELDS['phonology']:
            # these handedness/handshape sub-fields are excluded from the frequency table
            if field not in ['weakprop', 'weakdrop', 'domhndsh_number', 'domhndsh_letter', 'subhndsh_number',
                             'subhndsh_letter']:
                field_kind = fieldname_to_kind(field)
                if field_kind == 'list':
                    field_label = Gloss._meta.get_field(field).verbose_name
                    # force the (possibly lazy/translated) label to a plain str
                    field_labels[field] = field_label.encode('utf-8').decode()
        # note on context variables below: there are two variables for the same data
        # the context variable field_labels_list is iterated over in the template to generate the pull-down menu
        # this pull-down has to be sorted in the destination language
        # the menu generation is done by Django as part of the form
        # after Django generates the form, it is modified by javascript to convert the options to a multiple-select
        # the javascript makes use of the labels generated by Django
        # there were some issues getting the other dict variable (field_labels) to remain sorted in the template
        # the field_labels dict is used to lookup the display names, it does not need to be sorted
        field_labels_list = [ (k, v) for (k, v) in sorted(field_labels.items(), key=lambda x: x[1])]
        context['field_labels'] = field_labels
        context['field_labels_list'] = field_labels_list
        # sort the field choices based on the designated language
        # this is used for display in the template, by lookup
        field_labels_choices = dict()
        for field, label in field_labels.items():
            field_category = fieldname_to_category(field)
            field_choices = FieldChoice.objects.filter(field__iexact=field_category).order_by(adjective+'_name')
            translated_choices = choicelist_queryset_to_translated_dict(field_choices,self.request.LANGUAGE_CODE,ordered=False,id_prefix='_',shortlist=False)
            field_labels_choices[field] = dict(translated_choices)
        context['field_labels_choices'] = field_labels_choices
        # do the same for the semantics fields
        # the code is here to keep phonology and semantics in separate dicts,
        # but at the moment all results are displayed in one table in the template
        field_labels_semantics = dict()
        for field in FIELDS['semantics']:
            field_kind = fieldname_to_kind(field)
            if field_kind == 'list':
                field_label = Gloss._meta.get_field(field).verbose_name
                field_labels_semantics[field] = field_label.encode('utf-8').decode()
        field_labels_semantics_list = [ (k, v) for (k, v) in sorted(field_labels_semantics.items(), key=lambda x: x[1])]
        context['field_labels_semantics'] = field_labels_semantics
        context['field_labels_semantics_list'] = field_labels_semantics_list
        field_labels_semantics_choices = dict()
        for field, label in field_labels_semantics.items():
            field_category = fieldname_to_category(field)
            field_choices = FieldChoice.objects.filter(field__iexact=field_category).order_by(adjective+'_name')
            translated_choices = choicelist_queryset_to_translated_dict(field_choices,self.request.LANGUAGE_CODE,ordered=False,id_prefix='_',shortlist=False)
            field_labels_semantics_choices[field] = dict(translated_choices)
        context['field_labels_semantics_choices'] = field_labels_semantics_choices
        # for ease of implementation in the template, the results of the two kinds of frequencies
        # (phonology fields, semantics fields) are displayed in the same table, the lookup tables are merged so only one loop is needed
        context['all_field_labels_choices'] = dict(field_labels_choices, **field_labels_semantics_choices)
        context['all_field_labels'] = dict(field_labels, **field_labels_semantics)
        return context

    def get_queryset(self):
        """Return the user's selected datasets with per-user profile and
        object-permission data prefetched, or None when not logged in.

        NOTE(review): `user.is_authenticated()` is called as a method here
        (pre-Django-1.10 API) — confirm against the project's Django version.
        """
        user = self.request.user
        if user.is_authenticated():
            selected_datasets = get_selected_datasets_for_user(self.request.user)
            from django.db.models import Prefetch
            qs = Dataset.objects.filter(id__in=selected_datasets).prefetch_related(
                Prefetch(
                    "userprofile_set",
                    queryset=UserProfile.objects.filter(user=user),
                    to_attr="user"
                )
            )
            # warm the guardian permission cache so per-dataset 'view_dataset'
            # checks in the template do not issue extra queries
            checker = ObjectPermissionChecker(user)
            checker.prefetch_perms(qs)
            for dataset in qs:
                checker.has_perm('view_dataset', dataset)
            return qs
        else:
            # User is not authenticated
            return None
class HandshapeListView(ListView):
    """ListView over Handshape objects with a search form, CSV export, and an
    optional 'sign_handshape' mode that converts the handshape result set into
    a queryset of signs (glosses) using those handshapes."""
    model = Handshape
    template_name = 'dictionary/admin_handshape_list.html'
    # default search mode; may be overridden by the 'search_type' GET parameter
    search_type = 'handshape'

    def get_context_data(self, **kwargs):
        """Extend the base context with the search form, counts, and the
        per-field translated choice lists used by the template."""
        # Call the base implementation first to get a context
        context = super(HandshapeListView, self).get_context_data(**kwargs)
        # Add in a QuerySet of all the books
        search_form = HandshapeSearchForm(self.request.GET)
        # Retrieve the search_type, so that we know whether the search should be restricted to Gloss or not
        if 'search_type' in self.request.GET:
            self.search_type = self.request.GET['search_type']
        else:
            self.search_type = 'handshape'
        # self.request.session['search_type'] = self.search_type
        context['searchform'] = search_form
        context['search_type'] = self.search_type
        # if self.search_type == 'sign_handshape':
        #     context['glosscount'] = Gloss.none_morpheme_objects().count()  # Only count the none-morpheme glosses
        # else:
        #     context['glosscount'] = Gloss.objects.count()  # Count the glosses + morphemes
        context['handshapefieldchoicecount'] = FieldChoice.objects.filter(field__iexact='Handshape').count()
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        context['selected_datasets'] = selected_datasets
        context['signscount'] = Gloss.objects.filter(lemma__dataset__in=selected_datasets).count()
        context['HANDSHAPE_RESULT_FIELDS'] = settings.HANDSHAPE_RESULT_FIELDS
        context['handshape_fields_FS1'] = []
        context['choice_lists'] = {}
        for field in FIELDS['handshape']:
            # Get and save the choice list for this field
            fieldchoice_category = fieldname_to_category(field)
            choice_list = FieldChoice.objects.filter(field__iexact=fieldchoice_category).order_by('machine_value')
            if len(choice_list) > 0:
                context['choice_lists'][field] = choicelist_queryset_to_translated_dict(choice_list,
                                                                                       self.request.LANGUAGE_CODE, id_prefix='')
        # serialize for consumption by the template's javascript
        context['choice_lists'] = json.dumps(context['choice_lists'])
        context['handshapescount'] = Handshape.objects.count()
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
        else:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
        return context

    def render_to_response(self, context):
        """Render as CSV when 'format=CSV' is requested, otherwise HTML."""
        # Look for a 'format=json' GET argument
        if self.request.GET.get('format') == 'CSV':
            return self.render_to_csv_response(context)
        else:
            return super(HandshapeListView, self).render_to_response(context)

    def render_to_csv_response(self, context):
        """Stream the handshape result set as a CSV attachment.

        :raises PermissionDenied: when the user lacks the export_csv permission.
        """
        if not self.request.user.has_perm('dictionary.export_csv'):
            raise PermissionDenied
        # Create the HttpResponse object with the appropriate CSV header.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="dictionary-export-handshapes.csv"'
        writer = csv.writer(response)
        if self.search_type and self.search_type == 'handshape':
            writer = write_csv_for_handshapes(self, writer)
        else:
            # CSV export is only implemented for the 'handshape' search type
            print('search type is sign')
        return response

    def get_queryset(self):
        """Filter Handshape objects by the GET query parameters; in
        'sign_handshape' mode, return the Glosses using the found handshapes.

        NOTE(review): this method has side effects — it creates Handshape
        objects for any Handshape FieldChoice that lacks one, and it stores
        'search_results' and 'search_type' in the session.
        """
        choice_lists = {}
        for field in FIELDS['handshape']:
            # Get and save the choice list for this field
            fieldchoice_category = fieldname_to_category(field)
            choice_list = FieldChoice.objects.filter(field__iexact=fieldchoice_category).order_by('machine_value')
            if len(choice_list) > 0:
                choice_lists[field] = choicelist_queryset_to_translated_dict(choice_list,
                                                                             self.request.LANGUAGE_CODE, id_prefix='')
        # get query terms from self.request
        get = self.request.GET
        # Then check what kind of stuff we want
        if 'search_type' in get:
            self.search_type = get['search_type']
        else:
            self.search_type = 'handshape'
        setattr(self.request, 'search_type', self.search_type)
        qs = Handshape.objects.all().order_by('machine_value')
        handshapes = FieldChoice.objects.filter(field__iexact='Handshape')
        # Find out if any Handshapes exist for which no Handshape object has been created
        existing_handshape_objects_machine_values = [ o.machine_value for o in qs ]
        new_handshape_created = 0
        for h in handshapes:
            if h.machine_value in existing_handshape_objects_machine_values:
                pass
            else:
                # create a new Handshape object
                # NOTE(review): new_id is assigned but never used
                new_id = h.machine_value
                new_machine_value = h.machine_value
                new_english_name = h.english_name
                new_dutch_name = h.dutch_name
                new_chinese_name = h.chinese_name
                new_handshape = Handshape(machine_value=new_machine_value, english_name=new_english_name,
                                          dutch_name=new_dutch_name, chinese_name=new_chinese_name)
                new_handshape.save()
                new_handshape_created = 1
        if new_handshape_created: # if a new Handshape object was created, reload the query result
            qs = Handshape.objects.all().order_by('machine_value')
        fieldnames = ['machine_value', 'english_name', 'dutch_name', 'chinese_name']+FIELDS['handshape']
        ## phonology and semantics field filters
        for fieldname in fieldnames:
            if fieldname in get:
                key = fieldname + '__exact'
                val = get[fieldname]
                # hsNumSel (number of selected fingers) is matched against the
                # computed sum of the five finger flags OR the stored value
                if fieldname == 'hsNumSel' and val != '':
                    fieldlabel = choice_lists[fieldname][val]
                    if fieldlabel == 'one':
                        qs = qs.annotate(
                            count_fs1=ExpressionWrapper(F('fsT') + F('fsI') + F('fsM') + F('fsR') + F('fsP'),
                                                        output_field=IntegerField())).filter(Q(count_fs1__exact=1) | Q(hsNumSel=val))
                    elif fieldlabel == 'two':
                        qs = qs.annotate(
                            count_fs1=ExpressionWrapper(F('fsT') + F('fsI') + F('fsM') + F('fsR') + F('fsP'),
                                                        output_field=IntegerField())).filter(Q(count_fs1__exact=2) | Q(hsNumSel=val))
                    elif fieldlabel == 'three':
                        qs = qs.annotate(
                            count_fs1=ExpressionWrapper(F('fsT') + F('fsI') + F('fsM') + F('fsR') + F('fsP'),
                                                        output_field=IntegerField())).filter(Q(count_fs1__exact=3) | Q(hsNumSel=val))
                    elif fieldlabel == 'four':
                        qs = qs.annotate(
                            count_fs1=ExpressionWrapper(F('fsT') + F('fsI') + F('fsM') + F('fsR') + F('fsP'),
                                                        output_field=IntegerField())).filter(Q(count_fs1__exact=4) | Q(hsNumSel=val))
                    elif fieldlabel == 'all':
                        qs = qs.annotate(
                            count_fs1=ExpressionWrapper(F('fsT') + F('fsI') + F('fsM') + F('fsR') + F('fsP'),
                                                        output_field=IntegerField())).filter(Q(count_fs1__gt=4) | Q(hsNumSel=val))
                # map form values to booleans for NullBooleanField columns
                if isinstance(Handshape._meta.get_field(fieldname), NullBooleanField):
                    val = {'0': False, '1': True, 'True': True, 'False': False, 'None': '', '': '' }[val]
                # name searches are substring matches in the interface language
                if self.request.LANGUAGE_CODE == 'nl' and fieldname == 'dutch_name' and val != '':
                    query = Q(dutch_name__icontains=val)
                    qs = qs.filter(query)
                if self.request.LANGUAGE_CODE == 'zh-hans' and fieldname == 'chinese_name' and val != '':
                    query = Q(chinese_name__icontains=val)
                    qs = qs.filter(query)
                if fieldname == 'english_name' and val != '':
                    query = Q(english_name__icontains=val)
                    qs = qs.filter(query)
                # every other non-empty field filters by exact match
                if val != '' and fieldname != 'hsNumSel' and fieldname != 'dutch_name' and fieldname != 'chinese_name' and fieldname != 'english_name':
                    kwargs = {key: val}
                    qs = qs.filter(**kwargs)
        # Handshape searching of signs relies on using the search_results in order to search signs that have the handshapes
        # The search_results is no longer set to None
        # Make sure that the QuerySet has filters applied (user is searching for something instead of showing all results [objects.all()])
        if hasattr(qs.query.where, 'children') and len(qs.query.where.children) > 0:
            items = []
            for item in qs:
                if self.request.LANGUAGE_CODE == 'nl':
                    items.append(dict(id = item.machine_value, handshape = item.dutch_name))
                elif self.request.LANGUAGE_CODE == 'zh-hans':
                    items.append(dict(id = item.machine_value, handshape = item.chinese_name))
                else:
                    items.append(dict(id = item.machine_value, handshape = item.english_name))
            self.request.session['search_results'] = items
        if ('sortOrder' in get and get['sortOrder'] != 'machine_value'):
            # User has toggled the sort order for the column
            qs = order_handshape_queryset_by_sort_order(self.request.GET, qs)
        else:
            # The default is to order the signs alphabetically by whether there is an angle bracket
            qs = order_handshape_by_angle(qs, self.request.LANGUAGE_CODE)
        if self.search_type == 'sign_handshape':
            # search for signs with found handshapes
            # find relevant machine values for handshapes
            selected_handshapes = [ h.machine_value for h in qs ]
            selected_datasets = get_selected_datasets_for_user(self.request.user)
            # when every handshape matched, also include glosses with no/zero handshape
            if len(selected_handshapes) == (Handshape.objects.all().count()):
                qs = Gloss.objects.filter(lemma__dataset__in=selected_datasets).filter(Q(domhndsh__in=selected_handshapes)
                                                                                      | Q(domhndsh__isnull=True) | Q(domhndsh__exact='0')
                                                                                      | Q(subhndsh__in=selected_handshapes) | Q(subhndsh__isnull=True) | Q(subhndsh__exact='0'))
            else:
                qs = Gloss.objects.filter(lemma__dataset__in=selected_datasets).filter(Q(domhndsh__in=selected_handshapes) | Q(subhndsh__in=selected_handshapes))
        self.request.session['search_type'] = self.search_type
        return qs
class DatasetListView(ListView):
    """ListView over datasets.

    Besides listing, this view handles two actions triggered through query
    string parameters: a user's request for view access to a dataset
    ('request_view_access=VIEW') and exporting a dataset's ECV file
    ('export_ecv=ECV').
    """
    model = Dataset
    # set the default dataset, this should not be empty
    dataset_name = DEFAULT_DATASET

    def get_context_data(self, **kwargs):
        """Add the dataset languages and the interface-options flag to the context."""
        # Call the base implementation first to get a context
        context = super(DatasetListView, self).get_context_data(**kwargs)
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        # equivalent to the hasattr/else pattern used elsewhere in this file
        context['SHOW_DATASET_INTERFACE_OPTIONS'] = getattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS', False)
        return context

    def get_template_names(self):
        """Use the 'select' template variant when the url captured a 'select' kwarg."""
        if 'select' in self.kwargs:
            return ['dictionary/admin_dataset_select_list.html']
        return ['dictionary/admin_dataset_list.html']

    def render_to_response(self, context):
        """Dispatch to the ECV export or the view-access request handler when
        the corresponding query parameter is present."""
        if self.request.GET.get('export_ecv') == 'ECV':
            return self.render_to_ecv_export_response(context)
        elif self.request.GET.get('request_view_access') == 'VIEW':
            return self.render_to_request_response(context)
        else:
            return super(DatasetListView, self).render_to_response(context)

    def render_to_request_response(self, context):
        """Handle a user's request for view access to a dataset.

        Validates the request and emails every dataset owner who is in the
        Dataset_Manager group. Always redirects back to the available-datasets
        page with a feedback message.
        """
        # check that the user is logged in
        if not self.request.user.is_authenticated():
            messages.add_message(self.request, messages.ERROR, ('Please login to use this functionality.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        # if the dataset is specified in the url parameters, set the dataset_name variable
        get = self.request.GET
        if 'dataset_name' in get:
            self.dataset_name = get['dataset_name']
        if self.dataset_name == '':
            messages.add_message(self.request, messages.ERROR, ('Dataset name must be non-empty.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        try:
            dataset_object = Dataset.objects.get(name=self.dataset_name)
        except:
            messages.add_message(self.request, messages.ERROR, ('No dataset with name '+self.dataset_name+' found.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        # the request only makes sense when the user cannot view the dataset yet.
        # BUG FIX: the previous guard (`user_view_datasets and not dataset_object
        # in user_view_datasets`) treated a user with an EMPTY permission set as
        # "can already view", wrongly rejecting their request; membership alone
        # is the correct test (x in <empty queryset> is False).
        user_view_datasets = get_objects_for_user(self.request.user, 'view_dataset', Dataset, accept_global_perms=False)
        if dataset_object in user_view_datasets:
            # this should not happen from the html page. the check is made to catch a user adding a parameter to the url
            messages.add_message(self.request, messages.INFO, ('You can already view this dataset.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        motivation = ''
        if 'motivation_for_use' in get:
            motivation = get['motivation_for_use']  # motivation is a required field in the form
        from django.contrib.auth.models import Group, User
        group_manager = Group.objects.get(name='Dataset_Manager')
        owners_of_dataset = dataset_object.owners.all()
        dataset_manager_found = False
        for owner in owners_of_dataset:
            groups_of_user = owner.groups.all()
            if not group_manager in groups_of_user:
                # this owner can't manage users
                continue
            dataset_manager_found = True
            # send email to the dataset manager
            from django.core.mail import send_mail
            current_site = Site.objects.get_current()
            subject = render_to_string('registration/dataset_access_email_subject.txt',
                                       context={'dataset': dataset_object.name,
                                                'site': current_site})
            # Email subject *must not* contain newlines
            subject = ''.join(subject.splitlines())
            message = render_to_string('registration/dataset_access_request_email.txt',
                                       context={'user': self.request.user,
                                                'dataset': dataset_object.name,
                                                'motivation': motivation,
                                                'site': current_site})
            send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [owner.email])
        if not dataset_manager_found:
            messages.add_message(self.request, messages.ERROR, ('No dataset manager has been found for '+dataset_object.name+'. Your request could not be submitted.'))
        else:
            messages.add_message(self.request, messages.INFO, ('Your request for view access to dataset '+dataset_object.name+' has been submitted.'))
        return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')

    def render_to_ecv_export_response(self, context):
        """Regenerate the ECV (external controlled vocabulary) file for the
        requested dataset; the user must be logged in and hold change
        permission on it. Always redirects to the dataset view page."""
        # check that the user is logged in
        if not self.request.user.is_authenticated():
            messages.add_message(self.request, messages.ERROR, ('Please login to use this functionality.'))
            return HttpResponseRedirect(reverse('admin_dataset_view'))
        # if the dataset is specified in the url parameters, set the dataset_name variable
        get = self.request.GET
        if 'dataset_name' in get:
            self.dataset_name = get['dataset_name']
        if self.dataset_name == '':
            messages.add_message(self.request, messages.ERROR, ('Dataset name must be non-empty.'))
            return HttpResponseRedirect(reverse('admin_dataset_view'))
        try:
            dataset_object = Dataset.objects.get(name=self.dataset_name)
        except:
            messages.add_message(self.request, messages.ERROR, ('No dataset with name '+self.dataset_name+' found.'))
            return HttpResponseRedirect(reverse('admin_dataset_view'))
        # make sure the user can write to this dataset
        user_change_datasets = get_objects_for_user(self.request.user, 'change_dataset', Dataset, accept_global_perms=False)
        if not (user_change_datasets and dataset_object in user_change_datasets):
            messages.add_message(self.request, messages.ERROR, ('No permission to export dataset.'))
            return HttpResponseRedirect(reverse('admin_dataset_view'))
        # if we get to here, the user is authenticated and has permission to export the dataset
        write_ecv_file_for_dataset(self.dataset_name)
        messages.add_message(self.request, messages.INFO, ('ECV ' + self.dataset_name + ' successfully updated.'))
        return HttpResponseRedirect(reverse('admin_dataset_view'))

    def get_queryset(self):
        """Return all datasets annotated with gloss counts, with per-user
        profile data and object permissions prefetched; None when the user is
        not authenticated."""
        user = self.request.user
        # get query terms from self.request
        get = self.request.GET
        if 'dataset_name' in get:
            self.dataset_name = get['dataset_name']
        # otherwise the default dataset_name DEFAULT_DATASET is used
        setattr(self.request, 'dataset_name', self.dataset_name)
        if not user.is_authenticated():
            # User is not authenticated
            return None
        from django.db.models import Prefetch
        qs = Dataset.objects.all().prefetch_related(
            Prefetch(
                "userprofile_set",
                queryset=UserProfile.objects.filter(user=user),
                to_attr="user"
            )
        )
        # warm the guardian permission cache so per-dataset 'view_dataset'
        # checks downstream do not issue extra queries
        checker = ObjectPermissionChecker(user)
        checker.prefetch_perms(qs)
        for dataset in qs:
            checker.has_perm('view_dataset', dataset)
        qs = qs.annotate(Count('lemmaidgloss__gloss')).order_by('name')
        return qs
class DatasetManagerView(ListView):
    """ListView for dataset managers: grants/revokes per-user view and change
    permissions on a dataset and sets its default language, driven by query
    string parameters (see render_to_response)."""
    model = Dataset
    template_name = 'dictionary/admin_dataset_manager.html'
    # set the default dataset, this should not be empty
    dataset_name = DEFAULT_DATASET
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(DatasetManagerView, self).get_context_data(**kwargs)
selected_datasets = get_selected_datasets_for_user(self.request.user)
dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
context['dataset_languages'] = dataset_languages
default_language_choice_dict = dict()
for language in dataset_languages:
default_language_choice_dict[language.name] = language.name
context['default_language_choice_list'] = json.dumps(default_language_choice_dict)
if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
else:
context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
return context
def render_to_response(self, context):
if 'add_view_perm' in self.request.GET or 'add_change_perm' in self.request.GET \
or 'delete_view_perm' in self.request.GET or 'delete_change_perm' in self.request.GET:
return self.render_to_add_user_response(context)
elif 'default_language' in self.request.GET:
return self.render_to_set_default_language()
else:
return super(DatasetManagerView, self).render_to_response(context)
def check_user_permissions_for_managing_dataset(self, dataset_object):
"""
Checks whether the logged in user has permission to manage the dataset object
:return:
"""
# check that the user is logged in
if self.request.user.is_authenticated():
pass
else:
messages.add_message(self.request, messages.ERROR, ('Please login to use this functionality.'))
return HttpResponseRedirect(reverse('admin_dataset_manager'))
# check if the user can manage this dataset
from django.contrib.auth.models import Group, User
try:
group_manager = Group.objects.get(name='Dataset_Manager')
except:
messages.add_message(self.request, messages.ERROR, ('No group Dataset_Manager found.'))
return HttpResponseRedirect(reverse('admin_dataset_manager'))
groups_of_user = self.request.user.groups.all()
if not group_manager in groups_of_user:
messages.add_message(self.request, messages.ERROR,
('You must be in group Dataset Manager to modify dataset permissions.'))
return HttpResponseRedirect(reverse('admin_dataset_manager'))
# make sure the user can write to this dataset
# from guardian.shortcuts import get_objects_for_user
user_change_datasets = get_objects_for_user(self.request.user, 'change_dataset', Dataset,
accept_global_perms=False)
if user_change_datasets and dataset_object in user_change_datasets:
pass
else:
messages.add_message(self.request, messages.ERROR, ('No permission to modify dataset permissions.'))
return HttpResponseRedirect(reverse('admin_dataset_manager'))
# Everything is alright
return None
def get_dataset_from_request(self):
"""
Use the 'dataset_name' GET query string parameter to find a dataset object
:return: tuple of a dataset object and HttpResponse in which either is None
"""
# if the dataset is specified in the url parameters, set the dataset_name variable
get = self.request.GET
if 'dataset_name' in get:
self.dataset_name = get['dataset_name']
if self.dataset_name == '':
messages.add_message(self.request, messages.ERROR, ('Dataset name must be non-empty.'))
return None, HttpResponseRedirect(reverse('admin_dataset_manager'))
try:
return Dataset.objects.get(name=self.dataset_name), None
except:
messages.add_message(self.request, messages.ERROR,
('No dataset with name ' + self.dataset_name + ' found.'))
return None, HttpResponseRedirect(reverse('admin_dataset_manager'))
def get_user_from_request(self):
"""
Use the 'username' GET query string parameter to find a user object
:return: tuple of a dataset object and HttpResponse in which either is None
"""
get = self.request.GET
username = ''
if 'username' in get:
username = get['username']
if username == '':
messages.add_message(self.request, messages.ERROR,
('Username must be non-empty. Please make a selection using the drop-down list.'))
return None, HttpResponseRedirect(reverse('admin_dataset_manager'))
try:
return User.objects.get(username=username), None
except:
messages.add_message(self.request, messages.ERROR, ('No user with name ' + username + ' found.'))
return None, HttpResponseRedirect(reverse('admin_dataset_manager'))
def render_to_set_default_language(self):
"""
Sets the default language for a dataset
:return: a HttpResponse object
"""
dataset_object, response = self.get_dataset_from_request()
if response:
return response
response = self.check_user_permissions_for_managing_dataset(dataset_object)
if response:
return response
try:
language = Language.objects.get(id=self.request.GET['default_language'])
if language in dataset_object.translation_languages.all():
dataset_object.default_language = language
dataset_object.save()
messages.add_message(self.request, messages.INFO,
('The default language of {} is set to {}.'
.format(dataset_object.acronym, language.name)))
else:
messages.add_message(self.request, messages.INFO,
('{} is not in the set of languages of dataset {}.'
.format(language.name, dataset_object.acronym)))
except:
messages.add_message(self.request, messages.ERROR,
('Something went wrong setting the default language for '
+ dataset_object.acronym))
return HttpResponseRedirect(reverse('admin_dataset_manager'))
def render_to_add_user_response(self, context):
dataset_object, response = self.get_dataset_from_request()
if response:
return response
response = self.check_user_permissions_for_managing_dataset(dataset_object)
if response:
return response
user_object, response = self.get_user_from_request()
if response:
return response
username = user_object.username
# user has permission to modify dataset permissions for other users
manage_identifier = 'dataset_' + dataset_object.acronym.replace(' ','')
from guardian.shortcuts import assign_perm, remove_perm
if 'add_view_perm' in self.request.GET:
manage_identifier += '_manage_view'
if dataset_object in get_objects_for_user(user_object, 'view_dataset', Dataset, accept_global_perms=False):
if user_object.is_staff or user_object.is_superuser:
messages.add_message(self.request, messages.INFO,
('User ' + username + ' (' + user_object.first_name + ' ' + user_object.last_name +
') already has view permission for this dataset as staff or superuser.'))
else:
messages.add_message(self.request, messages.INFO,
('User ' + username + ' (' + user_object.first_name + ' ' + user_object.last_name +
') already has view permission for this dataset.'))
return HttpResponseRedirect(reverse('admin_dataset_manager')+'?'+manage_identifier)
try:
assign_perm('view_dataset', user_object, dataset_object)
messages.add_message(self.request, messages.INFO,
('View permission for user ' + username + ' (' + user_object.first_name + ' ' + user_object.last_name + ') successfully granted.'))
if not user_object.is_active:
user_object.is_active = True
assign_perm('dictionary.search_gloss', user_object)
user_object.save()
# send email to user
from django.core.mail import send_mail
current_site = Site.objects.get_current()
subject = render_to_string('registration/dataset_access_granted_email_subject.txt',
context={'dataset': dataset_object.name,
'site': current_site})
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('registration/dataset_access_granted_email.txt',
context={'dataset': dataset_object.name,
'site': current_site})
# for debug purposes on local machine
# print('grant access subject: ', subject)
# print('message: ', message)
# print('user email: ', user_object.email)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [user_object.email])
except:
messages.add_message(self.request, messages.ERROR, ('Error assigning view dataset permission to user '+username+'.'))
return HttpResponseRedirect(reverse('admin_dataset_manager')+'?'+manage_identifier)
if 'add_change_perm' in self.request.GET:
manage_identifier += '_manage_change'
if dataset_object in get_objects_for_user(user_object, 'change_dataset', Dataset, accept_global_perms=False):
if user_object.is_staff or user_object.is_superuser:
messages.add_message(self.request, messages.INFO,
(
'User ' + username + ' (' + user_object.first_name + ' ' + user_object.last_name +
') already has change permission for this dataset as staff or superuser.'))
else:
messages.add_message(self.request, messages.INFO,
('User ' + username + ' (' + user_object.first_name + ' ' + user_object.last_name +
') already has change permission for this dataset.'))
return HttpResponseRedirect(reverse('admin_dataset_manager') + '?' + manage_identifier)
if not dataset_object in get_objects_for_user(user_object, 'view_dataset', Dataset, accept_global_perms=False):
messages.add_message(self.request, messages.WARNING,
(
'User ' + username + ' (' + user_object.first_name + ' ' + user_object.last_name +
') does not have view permission for this dataset. Please grant view permission first.'))
# open Manage View Dataset pane instead of Manage Change Dataset
manage_identifier = 'dataset_' + dataset_object.acronym.replace(' ', '')
manage_identifier += '_manage_view'
return HttpResponseRedirect(reverse('admin_dataset_manager') + '?' + manage_identifier)
try:
assign_perm('change_dataset', user_object, dataset_object)
# send email to new user
# probably don't want to assign change permission to new users
messages.add_message(self.request, messages.INFO,
('Change permission for user ' + username + ' successfully granted.'))
except:
messages.add_message(self.request, messages.ERROR, ('Error assigning change dataset permission to user '+username+'.'))
return HttpResponseRedirect(reverse('admin_dataset_manager') + '?' + manage_identifier)
if 'delete_view_perm' in self.request.GET:
manage_identifier += '_manage_view'
if dataset_object in get_objects_for_user(user_object, 'view_dataset', Dataset, accept_global_perms=False):
if user_object.is_staff or user_object.is_superuser:
messages.add_message(self.request, messages.ERROR,
(
'User ' + username + ' (' + user_object.first_name + ' ' + user_object.last_name +
') has view permission for this dataset as staff or superuser. This cannot be modified here.'))
else:
# can remove permission
try:
# also need to remove change_dataset perm in this case
from guardian.shortcuts import remove_perm
remove_perm('view_dataset', user_object, dataset_object)
remove_perm('change_dataset', user_object, dataset_object)
messages.add_message(self.request, messages.INFO,
('View (and change) permission for user ' + username + ' successfully revoked.'))
except:
messages.add_message(self.request, messages.ERROR,
('Error revoking view dataset permission for user ' + username + '.'))
return HttpResponseRedirect(reverse('admin_dataset_manager') + '?' + manage_identifier)
else:
messages.add_message(self.request, messages.ERROR, ('User '+username+' currently has no permission to view this dataset.'))
return HttpResponseRedirect(reverse('admin_dataset_manager') + '?' + manage_identifier)
if 'delete_change_perm' in self.request.GET:
manage_identifier += '_manage_change'
if dataset_object in get_objects_for_user(user_object, 'change_dataset', Dataset, accept_global_perms=False):
if user_object.is_staff or user_object.is_superuser:
messages.add_message(self.request, messages.ERROR,
(
'User ' + username + ' (' + user_object.first_name + ' ' + user_object.last_name +
') has change permission for this dataset as staff or superuser. This cannot be modified here.'))
else:
# can remove permission
try:
remove_perm('change_dataset', user_object, dataset_object)
messages.add_message(self.request, messages.INFO,
('Change permission for user ' + username + ' successfully revoked.'))
except:
messages.add_message(self.request, messages.ERROR,
('Error revoking change dataset permission for user ' + username + '.'))
return HttpResponseRedirect(reverse('admin_dataset_manager') + '?' + manage_identifier)
else:
messages.add_message(self.request, messages.ERROR, ('User '+username+' currently has no permission to change this dataset.'))
return HttpResponseRedirect(reverse('admin_dataset_manager') + '?' + manage_identifier)
# the code doesn't seem to get here. if somebody puts something else in the url (else case), there is no (hidden) csrf token.
messages.add_message(self.request, messages.ERROR, ('Unrecognised argument to dataset manager url.'))
return HttpResponseRedirect(reverse('admin_dataset_manager'))
    def get_queryset(self):
        """Return all datasets for an authenticated dataset manager, else None.

        Side effect: stores the (possibly GET-supplied) dataset_name on the
        request. Object-level 'change_dataset' permissions are prefetched and
        warmed so templates can test them without extra queries.
        """
        user = self.request.user
        # get query terms from self.request
        get = self.request.GET
        # Then check what kind of stuff we want
        if 'dataset_name' in get:
            self.dataset_name = get['dataset_name']
        # otherwise the default dataset_name DEFAULT_DATASET is used
        setattr(self.request, 'dataset_name', self.dataset_name)
        if user.is_authenticated():
            # determine if user is a dataset manager
            from django.contrib.auth.models import Group, User
            try:
                group_manager = Group.objects.get(name='Dataset_Manager')
            except:
                messages.add_message(self.request, messages.ERROR, ('No group Dataset_Manager found.'))
                # NOTE(review): ListView.get_queryset() results are treated as a
                # queryset, not a response — this redirect is likely never sent
                # to the client. Confirm whether this check belongs in get().
                return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
            groups_of_user = self.request.user.groups.all()
            if not group_manager in groups_of_user:
                return None
            from django.db.models import Prefetch
            qs = Dataset.objects.all().prefetch_related(
                Prefetch(
                    "userprofile_set",
                    queryset=UserProfile.objects.filter(user=user),
                    to_attr="user"
                )
            )
            # Warm the guardian permission cache so has_perm() below (and any
            # later checks) do not issue one query per dataset.
            checker = ObjectPermissionChecker(user)
            checker.prefetch_perms(qs)
            for dataset in qs:
                checker.has_perm('change_dataset', dataset)
            return qs
        else:
            # User is not authenticated
            return None
class DatasetDetailView(DetailView):
    """Detail page for a single Dataset.

    Also handles the 'add_owner' GET action, which lets a member of the
    Dataset_Manager group add a user as (co-)owner of a dataset.
    """
    model = Dataset
    context_object_name = 'dataset'
    template_name = 'dictionary/dataset_detail.html'
    # set the default dataset, this should not be empty
    dataset_name = DEFAULT_DATASET

    #Overriding the get method get permissions right
    def get(self, request, *args, **kwargs):
        """Fetch the dataset; any lookup failure is surfaced as a 404."""
        try:
            self.object = self.get_object()
        # except Http404:
        except:
            # return custom template
            # return render(request, 'dictionary/warning.html', status=404)
            raise Http404()
        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        """Add the dataset update form, language choices and interface flags."""
        # Call the base implementation first to get a context
        context = super(DatasetDetailView, self).get_context_data(**kwargs)
        dataset = context['dataset']
        context['default_language_choice_list'] = {}
        translation_languages = dataset.translation_languages.all()
        default_language_choice_dict = dict()
        for language in translation_languages:
            default_language_choice_dict[language.name] = language.name
        # JSON-encoded mapping of language name -> language name, used by the form.
        context['default_language_choice_list'] = json.dumps(default_language_choice_dict)
        datasetform = DatasetUpdateForm(languages=context['default_language_choice_list'])
        context['datasetform'] = datasetform
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
        else:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
        return context

    def render_to_response(self, context):
        # Dispatch the 'add_owner' GET action; otherwise render normally.
        if 'add_owner' in self.request.GET:
            return self.render_to_add_owner_response(context)
        else:
            return super(DatasetDetailView, self).render_to_response(context)

    def render_to_add_owner_response(self, context):
        """Add the user named in GET['username'] as owner of GET['dataset_name'].

        Every validation failure redirects to the datasets overview with an
        error message; success redirects to the dataset detail page.
        """
        # check that the user is logged in
        if self.request.user.is_authenticated():
            pass
        else:
            messages.add_message(self.request, messages.ERROR, ('Please login to use this functionality.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        # check if the user can manage this dataset
        from django.contrib.auth.models import Group, User
        try:
            group_manager = Group.objects.get(name='Dataset_Manager')
        except:
            messages.add_message(self.request, messages.ERROR, ('No group Dataset_Manager found.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        groups_of_user = self.request.user.groups.all()
        if not group_manager in groups_of_user:
            messages.add_message(self.request, messages.ERROR, ('You must be in group Dataset Manager to modify dataset permissions.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        # if the dataset is specified in the url parameters, set the dataset_name variable
        get = self.request.GET
        if 'dataset_name' in get:
            self.dataset_name = get['dataset_name']
        if self.dataset_name == '':
            messages.add_message(self.request, messages.ERROR, ('Dataset name must be non-empty.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        try:
            dataset_object = Dataset.objects.get(name=self.dataset_name)
        except:
            messages.add_message(self.request, messages.ERROR, ('No dataset with name '+self.dataset_name+' found.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        username = ''
        if 'username' in get:
            username = get['username']
        if username == '':
            messages.add_message(self.request, messages.ERROR, ('Username must be non-empty.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        try:
            user_object = User.objects.get(username=username)
        except:
            messages.add_message(self.request, messages.ERROR, ('No user with name '+username+' found.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        # if we get to here, we have a dataset object and a user object to add as an owner of the dataset
        dataset_object.owners.add(user_object)
        dataset_object.save()
        messages.add_message(self.request, messages.INFO,
                             ('User ' + username + ' successfully made (co-)owner of this dataset.'))
        return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/detail/' + str(dataset_object.id))
def dataset_field_choices_view(request):
    """Render the field-choices overview for every dataset the user may change."""
    # All field choices, ordered by field name, then by English name.
    all_field_choices = sorted(FieldChoice.objects.all(), key=lambda fc: (fc.field, fc.english_name))
    # Pair each dataset the user can change with the choices it excludes.
    editable_datasets = get_objects_for_user(request.user, 'change_dataset', Dataset, accept_global_perms=False)
    datasets_with_exclusions = []
    for ds in editable_datasets:
        datasets_with_exclusions.append((ds, ds.exclude_choices.all()))
    context = {
        'field_choices': all_field_choices,
        'datasets': datasets_with_exclusions,
    }
    return render(request, 'dictionary/dataset_field_choices.html', context)
def order_handshape_queryset_by_sort_order(get, qs):
    """Change the sort-order of the query set, depending on the form field [sortOrder]
    This function is used both by HandshapeListView.
    The value of [sortOrder] is 'machine_value' by default.
    [sortOrder] is a hidden field inside the "adminsearch" html form in the template admin_handshape_list.html
    Its value is changed by clicking the up/down buttons in the second row of the search result table

    Returns a plain list (not a queryset) sorted in Python.
    """
    def get_string_from_tuple_list(lstTuples, number):
        """Get the string value corresponding to a number in a list of number-string tuples"""
        # NOTE(review): this returns a (usually singleton, possibly empty) LIST
        # of matching strings, not a single string; sorting below therefore
        # compares lists, which happens to give the intended ordering.
        sBack = [tup[1] for tup in lstTuples if tup[0] == number]
        return sBack
    # Helper: order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]
    def order_queryset_by_tuple_list(qs, sOrder, sListName):
        """Order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]"""
        # Get a list of tuples for this sort-order
        tpList = build_choice_list(sListName)
        # Determine sort order: ascending is default
        bReversed = False
        if (sOrder[0:1] == '-'):
            # A starting '-' sign means: descending order
            sOrder = sOrder[1:]
            bReversed = True
        # Order the list of tuples alphabetically
        # (NOTE: they are alphabetical from 'build_choice_list()', except for the values 0,1)
        tpList = sorted(tpList, key=operator.itemgetter(1))
        # Order by the string-values in the tuple list
        return sorted(qs, key=lambda x: get_string_from_tuple_list(tpList, getattr(x, sOrder)), reverse=bReversed)
    # Set the default sort order
    sOrder = 'machine_value'  # Default sort order if nothing is specified
    # See if the form contains any sort-order information
    if ('sortOrder' in get and get['sortOrder'] != ''):
        # Take the user-indicated sort order
        sOrder = get['sortOrder']
    # The ordering method depends on the kind of field:
    # (1) text fields are ordered straightforwardly
    # (2) fields made from a choice_list need special treatment
    if (sOrder.endswith('hsThumb')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "Thumb")
    elif (sOrder.endswith('hsFingConf') or sOrder.endswith('hsFingConf2')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "JointConfiguration")
    elif (sOrder.endswith('hsAperture')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "Aperture")
    elif (sOrder.endswith('hsSpread')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "Spreading")
    elif (sOrder.endswith('hsNumSel')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "Quantity")
    elif (sOrder.endswith('hsFingSel') or sOrder.endswith('hsFingSel2') or sOrder.endswith('hsFingUnsel')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "FingerSelection")
    else:
        # Use straightforward ordering on field [sOrder]
        bReversed = False
        if (sOrder[0:1] == '-'):
            # A starting '-' sign means: descending order
            sOrder = sOrder[1:]
            bReversed = True
        # Values starting with a letter sort before values starting with
        # anything else (digits, punctuation); each group sorted separately.
        qs_letters = qs.filter(**{sOrder+'__regex':r'^[a-zA-Z]'})
        qs_special = qs.filter(**{sOrder+'__regex':r'^[^a-zA-Z]'})
        ordered = sorted(qs_letters, key=lambda x: getattr(x, sOrder))
        ordered += sorted(qs_special, key=lambda x: getattr(x, sOrder))
        if bReversed:
            ordered.reverse()
    # return the ordered list
    return ordered
def order_handshape_by_angle(qs, language_code):
    """Sort handshapes by name, placing names that contain an angle bracket '>' last.

    The name field used depends on the interface language code: 'nl' uses
    dutch_name, 'zh-hans' uses chinese_name, anything else english_name.

    Fix: the original regexes (r'^[^>]+$' vs r'^.+>.+$') silently dropped
    handshapes whose name is empty or starts/ends with '>', and the 'nl' and
    'zh-hans' branches disagreed about empty names. Using a simple
    contains/exclude split guarantees every handshape appears exactly once.

    :param qs: queryset (or any object with .filter/.exclude) of Handshape rows
    :param language_code: interface language code
    :return: plain list of handshapes, sorted in Python
    """
    # Pick the name field for the interface language.
    if language_code == 'nl':
        name_field = 'dutch_name'
    elif language_code == 'zh-hans':
        name_field = 'chinese_name'
    else:
        name_field = 'english_name'
    # Partition on whether the name contains '>', losing no rows.
    qs_no_angle = qs.exclude(**{name_field + '__contains': '>'})
    qs_angle = qs.filter(**{name_field + '__contains': '>'})
    sort_key = lambda x: getattr(x, name_field)
    ordered = sorted(qs_no_angle, key=sort_key)
    ordered += sorted(qs_angle, key=sort_key)
    return ordered
class MorphemeDetailView(DetailView):
    """Detail page for a single Morpheme, with permission checks in get()."""
    model = Morpheme
    context_object_name = 'morpheme'
    # acronym of the dataset of the last viewed morpheme; stored in the session
    last_used_dataset = None

    # Overriding the get method get permissions right
    def get(self, request, *args, **kwargs):
        """Show the morpheme if the user may view its dataset.

        Otherwise redirect to the public gloss page (if the morpheme is in
        web), to '/', or to the login page for anonymous users.
        """
        try:
            self.object = self.get_object()
        # except Http404:
        except:
            # return custom template
            # return render(request, 'dictionary/warning.html', status=404)
            raise Http404()
        if request.user.is_authenticated():
            if self.object.dataset not in get_objects_for_user(request.user, 'view_dataset', Dataset, accept_global_perms=False):
                if self.object.inWeb:
                    return HttpResponseRedirect(reverse('dictionary:public_gloss', kwargs={'glossid': self.object.pk}))
                else:
                    messages.add_message(request, messages.WARNING, 'You are not allowed to see this morpheme.')
                    return HttpResponseRedirect('/')
        else:
            if self.object.inWeb:
                return HttpResponseRedirect(reverse('dictionary:public_gloss', kwargs={'glossid': self.object.pk}))
            else:
                return HttpResponseRedirect(reverse('registration:auth_login'))
        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        """Build the (large) template context: forms, choice lists, per-language
        translations, other media, and dataset interface options.

        Side effect: stores 'datasetid' and 'last_used_dataset' in the session.
        """
        # Call the base implementation first to get a context
        context = super(MorphemeDetailView, self).get_context_data(**kwargs)
        # Add in a QuerySet of all the books
        context['tagform'] = TagUpdateForm()
        context['videoform'] = VideoUploadForGlossForm()
        context['imageform'] = ImageUploadForGlossForm()
        context['definitionform'] = DefinitionForm()
        context['relationform'] = RelationForm()
        context['morphologyform'] = MorphemeMorphologyForm()
        context['othermediaform'] = OtherMediaForm()
        context['navigation'] = context['morpheme'].navigation(True)
        context['interpform'] = InterpreterFeedbackForm()
        context['SIGN_NAVIGATION'] = settings.SIGN_NAVIGATION
        # Get the set of all the Gloss signs that point to me
        other_glosses_that_point_to_morpheme = SimultaneousMorphologyDefinition.objects.filter(morpheme_id__exact=context['morpheme'].id)
        context['appears_in'] = []
        word_class_choices = FieldChoice.objects.filter(field__iexact='WordClass')
        for sim_morph in other_glosses_that_point_to_morpheme:
            parent_gloss = sim_morph.parent_gloss
            if parent_gloss.wordClass:
                translated_word_class = machine_value_to_translated_human_value(parent_gloss.wordClass,word_class_choices,self.request.LANGUAGE_CODE)
            else:
                translated_word_class = ''
            context['appears_in'].append((parent_gloss, translated_word_class))
        try:
            # Note: setting idgloss to context['morpheme'] is not enough; the ".idgloss" needs to be specified
            next_morpheme = Morpheme.objects.get(idgloss=context['morpheme'].idgloss).admin_next_morpheme()
        except:
            next_morpheme = None
        # Fall back to this morpheme's own pk when there is no next morpheme.
        if next_morpheme == None:
            context['nextmorphemeid'] = context['morpheme'].pk
        else:
            context['nextmorphemeid'] = next_morpheme.pk
        if settings.SIGN_NAVIGATION:
            context['glosscount'] = Morpheme.objects.count()
            context['glossposn'] = Morpheme.objects.filter(sn__lt=context['morpheme'].sn).count() + 1
        # Pass info about which fields we want to see
        gl = context['morpheme']
        labels = gl.field_labels()
        # set a session variable to be able to pass the gloss's id to the ajax_complete method
        # the last_used_dataset name is updated to that of this gloss
        # if a sequesce of glosses are being created by hand, this keeps the dataset setting the same
        if gl.dataset:
            self.request.session['datasetid'] = gl.dataset.id
            self.last_used_dataset = gl.dataset.acronym
        else:
            # NOTE(review): this stores a default *language* id under the
            # 'datasetid' key — confirm this fallback is intentional.
            self.request.session['datasetid'] = get_default_language_id()
        self.request.session['last_used_dataset'] = self.last_used_dataset
        context['choice_lists'] = {}
        # Translate the machine values to human values in the correct language, and save the choice lists along the way
        for topic in ['phonology', 'semantics', 'frequency']:
            context[topic + '_fields'] = []
            for field in FIELDS[topic]:
                # Get and save the choice list for this field
                field_category = fieldname_to_category(field)
                choice_list = FieldChoice.objects.filter(field__iexact=field_category)
                if len(choice_list) > 0:
                    context['choice_lists'][field] = choicelist_queryset_to_translated_dict(choice_list,
                                                                                           self.request.LANGUAGE_CODE)
                # Take the human value in the language we are using
                machine_value = getattr(gl, field)
                human_value = machine_value_to_translated_human_value(machine_value,choice_list,self.request.LANGUAGE_CODE)
                # And add the kind of field
                kind = fieldname_to_kind(field)
                context[topic + '_fields'].append([human_value, field, labels[field], kind])
        # Gather the OtherMedia
        context['other_media'] = []
        other_media_type_choice_list = FieldChoice.objects.filter(field__iexact='OthermediaType')
        for other_media in gl.othermedia_set.all():
            human_value_media_type = machine_value_to_translated_human_value(other_media.type,other_media_type_choice_list,self.request.LANGUAGE_CODE)
            path = settings.STATIC_URL + 'othermedia/' + other_media.path
            context['other_media'].append([other_media.pk, path, human_value_media_type, other_media.alternative_gloss])
            # Save the other_media_type choices (same for every other_media, but necessary because they all have other ids)
            context['choice_lists'][
                'other-media-type_' + str(other_media.pk)] = choicelist_queryset_to_translated_dict(
                other_media_type_choice_list, self.request.LANGUAGE_CODE)
        context['choice_lists']['morph_type'] = choicelist_queryset_to_translated_dict(FieldChoice.objects.filter(field__iexact='MorphemeType'),self.request.LANGUAGE_CODE)
        # Templates expect the choice lists as a JSON string.
        context['choice_lists'] = json.dumps(context['choice_lists'])
        # make lemma group empty for Morpheme (ask Onno about this)
        # Morpheme Detail View shares the gloss_edit.js code with Gloss Detail View
        context['lemma_group'] = False
        context['lemma_group_url'] = ''
        # Put annotation_idgloss per language in the context
        context['annotation_idgloss'] = {}
        if gl.dataset:
            for language in gl.dataset.translation_languages.all():
                context['annotation_idgloss'][language] = gl.annotationidglosstranslation_set.filter(language=language)
        else:
            language = Language.objects.get(id=get_default_language_id())
            context['annotation_idgloss'][language] = gl.annotationidglosstranslation_set.filter(language=language)
        morph_typ_choices = FieldChoice.objects.filter(field__iexact='MorphemeType')
        if gl.mrpType:
            translated_morph_type = machine_value_to_translated_human_value(gl.mrpType,morph_typ_choices,self.request.LANGUAGE_CODE)
        else:
            translated_morph_type = ''
        context['morpheme_type'] = translated_morph_type
        # Put translations (keywords) per language in the context
        context['translations_per_language'] = {}
        if gl.dataset:
            for language in gl.dataset.translation_languages.all():
                context['translations_per_language'][language] = gl.translation_set.filter(language=language)
        else:
            language = Language.objects.get(id=get_default_language_id())
            context['translations_per_language'][language] = gl.translation_set.filter(language=language)
        context['separate_english_idgloss_field'] = SEPARATE_ENGLISH_IDGLOSS_FIELD
        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS') and settings.SHOW_DATASET_INTERFACE_OPTIONS:
            # Offer the datasets the user may view as acronym choices (JSON).
            context['dataset_choices'] = {}
            user = self.request.user
            if user.is_authenticated():
                qs = get_objects_for_user(user, 'view_dataset', Dataset, accept_global_perms=False)
                dataset_choices = dict()
                for dataset in qs:
                    dataset_choices[dataset.acronym] = dataset.acronym
                context['dataset_choices'] = json.dumps(dataset_choices)
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
        else:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
        context['lemma_create_field_prefix'] = LemmaCreateForm.lemma_create_field_prefix
        return context
def gloss_ajax_search_results(request):
    """Returns a JSON list of glosses that match the previous search stored in sessions"""
    session = request.session
    stored_type = session['search_type'] if 'search_type' in session.keys() else None
    # Only sign/morpheme searches produce gloss results.
    if stored_type in ('sign', 'morpheme', 'sign_or_morpheme'):
        return HttpResponse(json.dumps(session['search_results']))
    return HttpResponse(json.dumps(None))
def handshape_ajax_search_results(request):
    """Returns a JSON list of handshapes that match the previous search stored in sessions"""
    session = request.session
    # Guard clause: anything other than a stored handshape search yields null.
    if 'search_type' not in session.keys() or session['search_type'] != 'handshape':
        return HttpResponse(json.dumps(None))
    return HttpResponse(json.dumps(session['search_results']))
def gloss_ajax_complete(request, prefix):
    """Return a list of glosses matching the search term
    as a JSON structure suitable for typeahead.

    Fixes over the original:
    - translations are looked up with filter(...).first() instead of get();
      get() raises DoesNotExist/MultipleObjectsReturned, so the original's
      fallback-to-English branch was unreachable and missing translations
      crashed the request.
    - content_type is passed as a keyword; the original passed a dict as the
      second positional HttpResponse argument (the content type), producing a
      malformed Content-Type header.
    """
    # The dataset to restrict results to, as stored by the detail views.
    datasetid = request.session['datasetid']
    dataset = Dataset.objects.get(id=datasetid)
    query = Q(lemma__lemmaidglosstranslation__text__istartswith=prefix) | \
            Q(annotationidglosstranslation__text__istartswith=prefix) | \
            Q(sn__startswith=prefix)
    qs = Gloss.objects.filter(query).distinct()
    from signbank.tools import convert_language_code_to_2char
    language_code = convert_language_code_to_2char(request.LANGUAGE_CODE)
    result = []
    for g in qs:
        if g.dataset != dataset:
            continue
        default_annotationidglosstranslation = ""
        # Prefer the interface language; fall back to English; else empty.
        annotationidglosstranslation = g.annotationidglosstranslation_set.filter(
            language__language_code_2char=language_code).first()
        if annotationidglosstranslation is None:
            annotationidglosstranslation = g.annotationidglosstranslation_set.filter(
                language__language_code_2char='en').first()
        if annotationidglosstranslation is not None:
            default_annotationidglosstranslation = annotationidglosstranslation.text
        result.append({'idgloss': g.idgloss, 'annotation_idgloss': default_annotationidglosstranslation,
                       'sn': g.sn, 'pk': "%s" % (g.id)})
    return HttpResponse(json.dumps(result), content_type='application/json')
def handshape_ajax_complete(request, prefix):
    """Return a list of handshapes matching the search term
    as a JSON structure suitable for typeahead.

    Fix: content_type is passed as a keyword; the original passed a dict as the
    second positional HttpResponse argument (the content type), producing a
    malformed Content-Type header.
    """
    # Match on the handshape name in the interface language.
    if request.LANGUAGE_CODE == 'nl':
        query = Q(dutch_name__istartswith=prefix)
    elif request.LANGUAGE_CODE == 'zh-hans':
        query = Q(chinese_name__istartswith=prefix)
    else:
        query = Q(english_name__istartswith=prefix)
    qs = Handshape.objects.filter(query)
    result = []
    for g in qs:
        result.append({'dutch_name': g.dutch_name, 'english_name': g.english_name,
                       'machine_value': g.machine_value, 'chinese_name': g.chinese_name})
    return HttpResponse(json.dumps(result), content_type='application/json')
def morph_ajax_complete(request, prefix):
    """Return a list of morphs matching the search term
    as a JSON structure suitable for typeahead.

    Fixes over the original:
    - translations are looked up with filter(...).first() instead of get();
      get() raises DoesNotExist/MultipleObjectsReturned, so the original's
      fallback-to-English branch was unreachable and missing translations
      crashed the request.
    - content_type is passed as a keyword; the original passed a dict as the
      second positional HttpResponse argument (the content type).
    """
    # The dataset to restrict results to, as stored by the detail views.
    datasetid = request.session['datasetid']
    dataset = Dataset.objects.get(id=datasetid)
    query = Q(idgloss__istartswith=prefix) | \
            Q(annotationidglosstranslation__text__istartswith=prefix) | \
            Q(sn__startswith=prefix)
    qs = Morpheme.objects.filter(query).distinct()
    result = []
    for g in qs:
        if g.dataset != dataset:
            continue
        default_annotationidglosstranslation = ""
        # NOTE(review): request.LANGUAGE_CODE may be longer than two characters
        # (e.g. 'zh-hans') while this filters on language_code_2char;
        # gloss_ajax_complete converts it first — confirm which is intended.
        annotationidglosstranslation = g.annotationidglosstranslation_set.filter(
            language__language_code_2char=request.LANGUAGE_CODE).first()
        if annotationidglosstranslation is None:
            annotationidglosstranslation = g.annotationidglosstranslation_set.filter(
                language__language_code_2char='en').first()
        if annotationidglosstranslation is not None:
            default_annotationidglosstranslation = annotationidglosstranslation.text
        result.append({'idgloss': g.idgloss, 'annotation_idgloss': default_annotationidglosstranslation,
                       'sn': g.sn, 'pk': "%s" % (g.id)})
    return HttpResponse(json.dumps(result), content_type='application/json')
def user_ajax_complete(request, prefix):
    """Return a list of users matching the search term
    as a JSON structure suitable for typeahead.

    Fixes over the original:
    - last_name now uses istartswith, consistent with the (case-insensitive)
      username and first_name predicates; it was case-sensitive startswith.
    - content_type is passed as a keyword; the original passed a dict as the
      second positional HttpResponse argument (the content type).
    """
    query = Q(username__istartswith=prefix) | \
            Q(first_name__istartswith=prefix) | \
            Q(last_name__istartswith=prefix)
    qs = User.objects.filter(query).distinct()
    result = []
    for u in qs:
        result.append({'first_name': u.first_name, 'last_name': u.last_name, 'username': u.username})
    return HttpResponse(json.dumps(result), content_type='application/json')
def lemma_ajax_complete(request, dataset_id, q):
    """Return a list of lemmas in the given dataset whose translation text
    contains the search term, as a JSON structure suitable for typeahead.

    (Docstring fixed: this endpoint returns lemmas, not users. Also passes
    content_type as a keyword instead of a dict positional argument.)
    """
    lemmas = LemmaIdgloss.objects.filter(dataset_id=dataset_id, lemmaidglosstranslation__text__icontains=q)\
        .order_by('lemmaidglosstranslation__text')
    lemmas_dict = [{'pk': lemma.pk, 'lemma': str(lemma)} for lemma in lemmas]
    return HttpResponse(json.dumps(lemmas_dict), content_type='application/json')
def homonyms_ajax_complete(request, gloss_id):
    """Return the homonyms of gloss [gloss_id] as JSON:
    {gloss_id: [{'id': ..., 'gloss': translation}, ...]}.

    Fixes over the original: the bare `except:` is narrowed to Exception, and
    content_type is passed as a keyword (the original passed a dict as the
    second positional HttpResponse argument, i.e. the content type).
    """
    language_code = request.LANGUAGE_CODE
    if language_code == "zh-hans":
        language_code = "zh"
    try:
        this_gloss = Gloss.objects.get(id=gloss_id)
        homonym_objects = this_gloss.homonym_objects()
    except Exception:
        # Unknown gloss or failed homonym computation: report no homonyms.
        homonym_objects = []
    result = []
    for homonym in homonym_objects:
        # Prefer the annotation translation in the interface language,
        # then fall back to English; empty string when neither exists.
        translations = homonym.annotationidglosstranslation_set.filter(language__language_code_2char=language_code)
        if not translations:
            translations = homonym.annotationidglosstranslation_set.filter(language__language_code_3char='eng')
        translation = translations[0].text if translations else ""
        result.append({ 'id': str(homonym.id), 'gloss': translation })
    homonyms_dict = { str(gloss_id) : result }
    return HttpResponse(json.dumps(homonyms_dict), content_type='application/json')
def minimalpairs_ajax_complete(request, gloss_id, gloss_detail=False):
    """Render the minimal-pairs table (or single row) for gloss [gloss_id].

    For every minimal-pair partner of the focus gloss, translate the differing
    field's stored choices to human-readable values in the interface language
    and render either the gloss-detail table or the list-row template.

    Fix: the exception fallback for minimal_pairs_dict() now assigns an empty
    dict — the original assigned a list, and the loop below calls .items() on
    it, which would raise AttributeError whenever that except branch ran.
    """
    if 'gloss_detail' in request.GET:
        gloss_detail = request.GET['gloss_detail']
    language_code = request.LANGUAGE_CODE
    if language_code == "zh-hans":
        language_code = "zh"
    this_gloss = Gloss.objects.get(id=gloss_id)
    try:
        minimalpairs_objects = this_gloss.minimal_pairs_dict()
    except:
        # Must be a dict: iterated with .items() below.
        minimalpairs_objects = {}
    # Translation (annotation text) of the focus gloss: interface language
    # first, English as fallback, else empty.
    translation_focus_gloss = ""
    translations_this_gloss = this_gloss.annotationidglosstranslation_set.filter(language__language_code_2char=language_code)
    if translations_this_gloss is not None and len(translations_this_gloss) > 0:
        translation_focus_gloss = translations_this_gloss[0].text
    else:
        translations_this_gloss = this_gloss.annotationidglosstranslation_set.filter(language__language_code_3char='eng')
        if translations_this_gloss is not None and len(translations_this_gloss) > 0:
            translation_focus_gloss = translations_this_gloss[0].text
    result = []
    for minimalpairs_object, minimal_pairs_dict in minimalpairs_objects.items():
        other_gloss_dict = dict()
        other_gloss_dict['id'] = str(minimalpairs_object.id)
        other_gloss_dict['other_gloss'] = minimalpairs_object
        for field, values in minimal_pairs_dict.items():
            other_gloss_dict['field'] = field
            other_gloss_dict['field_display'] = values[0]
            other_gloss_dict['field_category'] = values[1]
            focus_gloss_choice = values[2]
            other_gloss_choice = values[3]
            field_kind = values[4]
            # Translate the focus gloss's stored choice for this field.
            if field_kind == 'list':
                if focus_gloss_choice:
                    try:
                        focus_gloss_value = translated_choice_lists_table[field][int(focus_gloss_choice)][language_code]
                    except:
                        focus_gloss_value = 'ERROR_' + focus_gloss_choice
                        print('Error for gloss ', minimalpairs_object.id, ' on stored choice (field: ', field, ', choice: ', focus_gloss_choice, ')')
                else:
                    focus_gloss_value = '-'
            elif field_kind == 'check':
                # Boolean-like fields; weakdrop/weakprop also allow 'Neutral'.
                if focus_gloss_choice == 'True':
                    focus_gloss_value = _('Yes')
                elif focus_gloss_choice == 'Neutral' and field in ['weakdrop', 'weakprop']:
                    focus_gloss_value = _('Neutral')
                else:
                    focus_gloss_value = _('No')
            else:
                # Text fields are shown as stored.
                focus_gloss_value = focus_gloss_choice
            other_gloss_dict['focus_gloss_value'] = focus_gloss_value
            # Translate the other gloss's stored choice the same way.
            if field_kind == 'list':
                if other_gloss_choice:
                    try:
                        other_gloss_value = translated_choice_lists_table[field][int(other_gloss_choice)][language_code]
                    except:
                        other_gloss_value = 'ERROR_' + other_gloss_choice
                        print('Error for gloss ', minimalpairs_object.id, ' on stored choice (field: ', field, ', choice: ', other_gloss_choice, ')')
                else:
                    other_gloss_value = '-'
            elif field_kind == 'check':
                if other_gloss_choice == 'True':
                    other_gloss_value = _('Yes')
                elif other_gloss_choice == 'Neutral' and field in ['weakdrop', 'weakprop']:
                    other_gloss_value = _('Neutral')
                else:
                    other_gloss_value = _('No')
            else:
                other_gloss_value = other_gloss_choice
            other_gloss_dict['other_gloss_value'] = other_gloss_value
            other_gloss_dict['field_kind'] = field_kind
        # Translation (annotation text) of the other gloss, same fallback rule.
        translation = ""
        translations = minimalpairs_object.annotationidglosstranslation_set.filter(language__language_code_2char=language_code)
        if translations is not None and len(translations) > 0:
            translation = translations[0].text
        else:
            translations = minimalpairs_object.annotationidglosstranslation_set.filter(language__language_code_3char='eng')
            if translations is not None and len(translations) > 0:
                translation = translations[0].text
        other_gloss_dict['other_gloss_idgloss'] = translation
        result.append(other_gloss_dict)
    if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
        SHOW_DATASET_INTERFACE_OPTIONS = settings.SHOW_DATASET_INTERFACE_OPTIONS
    else:
        SHOW_DATASET_INTERFACE_OPTIONS = False
    if gloss_detail:
        return render(request, 'dictionary/minimalpairs_gloss_table.html', { 'focus_gloss': this_gloss,
                                                                             'focus_gloss_translation': translation_focus_gloss,
                                                                             'SHOW_DATASET_INTERFACE_OPTIONS' : SHOW_DATASET_INTERFACE_OPTIONS,
                                                                             'minimal_pairs_dict' : result })
    else:
        return render(request, 'dictionary/minimalpairs_row.html', { 'focus_gloss': this_gloss,
                                                                     'focus_gloss_translation': translation_focus_gloss,
                                                                     'SHOW_DATASET_INTERFACE_OPTIONS' : SHOW_DATASET_INTERFACE_OPTIONS,
                                                                     'minimal_pairs_dict' : result })
class LemmaListView(ListView):
    """Paginated list of lemmas in the user's selected datasets, with CSV export
    via ?format=CSV (requires the dictionary.export_csv permission)."""
    model = LemmaIdgloss
    template_name = 'dictionary/admin_lemma_list.html'
    paginate_by = 10

    def get_queryset(self, **kwargs):
        """Restrict to the user's selected datasets; annotate gloss counts."""
        queryset = super(LemmaListView, self).get_queryset(**kwargs)
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        return queryset.filter(dataset__in=selected_datasets).annotate(num_gloss=Count('gloss'))

    def get_context_data(self, **kwargs):
        """Add dataset selection and interface flags to the context."""
        context = super(LemmaListView, self).get_context_data(**kwargs)
        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
        else:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        context['selected_datasets'] = selected_datasets
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        return context

    def render_to_response(self, context):
        # Look for a 'format=CSV' GET argument
        if self.request.GET.get('format') == 'CSV':
            return self.render_to_csv_response(context)
        else:
            return super(LemmaListView, self).render_to_response(context)

    def render_to_csv_response(self, context):
        """Stream the lemma list as a CSV attachment.

        Fix: the per-row sanitising loop built `safe_row` but the original then
        wrote `row`, leaving the sanitised values unused; we now write safe_row.
        """
        if not self.request.user.has_perm('dictionary.export_csv'):
            raise PermissionDenied
        # Create the HttpResponse object with the appropriate CSV header.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="dictionary-export-lemmas.csv"'
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        # One "Lemma ID Gloss (<language>)" column per dataset language.
        lang_attr_name = 'name_' + DEFAULT_KEYWORDS_LANGUAGE['language_code_2char']
        lemmaidglosstranslation_fields = ["Lemma ID Gloss" + " (" + getattr(language, lang_attr_name) + ")"
                                         for language in dataset_languages]
        writer = csv.writer(response)
        with override(LANGUAGE_CODE):
            header = ['Lemma ID', 'Dataset'] + lemmaidglosstranslation_fields
            writer.writerow(header)
            for lemma in self.get_queryset():
                row = [str(lemma.pk), lemma.dataset.acronym]
                for language in dataset_languages:
                    lemmaidglosstranslations = lemma.lemmaidglosstranslation_set.filter(language=language)
                    if lemmaidglosstranslations and len(lemmaidglosstranslations) == 1:
                        row.append(lemmaidglosstranslations[0].text)
                    else:
                        row.append("")
                # Make it safe for weird chars
                safe_row = []
                for column in row:
                    try:
                        safe_row.append(column.encode('utf-8').decode())
                    except AttributeError:
                        safe_row.append(None)
                # Write the sanitised row (the original mistakenly wrote `row`).
                writer.writerow(safe_row)
        return response
class LemmaCreateView(CreateView):
    """Create a new lemma with one 'Lemma ID Gloss' translation per dataset language."""
    model = LemmaIdgloss
    template_name = 'dictionary/add_lemma.html'
    fields = []

    def get_context_data(self, **kwargs):
        # Bug fix: super() was previously called as super(CreateView, self),
        # which skipped CreateView's own get_context_data.
        context = super(LemmaCreateView, self).get_context_data(**kwargs)
        context['SHOW_DATASET_INTERFACE_OPTIONS'] = getattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS', False)
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        context['selected_datasets'] = selected_datasets
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        context['add_lemma_form'] = LemmaCreateForm(self.request.GET, languages=dataset_languages, user=self.request.user)
        context['lemma_create_field_prefix'] = LemmaCreateForm.lemma_create_field_prefix
        return context

    def post(self, request, *args, **kwargs):
        """Validate that every submitted lemma translation is unique within the
        dataset, then save the new lemma and redirect to the lemma list."""
        dataset = None
        if 'dataset' in request.POST and request.POST['dataset'] is not None:
            dataset = Dataset.objects.get(pk=request.POST['dataset'])
            selected_datasets = Dataset.objects.filter(pk=request.POST['dataset'])
        else:
            selected_datasets = get_selected_datasets_for_user(request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()

        form = LemmaCreateForm(request.POST, languages=dataset_languages, user=request.user)

        for item, value in request.POST.items():
            if item.startswith(form.lemma_create_field_prefix):
                language_code_2char = item[len(form.lemma_create_field_prefix):]
                language = Language.objects.get(language_code_2char=language_code_2char)
                # NOTE(review): comparison is against the upper-cased value, as
                # elsewhere in this file — assumes translations are stored
                # upper-cased; confirm.
                lemmas_for_this_language_and_annotation_idgloss = LemmaIdgloss.objects.filter(
                    lemmaidglosstranslation__language=language,
                    lemmaidglosstranslation__text__exact=value.upper(),
                    dataset=dataset)
                if len(lemmas_for_this_language_and_annotation_idgloss) != 0:
                    return render(request, 'dictionary/warning.html',
                                  {'warning': language.name + " " + 'lemma ID Gloss not unique.'})

        if form.is_valid():
            try:
                form.save()
            except ValidationError as ve:
                messages.add_message(request, messages.ERROR, ve.message)
                return render(request, 'dictionary/add_lemma.html',
                              {'add_lemma_form': LemmaCreateForm(request.POST, user=request.user),
                               'dataset_languages': dataset_languages,
                               'selected_datasets': get_selected_datasets_for_user(request.user)})
            return HttpResponseRedirect(reverse('dictionary:admin_lemma_list'))
        else:
            # Bug fix: the invalid-form branch rendered the add_gloss template;
            # it should re-render the add_lemma form (as the except branch does).
            return render(request, 'dictionary/add_lemma.html',
                          {'add_lemma_form': form,
                           'dataset_languages': dataset_languages,
                           'selected_datasets': get_selected_datasets_for_user(request.user)})
def create_lemma_for_gloss(request, glossid):
    """Create a new lemma from POST data and attach it to gloss `glossid`.

    Checks that each submitted lemma translation is unique within the gloss's
    dataset, saves the lemma and the gloss update inside one transaction, and
    renames the gloss video to match its new path. Always redirects back to
    the referring page, reporting errors via the messages framework.
    """
    try:
        gloss = Gloss.objects.get(id=glossid)
    except ObjectDoesNotExist:
        # The id may refer to a Morpheme instead of a plain Gloss.
        # NOTE(review): assumes Morpheme exposes a `gloss` attribute — confirm.
        try:
            gloss = Morpheme.objects.get(id=glossid).gloss
        except ObjectDoesNotExist:
            messages.add_message(request, messages.ERROR, _("The specified gloss does not exist."))
            return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    dataset = gloss.dataset
    dataset_languages = dataset.translation_languages.all()
    form = LemmaCreateForm(request.POST, languages=dataset_languages, user=request.user)
    # Reject the request if any submitted translation already exists for a
    # lemma in this dataset/language combination.
    for item, value in request.POST.items():
        if item.startswith(form.lemma_create_field_prefix):
            language_code_2char = item[len(form.lemma_create_field_prefix):]
            language = Language.objects.get(language_code_2char=language_code_2char)
            lemmas_for_this_language_and_annotation_idgloss = LemmaIdgloss.objects.filter(
                lemmaidglosstranslation__language=language,
                lemmaidglosstranslation__text__exact=value.upper(),
                dataset=dataset)
            if len(lemmas_for_this_language_and_annotation_idgloss) != 0:
                messages.add_message(request, messages.ERROR, _('Lemma ID Gloss not unique for %(language)s.') % {'language': language.name})
                return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    if form.is_valid():
        try:
            # Remember the current video location BEFORE the lemma changes,
            # since the video path is derived from the gloss's lemma.
            old_video_path = settings.MEDIA_ROOT + gloss.get_video_path()
            with atomic():
                lemma = form.save()
                gloss.lemma = lemma
                gloss.save()
                new_video_path = settings.MEDIA_ROOT + gloss.get_video_path()
                # Rename video
                gloss.rename_video(old_video_path, new_video_path)
        except ValidationError as ve:
            messages.add_message(request, messages.ERROR, ve.message)
            return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    else:
        messages.add_message(request, messages.ERROR, _("The form contains errors."))
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
class LemmaUpdateView(UpdateView):
    """Update the translations of an existing lemma."""
    model = LemmaIdgloss
    success_url = reverse_lazy('dictionary:admin_lemma_list')
    page_in_lemma_list = ''
    template_name = 'dictionary/update_lemma.html'
    fields = []

    def get_context_data(self, **kwargs):
        context = super(LemmaUpdateView, self).get_context_data(**kwargs)
        # this is needed by the menu bar
        context['SHOW_DATASET_INTERFACE_OPTIONS'] = getattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS', False)
        # Get the page of the lemma list on which this lemma appears, in order
        # to return to it after the update.
        # Robustness fix: HTTP_REFERER may be absent (None) — don't crash.
        request_path = self.request.META.get('HTTP_REFERER') or ''
        path_parms = request_path.split('?page=')
        if len(path_parms) > 1:
            self.page_in_lemma_list = str(path_parms[1])
        context['page_in_lemma_list'] = self.page_in_lemma_list
        dataset = self.object.dataset
        context['dataset'] = dataset
        dataset_languages = Language.objects.filter(dataset=dataset).distinct()
        context['dataset_languages'] = dataset_languages
        context['change_lemma_form'] = LemmaUpdateForm(instance=self.object, page_in_lemma_list=self.page_in_lemma_list)
        context['lemma_create_field_prefix'] = LemmaCreateForm.lemma_create_field_prefix
        return context

    def post(self, request, *args, **kwargs):
        """Check uniqueness of the updated translations, save, and redirect
        back to the lemma list page the user came from."""
        instance = self.get_object()
        dataset = instance.dataset
        form = LemmaUpdateForm(request.POST, instance=instance)

        for item, value in request.POST.items():
            if item.startswith(form.lemma_update_field_prefix):
                if value != '':
                    language_code_2char = item[len(form.lemma_update_field_prefix):]
                    language = Language.objects.get(language_code_2char=language_code_2char)
                    lemmas_for_this_language_and_annotation_idgloss = LemmaIdgloss.objects.filter(
                        lemmaidglosstranslation__language=language,
                        lemmaidglosstranslation__text__exact=value.upper(),
                        dataset=dataset)
                    if len(lemmas_for_this_language_and_annotation_idgloss) != 0:
                        for nextLemma in lemmas_for_this_language_and_annotation_idgloss:
                            if nextLemma.id != instance.id:
                                # found a different lemma with same translation
                                return render(request, 'dictionary/warning.html',
                                              {'warning': language.name + " " + 'lemma ID Gloss not unique.'})
                else:
                    # intent to set lemma translation to empty
                    pass
            elif item.startswith('page') and value:
                # page of the lemma list where the gloss to update is displayed
                self.page_in_lemma_list = value

        if form.is_valid():
            try:
                form.save()
                messages.add_message(request, messages.INFO, _("The changes to the lemma have been saved."))
            except Exception:
                # Narrowed from a bare `except:`. A specific message is put
                # into the messages framework rather than the message caught in
                # the exception; if it's not done this way, it gives a runtime
                # error.
                messages.add_message(request, messages.ERROR, _("There must be at least one translation for this lemma."))
            # return to the same page in the list of lemmas, if available
            if self.page_in_lemma_list:
                return HttpResponseRedirect(self.success_url + '?page=' + self.page_in_lemma_list)
            else:
                return HttpResponseRedirect(self.success_url)
        else:
            return HttpResponseRedirect(reverse_lazy('dictionary:change_lemma', kwargs={'pk': instance.id}))
class LemmaDeleteView(DeleteView):
    """Delete a lemma, unless glosses still reference it."""
    model = LemmaIdgloss
    success_url = reverse_lazy('dictionary:admin_lemma_list')

    def delete(self, request, *args, **kwargs):
        self.object = self.get_object()
        # Refuse deletion while any gloss still points at this lemma.
        if not self.object.gloss_set.all():
            self.object.delete()
        else:
            messages.add_message(request, messages.ERROR, _("There are glosses using this lemma."))
        return HttpResponseRedirect(self.get_success_url())
# Issue #503: Undid deletion of code for ASL in previous commit 546fa18.
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.db.models import Q, F, ExpressionWrapper, IntegerField, Count
from django.db.models import CharField, TextField, Value as V
from django.db.models import OuterRef, Subquery
from django.db.models.functions import Concat
from django.db.models.fields import NullBooleanField
from django.db.models.sql.where import NothingNode, WhereNode
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.core.urlresolvers import reverse_lazy
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.utils.translation import override, ugettext_lazy as _
from django.forms.fields import TypedChoiceField, ChoiceField
from django.shortcuts import *
from django.contrib import messages
from django.contrib.sites.models import Site
from django.template.loader import render_to_string
from signbank.dictionary.templatetags.field_choice import get_field_choice
import csv
import operator
import re
import xml.etree.ElementTree as ET
from xml.dom import minidom
import datetime as DT
from guardian.core import ObjectPermissionChecker
from guardian.shortcuts import get_objects_for_user
from signbank.dictionary.models import *
from signbank.dictionary.forms import *
from signbank.feedback.models import *
from signbank.video.forms import VideoUploadForGlossForm
from tagging.models import Tag, TaggedItem
from signbank.settings.base import ECV_FILE,EARLIEST_GLOSS_CREATION_DATE, FIELDS, SEPARATE_ENGLISH_IDGLOSS_FIELD, LANGUAGE_CODE, ECV_SETTINGS, URL, LANGUAGE_CODE_MAP
from signbank.settings import server_specific
from signbank.settings.server_specific import *
from signbank.dictionary.translate_choice_list import machine_value_to_translated_human_value, choicelist_queryset_to_translated_dict, choicelist_queryset_to_machine_value_dict
from signbank.dictionary.forms import GlossSearchForm, MorphemeSearchForm
from signbank.tools import get_selected_datasets_for_user, write_ecv_file_for_dataset, write_csv_for_handshapes
def order_queryset_by_sort_order(get, qs):
    """Change the sort-order of the query set, depending on the form field [sortOrder]

    This function is used both by GlossListView as well as by MorphemeListView.
    The value of [sortOrder] is 'lemma__lemmaidglosstranslation__text' by default.
    [sortOrder] is a hidden field inside the "adminsearch" html form in the template admin_gloss_list.html
    Its value is changed by clicking the up/down buttons in the second row of the search result table
    """

    def get_string_from_tuple_list(lstTuples, number):
        """Get the string value corresponding to a number in a list of number-string tuples"""
        sBack = [tup[1] for tup in lstTuples if tup[0] == number]
        return sBack

    # Helper: order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]
    def order_queryset_by_tuple_list(qs, sOrder, sListName):
        """Order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]"""

        # Get a list of tuples for this sort-order
        tpList = build_choice_list(sListName)

        # Determine sort order: ascending is default
        bReversed = False
        if (sOrder[0:1] == '-'):
            # A starting '-' sign means: descending order
            sOrder = sOrder[1:]
            bReversed = True

        # Order the list of tuples alphabetically
        # (NOTE: they are alphabetical from 'build_choice_list()', except for the values 0,1)
        tpList = sorted(tpList, key=operator.itemgetter(1))

        # Order by the string-values in the tuple list
        return sorted(qs, key=lambda x: get_string_from_tuple_list(tpList, getattr(x, sOrder)), reverse=bReversed)

    def order_queryset_by_annotationidglosstranslation(qs, sOrder):
        """Order by the annotation translation text for the language encoded
        in the last two characters of [sOrder]."""
        language_code_2char = sOrder[-2:]
        sOrderAsc = sOrder
        if (sOrder[0:1] == '-'):
            # A starting '-' sign means: descending order
            sOrderAsc = sOrder[1:]
        annotationidglosstranslation = AnnotationIdglossTranslation.objects.filter(gloss=OuterRef('pk'), language__language_code_2char__iexact=language_code_2char)
        qs = qs.annotate(**{sOrderAsc: Subquery(annotationidglosstranslation.values('text')[:1])}).order_by(sOrder)
        return qs

    def order_queryset_by_lemmaidglosstranslation(qs, sOrder):
        """Order by the lemma translation text for the language encoded in the
        last two characters of [sOrder]."""
        language_code_2char = sOrder[-2:]
        sOrderAsc = sOrder
        if (sOrder[0:1] == '-'):
            # A starting '-' sign means: descending order
            sOrderAsc = sOrder[1:]
        lemmaidglosstranslation = LemmaIdglossTranslation.objects.filter(lemma=OuterRef('lemma'), language__language_code_2char__iexact=language_code_2char)
        qs = qs.annotate(**{sOrderAsc: Subquery(lemmaidglosstranslation.values('text')[:1])}).order_by(sOrder)
        return qs

    # Set the default sort order
    default_sort_order = True
    sOrder = 'annotationidglosstranslation__text'  # Default sort order if nothing is specified

    # See if the form contains any sort-order information
    if ('sortOrder' in get and get['sortOrder'] != ''):
        # Take the user-indicated sort order
        sOrder = get['sortOrder']
        default_sort_order = False

    # The ordering method depends on the kind of field:
    # (1) text fields are ordered straightforwardly
    # (2) fields made from a choice_list need special treatment
    if (sOrder.endswith('handedness')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "Handedness")
    elif (sOrder.endswith('domhndsh') or sOrder.endswith('subhndsh')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "Handshape")
    elif (sOrder.endswith('locprim')):
        ordered = order_queryset_by_tuple_list(qs, sOrder, "Location")
    elif "annotationidglosstranslation_order_" in sOrder:
        ordered = order_queryset_by_annotationidglosstranslation(qs, sOrder)
    elif "lemmaidglosstranslation_order_" in sOrder:
        ordered = order_queryset_by_lemmaidglosstranslation(qs, sOrder)
    else:
        # Use straightforward ordering on field [sOrder]
        bReversed = False
        if (sOrder[0:1] == '-'):
            # A starting '-' sign means: descending order
            sOrder = sOrder[1:]
            bReversed = True

        if default_sort_order:
            # Default sort: entries whose sort field starts with a letter (for
            # the default keywords language) come first, then everything else.
            lang_attr_name = DEFAULT_KEYWORDS_LANGUAGE['language_code_2char']
            sort_language = 'annotationidglosstranslation__language__language_code_2char'
            qs_letters = qs.filter(**{sOrder+'__regex':r'^[a-zA-Z]'}, **{sort_language:lang_attr_name})
            qs_special = qs.filter(**{sOrder+'__regex':r'^[^a-zA-Z]'}, **{sort_language:lang_attr_name})
            sort_key = sOrder
            ordered = list(qs_letters.order_by(sort_key))
            ordered += list(qs_special.order_by(sort_key))
        else:
            # NOTE(review): in this branch [ordered] is the incoming queryset
            # with no order_by on [sOrder] applied — confirm the queryset is
            # expected to arrive pre-ordered.
            ordered = qs
        if bReversed:
            # NOTE(review): when [ordered] is still a QuerySet (branch above),
            # QuerySet.reverse() returns a NEW queryset, so this in-place call
            # has no effect — confirm intended behavior.
            ordered.reverse()

    # return the ordered list
    return ordered
class GlossListView(ListView):
    """Paginated search/list view for glosses, with CSV and ECV export."""
    model = Gloss
    template_name = 'dictionary/admin_gloss_list.html'
    paginate_by = 500
    only_export_ecv = False  # Used to call the 'export ecv' functionality of this view without the need for an extra GET parameter
    search_type = 'sign'         # 'sign' restricts results to non-morpheme glosses
    view_type = 'gloss_list'     # display mode; may be overridden per request via GET
    web_search = False           # True when restricted to anonymous-visible signs
    show_all = False
    dataset_name = DEFAULT_DATASET
    last_used_dataset = None     # populated from the session when available
    def get_context_data(self, **kwargs):
        """Assemble the template context: search form (with translated choice
        fields), interface flags from settings, per-topic field/label listings,
        and the gloss count for the selected datasets."""
        # Call the base implementation first to get a context
        context = super(GlossListView, self).get_context_data(**kwargs)

        # Retrieve the search_type,so that we know whether the search should be restricted to Gloss or not
        if 'search_type' in self.request.GET:
            self.search_type = self.request.GET['search_type']
            # self.request.session['search_type'] = self.search_type

        if 'view_type' in self.request.GET:
            # user is adjusting the view, leave the rest of the context alone
            self.view_type = self.request.GET['view_type']
            context['view_type'] = self.view_type

        if 'last_used_dataset' in self.request.session.keys():
            self.last_used_dataset = self.request.session['last_used_dataset']

        if 'inWeb' in self.request.GET:
            # user is searching for signs / morphemes visible to anonymous uers
            self.web_search = self.request.GET['inWeb'] == '2'
        elif not self.request.user.is_authenticated():
            self.web_search = True
        context['web_search'] = self.web_search

        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages

        # Build (id, name) choice tuples for the sign languages of the
        # selected datasets, de-duplicated.
        selected_datasets_signlanguage = list(SignLanguage.objects.filter(dataset__in=selected_datasets))
        sign_languages = []
        for sl in selected_datasets_signlanguage:
            if not ((str(sl.id),sl.name) in sign_languages):
                sign_languages.append((str(sl.id), sl.name))

        # Dialect choices are labelled "<sign language>/<dialect>".
        selected_datasets_dialects = Dialect.objects.filter(signlanguage__in=selected_datasets_signlanguage)\
            .prefetch_related('signlanguage').distinct()
        dialects = []
        for dl in selected_datasets_dialects:
            dialect_name = dl.signlanguage.name + "/" + dl.name
            dialects.append((str(dl.id),dialect_name))

        search_form = GlossSearchForm(self.request.GET, languages=dataset_languages, sign_languages=sign_languages,
                                      dialects=dialects, language_code=self.request.LANGUAGE_CODE)

        #Translations for field choices dropdown menu
        fields_that_need_translated_options = ['hasComponentOfType','hasMorphemeOfType']
        for field_group in FIELDS.values():
            for field in field_group:
                fields_that_need_translated_options.append(field)

        for field in fields_that_need_translated_options:
            try:
                # Replace each choice field's options with translated labels
                # for the current interface language.
                if isinstance(search_form.fields[field], TypedChoiceField):
                    choices = FieldChoice.objects.filter(field__iexact=fieldname_to_category(field))
                    translated_choices = [('','---------')]+choicelist_queryset_to_translated_dict(choices,self.request.LANGUAGE_CODE,
                                                                                                  ordered=False,id_prefix='')
                    search_form.fields[field] = forms.ChoiceField(label=search_form.fields[field].label,
                                                                  choices=translated_choices,
                                                                  widget=forms.Select(attrs={'class':'form-control'}))
            except KeyError:
                # Field not present on this form; skip it.
                continue

        context['searchform'] = search_form
        context['search_type'] = self.search_type
        context['view_type'] = self.view_type
        context['web_search'] = self.web_search

        context['add_gloss_form'] = GlossCreateForm(self.request.GET, languages=dataset_languages, user=self.request.user, last_used_dataset=self.last_used_dataset)

        # Interface flags: only exposed to authenticated users.
        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS') and self.request.user.is_authenticated():
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
        else:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = False

        if hasattr(settings, 'SHOW_MORPHEME_SEARCH') and self.request.user.is_authenticated():
            context['SHOW_MORPHEME_SEARCH'] = settings.SHOW_MORPHEME_SEARCH
        else:
            context['SHOW_MORPHEME_SEARCH'] = False

        context['MULTIPLE_SELECT_GLOSS_FIELDS'] = settings.MULTIPLE_SELECT_GLOSS_FIELDS

        if hasattr(settings, 'DISABLE_MOVING_THUMBNAILS_ABOVE_NR_OF_GLOSSES'):
            context['DISABLE_MOVING_THUMBNAILS_ABOVE_NR_OF_GLOSSES'] = settings.DISABLE_MOVING_THUMBNAILS_ABOVE_NR_OF_GLOSSES
        else:
            context['DISABLE_MOVING_THUMBNAILS_ABOVE_NR_OF_GLOSSES'] = 0

        # Per-topic (fieldname, bound field, label) triples for the template.
        context['input_names_fields_and_labels'] = {}
        for topic in ['main','phonology','semantics']:
            context['input_names_fields_and_labels'][topic] = []
            for fieldname in settings.FIELDS[topic]:
                # exclude the dependent fields for Handedness, Strong Hand, and Weak Hand for purposes of nested dependencies in Search form
                if fieldname not in ['weakprop', 'weakdrop', 'domhndsh_letter', 'domhndsh_number', 'subhndsh_letter', 'subhndsh_number']:
                    field = search_form[fieldname]
                    label = field.label
                    context['input_names_fields_and_labels'][topic].append((fieldname,field,label))

        # The dependent handedness / handshape fields are exposed separately.
        context['input_names_fields_labels_handedness'] = []
        field = search_form['weakdrop']
        label = field.label
        context['input_names_fields_labels_handedness'].append(('weakdrop', field, label))
        field = search_form['weakprop']
        label = field.label
        context['input_names_fields_labels_handedness'].append(('weakprop',field,label))

        context['input_names_fields_labels_domhndsh'] = []
        field = search_form['domhndsh_letter']
        label = field.label
        context['input_names_fields_labels_domhndsh'].append(('domhndsh_letter',field,label))
        field = search_form['domhndsh_number']
        label = field.label
        context['input_names_fields_labels_domhndsh'].append(('domhndsh_number',field,label))

        context['input_names_fields_labels_subhndsh'] = []
        field = search_form['subhndsh_letter']
        label = field.label
        context['input_names_fields_labels_subhndsh'].append(('subhndsh_letter',field,label))
        field = search_form['subhndsh_number']
        label = field.label
        context['input_names_fields_labels_subhndsh'].append(('subhndsh_number',field,label))

        try:
            if self.kwargs['show_all']:
                context['show_all'] = True
        except KeyError:
            context['show_all'] = False

        context['paginate_by'] = self.request.GET.get('paginate_by', self.paginate_by)
        context['lemma_create_field_prefix'] = LemmaCreateForm.lemma_create_field_prefix
        context['generate_translated_choice_list_table'] = generate_translated_choice_list_table()

        if self.search_type == 'sign' or not self.request.user.is_authenticated():
            # Only count the none-morpheme glosses
            # this branch is slower than the other one
            context['glosscount'] = Gloss.none_morpheme_objects().select_related('lemma').select_related('dataset').filter(lemma__dataset__in=selected_datasets).count()
        else:
            context['glosscount'] = Gloss.objects.select_related('lemma').select_related('dataset').filter(lemma__dataset__in=selected_datasets).count() # Count the glosses + morphemes

        return context
def get_paginate_by(self, queryset):
"""
Paginate by specified value in querystring, or use default class property value.
"""
return self.request.GET.get('paginate_by', self.paginate_by)
def render_to_response(self, context):
# Look for a 'format=json' GET argument
if self.request.GET.get('format') == 'CSV':
return self.render_to_csv_response(context)
elif self.request.GET.get('export_ecv') == 'ECV' or self.only_export_ecv:
return self.render_to_ecv_export_response(context)
else:
return super(GlossListView, self).render_to_response(context)
def render_to_ecv_export_response(self, context):
# check that the user is logged in
if self.request.user.is_authenticated():
pass
else:
messages.add_message(self.request, messages.ERROR, ('Please login to use this functionality.'))
return HttpResponseRedirect(URL + settings.PREFIX_URL + '/signs/search/')
# if the dataset is specified in the url parameters, set the dataset_name variable
get = self.request.GET
if 'dataset_name' in get:
self.dataset_name = get['dataset_name']
if self.dataset_name == '':
messages.add_message(self.request, messages.ERROR, ('Dataset name must be non-empty.'))
return HttpResponseRedirect(URL + settings.PREFIX_URL + '/signs/search/')
try:
dataset_object = Dataset.objects.get(name=self.dataset_name)
except:
messages.add_message(self.request, messages.ERROR, ('No dataset with name '+self.dataset_name+' found.'))
return HttpResponseRedirect(URL + settings.PREFIX_URL + '/signs/search/')
# make sure the user can write to this dataset
import guardian
# from guardian.shortcuts import get_objects_for_user
user_change_datasets = guardian.shortcuts.get_objects_for_user(self.request.user, 'change_dataset', Dataset)
if user_change_datasets and dataset_object in user_change_datasets:
pass
else:
messages.add_message(self.request, messages.ERROR, ('No permission to export dataset.'))
return HttpResponseRedirect(URL + settings.PREFIX_URL + '/signs/search/')
# if we get to here, the user is authenticated and has permission to export the dataset
ecv_file = write_ecv_file_for_dataset(self.dataset_name)
messages.add_message(self.request, messages.INFO, ('ECV ' + self.dataset_name + ' successfully updated.'))
return HttpResponseRedirect(URL + settings.PREFIX_URL + '/signs/search/')
    # noinspection PyInterpreter,PyInterpreter
    def render_to_csv_response(self, context):
        """Export the current gloss queryset as a CSV attachment.

        One row per gloss: id, dataset acronym, lemma/annotation translations
        per dataset language, all configured main/phonology/semantics/frequency
        fields plus inWeb/isNew, then sign languages, dialects, keywords,
        morphology, relations, tags and notes.
        Raises PermissionDenied unless the user has 'dictionary.export_csv'.
        """
        if not self.request.user.has_perm('dictionary.export_csv'):
            raise PermissionDenied

        # Create the HttpResponse object with the appropriate CSV header.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="dictionary-export.csv"'

        # fields = [f.name for f in Gloss._meta.fields]
        #We want to manually set which fields to export here
        fieldnames = FIELDS['main']+FIELDS['phonology']+FIELDS['semantics']+FIELDS['frequency']+['inWeb', 'isNew']
        fields = [Gloss._meta.get_field(fieldname) for fieldname in fieldnames]

        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        lang_attr_name = 'name_' + DEFAULT_KEYWORDS_LANGUAGE['language_code_2char']
        annotationidglosstranslation_fields = ["Annotation ID Gloss" + " (" + getattr(language, lang_attr_name) + ")"
                                              for language in dataset_languages]
        lemmaidglosstranslation_fields = ["Lemma ID Gloss" + " (" + getattr(language, lang_attr_name) + ")"
                                         for language in dataset_languages]

        writer = csv.writer(response)

        # Field verbose names are rendered in the site language.
        with override(LANGUAGE_CODE):
            header = ['Signbank ID', 'Dataset'] + lemmaidglosstranslation_fields + annotationidglosstranslation_fields + [f.verbose_name.encode('ascii','ignore').decode() for f in fields]

        for extra_column in ['SignLanguages','Dialects','Keywords','Sequential Morphology', 'Simultaneous Morphology', 'Blend Morphology',
                             'Relations to other signs','Relations to foreign signs', 'Tags', 'Notes']:
            header.append(extra_column)

        writer.writerow(header)

        for gloss in self.get_queryset():
            row = [str(gloss.pk), gloss.lemma.dataset.acronym]
            for language in dataset_languages:
                lemmaidglosstranslations = gloss.lemma.lemmaidglosstranslation_set.filter(language=language)
                # Only emit a value when exactly one translation exists.
                if lemmaidglosstranslations and len(lemmaidglosstranslations) == 1:
                    row.append(lemmaidglosstranslations[0].text)
                else:
                    row.append("")
            for language in dataset_languages:
                annotationidglosstranslations = gloss.annotationidglosstranslation_set.filter(language=language)
                if annotationidglosstranslations and len(annotationidglosstranslations) == 1:
                    row.append(annotationidglosstranslations[0].text)
                else:
                    row.append("")

            for f in fields:
                #Try the value of the choicelist
                try:
                    value = getattr(gloss, 'get_' + f.name + '_display')()
                #If it's not there, try the raw value
                except AttributeError:
                    value = getattr(gloss,f.name)

                if f.name == 'weakdrop' or f.name == 'weakprop':
                    if value == None:
                        value = 'Neutral'

                # This was disabled with the move to Python 3... might not be needed anymore?
                # if isinstance(value,unicode):
                #     value = str(value.encode('ascii','xmlcharrefreplace'))

                if not isinstance(value,str):
                    value = str(value)

                # A handshape name can begin with =. To avoid Office thinking this is a formula, preface with '
                if value[:1] == '=':
                    value = '\'' + value

                row.append(value)

            # get languages
            signlanguages = [signlanguage.name for signlanguage in gloss.signlanguage.all()]
            row.append(", ".join(signlanguages))

            # get dialects
            dialects = [dialect.name for dialect in gloss.dialect.all()]
            row.append(", ".join(dialects))

            # get translations (keywords)
            trans = [t.translation.text + ":" + t.language.language_code_2char for t in gloss.translation_set.all()]
            row.append(", ".join(trans))

            # get morphology
            # Sequential Morphology
            morphemes = [str(morpheme.morpheme.id) for morpheme in MorphologyDefinition.objects.filter(parent_gloss=gloss)]
            row.append(", ".join(morphemes))

            # Simultaneous Morphology: "id:role" pairs
            morphemes = [(str(m.morpheme.id), m.role) for m in gloss.simultaneous_morphology.all()]
            sim_morphs = []
            for m in morphemes:
                sim_morphs.append(':'.join(m))
            simultaneous_morphemes = ', '.join(sim_morphs)
            row.append(simultaneous_morphemes)

            # Blend Morphology: "id:role" pairs
            ble_morphemes = [(str(m.glosses.id), m.role) for m in gloss.blend_morphology.all()]
            ble_morphs = []
            for m in ble_morphemes:
                ble_morphs.append(':'.join(m))
            blend_morphemes = ', '.join(ble_morphs)
            row.append(blend_morphemes)

            # get relations to other signs: "role:target_id" pairs
            relations = [(relation.role, str(relation.target.id)) for relation in Relation.objects.filter(source=gloss)]
            relations_with_categories = []
            for rel_cat in relations:
                relations_with_categories.append(':'.join(rel_cat))
            relations_categories = ", ".join(relations_with_categories)
            row.append(relations_categories)

            # get relations to foreign signs: "loan:language:gloss" triples
            relations = [(str(relation.loan), relation.other_lang, relation.other_lang_gloss) for relation in RelationToForeignSign.objects.filter(gloss=gloss)]
            relations_with_categories = []
            for rel_cat in relations:
                relations_with_categories.append(':'.join(rel_cat))
            relations_categories = ", ".join(relations_with_categories)
            row.append(relations_categories)

            # export tags (underscores rendered as spaces)
            tags_of_gloss = TaggedItem.objects.filter(object_id=gloss.id)
            tag_names_of_gloss = []
            for t_obj in tags_of_gloss:
                tag_id = t_obj.tag_id
                tag_name = Tag.objects.get(id=tag_id)
                tag_names_of_gloss += [str(tag_name).replace('_',' ')]
            tag_names = ", ".join(tag_names_of_gloss)
            row.append(tag_names)

            # export notes as "role: (published,count,text)", sorted
            note_role_choices = FieldChoice.objects.filter(field__iexact='NoteType')
            notes_of_gloss = gloss.definition_set.all()
            notes_list = []
            for note in notes_of_gloss:
                translated_note_role = machine_value_to_translated_human_value(note.role, note_role_choices, 'en')
                note_string = translated_note_role + ": (" + str(note.published) +","+ str(note.count) +","+ note.text + ")"
                notes_list.append(note_string)
            sorted_notes_list = sorted(notes_list)
            notes_display = ", ".join(sorted_notes_list)
            row.append(notes_display)

            #Make it safe for weird chars
            safe_row = []
            for column in row:
                try:
                    safe_row.append(column.encode('utf-8').decode())
                except AttributeError:
                    # Non-string values (e.g. None) pass through as None.
                    safe_row.append(None)

            writer.writerow(safe_row)

        return response
def get_queryset(self):
    """
    Build the filtered and sorted queryset of glosses for the list view.

    The queryset is narrowed step by step according to the GET parameters:
    free-text search, per-language annotation/lemma/keyword searches,
    boolean flags (inWeb, hasvideo, defspublished), phonology/semantics
    field choices, tags, relations (to local and foreign signs),
    morphology, definitions, creation-date range and creator name.

    Side effects: stores search_type / view_type / web_search on the
    request, and saves a compact summary of the results plus the search
    settings in the session (used elsewhere, e.g. by gloss_detail).
    """
    get = self.request.GET

    # First check whether we want to show everything or a subset.
    # bool() also covers a present-but-falsy 'show_all' kwarg, which
    # previously left show_all unassigned and raised NameError further down.
    try:
        show_all = bool(self.kwargs['show_all'])
    except (KeyError, TypeError):
        show_all = False

    # Then check what kind of results we want (signs vs. morphemes, ...)
    if 'search_type' in get:
        self.search_type = get['search_type']
    else:
        self.search_type = 'sign'
    setattr(self.request, 'search_type', self.search_type)

    if 'view_type' in get:
        self.view_type = get['view_type']
        # don't change the query, just change the display
    else:
        # set to default
        self.view_type = 'gloss_list'
    setattr(self.request, 'view_type', self.view_type)

    if 'inWeb' in self.request.GET:
        # user is searching for signs / morphemes visible to anonymous users
        self.web_search = self.request.GET['inWeb'] == '2'
    elif not self.request.user.is_authenticated():
        self.web_search = True
    elif not hasattr(self, 'web_search'):
        # Defensive default: an authenticated request without 'inWeb' would
        # otherwise hit an AttributeError in the setattr below (unless the
        # class itself declares web_search -- TODO confirm against the class
        # body, which is outside this chunk).
        self.web_search = False
    setattr(self.request, 'web_search', self.web_search)

    selected_datasets = get_selected_datasets_for_user(self.request.user)

    # Get the initial selection
    if len(get) > 0 or show_all:
        # anonymous users can search signs; make sure no morphemes are in the results
        if self.search_type == 'sign' or not self.request.user.is_authenticated():
            # Get all the GLOSS items that are not members of the sub-class Morpheme
            if SPEED_UP_RETRIEVING_ALL_SIGNS:
                qs = Gloss.none_morpheme_objects().select_related('lemma').prefetch_related('parent_glosses').prefetch_related('simultaneous_morphology').prefetch_related('translation_set').filter(lemma__dataset__in=selected_datasets)
            else:
                qs = Gloss.none_morpheme_objects().filter(lemma__dataset__in=selected_datasets)
        else:
            if SPEED_UP_RETRIEVING_ALL_SIGNS:
                qs = Gloss.objects.all().prefetch_related('lemma').prefetch_related('parent_glosses').prefetch_related('simultaneous_morphology').prefetch_related('translation_set').filter(lemma__dataset__in=selected_datasets)
            else:
                qs = Gloss.objects.all().filter(lemma__dataset__in=selected_datasets)
    else:
        # No filters or 'show_all' specified? Show nothing.
        qs = Gloss.objects.none()

    # users without the search permission only ever see web-published glosses
    if not self.request.user.has_perm('dictionary.search_gloss'):
        qs = qs.filter(inWeb__exact=True)

    # If we wanted to get everything, we're done now
    if show_all:
        return order_queryset_by_sort_order(self.request.GET, qs)

    # If not, we will go through a long list of filters
    if 'search' in get and get['search'] != '':
        val = get['search']
        query = Q(annotationidglosstranslation__text__iregex=val)

        # a purely numeric search term may also be a sign number
        if re.match(r'^\d+$', val):
            query = query | Q(sn__exact=val)

        qs = qs.filter(query)

    # Evaluate all gloss/language search fields (one GET key per language)
    for get_key, get_value in get.items():
        if get_key.startswith(GlossSearchForm.gloss_search_field_prefix) and get_value != '':
            language_code_2char = get_key[len(GlossSearchForm.gloss_search_field_prefix):]
            language = Language.objects.filter(language_code_2char=language_code_2char)
            qs = qs.filter(annotationidglosstranslation__text__iregex=get_value,
                           annotationidglosstranslation__language=language)
        elif get_key.startswith(GlossSearchForm.lemma_search_field_prefix) and get_value != '':
            language_code_2char = get_key[len(GlossSearchForm.lemma_search_field_prefix):]
            language = Language.objects.filter(language_code_2char=language_code_2char)
            qs = qs.filter(lemma__lemmaidglosstranslation__text__iregex=get_value,
                           lemma__lemmaidglosstranslation__language=language)
        elif get_key.startswith(GlossSearchForm.keyword_search_field_prefix) and get_value != '':
            language_code_2char = get_key[len(GlossSearchForm.keyword_search_field_prefix):]
            language = Language.objects.filter(language_code_2char=language_code_2char)
            qs = qs.filter(translation__translation__text__iregex=get_value,
                           translation__language=language)

    if 'keyword' in get and get['keyword'] != '':
        val = get['keyword']
        qs = qs.filter(translation__translation__text__iregex=val)

    # NULLBOOLEANCHOICES = [(0, '---------'), (1, 'Unknown'), (2, 'True'), (3, 'False')]
    if 'inWeb' in get and get['inWeb'] != '0':
        # Don't apply the 'inWeb' filter if it is unspecified ('0' according to NULLBOOLEANCHOICES)
        val = get['inWeb'] == '2'
        qs = qs.filter(inWeb__exact=val)

    if 'hasvideo' in get and get['hasvideo'] != 'unspecified':
        val = get['hasvideo'] == 'no'
        qs = qs.filter(glossvideo__isnull=val)

    if 'defspublished' in get and get['defspublished'] != 'unspecified':
        val = get['defspublished'] == 'yes'
        qs = qs.filter(definition__published=val)

    fieldnames = FIELDS['main'] + FIELDS['phonology'] + FIELDS['semantics'] + ['inWeb', 'isNew']

    # SignLanguage and basic property filters; getlist allows multiselect
    vals = get.getlist('dialect[]')
    if '' in vals:
        vals.remove('')
    if vals != []:
        qs = qs.filter(dialect__in=vals)

    vals = get.getlist('signlanguage[]')
    if '' in vals:
        vals.remove('')
    if vals != []:
        qs = qs.filter(signlanguage__in=vals)

    if 'useInstr' in get and get['useInstr'] != '':
        qs = qs.filter(useInstr__iregex=get['useInstr'])

    # Multiple-select gloss fields come in as "<field>[]" parameters
    for fieldnamemulti in settings.MULTIPLE_SELECT_GLOSS_FIELDS:
        fieldnamemultiVarname = fieldnamemulti + '[]'
        fieldnameQuery = fieldnamemulti + '__in'

        vals = get.getlist(fieldnamemultiVarname)
        if '' in vals:
            vals.remove('')
        if vals != []:
            qs = qs.filter(**{fieldnameQuery: vals})

    # Phonology and semantics field filters (single-valued fields only)
    fieldnames = [f for f in fieldnames if f not in settings.MULTIPLE_SELECT_GLOSS_FIELDS]

    for fieldname in fieldnames:
        if fieldname in get and get[fieldname] != '':
            field_obj = Gloss._meta.get_field(fieldname)

            # free-text fields are matched with a regex, choice fields exactly
            if type(field_obj) in [CharField, TextField] and len(field_obj.choices) == 0:
                key = fieldname + '__iregex'
            else:
                key = fieldname + '__exact'

            val = get[fieldname]

            if isinstance(field_obj, NullBooleanField):
                # map the NULLBOOLEANCHOICES form values onto real booleans
                val = {'0': '', '1': None, '2': True, '3': False}[val]

            if val != '':
                kwargs = {key: val}
                qs = qs.filter(**kwargs)

    if 'defsearch' in get and get['defsearch'] != '':
        val = get['defsearch']

        if 'defrole' in get:
            role = get['defrole']
        else:
            role = 'all'

        if role == 'all':
            qs = qs.filter(definition__text__icontains=val)
        else:
            qs = qs.filter(definition__text__icontains=val, definition__role__exact=role)

    if 'tags' in get and get['tags'] != '':
        vals = get.getlist('tags')
        tags = []
        for t in vals:
            tags.extend(Tag.objects.filter(name=t))

        # search is an implicit AND, so take the intersection
        tqs = TaggedItem.objects.get_intersection_by_model(Gloss, tags)
        qs = qs & tqs
        qs = qs.distinct()

    if 'nottags' in get and get['nottags'] != '':
        vals = get.getlist('nottags')
        tags = []
        for t in vals:
            tags.extend(Tag.objects.filter(name=t))

        # search is an implicit AND, so intersect the tags first
        tqs = TaggedItem.objects.get_intersection_by_model(Gloss, tags)
        # Exclude all of tqs from qs. Use exclude() so qs stays a QuerySet;
        # the previous list comprehension produced a plain list, which crashed
        # below at qs.select_related() / qs.query for any 'nottags' search.
        qs = qs.exclude(pk__in=[tagged.pk for tagged in tqs])

    if 'relationToForeignSign' in get and get['relationToForeignSign'] != '':
        relations = RelationToForeignSign.objects.filter(other_lang_gloss__icontains=get['relationToForeignSign'])
        potential_pks = [relation.gloss.pk for relation in relations]
        qs = qs.filter(pk__in=potential_pks)

    if 'hasRelationToForeignSign' in get and get['hasRelationToForeignSign'] != '0':
        pks_for_glosses_with_relations = [relation.gloss.pk for relation in RelationToForeignSign.objects.all()]

        if get['hasRelationToForeignSign'] == '1':
            # We only want glosses with a relation to a foreign sign
            qs = qs.filter(pk__in=pks_for_glosses_with_relations)
        elif get['hasRelationToForeignSign'] == '2':
            # We only want glosses without a relation to a foreign sign
            qs = qs.exclude(pk__in=pks_for_glosses_with_relations)

    if 'relation' in get and get['relation'] != '':
        potential_targets = Gloss.objects.filter(idgloss__icontains=get['relation'])
        relations = Relation.objects.filter(target__in=potential_targets)
        potential_pks = [relation.source.pk for relation in relations]
        qs = qs.filter(pk__in=potential_pks)

    if 'hasRelation' in get and get['hasRelation'] != '':
        # Find all relations with this role
        if get['hasRelation'] == 'all':
            relations_with_this_role = Relation.objects.all()
        else:
            relations_with_this_role = Relation.objects.filter(role__exact=get['hasRelation'])

        # Remember the pk of all glosses that take part in the collected relations
        pks_for_glosses_with_correct_relation = [relation.source.pk for relation in relations_with_this_role]
        qs = qs.filter(pk__in=pks_for_glosses_with_correct_relation)

    if 'morpheme' in get and get['morpheme'] != '':
        # 'morpheme' is the pk of a Morpheme selected from a list
        input_morpheme = get['morpheme']

        # Filter all glosses that contain this morpheme in their simultaneous morphology
        try:
            selected_morpheme = Morpheme.objects.get(pk=get['morpheme'])
            potential_pks = [appears.parent_gloss.pk for appears in SimultaneousMorphologyDefinition.objects.filter(morpheme=selected_morpheme)]
            qs = qs.filter(pk__in=potential_pks)
        except (Morpheme.DoesNotExist, ValueError):
            # This should not occur: the search form requires selecting a
            # morpheme from a list. Free-form input is simply ignored.
            # (ValueError covers a non-integer pk.)
            print("Morpheme not found: ", str(input_morpheme))

    if 'hasComponentOfType' in get and get['hasComponentOfType'] != '':
        # Look for "compound components" of the indicated type.
        # Compound components are defined in class MorphologyDefinition.
        morphdefs_with_correct_role = MorphologyDefinition.objects.filter(role__exact=get['hasComponentOfType'])
        pks_for_glosses_with_morphdefs_with_correct_role = [morphdef.parent_gloss.pk for morphdef in morphdefs_with_correct_role]
        qs = qs.filter(pk__in=pks_for_glosses_with_morphdefs_with_correct_role)

    if 'hasMorphemeOfType' in get and get['hasMorphemeOfType'] != '':
        morpheme_type = get['hasMorphemeOfType']

        # Get all morphemes of the indicated mrpType
        target_morphemes = Morpheme.objects.filter(mrpType__exact=morpheme_type)
        sim_morphemes = SimultaneousMorphologyDefinition.objects.filter(morpheme_id__in=target_morphemes)
        # Get all glosses that have one of the morphemes in this set
        glosses_with_correct_mrpType = Gloss.objects.filter(simultaneous_morphology__in=sim_morphemes)
        # Turn this into a list of pks
        pks_for_glosses_with_correct_mrpType = [glossdef.pk for glossdef in glosses_with_correct_mrpType]
        qs = qs.filter(pk__in=pks_for_glosses_with_correct_mrpType)

    if 'definitionRole' in get and get['definitionRole'] != '':
        # Find all definitions with this role
        if get['definitionRole'] == 'all':
            definitions_with_this_role = Definition.objects.all()
        else:
            definitions_with_this_role = Definition.objects.filter(role__exact=get['definitionRole'])

        # Remember the pk of all glosses that are referenced in the collected definitions
        pks_for_glosses_with_these_definitions = [definition.gloss.pk for definition in definitions_with_this_role]
        qs = qs.filter(pk__in=pks_for_glosses_with_these_definitions)

    if 'definitionContains' in get and get['definitionContains'] != '':
        definitions_with_this_text = Definition.objects.filter(text__iregex=get['definitionContains'])

        # Remember the pk of all glosses that are referenced in the collected definitions
        pks_for_glosses_with_these_definitions = [definition.gloss.pk for definition in definitions_with_this_text]
        qs = qs.filter(pk__in=pks_for_glosses_with_these_definitions)

    if 'createdBefore' in get and get['createdBefore'] != '':
        created_before_date = DT.datetime.strptime(get['createdBefore'], "%m/%d/%Y").date()
        qs = qs.filter(creationDate__range=(EARLIEST_GLOSS_CREATION_DATE, created_before_date))

    if 'createdAfter' in get and get['createdAfter'] != '':
        created_after_date = DT.datetime.strptime(get['createdAfter'], "%m/%d/%Y").date()
        qs = qs.filter(creationDate__range=(created_after_date, DT.datetime.now()))

    if 'createdBy' in get and get['createdBy'] != '':
        created_by_search_string = ' '.join(get['createdBy'].strip().split())  # remove redundant spaces
        qs = qs.annotate(
            created_by=Concat('creator__first_name', V(' '), 'creator__last_name', output_field=CharField())) \
            .filter(created_by__icontains=created_by_search_string)

    # Saving queryset results to the session; these results can then be
    # used elsewhere (like in gloss_detail)
    qs = qs.select_related('lemma')

    try:
        print('qs: ', qs.query.as_sql())
    except Exception:
        # as_sql() can raise (e.g. for an empty result set); this is debug
        # output only, so failures are ignored
        pass

    # Only store results when the queryset actually has filters applied,
    # i.e. the user searched for something instead of objects.all()
    if hasattr(qs.query.where, 'children') and len(qs.query.where.children) > 0:
        items = []

        for item in qs:
            # Prefer the annotation in the interface language, fall back to
            # English, then to the bare idgloss.
            annotationidglosstranslations = item.annotationidglosstranslation_set.filter(
                language__language_code_2char__exact=self.request.LANGUAGE_CODE
            )
            if annotationidglosstranslations and len(annotationidglosstranslations) > 0:
                items.append(dict(id=item.id, gloss=annotationidglosstranslations[0].text))
            else:
                annotationidglosstranslations = item.annotationidglosstranslation_set.filter(
                    language__language_code_2char__exact='en'
                )
                if annotationidglosstranslations and len(annotationidglosstranslations) > 0:
                    items.append(dict(id=item.id, gloss=annotationidglosstranslations[0].text))
                else:
                    items.append(dict(id=item.id, gloss=item.idgloss))

        self.request.session['search_results'] = items

    # Sort the queryset by the parameters given
    qs = order_queryset_by_sort_order(self.request.GET, qs)

    self.request.session['search_type'] = self.search_type
    self.request.session['web_search'] = self.web_search

    if not 'last_used_dataset' in self.request.session.keys():
        self.request.session['last_used_dataset'] = self.last_used_dataset

    # Return the resulting filtered and sorted queryset
    return qs
class GlossDetailView(DetailView):
    """
    Detail page for a single Gloss.

    get() enforces dataset-based view permissions (redirecting anonymous or
    unauthorised users to the public view or the login page); get_context_data()
    assembles the large template context: edit forms, translated field/choice
    lists, morphology, homonyms, notes, other media, lemma-group links and
    per-language annotations/translations.
    """

    model = Gloss
    context_object_name = 'gloss'
    # acronym of the most recently used dataset; mirrored into the session
    last_used_dataset = None

    # Overriding the get method to get the permissions right
    def get(self, request, *args, **kwargs):
        try:
            self.object = self.get_object()
        # except Http404:
        except:
            # NOTE(review): bare except converts *any* lookup failure into 404
            # return custom template
            # return render(request, 'dictionary/warning.html', status=404)
            raise Http404()

        dataset_of_requested_gloss = self.object.dataset
        datasets_user_can_view = get_objects_for_user(request.user, 'view_dataset', Dataset, accept_global_perms=False)
        selected_datasets = get_selected_datasets_for_user(self.request.user)

        if request.user.is_authenticated():
            # Authenticated users may only view glosses whose dataset is both
            # selected and viewable; otherwise show a warning or fall back to
            # the public view.
            if dataset_of_requested_gloss not in selected_datasets:
                return render(request, 'dictionary/warning.html',
                              {'warning': 'The gloss you are trying to view (' + str(
                                  self.object.id) + ') is not in your selected datasets.'})
            if dataset_of_requested_gloss not in datasets_user_can_view:
                if self.object.inWeb:
                    return HttpResponseRedirect(reverse('dictionary:public_gloss', kwargs={'glossid': self.object.pk}))
                else:
                    return render(request, 'dictionary/warning.html',
                                  {'warning': 'The gloss you are trying to view (' + str(self.object.id) + ') is not assigned to a dataset.'})
        else:
            # Anonymous users: public view for web-visible glosses, else login.
            if self.object.inWeb:
                return HttpResponseRedirect(reverse('dictionary:public_gloss', kwargs={'glossid': self.object.pk}))
            else:
                return HttpResponseRedirect(reverse('registration:auth_login'))

        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        """
        Build the template context for the gloss detail page.

        Returns the base DetailView context extended with edit forms,
        machine-value-to-human-value translations for all displayed fields,
        sequential/simultaneous/blend morphology, homonyms, grouped notes,
        other media, lemma-group links, per-language annotations and
        keywords, and dataset interface settings.
        """
        # reformat LANGUAGE_CODE for use in the dictionary domain,
        # accommodating multilingual codings
        from signbank.tools import convert_language_code_to_2char
        language_code = convert_language_code_to_2char(self.request.LANGUAGE_CODE)
        language = Language.objects.get(id=get_default_language_id())
        default_language_code = language.language_code_2char

        # Call the base implementation first to get a context
        context = super(GlossDetailView, self).get_context_data(**kwargs)

        # Forms backing the various edit panels on the page
        context['tagform'] = TagUpdateForm()
        context['videoform'] = VideoUploadForGlossForm()
        context['imageform'] = ImageUploadForGlossForm()
        context['definitionform'] = DefinitionForm()
        context['relationform'] = RelationForm()
        context['morphologyform'] = GlossMorphologyForm()
        # the role choices of the morphology form are translated to the interface language
        context['morphologyform'].fields['role'] = forms.ChoiceField(label='Type', widget=forms.Select(attrs=ATTRS_FOR_FORMS),
                                                                     choices=choicelist_queryset_to_translated_dict(FieldChoice.objects.filter(field__iexact='MorphologyType'),
                                                                                                                    self.request.LANGUAGE_CODE, ordered=False, id_prefix=''), required=True)
        context['morphemeform'] = GlossMorphemeForm()
        context['blendform'] = GlossBlendForm()
        context['othermediaform'] = OtherMediaForm()
        context['navigation'] = context['gloss'].navigation(True)
        context['interpform'] = InterpreterFeedbackForm()
        context['lemma_create_field_prefix'] = LemmaCreateForm.lemma_create_field_prefix
        context['SIGN_NAVIGATION'] = settings.SIGN_NAVIGATION

        context['handedness'] = (int(self.object.handedness) > 1) if self.object.handedness else 0  # minimal machine value is 2
        context['domhndsh'] = (int(self.object.domhndsh) > 2) if self.object.domhndsh else 0  # minimal machine value is 3
        context['tokNo'] = self.object.tokNo  # number of occurrences of the Sign, used to display stars

        # check for existence of strong hand and weak hand shapes
        try:
            strong_hand_obj = Handshape.objects.get(machine_value=self.object.domhndsh)
        except Handshape.DoesNotExist:
            strong_hand_obj = None
        context['StrongHand'] = self.object.domhndsh if strong_hand_obj else 0
        context['WeakHand'] = self.object.subhndsh

        # context['NamedEntityDefined'] = (int(self.object.namEnt) > 1) if self.object.namEnt else 0 # minimal machine value is 2
        context['SemanticFieldDefined'] = (int(self.object.semField) > 1) if self.object.semField else 0  # minimal machine value is 2
        # context['ValenceDefined'] = (int(self.object.valence) > 1) if self.object.valence else 0 # minimal machine value is 2
        # context['IconicImageDefined'] = self.object.iconImage # exists if not empty

        # id of the next gloss for navigation; stay on this gloss if there is none
        next_gloss = Gloss.objects.get(pk=context['gloss'].pk).admin_next_gloss()
        if next_gloss == None:
            context['nextglossid'] = context['gloss'].pk  # context['gloss']
        else:
            context['nextglossid'] = next_gloss.pk

        if settings.SIGN_NAVIGATION:
            context['glosscount'] = Gloss.objects.count()
            context['glossposn'] = Gloss.objects.filter(sn__lt=context['gloss'].sn).count()+1

        # Pass info about which fields we want to see
        gl = context['gloss']
        labels = gl.field_labels()

        # Set a session variable to be able to pass the gloss's id to the
        # ajax_complete method; the last_used_dataset name is updated to that
        # of this gloss. If a sequence of glosses is being created by hand,
        # this keeps the dataset setting the same.
        if gl.dataset:
            self.request.session['datasetid'] = gl.dataset.id
            self.last_used_dataset = gl.dataset.acronym
        else:
            # NOTE(review): this stores a *language* id under 'datasetid' --
            # looks inconsistent with the branch above; confirm intended.
            self.request.session['datasetid'] = get_default_language_id()

        self.request.session['last_used_dataset'] = self.last_used_dataset

        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages

        # set up weak drop / weak prop fields
        context['handedness_fields'] = []
        weak_drop = getattr(gl, 'weakdrop')
        weak_prop = getattr(gl, 'weakprop')
        context['handedness_fields'].append([weak_drop, 'weakdrop', labels['weakdrop'], 'list'])
        context['handedness_fields'].append([weak_prop, 'weakprop', labels['weakprop'], 'list'])

        # letter/number etymology checkboxes for dominant and subordinate hand
        context['etymology_fields_dom'] = []
        domhndsh_letter = getattr(gl, 'domhndsh_letter')
        domhndsh_number = getattr(gl, 'domhndsh_number')
        context['etymology_fields_sub'] = []
        subhndsh_letter = getattr(gl, 'subhndsh_letter')
        subhndsh_number = getattr(gl, 'subhndsh_number')
        context['etymology_fields_dom'].append([domhndsh_letter, 'domhndsh_letter', labels['domhndsh_letter'], 'check'])
        context['etymology_fields_dom'].append([domhndsh_number, 'domhndsh_number', labels['domhndsh_number'], 'check'])
        context['etymology_fields_sub'].append([subhndsh_letter, 'subhndsh_letter', labels['subhndsh_letter'], 'check'])
        context['etymology_fields_sub'].append([subhndsh_number, 'subhndsh_number', labels['subhndsh_number'], 'check'])

        # Translate the machine values to human values in the correct language,
        # and save the choice lists along the way
        for topic in ['main', 'phonology', 'semantics', 'frequency']:
            context[topic+'_fields'] = []

            for field in FIELDS[topic]:
                # the following check will be used when querying is added;
                # at the moment these don't appear in the phonology list
                if field not in ['weakprop', 'weakdrop', 'domhndsh_number', 'domhndsh_letter', 'subhndsh_number', 'subhndsh_letter']:
                    # Get and save the choice list for this field
                    fieldchoice_category = fieldname_to_category(field)
                    choice_list = FieldChoice.objects.filter(field__iexact=fieldchoice_category)

                    # Take the human value in the language we are using
                    machine_value = getattr(gl, field)
                    human_value = machine_value_to_translated_human_value(machine_value, choice_list, self.request.LANGUAGE_CODE)

                    # And add the kind of field
                    kind = fieldname_to_kind(field)
                    context[topic+'_fields'].append([human_value, field, labels[field], kind])

        # Collect all morphology definitions for the sequential morphology
        # section, and make some translations in advance
        morphdef_roles = FieldChoice.objects.filter(field__iexact='MorphologyType')
        morphdefs = []

        for morphdef in context['gloss'].parent_glosses.all():
            translated_role = machine_value_to_translated_human_value(morphdef.role, morphdef_roles, self.request.LANGUAGE_CODE)

            # display the morpheme's annotation in the interface language,
            # falling back to the default language, then to its id
            sign_display = str(morphdef.morpheme.id)
            morph_texts = morphdef.morpheme.get_annotationidglosstranslation_texts()
            if morph_texts.keys():
                if language_code in morph_texts.keys():
                    sign_display = morph_texts[language_code]
                else:
                    sign_display = morph_texts[default_language_code]

            morphdefs.append((morphdef, translated_role, sign_display))

        # sort by the translated role
        morphdefs = sorted(morphdefs, key=lambda tup: tup[1])
        context['morphdefs'] = morphdefs

        (homonyms_of_this_gloss, homonyms_not_saved, saved_but_not_homonyms) = gl.homonyms()
        homonyms_different_phonology = []

        for saved_gl in saved_but_not_homonyms:
            # collect the annotation translations per language for display
            homo_trans = {}
            if saved_gl.dataset:
                for language in saved_gl.dataset.translation_languages.all():
                    homo_trans[language.language_code_2char] = saved_gl.annotationidglosstranslation_set.filter(language=language)
            else:
                language = Language.objects.get(id=get_default_language_id())
                homo_trans[language.language_code_2char] = saved_gl.annotationidglosstranslation_set.filter(language=language)
            if language_code in homo_trans:
                homo_display = homo_trans[language_code][0].text
            else:
                # This should be set to the default language if the interface language hasn't been set for this gloss
                homo_display = homo_trans[default_language_code][0].text

            homonyms_different_phonology.append((saved_gl, homo_display))

        context['homonyms_different_phonology'] = homonyms_different_phonology

        homonyms_but_not_saved = []

        for homonym in homonyms_not_saved:
            homo_trans = {}
            if homonym.dataset:
                for language in homonym.dataset.translation_languages.all():
                    homo_trans[language.language_code_2char] = homonym.annotationidglosstranslation_set.filter(language=language)
            else:
                language = Language.objects.get(id=get_default_language_id())
                homo_trans[language.language_code_2char] = homonym.annotationidglosstranslation_set.filter(language=language)
            if language_code in homo_trans:
                homo_display = homo_trans[language_code][0].text
            else:
                # This should be set to the default language if the interface language hasn't been set for this gloss
                homo_display = homo_trans[default_language_code][0].text

            homonyms_but_not_saved.append((homonym, homo_display))

        context['homonyms_but_not_saved'] = homonyms_but_not_saved

        # Regroup notes by (role machine value, translated role)
        note_role_choices = FieldChoice.objects.filter(field__iexact='NoteType')
        notes = context['gloss'].definition_set.all()
        notes_groupedby_role = {}
        for note in notes:
            # print('note: ', note.id, ', ', note.role, ', ', note.published, ', ', note.text, ', ', note.count)
            translated_note_role = machine_value_to_translated_human_value(note.role, note_role_choices, self.request.LANGUAGE_CODE)
            role_id = (note.role, translated_note_role)
            if role_id not in notes_groupedby_role:
                notes_groupedby_role[role_id] = []
            notes_groupedby_role[role_id].append(note)
        context['notes_groupedby_role'] = notes_groupedby_role

        # Gather the OtherMedia
        context['other_media'] = []
        context['other_media_field_choices'] = {}
        other_media_type_choice_list = FieldChoice.objects.filter(field__iexact='OthermediaType')

        for other_media in gl.othermedia_set.all():
            human_value_media_type = machine_value_to_translated_human_value(other_media.type, other_media_type_choice_list, self.request.LANGUAGE_CODE)

            path = settings.URL+'dictionary/protected_media/othermedia/'+other_media.path
            if '/' in other_media.path:
                other_media_filename = other_media.path.split('/')[1]
            else:
                other_media_filename = other_media.path

            # derive a MIME type from the file extension (only mp4/png recognised)
            if other_media_filename.split('.')[-1] == 'mp4':
                file_type = 'video/mp4'
            elif other_media_filename.split('.')[-1] == 'png':
                file_type = 'image/png'
            else:
                file_type = ''

            context['other_media'].append([other_media.pk, path, file_type, human_value_media_type, other_media.alternative_gloss, other_media_filename])

            # Save the other_media_type choices (same for every other_media,
            # but necessary because they all have different ids)
            context['other_media_field_choices'][
                'other-media-type_' + str(other_media.pk)] = choicelist_queryset_to_translated_dict(
                other_media_type_choice_list, self.request.LANGUAGE_CODE)
        context['other_media_field_choices'] = json.dumps(context['other_media_field_choices'])

        context['separate_english_idgloss_field'] = SEPARATE_ENGLISH_IDGLOSS_FIELD

        # Build a search link to the other members of this gloss's lemma group
        try:
            lemma_group_count = gl.lemma.gloss_set.count()
            if lemma_group_count > 1:
                context['lemma_group'] = True
                lemma_group_url_params = {'search_type': 'sign', 'view_type': 'lemma_groups'}
                for lemmaidglosstranslation in gl.lemma.lemmaidglosstranslation_set.prefetch_related('language'):
                    lang_code_2char = lemmaidglosstranslation.language.language_code_2char
                    lemma_group_url_params['lemma_'+lang_code_2char] = '^' + lemmaidglosstranslation.text + '$'
                from urllib.parse import urlencode
                url_query = urlencode(lemma_group_url_params)
                url_query = ("?" + url_query) if url_query else ''
                context['lemma_group_url'] = reverse_lazy('signs_search') + url_query
            else:
                context['lemma_group'] = False
                context['lemma_group_url'] = ''
        except:
            # NOTE(review): bare except -- presumably guards against a gloss
            # without a lemma; consider narrowing the exception type.
            print("lemma_group_count: except")
            context['lemma_group'] = False
            context['lemma_group_url'] = ''

        # Put annotation_idgloss per language in the context
        context['annotation_idgloss'] = {}
        if gl.dataset:
            for language in gl.dataset.translation_languages.all():
                context['annotation_idgloss'][language] = gl.annotationidglosstranslation_set.filter(language=language)
        else:
            language = Language.objects.get(id=get_default_language_id())
            context['annotation_idgloss'][language] = gl.annotationidglosstranslation_set.filter(language=language)

        # Put translations (keywords) per language in the context
        context['translations_per_language'] = {}
        if gl.dataset:
            for language in gl.dataset.translation_languages.all():
                context['translations_per_language'][language] = gl.translation_set.filter(language=language)
        else:
            language = Language.objects.get(id=get_default_language_id())
            context['translations_per_language'][language] = gl.translation_set.filter(language=language)

        simultaneous_morphology = []
        sim_morph_typ_choices = FieldChoice.objects.filter(field__iexact='MorphemeType')

        if gl.simultaneous_morphology:
            for sim_morph in gl.simultaneous_morphology.all():
                translated_morph_type = machine_value_to_translated_human_value(sim_morph.morpheme.mrpType, sim_morph_typ_choices, self.request.LANGUAGE_CODE)

                # annotation of the morpheme per language, for display
                morpheme_annotation_idgloss = {}
                if sim_morph.morpheme.dataset:
                    for language in sim_morph.morpheme.dataset.translation_languages.all():
                        morpheme_annotation_idgloss[language.language_code_2char] = sim_morph.morpheme.annotationidglosstranslation_set.filter(language=language)
                else:
                    language = Language.objects.get(id=get_default_language_id())
                    morpheme_annotation_idgloss[language.language_code_2char] = sim_morph.morpheme.annotationidglosstranslation_set.filter(language=language)
                if language_code in morpheme_annotation_idgloss.keys():
                    morpheme_display = morpheme_annotation_idgloss[language_code][0].text
                else:
                    # This should be set to the default language if the interface language hasn't been set for this gloss
                    morpheme_display = morpheme_annotation_idgloss[default_language_code][0].text

                simultaneous_morphology.append((sim_morph, morpheme_display, translated_morph_type))

        context['simultaneous_morphology'] = simultaneous_morphology

        # Obtain the number of morphemes in the dataset of this gloss.
        # The template will not show the facility to add simultaneous
        # morphology if there are no morphemes to choose from.
        dataset_id_of_gloss = gl.dataset
        count_morphemes_in_dataset = Morpheme.objects.filter(lemma__dataset=dataset_id_of_gloss).count()
        context['count_morphemes_in_dataset'] = count_morphemes_in_dataset

        blend_morphology = []

        if gl.blend_morphology:
            for ble_morph in gl.blend_morphology.all():
                glosses_annotation_idgloss = {}
                if ble_morph.glosses.dataset:
                    for language in ble_morph.glosses.dataset.translation_languages.all():
                        glosses_annotation_idgloss[language.language_code_2char] = ble_morph.glosses.annotationidglosstranslation_set.filter(language=language)
                else:
                    language = Language.objects.get(id=get_default_language_id())
                    glosses_annotation_idgloss[language.language_code_2char] = ble_morph.glosses.annotationidglosstranslation_set.filter(language=language)
                if language_code in glosses_annotation_idgloss.keys():
                    morpheme_display = glosses_annotation_idgloss[language_code][0].text
                else:
                    # This should be set to the default language if the interface language hasn't been set for this gloss
                    morpheme_display = glosses_annotation_idgloss[default_language_code][0].text

                blend_morphology.append((ble_morph, morpheme_display))

        context['blend_morphology'] = blend_morphology

        otherrelations = []

        if gl.relation_sources:
            for oth_rel in gl.relation_sources.all():
                other_relations_dict = {}
                if oth_rel.target.dataset:
                    for language in oth_rel.target.dataset.translation_languages.all():
                        other_relations_dict[language.language_code_2char] = oth_rel.target.annotationidglosstranslation_set.filter(language=language)
                else:
                    language = Language.objects.get(id=get_default_language_id())
                    other_relations_dict[language.language_code_2char] = oth_rel.target.annotationidglosstranslation_set.filter(language=language)
                if language_code in other_relations_dict.keys():
                    target_display = other_relations_dict[language_code][0].text
                else:
                    # This should be set to the default language if the interface language hasn't been set for this gloss
                    target_display = other_relations_dict[default_language_code][0].text

                otherrelations.append((oth_rel, target_display))

        context['otherrelations'] = otherrelations

        # Offer a dataset chooser when the dataset interface is enabled
        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS') and settings.SHOW_DATASET_INTERFACE_OPTIONS:
            context['dataset_choices'] = {}
            user = self.request.user
            if user.is_authenticated():
                qs = get_objects_for_user(user, 'view_dataset', Dataset, accept_global_perms=False)
                dataset_choices = {}
                for dataset in qs:
                    dataset_choices[dataset.acronym] = dataset.acronym
                context['dataset_choices'] = json.dumps(dataset_choices)

        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS') and settings.SHOW_DATASET_INTERFACE_OPTIONS:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
        else:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = False

        if hasattr(settings, 'SHOW_LETTER_NUMBER_PHONOLOGY'):
            context['SHOW_LETTER_NUMBER_PHONOLOGY'] = settings.SHOW_LETTER_NUMBER_PHONOLOGY
        else:
            context['SHOW_LETTER_NUMBER_PHONOLOGY'] = False

        context['generate_translated_choice_list_table'] = generate_translated_choice_list_table()

        return context
class GlossRelationsDetailView(DetailView):
model = Gloss
template_name = 'dictionary/related_signs_detail_view.html'
context_object_name = 'gloss'
#Overriding the get method get permissions right
def get(self, request, *args, **kwargs):
try:
self.object = self.get_object()
except Http404:
# return custom template
return render(request, 'no_object.html', status=404)
if request.user.is_authenticated():
if self.object.dataset not in get_objects_for_user(request.user, 'view_dataset', Dataset, accept_global_perms=False):
if self.object.inWeb:
return HttpResponseRedirect(reverse('dictionary:public_gloss',kwargs={'glossid':self.object.pk}))
else:
return HttpResponse('')
else:
if self.object.inWeb:
return HttpResponseRedirect(reverse('dictionary:public_gloss', kwargs={'glossid': self.object.pk}))
else:
return HttpResponseRedirect(reverse('registration:auth_login'))
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
def get_context_data(self, **kwargs):
    """Build the detail-page context for a gloss and its relations.

    Collects, per topic, the gloss field values translated to the interface
    language, plus morphology definitions, lemma-group members, relations to
    other glosses, variants and minimal pairs.  For each related gloss a
    display text is chosen in the interface language when a translation
    exists, otherwise in the default language.
    """
    # reformat LANGUAGE_CODE for use in dictionary domain, accommodate multilingual codings
    from signbank.tools import convert_language_code_to_2char
    language_code = convert_language_code_to_2char(self.request.LANGUAGE_CODE)
    language = Language.objects.get(id=get_default_language_id())
    default_language_code = language.language_code_2char
    # Call the base implementation first to get a context
    context = super(GlossRelationsDetailView, self).get_context_data(**kwargs)
    # Forms used by the edit widgets on the detail template
    context['tagform'] = TagUpdateForm()
    context['videoform'] = VideoUploadForGlossForm()
    context['imageform'] = ImageUploadForGlossForm()
    context['definitionform'] = DefinitionForm()
    context['relationform'] = RelationForm()
    context['morphologyform'] = GlossMorphologyForm()
    # Replace the 'role' field so its choices are translated to the interface language
    context['morphologyform'].fields['role'] = forms.ChoiceField(label='Type', widget=forms.Select(attrs=ATTRS_FOR_FORMS),
                                                                 choices=choicelist_queryset_to_translated_dict(FieldChoice.objects.filter(field__iexact='MorphologyType'),
                                                                                                                self.request.LANGUAGE_CODE,ordered=False,id_prefix=''), required=True)
    context['morphemeform'] = GlossMorphemeForm()
    context['blendform'] = GlossBlendForm()
    context['othermediaform'] = OtherMediaForm()
    context['navigation'] = context['gloss'].navigation(True)
    context['interpform'] = InterpreterFeedbackForm()
    context['SIGN_NAVIGATION'] = settings.SIGN_NAVIGATION
    # Pass info about which fields we want to see
    gl = context['gloss']
    labels = gl.field_labels()
    context['choice_lists'] = {}
    # Translate the machine values to human values in the correct language, and save the choice lists along the way
    for topic in ['main','phonology','semantics','frequency']:
        context[topic+'_fields'] = []
        for field in FIELDS[topic]:
            # Get and save the choice list for this field
            fieldchoice_category = fieldname_to_category(field)
            choice_list = FieldChoice.objects.filter(field__iexact=fieldchoice_category)
            if len(choice_list) > 0:
                context['choice_lists'][field] = choicelist_queryset_to_translated_dict (choice_list,self.request.LANGUAGE_CODE)
            # Take the human value in the language we are using
            machine_value = getattr(gl,field)
            human_value = machine_value_to_translated_human_value(machine_value,choice_list,self.request.LANGUAGE_CODE)
            # And add the kind of field
            kind = fieldname_to_kind(field)
            context[topic+'_fields'].append([human_value,field,labels[field],kind])
    # Add morphology to choice lists
    context['choice_lists']['morphology_role'] = choicelist_queryset_to_translated_dict(FieldChoice.objects.filter(field__iexact='MorphologyType'),
                                                                                        self.request.LANGUAGE_CODE)
    # Collect all morphology definitions for the sequential morphology section, and make some translations in advance
    morphdef_roles = FieldChoice.objects.filter(field__iexact='MorphologyType')
    morphdefs = []
    for morphdef in context['gloss'].parent_glosses.all():
        translated_role = machine_value_to_translated_human_value(morphdef.role,morphdef_roles,self.request.LANGUAGE_CODE)
        # Fall back to the morpheme's id when it has no annotation translations at all
        sign_display = str(morphdef.morpheme.id)
        morph_texts = morphdef.morpheme.get_annotationidglosstranslation_texts()
        if morph_texts.keys():
            if language_code in morph_texts.keys():
                sign_display = morph_texts[language_code]
            else:
                sign_display = morph_texts[default_language_code]
        morphdefs.append((morphdef,translated_role,sign_display))
    context['morphdefs'] = morphdefs
    context['separate_english_idgloss_field'] = SEPARATE_ENGLISH_IDGLOSS_FIELD
    # Determine whether this gloss is part of a lemma group (> 1 gloss sharing its lemma)
    # and, if so, build a search URL that lists the whole group.
    try:
        lemma_group_count = gl.lemma.gloss_set.count()
        if lemma_group_count > 1:
            context['lemma_group'] = True
            lemma_group_url_params = {'search_type': 'sign', 'view_type': 'lemma_groups'}
            for lemmaidglosstranslation in gl.lemma.lemmaidglosstranslation_set.prefetch_related('language'):
                lang_code_2char = lemmaidglosstranslation.language.language_code_2char
                # Anchor the lemma text so the search matches it exactly
                lemma_group_url_params['lemma_'+lang_code_2char] = '^' + lemmaidglosstranslation.text + '$'
            from urllib.parse import urlencode
            url_query = urlencode(lemma_group_url_params)
            url_query = ("?" + url_query) if url_query else ''
            context['lemma_group_url'] = reverse_lazy('signs_search') + url_query
        else:
            context['lemma_group'] = False
            context['lemma_group_url'] = ''
    # NOTE(review): bare except treats any failure (presumably a gloss without a lemma)
    # as "no lemma group"; the print is a leftover debug trace — confirm before tightening.
    except:
        print("lemma_group_count: except")
        context['lemma_group'] = False
        context['lemma_group_url'] = ''
    # For every gloss in the lemma group, pick a display text in the interface
    # language, falling back to the default language.
    lemma_group_glosses = gl.lemma.gloss_set.all()
    glosses_in_lemma_group = []
    if lemma_group_glosses:
        for gl_lem in lemma_group_glosses:
            lemma_dict = {}
            if gl_lem.dataset:
                for language in gl_lem.dataset.translation_languages.all():
                    lemma_dict[language.language_code_2char] = gl_lem.annotationidglosstranslation_set.filter(language=language)
            else:
                language = Language.objects.get(id=get_default_language_id())
                lemma_dict[language.language_code_2char] = gl_lem.annotationidglosstranslation_set.filter(language=language)
            if language_code in lemma_dict.keys():
                gl_lem_display = lemma_dict[language_code][0].text
            else:
                # This should be set to the default language if the interface language hasn't been set for this gloss
                gl_lem_display = lemma_dict[default_language_code][0].text
            glosses_in_lemma_group.append((gl_lem,gl_lem_display))
    context['glosses_in_lemma_group'] = glosses_in_lemma_group
    # Relations from this gloss to other glosses, with a translated display text per target
    otherrelations = []
    if gl.relation_sources:
        for oth_rel in gl.relation_sources.all():
            other_relations_dict = {}
            if oth_rel.target.dataset:
                for language in oth_rel.target.dataset.translation_languages.all():
                    other_relations_dict[language.language_code_2char] = oth_rel.target.annotationidglosstranslation_set.filter(language=language)
            else:
                language = Language.objects.get(id=get_default_language_id())
                other_relations_dict[language.language_code_2char] = oth_rel.target.annotationidglosstranslation_set.filter(language=language)
            if language_code in other_relations_dict.keys():
                target_display = other_relations_dict[language_code][0].text
            else:
                # This should be set to the default language if the interface language hasn't been set for this gloss
                target_display = other_relations_dict[default_language_code][0].text
            otherrelations.append((oth_rel,target_display))
    context['otherrelations'] = otherrelations
    # Variants of this gloss, again with a per-variant display text
    has_variants = gl.has_variants()
    variants = []
    if has_variants:
        for gl_var in has_variants:
            variants_dict = {}
            if gl_var.dataset:
                for language in gl_var.dataset.translation_languages.all():
                    variants_dict[language.language_code_2char] = gl_var.annotationidglosstranslation_set.filter(language=language)
            else:
                language = Language.objects.get(id=get_default_language_id())
                variants_dict[language.language_code_2char] = gl_var.annotationidglosstranslation_set.filter(language=language)
            if language_code in variants_dict.keys():
                gl_var_display = variants_dict[language_code][0].text
            else:
                # This should be set to the default language if the interface language hasn't been set for this gloss
                gl_var_display = variants_dict[default_language_code][0].text
            variants.append((gl_var,gl_var_display))
    context['variants'] = variants
    # Minimal pairs: (other gloss, differing-fields dict, display text) triples
    minimal_pairs_dict = gl.minimal_pairs_dict()
    minimalpairs = []
    # NOTE(review): the loop variable 'dict' shadows the builtin within this loop
    for mpg, dict in minimal_pairs_dict.items():
        minimal_pairs_trans = {}
        if mpg.dataset:
            for language in mpg.dataset.translation_languages.all():
                minimal_pairs_trans[language.language_code_2char] = mpg.annotationidglosstranslation_set.filter(language=language)
        else:
            language = Language.objects.get(id=get_default_language_id())
            minimal_pairs_trans[language.language_code_2char] = mpg.annotationidglosstranslation_set.filter(language=language)
        if language_code in minimal_pairs_trans.keys():
            minpar_display = minimal_pairs_trans[language_code][0].text
        else:
            # This should be set to the default language if the interface language hasn't been set for this gloss
            minpar_display = minimal_pairs_trans[default_language_code][0].text
        minimalpairs.append((mpg,dict,minpar_display))
    context['minimalpairs'] = minimalpairs
    # Put annotation_idgloss per language in the context
    context['annotation_idgloss'] = {}
    if gl.dataset:
        for language in gl.dataset.translation_languages.all():
            context['annotation_idgloss'][language] = gl.annotationidglosstranslation_set.filter(language=language)
    else:
        language = Language.objects.get(id=get_default_language_id())
        context['annotation_idgloss'][language] = gl.annotationidglosstranslation_set.filter(language=language)
    selected_datasets = get_selected_datasets_for_user(self.request.user)
    dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
    context['dataset_languages'] = dataset_languages
    if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
        context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
    else:
        context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
    return context
class MorphemeListView(ListView):
    """The morpheme list view basically copies the gloss list view.

    Lists Morpheme objects for the user's selected datasets, supports a large
    set of GET-parameter filters in get_queryset(), and can export the result
    set as CSV via ?format=CSV.
    """
    model = Morpheme
    search_type = 'morpheme'
    dataset_name = DEFAULT_DATASET
    last_used_dataset = None
    template_name = 'dictionary/admin_morpheme_list.html'
    paginate_by = 500

    def get_context_data(self, **kwargs):
        """Add the search form, choice lists and dataset information to the context."""
        # Call the base implementation first to get a context
        context = super(MorphemeListView, self).get_context_data(**kwargs)
        # Retrieve the search_type, so that we know whether the search should be restricted to Gloss or not
        if 'search_type' in self.request.GET:
            self.search_type = self.request.GET['search_type']
        if 'last_used_dataset' in self.request.session.keys():
            self.last_used_dataset = self.request.session['last_used_dataset']
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        # Build (id, name) choice tuples for the sign languages of the selected datasets, de-duplicated
        selected_datasets_signlanguage = [ ds.signlanguage for ds in selected_datasets ]
        sign_languages = []
        for sl in selected_datasets_signlanguage:
            if not ((str(sl.id), sl.name) in sign_languages):
                sign_languages.append((str(sl.id), sl.name))
        # Dialect choices are labelled "<sign language>/<dialect>"
        selected_datasets_dialects = Dialect.objects.filter(signlanguage__in=selected_datasets_signlanguage).distinct()
        dialects = []
        for dl in selected_datasets_dialects:
            dialect_name = dl.signlanguage.name + "/" + dl.name
            dialects.append((str(dl.id),dialect_name))
        search_form = MorphemeSearchForm(self.request.GET, languages=dataset_languages, sign_languages=sign_languages,
                                         dialects=dialects, language_code=self.request.LANGUAGE_CODE)
        context['searchform'] = search_form
        context['glosscount'] = Morpheme.objects.all().count()
        context['search_type'] = self.search_type
        context['add_morpheme_form'] = MorphemeCreateForm(self.request.GET, languages=dataset_languages, user=self.request.user, last_used_dataset=self.last_used_dataset)
        # make sure that the morpheme-type options are available to the listview
        oChoiceLists = {}
        choice_list = FieldChoice.objects.filter(field__iexact = fieldname_to_category('mrpType'))
        if (len(choice_list) > 0):
            ordered_dict = choicelist_queryset_to_translated_dict(choice_list, self.request.LANGUAGE_CODE)
            oChoiceLists['mrpType'] = ordered_dict
        # Make all choice lists available in the context (currently only mrpType)
        context['choice_lists'] = json.dumps(oChoiceLists)
        # Group (fieldname, bound form field, label) triples by topic for the template
        context['input_names_fields_and_labels'] = {}
        for topic in ['main', 'phonology', 'semantics']:
            context['input_names_fields_and_labels'][topic] = []
            for fieldname in settings.FIELDS[topic]:
                if fieldname not in ['weakprop', 'weakdrop', 'domhndsh_number', 'domhndsh_letter', 'subhndsh_number', 'subhndsh_letter']:
                    field = search_form[fieldname]
                    label = field.label
                    context['input_names_fields_and_labels'][topic].append((fieldname, field, label))
        context['paginate_by'] = self.request.GET.get('paginate_by', self.paginate_by)
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
        else:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
        context['lemma_create_field_prefix'] = LemmaCreateForm.lemma_create_field_prefix
        context['MULTIPLE_SELECT_MORPHEME_FIELDS'] = settings.MULTIPLE_SELECT_MORPHEME_FIELDS
        return context

    def get_paginate_by(self, queryset):
        """
        Paginate by specified value in querystring, or use default class property value.
        """
        return self.request.GET.get('paginate_by', self.paginate_by)

    def get_queryset(self):
        """Filter the Morpheme queryset according to the GET search parameters.

        Starts from all morphemes in the user's selected datasets (or an empty
        queryset when no GET parameters are present), applies the individual
        filters below, stores the result ids in the session for later use
        (e.g. by detail views), and returns the sorted queryset.
        """
        # get query terms from self.request
        get = self.request.GET
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        if len(get) > 0:
            qs = Morpheme.objects.all().filter(lemma__dataset__in=selected_datasets)
        # Don't show anything when we're not searching yet
        else:
            qs = Morpheme.objects.none()
        # Evaluate all morpheme/language search fields; the language code is
        # embedded in the parameter name after the prefix
        for get_key, get_value in get.items():
            if get_key.startswith(MorphemeSearchForm.morpheme_search_field_prefix) and get_value != '':
                language_code_2char = get_key[len(MorphemeSearchForm.morpheme_search_field_prefix):]
                language = Language.objects.filter(language_code_2char=language_code_2char)
                qs = qs.filter(annotationidglosstranslation__text__iregex=get_value,
                               annotationidglosstranslation__language=language)
            elif get_key.startswith(MorphemeSearchForm.keyword_search_field_prefix) and get_value != '':
                language_code_2char = get_key[len(MorphemeSearchForm.keyword_search_field_prefix):]
                language = Language.objects.filter(language_code_2char=language_code_2char)
                qs = qs.filter(translation__translation__text__iregex=get_value,
                               translation__language=language)
        if 'lemmaGloss' in get and get['lemmaGloss'] != '':
            val = get['lemmaGloss']
            qs = qs.filter(idgloss__iregex=val)
        if 'keyword' in get and get['keyword'] != '':
            val = get['keyword']
            qs = qs.filter(translation__translation__text__iregex=val)
        if 'inWeb' in get and get['inWeb'] != '0':
            # Don't apply 'inWeb' filter, if it is unspecified ('0' according to the NULLBOOLEANCHOICES)
            val = get['inWeb'] == '2'
            qs = qs.filter(inWeb__exact=val)
        if 'hasvideo' in get and get['hasvideo'] != 'unspecified':
            val = get['hasvideo'] == 'no'
            qs = qs.filter(glossvideo__isnull=val)
        if 'defspublished' in get and get['defspublished'] != 'unspecified':
            val = get['defspublished'] == 'yes'
            qs = qs.filter(definition__published=val)
        fieldnames = FIELDS['main']+FIELDS['phonology']+FIELDS['semantics']+['inWeb', 'isNew']
        # SignLanguage and basic property filters
        # allows for multiselect
        vals = get.getlist('dialect[]')
        if '' in vals:
            vals.remove('')
        if vals != []:
            qs = qs.filter(dialect__in=vals)
        # allows for multiselect
        vals = get.getlist('signlanguage[]')
        if '' in vals:
            vals.remove('')
        if vals != []:
            qs = qs.filter(signlanguage__in=vals)
        if 'useInstr' in get and get['useInstr'] != '':
            qs = qs.filter(useInstr__icontains=get['useInstr'])
        # Multi-select fields arrive as '<name>[]' GET lists and filter with '__in'
        for fieldnamemulti in settings.MULTIPLE_SELECT_MORPHEME_FIELDS:
            fieldnamemultiVarname = fieldnamemulti + '[]'
            fieldnameQuery = fieldnamemulti + '__in'
            vals = get.getlist(fieldnamemultiVarname)
            if '' in vals:
                vals.remove('')
            if vals != []:
                qs = qs.filter(**{ fieldnameQuery: vals })
        ## phonology and semantics field filters
        fieldnames = [ f for f in fieldnames if f not in settings.MULTIPLE_SELECT_MORPHEME_FIELDS ]
        for fieldname in fieldnames:
            if fieldname in get:
                key = fieldname + '__exact'
                val = get[fieldname]
                # Map NULLBOOLEANCHOICES codes onto ''/None/True/False for null-boolean fields
                if isinstance(Gloss._meta.get_field(fieldname), NullBooleanField):
                    val = {'0': '', '1': None, '2': True, '3': False}[val]
                if val != '':
                    kwargs = {key: val}
                    qs = qs.filter(**kwargs)
        # these fields are for ASL searching
        if 'initial_relative_orientation' in get and get['initial_relative_orientation'] != '':
            val = get['initial_relative_orientation']
            qs = qs.filter(initial_relative_orientation__exact=val)
        if 'final_relative_orientation' in get and get['final_relative_orientation'] != '':
            val = get['final_relative_orientation']
            qs = qs.filter(final_relative_orientation__exact=val)
        if 'initial_palm_orientation' in get and get['initial_palm_orientation'] != '':
            val = get['initial_palm_orientation']
            qs = qs.filter(initial_palm_orientation__exact=val)
        if 'final_palm_orientation' in get and get['final_palm_orientation'] != '':
            val = get['final_palm_orientation']
            qs = qs.filter(final_palm_orientation__exact=val)
        if 'initial_secondary_loc' in get and get['initial_secondary_loc'] != '':
            val = get['initial_secondary_loc']
            qs = qs.filter(initial_secondary_loc__exact=val)
        if 'final_secondary_loc' in get and get['final_secondary_loc'] != '':
            val = get['final_secondary_loc']
            qs = qs.filter(final_secondary_loc__exact=val)
        # NOTE(review): the 'final_secondary_loc' filter below duplicates the one
        # above (harmless but redundant) — likely a copy/paste leftover.
        if 'final_secondary_loc' in get and get['final_secondary_loc'] != '':
            val = get['final_secondary_loc']
            qs = qs.filter(final_secondary_loc__exact=val)
        if 'defsearch' in get and get['defsearch'] != '':
            val = get['defsearch']
            if 'defrole' in get:
                role = get['defrole']
            else:
                role = 'all'
            if role == 'all':
                qs = qs.filter(definition__text__icontains=val)
            else:
                qs = qs.filter(definition__text__icontains=val, definition__role__exact=role)
        if 'tags' in get and get['tags'] != '':
            vals = get.getlist('tags')
            tags = []
            for t in vals:
                tags.extend(Tag.objects.filter(name=t))
            # search is an implicit AND so intersection
            tqs = TaggedItem.objects.get_intersection_by_model(Gloss, tags)
            # intersection
            qs = qs & tqs
            qs = qs.distinct()
        if 'nottags' in get and get['nottags'] != '':
            vals = get.getlist('nottags')
            tags = []
            for t in vals:
                tags.extend(Tag.objects.filter(name=t))
            # search is an implicit AND so intersection
            tqs = TaggedItem.objects.get_intersection_by_model(Gloss, tags)
            # exclude all of tqs from qs
            # NOTE(review): this turns qs into a plain list, so any later qs.filter(...)
            # or qs.query access would fail when 'nottags' is combined with other filters.
            qs = [q for q in qs if q not in tqs]
        if 'relationToForeignSign' in get and get['relationToForeignSign'] != '':
            relations = RelationToForeignSign.objects.filter(other_lang_gloss__icontains=get['relationToForeignSign'])
            potential_pks = [relation.gloss.pk for relation in relations]
            qs = qs.filter(pk__in=potential_pks)
        if 'hasRelationToForeignSign' in get and get['hasRelationToForeignSign'] != '0':
            pks_for_glosses_with_relations = [relation.gloss.pk for relation in RelationToForeignSign.objects.all()]
            if get['hasRelationToForeignSign'] == '1':  # We only want glosses with a relation to a foreign sign
                qs = qs.filter(pk__in=pks_for_glosses_with_relations)
            elif get['hasRelationToForeignSign'] == '2':  # We only want glosses without a relation to a foreign sign
                qs = qs.exclude(pk__in=pks_for_glosses_with_relations)
        if 'relation' in get and get['relation'] != '':
            potential_targets = Gloss.objects.filter(idgloss__icontains=get['relation'])
            relations = Relation.objects.filter(target__in=potential_targets)
            potential_pks = [relation.source.pk for relation in relations]
            qs = qs.filter(pk__in=potential_pks)
        if 'hasRelation' in get and get['hasRelation'] != '':
            # Find all relations with this role
            if get['hasRelation'] == 'all':
                relations_with_this_role = Relation.objects.all()
            else:
                relations_with_this_role = Relation.objects.filter(role__exact=get['hasRelation'])
            # Remember the pk of all glosses that take part in the collected relations
            pks_for_glosses_with_correct_relation = [relation.source.pk for relation in relations_with_this_role]
            qs = qs.filter(pk__in=pks_for_glosses_with_correct_relation)
        if 'morpheme' in get and get['morpheme'] != '':
            potential_morphemes = Gloss.objects.filter(idgloss__icontains=get['morpheme'])
            potential_morphdefs = MorphologyDefinition.objects.filter(
                morpheme__in=[morpheme.pk for morpheme in potential_morphemes])
            potential_pks = [morphdef.parent_gloss.pk for morphdef in potential_morphdefs]
            qs = qs.filter(pk__in=potential_pks)
        if 'definitionRole' in get and get['definitionRole'] != '':
            # Find all definitions with this role
            if get['definitionRole'] == 'all':
                definitions_with_this_role = Definition.objects.all()
            else:
                definitions_with_this_role = Definition.objects.filter(role__exact=get['definitionRole'])
            # Remember the pk of all glosses that are referenced in the collection definitions
            pks_for_glosses_with_these_definitions = [definition.gloss.pk for definition in definitions_with_this_role]
            qs = qs.filter(pk__in=pks_for_glosses_with_these_definitions)
        if 'definitionContains' in get and get['definitionContains'] != '':
            definitions_with_this_text = Definition.objects.filter(text__icontains=get['definitionContains'])
            # Remember the pk of all glosses that are referenced in the collection definitions
            pks_for_glosses_with_these_definitions = [definition.gloss.pk for definition in definitions_with_this_text]
            qs = qs.filter(pk__in=pks_for_glosses_with_these_definitions)
        # Creation-date range filters; dates arrive as MM/DD/YYYY strings
        if 'createdBefore' in get and get['createdBefore'] != '':
            created_before_date = DT.datetime.strptime(get['createdBefore'], "%m/%d/%Y").date()
            qs = qs.filter(creationDate__range=(EARLIEST_GLOSS_CREATION_DATE, created_before_date))
        if 'createdAfter' in get and get['createdAfter'] != '':
            created_after_date = DT.datetime.strptime(get['createdAfter'], "%m/%d/%Y").date()
            qs = qs.filter(creationDate__range=(created_after_date, DT.datetime.now()))
        if 'createdBy' in get and get['createdBy'] != '':
            created_by_search_string = ' '.join(get['createdBy'].strip().split())  # remove redundant spaces
            qs = qs.annotate(
                created_by=Concat('creator__first_name', V(' '), 'creator__last_name', output_field=CharField())) \
                .filter(created_by__iregex=created_by_search_string)
        # Saving querysets results to sessions, these results can then be used elsewhere (like in gloss_detail)
        # Flush the previous queryset (just in case)
        self.request.session['search_results'] = None
        # Make sure that the QuerySet has filters applied (user is searching for something instead of showing all results [objects.all()])
        if hasattr(qs.query.where, 'children') and len(qs.query.where.children) > 0:
            items = []
            for item in qs:
                items.append(dict(id=item.id, gloss=item.idgloss))
            self.request.session['search_results'] = items
        # Sort the queryset by the parameters given
        qs = order_queryset_by_sort_order(self.request.GET, qs)
        self.request.session['search_type'] = self.search_type
        if not ('last_used_dataset' in self.request.session.keys()):
            self.request.session['last_used_dataset'] = self.last_used_dataset
        # Return the resulting filtered and sorted queryset
        return qs

    def render_to_response(self, context):
        """Render the normal list page, or CSV when ?format=CSV is given."""
        # Look for a 'format=CSV' GET argument
        if self.request.GET.get('format') == 'CSV':
            return self.render_to_csv_response(context)
        else:
            return super(MorphemeListView, self).render_to_response(context)

    # noinspection PyInterpreter,PyInterpreter
    def render_to_csv_response(self, context):
        """Convert all Morphemes into a CSV

        This function is derived from and similar to the one used in class GlossListView
        Differences:
        1 - this one adds the field [mrpType]
        2 - the filename is different

        Raises PermissionDenied unless the user has dictionary.export_csv.
        """
        if not self.request.user.has_perm('dictionary.export_csv'):
            raise PermissionDenied
        # Create the HttpResponse object with the appropriate CSV header.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="dictionary-morph-export.csv"'
        # fields = [f.name for f in Gloss._meta.fields]
        # We want to manually set which fields to export here
        fieldnames = FIELDS['main']+FIELDS['phonology']+FIELDS['semantics']+FIELDS['frequency']+['inWeb', 'isNew']
        # Different from Gloss: we use Morpheme here
        fields = [Morpheme._meta.get_field(fieldname) for fieldname in fieldnames]
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        # One "Annotation ID Gloss (<language>)" column per dataset language
        lang_attr_name = 'name_' + DEFAULT_KEYWORDS_LANGUAGE['language_code_2char']
        annotationidglosstranslation_fields = ["Annotation ID Gloss" + " (" + getattr(language, lang_attr_name) + ")" for language in
                                               dataset_languages]
        writer = csv.writer(response)
        with override(LANGUAGE_CODE):
            # Field labels are ASCII-stripped so the header survives any encoding
            header = ['Signbank ID'] + annotationidglosstranslation_fields + [f.verbose_name.title().encode('ascii', 'ignore').decode() for f in fields]
            for extra_column in ['SignLanguages', 'Dialects', 'Keywords', 'Morphology', 'Relations to other signs',
                                 'Relations to foreign signs', 'Appears in signs', ]:
                header.append(extra_column)
            writer.writerow(header)
            for gloss in self.get_queryset():
                row = [str(gloss.pk)]
                for language in dataset_languages:
                    annotationidglosstranslations = gloss.annotationidglosstranslation_set.filter(language=language)
                    if annotationidglosstranslations and len(annotationidglosstranslations) == 1:
                        row.append(annotationidglosstranslations[0].text)
                    else:
                        row.append("")
                for f in fields:
                    # Try the value of the choicelist
                    try:
                        row.append(getattr(gloss, 'get_' + f.name + '_display')())
                    # If it's not there, try the raw value
                    except AttributeError:
                        value = getattr(gloss, f.name)
                        # This was disabled with the move to Python 3... might not be needed anymore?
                        # if isinstance(value, unicode):
                        #     value = str(value.encode('ascii', 'xmlcharrefreplace'))
                        # elif not isinstance(value, str):
                        value = str(value)
                        row.append(value)
                # get languages
                signlanguages = [signlanguage.name for signlanguage in gloss.signlanguage.all()]
                row.append(", ".join(signlanguages))
                # get dialects
                dialects = [dialect.name for dialect in gloss.dialect.all()]
                row.append(", ".join(dialects))
                # get translations
                trans = [t.translation.text for t in gloss.translation_set.all()]
                row.append(", ".join(trans))
                # get compound's component type
                morphemes = [morpheme.role for morpheme in MorphologyDefinition.objects.filter(parent_gloss=gloss)]
                row.append(", ".join(morphemes))
                # get relations to other signs
                relations = [relation.target.idgloss for relation in Relation.objects.filter(source=gloss)]
                row.append(", ".join(relations))
                # get relations to foreign signs
                relations = [relation.other_lang_gloss for relation in RelationToForeignSign.objects.filter(gloss=gloss)]
                row.append(", ".join(relations))
                # Got all the glosses (=signs) this morpheme appears in
                appearsin = [appears.idgloss for appears in MorphologyDefinition.objects.filter(parent_gloss=gloss)]
                row.append(", ".join(appearsin))
                # Make it safe for weird chars; non-string cells become None
                safe_row = []
                for column in row:
                    try:
                        safe_row.append(column.encode('utf-8').decode())
                    except AttributeError:
                        safe_row.append(None)
                writer.writerow(safe_row)
        return response
class HandshapeDetailView(DetailView):
    """Detail view for a single Handshape, looked up by machine value.

    If the Handshape row does not exist yet but a matching 'Handshape'
    FieldChoice does, the Handshape object is created on first view.
    """
    model = Handshape
    template_name = 'dictionary/handshape_detail.html'
    context_object_name = 'handshape'
    search_type = 'handshape'

    class Meta:
        verbose_name_plural = "Handshapes"
        ordering = ['machine_value']

    # Overriding the get method get permissions right
    def get(self, request, *args, **kwargs):
        """Fetch the handshape, lazily creating it from its FieldChoice if needed."""
        match_machine_value = int(kwargs['pk'])
        try:
            self.object = self.get_object()
        except Http404:
            # check to see if this handshape has been created but not yet viewed
            # if that is the case, create a new handshape object and view that,
            # otherwise return an error
            handshapes = FieldChoice.objects.filter(field__iexact='Handshape')
            handshape_not_created = 1
            for o in handshapes:
                if o.machine_value == match_machine_value:  # only one match
                    # NOTE(review): new_id is assigned but never used
                    new_id = o.machine_value
                    new_machine_value = o.machine_value
                    new_english_name = o.english_name
                    new_dutch_name = o.dutch_name
                    new_chinese_name = o.chinese_name
                    new_handshape = Handshape(machine_value=new_machine_value, english_name=new_english_name,
                                              dutch_name=new_dutch_name, chinese_name=new_chinese_name)
                    new_handshape.save()
                    handshape_not_created = 0
                    self.object = new_handshape
                    break
            if handshape_not_created:
                return HttpResponse('<p>Handshape not configured.</p>')
        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        """Group the handshape's fields per template section and refresh the
        session search results so the navigation bar can page over handshapes."""
        try:
            context = super(HandshapeDetailView, self).get_context_data(**kwargs)
        # NOTE(review): bare except; also returns an HttpResponse where callers
        # expect a context dict — confirm this path is ever exercised.
        except:
            # return custom template
            return HttpResponse('invalid', {'content-type': 'text/plain'})
        hs = context['handshape']
        setattr(self.request, 'search_type', self.search_type)
        labels = hs.field_labels()
        context['imageform'] = ImageUploadForHandshapeForm()
        context['choice_lists'] = {}
        context['handshape_fields'] = []
        # NOTE(review): oChoiceLists is assigned but never used
        oChoiceLists = {}
        # Per-section field lists: finger selection 1/2, finger configuration 1/2, unselected fingers
        context['handshape_fields_FS1'] = []
        context['handshape_fields_FS2'] = []
        context['handshape_fields_FC1'] = []
        context['handshape_fields_FC2'] = []
        context['handshape_fields_UF'] = []
        for field in FIELDS['handshape']:
            # Get and save the choice list for this field
            fieldchoice_category = fieldname_to_category(field)
            choice_list = FieldChoice.objects.filter(field__iexact=fieldchoice_category).order_by('machine_value')
            if len(choice_list) > 0:
                context['choice_lists'][field] = choicelist_queryset_to_translated_dict (choice_list,self.request.LANGUAGE_CODE)
            # Take the human value in the language we are using
            machine_value = getattr(hs, field)
            human_value = machine_value_to_translated_human_value(machine_value,choice_list,self.request.LANGUAGE_CODE)
            # And add the kind of field
            kind = fieldname_to_kind(field)
            field_label = labels[field]
            # Route the field into its section; the section-heading labels themselves are skipped
            if field_label in ['Finger selection', 'T', 'I', 'M', 'R', 'P']:
                if field_label != 'Finger selection':
                    context['handshape_fields_FS1'].append([human_value, field, field_label, kind])
            elif field_label in ['Finger selection 2', 'T2', 'I2', 'M2', 'R2', 'P2']:
                if field_label != 'Finger selection 2':
                    context['handshape_fields_FS2'].append([human_value, field, field_label, kind])
            elif field_label in ['Unselected fingers', 'Tu', 'Iu', 'Mu', 'Ru', 'Pu']:
                if field_label != 'Unselected fingers':
                    context['handshape_fields_UF'].append([human_value, field, field_label, kind])
            # elif field_label == 'Finger configuration 1':
            #     context['handshape_fields_FC1'].append([human_value, field, field_label, kind])
            elif field_label == 'Finger configuration 2':
                context['handshape_fields_FC2'].append([human_value, field, field_label, kind])
            else:
                context['handshape_fields'].append([human_value, field, field_label, kind])
        context['choice_lists'] = json.dumps(context['choice_lists'])
        # Check the type of the current search results; gloss results are discarded
        if self.request.session['search_results'] and len(self.request.session['search_results']) > 0:
            if 'gloss' in self.request.session['search_results'][0].keys():
                self.request.session['search_results'] = None
        # if there are no current handshape search results in the current session, display all of them in the navigation bar
        if self.request.session['search_type'] != 'handshape' or self.request.session['search_results'] == None:
            self.request.session['search_type'] = self.search_type
            qs = Handshape.objects.all().order_by('machine_value')
            items = []
            for item in qs:
                # Pick the handshape name matching the interface language
                if self.request.LANGUAGE_CODE == 'nl':
                    items.append(dict(id=item.machine_value, handshape=item.dutch_name))
                elif self.request.LANGUAGE_CODE == 'zh-hans':
                    items.append(dict(id=item.machine_value, handshape=item.chinese_name))
                else:
                    items.append(dict(id=item.machine_value, handshape=item.english_name))
            self.request.session['search_results'] = items
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
        else:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
        return context
class HomonymListView(ListView):
    """List view over focus glosses that carry enough phonology (handedness
    and dominant handshape both set) to take part in homonym comparison."""
    model = Gloss
    template_name = 'dictionary/admin_homonyms_list.html'

    def get_context_data(self, **kwargs):
        """Add the interface language, the dataset languages, and the id list of
        all focus glosses (one ajax call is issued per id) to the context."""
        context = super(HomonymListView, self).get_context_data(**kwargs)

        # Resolve the interface language to a Language object; Chinese maps to 'zh'.
        interface_code = self.request.LANGUAGE_CODE
        lookup_code = 'zh' if interface_code == 'zh-hans' else interface_code
        matching_languages = Language.objects.filter(language_code_2char=lookup_code)
        if matching_languages:
            context['language'] = matching_languages[0]
        else:
            context['language'] = Language.objects.get(id=get_default_language_id())

        datasets_for_user = get_selected_datasets_for_user(self.request.user)
        context['dataset_languages'] = Language.objects.filter(dataset__in=datasets_for_user).distinct()

        context['SHOW_DATASET_INTERFACE_OPTIONS'] = getattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS', False)

        # this is used to set up the ajax calls, one per each focus gloss in the table
        focus_glosses = (Gloss.none_morpheme_objects()
                         .select_related('lemma')
                         .filter(lemma__dataset__in=datasets_for_user)
                         .exclude(handedness__isnull=True)
                         .exclude(domhndsh__isnull=True))
        context['ids_of_all_glosses'] = [focus_gloss.id for focus_gloss in focus_glosses]
        return context

    def get_queryset(self):
        """Return all non-morpheme glosses in the user's selected datasets that
        have both handedness and dominant handshape filled in."""
        # Get all existing saved Homonyms
        # relation_homonyms = Relation.objects.filter(role='homonym')
        datasets_for_user = get_selected_datasets_for_user(self.request.user)
        return (Gloss.none_morpheme_objects()
                .select_related('lemma')
                .filter(lemma__dataset__in=datasets_for_user)
                .exclude(handedness__isnull=True)
                .exclude(domhndsh__isnull=True))
class MinimalPairsListView(ListView):
model = Gloss
template_name = 'dictionary/admin_minimalpairs_list.html'
paginate_by = 10
def get_context_data(self, **kwargs):
    """Build the context for the minimal-pairs list page: interface language,
    dataset languages, the phonology field labels used for comparison, and
    pagination info for the per-page ajax lookups."""
    # reformat LANGUAGE_CODE for use in dictionary domain, accommodate multilingual codings
    from signbank.tools import convert_language_code_to_2char
    language_code = convert_language_code_to_2char(self.request.LANGUAGE_CODE)
    language = Language.objects.get(id=get_default_language_id())
    default_language_code = language.language_code_2char
    # Refresh the "constant" translated choice lists table
    # NOTE(review): the return value is unused here — presumably called for its
    # module-level caching side effect; confirm before removing.
    translated_choice_lists_table = generate_translated_choice_list_table()
    context = super(MinimalPairsListView, self).get_context_data(**kwargs)
    # Resolve the interface language, falling back to the default language
    languages = Language.objects.filter(language_code_2char=self.request.LANGUAGE_CODE)
    if languages:
        context['language'] = languages[0]
    else:
        context['language'] = Language.objects.get(id=get_default_language_id())
    selected_datasets = get_selected_datasets_for_user(self.request.user)
    dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
    context['dataset_languages'] = dataset_languages
    if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
        context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
    else:
        context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
    field_names = []
    for field in FIELDS['phonology']:
        # the following fields are not considered for minimal pairs
        if field not in ['locVirtObj', 'phonOth', 'mouthG', 'mouthing', 'phonetVar']:
            field_names.append(field)
    # Map each comparison field to its (lazily translated) verbose name
    field_labels = dict()
    for field in field_names:
        field_label = Gloss._meta.get_field(field).verbose_name
        field_labels[field] = field_label.encode('utf-8').decode()
    context['field_labels'] = field_labels
    # Pagination info consumed by the template's per-page ajax calls
    context['page_number'] = context['page_obj'].number
    context['objects_on_page'] = [ g.id for g in context['page_obj'].object_list ]
    context['paginate_by'] = self.request.GET.get('paginate_by', self.paginate_by)
    return context
def get_paginate_by(self, queryset):
"""
Paginate by specified value in querystring, or use default class property value.
"""
return self.request.GET.get('paginate_by', self.paginate_by)
def get_queryset(self):
selected_datasets = get_selected_datasets_for_user(self.request.user)
# grab gloss ids for finger spelling glosses, identified by text #.
finger_spelling_glosses = [ a_idgloss_trans.gloss_id for a_idgloss_trans in AnnotationIdglossTranslation.objects.filter(text__startswith="#") ]
glosses_with_phonology = Gloss.none_morpheme_objects().select_related('lemma').filter(lemma__dataset__in=selected_datasets).exclude(id__in=finger_spelling_glosses).exclude((Q(**{'handedness__isnull': True}))).exclude((Q(**{'domhndsh__isnull': True})))
return glosses_with_phonology
class FrequencyListView(ListView):
    """Frequency overview for the glosses of the selected datasets.

    Builds, per phonology and per semantics field, sorted label lookup
    tables and translated choice-list tables; the template merges both
    kinds into a single frequency table.
    """
    # not sure what model should be used here, it applies to all the glosses in a dataset
    model = Dataset
    template_name = 'dictionary/admin_frequency_list.html'

    def get_context_data(self, **kwargs):
        """Add language, dataset and field label/choice lookup tables to the context."""
        # Call the base implementation first to get a context
        context = super(FrequencyListView, self).get_context_data(**kwargs)

        language_code = self.request.LANGUAGE_CODE
        # 'zh-hans' is stored with the 2-char code 'zh' on Language objects
        if self.request.LANGUAGE_CODE == 'zh-hans':
            languages = Language.objects.filter(language_code_2char='zh')
            language_code = 'zh'
        else:
            languages = Language.objects.filter(language_code_2char=self.request.LANGUAGE_CODE)

        if languages:
            context['language'] = languages[0]
        else:
            # fall back to the site default language
            context['language'] = Language.objects.get(id=get_default_language_id())

        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages

        # settings.LANGUAGES maps language codes to language names; the
        # lower-cased name is used below as the column-name prefix when
        # ordering FieldChoice objects (e.g. order_by('english_name'))
        codes_to_adjectives = dict(settings.LANGUAGES)

        if language_code not in codes_to_adjectives.keys():
            adjective = 'english'
        else:
            adjective = codes_to_adjectives[language_code].lower()

        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
        else:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = False

        context['dataset_ids'] = [ ds.id for ds in selected_datasets]

        # sort the phonology fields based on field label in the designated language
        # this is used for display in the template, by lookup
        field_labels = dict()
        for field in FIELDS['phonology']:
            if field not in ['weakprop', 'weakdrop', 'domhndsh_number', 'domhndsh_letter', 'subhndsh_number',
                             'subhndsh_letter']:
                field_kind = fieldname_to_kind(field)
                if field_kind == 'list':
                    field_label = Gloss._meta.get_field(field).verbose_name
                    # force the (possibly lazy) verbose_name into a plain string
                    field_labels[field] = field_label.encode('utf-8').decode()

        # note on context variables below: there are two variables for the same data
        # the context variable field_labels_list is iterated over in the template to generate the pull-down menu
        # this pull-down has to be sorted in the destination language
        # the menu generation is done by Django as part of the form
        # after Django generates the form, it is modified by javascript to convert the options to a multiple-select
        # the javascript makes use of the labels generated by Django
        # there were some issues getting the other dict variable (field_labels) to remain sorted in the template
        # the field_labels dict is used to lookup the display names, it does not need to be sorted
        field_labels_list = [ (k, v) for (k, v) in sorted(field_labels.items(), key=lambda x: x[1])]
        context['field_labels'] = field_labels
        context['field_labels_list'] = field_labels_list

        # sort the field choices based on the designated language
        # this is used for display in the template, by lookup
        field_labels_choices = dict()
        for field, label in field_labels.items():
            field_category = fieldname_to_category(field)
            field_choices = FieldChoice.objects.filter(field__iexact=field_category).order_by(adjective+'_name')
            translated_choices = choicelist_queryset_to_translated_dict(field_choices,self.request.LANGUAGE_CODE,ordered=False,id_prefix='_',shortlist=False)
            field_labels_choices[field] = dict(translated_choices)
        context['field_labels_choices'] = field_labels_choices

        # do the same for the semantics fields
        # the code is here to keep phonology and semantics in separate dicts,
        # but at the moment all results are displayed in one table in the template
        field_labels_semantics = dict()
        for field in FIELDS['semantics']:
            field_kind = fieldname_to_kind(field)
            if field_kind == 'list':
                field_label = Gloss._meta.get_field(field).verbose_name
                field_labels_semantics[field] = field_label.encode('utf-8').decode()

        field_labels_semantics_list = [ (k, v) for (k, v) in sorted(field_labels_semantics.items(), key=lambda x: x[1])]
        context['field_labels_semantics'] = field_labels_semantics
        context['field_labels_semantics_list'] = field_labels_semantics_list

        field_labels_semantics_choices = dict()
        for field, label in field_labels_semantics.items():
            field_category = fieldname_to_category(field)
            field_choices = FieldChoice.objects.filter(field__iexact=field_category).order_by(adjective+'_name')
            translated_choices = choicelist_queryset_to_translated_dict(field_choices,self.request.LANGUAGE_CODE,ordered=False,id_prefix='_',shortlist=False)
            field_labels_semantics_choices[field] = dict(translated_choices)
        context['field_labels_semantics_choices'] = field_labels_semantics_choices

        # for ease of implementation in the template, the results of the two kinds of frequencies
        # (phonology fields, semantics fields) are displayed in the same table, the lookup tables are merged so only one loop is needed
        context['all_field_labels_choices'] = dict(field_labels_choices, **field_labels_semantics_choices)
        context['all_field_labels'] = dict(field_labels, **field_labels_semantics)

        return context

    def get_queryset(self):
        """Return the user's selected datasets, with the per-user profile
        prefetched and the guardian permission cache warmed; None for
        anonymous users."""
        user = self.request.user

        if user.is_authenticated():
            selected_datasets = get_selected_datasets_for_user(self.request.user)

            from django.db.models import Prefetch
            qs = Dataset.objects.filter(id__in=selected_datasets).prefetch_related(
                Prefetch(
                    "userprofile_set",
                    queryset=UserProfile.objects.filter(user=user),
                    to_attr="user"
                )
            )

            # warm the guardian object-permission cache for all datasets at once
            checker = ObjectPermissionChecker(user)
            checker.prefetch_perms(qs)

            for dataset in qs:
                checker.has_perm('view_dataset', dataset)

            return qs
        else:
            # User is not authenticated
            return None
class HandshapeListView(ListView):
    """Overview and search of Handshape objects.

    ``search_type`` is either 'handshape' (list/search Handshape objects)
    or 'sign_handshape' (the queryset is converted into the glosses that
    use the matching handshapes).
    """
    model = Handshape
    template_name = 'dictionary/admin_handshape_list.html'
    search_type = 'handshape'

    def get_context_data(self, **kwargs):
        """Add the search form, counts and translated choice lists to the context."""
        # Call the base implementation first to get a context
        context = super(HandshapeListView, self).get_context_data(**kwargs)

        # Add in a QuerySet of all the books
        search_form = HandshapeSearchForm(self.request.GET)

        # Retrieve the search_type,so that we know whether the search should be restricted to Gloss or not
        if 'search_type' in self.request.GET:
            self.search_type = self.request.GET['search_type']
        else:
            self.search_type = 'handshape'

        # self.request.session['search_type'] = self.search_type

        context['searchform'] = search_form
        context['search_type'] = self.search_type

        # if self.search_type == 'sign_handshape':
        #     context['glosscount'] = Gloss.none_morpheme_objects().count()  # Only count the none-morpheme glosses
        # else:
        #     context['glosscount'] = Gloss.objects.count()  # Count the glosses + morphemes

        context['handshapefieldchoicecount'] = FieldChoice.objects.filter(field__iexact='Handshape').count()

        selected_datasets = get_selected_datasets_for_user(self.request.user)
        context['selected_datasets'] = selected_datasets
        context['signscount'] = Gloss.objects.filter(lemma__dataset__in=selected_datasets).count()

        context['HANDSHAPE_RESULT_FIELDS'] = settings.HANDSHAPE_RESULT_FIELDS

        context['handshape_fields_FS1'] = []

        context['choice_lists'] = {}

        for field in FIELDS['handshape']:
            # Get and save the choice list for this field
            fieldchoice_category = fieldname_to_category(field)
            choice_list = FieldChoice.objects.filter(field__iexact=fieldchoice_category).order_by('machine_value')

            if len(choice_list) > 0:
                context['choice_lists'][field] = choicelist_queryset_to_translated_dict(choice_list,
                                                                                        self.request.LANGUAGE_CODE, id_prefix='')

        # the template consumes the choice lists as JSON
        context['choice_lists'] = json.dumps(context['choice_lists'])

        context['handshapescount'] = Handshape.objects.count()

        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages

        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
        else:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = False

        return context

    def render_to_response(self, context):
        """Render as CSV when 'format=CSV' is requested, otherwise as HTML."""
        # Look for a 'format=json' GET argument
        if self.request.GET.get('format') == 'CSV':
            return self.render_to_csv_response(context)
        else:
            return super(HandshapeListView, self).render_to_response(context)

    def render_to_csv_response(self, context):
        """Return the handshape search results as a CSV attachment.

        Raises PermissionDenied when the user lacks 'dictionary.export_csv'.
        Only the 'handshape' search type produces CSV rows.
        """
        if not self.request.user.has_perm('dictionary.export_csv'):
            raise PermissionDenied

        # Create the HttpResponse object with the appropriate CSV header.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="dictionary-export-handshapes.csv"'

        writer = csv.writer(response)

        if self.search_type and self.search_type == 'handshape':
            writer = write_csv_for_handshapes(self, writer)
        else:
            print('search type is sign')

        return response

    def get_queryset(self):
        """Build the handshape (or gloss) queryset from the GET parameters.

        NOTE(review): besides building a queryset, this method also creates
        Handshape objects for FieldChoice entries that lack one and stores
        the search results and search type in the session -- side effects
        to be aware of when calling it.
        """
        choice_lists = {}
        for field in FIELDS['handshape']:
            # Get and save the choice list for this field
            fieldchoice_category = fieldname_to_category(field)
            choice_list = FieldChoice.objects.filter(field__iexact=fieldchoice_category).order_by('machine_value')
            if len(choice_list) > 0:
                choice_lists[field] = choicelist_queryset_to_translated_dict(choice_list,
                                                                             self.request.LANGUAGE_CODE, id_prefix='')

        # get query terms from self.request
        get = self.request.GET

        #Then check what kind of stuff we want
        if 'search_type' in get:
            self.search_type = get['search_type']
        else:
            self.search_type = 'handshape'

        setattr(self.request, 'search_type', self.search_type)

        qs = Handshape.objects.all().order_by('machine_value')

        handshapes = FieldChoice.objects.filter(field__iexact='Handshape')

        # Find out if any Handshapes exist for which no Handshape object has been created
        existing_handshape_objects_machine_values = [ o.machine_value for o in qs ]

        new_handshape_created = 0

        for h in handshapes:
            if h.machine_value in existing_handshape_objects_machine_values:
                pass
            else:
                # create a new Handshape object
                new_id = h.machine_value
                new_machine_value = h.machine_value
                new_english_name = h.english_name
                new_dutch_name = h.dutch_name
                new_chinese_name = h.chinese_name

                new_handshape = Handshape(machine_value=new_machine_value, english_name=new_english_name,
                                          dutch_name=new_dutch_name, chinese_name=new_chinese_name)
                new_handshape.save()
                new_handshape_created = 1

        if new_handshape_created: # if a new Handshape object was created, reload the query result
            qs = Handshape.objects.all().order_by('machine_value')

        fieldnames = ['machine_value', 'english_name', 'dutch_name', 'chinese_name']+FIELDS['handshape']

        ## phonology and semantics field filters
        for fieldname in fieldnames:

            if fieldname in get:
                key = fieldname + '__exact'
                val = get[fieldname]

                if fieldname == 'hsNumSel' and val != '':
                    # hsNumSel matches either the annotated sum of the five
                    # selected-finger flags or the hsNumSel field itself
                    fieldlabel = choice_lists[fieldname][val]

                    if fieldlabel == 'one':
                        qs = qs.annotate(
                            count_fs1=ExpressionWrapper(F('fsT') + F('fsI') + F('fsM') + F('fsR') + F('fsP'),
                                                        output_field=IntegerField())).filter(Q(count_fs1__exact=1) | Q(hsNumSel=val))
                    elif fieldlabel == 'two':
                        qs = qs.annotate(
                            count_fs1=ExpressionWrapper(F('fsT') + F('fsI') + F('fsM') + F('fsR') + F('fsP'),
                                                        output_field=IntegerField())).filter(Q(count_fs1__exact=2) | Q(hsNumSel=val))
                    elif fieldlabel == 'three':
                        qs = qs.annotate(
                            count_fs1=ExpressionWrapper(F('fsT') + F('fsI') + F('fsM') + F('fsR') + F('fsP'),
                                                        output_field=IntegerField())).filter(Q(count_fs1__exact=3) | Q(hsNumSel=val))
                    elif fieldlabel == 'four':
                        qs = qs.annotate(
                            count_fs1=ExpressionWrapper(F('fsT') + F('fsI') + F('fsM') + F('fsR') + F('fsP'),
                                                        output_field=IntegerField())).filter(Q(count_fs1__exact=4) | Q(hsNumSel=val))
                    elif fieldlabel == 'all':
                        qs = qs.annotate(
                            count_fs1=ExpressionWrapper(F('fsT') + F('fsI') + F('fsM') + F('fsR') + F('fsP'),
                                                        output_field=IntegerField())).filter(Q(count_fs1__gt=4) | Q(hsNumSel=val))

                if isinstance(Handshape._meta.get_field(fieldname), NullBooleanField):
                    val = {'0': False, '1': True, 'True': True, 'False': False, 'None': '', '': '' }[val]

                if self.request.LANGUAGE_CODE == 'nl' and fieldname == 'dutch_name' and val != '':
                    query = Q(dutch_name__icontains=val)
                    qs = qs.filter(query)

                if self.request.LANGUAGE_CODE == 'zh-hans' and fieldname == 'chinese_name' and val != '':
                    query = Q(chinese_name__icontains=val)
                    qs = qs.filter(query)

                if fieldname == 'english_name' and val != '':
                    query = Q(english_name__icontains=val)
                    qs = qs.filter(query)

                if val != '' and fieldname != 'hsNumSel' and fieldname != 'dutch_name' and fieldname != 'chinese_name' and fieldname != 'english_name':
                    kwargs = {key: val}
                    qs = qs.filter(**kwargs)

        # Handshape searching of signs relies on using the search_results in order to search signs that have the handshapes
        # The search_results is no longer set to None
        # Make sure that the QuerySet has filters applied (user is searching for something instead of showing all results [objects.all()])
        if hasattr(qs.query.where, 'children') and len(qs.query.where.children) > 0:
            items = []

            for item in qs:
                if self.request.LANGUAGE_CODE == 'nl':
                    items.append(dict(id = item.machine_value, handshape = item.dutch_name))
                elif self.request.LANGUAGE_CODE == 'zh-hans':
                    items.append(dict(id = item.machine_value, handshape = item.chinese_name))
                else:
                    items.append(dict(id = item.machine_value, handshape = item.english_name))

            self.request.session['search_results'] = items

        if ('sortOrder' in get and get['sortOrder'] != 'machine_value'):
            # User has toggled the sort order for the column
            qs = order_handshape_queryset_by_sort_order(self.request.GET, qs)
        else:
            # The default is to order the signs alphabetically by whether there is an angle bracket
            qs = order_handshape_by_angle(qs, self.request.LANGUAGE_CODE)

        if self.search_type == 'sign_handshape':

            # search for signs with found handshapes
            # find relevant machine values for handshapes
            selected_handshapes = [ h.machine_value for h in qs ]
            selected_datasets = get_selected_datasets_for_user(self.request.user)

            # when every handshape matched, also include glosses whose
            # handshape is null or '0'
            if len(selected_handshapes) == (Handshape.objects.all().count()):
                qs = Gloss.objects.filter(lemma__dataset__in=selected_datasets).filter(Q(domhndsh__in=selected_handshapes)
                                            | Q(domhndsh__isnull=True) | Q(domhndsh__exact='0')
                                            | Q(subhndsh__in=selected_handshapes) | Q(subhndsh__isnull=True) | Q(subhndsh__exact='0'))
            else:
                qs = Gloss.objects.filter(lemma__dataset__in=selected_datasets).filter(Q(domhndsh__in=selected_handshapes) | Q(subhndsh__in=selected_handshapes))

        self.request.session['search_type'] = self.search_type

        return qs
class DatasetListView(ListView):
    """Overview of datasets.

    Supports two special GET actions: requesting view access to a dataset
    (mails the dataset managers) and exporting a dataset's ECV file.
    """
    model = Dataset
    # set the default dataset, this should not be empty
    dataset_name = DEFAULT_DATASET

    def get_context_data(self, **kwargs):
        """Add dataset languages and interface options to the context."""
        # Call the base implementation first to get a context
        context = super(DatasetListView, self).get_context_data(**kwargs)
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
        else:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
        return context

    def get_template_names(self):
        """Use the 'select' template when the url captured a 'select' kwarg."""
        if 'select' in self.kwargs:
            return ['dictionary/admin_dataset_select_list.html']
        return ['dictionary/admin_dataset_list.html']

    def render_to_response(self, context):
        """Dispatch ECV export and view-access requests, otherwise render the list."""
        if self.request.GET.get('export_ecv') == 'ECV':
            return self.render_to_ecv_export_response(context)
        elif self.request.GET.get('request_view_access') == 'VIEW':
            return self.render_to_request_response(context)
        else:
            return super(DatasetListView, self).render_to_response(context)

    def render_to_request_response(self, context):
        """Handle a user's request for view access to a dataset.

        Sends an email to every owner in the 'Dataset_Manager' group and
        always redirects back to the available-datasets page with a status
        message.
        """
        # check that the user is logged in
        if self.request.user.is_authenticated():
            pass
        else:
            messages.add_message(self.request, messages.ERROR, ('Please login to use this functionality.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')

        # if the dataset is specified in the url parameters, set the dataset_name variable
        get = self.request.GET
        if 'dataset_name' in get:
            self.dataset_name = get['dataset_name']
        if self.dataset_name == '':
            messages.add_message(self.request, messages.ERROR, ('Dataset name must be non-empty.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')

        try:
            dataset_object = Dataset.objects.get(name=self.dataset_name)
        except:
            messages.add_message(self.request, messages.ERROR, ('No dataset with name '+self.dataset_name+' found.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')

        # The request only makes sense if the user does NOT already have view
        # permission for this dataset.
        # BUG FIX: the previous check (`if user_view_datasets and not
        # dataset_object in user_view_datasets`) wrongly told users with no
        # view permission on ANY dataset that they could "already view" this
        # one; test membership of this dataset directly instead.
        user_view_datasets = get_objects_for_user(self.request.user, 'view_dataset', Dataset, accept_global_perms=False)
        if dataset_object in user_view_datasets:
            # this should not happen from the html page. the check is made to catch a user adding a parameter to the url
            messages.add_message(self.request, messages.INFO, ('You can already view this dataset.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')

        motivation = ''
        if 'motivation_for_use' in get:
            motivation = get['motivation_for_use']  # motivation is a required field in the form

        from django.contrib.auth.models import Group, User

        group_manager = Group.objects.get(name='Dataset_Manager')

        owners_of_dataset = dataset_object.owners.all()
        dataset_manager_found = False
        for owner in owners_of_dataset:
            groups_of_user = owner.groups.all()
            if not group_manager in groups_of_user:
                # this owner can't manage users
                continue
            dataset_manager_found = True
            # send email to the dataset manager
            from django.core.mail import send_mail
            current_site = Site.objects.get_current()
            subject = render_to_string('registration/dataset_access_email_subject.txt',
                                       context={'dataset': dataset_object.name,
                                                'site': current_site})
            # Email subject *must not* contain newlines
            subject = ''.join(subject.splitlines())
            message = render_to_string('registration/dataset_access_request_email.txt',
                                       context={'user': self.request.user,
                                                'dataset': dataset_object.name,
                                                'motivation': motivation,
                                                'site': current_site})
            send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [owner.email])

        if not dataset_manager_found:
            messages.add_message(self.request, messages.ERROR, ('No dataset manager has been found for '+dataset_object.name+'. Your request could not be submitted.'))
        else:
            messages.add_message(self.request, messages.INFO, ('Your request for view access to dataset '+dataset_object.name+' has been submitted.'))
        return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')

    def render_to_ecv_export_response(self, context):
        """Regenerate the ECV file for a dataset.

        Requires an authenticated user with guardian 'change_dataset'
        permission on the dataset; always redirects to the dataset view.
        """
        # check that the user is logged in
        if self.request.user.is_authenticated():
            pass
        else:
            messages.add_message(self.request, messages.ERROR, ('Please login to use this functionality.'))
            return HttpResponseRedirect(reverse('admin_dataset_view'))

        # if the dataset is specified in the url parameters, set the dataset_name variable
        get = self.request.GET
        if 'dataset_name' in get:
            self.dataset_name = get['dataset_name']
        if self.dataset_name == '':
            messages.add_message(self.request, messages.ERROR, ('Dataset name must be non-empty.'))
            return HttpResponseRedirect(reverse('admin_dataset_view'))

        try:
            dataset_object = Dataset.objects.get(name=self.dataset_name)
        except:
            messages.add_message(self.request, messages.ERROR, ('No dataset with name '+self.dataset_name+' found.'))
            return HttpResponseRedirect(reverse('admin_dataset_view'))

        # make sure the user can write to this dataset
        user_change_datasets = get_objects_for_user(self.request.user, 'change_dataset', Dataset, accept_global_perms=False)
        if user_change_datasets and dataset_object in user_change_datasets:
            pass
        else:
            messages.add_message(self.request, messages.ERROR, ('No permission to export dataset.'))
            return HttpResponseRedirect(reverse('admin_dataset_view'))

        # if we get to here, the user is authenticated and has permission to export the dataset
        # the return value of the writer is not needed here
        write_ecv_file_for_dataset(self.dataset_name)

        messages.add_message(self.request, messages.INFO, ('ECV ' + self.dataset_name + ' successfully updated.'))
        return HttpResponseRedirect(reverse('admin_dataset_view'))

    def get_queryset(self):
        """Return all datasets (per-user profile prefetched, guardian
        permission cache warmed) annotated with gloss counts and ordered by
        name; None for anonymous users."""
        user = self.request.user

        # get query terms from self.request
        get = self.request.GET
        # Then check what kind of stuff we want
        if 'dataset_name' in get:
            self.dataset_name = get['dataset_name']
        # otherwise the default dataset_name DEFAULT_DATASET is used
        setattr(self.request, 'dataset_name', self.dataset_name)

        if user.is_authenticated():
            from django.db.models import Prefetch
            qs = Dataset.objects.all().prefetch_related(
                Prefetch(
                    "userprofile_set",
                    queryset=UserProfile.objects.filter(user=user),
                    to_attr="user"
                )
            )
            # warm the guardian object-permission cache for all datasets at once
            checker = ObjectPermissionChecker(user)
            checker.prefetch_perms(qs)
            for dataset in qs:
                checker.has_perm('view_dataset', dataset)
            qs = qs.annotate(Count('lemmaidgloss__gloss')).order_by('name')
            return qs
        else:
            # User is not authenticated
            return None
class DatasetManagerView(ListView):
    """Management page for dataset permissions and settings.

    Lets members of the 'Dataset_Manager' group grant or revoke view/change
    permissions on a dataset for other users and set the dataset's default
    language.
    """
    model = Dataset
    template_name = 'dictionary/admin_dataset_manager.html'
    # set the default dataset, this should not be empty
    dataset_name = DEFAULT_DATASET
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(DatasetManagerView, self).get_context_data(**kwargs)
selected_datasets = get_selected_datasets_for_user(self.request.user)
dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
context['dataset_languages'] = dataset_languages
default_language_choice_dict = dict()
for language in dataset_languages:
default_language_choice_dict[language.name] = language.name
context['default_language_choice_list'] = json.dumps(default_language_choice_dict)
if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
else:
context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
return context
def render_to_response(self, context):
if 'add_view_perm' in self.request.GET or 'add_change_perm' in self.request.GET \
or 'delete_view_perm' in self.request.GET or 'delete_change_perm' in self.request.GET:
return self.render_to_add_user_response(context)
elif 'default_language' in self.request.GET:
return self.render_to_set_default_language()
else:
return super(DatasetManagerView, self).render_to_response(context)
    def check_user_permissions_for_managing_dataset(self, dataset_object):
        """
        Checks whether the logged in user has permission to manage the dataset object.

        The user must (1) be authenticated, (2) belong to the
        'Dataset_Manager' group, and (3) have the guardian 'change_dataset'
        permission on this particular dataset.

        :return: None when all checks pass, otherwise an HttpResponseRedirect
                 to the dataset manager page (with an error message queued)
        """
        # check that the user is logged in
        if self.request.user.is_authenticated():
            pass
        else:
            messages.add_message(self.request, messages.ERROR, ('Please login to use this functionality.'))
            return HttpResponseRedirect(reverse('admin_dataset_manager'))

        # check if the user can manage this dataset
        from django.contrib.auth.models import Group, User

        try:
            group_manager = Group.objects.get(name='Dataset_Manager')
        except:
            # NOTE(review): bare except; any lookup failure is reported as a
            # missing group
            messages.add_message(self.request, messages.ERROR, ('No group Dataset_Manager found.'))
            return HttpResponseRedirect(reverse('admin_dataset_manager'))

        groups_of_user = self.request.user.groups.all()
        if not group_manager in groups_of_user:
            messages.add_message(self.request, messages.ERROR,
                                 ('You must be in group Dataset Manager to modify dataset permissions.'))
            return HttpResponseRedirect(reverse('admin_dataset_manager'))

        # make sure the user can write to this dataset
        # from guardian.shortcuts import get_objects_for_user
        user_change_datasets = get_objects_for_user(self.request.user, 'change_dataset', Dataset,
                                                    accept_global_perms=False)
        if user_change_datasets and dataset_object in user_change_datasets:
            pass
        else:
            messages.add_message(self.request, messages.ERROR, ('No permission to modify dataset permissions.'))
            return HttpResponseRedirect(reverse('admin_dataset_manager'))

        # Everything is alright
        return None
    def get_dataset_from_request(self):
        """
        Use the 'dataset_name' GET query string parameter to find a dataset object.

        Falls back to ``self.dataset_name`` when the parameter is absent.

        :return: tuple of a dataset object and HttpResponse in which either is None
        """
        # if the dataset is specified in the url parameters, set the dataset_name variable
        get = self.request.GET
        if 'dataset_name' in get:
            self.dataset_name = get['dataset_name']
        if self.dataset_name == '':
            messages.add_message(self.request, messages.ERROR, ('Dataset name must be non-empty.'))
            return None, HttpResponseRedirect(reverse('admin_dataset_manager'))
        try:
            return Dataset.objects.get(name=self.dataset_name), None
        except:
            # NOTE(review): bare except also hides errors other than
            # Dataset.DoesNotExist behind the "not found" message
            messages.add_message(self.request, messages.ERROR,
                                 ('No dataset with name ' + self.dataset_name + ' found.'))
            return None, HttpResponseRedirect(reverse('admin_dataset_manager'))
    def get_user_from_request(self):
        """
        Use the 'username' GET query string parameter to find a user object.

        :return: tuple of a User object and HttpResponse in which either is None
        """
        get = self.request.GET
        username = ''
        if 'username' in get:
            username = get['username']
        if username == '':
            messages.add_message(self.request, messages.ERROR,
                                 ('Username must be non-empty. Please make a selection using the drop-down list.'))
            return None, HttpResponseRedirect(reverse('admin_dataset_manager'))
        try:
            return User.objects.get(username=username), None
        except:
            # NOTE(review): bare except also hides errors other than
            # User.DoesNotExist behind the "not found" message
            messages.add_message(self.request, messages.ERROR, ('No user with name ' + username + ' found.'))
            return None, HttpResponseRedirect(reverse('admin_dataset_manager'))
    def render_to_set_default_language(self):
        """
        Sets the default language for a dataset.

        The dataset comes from the 'dataset_name' GET parameter and the
        language from the 'default_language' GET parameter (a Language id);
        the language must be one of the dataset's translation languages.

        :return: a HttpResponse object (always a redirect to the manager page)
        """
        dataset_object, response = self.get_dataset_from_request()
        if response:
            return response
        response = self.check_user_permissions_for_managing_dataset(dataset_object)
        if response:
            return response
        try:
            language = Language.objects.get(id=self.request.GET['default_language'])
            if language in dataset_object.translation_languages.all():
                dataset_object.default_language = language
                dataset_object.save()
                messages.add_message(self.request, messages.INFO,
                                     ('The default language of {} is set to {}.'
                                      .format(dataset_object.acronym, language.name)))
            else:
                messages.add_message(self.request, messages.INFO,
                                     ('{} is not in the set of languages of dataset {}.'
                                      .format(language.name, dataset_object.acronym)))
        except:
            # NOTE(review): this broad except covers the Language lookup, the
            # save and the message formatting; any failure is reported with
            # the generic message below
            messages.add_message(self.request, messages.ERROR,
                                 ('Something went wrong setting the default language for '
                                  + dataset_object.acronym))
        return HttpResponseRedirect(reverse('admin_dataset_manager'))
def render_to_add_user_response(self, context):
dataset_object, response = self.get_dataset_from_request()
if response:
return response
response = self.check_user_permissions_for_managing_dataset(dataset_object)
if response:
return response
user_object, response = self.get_user_from_request()
if response:
return response
username = user_object.username
# user has permission to modify dataset permissions for other users
manage_identifier = 'dataset_' + dataset_object.acronym.replace(' ','')
from guardian.shortcuts import assign_perm, remove_perm
if 'add_view_perm' in self.request.GET:
manage_identifier += '_manage_view'
if dataset_object in get_objects_for_user(user_object, 'view_dataset', Dataset, accept_global_perms=False):
if user_object.is_staff or user_object.is_superuser:
messages.add_message(self.request, messages.INFO,
('User ' + username + ' (' + user_object.first_name + ' ' + user_object.last_name +
') already has view permission for this dataset as staff or superuser.'))
else:
messages.add_message(self.request, messages.INFO,
('User ' + username + ' (' + user_object.first_name + ' ' + user_object.last_name +
') already has view permission for this dataset.'))
return HttpResponseRedirect(reverse('admin_dataset_manager')+'?'+manage_identifier)
try:
assign_perm('view_dataset', user_object, dataset_object)
messages.add_message(self.request, messages.INFO,
('View permission for user ' + username + ' (' + user_object.first_name + ' ' + user_object.last_name + ') successfully granted.'))
if not user_object.is_active:
user_object.is_active = True
assign_perm('dictionary.search_gloss', user_object)
user_object.save()
# send email to user
from django.core.mail import send_mail
current_site = Site.objects.get_current()
subject = render_to_string('registration/dataset_access_granted_email_subject.txt',
context={'dataset': dataset_object.name,
'site': current_site})
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('registration/dataset_access_granted_email.txt',
context={'dataset': dataset_object.name,
'site': current_site})
# for debug purposes on local machine
# print('grant access subject: ', subject)
# print('message: ', message)
# print('user email: ', user_object.email)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [user_object.email])
except:
messages.add_message(self.request, messages.ERROR, ('Error assigning view dataset permission to user '+username+'.'))
return HttpResponseRedirect(reverse('admin_dataset_manager')+'?'+manage_identifier)
if 'add_change_perm' in self.request.GET:
manage_identifier += '_manage_change'
if dataset_object in get_objects_for_user(user_object, 'change_dataset', Dataset, accept_global_perms=False):
if user_object.is_staff or user_object.is_superuser:
messages.add_message(self.request, messages.INFO,
(
'User ' + username + ' (' + user_object.first_name + ' ' + user_object.last_name +
') already has change permission for this dataset as staff or superuser.'))
else:
messages.add_message(self.request, messages.INFO,
('User ' + username + ' (' + user_object.first_name + ' ' + user_object.last_name +
') already has change permission for this dataset.'))
return HttpResponseRedirect(reverse('admin_dataset_manager') + '?' + manage_identifier)
if not dataset_object in get_objects_for_user(user_object, 'view_dataset', Dataset, accept_global_perms=False):
messages.add_message(self.request, messages.WARNING,
(
'User ' + username + ' (' + user_object.first_name + ' ' + user_object.last_name +
') does not have view permission for this dataset. Please grant view permission first.'))
# open Manage View Dataset pane instead of Manage Change Dataset
manage_identifier = 'dataset_' + dataset_object.acronym.replace(' ', '')
manage_identifier += '_manage_view'
return HttpResponseRedirect(reverse('admin_dataset_manager') + '?' + manage_identifier)
try:
assign_perm('change_dataset', user_object, dataset_object)
# send email to new user
# probably don't want to assign change permission to new users
messages.add_message(self.request, messages.INFO,
('Change permission for user ' + username + ' successfully granted.'))
except:
messages.add_message(self.request, messages.ERROR, ('Error assigning change dataset permission to user '+username+'.'))
return HttpResponseRedirect(reverse('admin_dataset_manager') + '?' + manage_identifier)
if 'delete_view_perm' in self.request.GET:
manage_identifier += '_manage_view'
if dataset_object in get_objects_for_user(user_object, 'view_dataset', Dataset, accept_global_perms=False):
if user_object.is_staff or user_object.is_superuser:
messages.add_message(self.request, messages.ERROR,
(
'User ' + username + ' (' + user_object.first_name + ' ' + user_object.last_name +
') has view permission for this dataset as staff or superuser. This cannot be modified here.'))
else:
# can remove permission
try:
# also need to remove change_dataset perm in this case
from guardian.shortcuts import remove_perm
remove_perm('view_dataset', user_object, dataset_object)
remove_perm('change_dataset', user_object, dataset_object)
messages.add_message(self.request, messages.INFO,
('View (and change) permission for user ' + username + ' successfully revoked.'))
except:
messages.add_message(self.request, messages.ERROR,
('Error revoking view dataset permission for user ' + username + '.'))
return HttpResponseRedirect(reverse('admin_dataset_manager') + '?' + manage_identifier)
else:
messages.add_message(self.request, messages.ERROR, ('User '+username+' currently has no permission to view this dataset.'))
return HttpResponseRedirect(reverse('admin_dataset_manager') + '?' + manage_identifier)
if 'delete_change_perm' in self.request.GET:
manage_identifier += '_manage_change'
if dataset_object in get_objects_for_user(user_object, 'change_dataset', Dataset, accept_global_perms=False):
if user_object.is_staff or user_object.is_superuser:
messages.add_message(self.request, messages.ERROR,
(
'User ' + username + ' (' + user_object.first_name + ' ' + user_object.last_name +
') has change permission for this dataset as staff or superuser. This cannot be modified here.'))
else:
# can remove permission
try:
remove_perm('change_dataset', user_object, dataset_object)
messages.add_message(self.request, messages.INFO,
('Change permission for user ' + username + ' successfully revoked.'))
except:
messages.add_message(self.request, messages.ERROR,
('Error revoking change dataset permission for user ' + username + '.'))
return HttpResponseRedirect(reverse('admin_dataset_manager') + '?' + manage_identifier)
else:
messages.add_message(self.request, messages.ERROR, ('User '+username+' currently has no permission to change this dataset.'))
return HttpResponseRedirect(reverse('admin_dataset_manager') + '?' + manage_identifier)
# the code doesn't seem to get here. if somebody puts something else in the url (else case), there is no (hidden) csrf token.
messages.add_message(self.request, messages.ERROR, ('Unrecognised argument to dataset manager url.'))
return HttpResponseRedirect(reverse('admin_dataset_manager'))
def get_queryset(self):
user = self.request.user
# get query terms from self.request
get = self.request.GET
# Then check what kind of stuff we want
if 'dataset_name' in get:
self.dataset_name = get['dataset_name']
# otherwise the default dataset_name DEFAULT_DATASET is used
setattr(self.request, 'dataset_name', self.dataset_name)
if user.is_authenticated():
# determine if user is a dataset manager
from django.contrib.auth.models import Group, User
try:
group_manager = Group.objects.get(name='Dataset_Manager')
except:
messages.add_message(self.request, messages.ERROR, ('No group Dataset_Manager found.'))
return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
groups_of_user = self.request.user.groups.all()
if not group_manager in groups_of_user:
return None
from django.db.models import Prefetch
qs = Dataset.objects.all().prefetch_related(
Prefetch(
"userprofile_set",
queryset=UserProfile.objects.filter(user=user),
to_attr="user"
)
)
checker = ObjectPermissionChecker(user)
checker.prefetch_perms(qs)
for dataset in qs:
checker.has_perm('change_dataset', dataset)
return qs
else:
# User is not authenticated
return None
class DatasetDetailView(DetailView):
    """Detail page for a single Dataset.

    Also handles the 'add_owner' management action, triggered via a GET
    parameter, which adds a user as (co-)owner of a dataset.
    """
    model = Dataset
    context_object_name = 'dataset'
    template_name = 'dictionary/dataset_detail.html'
    # set the default dataset, this should not be empty
    dataset_name = DEFAULT_DATASET

    # Overriding the get method get permissions right
    def get(self, request, *args, **kwargs):
        """Fetch the dataset; any lookup failure is reported as a 404."""
        try:
            self.object = self.get_object()
        except Exception:
            # narrowed from a bare except; any lookup problem becomes a 404
            raise Http404()
        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        """Add the dataset update form, translation-language choices and
        dataset interface settings to the context."""
        # Call the base implementation first to get a context
        context = super(DatasetDetailView, self).get_context_data(**kwargs)
        dataset = context['dataset']
        translation_languages = dataset.translation_languages.all()
        default_language_choice_dict = dict()
        for language in translation_languages:
            default_language_choice_dict[language.name] = language.name
        context['default_language_choice_list'] = json.dumps(default_language_choice_dict)
        datasetform = DatasetUpdateForm(languages=context['default_language_choice_list'])
        context['datasetform'] = datasetform
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        context['SHOW_DATASET_INTERFACE_OPTIONS'] = getattr(
            settings, 'SHOW_DATASET_INTERFACE_OPTIONS', False)
        return context

    def render_to_response(self, context):
        # the 'add_owner' GET parameter switches to the owner-management action
        if 'add_owner' in self.request.GET:
            return self.render_to_add_owner_response(context)
        else:
            return super(DatasetDetailView, self).render_to_response(context)

    def render_to_add_owner_response(self, context):
        """Add the user named in the 'username' GET parameter as (co-)owner of
        the dataset named in 'dataset_name'; requires membership of the
        Dataset_Manager group. Redirects with a message on every outcome."""
        # check that the user is logged in
        if not self.request.user.is_authenticated():
            messages.add_message(self.request, messages.ERROR, ('Please login to use this functionality.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        # check if the user can manage this dataset
        from django.contrib.auth.models import Group, User
        try:
            group_manager = Group.objects.get(name='Dataset_Manager')
        except Group.DoesNotExist:
            messages.add_message(self.request, messages.ERROR, ('No group Dataset_Manager found.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        groups_of_user = self.request.user.groups.all()
        if group_manager not in groups_of_user:
            messages.add_message(self.request, messages.ERROR, ('You must be in group Dataset Manager to modify dataset permissions.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        # if the dataset is specified in the url parameters, set the dataset_name variable
        get = self.request.GET
        if 'dataset_name' in get:
            self.dataset_name = get['dataset_name']
        if self.dataset_name == '':
            messages.add_message(self.request, messages.ERROR, ('Dataset name must be non-empty.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        try:
            dataset_object = Dataset.objects.get(name=self.dataset_name)
        except Dataset.DoesNotExist:
            messages.add_message(self.request, messages.ERROR, ('No dataset with name '+self.dataset_name+' found.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        username = ''
        if 'username' in get:
            username = get['username']
        if username == '':
            messages.add_message(self.request, messages.ERROR, ('Username must be non-empty.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        try:
            user_object = User.objects.get(username=username)
        except User.DoesNotExist:
            messages.add_message(self.request, messages.ERROR, ('No user with name '+username+' found.'))
            return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/available')
        # if we get to here, we have a dataset object and a user object to add as an owner of the dataset
        dataset_object.owners.add(user_object)
        dataset_object.save()
        messages.add_message(self.request, messages.INFO,
                             ('User ' + username + ' successfully made (co-)owner of this dataset.'))
        return HttpResponseRedirect(URL + settings.PREFIX_URL + '/datasets/detail/' + str(dataset_object.id))
def dataset_field_choices_view(request):
    """Render the overview page of field choices, with the choices each
    user-changeable dataset excludes."""
    ordered_choices = sorted(FieldChoice.objects.all(),
                             key=lambda fc: (fc.field, fc.english_name))
    editable_datasets = get_objects_for_user(request.user, 'change_dataset', Dataset, accept_global_perms=False)
    context = {
        'field_choices': ordered_choices,
        'datasets': [(ds, ds.exclude_choices.all()) for ds in editable_datasets],
    }
    return render(request, 'dictionary/dataset_field_choices.html', context)
def order_handshape_queryset_by_sort_order(get, qs):
    """Change the sort-order of the query set, depending on the form field [sortOrder]

    This function is used both by HandshapeListView.
    The value of [sortOrder] is 'machine_value' by default.
    [sortOrder] is a hidden field inside the "adminsearch" html form in the template admin_handshape_list.html
    Its value is changed by clicking the up/down buttons in the second row of the search result table
    """
    def lookup_labels(tuple_list, number):
        """Collect the string values paired with *number* in a (number, string) tuple list."""
        return [pair[1] for pair in tuple_list if pair[0] == number]

    def order_by_choice_list(queryset, order_field, choice_list_name):
        """Order *queryset* on *order_field*, whose values index the choice list *choice_list_name*."""
        choice_tuples = build_choice_list(choice_list_name)
        # a leading '-' requests descending order
        descending = order_field.startswith('-')
        if descending:
            order_field = order_field[1:]
        # alphabetical tuple order (build_choice_list is alphabetical except for values 0,1)
        choice_tuples = sorted(choice_tuples, key=operator.itemgetter(1))
        return sorted(queryset,
                      key=lambda item: lookup_labels(choice_tuples, getattr(item, order_field)),
                      reverse=descending)

    # default sort order when the form supplies none
    sOrder = 'machine_value'
    if 'sortOrder' in get and get['sortOrder'] != '':
        sOrder = get['sortOrder']

    # fields built from a choice list need special treatment; map the
    # field-name suffixes to the choice list that orders them
    choice_list_by_suffix = (
        (('hsThumb',), "Thumb"),
        (('hsFingConf', 'hsFingConf2'), "JointConfiguration"),
        (('hsAperture',), "Aperture"),
        (('hsSpread',), "Spreading"),
        (('hsNumSel',), "Quantity"),
        (('hsFingSel', 'hsFingSel2', 'hsFingUnsel'), "FingerSelection"),
    )
    for suffixes, list_name in choice_list_by_suffix:
        if sOrder.endswith(suffixes):
            return order_by_choice_list(qs, sOrder, list_name)

    # plain text field: straightforward ordering, letters before non-letters
    descending = sOrder.startswith('-')
    if descending:
        sOrder = sOrder[1:]
    starts_with_letter = qs.filter(**{sOrder + '__regex': r'^[a-zA-Z]'})
    starts_with_other = qs.filter(**{sOrder + '__regex': r'^[^a-zA-Z]'})
    ordered = sorted(starts_with_letter, key=lambda item: getattr(item, sOrder))
    ordered += sorted(starts_with_other, key=lambda item: getattr(item, sOrder))
    if descending:
        ordered.reverse()
    return ordered
def order_handshape_by_angle(qs, language_code):
    """Sort handshapes by name in the interface language, listing names that
    contain an angle bracket '>' after the plain names."""
    if language_code == 'nl':
        name_field = 'dutch_name'
        plain_pattern = r'^[^>]+$'
    elif language_code == 'zh-hans':
        name_field = 'chinese_name'
        # NOTE(review): this branch uses '*' where the others use '+', so empty
        # Chinese names are kept here but dropped elsewhere — confirm intended.
        plain_pattern = r'^[^>]*$'
    else:
        name_field = 'english_name'
        plain_pattern = r'^[^>]+$'
    plain = qs.filter(**{name_field + '__regex': plain_pattern})
    with_angle = qs.filter(**{name_field + '__regex': r'^.+>.+$'})
    name_of = lambda handshape: getattr(handshape, name_field)
    return sorted(plain, key=name_of) + sorted(with_angle, key=name_of)
class MorphemeDetailView(DetailView):
    """Detail page for a single Morpheme.

    Enforces dataset view permissions in :meth:`get` and builds the large
    template context (forms, translated choice lists, other media,
    per-language translations) in :meth:`get_context_data`.
    """
    model = Morpheme
    context_object_name = 'morpheme'
    # acronym of the dataset of the morpheme last shown (mirrored in the session)
    last_used_dataset = None

    # Overriding the get method get permissions right
    def get(self, request, *args, **kwargs):
        """Serve the morpheme detail page, enforcing dataset view permission.

        Users without 'view_dataset' permission (and anonymous users) are
        redirected to the public gloss page when the morpheme is in the web
        dictionary, otherwise to the home page / login with a warning.
        """
        try:
            self.object = self.get_object()
        # except Http404:
        except:
            # return custom template
            # return render(request, 'dictionary/warning.html', status=404)
            raise Http404()
        if request.user.is_authenticated():
            if self.object.dataset not in get_objects_for_user(request.user, 'view_dataset', Dataset, accept_global_perms=False):
                if self.object.inWeb:
                    return HttpResponseRedirect(reverse('dictionary:public_gloss', kwargs={'glossid': self.object.pk}))
                else:
                    messages.add_message(request, messages.WARNING, 'You are not allowed to see this morpheme.')
                    return HttpResponseRedirect('/')
        else:
            if self.object.inWeb:
                return HttpResponseRedirect(reverse('dictionary:public_gloss', kwargs={'glossid': self.object.pk}))
            else:
                return HttpResponseRedirect(reverse('registration:auth_login'))
        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        """Build the template context: edit forms, choice lists translated to
        the interface language, other media, annotation idglosses and
        translations per language, and dataset interface settings."""
        # Call the base implementation first to get a context
        context = super(MorphemeDetailView, self).get_context_data(**kwargs)
        # Add in a QuerySet of all the books
        context['tagform'] = TagUpdateForm()
        context['videoform'] = VideoUploadForGlossForm()
        context['imageform'] = ImageUploadForGlossForm()
        context['definitionform'] = DefinitionForm()
        context['relationform'] = RelationForm()
        context['morphologyform'] = MorphemeMorphologyForm()
        context['othermediaform'] = OtherMediaForm()
        context['navigation'] = context['morpheme'].navigation(True)
        context['interpform'] = InterpreterFeedbackForm()
        context['SIGN_NAVIGATION'] = settings.SIGN_NAVIGATION
        # Get the set of all the Gloss signs that point to me
        other_glosses_that_point_to_morpheme = SimultaneousMorphologyDefinition.objects.filter(morpheme_id__exact=context['morpheme'].id)
        context['appears_in'] = []
        word_class_choices = FieldChoice.objects.filter(field__iexact='WordClass')
        for sim_morph in other_glosses_that_point_to_morpheme:
            parent_gloss = sim_morph.parent_gloss
            if parent_gloss.wordClass:
                translated_word_class = machine_value_to_translated_human_value(parent_gloss.wordClass,word_class_choices,self.request.LANGUAGE_CODE)
            else:
                translated_word_class = ''
            context['appears_in'].append((parent_gloss, translated_word_class))
        try:
            # Note: setting idgloss to context['morpheme'] is not enough; the ".idgloss" needs to be specified
            next_morpheme = Morpheme.objects.get(idgloss=context['morpheme'].idgloss).admin_next_morpheme()
        except:
            next_morpheme = None
        # fall back to the morpheme itself when there is no next morpheme
        if next_morpheme == None:
            context['nextmorphemeid'] = context['morpheme'].pk
        else:
            context['nextmorphemeid'] = next_morpheme.pk
        if settings.SIGN_NAVIGATION:
            context['glosscount'] = Morpheme.objects.count()
            context['glossposn'] = Morpheme.objects.filter(sn__lt=context['morpheme'].sn).count() + 1
        # Pass info about which fields we want to see
        gl = context['morpheme']
        labels = gl.field_labels()
        # set a session variable to be able to pass the gloss's id to the ajax_complete method
        # the last_used_dataset name is updated to that of this gloss
        # if a sequesce of glosses are being created by hand, this keeps the dataset setting the same
        if gl.dataset:
            self.request.session['datasetid'] = gl.dataset.id
            self.last_used_dataset = gl.dataset.acronym
        else:
            # NOTE(review): this stores a *language* id under 'datasetid' — confirm intended
            self.request.session['datasetid'] = get_default_language_id()
        self.request.session['last_used_dataset'] = self.last_used_dataset
        context['choice_lists'] = {}
        # Translate the machine values to human values in the correct language, and save the choice lists along the way
        for topic in ['phonology', 'semantics', 'frequency']:
            context[topic + '_fields'] = []
            for field in FIELDS[topic]:
                # Get and save the choice list for this field
                field_category = fieldname_to_category(field)
                choice_list = FieldChoice.objects.filter(field__iexact=field_category)
                if len(choice_list) > 0:
                    context['choice_lists'][field] = choicelist_queryset_to_translated_dict(choice_list,
                                                                                           self.request.LANGUAGE_CODE)
                # Take the human value in the language we are using
                machine_value = getattr(gl, field)
                human_value = machine_value_to_translated_human_value(machine_value,choice_list,self.request.LANGUAGE_CODE)
                # And add the kind of field
                kind = fieldname_to_kind(field)
                context[topic + '_fields'].append([human_value, field, labels[field], kind])
        # Gather the OtherMedia
        context['other_media'] = []
        other_media_type_choice_list = FieldChoice.objects.filter(field__iexact='OthermediaType')
        for other_media in gl.othermedia_set.all():
            human_value_media_type = machine_value_to_translated_human_value(other_media.type,other_media_type_choice_list,self.request.LANGUAGE_CODE)
            path = settings.STATIC_URL + 'othermedia/' + other_media.path
            context['other_media'].append([other_media.pk, path, human_value_media_type, other_media.alternative_gloss])
            # Save the other_media_type choices (same for every other_media, but necessary because they all have other ids)
            context['choice_lists'][
                'other-media-type_' + str(other_media.pk)] = choicelist_queryset_to_translated_dict(
                other_media_type_choice_list, self.request.LANGUAGE_CODE)
        context['choice_lists']['morph_type'] = choicelist_queryset_to_translated_dict(FieldChoice.objects.filter(field__iexact='MorphemeType'),self.request.LANGUAGE_CODE)
        context['choice_lists'] = json.dumps(context['choice_lists'])
        # make lemma group empty for Morpheme (ask Onno about this)
        # Morpheme Detail View shares the gloss_edit.js code with Gloss Detail View
        context['lemma_group'] = False
        context['lemma_group_url'] = ''
        # Put annotation_idgloss per language in the context
        context['annotation_idgloss'] = {}
        if gl.dataset:
            for language in gl.dataset.translation_languages.all():
                context['annotation_idgloss'][language] = gl.annotationidglosstranslation_set.filter(language=language)
        else:
            language = Language.objects.get(id=get_default_language_id())
            context['annotation_idgloss'][language] = gl.annotationidglosstranslation_set.filter(language=language)
        morph_typ_choices = FieldChoice.objects.filter(field__iexact='MorphemeType')
        if gl.mrpType:
            translated_morph_type = machine_value_to_translated_human_value(gl.mrpType,morph_typ_choices,self.request.LANGUAGE_CODE)
        else:
            translated_morph_type = ''
        context['morpheme_type'] = translated_morph_type
        # Put translations (keywords) per language in the context
        context['translations_per_language'] = {}
        if gl.dataset:
            for language in gl.dataset.translation_languages.all():
                context['translations_per_language'][language] = gl.translation_set.filter(language=language)
        else:
            language = Language.objects.get(id=get_default_language_id())
            context['translations_per_language'][language] = gl.translation_set.filter(language=language)
        context['separate_english_idgloss_field'] = SEPARATE_ENGLISH_IDGLOSS_FIELD
        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS') and settings.SHOW_DATASET_INTERFACE_OPTIONS:
            # offer the datasets the user may view as choices for moving the morpheme
            context['dataset_choices'] = {}
            user = self.request.user
            if user.is_authenticated():
                qs = get_objects_for_user(user, 'view_dataset', Dataset, accept_global_perms=False)
                dataset_choices = dict()
                for dataset in qs:
                    dataset_choices[dataset.acronym] = dataset.acronym
                context['dataset_choices'] = json.dumps(dataset_choices)
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = settings.SHOW_DATASET_INTERFACE_OPTIONS
        else:
            context['SHOW_DATASET_INTERFACE_OPTIONS'] = False
        context['lemma_create_field_prefix'] = LemmaCreateForm.lemma_create_field_prefix
        return context
def gloss_ajax_search_results(request):
    """Return a JSON list of glosses that match the previous search stored in
    the session; JSON null when no suitable sign/morpheme search is stored."""
    # session.get avoids the '.keys()' membership anti-pattern; a missing key
    # yields None, which is not in the tuple, so behavior is unchanged
    if request.session.get('search_type') in ('sign', 'morpheme', 'sign_or_morpheme'):
        return HttpResponse(json.dumps(request.session['search_results']))
    else:
        return HttpResponse(json.dumps(None))
def handshape_ajax_search_results(request):
    """Return a JSON list of handshapes that match the previous search stored
    in the session; JSON null when no handshape search is stored."""
    # session.get avoids the '.keys()' membership anti-pattern
    if request.session.get('search_type') == 'handshape':
        return HttpResponse(json.dumps(request.session['search_results']))
    else:
        return HttpResponse(json.dumps(None))
def gloss_ajax_complete(request, prefix):
    """Return a list of glosses matching the search term
    as a JSON structure suitable for typeahead."""
    datasetid = request.session['datasetid']
    # renamed from 'dataset_id': this is a Dataset instance, not an id
    dataset = Dataset.objects.get(id=datasetid)
    query = Q(lemma__lemmaidglosstranslation__text__istartswith=prefix) | \
            Q(annotationidglosstranslation__text__istartswith=prefix) | \
            Q(sn__startswith=prefix)
    qs = Gloss.objects.filter(query).distinct()
    from signbank.tools import convert_language_code_to_2char
    language_code = convert_language_code_to_2char(request.LANGUAGE_CODE)
    result = []
    for g in qs:
        if g.dataset != dataset:
            continue
        default_annotationidglosstranslation = ""
        # .first() instead of .get(): .get() raises DoesNotExist when the gloss
        # has no translation in this language, which crashed the view; .first()
        # returns None so the English fallback below is actually reachable
        annotationidglosstranslation = g.annotationidglosstranslation_set.filter(
            language__language_code_2char=language_code).first()
        if annotationidglosstranslation:
            default_annotationidglosstranslation = annotationidglosstranslation.text
        else:
            annotationidglosstranslation = g.annotationidglosstranslation_set.filter(
                language__language_code_2char='en').first()
            if annotationidglosstranslation:
                default_annotationidglosstranslation = annotationidglosstranslation.text
        result.append({'idgloss': g.idgloss, 'annotation_idgloss': default_annotationidglosstranslation, 'sn': g.sn, 'pk': "%s" % (g.id)})
    return HttpResponse(json.dumps(result), {'content-type': 'application/json'})
def handshape_ajax_complete(request, prefix):
    """Return a list of handshapes matching the search term
    as a JSON structure suitable for typeahead."""
    # pick the name field matching the interface language, defaulting to English
    field_for_language = {'nl': 'dutch_name', 'zh-hans': 'chinese_name'}
    name_field = field_for_language.get(request.LANGUAGE_CODE, 'english_name')
    matches = Handshape.objects.filter(Q(**{name_field + '__istartswith': prefix}))
    result = [{'dutch_name': hs.dutch_name,
               'english_name': hs.english_name,
               'machine_value': hs.machine_value,
               'chinese_name': hs.chinese_name}
              for hs in matches]
    return HttpResponse(json.dumps(result), {'content-type': 'application/json'})
def morph_ajax_complete(request, prefix):
    """Return a list of morphs matching the search term
    as a JSON structure suitable for typeahead."""
    datasetid = request.session['datasetid']
    # renamed from 'dataset_id': this is a Dataset instance, not an id
    dataset = Dataset.objects.get(id=datasetid)
    query = Q(idgloss__istartswith=prefix) | \
            Q(annotationidglosstranslation__text__istartswith=prefix) | \
            Q(sn__startswith=prefix)
    qs = Morpheme.objects.filter(query).distinct()
    result = []
    for g in qs:
        if g.dataset != dataset:
            continue
        default_annotationidglosstranslation = ""
        # .first() instead of .get(): .get() raises DoesNotExist when there is
        # no translation in this language, which crashed the view (same fix as
        # in gloss_ajax_complete)
        annotationidglosstranslation = g.annotationidglosstranslation_set.filter(
            language__language_code_2char=request.LANGUAGE_CODE).first()
        if annotationidglosstranslation:
            default_annotationidglosstranslation = annotationidglosstranslation.text
        else:
            annotationidglosstranslation = g.annotationidglosstranslation_set.filter(
                language__language_code_2char='en').first()
            if annotationidglosstranslation:
                default_annotationidglosstranslation = annotationidglosstranslation.text
        result.append({'idgloss': g.idgloss, 'annotation_idgloss': default_annotationidglosstranslation, 'sn': g.sn,
                       'pk': "%s" % (g.id)})
    return HttpResponse(json.dumps(result), {'content-type': 'application/json'})
def user_ajax_complete(request, prefix):
    """Return a list of users matching the search term
    as a JSON structure suitable for typeahead."""
    # last_name now uses istartswith like the other two fields; the previous
    # case-sensitive 'startswith' looked like a typo and made last-name
    # matching inconsistent with username/first_name
    query = Q(username__istartswith=prefix) | \
            Q(first_name__istartswith=prefix) | \
            Q(last_name__istartswith=prefix)
    qs = User.objects.filter(query).distinct()
    result = [{'first_name': u.first_name, 'last_name': u.last_name, 'username': u.username}
              for u in qs]
    return HttpResponse(json.dumps(result), {'content-type': 'application/json'})
def lemma_ajax_complete(request, dataset_id, q):
    """Return a list of lemmas of dataset *dataset_id* whose translation text
    contains *q*, as a JSON structure suitable for typeahead.

    (The previous docstring said "users" — copy-paste from user_ajax_complete.)
    """
    lemmas = LemmaIdgloss.objects.filter(dataset_id=dataset_id, lemmaidglosstranslation__text__icontains=q)\
        .order_by('lemmaidglosstranslation__text')
    lemmas_dict = [{'pk': lemma.pk, 'lemma': str(lemma)} for lemma in lemmas]
    return HttpResponse(json.dumps(lemmas_dict), {'content-type': 'application/json'})
def homonyms_ajax_complete(request, gloss_id):
    """Return the homonyms of gloss *gloss_id* as JSON: a dict mapping the
    gloss id to a list of {'id', 'gloss'} entries, with the annotation text in
    the interface language (falling back to English)."""
    language_code = request.LANGUAGE_CODE
    if language_code == "zh-hans":
        language_code = "zh"
    try:
        this_gloss = Gloss.objects.get(id=gloss_id)
        homonym_objects = this_gloss.homonym_objects()
    except Exception:
        # narrowed from a bare except: any lookup failure yields an empty result
        homonym_objects = []
    result = []
    for homonym in homonym_objects:
        translation = ""
        translations = homonym.annotationidglosstranslation_set.filter(language__language_code_2char=language_code)
        if translations is not None and len(translations) > 0:
            translation = translations[0].text
        else:
            # fall back to the English annotation text
            translations = homonym.annotationidglosstranslation_set.filter(language__language_code_3char='eng')
            if translations is not None and len(translations) > 0:
                translation = translations[0].text
        result.append({ 'id': str(homonym.id), 'gloss': translation })
    homonyms_dict = { str(gloss_id) : result }
    return HttpResponse(json.dumps(homonyms_dict), {'content-type': 'application/json'})
def minimalpairs_ajax_complete(request, gloss_id, gloss_detail=False):
    """Render the minimal-pairs table (or row) for gloss *gloss_id*.

    For each minimal-pair partner of the focus gloss, translate the stored
    values of the differing field into human-readable form in the interface
    language, then render either the gloss-detail table template or the
    list-view row template depending on *gloss_detail*.
    """
    if 'gloss_detail' in request.GET:
        gloss_detail = request.GET['gloss_detail']
    language_code = request.LANGUAGE_CODE
    if language_code == "zh-hans":
        language_code = "zh"
    this_gloss = Gloss.objects.get(id=gloss_id)
    try:
        minimalpairs_objects = this_gloss.minimal_pairs_dict()
    except:
        minimalpairs_objects = []
    # annotation text of the focus gloss in the interface language, falling back to English
    translation_focus_gloss = ""
    translations_this_gloss = this_gloss.annotationidglosstranslation_set.filter(language__language_code_2char=language_code)
    if translations_this_gloss is not None and len(translations_this_gloss) > 0:
        translation_focus_gloss = translations_this_gloss[0].text
    else:
        translations_this_gloss = this_gloss.annotationidglosstranslation_set.filter(language__language_code_3char='eng')
        if translations_this_gloss is not None and len(translations_this_gloss) > 0:
            translation_focus_gloss = translations_this_gloss[0].text
    result = []
    for minimalpairs_object, minimal_pairs_dict in minimalpairs_objects.items():
        other_gloss_dict = dict()
        other_gloss_dict['id'] = str(minimalpairs_object.id)
        other_gloss_dict['other_gloss'] = minimalpairs_object
        # values layout per field: (display name, category, focus choice, other choice, field kind)
        for field, values in minimal_pairs_dict.items():
            # print('values: ', values)
            other_gloss_dict['field'] = field
            other_gloss_dict['field_display'] = values[0]
            other_gloss_dict['field_category'] = values[1]
            # print('field: ', field, ', choice: ', values[2])
            # print('translated_choice_lists_table: ', translated_choice_lists_table[field])
            focus_gloss_choice = values[2]
            other_gloss_choice = values[3]
            field_kind = values[4]
            # print('other gloss ', minimalpairs_object.id, ', field ', field, ': kind and choices: ', field_kind, ', ', focus_gloss_choice, ', ', other_gloss_choice)
            if field_kind == 'list':
                if focus_gloss_choice:
                    try:
                        focus_gloss_value = translated_choice_lists_table[field][int(focus_gloss_choice)][language_code]
                    except:
                        # the stored choice is not in the translated table; surface it as an ERROR_ value
                        focus_gloss_value = 'ERROR_' + focus_gloss_choice
                        print('Error for gloss ', minimalpairs_object.id, ' on stored choice (field: ', field, ', choice: ', focus_gloss_choice, ')')
                else:
                    focus_gloss_value = '-'
            elif field_kind == 'check':
                if focus_gloss_choice == 'True':
                    focus_gloss_value = _('Yes')
                elif focus_gloss_choice == 'Neutral' and field in ['weakdrop', 'weakprop']:
                    focus_gloss_value = _('Neutral')
                else:
                    focus_gloss_value = _('No')
            else:
                # translate Boolean fields
                focus_gloss_value = focus_gloss_choice
            # print('focus gloss choice: ', focus_gloss_value)
            other_gloss_dict['focus_gloss_value'] = focus_gloss_value
            if field_kind == 'list':
                if other_gloss_choice:
                    try:
                        other_gloss_value = translated_choice_lists_table[field][int(other_gloss_choice)][language_code]
                    except:
                        other_gloss_value = 'ERROR_' + other_gloss_choice
                        print('Error for gloss ', minimalpairs_object.id, ' on stored choice (field: ', field, ', choice: ', other_gloss_choice, ')')
                else:
                    other_gloss_value = '-'
            elif field_kind == 'check':
                if other_gloss_choice == 'True':
                    other_gloss_value = _('Yes')
                elif other_gloss_choice == 'Neutral' and field in ['weakdrop', 'weakprop']:
                    other_gloss_value = _('Neutral')
                else:
                    other_gloss_value = _('No')
            else:
                other_gloss_value = other_gloss_choice
            # print('other gloss choice: ', other_gloss_value)
            other_gloss_dict['other_gloss_value'] = other_gloss_value
            other_gloss_dict['field_kind'] = field_kind
        # print('min pairs other gloss dict: ', other_gloss_dict)
        # annotation text of the other gloss in the interface language, falling back to English
        translation = ""
        translations = minimalpairs_object.annotationidglosstranslation_set.filter(language__language_code_2char=language_code)
        if translations is not None and len(translations) > 0:
            translation = translations[0].text
        else:
            translations = minimalpairs_object.annotationidglosstranslation_set.filter(language__language_code_3char='eng')
            if translations is not None and len(translations) > 0:
                translation = translations[0].text
        other_gloss_dict['other_gloss_idgloss'] = translation
        result.append(other_gloss_dict)
    if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS'):
        SHOW_DATASET_INTERFACE_OPTIONS = settings.SHOW_DATASET_INTERFACE_OPTIONS
    else:
        SHOW_DATASET_INTERFACE_OPTIONS = False
    if gloss_detail:
        return render(request, 'dictionary/minimalpairs_gloss_table.html', { 'focus_gloss': this_gloss,
                                                                             'focus_gloss_translation': translation_focus_gloss,
                                                                             'SHOW_DATASET_INTERFACE_OPTIONS' : SHOW_DATASET_INTERFACE_OPTIONS,
                                                                             'minimal_pairs_dict' : result })
    else:
        return render(request, 'dictionary/minimalpairs_row.html', { 'focus_gloss': this_gloss,
                                                                     'focus_gloss_translation': translation_focus_gloss,
                                                                     'SHOW_DATASET_INTERFACE_OPTIONS' : SHOW_DATASET_INTERFACE_OPTIONS,
                                                                     'minimal_pairs_dict' : result })
class LemmaListView(ListView):
    """List view for lemmas, restricted to the user's selected datasets.

    Supports CSV export via the ``?format=CSV`` GET parameter (requires the
    ``dictionary.export_csv`` permission).
    """
    model = LemmaIdgloss
    template_name = 'dictionary/admin_lemma_list.html'
    paginate_by = 10

    def get_queryset(self, **kwargs):
        # Only show lemmas from the datasets the user has selected, annotated
        # with the number of glosses that use each lemma.
        queryset = super(LemmaListView, self).get_queryset(**kwargs)
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        return queryset.filter(dataset__in=selected_datasets).annotate(num_gloss=Count('gloss'))

    def get_context_data(self, **kwargs):
        context = super(LemmaListView, self).get_context_data(**kwargs)
        # Menu-bar flag; default to False when the setting is absent.
        context['SHOW_DATASET_INTERFACE_OPTIONS'] = getattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS', False)
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        context['selected_datasets'] = selected_datasets
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        return context

    def render_to_response(self, context):
        # A 'format=CSV' GET argument switches to CSV export.
        if self.request.GET.get('format') == 'CSV':
            return self.render_to_csv_response(context)
        else:
            return super(LemmaListView, self).render_to_response(context)

    def render_to_csv_response(self, context):
        """Export the filtered lemma list as a CSV attachment.

        Raises PermissionDenied when the user lacks the export permission.
        """
        if not self.request.user.has_perm('dictionary.export_csv'):
            raise PermissionDenied
        # Create the HttpResponse object with the appropriate CSV header.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="dictionary-export-lemmas.csv"'
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        # Language names are shown in the default keywords language.
        lang_attr_name = 'name_' + DEFAULT_KEYWORDS_LANGUAGE['language_code_2char']
        lemmaidglosstranslation_fields = ["Lemma ID Gloss" + " (" + getattr(language, lang_attr_name) + ")"
                                          for language in dataset_languages]
        writer = csv.writer(response)
        with override(LANGUAGE_CODE):
            header = ['Lemma ID', 'Dataset'] + lemmaidglosstranslation_fields
            writer.writerow(header)
            for lemma in self.get_queryset():
                row = [str(lemma.pk), lemma.dataset.acronym]
                for language in dataset_languages:
                    lemmaidglosstranslations = lemma.lemmaidglosstranslation_set.filter(language=language)
                    # Only output the translation when it is unambiguous (exactly one).
                    if lemmaidglosstranslations and len(lemmaidglosstranslations) == 1:
                        row.append(lemmaidglosstranslations[0].text)
                    else:
                        row.append("")
                # Make the row safe for non-string values (which raise
                # AttributeError on .encode) and odd characters.
                safe_row = []
                for column in row:
                    try:
                        safe_row.append(column.encode('utf-8').decode())
                    except AttributeError:
                        safe_row.append(None)
                # BUG FIX: write the sanitized row; the original wrote the raw
                # `row` and discarded `safe_row` entirely.
                writer.writerow(safe_row)
        return response
class LemmaCreateView(CreateView):
    """View to create a new lemma, with one translation field per dataset language."""
    model = LemmaIdgloss
    template_name = 'dictionary/add_lemma.html'
    fields = []

    def get_context_data(self, **kwargs):
        context = super(CreateView, self).get_context_data(**kwargs)
        # Menu-bar flag; default to False when the setting is absent.
        context['SHOW_DATASET_INTERFACE_OPTIONS'] = getattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS', False)
        selected_datasets = get_selected_datasets_for_user(self.request.user)
        context['selected_datasets'] = selected_datasets
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        context['dataset_languages'] = dataset_languages
        context['add_lemma_form'] = LemmaCreateForm(self.request.GET, languages=dataset_languages, user=self.request.user)
        context['lemma_create_field_prefix'] = LemmaCreateForm.lemma_create_field_prefix
        return context

    def post(self, request, *args, **kwargs):
        """Validate per-language uniqueness of the lemma translations, then save.

        Debug print() calls from the original were removed: they leaked the
        full POST payload to stdout/logs.
        """
        dataset = None
        if 'dataset' in request.POST and request.POST['dataset'] is not None:
            dataset = Dataset.objects.get(pk=request.POST['dataset'])
            selected_datasets = Dataset.objects.filter(pk=request.POST['dataset'])
        else:
            selected_datasets = get_selected_datasets_for_user(request.user)
        dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
        form = LemmaCreateForm(request.POST, languages=dataset_languages, user=request.user)
        for item, value in request.POST.items():
            if item.startswith(form.lemma_create_field_prefix):
                language_code_2char = item[len(form.lemma_create_field_prefix):]
                language = Language.objects.get(language_code_2char=language_code_2char)
                # Reject the new lemma when a lemma with the same (upper-cased)
                # translation already exists for this language and dataset.
                # exists() avoids fetching the matching rows.
                if LemmaIdgloss.objects.filter(
                        lemmaidglosstranslation__language=language,
                        lemmaidglosstranslation__text__exact=value.upper(),
                        dataset=dataset).exists():
                    return render(request, 'dictionary/warning.html',
                                  {'warning': language.name + " " + 'lemma ID Gloss not unique.'})
        if form.is_valid():
            try:
                lemma = form.save()
            except ValidationError as ve:
                messages.add_message(request, messages.ERROR, ve.message)
                return render(request, 'dictionary/add_lemma.html', {'add_lemma_form': LemmaCreateForm(request.POST, user=request.user),
                                                                     'dataset_languages': dataset_languages,
                                                                     'selected_datasets': get_selected_datasets_for_user(request.user)})
            return HttpResponseRedirect(reverse('dictionary:admin_lemma_list'))
        else:
            # NOTE(review): this renders 'add_gloss.html' rather than
            # 'add_lemma.html' -- looks like a copy/paste slip; confirm intent.
            return render(request, 'dictionary/add_gloss.html', {'add_lemma_form': form,
                                                                 'dataset_languages': dataset_languages,
                                                                 'selected_datasets': get_selected_datasets_for_user(
                                                                     request.user)})
def create_lemma_for_gloss(request, glossid):
    """Create a new lemma from POSTed translation fields and attach it to gloss `glossid`.

    On any failure a message is queued and the user is redirected back to the
    referring page. On success the gloss's video file is renamed to match the
    new lemma-dependent path.
    """
    try:
        gloss = Gloss.objects.get(id=glossid)
    except ObjectDoesNotExist:
        try:
            # The id may refer to a morpheme rather than a gloss.
            gloss = Morpheme.objects.get(id=glossid).gloss
        except ObjectDoesNotExist:
            messages.add_message(request, messages.ERROR, _("The specified gloss does not exist."))
            return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    dataset = gloss.dataset
    dataset_languages = dataset.translation_languages.all()
    form = LemmaCreateForm(request.POST, languages=dataset_languages, user=request.user)
    for item, value in request.POST.items():
        if item.startswith(form.lemma_create_field_prefix):
            language_code_2char = item[len(form.lemma_create_field_prefix):]
            language = Language.objects.get(language_code_2char=language_code_2char)
            # Enforce per-language uniqueness of the (upper-cased) translation
            # within the gloss's dataset; exists() avoids fetching the rows.
            if LemmaIdgloss.objects.filter(
                    lemmaidglosstranslation__language=language,
                    lemmaidglosstranslation__text__exact=value.upper(),
                    dataset=dataset).exists():
                messages.add_message(request, messages.ERROR, _('Lemma ID Gloss not unique for %(language)s.') % {'language': language.name})
                return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    if form.is_valid():
        try:
            old_video_path = settings.MEDIA_ROOT + gloss.get_video_path()
            # Save the lemma, re-link the gloss, and rename the video in one
            # transaction so a failure leaves the database unchanged.
            with atomic():
                lemma = form.save()
                gloss.lemma = lemma
                gloss.save()
                # The video path depends on the lemma, so rename the file.
                new_video_path = settings.MEDIA_ROOT + gloss.get_video_path()
                gloss.rename_video(old_video_path, new_video_path)
        except ValidationError as ve:
            messages.add_message(request, messages.ERROR, ve.message)
            return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    else:
        messages.add_message(request, messages.ERROR, _("The form contains errors."))
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
class LemmaUpdateView(UpdateView):
    """View to edit the translations of an existing lemma."""
    model = LemmaIdgloss
    success_url = reverse_lazy('dictionary:admin_lemma_list')
    page_in_lemma_list = ''
    template_name = 'dictionary/update_lemma.html'
    fields = []

    def get_context_data(self, **kwargs):
        context = super(LemmaUpdateView, self).get_context_data(**kwargs)
        # this is needed by the menu bar
        context['SHOW_DATASET_INTERFACE_OPTIONS'] = getattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS', False)
        # Get the page of the lemma list on which this lemma appears in order
        # to return to it after the update.
        # BUG FIX: the referer header may be absent (direct navigation), in
        # which case META.get returns None and .split() would crash.
        request_path = self.request.META.get('HTTP_REFERER') or ''
        path_parms = request_path.split('?page=')
        if len(path_parms) > 1:
            self.page_in_lemma_list = str(path_parms[1])
        context['page_in_lemma_list'] = self.page_in_lemma_list
        dataset = self.object.dataset
        context['dataset'] = dataset
        dataset_languages = Language.objects.filter(dataset=dataset).distinct()
        context['dataset_languages'] = dataset_languages
        context['change_lemma_form'] = LemmaUpdateForm(instance=self.object, page_in_lemma_list=self.page_in_lemma_list)
        context['lemma_create_field_prefix'] = LemmaCreateForm.lemma_create_field_prefix
        return context

    def post(self, request, *args, **kwargs):
        instance = self.get_object()
        dataset = instance.dataset
        form = LemmaUpdateForm(request.POST, instance=instance)
        for item, value in request.POST.items():
            if item.startswith(form.lemma_update_field_prefix):
                if value != '':
                    language_code_2char = item[len(form.lemma_update_field_prefix):]
                    language = Language.objects.get(language_code_2char=language_code_2char)
                    lemmas_for_this_language_and_annotation_idgloss = LemmaIdgloss.objects.filter(
                        lemmaidglosstranslation__language=language,
                        lemmaidglosstranslation__text__exact=value.upper(),
                        dataset=dataset)
                    # Iterating directly is equivalent to the original
                    # len() != 0 guard: an empty queryset loops zero times.
                    for nextLemma in lemmas_for_this_language_and_annotation_idgloss:
                        if nextLemma.id != instance.id:
                            # Found a different lemma with the same translation.
                            return render(request, 'dictionary/warning.html',
                                          {'warning': language.name + " " + 'lemma ID Gloss not unique.'})
                else:
                    # Intent is to set the lemma translation to empty; allowed here.
                    pass
            elif item.startswith('page') and value:
                # Page of the lemma list where the lemma being updated is displayed.
                self.page_in_lemma_list = value
        if form.is_valid():
            try:
                form.save()
                messages.add_message(request, messages.INFO, _("The changes to the lemma have been saved."))
            except Exception:
                # A specific message is put into the messages framework rather
                # than the message caught in the exception; passing the caught
                # message through gives a runtime error. (Narrowed from a bare
                # except so KeyboardInterrupt/SystemExit are not swallowed.)
                messages.add_message(request, messages.ERROR, _("There must be at least one translation for this lemma."))
            # Return to the same page in the list of lemmas, if available.
            if self.page_in_lemma_list:
                return HttpResponseRedirect(self.success_url + '?page=' + self.page_in_lemma_list)
            else:
                return HttpResponseRedirect(self.success_url)
        else:
            return HttpResponseRedirect(reverse_lazy('dictionary:change_lemma', kwargs={'pk': instance.id}))
class LemmaDeleteView(DeleteView):
    """Delete a lemma, but only when no glosses still refer to it."""
    model = LemmaIdgloss
    success_url = reverse_lazy('dictionary:admin_lemma_list')

    def delete(self, request, *args, **kwargs):
        self.object = self.get_object()
        # Refuse deletion while glosses still use this lemma. exists() issues
        # a cheap EXISTS query instead of fetching every gloss row (the
        # original evaluated .all() just to test truthiness).
        if self.object.gloss_set.exists():
            messages.add_message(request, messages.ERROR, _("There are glosses using this lemma."))
        else:
            self.object.delete()
        return HttpResponseRedirect(self.get_success_url())
|
from os import walk
from os.path import abspath, join
from argparse import ArgumentParser
from re import findall, sub, IGNORECASE
class Grebot(object):
    """Recursive grep for a function/keyword name in several spelling variants."""
    # Every hit is printed as "<line number>:\t<line content>".
    LINE_FORMAT = '%s:\t%s'
    def __init__(self, function_name, extensions, base_dir=None, sensitive=False, color=False):
        # Build a regex alternation of naming variants: as given, underscores
        # as spaces, CamelCase split into words, and spaces as underscores.
        # NOTE(review): function_name is interpolated into the regex without
        # escaping, so regex metacharacters in the name change the search --
        # confirm whether that is intended.
        self._search_word = '%s|%s|%s|%s' % (function_name, function_name.replace('_', ' '),
                                             sub('(.)([A-Z][a-z_]+)', r'\1 \2', function_name),
                                             function_name.replace(' ', '_'))
        self._base_dir = base_dir
        # 0 means case-sensitive matching; otherwise re.IGNORECASE.
        self._is_case_sensitive = 0 if sensitive else IGNORECASE
        # ANSI red highlighting when color output is requested.
        self._word_format = '\033[91m%s\033[0m' if color else '%s'
        # Regex matching file names that end in one of the wanted extensions.
        self._extensions = '|'.join(['\w+\.%s$' % extension for extension in extensions.split(',')])
    def main(self):
        # Walk the tree (not following symlinks) and scan each file whose
        # name matches one of the configured extensions.
        for current_dir, dirs, files in walk(self._base_dir, followlinks=False):
            [self._find_in_file(abspath(join(current_dir, f)))
             for f in files if findall(self._extensions, f)]
    def _find_in_file(self, path):
        # Collect matching lines first so the "in file:" header is printed
        # only for files that actually contain a match.
        to_print = []
        with open(path, 'rb') as f:
            data_lines = f.readlines()
        for line_num, line in enumerate(data_lines):
            result = findall(self._search_word, line, self._is_case_sensitive)
            if result:
                # Highlight the first matched variant in the echoed line.
                to_print.append(self.LINE_FORMAT % (line_num + 1, line.replace(result[0], self._word_format % result[0])))
        if to_print:
            print 'in file: %s' % path
            print ''.join(to_print)
def get_parser():
    """Construct the command-line argument parser for the search tool."""
    option_specs = [
        (('-c', '--color'),
         dict(action='store_true', default=False, help='show matched words in color')),
        (('-s', '--sensitive'),
         dict(action='store_true', default=False, help='Be case sensitive')),
        (('-p', '--path'),
         dict(type=str, default='.', help='path to check to start recursive check from')),
        (('-e', '--extensions'),
         dict(type=str, default='txt,robot,py', help='which file extensions to check')),
        (('-d', '--debug'),
         dict(action='store_true', default=False, help='show exception in case of fail')),
    ]
    parser = ArgumentParser()
    parser.usage = '%(prog)s function_name [-despch]'
    for flags, options in option_specs:
        parser.add_argument(*flags, **options)
    return parser
if __name__ == '__main__':
    # Everything that is not a recognised option is joined into the search word.
    parser = get_parser()
    args, word = parser.parse_known_args()
    try:
        function_name = ' '.join(word)
        if function_name:
            Grebot(function_name, args.extensions, args.path, args.sensitive, args.color).main()
    except:
        # In debug mode re-raise the original exception; otherwise just show usage.
        if args.debug:
            raise
        print parser.format_usage()
Add an option to enable/disable regex pattern search
from os import walk
from os.path import abspath, join
from argparse import ArgumentParser
from re import findall, sub, IGNORECASE, escape
class Grebot(object):
    """Recursive grep for a function/keyword name in several spelling variants."""
    # Every hit is printed as "<line number>:\t<line content>".
    LINE_FORMAT = '%s:\t%s'
    def __init__(self, function_name, extensions, base_dir=None, regex=False, sensitive=False, color=False):
        # Unless the caller opts into regex mode, escape the name so regex
        # metacharacters are matched literally.
        function_name = function_name if regex else escape(function_name)
        # Build a regex alternation of naming variants: as given, underscores
        # as spaces, CamelCase split into words, and spaces as underscores.
        self._search_word = '%s|%s|%s|%s' % (function_name, function_name.replace('_', ' '),
                                             sub('(.)([A-Z][a-z_]+)', r'\1 \2', function_name),
                                             function_name.replace(' ', '_'))
        self._base_dir = base_dir
        # 0 means case-sensitive matching; otherwise re.IGNORECASE.
        self._is_case_sensitive = 0 if sensitive else IGNORECASE
        # ANSI red highlighting when color output is requested.
        self._word_format = '\033[91m%s\033[0m' if color else '%s'
        # Regex matching file names that end in one of the wanted extensions.
        self._extensions = '|'.join(['\w+\.%s$' % extension for extension in extensions.split(',')])
    def main(self):
        # Walk the tree (not following symlinks) and scan each file whose
        # name matches one of the configured extensions.
        for current_dir, dirs, files in walk(self._base_dir, followlinks=False):
            [self._find_in_file(abspath(join(current_dir, f)))
             for f in files if findall(self._extensions, f)]
    def _find_in_file(self, path):
        # Collect matching lines first so the "in file:" header is printed
        # only for files that actually contain a match.
        to_print = []
        with open(path, 'rb') as f:
            data_lines = f.readlines()
        for line_num, line in enumerate(data_lines):
            result = findall(self._search_word, line, self._is_case_sensitive)
            if result:
                # Highlight the first matched variant in the echoed line.
                to_print.append(self.LINE_FORMAT % (line_num + 1, line.replace(result[0], self._word_format % result[0])))
        if to_print:
            print 'in file: %s' % path
            print ''.join(to_print)
def get_parser():
    """Construct the command-line argument parser for the search tool."""
    option_specs = [
        (('-c', '--color'),
         dict(action='store_true', default=False, help='show matched words in color')),
        (('-s', '--sensitive'),
         dict(action='store_true', default=False, help='be case sensitive')),
        (('-p', '--path'),
         dict(type=str, default='.', help='path to check to start recursive check from')),
        (('-e', '--extensions'),
         dict(type=str, default='txt,robot,py', help='which file extensions to check')),
        (('-d', '--debug'),
         dict(action='store_true', default=False, help='show exception in case of fail')),
        (('-r', '--regex'),
         dict(action='store_true', default=False, help='use regex pattern search')),
    ]
    parser = ArgumentParser()
    parser.usage = '%(prog)s function_name [-despchr]'
    for flags, options in option_specs:
        parser.add_argument(*flags, **options)
    return parser
if __name__ == '__main__':
    # Everything that is not a recognised option is joined into the search word.
    parser = get_parser()
    args, word = parser.parse_known_args()
    try:
        function_name = ' '.join(word)
        if function_name:
            Grebot(function_name, args.extensions, args.path,
                   args.regex, args.sensitive, args.color).main()
    except:
        # In debug mode re-raise the original exception; otherwise just show usage.
        if args.debug:
            raise
        print parser.format_usage()
|
from __future__ import absolute_import
from __future__ import unicode_literals
import json
from django.test.testcases import TestCase
from django.test.client import RequestFactory
from django.test.testcases import SimpleTestCase
from fakecouch import FakeCouchDb
from corehq.apps.users.models import WebUser
from corehq.apps.domain.models import Domain
from casexml.apps.case.models import CommCareCase
from corehq.apps.userreports.expressions import ExpressionFactory
from corehq.apps.userreports.filters.factory import FilterFactory
from corehq.apps.userreports.models import DataSourceConfiguration
from corehq.apps.userreports.specs import FactoryContext
from corehq.apps.users.models import CommCareUser
from couchforms.models import XFormInstance
import os
from io import open
class YeksiTestCase(TestCase):
    """Base test case that sets up the 'test-pna' domain and a superuser web user."""

    def setUp(self):
        self.factory = RequestFactory()
        domain = Domain.get_or_create_with_name('test-pna')
        domain.is_active = True
        domain.save()
        self.domain = domain
        user = WebUser.get_by_username('test')
        if not user:
            user = WebUser.create(domain.name, 'test', 'passwordtest')
        # Mark the user as a logged-in, active superuser for request tests.
        # (The original assigned is_authenticated twice; once is enough.)
        user.is_authenticated = True
        user.is_superuser = True
        user.is_active = True
        self.user = user

    def tearDown(self):
        # Remove the test user so it does not leak into other test cases.
        self.user.delete()
class TestDataSourceExpressions(SimpleTestCase):
    """Base class for testing UCR data-source column expressions against a fake couch DB."""
    # Subclasses set this to a JSON file name under ucr/data_sources.
    data_source_name = None
    def get_expression(self, column_id, column_type):
        """Build the filter (boolean columns) or expression for `column_id`,
        asserting that the column's datatype matches `column_type`."""
        column = self.get_column(column_id)
        if column['type'] == 'boolean':
            return FilterFactory.from_spec(
                column['filter'],
                context=FactoryContext(self.named_expressions, {})
            )
        else:
            self.assertEqual(column['datatype'], column_type)
            return ExpressionFactory.from_spec(
                column['expression'],
                context=FactoryContext(self.named_expressions, {})
            )
    def get_expressions_from_base_item_expression(self):
        # NOTE(review): the factory context is built from base_item_expression
        # here, rather than named_expressions as elsewhere -- confirm intent.
        return ExpressionFactory.from_spec(
            self.base_item_expression,
            context=FactoryContext(self.base_item_expression, {})
        )
    @classmethod
    def setUpClass(cls):
        # Load and parse the data-source JSON once per test class.
        super(TestDataSourceExpressions, cls).setUpClass()
        data_source_file = os.path.join(
            os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)),
            'ucr',
            'data_sources',
            cls.data_source_name
        )
        with open(data_source_file, encoding='utf-8') as f:
            cls.data_source = DataSourceConfiguration.wrap(json.loads(f.read())['config'])
        cls.named_expressions = cls.data_source.named_expression_objects
        cls.base_item_expression = cls.data_source.base_item_expression
    def setUp(self):
        # Swap the couch databases for an in-memory fake; the originals are
        # restored in tearDown.
        self.database = FakeCouchDb()
        self.case_orig_db = CommCareCase.get_db()
        self.form_orig_db = XFormInstance.get_db()
        self.user_orig_db = CommCareUser.get_db()
        CommCareCase.set_db(self.database)
        XFormInstance.set_db(self.database)
        CommCareUser.set_db(self.database)
    def tearDown(self):
        CommCareCase.set_db(self.case_orig_db)
        XFormInstance.set_db(self.form_orig_db)
        CommCareUser.set_db(self.user_orig_db)
    def get_column(self, column_id):
        # Return the indicator spec for column_id (None when absent).
        for indicator in self.data_source.configured_indicators:
            if indicator['column_id'] == column_id:
                return indicator
Remove the test user after usage in tests
from __future__ import absolute_import
from __future__ import unicode_literals
import json
from django.test.testcases import TestCase
from django.test.client import RequestFactory
from django.test.testcases import SimpleTestCase
from fakecouch import FakeCouchDb
from corehq.apps.users.models import WebUser
from corehq.apps.domain.models import Domain
from casexml.apps.case.models import CommCareCase
from corehq.apps.userreports.expressions import ExpressionFactory
from corehq.apps.userreports.filters.factory import FilterFactory
from corehq.apps.userreports.models import DataSourceConfiguration
from corehq.apps.userreports.specs import FactoryContext
from corehq.apps.users.models import CommCareUser
from couchforms.models import XFormInstance
import os
from io import open
class YeksiTestCase(TestCase):
    """Base test case that sets up the 'test-pna' domain and a superuser web user."""

    def setUp(self):
        self.factory = RequestFactory()
        domain = Domain.get_or_create_with_name('test-pna')
        domain.is_active = True
        domain.save()
        self.domain = domain
        user = WebUser.get_by_username('test')
        if not user:
            user = WebUser.create(domain.name, 'test', 'passwordtest')
        # Mark the user as a logged-in, active superuser for request tests.
        # (The original assigned is_authenticated twice; once is enough.)
        user.is_authenticated = True
        user.is_superuser = True
        user.is_active = True
        self.user = user

    def tearDown(self):
        # Remove the test user so it does not leak into other test cases.
        self.user.delete()
class TestDataSourceExpressions(SimpleTestCase):
    """Base class for testing UCR data-source column expressions against a fake couch DB."""
    # Subclasses set this to a JSON file name under ucr/data_sources.
    data_source_name = None
    def get_expression(self, column_id, column_type):
        """Build the filter (boolean columns) or expression for `column_id`,
        asserting that the column's datatype matches `column_type`."""
        column = self.get_column(column_id)
        if column['type'] == 'boolean':
            return FilterFactory.from_spec(
                column['filter'],
                context=FactoryContext(self.named_expressions, {})
            )
        else:
            self.assertEqual(column['datatype'], column_type)
            return ExpressionFactory.from_spec(
                column['expression'],
                context=FactoryContext(self.named_expressions, {})
            )
    def get_expressions_from_base_item_expression(self):
        # NOTE(review): the factory context is built from base_item_expression
        # here, rather than named_expressions as elsewhere -- confirm intent.
        return ExpressionFactory.from_spec(
            self.base_item_expression,
            context=FactoryContext(self.base_item_expression, {})
        )
    @classmethod
    def setUpClass(cls):
        # Load and parse the data-source JSON once per test class.
        super(TestDataSourceExpressions, cls).setUpClass()
        data_source_file = os.path.join(
            os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)),
            'ucr',
            'data_sources',
            cls.data_source_name
        )
        with open(data_source_file, encoding='utf-8') as f:
            cls.data_source = DataSourceConfiguration.wrap(json.loads(f.read())['config'])
        cls.named_expressions = cls.data_source.named_expression_objects
        cls.base_item_expression = cls.data_source.base_item_expression
    def setUp(self):
        # Swap the couch databases for an in-memory fake; the originals are
        # restored in tearDown.
        self.database = FakeCouchDb()
        self.case_orig_db = CommCareCase.get_db()
        self.form_orig_db = XFormInstance.get_db()
        self.user_orig_db = CommCareUser.get_db()
        CommCareCase.set_db(self.database)
        XFormInstance.set_db(self.database)
        CommCareUser.set_db(self.database)
    def tearDown(self):
        CommCareCase.set_db(self.case_orig_db)
        XFormInstance.set_db(self.form_orig_db)
        CommCareUser.set_db(self.user_orig_db)
    def get_column(self, column_id):
        # Return the indicator spec for column_id (None when absent).
        for indicator in self.data_source.configured_indicators:
            if indicator['column_id'] == column_id:
                return indicator
|
"""
Custom report definitions - control display of reports.
The BaseReport is somewhat general, but it's
currently specific to monthly reports. It would be pretty simple to make
this more general and subclass for monthly reports, but I'm holding off on
that until we actually have another use case for it.
"""
import datetime
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import CustomProjectReport, MonthYearMixin
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DataTablesColumnGroup
from corehq.apps.reports.filters.select import SelectOpenCloseFilter
from corehq.apps.users.models import CommCareUser, CommCareCase
from dimagi.utils.couch.database import get_db
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.dates import DateSpan
from couchdbkit.exceptions import ResourceNotFound
from ..opm_tasks.models import OpmReportSnapshot
from .beneficiary import Beneficiary
from .incentive import Worker
from .constants import *
from .filters import BlockFilter, AWCFilter
class BaseReport(MonthYearMixin, GenericTabularReport, CustomProjectReport):
    """
    Report parent class. Children must provide a get_rows() method that
    returns a list of the raw data that forms the basis of each row.

    The "model" attribute is an object that can accept raw_data for a row
    and perform the necessary calculations. It must also provide a
    method_map that is a list of (method_name, "Verbose Title") tuples
    that define the columns in the report.
    """
    name = None
    slug = None
    model = None
    report_template_path = "opm/report.html"
    default_rows = 50
    printable = True
    exportable = True
    @property
    def fields(self):
        # Block and AWC filters apply to every OPM report.
        return [BlockFilter, AWCFilter] + super(BaseReport, self).fields
    def filter(self, fn, filter_fields=None):
        """
        This function is to be called by the row constructor to verify that
        the row matches the filters.

        ``fn`` should be a callable that accepts a key, and returns the value
        that should match the filters for a given field.

        Raises InvalidRow when the row does not match the active filters.
        """
        if filter_fields is None:
            filter_fields = [('awc_name', 'awcs'), ('block', 'blocks')]
        for key, field in filter_fields:
            keys = self.filter_data.get(field, [])
            # An empty selection means "no filtering" for that field.
            if keys and fn(key) not in keys:
                raise InvalidRow
    @property
    @memoized
    def snapshot(self):
        # Don't load snapshot if filtering by current case status,
        # instead, calculate again.
        if self.filter_data.get('is_open', False):
            return None
        return OpmReportSnapshot.from_view(self)
    @property
    def headers(self):
        # Prefer the stored snapshot's headers when a snapshot exists.
        if self.snapshot is not None:
            return DataTablesHeader(*[
                DataTablesColumn(header) for header in self.snapshot.headers
            ])
        return DataTablesHeader(*[
            DataTablesColumn(header) for method, header in self.model.method_map
        ])
    @property
    def rows(self):
        # is it worth noting whether or not the data being displayed is pulled
        # from an old snapshot?
        if self.snapshot is not None:
            return self.snapshot.rows
        rows = []
        for row in self.row_objects:
            rows.append([getattr(row, method) for
                        method, header in self.model.method_map])
        return rows
    @property
    def filter_data(self):
        # Map each filter slug to its currently selected value(s).
        return dict([
            (field.slug, field.get_value(self.request, DOMAIN))
            for field in self.fields
        ])
    @property
    def row_objects(self):
        """
        Returns a list of objects, each representing a row in the report
        """
        rows = []
        for row in self.get_rows(self.datespan):
            try:
                rows.append(self.model(row, self))
            except InvalidRow:
                # The model raises InvalidRow for rows excluded by the filters.
                pass
        return rows
    @property
    def date_range(self):
        start = self.datespan.startdate_utc
        end = self.datespan.enddate_utc
        now = datetime.datetime.utcnow()
        # if report is run on current month, date range should be
        # this month up till now
        if start.year == now.year and start.month == now.month:
            end = now
        return (start, end)
    def get_model_kwargs(self):
        """
        Override this method to provide a dict of extra kwargs to the
        row constructor
        """
        return {}
class BeneficiaryPaymentReport(BaseReport):
    name = "Beneficiary Payment Report"
    slug = 'beneficiary_payment_report'
    model = Beneficiary
    @property
    def fields(self):
        # Adds an open/closed case-status filter on top of the base filters.
        return super(BeneficiaryPaymentReport, self).fields + [SelectOpenCloseFilter]
    # TODO: Switch to ES. Peformance aaah!
    def get_rows(self, datespan):
        # Loads every case in the domain -- see the TODO above about performance.
        cases = CommCareCase.get_all_cases(DOMAIN, include_docs=True)
        return [case for case in cases if self.passes_filter(case)]
    def passes_filter(self, case):
        """Return True when the case matches the selected open/closed status
        filter, or when no status filter is selected at all."""
        status = self.filter_data.get('is_open', None)
        if status:
            if status == 'open' and not case.closed:
                return True
            elif status == 'closed' and case.closed:
                return True
            return False
        return True
class IncentivePaymentReport(BaseReport):
    name = "Incentive Payment Report"
    slug = 'incentive_payment_report'
    model = Worker
    @property
    @memoized
    def last_month_totals(self):
        """Map each worker's account number to last month's total, taken from
        the previous month's stored snapshot (None when no snapshot exists)."""
        # Stepping back 4 days from the start of this report's month lands in
        # the previous month.
        last_month = self.datespan.startdate_utc - datetime.timedelta(days=4)
        snapshot = OpmReportSnapshot.by_month(last_month.month, last_month.year,
            "IncentivePaymentReport")
        if snapshot is not None:
            total_index = snapshot.slugs.index('month_total')
            account_index = snapshot.slugs.index('account_number')
            return dict(
                (row[account_index], row[total_index]) for row in snapshot.rows
            )
    def get_model_kwargs(self):
        # Workers need last month's totals to compute carry-over amounts.
        return {'last_month_totals': self.last_month_totals}
    def get_rows(self, datespan):
        return CommCareUser.by_domain(DOMAIN)
def last_if_none(month, year):
    """Return (month, year), defaulting to last month when month is omitted.

    Passing a month without a year raises ValueError (the original used
    `assert`, which is stripped under ``python -O``).
    """
    if month is not None:
        if year is None:
            raise ValueError("You must pass either nothing or a month AND a year")
        return month, year
    # BUG FIX: the original subtracted 27 days from "now", which stays in the
    # *current* month when today is the 28th-31st (e.g. Jan 31 - 27 = Jan 4).
    # The day before the first of this month is always in last month.
    last_month = datetime.date.today().replace(day=1) - datetime.timedelta(days=1)
    return last_month.month, last_month.year
def get_report(ReportClass, month=None, year=None):
    """
    Utility method to run a report for an arbitrary month without a request.

    Returns an instance of a subclass of ReportClass whose month/year,
    datespan, headers, and (empty) filter data are fixed at construction.
    """
    month, year = last_if_none(month, year)
    class Report(ReportClass):
        # Never read from a stored snapshot; always recompute.
        snapshot = None
        report_class = ReportClass
        def __init__(self, *args, **kwargs):
            # NOTE(review): deliberately does not call super().__init__, so
            # the report can run without a request -- confirm nothing else
            # relies on base-class initialization.
            self.slugs, self._headers = [list(tup) for tup in zip(*self.model.method_map)]
        @property
        def month(self):
            return month
        @property
        def year(self):
            return year
        @property
        def headers(self):
            return self._headers
        @property
        def datespan(self):
            # The report covers the whole requested month.
            return DateSpan.from_month(month, year)
        @property
        def filter_data(self):
            # No request, so no filters are applied.
            return {}
    return Report()
Change name of Incentive Payment Report
"""
Custom report definitions - control display of reports.
The BaseReport is somewhat general, but it's
currently specific to monthly reports. It would be pretty simple to make
this more general and subclass for monthly reports, but I'm holding off on
that until we actually have another use case for it.
"""
import datetime
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import CustomProjectReport, MonthYearMixin
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DataTablesColumnGroup
from corehq.apps.reports.filters.select import SelectOpenCloseFilter
from corehq.apps.users.models import CommCareUser, CommCareCase
from dimagi.utils.couch.database import get_db
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.dates import DateSpan
from couchdbkit.exceptions import ResourceNotFound
from ..opm_tasks.models import OpmReportSnapshot
from .beneficiary import Beneficiary
from .incentive import Worker
from .constants import *
from .filters import BlockFilter, AWCFilter
class BaseReport(MonthYearMixin, GenericTabularReport, CustomProjectReport):
    """
    Report parent class. Children must provide a get_rows() method that
    returns a list of the raw data that forms the basis of each row.

    The "model" attribute is an object that can accept raw_data for a row
    and perform the necessary calculations. It must also provide a
    method_map that is a list of (method_name, "Verbose Title") tuples
    that define the columns in the report.
    """
    name = None
    slug = None
    model = None
    report_template_path = "opm/report.html"
    default_rows = 50
    printable = True
    exportable = True
    @property
    def fields(self):
        # Block and AWC filters apply to every OPM report.
        return [BlockFilter, AWCFilter] + super(BaseReport, self).fields
    def filter(self, fn, filter_fields=None):
        """
        This function is to be called by the row constructor to verify that
        the row matches the filters.

        ``fn`` should be a callable that accepts a key, and returns the value
        that should match the filters for a given field.

        Raises InvalidRow when the row does not match the active filters.
        """
        if filter_fields is None:
            filter_fields = [('awc_name', 'awcs'), ('block', 'blocks')]
        for key, field in filter_fields:
            keys = self.filter_data.get(field, [])
            # An empty selection means "no filtering" for that field.
            if keys and fn(key) not in keys:
                raise InvalidRow
    @property
    @memoized
    def snapshot(self):
        # Don't load snapshot if filtering by current case status,
        # instead, calculate again.
        if self.filter_data.get('is_open', False):
            return None
        return OpmReportSnapshot.from_view(self)
    @property
    def headers(self):
        # Prefer the stored snapshot's headers when a snapshot exists.
        if self.snapshot is not None:
            return DataTablesHeader(*[
                DataTablesColumn(header) for header in self.snapshot.headers
            ])
        return DataTablesHeader(*[
            DataTablesColumn(header) for method, header in self.model.method_map
        ])
    @property
    def rows(self):
        # is it worth noting whether or not the data being displayed is pulled
        # from an old snapshot?
        if self.snapshot is not None:
            return self.snapshot.rows
        rows = []
        for row in self.row_objects:
            rows.append([getattr(row, method) for
                        method, header in self.model.method_map])
        return rows
    @property
    def filter_data(self):
        # Map each filter slug to its currently selected value(s).
        return dict([
            (field.slug, field.get_value(self.request, DOMAIN))
            for field in self.fields
        ])
    @property
    def row_objects(self):
        """
        Returns a list of objects, each representing a row in the report
        """
        rows = []
        for row in self.get_rows(self.datespan):
            try:
                rows.append(self.model(row, self))
            except InvalidRow:
                # The model raises InvalidRow for rows excluded by the filters.
                pass
        return rows
    @property
    def date_range(self):
        start = self.datespan.startdate_utc
        end = self.datespan.enddate_utc
        now = datetime.datetime.utcnow()
        # if report is run on current month, date range should be
        # this month up till now
        if start.year == now.year and start.month == now.month:
            end = now
        return (start, end)
    def get_model_kwargs(self):
        """
        Override this method to provide a dict of extra kwargs to the
        row constructor
        """
        return {}
class BeneficiaryPaymentReport(BaseReport):
    name = "Beneficiary Payment Report"
    slug = 'beneficiary_payment_report'
    model = Beneficiary
    @property
    def fields(self):
        # Adds an open/closed case-status filter on top of the base filters.
        return super(BeneficiaryPaymentReport, self).fields + [SelectOpenCloseFilter]
    # TODO: Switch to ES. Peformance aaah!
    def get_rows(self, datespan):
        # Loads every case in the domain -- see the TODO above about performance.
        cases = CommCareCase.get_all_cases(DOMAIN, include_docs=True)
        return [case for case in cases if self.passes_filter(case)]
    def passes_filter(self, case):
        """Return True when the case matches the selected open/closed status
        filter, or when no status filter is selected at all."""
        status = self.filter_data.get('is_open', None)
        if status:
            if status == 'open' and not case.closed:
                return True
            elif status == 'closed' and case.closed:
                return True
            return False
        return True
class IncentivePaymentReport(BaseReport):
    name = "AWW Payment Report"
    slug = 'incentive_payment_report'
    model = Worker
    @property
    @memoized
    def last_month_totals(self):
        """Map each worker's account number to last month's total, taken from
        the previous month's stored snapshot (None when no snapshot exists)."""
        # Stepping back 4 days from the start of this report's month lands in
        # the previous month.
        last_month = self.datespan.startdate_utc - datetime.timedelta(days=4)
        snapshot = OpmReportSnapshot.by_month(last_month.month, last_month.year,
            "IncentivePaymentReport")
        if snapshot is not None:
            total_index = snapshot.slugs.index('month_total')
            account_index = snapshot.slugs.index('account_number')
            return dict(
                (row[account_index], row[total_index]) for row in snapshot.rows
            )
    def get_model_kwargs(self):
        # Workers need last month's totals to compute carry-over amounts.
        return {'last_month_totals': self.last_month_totals}
    def get_rows(self, datespan):
        return CommCareUser.by_domain(DOMAIN)
def last_if_none(month, year):
    """Return (month, year) as given, or last month's (month, year) when
    month is None.  Supplying a month without a year is a caller error.
    """
    if month is None:
        # Step back 27 days from now to land (approximately) in last month.
        previous = datetime.datetime.now() - datetime.timedelta(days=27)
        return previous.month, previous.year
    assert year is not None, \
        "You must pass either nothing or a month AND a year"
    return month, year
def get_report(ReportClass, month=None, year=None):
    """
    Utility method to run a report for an arbitrary month without a request
    """
    month, year = last_if_none(month, year)
    class Report(ReportClass):
        # Force live computation: never read from a stored snapshot.
        snapshot = None
        report_class = ReportClass
        def __init__(self, *args, **kwargs):
            # NOTE(review): deliberately does not call the superclass
            # __init__ (presumably because no request is available) --
            # confirm nothing else from the base initializer is needed.
            self.slugs, self._headers = [list(tup) for tup in zip(*self.model.method_map)]
        @property
        def month(self):
            return month
        @property
        def year(self):
            return year
        @property
        def headers(self):
            return self._headers
        @property
        def datespan(self):
            # month/year are closed over from the enclosing call.
            return DateSpan.from_month(month, year)
        @property
        def filter_data(self):
            # No request, so no user-supplied filter values.
            return {}
    return Report()
# ---------------------------------------------------------------- file boundary
#!/bin/python
# The asteroid's shadow is round and its radius is always ASTEROID_R.
# The shadow's speed is between ASTEROID_MIN_SPEED and ASTEROID_MAX_SPEED.
# Vx is positive and Vy is smaller than Vx by at least a factor of ASTEROID_VX_VY_RATIO.
# That is because asteroids orbit in the same direction as Earth and mostly in the same plane - so the speed falls in a known range.
# We cannot detect ones that are near 0, so we look for ones between min and max; later we can do -max, -min with the same algorithm.
#
# We have a rectangle grid of telescope. Number of telescopes T_WIDTH * T_HEIGHT
# The distance between them is T_STEP_X and T_STEP_Y
#
# Noise: NOISE_EVENTS_PER_SCOPE_SECOND random events are injected per telescope-second (see add_noise).
import os
import math
from random import randint
NOISE_EVENTS_PER_SCOPE_SECOND = 1
FRAME_DURATION_SEC = 1 / 100.0
ASTEROID_R = 100
# Shadow speed range (units per second); min is a tenth of max.
# (The original duplicated the ASTEROID_MAX_SPEED assignment; the
# redundant second copy has been removed.)
ASTEROID_MAX_SPEED = 10 * 1000
ASTEROID_MIN_SPEED = ASTEROID_MAX_SPEED / 10
# |Vy| is at most Vx / ASTEROID_VX_VY_RATIO.
ASTEROID_VX_VY_RATIO = 3
# Telescope grid dimensions and spacing.
T_WIDTH = 10
T_HEIGHT = 50
T_STEP_X = ASTEROID_R * 5
T_STEP_Y = ASTEROID_R
# Event type tags used in the output files.
OCCULTATION_FLAG = 'occultation'
NOISE_FLAG = 'noise'
telescopes = {}  # telescope_num: (x, y)
asteroid = ()  # (x, y, vx, vy)
def round_time_to_frame(t):
    """Snap a time (seconds) to the nearest whole frame boundary."""
    frames = round(t / FRAME_DURATION_SEC)
    return frames * FRAME_DURATION_SEC
def format(text):
    """Strip tuple punctuation ("()", ",", "'") from *text* and append the
    platform line separator, yielding one space-separated output line.

    Fix: the original parameter was named ``str``, shadowing the builtin.
    (The function name itself still shadows builtin format() but is kept
    for the existing call sites.)
    """
    stripped = text.replace('(', '').replace(')', '').replace(',', '').replace('\'', '')
    return stripped + os.linesep
def time_str(time):
    """Format a time value with exactly two decimal places."""
    return "%.2f" % (time,)
def init_telescopes():
    """Build the telescope grid, mapping telescope number -> (x, y).

    Numbering runs down each column: telescope row + col * T_HEIGHT
    sits at (col * T_STEP_X, row * T_STEP_Y).
    """
    return dict(
        (col * T_HEIGHT + row, (col * T_STEP_X, row * T_STEP_Y))
        for col in range(T_WIDTH)
        for row in range(T_HEIGHT)
    )
def init_asteroid():
    """Pick a random asteroid trajectory that crosses the telescope grid.

    Chooses a target point (xt, yt) in the middle of the grid, a speed,
    and a flight time t, then back-projects the start position so the
    shadow reaches the target after t seconds.  Returns (x, y, vx, vy).

    Fix: integer floor division (//) replaces / so randint always gets
    integer bounds (required on Python 3); the operands are ints, so the
    values are identical to the original Python 2 behavior.
    """
    xt = T_WIDTH * T_STEP_X // 2
    yt = randint(0, T_HEIGHT * T_STEP_Y)
    vx = randint(0, ASTEROID_MAX_SPEED - ASTEROID_MIN_SPEED) + ASTEROID_MIN_SPEED
    vy = randint(-vx // ASTEROID_VX_VY_RATIO, vx // ASTEROID_VX_VY_RATIO)
    t = randint(100, 200)
    x = xt - vx * t
    y = yt - vy * t
    return (x, y, vx, vy)
def list_with_noise(start, duration, actual_event, scope_num):
    """Generate per-frame noise events for one telescope over a window.

    start: window start time (seconds); duration: window length (seconds);
    actual_event: this telescope's real occultation tuple
        (scope_num, t_begin, t_end, flag), or None;
    scope_num: telescope number stamped on the emitted tuples.
    Returns a list of (scope_num, frame_start, frame_end, NOISE_FLAG)
    tuples, with actual_event spliced in at its begin frame.
    """
    duration_frames = duration / FRAME_DURATION_SEC
    # Mean frames between noise events; randint(0, noise_ratio) == 0
    # fires with probability ~1/noise_ratio per frame.
    noise_ratio = 1 / NOISE_EVENTS_PER_SCOPE_SECOND / FRAME_DURATION_SEC
    res = []
    for i in range(int(duration_frames)):
        if randint(0, noise_ratio) == 0:
            if not actual_event:
                res.append((scope_num, start + i * FRAME_DURATION_SEC, start + (i + 1) * FRAME_DURATION_SEC, NOISE_FLAG))
            else:
                # NOTE(review): this compares the event's absolute times to
                # window-relative frame offsets, and requires t_end <= frame
                # start AND t_begin >= frame end -- which cannot both hold,
                # so noise appears to be suppressed entirely on telescopes
                # with a real event.  Confirm whether that is intended.
                if actual_event[2] <= i * FRAME_DURATION_SEC and actual_event[1] >= (i + 1) * FRAME_DURATION_SEC:
                    res.append((scope_num, start + i * FRAME_DURATION_SEC, start + (i + 1) * FRAME_DURATION_SEC, NOISE_FLAG))
        if actual_event:
            # Insert the real event at the frame whose start time matches
            # the event's begin time (float-tolerant comparison).
            if abs(actual_event[1] - start - i * FRAME_DURATION_SEC) < 0.000001:
                res.append(actual_event)
    return res
def add_noise(events):
    """Surround the real occultation events with random noise events.

    events: list of (scope_num, t_begin, t_end, flag) occultations.
    Returns a new event list covering a window four pass-durations wide,
    centred on the pass, with noise generated for every telescope.
    """
    max_event = max(events, key=lambda x: x[2])  # 2 is end time
    min_event = min(events, key=lambda x: x[1])  # 1 is start time
    event_to_scope = {}
    for event in events:
        # key is scope number
        event_to_scope[event[0]] = event
    pass_duration = round_time_to_frame(max_event[2] - min_event[1])
    pass_center = round_time_to_frame(min_event[1] + pass_duration / 2)
    res = []
    # Debug output: number of real events (Python 2 print statement).
    print len(events)
    for scope_num in range(T_WIDTH * T_HEIGHT):
        actual_event = None
        # dict.has_key() is Python 2 only.
        if event_to_scope.has_key(scope_num):
            actual_event = event_to_scope[scope_num]
        res.extend(list_with_noise(pass_center - 2 * pass_duration, 4 * pass_duration, actual_event, scope_num))
    return res
def calc_event(telescope_num):
    """Compute the occultation event seen by one telescope, if any.

    Solves |p_asteroid(t) - p_telescope| = ASTEROID_R, a quadratic in t
    whose roots are the times the shadow's edge crosses the telescope.
    Returns (telescope_num, t_start, t_end, OCCULTATION_FLAG), or None
    (implicitly) when the shadow misses.
    """
    xa = asteroid[0]
    ya = asteroid[1]
    vxa = asteroid[2]
    vya = asteroid[3]
    xt = telescopes[telescope_num][0]
    yt = telescopes[telescope_num][1]
    # Asteroid start position relative to this telescope.
    xd = xa - xt
    yd = ya - yt
    # Quadratic coefficients of |d + v*t|^2 = R^2.
    a = vxa * vxa + vya * vya
    b = 2 * (vxa * xd + vya * yd)
    c = xd * xd + yd * yd - ASTEROID_R * ASTEROID_R
    D = b * b - 4 * a * c
    if D > 0:
        t1 = (-b + math.sqrt(D)) / (2 * a)
        t2 = (-b - math.sqrt(D)) / (2 * a)
        t1 = round_time_to_frame(t1)
        t2 = round_time_to_frame(t2)
        # Guarantee a minimum one-frame duration; with a > 0, t2 is the
        # earlier root, so (t2, t1) is (start, end).
        if t1 == t2:
            t1 = t2 + FRAME_DURATION_SEC
        return (telescope_num, t2, t1, OCCULTATION_FLAG)
    #event_begin_point = (xa + vxa * t1, ya + vya * t1)
    #event_end_point = (xa + vxa * t2, ya + vya * t2)
def calc_events():
    """Collect occultation events for every telescope that sees one."""
    candidates = (calc_event(num) for num in telescopes.keys())
    return [event for event in candidates if event]
# Driver: build the scenario, simulate events, and dump the three data files.
telescopes = init_telescopes()
#print telescopes
asteroid = init_asteroid()
#asteroid = (0,0,1,1)
#print asteroid
events = calc_events()
events = add_noise(events)
# output data -- each "with" block closes its file; the original's explicit
# f.close() calls inside the with bodies were redundant and are removed.
with open('asteroid.txt','w') as f:
    f.write(format(str(asteroid) + ' ' + str(ASTEROID_R)))
with open('events.txt','w') as f:
    for event in events:
        f.write(format(str(event)))
with open('telescope.txt','w') as f:
    for t_num in telescopes.keys():
        f.write(format(str(t_num) + ' ' + str(telescopes[t_num])))
# commit message: "harsh params back"
#!/bin/python
# The asteroid's shadow is round and its radius is always ASTEROID_R.
# The shadow's speed is between ASTEROID_MIN_SPEED and ASTEROID_MAX_SPEED.
# Vx is positive and Vy is smaller than Vx by at least a factor of ASTEROID_VX_VY_RATIO.
# That is because asteroids orbit in the same direction as Earth and mostly in the same plane - so the speed falls in a known range.
# We cannot detect ones that are near 0, so we look for ones between min and max; later we can do -max, -min with the same algorithm.
#
# We have a rectangle grid of telescope. Number of telescopes T_WIDTH * T_HEIGHT
# The distance between them is T_STEP_X and T_STEP_Y
#
# Noise: NOISE_EVENTS_PER_SCOPE_SECOND random events are injected per telescope-second (see add_noise).
import os
import math
from random import randint
NOISE_EVENTS_PER_SCOPE_SECOND = 10.0
FRAME_DURATION_SEC = 1 / 100.0
ASTEROID_R = 100
# Shadow speed range (units per second); min is a tenth of max.
# (The original duplicated the ASTEROID_MAX_SPEED assignment; the
# redundant second copy has been removed.)
ASTEROID_MAX_SPEED = 10 * 1000
ASTEROID_MIN_SPEED = ASTEROID_MAX_SPEED / 10
# |Vy| is at most Vx / ASTEROID_VX_VY_RATIO.
ASTEROID_VX_VY_RATIO = 3
# Telescope grid dimensions and spacing.
T_WIDTH = 10
T_HEIGHT = 50
T_STEP_X = ASTEROID_R * 5
T_STEP_Y = ASTEROID_R
# Event type tags used in the output files.
OCCULTATION_FLAG = 'occultation'
NOISE_FLAG = 'noise'
telescopes = {}  # telescope_num: (x, y)
asteroid = ()  # (x, y, vx, vy)
def round_time_to_frame(t):
    """Snap a time (seconds) to the nearest whole frame boundary."""
    frames = round(t / FRAME_DURATION_SEC)
    return frames * FRAME_DURATION_SEC
def format(text):
    """Strip tuple punctuation ("()", ",", "'") from *text* and append the
    platform line separator, yielding one space-separated output line.

    Fix: the original parameter was named ``str``, shadowing the builtin.
    (The function name itself still shadows builtin format() but is kept
    for the existing call sites.)
    """
    stripped = text.replace('(', '').replace(')', '').replace(',', '').replace('\'', '')
    return stripped + os.linesep
def time_str(time):
    """Format a time value with exactly two decimal places."""
    return "%.2f" % (time,)
def init_telescopes():
    """Build the telescope grid, mapping telescope number -> (x, y).

    Numbering runs down each column: telescope row + col * T_HEIGHT
    sits at (col * T_STEP_X, row * T_STEP_Y).
    """
    return dict(
        (col * T_HEIGHT + row, (col * T_STEP_X, row * T_STEP_Y))
        for col in range(T_WIDTH)
        for row in range(T_HEIGHT)
    )
def init_asteroid():
    """Pick a random asteroid trajectory that crosses the telescope grid.

    Chooses a target point (xt, yt) in the middle of the grid, a speed,
    and a flight time t, then back-projects the start position so the
    shadow reaches the target after t seconds.  Returns (x, y, vx, vy).

    Fix: integer floor division (//) replaces / so randint always gets
    integer bounds (required on Python 3); the operands are ints, so the
    values are identical to the original Python 2 behavior.
    """
    xt = T_WIDTH * T_STEP_X // 2
    yt = randint(0, T_HEIGHT * T_STEP_Y)
    vx = randint(0, ASTEROID_MAX_SPEED - ASTEROID_MIN_SPEED) + ASTEROID_MIN_SPEED
    vy = randint(-vx // ASTEROID_VX_VY_RATIO, vx // ASTEROID_VX_VY_RATIO)
    t = randint(100, 200)
    x = xt - vx * t
    y = yt - vy * t
    return (x, y, vx, vy)
def list_with_noise(start, duration, actual_event, scope_num):
    """Generate per-frame noise events for one telescope over a window.

    start: window start time (seconds); duration: window length (seconds);
    actual_event: this telescope's real occultation tuple
        (scope_num, t_begin, t_end, flag), or None;
    scope_num: telescope number stamped on the emitted tuples.
    Returns a list of (scope_num, frame_start, frame_end, NOISE_FLAG)
    tuples, with actual_event spliced in at its begin frame.
    """
    duration_frames = duration / FRAME_DURATION_SEC
    # Mean frames between noise events; randint(0, noise_ratio) == 0
    # fires with probability ~1/noise_ratio per frame.
    noise_ratio = 1 / NOISE_EVENTS_PER_SCOPE_SECOND / FRAME_DURATION_SEC
    res = []
    for i in range(int(duration_frames)):
        if randint(0, noise_ratio) == 0:
            if not actual_event:
                res.append((scope_num, start + i * FRAME_DURATION_SEC, start + (i + 1) * FRAME_DURATION_SEC, NOISE_FLAG))
            else:
                # NOTE(review): this compares the event's absolute times to
                # window-relative frame offsets, and requires t_end <= frame
                # start AND t_begin >= frame end -- which cannot both hold,
                # so noise appears to be suppressed entirely on telescopes
                # with a real event.  Confirm whether that is intended.
                if actual_event[2] <= i * FRAME_DURATION_SEC and actual_event[1] >= (i + 1) * FRAME_DURATION_SEC:
                    res.append((scope_num, start + i * FRAME_DURATION_SEC, start + (i + 1) * FRAME_DURATION_SEC, NOISE_FLAG))
        if actual_event:
            # Insert the real event at the frame whose start time matches
            # the event's begin time (float-tolerant comparison).
            if abs(actual_event[1] - start - i * FRAME_DURATION_SEC) < 0.000001:
                res.append(actual_event)
    return res
def add_noise(events):
    """Surround the real occultation events with random noise events.

    events: list of (scope_num, t_begin, t_end, flag) occultations.
    Returns a new event list covering a window four pass-durations wide,
    centred on the pass, with noise generated for every telescope.
    """
    max_event = max(events, key=lambda x: x[2])  # 2 is end time
    min_event = min(events, key=lambda x: x[1])  # 1 is start time
    event_to_scope = {}
    for event in events:
        # key is scope number
        event_to_scope[event[0]] = event
    pass_duration = round_time_to_frame(max_event[2] - min_event[1])
    pass_center = round_time_to_frame(min_event[1] + pass_duration / 2)
    res = []
    # Debug output: number of real events (Python 2 print statement).
    print len(events)
    for scope_num in range(T_WIDTH * T_HEIGHT):
        actual_event = None
        # dict.has_key() is Python 2 only.
        if event_to_scope.has_key(scope_num):
            actual_event = event_to_scope[scope_num]
        res.extend(list_with_noise(pass_center - 2 * pass_duration, 4 * pass_duration, actual_event, scope_num))
    return res
def calc_event(telescope_num):
    """Compute the occultation event seen by one telescope, if any.

    Solves |p_asteroid(t) - p_telescope| = ASTEROID_R, a quadratic in t
    whose roots are the times the shadow's edge crosses the telescope.
    Returns (telescope_num, t_start, t_end, OCCULTATION_FLAG), or None
    (implicitly) when the shadow misses.
    """
    xa = asteroid[0]
    ya = asteroid[1]
    vxa = asteroid[2]
    vya = asteroid[3]
    xt = telescopes[telescope_num][0]
    yt = telescopes[telescope_num][1]
    # Asteroid start position relative to this telescope.
    xd = xa - xt
    yd = ya - yt
    # Quadratic coefficients of |d + v*t|^2 = R^2.
    a = vxa * vxa + vya * vya
    b = 2 * (vxa * xd + vya * yd)
    c = xd * xd + yd * yd - ASTEROID_R * ASTEROID_R
    D = b * b - 4 * a * c
    if D > 0:
        t1 = (-b + math.sqrt(D)) / (2 * a)
        t2 = (-b - math.sqrt(D)) / (2 * a)
        t1 = round_time_to_frame(t1)
        t2 = round_time_to_frame(t2)
        # Guarantee a minimum one-frame duration; with a > 0, t2 is the
        # earlier root, so (t2, t1) is (start, end).
        if t1 == t2:
            t1 = t2 + FRAME_DURATION_SEC
        return (telescope_num, t2, t1, OCCULTATION_FLAG)
    #event_begin_point = (xa + vxa * t1, ya + vya * t1)
    #event_end_point = (xa + vxa * t2, ya + vya * t2)
def calc_events():
    """Collect occultation events for every telescope that sees one."""
    candidates = (calc_event(num) for num in telescopes.keys())
    return [event for event in candidates if event]
# Driver: build the scenario, simulate events, and dump the three data files.
telescopes = init_telescopes()
#print telescopes
asteroid = init_asteroid()
#asteroid = (0,0,1,1)
#print asteroid
events = calc_events()
events = add_noise(events)
# output data -- each "with" block closes its file; the original's explicit
# f.close() calls inside the with bodies were redundant and are removed.
with open('asteroid.txt','w') as f:
    f.write(format(str(asteroid) + ' ' + str(ASTEROID_R)))
with open('events.txt','w') as f:
    for event in events:
        f.write(format(str(event)))
with open('telescope.txt','w') as f:
    for t_num in telescopes.keys():
        f.write(format(str(t_num) + ' ' + str(telescopes[t_num])))
# ---------------------------------------------------------------- file boundary
from django import http
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.views.generic import TemplateView
from django.views.generic import FormView
from django.views.generic import View
from history import forms
def get_form_data_from_session(session):
    """Pull the saved form entries out of the session, removing the CSRF
    token from each entry (mutates the stored dicts in place).
    """
    entries = session.get('forms', [])
    for entry in entries:
        entry.pop("csrfmiddlewaretoken", None)
    return entries
def format_history_entry(entry):
    """Render one saved form entry as a human-readable sentence.

    entry: dict of field name -> list of submitted values (the shape of
    Django's QueryDict.lists()).  Returns e.g.
    "For <duration>: <circumstances> (<description>)".
    """
    description = ""
    if entry["description"][0]:
        description = "({0})".format(entry["description"][0])
    # NOTE(review): this mutates entry["circumstances"] in place (remove /
    # +=), so formatting the same session entry twice gives different
    # output -- confirm each entry is only formatted once per request.
    circumstance_data = entry["circumstances"]
    if 'other' in circumstance_data:
        circumstance_data.remove('other')
        if entry["other_more"][0]:
            # Replace the generic 'other' choice with the free-text answer.
            circumstance_data += entry["other_more"]
    formatted_circumstances = list(map(format_circumstance, circumstance_data))
    circumstances = ", ".join(formatted_circumstances)
    # Map the stored duration key to its display label.
    duration_dict = dict(forms.HistoryDetailsForm.DURATION_CHOICES)
    duration = duration_dict[entry["duration"][0]]
    return "For {0}: {1} {2}".format(duration, circumstances, description)
def format_circumstance(circumstance):
    """Translate a stored circumstance key into its display label;
    unknown keys (free-text "other" answers) pass through unchanged.
    """
    labels = dict(forms.HistoryDetailsForm.CIRCUMSTANCE_CHOICES)
    return labels.get(circumstance, circumstance)
class HistoryDetailsView(FormView):
    """
    Render HistoryDetailsView and redirect to HistoryReportView when
    the form has been completed 3 times
    """
    template_name = "history/details.html"
    form_class = forms.HistoryDetailsForm
    def get(self, request, *args, **kwargs):
        # Already answered three times: go straight to the report.
        if len(self.request.session.get('forms', [])) >= 3:
            url = reverse('history:report')
            return http.HttpResponseRedirect(url)
        else:
            response = super().get(request, *args, **kwargs)
            return response
    def form_valid(self, form):
        # Persist the raw submitted data (QueryDict -> plain dict of lists).
        if 'forms' not in self.request.session:
            self.request.session['forms'] = []
        self.request.session['forms'] += [dict(form.data.lists())]
        # Loop back to the form until three entries exist, then report.
        if len(self.request.session['forms']) < 3:
            url = reverse('history:details')
        else:
            url = reverse('history:report')
        return http.HttpResponseRedirect(url)
    def get_context_data(self, **kwargs):
        # NOTE(review): does not call super().get_context_data(), so any
        # context FormView would add beyond kwargs is dropped -- confirm
        # the template only needs what is set here.
        context = kwargs
        history_data = get_form_data_from_session(self.request.session)
        if history_data:
            context['history'] = map(format_history_entry, history_data)
            context['circumstance_title'] = "Your circumstances previously"
            # Progress through the three required entries, as a percentage.
            context['percentage'] = len(history_data)*100/3
        else:
            context['circumstance_title'] = "Your current circumstances"
            context['percentage'] = 0
        return context
class HistoryReportView(TemplateView):
    """Show the final report once all three history forms are complete."""
    def get(self, request, *args, **kwargs):
        session = self.request.session
        # Not enough entries yet: send the user back to the details form.
        if 'forms' not in session or len(session['forms']) < 3:
            url = reverse('history:details')
            return http.HttpResponseRedirect(url)
        else:
            self.template_name = "history/report.html"
            response = super().get(request, *args, **kwargs)
            return response
    def get_context_data(self, **kwargs):
        context = kwargs
        history_data = get_form_data_from_session(self.request.session)
        # A lazy map object; the template iterates it once.
        context['report'] = map(format_history_entry, history_data)
        return context
class ClearSessionView(TemplateView):
    """POST target that wipes the saved history forms and restarts."""
    def post(self, request, *args, **kwargs):
        self.request.session['forms'] = []
        return http.HttpResponseRedirect(reverse('history:details'))
# commit message: "Minor PEP8 fixup"
from django import http
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.views.generic import TemplateView
from django.views.generic import FormView
from django.views.generic import View
from history import forms
def get_form_data_from_session(session):
    """Pull the saved form entries out of the session, removing the CSRF
    token from each entry (mutates the stored dicts in place).
    """
    entries = session.get('forms', [])
    for entry in entries:
        entry.pop("csrfmiddlewaretoken", None)
    return entries
def format_history_entry(entry):
    """Render one saved form entry as a human-readable sentence.

    entry: dict of field name -> list of submitted values (the shape of
    Django's QueryDict.lists()).  Returns e.g.
    "For <duration>: <circumstances> (<description>)".
    """
    description = ""
    if entry["description"][0]:
        description = "({0})".format(entry["description"][0])
    # NOTE(review): this mutates entry["circumstances"] in place (remove /
    # +=), so formatting the same session entry twice gives different
    # output -- confirm each entry is only formatted once per request.
    circumstance_data = entry["circumstances"]
    if 'other' in circumstance_data:
        circumstance_data.remove('other')
        if entry["other_more"][0]:
            # Replace the generic 'other' choice with the free-text answer.
            circumstance_data += entry["other_more"]
    formatted_circumstances = list(map(format_circumstance, circumstance_data))
    circumstances = ", ".join(formatted_circumstances)
    # Map the stored duration key to its display label.
    duration_dict = dict(forms.HistoryDetailsForm.DURATION_CHOICES)
    duration = duration_dict[entry["duration"][0]]
    return "For {0}: {1} {2}".format(duration, circumstances, description)
def format_circumstance(circumstance):
    """Translate a stored circumstance key into its display label;
    unknown keys (free-text "other" answers) pass through unchanged.
    """
    labels = dict(forms.HistoryDetailsForm.CIRCUMSTANCE_CHOICES)
    return labels.get(circumstance, circumstance)
class HistoryDetailsView(FormView):
    """
    Render HistoryDetailsView and redirect to HistoryReportView when
    the form has been completed 3 times
    """
    template_name = "history/details.html"
    form_class = forms.HistoryDetailsForm
    def get(self, request, *args, **kwargs):
        # Already answered three times: go straight to the report.
        if len(self.request.session.get('forms', [])) >= 3:
            url = reverse('history:report')
            return http.HttpResponseRedirect(url)
        else:
            response = super().get(request, *args, **kwargs)
            return response
    def form_valid(self, form):
        # Persist the raw submitted data (QueryDict -> plain dict of lists).
        if 'forms' not in self.request.session:
            self.request.session['forms'] = []
        self.request.session['forms'] += [dict(form.data.lists())]
        # Loop back to the form until three entries exist, then report.
        if len(self.request.session['forms']) < 3:
            url = reverse('history:details')
        else:
            url = reverse('history:report')
        return http.HttpResponseRedirect(url)
    def get_context_data(self, **kwargs):
        # NOTE(review): does not call super().get_context_data(), so any
        # context FormView would add beyond kwargs is dropped -- confirm
        # the template only needs what is set here.
        context = kwargs
        history_data = get_form_data_from_session(self.request.session)
        if history_data:
            context['history'] = map(format_history_entry, history_data)
            context['circumstance_title'] = "Your circumstances previously"
            # Progress through the three required entries, as a percentage.
            context['percentage'] = len(history_data) * 100 / 3
        else:
            context['circumstance_title'] = "Your current circumstances"
            context['percentage'] = 0
        return context
class HistoryReportView(TemplateView):
    """Show the final report once all three history forms are complete."""
    def get(self, request, *args, **kwargs):
        session = self.request.session
        # Not enough entries yet: send the user back to the details form.
        if 'forms' not in session or len(session['forms']) < 3:
            url = reverse('history:details')
            return http.HttpResponseRedirect(url)
        else:
            self.template_name = "history/report.html"
            response = super().get(request, *args, **kwargs)
            return response
    def get_context_data(self, **kwargs):
        context = kwargs
        history_data = get_form_data_from_session(self.request.session)
        # A lazy map object; the template iterates it once.
        context['report'] = map(format_history_entry, history_data)
        return context
class ClearSessionView(TemplateView):
    """POST target that wipes the saved history forms and restarts."""
    def post(self, request, *args, **kwargs):
        self.request.session['forms'] = []
        return http.HttpResponseRedirect(reverse('history:details'))
# ---------------------------------------------------------------- file boundary
#!/usr/bin/env python
#-----------------------------------------------------------------------------------------------------------------------
# INFO:
#-----------------------------------------------------------------------------------------------------------------------
"""
Author: Evan Hubinger
License: Apache 2.0
Description: The Coconut root.
"""
#-----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
#-----------------------------------------------------------------------------------------------------------------------
from __future__ import with_statement, print_function, absolute_import, unicode_literals, division
try:
import readline
except ImportError:
pass
import sys
#-----------------------------------------------------------------------------------------------------------------------
# CONSTANTS:
#-----------------------------------------------------------------------------------------------------------------------
VERSION = "0.3.4"
VERSION_NAME = "Macapuno"
# True for in-development builds; appends a dev suffix to the version.
DEVELOP = True
# Encoding used by the Python 2 input() shim in PY2_HEADER below.
ENCODING = "UTF-8"
if DEVELOP:
    VERSION += "-post_dev"
VERSION_STR = VERSION + " [" + VERSION_NAME + "]"
# True when running under Python 2 (selects the compatibility header).
PY2 = sys.version_info < (3,)
PY2_HEADER = r'''py2_filter, py2_hex, py2_map, py2_oct, py2_zip = filter, hex, map, oct, zip
from future_builtins import *
py2_open = open
from io import open
py2_range, range = range, xrange
py2_int = int
_coconut_int, _coconut_long = py2_int, long
class _coconut_metaint(type):
def __instancecheck__(cls, inst):
return isinstance(inst, (_coconut_int, _coconut_long))
class int(_coconut_int):
"""Python 3 int."""
__metaclass__ = _coconut_metaint
py2_chr, chr = chr, unichr
py2_str = str
_coconut_str, _coconut_unicode = py2_str, unicode
_coconut_new_int = int
bytes = _coconut_str
class _coconut_metabytes(type):
def __instancecheck__(cls, inst):
return isinstance(inst, _coconut_str)
class bytes(_coconut_str):
"""Python 3 bytes."""
__metaclass__ = _coconut_metabytes
def __init__(self, *args, **kwargs):
"""Python 3 bytes constructor."""
if len(args) == 1 and isinstance(args[0], _coconut_new_int):
if kwargs:
_coconut_str.__init__(self, b"\x00" * args[0], **kwargs)
else:
_coconut_str.__init__(self, b"\x00" * args[0])
elif kwargs:
_coconut_str.__init__(self, *args, **kwargs)
else:
_coconut_str.__init__(self, *args)
class _coconut_metastr(type):
def __instancecheck__(cls, inst):
return isinstance(inst, _coconut_unicode)
class str(_coconut_unicode):
"""Python 3 str."""
__metaclass__ = _coconut_metastr
def __init__(self, *args, **kwargs):
"""Python 3 str constructor."""
if len(args) == 1 and isinstance(args[0], _coconut_str):
if kwargs:
_coconut_unicode.__init__(self, repr(args[0]), **kwargs)
else:
_coconut_unicode.__init__(self, repr(args[0]))
elif kwargs:
_coconut_unicode.__init__(self, *args, **kwargs)
else:
_coconut_unicode.__init__(self, *args, **kwargs)
_coconut_encoding = "'''+ENCODING+r'''"
py2_print = print
_coconut_print = py2_print
_coconut_new_str = str
def print(*args, **kwargs):
"""Python 3 print."""
return _coconut_print(*(_coconut_new_str(x) for x in args), **kwargs)
py2_input = input
_coconut_raw_input = raw_input
def input(*args, **kwargs):
"""Python 3 input."""
return _coconut_raw_input(*args, **kwargs).decode(_coconut_encoding)'''
#-----------------------------------------------------------------------------------------------------------------------
# SETUP:
#-----------------------------------------------------------------------------------------------------------------------
# On Python 2, execute the compatibility header so the rebound builtins
# are available in this module as well.
if PY2:
    exec(PY2_HEADER)
# commit message: "Uses __new__ instead of __init__ for str and bytes"
#!/usr/bin/env python
#-----------------------------------------------------------------------------------------------------------------------
# INFO:
#-----------------------------------------------------------------------------------------------------------------------
"""
Author: Evan Hubinger
License: Apache 2.0
Description: The Coconut root.
"""
#-----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
#-----------------------------------------------------------------------------------------------------------------------
from __future__ import with_statement, print_function, absolute_import, unicode_literals, division
try:
import readline
except ImportError:
pass
import sys
#-----------------------------------------------------------------------------------------------------------------------
# CONSTANTS:
#-----------------------------------------------------------------------------------------------------------------------
VERSION = "0.3.4"
VERSION_NAME = "Macapuno"
# True for in-development builds; appends a dev suffix to the version.
DEVELOP = True
# Encoding used by the Python 2 input() shim in PY2_HEADER below.
ENCODING = "UTF-8"
if DEVELOP:
    VERSION += "-post_dev"
VERSION_STR = VERSION + " [" + VERSION_NAME + "]"
# True when running under Python 2 (selects the compatibility header).
PY2 = sys.version_info < (3,)
PY2_HEADER = r'''py2_filter, py2_hex, py2_map, py2_oct, py2_zip = filter, hex, map, oct, zip
from future_builtins import *
py2_open = open
from io import open
py2_range, range = range, xrange
py2_int = int
_coconut_int, _coconut_long = py2_int, long
class _coconut_metaint(type):
def __instancecheck__(cls, inst):
return isinstance(inst, (_coconut_int, _coconut_long))
class int(_coconut_int):
"""Python 3 int."""
__metaclass__ = _coconut_metaint
py2_chr, chr = chr, unichr
py2_str = str
_coconut_str, _coconut_unicode = py2_str, unicode
_coconut_new_int = int
bytes = _coconut_str
class _coconut_metabytes(type):
def __instancecheck__(cls, inst):
return isinstance(inst, _coconut_str)
class bytes(_coconut_str):
"""Python 3 bytes."""
__metaclass__ = _coconut_metabytes
def __new__(cls, *args, **kwargs):
"""Python 3 bytes constructor."""
if len(args) == 1 and isinstance(args[0], _coconut_new_int):
return _coconut_str.__new__(cls, b"\x00" * args[0], **kwargs)
else:
return _coconut_str.__new__(cls, *args, **kwargs)
class _coconut_metastr(type):
def __instancecheck__(cls, inst):
return isinstance(inst, _coconut_unicode)
class str(_coconut_unicode):
"""Python 3 str."""
__metaclass__ = _coconut_metastr
def __new__(cls, *args, **kwargs):
"""Python 3 str constructor."""
if len(args) == 1 and isinstance(args[0], _coconut_str):
return _coconut_unicode.__new__(cls, repr(args[0]), **kwargs)
else:
return _coconut_unicode.__new__(cls, *args, **kwargs)
_coconut_encoding = "'''+ENCODING+r'''"
py2_print = print
_coconut_print = py2_print
_coconut_new_str = str
def print(*args, **kwargs):
"""Python 3 print."""
return _coconut_print(*(_coconut_new_str(x) for x in args), **kwargs)
py2_input = input
_coconut_raw_input = raw_input
def input(*args, **kwargs):
"""Python 3 input."""
return _coconut_raw_input(*args, **kwargs).decode(_coconut_encoding)'''
#-----------------------------------------------------------------------------------------------------------------------
# SETUP:
#-----------------------------------------------------------------------------------------------------------------------
# On Python 2, execute the compatibility header so the rebound builtins
# are available in this module as well.
if PY2:
    exec(PY2_HEADER)
# ---------------------------------------------------------------- file boundary
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------------------------------------------------
# INFO:
#-----------------------------------------------------------------------------------------------------------------------
"""
Author: Evan Hubinger
License: Apache 2.0
Description: Basic Coconut constants and compatibility handling.
"""
#-----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
#-----------------------------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
import sys as _coconut_sys
#-----------------------------------------------------------------------------------------------------------------------
# VERSION:
#-----------------------------------------------------------------------------------------------------------------------
VERSION = "1.2.0"
VERSION_NAME = "Colonel"
# Development build number; falsy (e.g. 0) for a release build.
DEVELOP = 5
#-----------------------------------------------------------------------------------------------------------------------
# CONSTANTS:
#-----------------------------------------------------------------------------------------------------------------------
if DEVELOP:
    VERSION += "-post_dev" + str(int(DEVELOP))
__version__ = VERSION
VERSION_STR = VERSION + " [" + VERSION_NAME + "]"
# Git-style tags derived from the version string.
VERSION_TAG = "v" + VERSION
VERSION_STR_TAG = "v" + VERSION_STR
# Interpreter feature flags; select a compatibility header below.
PY2 = _coconut_sys.version_info < (3,)
PY26 = _coconut_sys.version_info < (2, 7)
# Header used on Python 3: only aliases the original builtins under py_*
# names so compiled Coconut code can still reach them.
PY3_HEADER = r'''py_chr, py_filter, py_hex, py_input, py_int, py_map, py_oct, py_open, py_print, py_range, py_str, py_zip = chr, filter, hex, input, int, map, oct, open, print, range, str, zip
'''
PY27_HEADER = PY3_HEADER + r'''py_raw_input, py_xrange = raw_input, xrange
_coconut_raw_input, _coconut_xrange, _coconut_int, _coconut_long, _coconut_print, _coconut_str, _coconut_unicode, _coconut_repr = raw_input, xrange, int, long, print, str, unicode, repr
from future_builtins import *
chr, str = unichr, unicode
from io import open
class range(object):
__slots__ = ("_xrange",)
if hasattr(_coconut_xrange, "__doc__"):
__doc__ = _coconut_xrange.__doc__
def __init__(self, *args):
self._xrange = _coconut_xrange(*args)
def __iter__(self):
return _coconut.iter(self._xrange)
def __reversed__(self):
return _coconut.reversed(self._xrange)
def __len__(self):
return _coconut.len(self._xrange)
def __contains__(self, elem):
return elem in self._xrange
def __getitem__(self, index):
if _coconut.isinstance(index, _coconut.slice):
start, stop, step = index.start, index.stop, index.step
if start is None:
start = 0
elif start < 0:
start += _coconut.len(self._xrange)
if stop is None:
stop = _coconut.len(self._xrange)
elif stop is not None and stop < 0:
stop += _coconut.len(self._xrange)
if step is None:
step = 1
return _coconut_map(self._xrange.__getitem__, self.__class__(start, stop, step))
else:
return self._xrange[index]
def count(self, elem):
"""Count the number of times elem appears in the range."""
return int(elem in self._xrange)
def index(self, elem):
"""Find the index of elem in the range."""
if elem not in self._xrange: raise _coconut.ValueError(_coconut.repr(elem) + " is not in range")
start, _, step = self._xrange.__reduce_ex__(2)[1]
return (elem - start) // step
def __repr__(self):
return _coconut.repr(self._xrange)[1:]
def __reduce_ex__(self, protocol):
return (self.__class__, self._xrange.__reduce_ex__(protocol)[1])
def __reduce__(self):
return self.__reduce_ex__(_coconut.pickle.HIGHEST_PROTOCOL)
def __hash__(self):
return _coconut.hash(self._xrange.__reduce__()[1])
def __copy__(self):
return self.__class__(*self._xrange.__reduce__()[1])
def __eq__(self, other):
reduction = self.__reduce__()
return _coconut.isinstance(other, reduction[0]) and reduction[1] == other.__reduce__()[1]
from collections import Sequence as _coconut_Sequence
_coconut_Sequence.register(range)
class int(_coconut_int):
__slots__ = ()
if hasattr(_coconut_int, "__doc__"):
__doc__ = _coconut_int.__doc__
class __metaclass__(type):
def __instancecheck__(cls, inst):
return _coconut.isinstance(inst, (_coconut_int, _coconut_long))
class bytes(_coconut_str):
__slots__ = ()
if hasattr(_coconut_str, "__doc__"):
__doc__ = _coconut_str.__doc__
class __metaclass__(type):
def __instancecheck__(cls, inst):
return _coconut.isinstance(inst, _coconut_str)
def __new__(cls, *args, **kwargs):
return _coconut_str.__new__(cls, _coconut.bytearray(*args, **kwargs))
from functools import wraps as _coconut_wraps
@_coconut_wraps(_coconut_print)
def print(*args, **kwargs):
if _coconut.hasattr(_coconut_sys.stdout, "encoding") and _coconut_sys.stdout.encoding is not None:
return _coconut_print(*(_coconut_unicode(x).encode(_coconut_sys.stdout.encoding) for x in args), **kwargs)
else:
return _coconut_print(*(_coconut_unicode(x).encode() for x in args), **kwargs)
@_coconut_wraps(_coconut_raw_input)
def input(*args, **kwargs):
if _coconut.hasattr(_coconut_sys.stdout, "encoding") and _coconut_sys.stdout.encoding is not None:
return _coconut_raw_input(*args, **kwargs).decode(_coconut_sys.stdout.encoding)
else:
return _coconut_raw_input(*args, **kwargs).decode()
@_coconut_wraps(_coconut_repr)
def repr(obj):
if isinstance(obj, _coconut_unicode):
return _coconut_repr(obj)[1:]
else:
return _coconut_repr(obj)
ascii = repr
def raw_input(*args):
"""Coconut uses Python 3 "input" instead of Python 2 "raw_input"."""
raise _coconut.NameError('Coconut uses Python 3 "input" instead of Python 2 "raw_input"')
def xrange(*args):
"""Coconut uses Python 3 "range" instead of Python 2 "xrange"."""
raise _coconut.NameError('Coconut uses Python 3 "range" instead of Python 2 "xrange"')
'''
PY2_HEADER = PY27_HEADER + '''if _coconut_sys.version_info < (2, 7):
import functools as _coconut_functools, copy_reg as _coconut_copy_reg
def _coconut_new_partial(func, args, keywords):
return _coconut_functools.partial(func, *(args if args is not None else ()), **(keywords if keywords is not None else {}))
_coconut_copy_reg.constructor(_coconut_new_partial)
def _coconut_reduce_partial(self):
return (_coconut_new_partial, (self.func, self.args, self.keywords))
_coconut_copy_reg.pickle(_coconut_functools.partial, _coconut_reduce_partial)
'''
# Universal header: a runtime version check that selects between the
# Python 2 and Python 3 headers, with each header line re-indented one
# level so it nests under the if/else.
PYCHECK_HEADER = r'''if _coconut_sys.version_info < (3,):
''' + "".join(
    (" " if _line else "") + _line for _line in PY2_HEADER.splitlines(True)
) + '''else:
''' + "".join(
    (" " if _line else "") + _line for _line in PY3_HEADER.splitlines(True)
)
#-----------------------------------------------------------------------------------------------------------------------
# SETUP:
#-----------------------------------------------------------------------------------------------------------------------
if PY2:
    # Apply the matching Python 2 compatibility shims to this module, then
    # provide the _coconut namespace object the shims reference at runtime.
    if PY26:
        exec(PY2_HEADER)
    else:
        exec(PY27_HEADER)
    import __builtin__ as _coconut  # NOQA
    import pickle
    _coconut.pickle = pickle
    _coconut_map = map
else:
    exec(PY3_HEADER)
# commit message: "Updates develop version"
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------------------------------------------------
# INFO:
#-----------------------------------------------------------------------------------------------------------------------
"""
Author: Evan Hubinger
License: Apache 2.0
Description: Basic Coconut constants and compatibility handling.
"""
#-----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
#-----------------------------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
import sys as _coconut_sys
#-----------------------------------------------------------------------------------------------------------------------
# VERSION:
#-----------------------------------------------------------------------------------------------------------------------
VERSION = "1.2.0"
VERSION_NAME = "Colonel"
# develop build counter; falsy (e.g. 0) for a release build
DEVELOP = 6
#-----------------------------------------------------------------------------------------------------------------------
# CONSTANTS:
#-----------------------------------------------------------------------------------------------------------------------
if DEVELOP:
    # develop builds carry a "-post_dev<N>" suffix on the version string
    VERSION += "-post_dev" + str(int(DEVELOP))
__version__ = VERSION
# human-readable variants used for banners and release tags
VERSION_STR = VERSION + " [" + VERSION_NAME + "]"
VERSION_TAG = "v" + VERSION
VERSION_STR_TAG = "v" + VERSION_STR
# interpreter feature flags used below to pick a compatibility header
PY2 = _coconut_sys.version_info < (3,)
PY26 = _coconut_sys.version_info < (2, 7)
PY3_HEADER = r'''py_chr, py_filter, py_hex, py_input, py_int, py_map, py_oct, py_open, py_print, py_range, py_str, py_zip = chr, filter, hex, input, int, map, oct, open, print, range, str, zip
'''
PY27_HEADER = PY3_HEADER + r'''py_raw_input, py_xrange = raw_input, xrange
_coconut_raw_input, _coconut_xrange, _coconut_int, _coconut_long, _coconut_print, _coconut_str, _coconut_unicode, _coconut_repr = raw_input, xrange, int, long, print, str, unicode, repr
from future_builtins import *
chr, str = unichr, unicode
from io import open
class range(object):
__slots__ = ("_xrange",)
if hasattr(_coconut_xrange, "__doc__"):
__doc__ = _coconut_xrange.__doc__
def __init__(self, *args):
self._xrange = _coconut_xrange(*args)
def __iter__(self):
return _coconut.iter(self._xrange)
def __reversed__(self):
return _coconut.reversed(self._xrange)
def __len__(self):
return _coconut.len(self._xrange)
def __contains__(self, elem):
return elem in self._xrange
def __getitem__(self, index):
if _coconut.isinstance(index, _coconut.slice):
start, stop, step = index.start, index.stop, index.step
if start is None:
start = 0
elif start < 0:
start += _coconut.len(self._xrange)
if stop is None:
stop = _coconut.len(self._xrange)
elif stop is not None and stop < 0:
stop += _coconut.len(self._xrange)
if step is None:
step = 1
return _coconut_map(self._xrange.__getitem__, self.__class__(start, stop, step))
else:
return self._xrange[index]
def count(self, elem):
"""Count the number of times elem appears in the range."""
return int(elem in self._xrange)
def index(self, elem):
"""Find the index of elem in the range."""
if elem not in self._xrange: raise _coconut.ValueError(_coconut.repr(elem) + " is not in range")
start, _, step = self._xrange.__reduce_ex__(2)[1]
return (elem - start) // step
def __repr__(self):
return _coconut.repr(self._xrange)[1:]
def __reduce_ex__(self, protocol):
return (self.__class__, self._xrange.__reduce_ex__(protocol)[1])
def __reduce__(self):
return self.__reduce_ex__(_coconut.pickle.HIGHEST_PROTOCOL)
def __hash__(self):
return _coconut.hash(self._xrange.__reduce__()[1])
def __copy__(self):
return self.__class__(*self._xrange.__reduce__()[1])
def __eq__(self, other):
reduction = self.__reduce__()
return _coconut.isinstance(other, reduction[0]) and reduction[1] == other.__reduce__()[1]
from collections import Sequence as _coconut_Sequence
_coconut_Sequence.register(range)
class int(_coconut_int):
__slots__ = ()
if hasattr(_coconut_int, "__doc__"):
__doc__ = _coconut_int.__doc__
class __metaclass__(type):
def __instancecheck__(cls, inst):
return _coconut.isinstance(inst, (_coconut_int, _coconut_long))
class bytes(_coconut_str):
__slots__ = ()
if hasattr(_coconut_str, "__doc__"):
__doc__ = _coconut_str.__doc__
class __metaclass__(type):
def __instancecheck__(cls, inst):
return _coconut.isinstance(inst, _coconut_str)
def __new__(cls, *args, **kwargs):
return _coconut_str.__new__(cls, _coconut.bytearray(*args, **kwargs))
from functools import wraps as _coconut_wraps
@_coconut_wraps(_coconut_print)
def print(*args, **kwargs):
if _coconut.hasattr(_coconut_sys.stdout, "encoding") and _coconut_sys.stdout.encoding is not None:
return _coconut_print(*(_coconut_unicode(x).encode(_coconut_sys.stdout.encoding) for x in args), **kwargs)
else:
return _coconut_print(*(_coconut_unicode(x).encode() for x in args), **kwargs)
@_coconut_wraps(_coconut_raw_input)
def input(*args, **kwargs):
if _coconut.hasattr(_coconut_sys.stdout, "encoding") and _coconut_sys.stdout.encoding is not None:
return _coconut_raw_input(*args, **kwargs).decode(_coconut_sys.stdout.encoding)
else:
return _coconut_raw_input(*args, **kwargs).decode()
@_coconut_wraps(_coconut_repr)
def repr(obj):
if isinstance(obj, _coconut_unicode):
return _coconut_repr(obj)[1:]
else:
return _coconut_repr(obj)
ascii = repr
def raw_input(*args):
"""Coconut uses Python 3 "input" instead of Python 2 "raw_input"."""
raise _coconut.NameError('Coconut uses Python 3 "input" instead of Python 2 "raw_input"')
def xrange(*args):
"""Coconut uses Python 3 "range" instead of Python 2 "xrange"."""
raise _coconut.NameError('Coconut uses Python 3 "range" instead of Python 2 "xrange"')
'''
PY2_HEADER = PY27_HEADER + '''if _coconut_sys.version_info < (2, 7):
import functools as _coconut_functools, copy_reg as _coconut_copy_reg
def _coconut_new_partial(func, args, keywords):
return _coconut_functools.partial(func, *(args if args is not None else ()), **(keywords if keywords is not None else {}))
_coconut_copy_reg.constructor(_coconut_new_partial)
def _coconut_reduce_partial(self):
return (_coconut_new_partial, (self.func, self.args, self.keywords))
_coconut_copy_reg.pickle(_coconut_functools.partial, _coconut_reduce_partial)
'''
PYCHECK_HEADER = r'''if _coconut_sys.version_info < (3,):
''' + "".join(
(" " if _line else "") + _line for _line in PY2_HEADER.splitlines(True)
) + '''else:
''' + "".join(
(" " if _line else "") + _line for _line in PY3_HEADER.splitlines(True)
)
#-----------------------------------------------------------------------------------------------------------------------
# SETUP:
#-----------------------------------------------------------------------------------------------------------------------
# Execute the compatibility header matching the running interpreter.
if PY2:
    if PY26:
        exec(PY2_HEADER)
    else:
        exec(PY27_HEADER)
    # On Python 2, expose the builtins module (plus pickle and the
    # original map) under the names the exec'd header code expects.
    import __builtin__ as _coconut  # NOQA
    import pickle
    _coconut.pickle = pickle
    _coconut_map = map
else:
    exec(PY3_HEADER)
|
r"""
Parse biological sequences (:mod:`skbio.parse.sequences`)
=========================================================
.. currentmodule:: skbio.parse.sequences
This module provides functions for parsing sequence files in a variety of
different formats. Two interfaces are provided for parsing sequence files:
sequence iterators (high-level, recommended interface) and parsing functions
(lower-level interface).
Sequence iterator interface
---------------------------
The sequence iterator interface is the recommended way to parse sequence files.
The ``load`` function provides a standard, high-level interface to iterate over
sequence files regardless of file type or whether they are compressed. The
method accepts single or multiple file paths and employs the correct file
handlers, iterator objects, and parsers for the user.
The benefit of the sequence iterator interface is that the type of the file and
any file format details are abstracted away from the user. In this manner, the
user does not need to worry about whether they're operating on FASTA or FASTQ
files or any differences in the returns from their respective parsers.
Classes
^^^^^^^
.. autosummary::
:toctree: generated/
SequenceIterator
FastaIterator
FastqIterator
QseqIterator
Functions
^^^^^^^^^
.. autosummary::
:toctree: generated/
load
Examples
^^^^^^^^
For the first set of sequence iterator examples, we're going to use the
``load`` function. The ``load`` function is intended to operate on file paths,
so let's create two files for it to use. The first one will be a regular FASTA
file, and the second will be a gzip'd FASTQ file:
>>> import os
>>> import gzip
>>> out = open('test_seqs.fna', 'w')
>>> out.write(">s1\nATGC\n>s2\nATGGC\n")
>>> out.close()
>>> outgz = gzip.open('test_seqs.fq.gz', 'w')
>>> _ = outgz.write("@s3\nAATTGG\n+\nghghgh\n@s4\nAAA\n+\nfgh\n")
>>> outgz.close()
Now let's see what ``load`` can do:
>>> it = load(['test_seqs.fna', 'test_seqs.fq.gz'], phred_offset=64)
>>> for rec in it:
... print rec['SequenceID']
... print rec['Sequence']
... print rec['Qual']
s1
ATGC
None
s2
ATGGC
None
s3
AATTGG
[39 40 39 40 39 40]
s4
AAA
[38 39 40]
To be polite, let's remove the files we just created:
>>> os.remove('test_seqs.fna')
>>> os.remove('test_seqs.fq.gz')
In the following examples, we'll see how to use the sequence iterators directly
instead of using ``load``.
>>> from StringIO import StringIO
>>> from skbio.parse.sequences import FastaIterator, FastqIterator
In this first example, we're going to construct a FASTA iterator that is also
paired with quality scores (e.g., as in 454 fasta/qual files).
>>> seqs = StringIO(">seq1\n"
... "ATGC\n"
... ">seq2\n"
... "TTGGCC\n")
>>> qual = StringIO(">seq1\n"
... "10 20 30 40\n"
... ">seq2\n"
... "1 2 3 4 5 6\n")
>>> it = FastaIterator(seq=[seqs], qual=[qual])
>>> for record in it:
... print record['Sequence']
... print record['Qual']
ATGC
[10 20 30 40]
TTGGCC
[1 2 3 4 5 6]
In the next example, we're going to iterate over multiple FASTQ files at once.
>>> seqs1 = StringIO("@seq1\n"
... "ATGC\n"
... "+\n"
... "hhhh\n")
>>> seqs2 = StringIO("@seq2\n"
... "AATTGGCC\n"
... ">seq2\n"
... "abcdefgh\n")
>>> it = FastqIterator(seq=[seqs1, seqs2], phred_offset=64)
>>> for record in it:
... print record['Sequence']
... print record['Qual']
ATGC
[40 40 40 40]
AATTGGCC
[33 34 35 36 37 38 39 40]
Finally, we can apply arbitrary transforms to the sequences during iteration.
>>> seqs1 = StringIO("@seq1\n"
... "ATGC\n"
... "+\n"
... "hhhh\n")
>>> seqs2 = StringIO("@seq2\n"
... "AATTGGCC\n"
... ">seq2\n"
... "abcdefgh\n")
>>> def rev_f(st):
... st['Sequence'] = st['Sequence'][::-1]
... st['Qual'] = st['Qual'][::-1] if st['Qual'] is not None else None
>>> it = FastqIterator(seq=[seqs1, seqs2], transform=rev_f, phred_offset=64)
>>> for record in it:
... print record['Sequence']
... print record['Qual']
CGTA
[40 40 40 40]
CCGGTTAA
[40 39 38 37 36 35 34 33]
Low-level parsing functions
---------------------------
Lower-level parsing functions are also made available in addition to the
sequence iterator interface. These functions can be used to directly parse a
single sequence file. They accept file paths, file handles, or file-like
objects.
Functions
^^^^^^^^^
.. autosummary::
:toctree: generated/
parse_fasta
parse_fastq
parse_qual
parse_qseq
Exceptions
----------
.. autosummary::
:toctree: generated/
FastqParseError
QseqParseError
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from .fasta import parse_fasta, parse_qual
from .fastq import parse_fastq
from .qseq import parse_qseq
from .iterator import (FastaIterator, FastqIterator, QseqIterator,
SequenceIterator)
from .factory import load
from ._exception import FastqParseError, QseqParseError
__all__ = ['parse_fasta', 'parse_fastq', 'parse_qual',
'parse_qseq', 'FastqIterator', 'FastaIterator', 'QseqIterator',
'SequenceIterator', 'load', 'FastqParseError', 'QseqParseError']
from numpy.testing import Tester
test = Tester().test
Conforming to pep8 standards
r"""
Parse biological sequences (:mod:`skbio.parse.sequences`)
=========================================================
.. currentmodule:: skbio.parse.sequences
This module provides functions for parsing sequence files in a variety of
different formats. Two interfaces are provided for parsing sequence files:
sequence iterators (high-level, recommended interface) and parsing functions
(lower-level interface).
Sequence iterator interface
---------------------------
The sequence iterator interface is the recommended way to parse sequence files.
The ``load`` function provides a standard, high-level interface to iterate over
sequence files regardless of file type or whether they are compressed. The
method accepts single or multiple file paths and employs the correct file
handlers, iterator objects, and parsers for the user.
The benefit of the sequence iterator interface is that the type of the file and
any file format details are abstracted away from the user. In this manner, the
user does not need to worry about whether they're operating on FASTA or FASTQ
files or any differences in the returns from their respective parsers.
Classes
^^^^^^^
.. autosummary::
:toctree: generated/
SequenceIterator
FastaIterator
FastqIterator
QseqIterator
Functions
^^^^^^^^^
.. autosummary::
:toctree: generated/
load
Examples
^^^^^^^^
For the first set of sequence iterator examples, we're going to use the
``load`` function. The ``load`` function is intended to operate on file paths,
so let's create two files for it to use. The first one will be a regular FASTA
file, and the second will be a gzip'd FASTQ file:
>>> import os
>>> import gzip
>>> out = open('test_seqs.fna', 'w')
>>> out.write(">s1\nATGC\n>s2\nATGGC\n")
>>> out.close()
>>> outgz = gzip.open('test_seqs.fq.gz', 'w')
>>> _ = outgz.write("@s3\nAATTGG\n+\nghghgh\n@s4\nAAA\n+\nfgh\n")
>>> outgz.close()
Now let's see what ``load`` can do:
>>> it = load(['test_seqs.fna', 'test_seqs.fq.gz'], phred_offset=64)
>>> for rec in it:
... print rec['SequenceID']
... print rec['Sequence']
... print rec['Qual']
s1
ATGC
None
s2
ATGGC
None
s3
AATTGG
[39 40 39 40 39 40]
s4
AAA
[38 39 40]
To be polite, let's remove the files we just created:
>>> os.remove('test_seqs.fna')
>>> os.remove('test_seqs.fq.gz')
In the following examples, we'll see how to use the sequence iterators directly
instead of using ``load``.
>>> from StringIO import StringIO
>>> from skbio.parse.sequences import FastaIterator, FastqIterator
In this first example, we're going to construct a FASTA iterator that is also
paired with quality scores (e.g., as in 454 fasta/qual files).
>>> seqs = StringIO(">seq1\n"
... "ATGC\n"
... ">seq2\n"
... "TTGGCC\n")
>>> qual = StringIO(">seq1\n"
... "10 20 30 40\n"
... ">seq2\n"
... "1 2 3 4 5 6\n")
>>> it = FastaIterator(seq=[seqs], qual=[qual])
>>> for record in it:
... print record['Sequence']
... print record['Qual']
ATGC
[10 20 30 40]
TTGGCC
[1 2 3 4 5 6]
In the next example, we're going to iterate over multiple FASTQ files at once.
>>> seqs1 = StringIO("@seq1\n"
... "ATGC\n"
... "+\n"
... "hhhh\n")
>>> seqs2 = StringIO("@seq2\n"
... "AATTGGCC\n"
... ">seq2\n"
... "abcdefgh\n")
>>> it = FastqIterator(seq=[seqs1, seqs2], phred_offset=64)
>>> for record in it:
... print record['Sequence']
... print record['Qual']
ATGC
[40 40 40 40]
AATTGGCC
[33 34 35 36 37 38 39 40]
Finally, we can apply arbitrary transforms to the sequences during iteration.
>>> seqs1 = StringIO("@seq1\n"
... "ATGC\n"
... "+\n"
... "hhhh\n")
>>> seqs2 = StringIO("@seq2\n"
... "AATTGGCC\n"
... ">seq2\n"
... "abcdefgh\n")
>>> def rev_f(st):
... st['Sequence'] = st['Sequence'][::-1]
... st['Qual'] = st['Qual'][::-1] if st['Qual'] is not None else None
>>> it = FastqIterator(seq=[seqs1, seqs2], transform=rev_f, phred_offset=64)
>>> for record in it:
... print record['Sequence']
... print record['Qual']
CGTA
[40 40 40 40]
CCGGTTAA
[40 39 38 37 36 35 34 33]
Low-level parsing functions
---------------------------
Lower-level parsing functions are also made available in addition to the
sequence iterator interface. These functions can be used to directly parse a
single sequence file. They accept file paths, file handles, or file-like
objects.
Functions
^^^^^^^^^
.. autosummary::
:toctree: generated/
parse_fasta
parse_fastq
parse_qual
parse_qseq
Exceptions
----------
.. autosummary::
:toctree: generated/
FastqParseError
QseqParseError
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from .fasta import parse_fasta, parse_qual
from .fastq import parse_fastq
from .qseq import parse_qseq
from .iterator import (FastaIterator, FastqIterator, QseqIterator,
SequenceIterator)
from .factory import load
from ._exception import FastqParseError, QseqParseError
__all__ = ['parse_fasta', 'parse_fastq', 'parse_qual',
'parse_qseq', 'FastqIterator', 'FastaIterator', 'QseqIterator',
'SequenceIterator', 'load', 'FastqParseError', 'QseqParseError']
from numpy.testing import Tester
test = Tester().test
|
try:
# the json module was included in the stdlib in python 2.6
# http://docs.python.org/library/json.html
import json
except ImportError:
# simplejson 2.0.9 is available for python 2.4+
# http://pypi.python.org/pypi/simplejson/2.0.9
# simplejson 1.7.3 is available for python 2.3+
# http://pypi.python.org/pypi/simplejson/1.7.3
import simplejson as json
from lxml import etree
import uuid
from datetime import datetime, tzinfo, timedelta
from . import bagatom
import urllib2
import urlparse
import sys
import tempfile
import os
import time
import subprocess
# not really thrilled about duplicating these globals here -- maybe define them in coda.bagatom?
PREMIS_NAMESPACE = "info:lc/xmlns/premis-v2"
PREMIS = "{%s}" % PREMIS_NAMESPACE
PREMIS_NSMAP = {"premis": PREMIS_NAMESPACE}
svn_version_path = "/usr/bin/svnversion"
# constants for time parsing/formatting
XSDT_FMT = "%Y-%m-%dT%H:%M:%S" # this is a stub
XSDT_TZ_OFFSET = 19 # this should never change
class InvalidXSDateTime(Exception):
    """Raised when an xsDateTime string (or its timezone offset) cannot be parsed."""
class XSDateTimezone(tzinfo):
    """
    Minimal concrete tzinfo describing a fixed +/-HH:MM offset.

    Localization is not a concern here; only the raw offset matters.
    """
    def __init__(self, hours=0, minutes=0, sign=1):
        # Keep the whole offset as signed minutes east of UTC.
        self.minutes = sign * (hours * 60 + minutes)
    def utcoffset(self, dt):
        # Fixed offset; the dt argument is irrelevant.
        return timedelta(minutes=self.minutes)
    def dst(self, dt):
        # This zone never models daylight-saving time.
        return timedelta(0)
def xsDateTime_parse(xdt_str):
    """
    Parse an xsDateTime string, e.g. 2017-01-27T14:58:00+06:00.

    Accepts a naive timestamp (no offset), a +HH:MM/-HH:MM offset, or a
    trailing 'Z' (UTC).  Returns a datetime; tzinfo is attached only when
    an offset (or 'Z') is present.

    Raises InvalidXSDateTime for any string that does not match.
    """
    try:
        # strptime only understands the fixed-width date/time prefix;
        # the offset (if any) is handled manually below.
        naive_dt = datetime.strptime(xdt_str[0:XSDT_TZ_OFFSET], XSDT_FMT)
    except (ValueError, TypeError):
        raise InvalidXSDateTime("Malformed date/time ('%s')." % (xdt_str, ))
    offset_len = len(xdt_str) - XSDT_TZ_OFFSET
    offset_str = xdt_str[XSDT_TZ_OFFSET:]
    # NOTE: comparisons use ==, not 'is'; the original relied on CPython's
    # small-int/string interning, which is an implementation accident.
    if not offset_len:
        # no timezone information at all: return the naive datetime
        return naive_dt
    if offset_len == 6:  # +HH:MM
        if offset_str[0] not in "+-":
            raise InvalidXSDateTime("Malformed offset (missing sign).")
        offset_sign = -1 if offset_str[0] == '-' else 1
        try:
            offset_hours = int(offset_str[1:3])
        except ValueError:
            raise InvalidXSDateTime("Malformed offset (invalid hours '%s')"
                                    % (offset_str[1:3], )
                                    )
        if offset_str[3] != ':':
            raise InvalidXSDateTime("Colon missing in offset.")
        try:
            offset_minutes = int(offset_str[4:6])
        except ValueError:
            raise InvalidXSDateTime("Malformed offset (invalid minutes '%s')"
                                    % (offset_str[4:6], )
                                    )
        timezone = XSDateTimezone(offset_hours, offset_minutes, offset_sign)
        return naive_dt.replace(tzinfo=timezone)
    if offset_len == 1:  # 'Z' means UTC
        if offset_str == 'Z':
            return naive_dt.replace(tzinfo=XSDateTimezone())
        # original raised the misspelled name 'InvaildXSDateTime' here,
        # which itself blew up with a NameError
        raise InvalidXSDateTime("Unrecognized timezone identifier '%s'." %
                                (offset_str, )
                                )
    raise InvalidXSDateTime("Malformed offset '%s'." % (offset_str, ))
def xsDateTime_format(xdt):
    """
    Format a datetime as an xsDateTime string.

    Naive datetimes yield only the date/time portion; aware datetimes get
    a +HH:MM/-HH:MM offset suffix.
    """
    xdt_str = xdt.strftime(XSDT_FMT)
    offset = xdt.utcoffset()
    if offset is None:
        return xdt_str
    # Use floor division: '/' on ints returns a float under Python 3's
    # true division, which breaks the {:+03d}/{:02d} integer format specs.
    offset_hours = offset.days * 24 + offset.seconds // (60 * 60)
    offset_minutes = (offset.seconds % (60 * 60)) // 60
    xdt_str += "{:+03d}:{:02d}".format(offset_hours, offset_minutes)
    return xdt_str
def parseVocabularySources(jsonFilePath):
    """
    Read a vocabulary JSON file and return a list of (name, label) tuples,
    one per entry in the file's "terms" list.
    """
    # Context manager closes the handle promptly; the original left the
    # open file dangling until garbage collection.
    with open(jsonFilePath, "r") as jsonFile:
        jsonDict = json.load(jsonFile)
    choiceList = []
    for term in jsonDict["terms"]:
        choiceList.append((term['name'], term['label']))
    return choiceList
class HEADREQUEST(urllib2.Request):
    # urllib2.Request issues GET (or POST when data is set); overriding
    # get_method forces the HEAD verb instead.
    def get_method(self):
        return "HEAD"
class PUTREQUEST(urllib2.Request):
    # Request subclass that forces the PUT verb.
    def get_method(self):
        return "PUT"
class DELETEREQUEST(urllib2.Request):
    # Request subclass that forces the DELETE verb.
    def get_method(self):
        return "DELETE"
def waitForURL(url, max_seconds=None):
    """
    Poll a URL with HEAD requests until it responds with a 200.

    Blocks until success, sleeping 30 seconds between attempts.  If
    max_seconds is given, return (give up) once more than that much time
    has elapsed.
    """
    startTime = datetime.now()
    while True:
        response = None
        try:
            response = urllib2.urlopen(HEADREQUEST(url))
        except urllib2.URLError:
            pass
        if response is not None and isinstance(response, urllib2.addinfourl):
            if response.getcode() == 200:
                # we're done, yay!
                return
        timePassed = datetime.now() - startTime
        # Include .days: timedelta.seconds alone wraps at 24 hours, so
        # timeouts longer than a day could never fire in the original.
        elapsed = timePassed.days * 86400 + timePassed.seconds
        if max_seconds and max_seconds < elapsed:
            return
        print("Waiting on URL %s for %s so far" % (url, timePassed))
        time.sleep(30)
def doWaitWebRequest(url, method="GET", data=None, headers=None):
    """
    Same as doWebRequest, but with built in wait-looping: whenever the
    request fails with a URLError, wait for the URL to come back up and
    retry until a request completes.
    """
    if headers is None:
        headers = {}  # avoid a shared mutable default argument
    while True:
        try:
            return doWebRequest(url, method, data, headers)
        except urllib2.URLError:
            # service unreachable: block until it answers, then retry
            waitForURL(url)
def doWebRequest(url, method="GET", data=None, headers=None):
    """
    A urllib2 wrapper to mimic the functionality of httplib2, but with
    timeout support.

    Returns a (response, content) tuple; content is None when no usable
    response object comes back.
    """
    if headers is None:
        headers = {}  # avoid a shared mutable default argument
    # initialize variables
    response = None
    content = None
    # pick the request class that forces the right HTTP verb
    if method == "HEAD":
        request = HEADREQUEST(url, data=data, headers=headers)
    elif method == "PUT":
        request = PUTREQUEST(url, data=data, headers=headers)
    elif method == "DELETE":
        request = DELETEREQUEST(url, headers=headers)
    elif method == "GET":
        request = urllib2.Request(url, headers=headers)
    else:
        # urllib2 issues a POST whenever a data payload is supplied
        request = urllib2.Request(url, data=data, headers=headers)
    response = urllib2.urlopen(request)
    if response:
        content = response.read()
    return response, content
def sendPREMISEvent(webRoot, eventType, agentIdentifier, eventDetail,
eventOutcome, eventOutcomeDetail=None, linkObjectList=[],
eventDate=None, debug=False, eventIdentifier=None):
"""
A function to format an event to be uploaded and send it to a particular CODA server
in order to register it
"""
atomID = uuid.uuid1().hex
eventXML = createPREMISEventXML(
eventType=eventType,
agentIdentifier=agentIdentifier,
eventDetail=eventDetail,
eventOutcome=eventOutcome,
outcomeDetail=eventOutcomeDetail,
eventIdentifier=eventIdentifier,
eventDate=eventDate,
linkObjectList=linkObjectList
)
atomXML = bagatom.wrapAtom(eventXML, id=atomID, title=atomID)
atomXMLText = '<?xml version="1.0"?>\n%s' % etree.tostring(
atomXML, pretty_print=True
)
if debug:
print "Uploading XML to %s\n---\n%s\n---\n" % (webRoot, atomXMLText)
response = None
try:
response, content = doWebRequest(webRoot, "POST", data=atomXMLText)
except urllib2.URLError:
pass
if not response:
waitForURL(webRoot, 60)
response, content = doWebRequest(webRoot, "POST", data=atomXMLText)
if response.code != 201:
if debug:
tempdir = tempfile.gettempdir()
tfPath = os.path.join(
tempdir, "premis_upload_%s.html" % uuid.uuid1().hex
)
tf = open(tfPath, "w")
tf.write(content)
tf.close()
sys.stderr.write(
"Output from webserver available at %s. Response code %s" % (
tf.name, response.code
)
)
raise Exception(
"Error uploading PREMIS Event to %s. Response code is %s" % (
webRoot, response.code
)
)
return (response, content)
def createPREMISEventXML(eventType, agentIdentifier, eventDetail, eventOutcome,
                         outcomeDetail=None, eventIdentifier=None,
                         linkObjectList=[], eventDate=None):
    """
    Actually create our PREMIS Event XML.

    Builds a premis:event element (PREMIS v2 namespace) containing: an
    identifier (fresh UUID unless eventIdentifier is given), the event
    type, a date/time (UTC "now" unless eventDate is given), the detail,
    the outcome (plus optional outcome detail), one linkingAgentIdentifier
    for agentIdentifier, and one linkingObjectIdentifier per entry of
    linkObjectList.  Returns the lxml Element, not serialized text.

    NOTE(review): linkObjectList is a mutable default argument; it is only
    iterated here, so it is harmless, but callers should pass their own.
    """
    eventXML = etree.Element(PREMIS + "event", nsmap=PREMIS_NSMAP)
    eventIDXML = etree.SubElement(eventXML, PREMIS + "eventIdentifier")
    eventTypeXML = etree.SubElement(eventXML, PREMIS + "eventType")
    eventTypeXML.text = eventType
    eventIDTypeXML = etree.SubElement(
        eventIDXML, PREMIS + "eventIdentifierType"
    )
    # identifier types are qualified with UNTL vocabulary URIs
    eventIDTypeXML.text = \
        "http://purl.org/net/untl/vocabularies/identifier-qualifiers/#UUID"
    eventIDValueXML = etree.SubElement(
        eventIDXML, PREMIS + "eventIdentifierValue"
    )
    if eventIdentifier:
        eventIDValueXML.text = eventIdentifier
    else:
        # no identifier supplied: mint a fresh UUID
        eventIDValueXML.text = uuid.uuid4().hex
    eventDateTimeXML = etree.SubElement(eventXML, PREMIS + "eventDateTime")
    if eventDate is None:
        # default to the current UTC time
        eventDateTimeXML.text = xsDateTime_format(datetime.utcnow())
    else:
        eventDateTimeXML.text = xsDateTime_format(eventDate)
    eventDetailXML = etree.SubElement(eventXML, PREMIS + "eventDetail")
    eventDetailXML.text = eventDetail
    eventOutcomeInfoXML = etree.SubElement(
        eventXML, PREMIS + "eventOutcomeInformation"
    )
    eventOutcomeXML = etree.SubElement(
        eventOutcomeInfoXML, PREMIS + "eventOutcome"
    )
    eventOutcomeXML.text = eventOutcome
    if outcomeDetail:
        eventOutcomeDetailXML = etree.SubElement(
            eventOutcomeInfoXML, PREMIS + "eventOutcomeDetail"
        )
        etree.SubElement(eventOutcomeDetailXML,
                         PREMIS + "eventOutcomeDetailNote").text = outcomeDetail
    # assuming it's a list of 3-item tuples here [ ( identifier, type, role) ]
    linkAgentIDXML = etree.SubElement(
        eventXML, PREMIS + "linkingAgentIdentifier"
    )
    linkAgentIDTypeXML = etree.SubElement(
        linkAgentIDXML, PREMIS + "linkingAgentIdentifierType"
    )
    linkAgentIDTypeXML.text = \
        "http://purl.org/net/untl/vocabularies/identifier-qualifiers/#URL"
    linkAgentIDValueXML = etree.SubElement(
        linkAgentIDXML, PREMIS + "linkingAgentIdentifierValue"
    )
    linkAgentIDValueXML.text = agentIdentifier
    linkAgentIDRoleXML = etree.SubElement(
        linkAgentIDXML, PREMIS + "linkingAgentRole"
    )
    linkAgentIDRoleXML.text = \
        "http://purl.org/net/untl/vocabularies/linkingAgentRoles/#executingProgram"
    for linkObject in linkObjectList:
        linkObjectIDXML = etree.SubElement(
            eventXML, PREMIS + "linkingObjectIdentifier"
        )
        linkObjectIDTypeXML = etree.SubElement(
            linkObjectIDXML, PREMIS + "linkingObjectIdentifierType"
        )
        linkObjectIDTypeXML.text = linkObject[1]
        linkObjectIDValueXML = etree.SubElement(
            linkObjectIDXML, PREMIS + "linkingObjectIdentifierValue"
        )
        linkObjectIDValueXML.text = linkObject[0]
        if linkObject[2]:
            # the per-object role is optional
            linkObjectRoleXML = etree.SubElement(
                linkObjectIDXML, PREMIS + "linkingObjectRole"
            )
            linkObjectRoleXML.text = linkObject[2]
    return eventXML
def get_svn_revision(path=None):
    """
    Return the svnversion output for *path*, stripped of whitespace.

    When no path is given, the directory containing the running script
    is inspected.
    """
    if not path:
        path = os.path.abspath(os.path.dirname(sys.argv[0]))
    process = subprocess.Popen([svn_version_path, path],
                               stdout=subprocess.PIPE)
    stdout, _ = process.communicate()
    return stdout.strip()
def deleteQueue(destinationRoot, queueArk, debug=False):
    """
    Delete an entry from the queue.

    Issues a DELETE against APP/queue/<queueArk>/ under destinationRoot,
    retrying (via doWaitWebRequest) until the request completes.  Raises
    Exception on any status other than 200.  The debug flag is currently
    unused.
    """
    url = urlparse.urljoin(destinationRoot, "APP/queue/" + queueArk + "/")
    response, content = doWaitWebRequest(url, "DELETE")
    if response.getcode() != 200:
        # NOTE(review): message says "updating" although this is a delete
        raise Exception(
            "Error updating queue %s to url %s. Response code is %s\n%s" %
            (queueArk, url, response.getcode(), content)
        )
def updateQueue(destinationRoot, queueDict, debug=False):
"""
With a dictionary that represents a queue entry, update the queue entry with
the values
"""
attrDict = bagatom.AttrDict(queueDict)
url = urlparse.urljoin(destinationRoot, "APP/queue/" + attrDict.ark + "/")
queueXML = bagatom.queueEntryToXML(attrDict)
urlID = os.path.join(destinationRoot, attrDict.ark)
uploadXML = bagatom.wrapAtom(queueXML, id=urlID, title=attrDict.ark)
uploadXMLText = '<?xml version="1.0"?>\n' + etree.tostring(
uploadXML, pretty_print=True
)
if debug:
print "Sending XML to %s" % url
print uploadXMLText
try:
response, content = doWebRequest(url, "PUT", data=uploadXMLText)
except:
# sleep a few minutes then give it a second shot before dying
time.sleep(300)
response, content = doWebRequest(url, "PUT", data=uploadXMLText)
if response.getcode() != 200:
raise Exception(
"Error updating queue %s to url %s. Response code is %s\n%s" %
(attrDict.ark, url, response.getcode(), content)
)
typo fix
try:
# the json module was included in the stdlib in python 2.6
# http://docs.python.org/library/json.html
import json
except ImportError:
# simplejson 2.0.9 is available for python 2.4+
# http://pypi.python.org/pypi/simplejson/2.0.9
# simplejson 1.7.3 is available for python 2.3+
# http://pypi.python.org/pypi/simplejson/1.7.3
import simplejson as json
from lxml import etree
import uuid
from datetime import datetime, tzinfo, timedelta
from . import bagatom
import urllib2
import urlparse
import sys
import tempfile
import os
import time
import subprocess
# not really thrilled about duplicating these globals here -- maybe define them in coda.bagatom?
PREMIS_NAMESPACE = "info:lc/xmlns/premis-v2"
PREMIS = "{%s}" % PREMIS_NAMESPACE
PREMIS_NSMAP = {"premis": PREMIS_NAMESPACE}
svn_version_path = "/usr/bin/svnversion"
# constants for time parsing/formatting
XSDT_FMT = "%Y-%m-%dT%H:%M:%S" # this is a stub
XSDT_TZ_OFFSET = 19 # this should never change
class InvalidXSDateTime(Exception):
    # raised when an xsDateTime string or its timezone offset is malformed
    pass
class XSDateTimezone(tzinfo):
    """
    Concrete subclass of tzinfo for making sense of timezone offsets.
    Not really worried about localization here, just +/-HHMM
    """
    def __init__(self, hours=0, minutes=0, sign=1):
        # total offset kept as signed minutes east of UTC;
        # sign (+1/-1) is applied after combining hours and minutes
        self.minutes = hours*60+minutes
        self.minutes *= sign
    def utcoffset(self, dt):
        # fixed offset; dt is ignored
        return timedelta(minutes=self.minutes)
    def dst(self, dt):
        # this zone never models daylight-saving time
        return timedelta(0)
def xsDateTime_parse(xdt_str):
    """
    Parse an xsDateTime string, e.g. 2017-01-27T14:58:00+06:00.

    Accepts a naive timestamp (no offset), a +HH:MM/-HH:MM offset, or a
    trailing 'Z' (UTC).  Returns a datetime; tzinfo is attached only when
    an offset (or 'Z') is present.

    Raises InvalidXSDateTime for any string that does not match.
    """
    try:
        # strptime only understands the fixed-width date/time prefix;
        # the offset (if any) is handled manually below.
        naive_dt = datetime.strptime(xdt_str[0:XSDT_TZ_OFFSET], XSDT_FMT)
    except (ValueError, TypeError):
        raise InvalidXSDateTime("Malformed date/time ('%s')." % (xdt_str, ))
    offset_len = len(xdt_str) - XSDT_TZ_OFFSET
    offset_str = xdt_str[XSDT_TZ_OFFSET:]
    # NOTE: comparisons use ==, not 'is'; the original relied on CPython's
    # small-int/string interning, which is an implementation accident.
    if not offset_len:
        # no timezone information at all: return the naive datetime
        return naive_dt
    if offset_len == 6:  # +HH:MM
        if offset_str[0] not in "+-":
            raise InvalidXSDateTime("Malformed offset (missing sign).")
        offset_sign = -1 if offset_str[0] == '-' else 1
        try:
            offset_hours = int(offset_str[1:3])
        except ValueError:
            raise InvalidXSDateTime("Malformed offset (invalid hours '%s')"
                                    % (offset_str[1:3], )
                                    )
        if offset_str[3] != ':':
            raise InvalidXSDateTime("Colon missing in offset.")
        try:
            offset_minutes = int(offset_str[4:6])
        except ValueError:
            raise InvalidXSDateTime("Malformed offset (invalid minutes '%s')"
                                    % (offset_str[4:6], )
                                    )
        timezone = XSDateTimezone(offset_hours, offset_minutes, offset_sign)
        return naive_dt.replace(tzinfo=timezone)
    if offset_len == 1:  # 'Z' means UTC
        if offset_str == 'Z':
            return naive_dt.replace(tzinfo=XSDateTimezone())
        # original raised the misspelled name 'InvaildXSDateTime' here,
        # which itself blew up with a NameError
        raise InvalidXSDateTime("Unrecognized timezone identifier '%s'." %
                                (offset_str, )
                                )
    raise InvalidXSDateTime("Malformed offset '%s'." % (offset_str, ))
def xsDateTime_format(xdt):
    """
    Format a datetime as an xsDateTime string.

    Naive datetimes yield only the date/time portion; aware datetimes get
    a +HH:MM/-HH:MM offset suffix.
    """
    xdt_str = xdt.strftime(XSDT_FMT)
    offset = xdt.utcoffset()
    if offset is None:
        return xdt_str
    # Use floor division: '/' on ints returns a float under Python 3's
    # true division, which breaks the {:+03d}/{:02d} integer format specs.
    offset_hours = offset.days * 24 + offset.seconds // (60 * 60)
    offset_minutes = (offset.seconds % (60 * 60)) // 60
    xdt_str += "{:+03d}:{:02d}".format(offset_hours, offset_minutes)
    return xdt_str
def parseVocabularySources(jsonFilePath):
    """
    Read a vocabulary JSON file and return a list of (name, label) tuples,
    one per entry in the file's "terms" list.
    """
    # Context manager closes the handle promptly; the original left the
    # open file dangling until garbage collection.
    with open(jsonFilePath, "r") as jsonFile:
        jsonDict = json.load(jsonFile)
    choiceList = []
    for term in jsonDict["terms"]:
        choiceList.append((term['name'], term['label']))
    return choiceList
class HEADREQUEST(urllib2.Request):
    # urllib2.Request issues GET (or POST when data is set); overriding
    # get_method forces the HEAD verb instead.
    def get_method(self):
        return "HEAD"
class PUTREQUEST(urllib2.Request):
    # Request subclass that forces the PUT verb.
    def get_method(self):
        return "PUT"
class DELETEREQUEST(urllib2.Request):
    # Request subclass that forces the DELETE verb.
    def get_method(self):
        return "DELETE"
def waitForURL(url, max_seconds=None):
    """
    Poll a URL with HEAD requests until it responds with a 200.

    Blocks until success, sleeping 30 seconds between attempts.  If
    max_seconds is given, return (give up) once more than that much time
    has elapsed.
    """
    startTime = datetime.now()
    while True:
        response = None
        try:
            response = urllib2.urlopen(HEADREQUEST(url))
        except urllib2.URLError:
            pass
        if response is not None and isinstance(response, urllib2.addinfourl):
            if response.getcode() == 200:
                # we're done, yay!
                return
        timePassed = datetime.now() - startTime
        # Include .days: timedelta.seconds alone wraps at 24 hours, so
        # timeouts longer than a day could never fire in the original.
        elapsed = timePassed.days * 86400 + timePassed.seconds
        if max_seconds and max_seconds < elapsed:
            return
        print("Waiting on URL %s for %s so far" % (url, timePassed))
        time.sleep(30)
def doWaitWebRequest(url, method="GET", data=None, headers={}):
    """
    Same as doWebRequest, but with built in wait-looping: on
    urllib2.URLError, block in waitForURL(url) and then retry until the
    request goes through.
    """
    while True:
        try:
            response, content = doWebRequest(url, method, data, headers)
            return response, content
        except urllib2.URLError:
            # Request failed; wait for the service to come back, retry.
            waitForURL(url)
def doWebRequest(url, method="GET", data=None, headers={}):
    """
    A urllib2 wrapper to mimic the functionality of http2lib, but with timeout support.

    Returns (response, content) where content is the full response body,
    or None when the response is falsy.  Unknown methods (e.g. POST) fall
    through to a plain data-carrying Request.
    """
    # Build the request object matching the requested HTTP verb.
    builders = {
        "HEAD": lambda: HEADREQUEST(url, data=data, headers=headers),
        "PUT": lambda: PUTREQUEST(url, data=data, headers=headers),
        "DELETE": lambda: DELETEREQUEST(url, headers=headers),
        "GET": lambda: urllib2.Request(url, headers=headers),
    }
    # POST?
    default = lambda: urllib2.Request(url, data=data, headers=headers)
    request = builders.get(method, default)()
    response = urllib2.urlopen(request)
    content = response.read() if response else None
    return response, content
def sendPREMISEvent(webRoot, eventType, agentIdentifier, eventDetail,
                    eventOutcome, eventOutcomeDetail=None, linkObjectList=[],
                    eventDate=None, debug=False, eventIdentifier=None):
    """
    Format a PREMIS event, wrap it in an Atom entry and POST it to a CODA
    server (webRoot) to register it.

    Returns (response, content) from the successful POST; raises Exception
    when the server answers anything other than 201 Created.
    NOTE(review): linkObjectList=[] is a shared mutable default — safe only
    because it is passed through unmodified.
    """
    # Random Atom id/title for the wrapper entry (uuid1 is host/time based).
    atomID = uuid.uuid1().hex
    eventXML = createPREMISEventXML(
        eventType=eventType,
        agentIdentifier=agentIdentifier,
        eventDetail=eventDetail,
        eventOutcome=eventOutcome,
        outcomeDetail=eventOutcomeDetail,
        eventIdentifier=eventIdentifier,
        eventDate=eventDate,
        linkObjectList=linkObjectList
    )
    atomXML = bagatom.wrapAtom(eventXML, id=atomID, title=atomID)
    atomXMLText = '<?xml version="1.0"?>\n%s' % etree.tostring(
        atomXML, pretty_print=True
    )
    if debug:
        print "Uploading XML to %s\n---\n%s\n---\n" % (webRoot, atomXMLText)
    response = None
    # First attempt; a URLError leaves response as None so we can retry.
    try:
        response, content = doWebRequest(webRoot, "POST", data=atomXMLText)
    except urllib2.URLError:
        pass
    if not response:
        # Wait up to 60 seconds for the server to come back, then retry once.
        waitForURL(webRoot, 60)
        response, content = doWebRequest(webRoot, "POST", data=atomXMLText)
    if response.code != 201:
        if debug:
            # Dump the server's error page to a temp file for inspection.
            tempdir = tempfile.gettempdir()
            tfPath = os.path.join(
                tempdir, "premis_upload_%s.html" % uuid.uuid1().hex
            )
            tf = open(tfPath, "w")
            tf.write(content)
            tf.close()
            sys.stderr.write(
                "Output from webserver available at %s. Response code %s" % (
                    tf.name, response.code
                )
            )
        raise Exception(
            "Error uploading PREMIS Event to %s. Response code is %s" % (
                webRoot, response.code
            )
        )
    return (response, content)
def createPREMISEventXML(eventType, agentIdentifier, eventDetail, eventOutcome,
                         outcomeDetail=None, eventIdentifier=None,
                         linkObjectList=[], eventDate=None):
    """
    Actually create our PREMIS Event XML.

    Builds a <premis:event> element tree: identifier (given eventIdentifier
    or a fresh uuid4 hex), type, dateTime (given eventDate or UTC now),
    detail, outcome (+ optional outcome detail), a linking agent pointing at
    agentIdentifier, and one linkingObjectIdentifier per entry of
    linkObjectList.  Element order follows the PREMIS schema and must not
    be rearranged.  Returns the root lxml element.
    NOTE(review): linkObjectList=[] is a shared mutable default — safe only
    because it is never mutated here.
    """
    eventXML = etree.Element(PREMIS + "event", nsmap=PREMIS_NSMAP)
    eventIDXML = etree.SubElement(eventXML, PREMIS + "eventIdentifier")
    eventTypeXML = etree.SubElement(eventXML, PREMIS + "eventType")
    eventTypeXML.text = eventType
    eventIDTypeXML = etree.SubElement(
        eventIDXML, PREMIS + "eventIdentifierType"
    )
    eventIDTypeXML.text = \
        "http://purl.org/net/untl/vocabularies/identifier-qualifiers/#UUID"
    eventIDValueXML = etree.SubElement(
        eventIDXML, PREMIS + "eventIdentifierValue"
    )
    # Caller-supplied identifier wins; otherwise mint a random UUID.
    if eventIdentifier:
        eventIDValueXML.text = eventIdentifier
    else:
        eventIDValueXML.text = uuid.uuid4().hex
    eventDateTimeXML = etree.SubElement(eventXML, PREMIS + "eventDateTime")
    # NOTE(review): utcnow() is naive, so the formatted stamp carries no
    # offset suffix — presumably consumers treat it as UTC; confirm.
    if eventDate is None:
        eventDateTimeXML.text = xsDateTime_format(datetime.utcnow())
    else:
        eventDateTimeXML.text = xsDateTime_format(eventDate)
    eventDetailXML = etree.SubElement(eventXML, PREMIS + "eventDetail")
    eventDetailXML.text = eventDetail
    eventOutcomeInfoXML = etree.SubElement(
        eventXML, PREMIS + "eventOutcomeInformation"
    )
    eventOutcomeXML = etree.SubElement(
        eventOutcomeInfoXML, PREMIS + "eventOutcome"
    )
    eventOutcomeXML.text = eventOutcome
    # Outcome detail is optional and nested under the outcome information.
    if outcomeDetail:
        eventOutcomeDetailXML = etree.SubElement(
            eventOutcomeInfoXML, PREMIS + "eventOutcomeDetail"
        )
        etree.SubElement(eventOutcomeDetailXML,
                         PREMIS + "eventOutcomeDetailNote").text = outcomeDetail
    # assuming it's a list of 3-item tuples here [ ( identifier, type, role) ]
    linkAgentIDXML = etree.SubElement(
        eventXML, PREMIS + "linkingAgentIdentifier"
    )
    linkAgentIDTypeXML = etree.SubElement(
        linkAgentIDXML, PREMIS + "linkingAgentIdentifierType"
    )
    linkAgentIDTypeXML.text = \
        "http://purl.org/net/untl/vocabularies/identifier-qualifiers/#URL"
    linkAgentIDValueXML = etree.SubElement(
        linkAgentIDXML, PREMIS + "linkingAgentIdentifierValue"
    )
    linkAgentIDValueXML.text = agentIdentifier
    linkAgentIDRoleXML = etree.SubElement(
        linkAgentIDXML, PREMIS + "linkingAgentRole"
    )
    linkAgentIDRoleXML.text = \
        "http://purl.org/net/untl/vocabularies/linkingAgentRoles/#executingProgram"
    # One linkingObjectIdentifier element per (identifier, type, role) tuple.
    for linkObject in linkObjectList:
        linkObjectIDXML = etree.SubElement(
            eventXML, PREMIS + "linkingObjectIdentifier"
        )
        linkObjectIDTypeXML = etree.SubElement(
            linkObjectIDXML, PREMIS + "linkingObjectIdentifierType"
        )
        linkObjectIDTypeXML.text = linkObject[1]
        linkObjectIDValueXML = etree.SubElement(
            linkObjectIDXML, PREMIS + "linkingObjectIdentifierValue"
        )
        linkObjectIDValueXML.text = linkObject[0]
        # Role is optional; emit the element only when a role was supplied.
        if linkObject[2]:
            linkObjectRoleXML = etree.SubElement(
                linkObjectIDXML, PREMIS + "linkingObjectRole"
            )
            linkObjectRoleXML.text = linkObject[2]
    return eventXML
def get_svn_revision(path=None):
    """Return the svn revision string for *path*.

    Defaults to the directory containing the running script when *path*
    is not given.  Runs the executable named by the module-level
    svn_version_path (presumably svnversion) and returns its stripped
    stdout.
    """
    if not path:
        path = os.path.dirname(sys.argv[0])
    path = os.path.abspath(path)
    proc = subprocess.Popen([svn_version_path, path],
                            stdout=subprocess.PIPE)
    stdout, _ = proc.communicate()
    return stdout.strip()
def deleteQueue(destinationRoot, queueArk, debug=False):
    """
    Delete an entry from the queue.

    Issues a DELETE against APP/queue/<queueArk>/ under destinationRoot
    (retrying via doWaitWebRequest) and raises Exception unless the server
    answers 200.
    """
    url = urlparse.urljoin(destinationRoot, "APP/queue/" + queueArk + "/")
    response, content = doWaitWebRequest(url, "DELETE")
    status = response.getcode()
    if status != 200:
        raise Exception(
            "Error updating queue %s to url %s. Response code is %s\n%s" %
            (queueArk, url, status, content)
        )
def updateQueue(destinationRoot, queueDict, debug=False):
"""
With a dictionary that represents a queue entry, update the queue entry with
the values
"""
attrDict = bagatom.AttrDict(queueDict)
url = urlparse.urljoin(destinationRoot, "APP/queue/" + attrDict.ark + "/")
queueXML = bagatom.queueEntryToXML(attrDict)
urlID = os.path.join(destinationRoot, attrDict.ark)
uploadXML = bagatom.wrapAtom(queueXML, id=urlID, title=attrDict.ark)
uploadXMLText = '<?xml version="1.0"?>\n' + etree.tostring(
uploadXML, pretty_print=True
)
if debug:
print "Sending XML to %s" % url
print uploadXMLText
try:
response, content = doWebRequest(url, "PUT", data=uploadXMLText)
except:
# sleep a few minutes then give it a second shot before dying
time.sleep(300)
response, content = doWebRequest(url, "PUT", data=uploadXMLText)
if response.getcode() != 200:
raise Exception(
"Error updating queue %s to url %s. Response code is %s\n%s" %
(attrDict.ark, url, response.getcode(), content)
)
|
__all__ = ['convex_hull_image', 'convex_hull_object']
import numpy as np
from ._pnpoly import grid_points_inside_poly
from ._convex_hull import possible_hull
from skimage.morphology import label
def convex_hull_image(image):
    """Compute the convex hull image of a binary image.

    The convex hull is the set of pixels included in the smallest convex
    polygon that surrounds all white pixels in the input image.

    Parameters
    ----------
    image : ndarray
        Binary input image. This array is cast to bool before processing.

    Returns
    -------
    hull : ndarray of bool
        Binary image with pixels in convex hull set to True.

    References
    ----------
    .. [1] http://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/
    """
    try:
        from scipy.spatial import Delaunay
    except ImportError:
        raise ImportError('Could not import scipy.spatial, only available in '
                          'scipy >= 0.9.')
    image = image.astype(bool)
    # Optimisation: only the first/last True pixel of each row and column
    # can be a hull vertex, which vastly limits the number of coordinates
    # to examine.
    coords = possible_hull(image.astype(np.uint8))
    n_coords = len(coords)
    # Add a vertex for the middle of each pixel edge so whole pixels fall
    # inside the hull polygon.
    edge_offsets = ((0, -0.5), (0, 0.5), (-0.5, 0), (0.5, 0))
    corners = np.empty((n_coords * 4, 2))
    for i, (x_off, y_off) in enumerate(edge_offsets):
        corners[i * n_coords:(i + 1) * n_coords] = coords + [x_off, y_off]
    # Find the convex hull vertices and sort them clock-wise around the
    # centroid.
    vertices = corners[np.unique(Delaunay(corners).convex_hull)]
    centred = vertices - vertices.mean(axis=0)
    order = np.argsort(np.arctan2(centred[:, 0], centred[:, 1]))
    vertices = vertices[order]
    # Mark every pixel coordinate lying inside the hull polygon.
    return grid_points_inside_poly(image.shape[:2], vertices)
def convex_hull_object(image, neighbors=8):
    """Compute the convex hull image of individual objects in a binary image.

    The convex hull is the set of pixels included in the smallest convex
    polygon that surrounds all white pixels in the input image.

    Parameters
    ----------
    image : ndarray
        Binary input image.
    neighbors : {4, 8}, int
        Whether to use 4- or 8-connectivity.

    Returns
    -------
    hull : ndarray of bool
        Binary image with pixels in convex hull set to True.

    Note
    ----
    Objects are labelled with skimage.morphology.label, the convex hull of
    each label is computed with convex_hull_image, and the results are
    combined with logical OR.  Hulls of separate objects may overlap, which
    can make the combined result inaccurate; in that case consider taking
    the hull of the affected objects together.
    """
    if neighbors not in (4, 8):
        raise ValueError('Neighbors must be either 4 or 8.')
    labeled_im = label(image, neighbors, background=0)
    convex_img = np.zeros(image.shape, dtype=bool)
    # OR together the convex hull of every labelled object.
    for obj_label in range(0, labeled_im.max() + 1):
        obj_hull = convex_hull_image(labeled_im == obj_label)
        convex_img = np.logical_or(convex_img, obj_hull)
    return convex_img
Note: the "Note" section of convex_hull_object's docstring is re-worded in the following revision.
__all__ = ['convex_hull_image', 'convex_hull_object']
import numpy as np
from ._pnpoly import grid_points_inside_poly
from ._convex_hull import possible_hull
from skimage.morphology import label
def convex_hull_image(image):
    """Compute the convex hull image of a binary image.

    The convex hull is the set of pixels included in the smallest convex
    polygon that surrounds all white pixels in the input image.

    Parameters
    ----------
    image : ndarray
        Binary input image. This array is cast to bool before processing.

    Returns
    -------
    hull : ndarray of bool
        Binary image with pixels in convex hull set to True.

    References
    ----------
    .. [1] http://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/
    """
    try:
        from scipy.spatial import Delaunay
    except ImportError:
        raise ImportError('Could not import scipy.spatial, only available in '
                          'scipy >= 0.9.')
    image = image.astype(bool)
    # Optimisation: only the first/last True pixel of each row and column
    # can be a hull vertex, which vastly limits the number of coordinates
    # to examine.
    coords = possible_hull(image.astype(np.uint8))
    n_coords = len(coords)
    # Add a vertex for the middle of each pixel edge so whole pixels fall
    # inside the hull polygon.
    edge_offsets = ((0, -0.5), (0, 0.5), (-0.5, 0), (0.5, 0))
    corners = np.empty((n_coords * 4, 2))
    for i, (x_off, y_off) in enumerate(edge_offsets):
        corners[i * n_coords:(i + 1) * n_coords] = coords + [x_off, y_off]
    # Find the convex hull vertices and sort them clock-wise around the
    # centroid.
    vertices = corners[np.unique(Delaunay(corners).convex_hull)]
    centred = vertices - vertices.mean(axis=0)
    order = np.argsort(np.arctan2(centred[:, 0], centred[:, 1]))
    vertices = vertices[order]
    # Mark every pixel coordinate lying inside the hull polygon.
    return grid_points_inside_poly(image.shape[:2], vertices)
def convex_hull_object(image, neighbors=8):
    """Compute the convex hull image of individual objects in a binary image.

    The convex hull is the set of pixels included in the smallest convex
    polygon that surrounds all white pixels in the input image.

    Parameters
    ----------
    image : ndarray
        Binary input image.
    neighbors : {4, 8}, int
        Whether to use 4- or 8-connectivity.

    Returns
    -------
    hull : ndarray of bool
        Binary image with pixels in convex hull set to True.

    Note
    ----
    Objects are labelled with skimage.morphology.label, the convex hull of
    each label is computed with convex_hull_image, and the results are
    combined with logical OR.  Hulls of unconnected objects may overlap in
    the result; if that is suspected, consider using convex_hull_image on
    those objects together.
    """
    if neighbors not in (4, 8):
        raise ValueError('Neighbors must be either 4 or 8.')
    labeled_im = label(image, neighbors, background=0)
    convex_img = np.zeros(image.shape, dtype=bool)
    # OR together the convex hull of every labelled object.
    for obj_label in range(0, labeled_im.max() + 1):
        obj_hull = convex_hull_image(labeled_im == obj_label)
        convex_img = np.logical_or(convex_img, obj_hull)
    return convex_img
|
"""
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
#          Olivier Grisel <olivier.grisel@ensta.org>
# Raghav R V <rvraghav93@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import inspect
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.misc import comb
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import bincount
from ..base import _pprint
from ..gaussian_process.kernels import Kernel as GPKernel
__all__ = ['BaseCrossValidator',
'KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'LabelShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
    """Base class for all cross-validators.

    Implementations must define `_iter_test_masks` or `_iter_test_indices`.
    """

    def split(self, X, y=None, labels=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,)
            The target variable for supervised learning problems.
        labels : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset
            into train/test set.

        Returns
        -------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        X, y, labels = indexable(X, y, labels)
        sample_indices = np.arange(_num_samples(X))
        for test_mask in self._iter_test_masks(X, y, labels):
            yield (sample_indices[np.logical_not(test_mask)],
                   sample_indices[test_mask])

    # Since subclasses must implement either _iter_test_masks or
    # _iter_test_indices, neither can be abstract.
    def _iter_test_masks(self, X=None, y=None, labels=None):
        """Generate boolean masks corresponding to test sets.

        By default, delegates to _iter_test_indices(X, y, labels).
        """
        n_samples = _num_samples(X)
        for test_indices in self._iter_test_indices(X, y, labels):
            mask = np.zeros(n_samples, dtype=np.bool)
            mask[test_indices] = True
            yield mask

    def _iter_test_indices(self, X=None, y=None, labels=None):
        """Generate integer indices corresponding to test sets."""
        raise NotImplementedError

    @abstractmethod
    def get_n_splits(self, X=None, y=None, labels=None):
        """Returns the number of splitting iterations in the cross-validator"""

    def __repr__(self):
        return _build_repr(self)
class LeaveOneOut(BaseCrossValidator):
    """Leave-One-Out cross-validator.

    Provides train/test indices to split data in train/test sets: each
    sample is used once as a singleton test set while all remaining
    samples form the training set.

    ``LeaveOneOut()`` is equivalent to ``KFold(n_folds=n)`` and
    ``LeavePOut(p=1)`` where ``n`` is the number of samples.  Because the
    number of splits equals the number of samples this can be very costly;
    for large datasets prefer :class:`KFold`, :class:`ShuffleSplit` or
    :class:`StratifiedKFold`.

    Read more in the :ref:`User Guide <cross_validation>`.

    See also
    --------
    LeaveOneLabelOut
        For splitting the data according to explicit, domain-specific
        stratification of the dataset.
    LabelKFold: K-fold iterator variant with non-overlapping labels.
    """

    def _iter_test_indices(self, X, y=None, labels=None):
        # One singleton test set per sample.
        return range(_num_samples(X))

    def get_n_splits(self, X, y=None, labels=None):
        """Returns the number of splitting iterations in the cross-validator.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : object
            Always ignored, exists for compatibility.
        labels : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        if X is None:
            raise ValueError("The X parameter should not be None")
        return _num_samples(X)
class LeavePOut(BaseCrossValidator):
    """Leave-P-Out cross-validator.

    Provides train/test indices so that every distinct subset of size
    ``p`` is used once as the test set while the remaining ``n - p``
    samples form the training set.

    Note: ``LeavePOut(p)`` is NOT equivalent to
    ``KFold(n_folds=n_samples // p)``, which creates non-overlapping test
    sets.  The number of iterations grows combinatorically with the number
    of samples, so for large datasets prefer :class:`KFold`,
    :class:`StratifiedKFold` or :class:`ShuffleSplit`.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    p : int
        Size of the test sets.
    """

    def __init__(self, p):
        self.p = p

    def _iter_test_indices(self, X, y=None, labels=None):
        # Every p-sized combination of sample indices becomes a test set.
        for test_subset in combinations(range(_num_samples(X)), self.p):
            yield np.array(test_subset)

    def get_n_splits(self, X, y=None, labels=None):
        """Returns the number of splitting iterations in the cross-validator.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : object
            Always ignored, exists for compatibility.
        labels : object
            Always ignored, exists for compatibility.
        """
        if X is None:
            raise ValueError("The X parameter should not be None")
        return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
    """Base class for KFold and StratifiedKFold."""

    @abstractmethod
    def __init__(self, n_folds, shuffle, random_state):
        # Validate n_folds: must be an integral type and at least 2.
        if not isinstance(n_folds, numbers.Integral):
            raise ValueError('The number of folds must be of Integral type. '
                             '%s of type %s was passed.'
                             % (n_folds, type(n_folds)))
        n_folds = int(n_folds)
        if n_folds <= 1:
            raise ValueError(
                "k-fold cross-validation requires at least one"
                " train/test split by setting n_folds=2 or more,"
                " got n_folds={0}.".format(n_folds))
        # shuffle must be a real bool, not merely truthy.
        if not isinstance(shuffle, bool):
            raise TypeError("shuffle must be True or False;"
                            " got {0}".format(shuffle))
        self.n_folds = n_folds
        self.shuffle = shuffle
        self.random_state = random_state

    def split(self, X, y=None, labels=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,), optional
            The target variable for supervised learning problems.
        labels : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset
            into train/test set.

        Returns
        -------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        X, y, labels = indexable(X, y, labels)
        n_samples = _num_samples(X)
        if self.n_folds > n_samples:
            raise ValueError(
                ("Cannot have number of folds n_folds={0} greater"
                 " than the number of samples: {1}.").format(self.n_folds,
                                                             n_samples))
        # Delegate the actual index generation to BaseCrossValidator.split;
        # kept as a generator so the check above fires on first iteration.
        for train_index, test_index in super(_BaseKFold, self).split(X, y,
                                                                     labels):
            yield train_index, test_index

    def get_n_splits(self, X=None, y=None, labels=None):
        """Returns the number of splitting iterations in the cross-validator.

        All parameters are ignored and exist only for API compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return self.n_folds
class KFold(_BaseKFold):
    """K-Folds cross-validator.

    Provides train/test indices to split data in train/test sets.  The
    dataset is split into k consecutive folds (without shuffling by
    default); each fold is used once as validation while the remaining
    k - 1 folds form the training set.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_folds : int, default=3
        Number of folds. Must be at least 2.
    shuffle : boolean, optional
        Whether to shuffle the data before splitting into batches.
    random_state : None, int or RandomState
        When shuffle=True, pseudo-random number generator state used for
        shuffling. If None, use default numpy RNG for shuffling.

    Notes
    -----
    The first ``n_samples % n_folds`` folds have size
    ``n_samples // n_folds + 1``, other folds have size
    ``n_samples // n_folds``, where ``n_samples`` is the number of samples.

    See also
    --------
    StratifiedKFold
        Takes label information into account to avoid building folds with
        imbalanced class distributions (for binary or multiclass
        classification tasks).
    LabelKFold: K-fold iterator variant with non-overlapping labels.
    """

    def __init__(self, n_folds=3, shuffle=False,
                 random_state=None):
        super(KFold, self).__init__(n_folds, shuffle, random_state)

    def _iter_test_indices(self, X, y=None, labels=None):
        n_samples = _num_samples(X)
        indices = np.arange(n_samples)
        if self.shuffle:
            check_random_state(self.random_state).shuffle(indices)
        # The first n_samples % n_folds folds get one extra sample.
        n_folds = self.n_folds
        fold_sizes = np.full(n_folds, n_samples // n_folds, dtype=np.int)
        fold_sizes[:n_samples % n_folds] += 1
        start = 0
        for fold_size in fold_sizes:
            yield indices[start:start + fold_size]
            start += fold_size
class LabelKFold(_BaseKFold):
    """K-fold iterator variant with non-overlapping labels.

    The same label will not appear in two different folds (the number of
    distinct labels has to be at least equal to the number of folds).
    The folds are approximately balanced in the sense that the number of
    distinct labels is approximately the same in each fold.

    Parameters
    ----------
    n_folds : int, default=3
        Number of folds. Must be at least 2.

    See also
    --------
    LeaveOneLabelOut
        For splitting the data according to explicit domain-specific
        stratification of the dataset.
    """

    def __init__(self, n_folds=3):
        super(LabelKFold, self).__init__(n_folds, shuffle=False,
                                         random_state=None)

    def _iter_test_indices(self, X, y, labels):
        if labels is None:
            raise ValueError("The labels parameter should not be None")
        unique_labels, labels = np.unique(labels, return_inverse=True)
        n_labels = len(unique_labels)
        if self.n_folds > n_labels:
            raise ValueError("Cannot have number of folds n_folds=%d greater"
                             " than the number of labels: %d."
                             % (self.n_folds, n_labels))
        # Weight each label by its number of occurrences.
        n_samples_per_label = np.bincount(labels)
        # Greedy balancing: visit labels from heaviest to lightest, always
        # assigning the current label to the currently lightest fold.
        heaviest_first = np.argsort(n_samples_per_label)[::-1]
        fold_weights = np.zeros(self.n_folds)
        label_to_fold = np.zeros(len(unique_labels))
        for label_index in heaviest_first:
            lightest_fold = np.argmin(fold_weights)
            fold_weights[lightest_fold] += n_samples_per_label[label_index]
            label_to_fold[label_index] = lightest_fold
        # Map each sample to its label's fold and emit per-fold indices.
        sample_folds = label_to_fold[labels]
        for fold in range(self.n_folds):
            yield np.where(sample_folds == fold)[0]
class StratifiedKFold(_BaseKFold):
    """Stratified K-Folds cross-validator
    Provides train/test indices to split data in train/test sets.
    This cross-validation object is a variation of KFold that returns
    stratified folds. The folds are made by preserving the percentage of
    samples for each class.
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    n_folds : int, default=3
        Number of folds. Must be at least 2.
    shuffle : boolean, optional
        Whether to shuffle each stratification of the data before splitting
        into batches.
    random_state : None, int or RandomState
        When shuffle=True, pseudo-random number generator state used for
        shuffling. If None, use default numpy RNG for shuffling.
    Examples
    --------
    >>> from sklearn.model_selection import StratifiedKFold
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> skf = StratifiedKFold(n_folds=2)
    >>> skf.get_n_splits(X, y)
    2
    >>> print(skf)  # doctest: +NORMALIZE_WHITESPACE
    StratifiedKFold(n_folds=2, random_state=None, shuffle=False)
    >>> for train_index, test_index in skf.split(X, y):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 3] TEST: [0 2]
    TRAIN: [0 2] TEST: [1 3]
    Notes
    -----
    All the folds have size ``trunc(n_samples / n_folds)``, the last one has
    the complementary.
    """
    def __init__(self, n_folds=3, shuffle=False, random_state=None):
        super(StratifiedKFold, self).__init__(n_folds, shuffle, random_state)
        # NOTE(review): redundant — _BaseKFold.__init__ already stores
        # shuffle; kept as-is for compatibility.
        self.shuffle = shuffle
    def _make_test_folds(self, X, y=None, labels=None):
        # Seed a concrete RNG only when shuffling; otherwise the raw
        # random_state is passed straight through to the per-class KFolds.
        if self.shuffle:
            rng = check_random_state(self.random_state)
        else:
            rng = self.random_state
        y = np.asarray(y)
        n_samples = y.shape[0]
        unique_y, y_inversed = np.unique(y, return_inverse=True)
        y_counts = bincount(y_inversed)
        min_labels = np.min(y_counts)
        # Warn (rather than raise) when some class has fewer members than
        # folds; the trimming logic below keeps the split usable.
        if self.n_folds > min_labels:
            warnings.warn(("The least populated class in y has only %d"
                           " members, which is too few. The minimum"
                           " number of labels for any class cannot"
                           " be less than n_folds=%d."
                           % (min_labels, self.n_folds)), Warning)
        # pre-assign each sample to a test fold index using individual KFold
        # splitting strategies for each class so as to respect the balance of
        # classes
        # NOTE: Passing the data corresponding to ith class say X[y==class_i]
        # will break when the data is not 100% stratifiable for all classes.
        # So we pass np.zeroes(max(c, n_folds)) as data to the KFold
        per_cls_cvs = [
            KFold(self.n_folds, shuffle=self.shuffle,
                  random_state=rng).split(np.zeros(max(count, self.n_folds)))
            for count in y_counts]
        test_folds = np.zeros(n_samples, dtype=np.int)
        for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
            for cls, (_, test_split) in zip(unique_y, per_cls_splits):
                # Fancy indexing copies, so the assignment below must be
                # written back into test_folds afterwards.
                cls_test_folds = test_folds[y == cls]
                # the test split can be too big because we used
                # KFold(...).split(X[:max(c, n_folds)]) when data is not 100%
                # stratifiable for all the classes
                # (we use a warning instead of raising an exception)
                # If this is the case, let's trim it:
                test_split = test_split[test_split < len(cls_test_folds)]
                cls_test_folds[test_split] = test_fold_indices
                test_folds[y == cls] = cls_test_folds
        return test_folds
    def _iter_test_masks(self, X, y=None, labels=None):
        # One boolean test mask per fold, from the per-sample assignment.
        test_folds = self._make_test_folds(X, y)
        for i in range(self.n_folds):
            yield test_folds == i
class LeaveOneLabelOut(BaseCrossValidator):
    """Leave One Label Out cross-validator.

    Provides train/test indices to split data according to a third-party
    provided label.  Each unique label value forms the test set exactly
    once while all remaining samples form the training set.  The label
    information can encode arbitrary domain-specific stratifications of
    the samples as integers — for instance the year of collection, which
    allows cross-validation against time-based splits.

    Read more in the :ref:`User Guide <cross_validation>`.
    """

    def _iter_test_masks(self, X, y, labels):
        if labels is None:
            raise ValueError("The labels parameter should not be None")
        # Copy to avoid side-effects on the caller's array during iteration.
        labels = np.array(labels, copy=True)
        for label_value in np.unique(labels):
            yield labels == label_value

    def get_n_splits(self, X, y, labels):
        """Returns the number of splitting iterations in the cross-validator.

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        labels : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset
            into train/test set.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        if labels is None:
            raise ValueError("The labels parameter should not be None")
        return len(np.unique(labels))
class LeavePLabelOut(BaseCrossValidator):
    """Leave P Labels Out cross-validator

    Provides train/test indices to split data according to a third-party
    provided label. This label information can be used to encode arbitrary
    domain specific stratifications of the samples as integers.

    For instance the labels could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.

    The difference between LeavePLabelOut and LeaveOneLabelOut is that
    the former builds the test sets with all the samples assigned to
    ``p`` different values of the labels while the latter uses samples
    all assigned the same labels.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_labels : int
        Number of labels (``p``) to leave out in the test split.

    Examples
    --------
    >>> from sklearn.model_selection import LeavePLabelOut
    >>> X = np.array([[1, 2], [3, 4], [5, 6]])
    >>> y = np.array([1, 2, 1])
    >>> labels = np.array([1, 2, 3])
    >>> lpl = LeavePLabelOut(n_labels=2)
    >>> lpl.get_n_splits(X, y, labels)
    3
    >>> print(lpl)
    LeavePLabelOut(n_labels=2)
    >>> for train_index, test_index in lpl.split(X, y, labels):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    ...    print(X_train, X_test, y_train, y_test)
    TRAIN: [2] TEST: [0 1]
    [[5 6]] [[1 2]
     [3 4]] [1] [1 2]
    TRAIN: [1] TEST: [0 2]
    [[3 4]] [[1 2]
     [5 6]] [2] [1 1]
    TRAIN: [0] TEST: [1 2]
    [[1 2]] [[3 4]
     [5 6]] [1] [2 1]

    See also
    --------
    LabelKFold: K-fold iterator variant with non-overlapping labels.
    """

    def __init__(self, n_labels):
        self.n_labels = n_labels

    def _iter_test_masks(self, X, y, labels):
        if labels is None:
            raise ValueError("The labels parameter should not be None")
        # We make a copy of labels to avoid side-effects during iteration
        labels = np.array(labels, copy=True)
        unique_labels = np.unique(labels)
        combi = combinations(range(len(unique_labels)), self.n_labels)
        for indices in combi:
            # FIX: np.bool is a deprecated (now removed) alias of the
            # builtin bool; use the builtin directly.
            test_index = np.zeros(_num_samples(X), dtype=bool)
            # FIX: `l` renamed -- ambiguous single-letter name (E741).
            for label in unique_labels[np.array(indices)]:
                test_index[labels == label] = True
            yield test_index

    def get_n_splits(self, X, y, labels):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        labels : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        if labels is None:
            raise ValueError("The labels parameter should not be None")
        # Number of size-n_labels subsets of the distinct label values.
        return int(comb(len(np.unique(labels)), self.n_labels, exact=True))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
    """Base class for ShuffleSplit and StratifiedShuffleSplit"""

    def __init__(self, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None):
        # Fail fast on sizes that are invalid regardless of n_samples;
        # sample-count dependent checks are deferred to split().
        _validate_shuffle_split_init(test_size, train_size)
        self.n_iter = n_iter
        self.test_size = test_size
        self.train_size = train_size
        self.random_state = random_state

    def split(self, X, y=None, labels=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,)
            The target variable for supervised learning problems.
        labels : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Returns
        -------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        X, y, labels = indexable(X, y, labels)
        for train_index, test_index in self._iter_indices(X, y, labels):
            yield train_index, test_index

    @abstractmethod
    def _iter_indices(self, X, y=None, labels=None):
        """Generate (train, test) indices"""

    def get_n_splits(self, X=None, y=None, labels=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        labels : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        # Shuffle splitters always perform exactly n_iter iterations.
        return self.n_iter

    def __repr__(self):
        return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
    """Random permutation cross-validator

    Yields indices to split data into training and test sets.

    Note: contrary to other cross-validation strategies, random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_iter : int (default 10)
        Number of re-shuffling & splitting iterations.
    test_size : float, int, or None, default 0.1
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.
    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Examples
    --------
    >>> from sklearn.model_selection import ShuffleSplit
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 1, 2])
    >>> rs = ShuffleSplit(n_iter=3, test_size=.25, random_state=0)
    >>> rs.get_n_splits(X)
    3
    >>> print(rs)
    ShuffleSplit(n_iter=3, random_state=0, test_size=0.25, train_size=None)
    >>> for train_index, test_index in rs.split(X):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    # doctest: +ELLIPSIS
    TRAIN: [3 1 0] TEST: [2]
    TRAIN: [2 1 3] TEST: [0]
    TRAIN: [0 2 1] TEST: [3]
    >>> rs = ShuffleSplit(n_iter=3, train_size=0.5, test_size=.25,
    ...                   random_state=0)
    >>> for train_index, test_index in rs.split(X):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    # doctest: +ELLIPSIS
    TRAIN: [3 1] TEST: [2]
    TRAIN: [2 1] TEST: [0]
    TRAIN: [0 2] TEST: [3]
    """

    def _iter_indices(self, X, y=None, labels=None):
        n_samples = _num_samples(X)
        n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
                                                  self.train_size)
        rng = check_random_state(self.random_state)
        for _ in range(self.n_iter):
            # One fresh permutation per iteration: the first n_test entries
            # become the test set, the following n_train entries the train
            # set (any remainder is left out entirely).
            shuffled = rng.permutation(n_samples)
            ind_test = shuffled[:n_test]
            ind_train = shuffled[n_test:(n_test + n_train)]
            yield ind_train, ind_test
class LabelShuffleSplit(ShuffleSplit):
    """Shuffle-Labels-Out cross-validation iterator

    Provides randomized train/test indices to split data according to a
    third-party provided label. This label information can be used to encode
    arbitrary domain specific stratifications of the samples as integers.

    For instance the labels could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.

    The difference between LeavePLabelOut and LabelShuffleSplit is that
    the former generates splits using all subsets of size ``p`` unique labels,
    whereas LabelShuffleSplit generates a user-determined number of random
    test splits, each with a user-determined fraction of unique labels.

    For example, a less computationally intensive alternative to
    ``LeavePLabelOut(p=10)`` would be
    ``LabelShuffleSplit(test_size=10, n_iter=100)``.

    Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
    not to samples, as in ShuffleSplit.

    Parameters
    ----------
    n_iter : int (default 5)
        Number of re-shuffling & splitting iterations.
    test_size : float (default 0.2), int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the labels to include in the test split. If
        int, represents the absolute number of test labels. If None,
        the value is automatically set to the complement of the train size.
    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the labels to include in the train split. If
        int, represents the absolute number of train labels. If None,
        the value is automatically set to the complement of the test size.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
    """

    def __init__(self, n_iter=5, test_size=0.2, train_size=None,
                 random_state=None):
        super(LabelShuffleSplit, self).__init__(
            n_iter=n_iter,
            test_size=test_size,
            train_size=train_size,
            random_state=random_state)

    def _iter_indices(self, X, y, labels):
        if labels is None:
            raise ValueError("The labels parameter should not be None")
        classes, label_indices = np.unique(labels, return_inverse=True)
        # Shuffle-split at the *label* level, then expand each label-level
        # partition back into the corresponding sample indices.
        class_splits = super(LabelShuffleSplit, self)._iter_indices(X=classes)
        for label_train, label_test in class_splits:
            train = np.flatnonzero(np.in1d(label_indices, label_train))
            test = np.flatnonzero(np.in1d(label_indices, label_test))
            yield train, test
class StratifiedShuffleSplit(BaseShuffleSplit):
    """Stratified ShuffleSplit cross-validator

    Provides train/test indices to split data in train/test sets.

    This cross-validation object is a merge of StratifiedKFold and
    ShuffleSplit, which returns stratified randomized folds. The folds
    are made by preserving the percentage of samples for each class.

    Note: like the ShuffleSplit strategy, stratified random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_iter : int (default 10)
        Number of re-shuffling & splitting iterations.
    test_size : float (default 0.1), int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.
    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Examples
    --------
    >>> from sklearn.model_selection import StratifiedShuffleSplit
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> sss = StratifiedShuffleSplit(n_iter=3, test_size=0.5, random_state=0)
    >>> sss.get_n_splits(X, y)
    3
    >>> print(sss)       # doctest: +ELLIPSIS
    StratifiedShuffleSplit(n_iter=3, random_state=0, ...)
    >>> for train_index, test_index in sss.split(X, y):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 2] TEST: [3 0]
    TRAIN: [0 2] TEST: [1 3]
    TRAIN: [0 2] TEST: [3 1]
    """

    def __init__(self, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None):
        super(StratifiedShuffleSplit, self).__init__(
            n_iter, test_size, train_size, random_state)

    def _iter_indices(self, X, y, labels=None):
        n_samples = _num_samples(X)
        n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
                                                  self.train_size)
        classes, y_indices = np.unique(y, return_inverse=True)
        n_classes = classes.shape[0]

        class_counts = bincount(y_indices)
        if np.min(class_counts) < 2:
            raise ValueError("The least populated class in y has only 1"
                             " member, which is too few. The minimum"
                             " number of labels for any class cannot"
                             " be less than 2.")

        if n_train < n_classes:
            raise ValueError('The train_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (n_train, n_classes))
        if n_test < n_classes:
            raise ValueError('The test_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (n_test, n_classes))

        rng = check_random_state(self.random_state)
        # Per-class train (n_i) and test (t_i) counts derived from the
        # class proportions p_i; t_i is capped so train + test never
        # exceeds the class population.
        p_i = class_counts / float(n_samples)
        n_i = np.round(n_train * p_i).astype(int)
        t_i = np.minimum(class_counts - n_i,
                         np.round(n_test * p_i).astype(int))

        for _ in range(self.n_iter):
            train = []
            test = []

            for i, class_i in enumerate(classes):
                permutation = rng.permutation(class_counts[i])
                perm_indices_class_i = np.where((y == class_i))[0][permutation]
                train.extend(perm_indices_class_i[:n_i[i]])
                test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])

            # Because of rounding issues (as n_train and n_test are not
            # dividers of the number of elements per class), we may end
            # up here with less samples in train and test than asked for.
            if len(train) < n_train or len(test) < n_test:
                # We complete by affecting randomly the missing indexes.
                missing_indices = np.where(bincount(train + test,
                                                    minlength=len(y)) == 0)[0]
                missing_indices = rng.permutation(missing_indices)
                n_missing_train = n_train - len(train)
                n_missing_test = n_test - len(test)
                # FIX: the previous code completed the test set with
                # ``missing_indices[-(n_test - len(test)):]``.  When only the
                # train set was short, that slice is ``[-0:]`` i.e. the WHOLE
                # array, putting every missing index into the test set and
                # overlapping it with the train set.  Taking two disjoint
                # contiguous slices is always safe (the permutation makes the
                # choice random either way).
                train.extend(missing_indices[:n_missing_train])
                test.extend(missing_indices[n_missing_train:
                                            n_missing_train + n_missing_test])

            train = rng.permutation(train)
            test = rng.permutation(test)

            yield train, test
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
NOTE This does not take into account the number of samples which is known
only at split
"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
Validation helper to check if the test/test sizes are meaningful wrt to the
size of the data (n_samples)
"""
if (test_size is not None and np.asarray(test_size).dtype.kind == 'i'
and test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and np.asarray(train_size).dtype.kind == 'i'
and train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
class PredefinedSplit(BaseCrossValidator):
    """Predefined split cross-validator

    Splits the data into training/test set folds according to a predefined
    scheme. Each sample can be assigned to at most one test set fold, as
    specified by the user through the ``test_fold`` parameter.

    Read more in the :ref:`User Guide <cross_validation>`.

    Examples
    --------
    >>> from sklearn.model_selection import PredefinedSplit
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> test_fold = [0, 1, -1, 1]
    >>> ps = PredefinedSplit(test_fold)
    >>> ps.get_n_splits()
    2
    >>> print(ps)       # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    PredefinedSplit(test_fold=array([ 0,  1, -1,  1]))
    >>> for train_index, test_index in ps.split():
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 2 3] TEST: [0]
    TRAIN: [0 2] TEST: [1 3]
    """

    def __init__(self, test_fold):
        # FIX: np.int is a deprecated (now removed) alias of the builtin
        # int; use the builtin directly.
        self.test_fold = np.array(test_fold, dtype=int)
        self.test_fold = column_or_1d(self.test_fold)
        self.unique_folds = np.unique(self.test_fold)
        # Samples marked -1 are never placed in a test set.
        self.unique_folds = self.unique_folds[self.unique_folds != -1]

    def split(self, X=None, y=None, labels=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        labels : object
            Always ignored, exists for compatibility.

        Returns
        -------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        ind = np.arange(len(self.test_fold))
        for test_index in self._iter_test_masks():
            train_index = ind[np.logical_not(test_index)]
            test_index = ind[test_index]
            yield train_index, test_index

    def _iter_test_masks(self):
        """Generates boolean masks corresponding to test sets."""
        for f in self.unique_folds:
            test_index = np.where(self.test_fold == f)[0]
            # FIX: np.bool is a deprecated (now removed) alias of the
            # builtin bool; use the builtin directly.
            test_mask = np.zeros(len(self.test_fold), dtype=bool)
            test_mask[test_index] = True
            yield test_mask

    def get_n_splits(self, X=None, y=None, labels=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        labels : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
    """Wrapper class for old style cv objects and iterables."""

    def __init__(self, cv):
        self.cv = cv

    def get_n_splits(self, X=None, y=None, labels=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        labels : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        # Both iterables and old-cv objects support len
        return len(self.cv)

    def split(self, X=None, y=None, labels=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        labels : object
            Always ignored, exists for compatibility.

        Returns
        -------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        # Simply replay the (train, test) pairs the wrapped object yields.
        for train_index, test_index in self.cv:
            yield train_index, test_index
def check_cv(cv=3, y=None, classifier=False):
    """Input checker utility for building a cross-validator

    Parameters
    ----------
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If classifier is False or if ``y``
        is neither binary nor multiclass, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    y : array-like, optional
        The target variable for supervised learning problems.
    classifier : boolean, optional, default False
        Whether the task is a classification task, in which case
        stratified KFold will be used.

    Returns
    -------
    checked_cv : a cross-validator instance.
        The return value is a cross-validator which generates the train/test
        splits via the ``split`` method.
    """
    if cv is None:
        cv = 3

    if isinstance(cv, numbers.Integral):
        # Stratify only when the task is classification with a suitable y.
        if (classifier and (y is not None) and
                (type_of_target(y) in ('binary', 'multiclass'))):
            return StratifiedKFold(cv)
        else:
            return KFold(cv)

    if not hasattr(cv, 'split') or isinstance(cv, str):
        # NOTE: str has a `split` method, hence the explicit isinstance
        # checks to reject strings.
        if not isinstance(cv, Iterable) or isinstance(cv, str):
            # FIX: error message typo ("or and iterable" -> "or an iterable")
            raise ValueError("Expected cv as an integer, cross-validation "
                             "object (from sklearn.model_selection) "
                             "or an iterable. Got %s." % cv)
        return _CVIterableWrapper(cv)

    return cv  # New style cv objects are passed without any modification
def train_test_split(*arrays, **options):
    """Split arrays or matrices into random train and test subsets

    Quick utility that wraps input validation and
    ``next(ShuffleSplit().split(X, y))`` and application to input data
    into a single call for splitting (and optionally subsampling) data in a
    oneliner.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    *arrays : sequence of indexables with same length / shape[0]
        allowed inputs are lists, numpy arrays, scipy-sparse
        matrices or pandas dataframes.

        .. versionadded:: 0.16
            preserves input type instead of always casting to numpy array.

    test_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.
        If train size is also None, test size is set to 0.25.
    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
    stratify : array-like or None (default is None)
        If not None, data is split in a stratified fashion, using this as
        the labels array.

    Returns
    -------
    splitting : list, length=2 * len(arrays)
        List containing train-test split of inputs.

        .. versionadded:: 0.16
            Output type is the same as the input type.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import train_test_split
    >>> X, y = np.arange(10).reshape((5, 2)), range(5)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5],
           [6, 7],
           [8, 9]])
    >>> list(y)
    [0, 1, 2, 3, 4]

    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, test_size=0.33, random_state=42)
    ...
    >>> X_train
    array([[4, 5],
           [0, 1],
           [6, 7]])
    >>> y_train
    [2, 0, 3]
    >>> X_test
    array([[2, 3],
           [8, 9]])
    >>> y_test
    [1, 4]
    """
    if not arrays:
        raise ValueError("At least one array required as input")

    test_size = options.pop('test_size', None)
    train_size = options.pop('train_size', None)
    random_state = options.pop('random_state', None)
    stratify = options.pop('stratify', None)
    if options:
        # Anything left over was not a recognised keyword.
        raise TypeError("Invalid parameters passed: %s" % str(options))

    if test_size is None and train_size is None:
        test_size = 0.25

    arrays = indexable(*arrays)

    # Stratified splitting only when a stratify array was supplied.
    splitter_cls = ShuffleSplit if stratify is None else StratifiedShuffleSplit
    cv = splitter_cls(test_size=test_size,
                      train_size=train_size,
                      random_state=random_state)
    train, test = next(cv.split(X=arrays[0], y=stratify))

    # Interleave (train, test) subsets for every input array.
    result = []
    for a in arrays:
        result.append(safe_indexing(a, train))
        result.append(safe_indexing(a, test))
    return result


train_test_split.__test__ = False  # to avoid a pb with nosetests
def _safe_split(estimator, X, y, indices, train_indices=None):
    """Create subset of dataset and properly handle kernels."""
    kernel = getattr(estimator, 'kernel', None)
    if callable(kernel) and not isinstance(kernel, GPKernel):
        # cannot compute the kernel values with custom function
        raise ValueError("Cannot use a custom kernel function. "
                         "Precompute the kernel matrix instead.")

    pairwise = getattr(estimator, "_pairwise", False)
    if not hasattr(X, "shape"):
        if pairwise:
            raise ValueError("Precomputed kernels or affinity matrices have "
                             "to be passed as arrays or sparse matrices.")
        # Plain sequence: index element by element.
        X_subset = [X[index] for index in indices]
    elif pairwise:
        # X is a precomputed square kernel matrix
        if X.shape[0] != X.shape[1]:
            raise ValueError("X should be a square kernel matrix")
        columns = indices if train_indices is None else train_indices
        X_subset = X[np.ix_(indices, columns)]
    else:
        X_subset = safe_indexing(X, indices)

    y_subset = safe_indexing(y, indices) if y is not None else None

    return X_subset, y_subset
def _build_repr(self):
    """Build a scikit-learn style repr from the constructor parameters."""
    # XXX This is copied from BaseEstimator's get_params
    cls = self.__class__
    init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
    # Ignore varargs, kw and default values and pop self
    if init is object.__init__:
        # No explicit constructor to introspect
        args = []
    else:
        # FIX: inspect.getargspec is deprecated (and removed in recent
        # Python); use signature-based introspection instead.  `signature`
        # is already imported at the top of this module.
        init_signature = signature(init)
        args = sorted([p.name for p in init_signature.parameters.values()
                       if p.name != 'self' and
                       p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD)])
    class_name = self.__class__.__name__
    params = dict()
    for key in args:
        # We need deprecation warnings to always be on in order to
        # catch deprecated param values.
        # This is set in utils/__init__.py but it gets overwritten
        # when running under python3 somehow.
        warnings.simplefilter("always", DeprecationWarning)
        try:
            with warnings.catch_warnings(record=True) as w:
                value = getattr(self, key, None)
            if len(w) and w[0].category == DeprecationWarning:
                # if the parameter is deprecated, don't show it
                continue
        finally:
            warnings.filters.pop(0)
        params[key] = value
    return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
# FIX deprecation warning for inspect.getargspec
"""
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
#         Olivier Grisel <olivier.grisel@ensta.org>,
# Raghav R V <rvraghav93@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import inspect
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.misc import comb
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import bincount
from ..utils.fixes import signature
from ..base import _pprint
from ..gaussian_process.kernels import Kernel as GPKernel
# Public API of this module: names exported by ``from ... import *``.
__all__ = ['BaseCrossValidator',
           'KFold',
           'LabelKFold',
           'LeaveOneLabelOut',
           'LeaveOneOut',
           'LeavePLabelOut',
           'LeavePOut',
           'ShuffleSplit',
           'LabelShuffleSplit',
           'StratifiedKFold',
           'StratifiedShuffleSplit',
           'PredefinedSplit',
           'train_test_split',
           'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
    """Base class for all cross-validators

    Implementations must define `_iter_test_masks` or `_iter_test_indices`.
    """

    def split(self, X, y=None, labels=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,)
            The target variable for supervised learning problems.
        labels : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Returns
        -------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        X, y, labels = indexable(X, y, labels)
        indices = np.arange(_num_samples(X))
        for test_index in self._iter_test_masks(X, y, labels):
            train_index = indices[np.logical_not(test_index)]
            test_index = indices[test_index]
            yield train_index, test_index

    # Since subclasses must implement either _iter_test_masks or
    # _iter_test_indices, neither can be abstract.
    def _iter_test_masks(self, X=None, y=None, labels=None):
        """Generates boolean masks corresponding to test sets.

        By default, delegates to _iter_test_indices(X, y, labels)
        """
        for test_index in self._iter_test_indices(X, y, labels):
            # FIX: np.bool is a deprecated (now removed) alias of the
            # builtin bool; use the builtin directly.
            test_mask = np.zeros(_num_samples(X), dtype=bool)
            test_mask[test_index] = True
            yield test_mask

    def _iter_test_indices(self, X=None, y=None, labels=None):
        """Generates integer indices corresponding to test sets."""
        raise NotImplementedError

    @abstractmethod
    def get_n_splits(self, X=None, y=None, labels=None):
        """Returns the number of splitting iterations in the cross-validator"""

    def __repr__(self):
        return _build_repr(self)
class LeaveOneOut(BaseCrossValidator):
    """Leave-One-Out cross-validator

    Provides train/test indices to split data in train/test sets. Each
    sample is used once as a test set (singleton) while the remaining
    samples form the training set.

    Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_folds=n)`` and
    ``LeavePOut(p=1)`` where ``n`` is the number of samples.

    Due to the high number of test sets (which is the same as the
    number of samples) this cross-validation method can be very costly.
    For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
    or :class:`StratifiedKFold`.

    Read more in the :ref:`User Guide <cross_validation>`.

    Examples
    --------
    >>> from sklearn.model_selection import LeaveOneOut
    >>> X = np.array([[1, 2], [3, 4]])
    >>> y = np.array([1, 2])
    >>> loo = LeaveOneOut()
    >>> loo.get_n_splits(X)
    2
    >>> print(loo)
    LeaveOneOut()
    >>> for train_index, test_index in loo.split(X):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    ...    print(X_train, X_test, y_train, y_test)
    TRAIN: [1] TEST: [0]
    [[3 4]] [[1 2]] [2] [1]
    TRAIN: [0] TEST: [1]
    [[1 2]] [[3 4]] [1] [2]

    See also
    --------
    LeaveOneLabelOut
        For splitting the data according to explicit, domain-specific
        stratification of the dataset.
    LabelKFold: K-fold iterator variant with non-overlapping labels.
    """

    def _iter_test_indices(self, X, y=None, labels=None):
        # One singleton test set per sample.
        return iter(range(_num_samples(X)))

    def get_n_splits(self, X, y=None, labels=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : object
            Always ignored, exists for compatibility.
        labels : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        if X is None:
            raise ValueError("The X parameter should not be None")
        # Exactly one split per sample.
        return _num_samples(X)
class LeavePOut(BaseCrossValidator):
    """Leave-P-Out cross-validator

    Provides train/test indices to split data in train/test sets. This results
    in testing on all distinct samples of size p, while the remaining n - p
    samples form the training set in each iteration.

    Note: ``LeavePOut(p)`` is NOT equivalent to
    ``KFold(n_folds=n_samples // p)`` which creates non-overlapping test sets.

    Due to the high number of iterations which grows combinatorically with the
    number of samples this cross-validation method can be very costly. For
    large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
    or :class:`ShuffleSplit`.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    p : int
        Size of the test sets.

    Examples
    --------
    >>> from sklearn.model_selection import LeavePOut
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 3, 4])
    >>> lpo = LeavePOut(2)
    >>> lpo.get_n_splits(X)
    6
    >>> print(lpo)
    LeavePOut(p=2)
    >>> for train_index, test_index in lpo.split(X):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [2 3] TEST: [0 1]
    TRAIN: [1 3] TEST: [0 2]
    TRAIN: [1 2] TEST: [0 3]
    TRAIN: [0 3] TEST: [1 2]
    TRAIN: [0 2] TEST: [1 3]
    TRAIN: [0 1] TEST: [2 3]
    """

    def __init__(self, p):
        # Size of each test set; the number of splits is C(n_samples, p).
        self.p = p

    def _iter_test_indices(self, X, y=None, labels=None):
        # Every size-p subset of the sample indices becomes one test set,
        # in the lexicographic order produced by itertools.combinations.
        index_subsets = combinations(range(_num_samples(X)), self.p)
        for index_subset in index_subsets:
            yield np.array(index_subset)

    def get_n_splits(self, X, y=None, labels=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : object
            Always ignored, exists for compatibility.
        labels : object
            Always ignored, exists for compatibility.
        """
        if X is None:
            raise ValueError("The X parameter should not be None")
        # Exact binomial coefficient C(n_samples, p).
        return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
    """Base class for KFold and StratifiedKFold"""

    @abstractmethod
    def __init__(self, n_folds, shuffle, random_state):
        # Validate eagerly so misconfiguration fails at construction time
        # rather than at the first call to split().
        if not isinstance(n_folds, numbers.Integral):
            raise ValueError('The number of folds must be of Integral type. '
                             '%s of type %s was passed.'
                             % (n_folds, type(n_folds)))
        n_folds = int(n_folds)
        if n_folds <= 1:
            raise ValueError(
                "k-fold cross-validation requires at least one"
                " train/test split by setting n_folds=2 or more,"
                " got n_folds={0}.".format(n_folds))
        # shuffle must be a real bool, not merely truthy.
        if not isinstance(shuffle, bool):
            raise TypeError("shuffle must be True or False;"
                            " got {0}".format(shuffle))
        self.n_folds = n_folds
        self.shuffle = shuffle
        self.random_state = random_state

    def split(self, X, y=None, labels=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,), optional
            The target variable for supervised learning problems.
        labels : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Returns
        -------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        X, y, labels = indexable(X, y, labels)
        n_samples = _num_samples(X)
        # Each fold needs at least one sample, so n_folds may not exceed
        # the number of samples.  This check requires X, hence it happens
        # here and not in __init__.
        if self.n_folds > n_samples:
            raise ValueError(
                ("Cannot have number of folds n_folds={0} greater"
                 " than the number of samples: {1}.").format(self.n_folds,
                                                             n_samples))
        # Delegate index generation to BaseCrossValidator.split, which in
        # turn uses the subclass's _iter_test_masks/_iter_test_indices.
        for train, test in super(_BaseKFold, self).split(X, y, labels):
            yield train, test

    def get_n_splits(self, X=None, y=None, labels=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        labels : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return self.n_folds
class KFold(_BaseKFold):
    """K-Folds cross-validator

    Provides train/test indices to split data in train/test sets. Split
    dataset into k consecutive folds (without shuffling by default).
    Each fold is then used once as a validation while the k - 1 remaining
    folds form the training set.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_folds : int, default=3
        Number of folds. Must be at least 2.
    shuffle : boolean, optional
        Whether to shuffle the data before splitting into batches.
    random_state : None, int or RandomState
        When shuffle=True, pseudo-random number generator state used for
        shuffling. If None, use default numpy RNG for shuffling.

    Examples
    --------
    >>> from sklearn.model_selection import KFold
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([1, 2, 3, 4])
    >>> kf = KFold(n_folds=2)
    >>> kf.get_n_splits(X)
    2
    >>> print(kf)  # doctest: +NORMALIZE_WHITESPACE
    KFold(n_folds=2, random_state=None, shuffle=False)
    >>> for train_index, test_index in kf.split(X):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [2 3] TEST: [0 1]
    TRAIN: [0 1] TEST: [2 3]

    Notes
    -----
    The first ``n_samples % n_folds`` folds have size
    ``n_samples // n_folds + 1``, other folds have size
    ``n_samples // n_folds``, where ``n_samples`` is the number of samples.

    See also
    --------
    StratifiedKFold
        Takes label information into account to avoid building folds with
        imbalanced class distributions (for binary or multiclass
        classification tasks).
    LabelKFold: K-fold iterator variant with non-overlapping labels.
    """

    def __init__(self, n_folds=3, shuffle=False,
                 random_state=None):
        # The base class already stores n_folds, shuffle and random_state;
        # the previously redundant ``self.shuffle = shuffle`` was removed.
        super(KFold, self).__init__(n_folds, shuffle, random_state)

    def _iter_test_indices(self, X, y=None, labels=None):
        # Yield the test indices of each of the n_folds contiguous folds
        # (contiguous over a shuffled permutation when shuffle=True).
        n_samples = _num_samples(X)
        indices = np.arange(n_samples)
        if self.shuffle:
            check_random_state(self.random_state).shuffle(indices)
        n_folds = self.n_folds
        # dtype=int instead of the deprecated alias np.int (removed in
        # NumPy 1.24); behavior is identical.
        fold_sizes = (n_samples // n_folds) * np.ones(n_folds, dtype=int)
        # The first n_samples % n_folds folds receive one extra sample.
        fold_sizes[:n_samples % n_folds] += 1
        current = 0
        for fold_size in fold_sizes:
            start, stop = current, current + fold_size
            yield indices[start:stop]
            current = stop
class LabelKFold(_BaseKFold):
    """K-fold iterator variant with non-overlapping labels.

    The same label will not appear in two different folds (the number of
    distinct labels has to be at least equal to the number of folds).

    The folds are approximately balanced in the sense that the number of
    distinct labels is approximately the same in each fold.

    Parameters
    ----------
    n_folds : int, default=3
        Number of folds. Must be at least 2.

    Examples
    --------
    >>> from sklearn.model_selection import LabelKFold
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 3, 4])
    >>> labels = np.array([0, 0, 2, 2])
    >>> label_kfold = LabelKFold(n_folds=2)
    >>> label_kfold.get_n_splits(X, y, labels)
    2
    >>> print(label_kfold)
    LabelKFold(n_folds=2)
    >>> for train_index, test_index in label_kfold.split(X, y, labels):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    ...    print(X_train, X_test, y_train, y_test)
    ...
    TRAIN: [0 1] TEST: [2 3]
    [[1 2]
     [3 4]] [[5 6]
     [7 8]] [1 2] [3 4]
    TRAIN: [2 3] TEST: [0 1]
    [[5 6]
     [7 8]] [[1 2]
     [3 4]] [3 4] [1 2]

    See also
    --------
    LeaveOneLabelOut
        For splitting the data according to explicit domain-specific
        stratification of the dataset.
    """
    def __init__(self, n_folds=3):
        # LabelKFold never shuffles: fold assignment is fully determined
        # by the greedy balancing below.
        super(LabelKFold, self).__init__(n_folds, shuffle=False,
                                         random_state=None)

    def _iter_test_indices(self, X, y, labels):
        if labels is None:
            raise ValueError("The labels parameter should not be None")
        # Map labels to contiguous integer codes 0..n_labels-1.
        unique_labels, labels = np.unique(labels, return_inverse=True)
        n_labels = len(unique_labels)
        if self.n_folds > n_labels:
            raise ValueError("Cannot have number of folds n_folds=%d greater"
                             " than the number of labels: %d."
                             % (self.n_folds, n_labels))
        # Weight labels by their number of occurrences
        n_samples_per_label = np.bincount(labels)
        # Distribute the most frequent labels first
        # (greedy LPT-style bin packing to balance fold sizes).
        indices = np.argsort(n_samples_per_label)[::-1]
        n_samples_per_label = n_samples_per_label[indices]
        # Total weight of each fold
        n_samples_per_fold = np.zeros(self.n_folds)
        # Mapping from label index to fold index
        # NOTE(review): np.zeros defaults to float64, so fold ids are
        # stored as floats; the integer comparison below is still exact
        # for these small values.
        label_to_fold = np.zeros(len(unique_labels))
        # Distribute samples by adding the largest weight to the lightest fold
        for label_index, weight in enumerate(n_samples_per_label):
            lightest_fold = np.argmin(n_samples_per_fold)
            n_samples_per_fold[lightest_fold] += weight
            label_to_fold[indices[label_index]] = lightest_fold
        # Per-sample fold assignment via the label codes.
        indices = label_to_fold[labels]
        for f in range(self.n_folds):
            yield np.where(indices == f)[0]
class StratifiedKFold(_BaseKFold):
    """Stratified K-Folds cross-validator

    Provides train/test indices to split data in train/test sets.
    This cross-validation object is a variation of KFold that returns
    stratified folds. The folds are made by preserving the percentage of
    samples for each class.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_folds : int, default=3
        Number of folds. Must be at least 2.
    shuffle : boolean, optional
        Whether to shuffle each stratification of the data before splitting
        into batches.
    random_state : None, int or RandomState
        When shuffle=True, pseudo-random number generator state used for
        shuffling. If None, use default numpy RNG for shuffling.

    Examples
    --------
    >>> from sklearn.model_selection import StratifiedKFold
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> skf = StratifiedKFold(n_folds=2)
    >>> skf.get_n_splits(X, y)
    2
    >>> print(skf)  # doctest: +NORMALIZE_WHITESPACE
    StratifiedKFold(n_folds=2, random_state=None, shuffle=False)
    >>> for train_index, test_index in skf.split(X, y):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 3] TEST: [0 2]
    TRAIN: [0 2] TEST: [1 3]

    Notes
    -----
    All the folds have size ``trunc(n_samples / n_folds)``, the last one has
    the complementary.
    """

    def __init__(self, n_folds=3, shuffle=False, random_state=None):
        # The base class already stores n_folds, shuffle and random_state;
        # the previously redundant ``self.shuffle = shuffle`` was removed.
        super(StratifiedKFold, self).__init__(n_folds, shuffle, random_state)

    def _make_test_folds(self, X, y=None, labels=None):
        # Compute, for every sample, the index of the test fold it belongs
        # to, preserving per-class proportions across folds.
        if self.shuffle:
            rng = check_random_state(self.random_state)
        else:
            rng = self.random_state
        y = np.asarray(y)
        n_samples = y.shape[0]
        unique_y, y_inversed = np.unique(y, return_inverse=True)
        y_counts = bincount(y_inversed)
        min_labels = np.min(y_counts)
        if self.n_folds > min_labels:
            warnings.warn(("The least populated class in y has only %d"
                           " members, which is too few. The minimum"
                           " number of labels for any class cannot"
                           " be less than n_folds=%d."
                           % (min_labels, self.n_folds)), Warning)
        # pre-assign each sample to a test fold index using individual KFold
        # splitting strategies for each class so as to respect the balance of
        # classes
        # NOTE: Passing the data corresponding to ith class say X[y==class_i]
        # will break when the data is not 100% stratifiable for all classes.
        # So we pass np.zeros(max(c, n_folds)) as data to the KFold
        per_cls_cvs = [
            KFold(self.n_folds, shuffle=self.shuffle,
                  random_state=rng).split(np.zeros(max(count, self.n_folds)))
            for count in y_counts]
        # dtype=int instead of the deprecated alias np.int (removed in
        # NumPy 1.24); behavior is identical.
        test_folds = np.zeros(n_samples, dtype=int)
        for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
            for cls, (_, test_split) in zip(unique_y, per_cls_splits):
                cls_test_folds = test_folds[y == cls]
                # the test split can be too big because we used
                # KFold(...).split(X[:max(c, n_folds)]) when data is not 100%
                # stratifiable for all the classes
                # (we use a warning instead of raising an exception)
                # If this is the case, let's trim it:
                test_split = test_split[test_split < len(cls_test_folds)]
                cls_test_folds[test_split] = test_fold_indices
                test_folds[y == cls] = cls_test_folds
        return test_folds

    def _iter_test_masks(self, X, y=None, labels=None):
        # Boolean test mask per fold, derived from the per-sample fold ids.
        test_folds = self._make_test_folds(X, y)
        for i in range(self.n_folds):
            yield test_folds == i
class LeaveOneLabelOut(BaseCrossValidator):
    """Leave One Label Out cross-validator

    Provides train/test indices to split data according to a third-party
    provided label. This label information can be used to encode arbitrary
    domain specific stratifications of the samples as integers.

    For instance the labels could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.

    Read more in the :ref:`User Guide <cross_validation>`.

    Examples
    --------
    >>> from sklearn.model_selection import LeaveOneLabelOut
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 1, 2])
    >>> labels = np.array([1, 1, 2, 2])
    >>> lol = LeaveOneLabelOut()
    >>> lol.get_n_splits(X, y, labels)
    2
    >>> print(lol)
    LeaveOneLabelOut()
    >>> for train_index, test_index in lol.split(X, y, labels):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    ...    print(X_train, X_test, y_train, y_test)
    TRAIN: [2 3] TEST: [0 1]
    [[5 6]
     [7 8]] [[1 2]
     [3 4]] [1 2] [1 2]
    TRAIN: [0 1] TEST: [2 3]
    [[1 2]
     [3 4]] [[5 6]
     [7 8]] [1 2] [1 2]
    """

    def _iter_test_masks(self, X, y, labels):
        if labels is None:
            raise ValueError("The labels parameter should not be None")
        # We make a copy of labels to avoid side-effects during iteration
        labels = np.array(labels, copy=True)
        # One boolean test mask per distinct label value.
        for label_value in np.unique(labels):
            yield labels == label_value

    def get_n_splits(self, X, y, labels):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        labels : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        if labels is None:
            raise ValueError("The labels parameter should not be None")
        # One split per distinct label value.
        return len(np.unique(labels))
class LeavePLabelOut(BaseCrossValidator):
    """Leave P Labels Out cross-validator

    Provides train/test indices to split data according to a third-party
    provided label. This label information can be used to encode arbitrary
    domain specific stratifications of the samples as integers.

    For instance the labels could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.

    The difference between LeavePLabelOut and LeaveOneLabelOut is that
    the former builds the test sets with all the samples assigned to
    ``p`` different values of the labels while the latter uses samples
    all assigned the same labels.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_labels : int
        Number of labels (``p``) to leave out in the test split.

    Examples
    --------
    >>> from sklearn.model_selection import LeavePLabelOut
    >>> X = np.array([[1, 2], [3, 4], [5, 6]])
    >>> y = np.array([1, 2, 1])
    >>> labels = np.array([1, 2, 3])
    >>> lpl = LeavePLabelOut(n_labels=2)
    >>> lpl.get_n_splits(X, y, labels)
    3
    >>> print(lpl)
    LeavePLabelOut(n_labels=2)
    >>> for train_index, test_index in lpl.split(X, y, labels):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    ...    print(X_train, X_test, y_train, y_test)
    TRAIN: [2] TEST: [0 1]
    [[5 6]] [[1 2]
     [3 4]] [1] [1 2]
    TRAIN: [1] TEST: [0 2]
    [[3 4]] [[1 2]
     [5 6]] [2] [1 1]
    TRAIN: [0] TEST: [1 2]
    [[1 2]] [[3 4]
     [5 6]] [1] [2 1]

    See also
    --------
    LabelKFold: K-fold iterator variant with non-overlapping labels.
    """

    def __init__(self, n_labels):
        # Number of distinct labels left out per test split.
        self.n_labels = n_labels

    def _iter_test_masks(self, X, y, labels):
        if labels is None:
            raise ValueError("The labels parameter should not be None")
        # Copy to avoid side-effects on the caller's array during iteration.
        labels = np.array(labels, copy=True)
        unique_labels = np.unique(labels)
        # Each combination of n_labels distinct labels defines one test set.
        combi = combinations(range(len(unique_labels)), self.n_labels)
        for indices in combi:
            # dtype=bool instead of the deprecated alias np.bool (removed
            # in NumPy 1.24); behavior is identical.
            test_index = np.zeros(_num_samples(X), dtype=bool)
            for l in unique_labels[np.array(indices)]:
                test_index[labels == l] = True
            yield test_index

    def get_n_splits(self, X, y, labels):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        labels : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        if labels is None:
            raise ValueError("The labels parameter should not be None")
        # Exact binomial coefficient C(n_unique_labels, n_labels).
        return int(comb(len(np.unique(labels)), self.n_labels, exact=True))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
    """Base class for ShuffleSplit and StratifiedShuffleSplit"""

    def __init__(self, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None):
        # Early sanity check of the sizes; per-n_samples validation happens
        # later in _validate_shuffle_split at split time.
        _validate_shuffle_split_init(test_size, train_size)
        self.n_iter = n_iter
        self.test_size = test_size
        self.train_size = train_size
        self.random_state = random_state

    def split(self, X, y=None, labels=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,)
            The target variable for supervised learning problems.
        labels : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Returns
        -------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        # Note: this is a generator, so indexable() only runs once the
        # returned iterator is first advanced.
        X, y, labels = indexable(X, y, labels)
        for train, test in self._iter_indices(X, y, labels):
            yield train, test

    @abstractmethod
    def _iter_indices(self, X, y=None, labels=None):
        """Generate (train, test) indices"""

    def get_n_splits(self, X=None, y=None, labels=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        labels : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return self.n_iter

    def __repr__(self):
        return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
    """Random permutation cross-validator

    Yields indices to split data into training and test sets.

    Note: contrary to other cross-validation strategies, random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_iter : int (default 10)
        Number of re-shuffling & splitting iterations.
    test_size : float, int, or None, default 0.1
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.
    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Examples
    --------
    >>> from sklearn.model_selection import ShuffleSplit
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 1, 2])
    >>> rs = ShuffleSplit(n_iter=3, test_size=.25, random_state=0)
    >>> rs.get_n_splits(X)
    3
    >>> print(rs)
    ShuffleSplit(n_iter=3, random_state=0, test_size=0.25, train_size=None)
    >>> for train_index, test_index in rs.split(X):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    # doctest: +ELLIPSIS
    TRAIN: [3 1 0] TEST: [2]
    TRAIN: [2 1 3] TEST: [0]
    TRAIN: [0 2 1] TEST: [3]
    >>> rs = ShuffleSplit(n_iter=3, train_size=0.5, test_size=.25,
    ...                   random_state=0)
    >>> for train_index, test_index in rs.split(X):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    # doctest: +ELLIPSIS
    TRAIN: [3 1] TEST: [2]
    TRAIN: [2 1] TEST: [0]
    TRAIN: [0 2] TEST: [3]
    """

    def _iter_indices(self, X, y=None, labels=None):
        # Resolve test_size/train_size into absolute sample counts once.
        n_samples = _num_samples(X)
        n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
                                                  self.train_size)
        rng = check_random_state(self.random_state)
        for _ in range(self.n_iter):
            # One fresh random partition per iteration: the first n_test
            # positions of the permutation are the test set, the next
            # n_train positions the training set.
            perm = rng.permutation(n_samples)
            yield perm[n_test:(n_test + n_train)], perm[:n_test]
class LabelShuffleSplit(ShuffleSplit):
    '''Shuffle-Labels-Out cross-validation iterator

    Provides randomized train/test indices to split data according to a
    third-party provided label. This label information can be used to encode
    arbitrary domain specific stratifications of the samples as integers.

    For instance the labels could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.

    The difference between LeavePLabelOut and LabelShuffleSplit is that
    the former generates splits using all subsets of size ``p`` unique labels,
    whereas LabelShuffleSplit generates a user-determined number of random
    test splits, each with a user-determined fraction of unique labels.

    For example, a less computationally intensive alternative to
    ``LeavePLabelOut(p=10)`` would be
    ``LabelShuffleSplit(test_size=10, n_iter=100)``.

    Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
    not to samples, as in ShuffleSplit.

    Parameters
    ----------
    n_iter : int (default 5)
        Number of re-shuffling & splitting iterations.
    test_size : float (default 0.2), int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the labels to include in the test split. If
        int, represents the absolute number of test labels. If None,
        the value is automatically set to the complement of the train size.
    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the labels to include in the train split. If
        int, represents the absolute number of train labels. If None,
        the value is automatically set to the complement of the test size.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
    '''

    def __init__(self, n_iter=5, test_size=0.2, train_size=None,
                 random_state=None):
        # Same machinery as ShuffleSplit, but with defaults suited to
        # shuffling labels (n_iter=5, test_size=0.2).
        super(LabelShuffleSplit, self).__init__(
            n_iter=n_iter,
            test_size=test_size,
            train_size=train_size,
            random_state=random_state)

    def _iter_indices(self, X, y, labels):
        if labels is None:
            raise ValueError("The labels parameter should not be None")
        # Shuffle-split over the *unique* label values, then expand back
        # to sample indices.
        classes, label_indices = np.unique(labels, return_inverse=True)
        for label_train, label_test in super(
                LabelShuffleSplit, self)._iter_indices(X=classes):
            # these are the indices of classes in the partition
            # invert them into data indices
            train = np.flatnonzero(np.in1d(label_indices, label_train))
            test = np.flatnonzero(np.in1d(label_indices, label_test))
            yield train, test
class StratifiedShuffleSplit(BaseShuffleSplit):
    """Stratified ShuffleSplit cross-validator

    Provides train/test indices to split data in train/test sets.

    This cross-validation object is a merge of StratifiedKFold and
    ShuffleSplit, which returns stratified randomized folds. The folds
    are made by preserving the percentage of samples for each class.

    Note: like the ShuffleSplit strategy, stratified random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_iter : int (default 10)
        Number of re-shuffling & splitting iterations.
    test_size : float (default 0.1), int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.
    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Examples
    --------
    >>> from sklearn.model_selection import StratifiedShuffleSplit
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> sss = StratifiedShuffleSplit(n_iter=3, test_size=0.5, random_state=0)
    >>> sss.get_n_splits(X, y)
    3
    >>> print(sss)  # doctest: +ELLIPSIS
    StratifiedShuffleSplit(n_iter=3, random_state=0, ...)
    >>> for train_index, test_index in sss.split(X, y):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 2] TEST: [3 0]
    TRAIN: [0 2] TEST: [1 3]
    TRAIN: [0 2] TEST: [3 1]
    """

    def __init__(self, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None):
        super(StratifiedShuffleSplit, self).__init__(
            n_iter, test_size, train_size, random_state)

    def _iter_indices(self, X, y, labels=None):
        n_samples = _num_samples(X)
        # Resolve test_size/train_size into absolute sample counts.
        n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
                                                  self.train_size)
        classes, y_indices = np.unique(y, return_inverse=True)
        n_classes = classes.shape[0]
        class_counts = bincount(y_indices)
        if np.min(class_counts) < 2:
            raise ValueError("The least populated class in y has only 1"
                             " member, which is too few. The minimum"
                             " number of labels for any class cannot"
                             " be less than 2.")
        if n_train < n_classes:
            raise ValueError('The train_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (n_train, n_classes))
        if n_test < n_classes:
            raise ValueError('The test_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (n_test, n_classes))
        rng = check_random_state(self.random_state)
        # Per-class quotas: n_i train samples and t_i test samples for
        # class i, proportional to the class frequencies p_i.
        p_i = class_counts / float(n_samples)
        n_i = np.round(n_train * p_i).astype(int)
        t_i = np.minimum(class_counts - n_i,
                         np.round(n_test * p_i).astype(int))
        for _ in range(self.n_iter):
            train = []
            test = []
            for i, class_i in enumerate(classes):
                # Shuffle the positions of class i, then take the first
                # n_i for train and the next t_i for test.
                permutation = rng.permutation(class_counts[i])
                perm_indices_class_i = np.where((y == class_i))[0][permutation]
                train.extend(perm_indices_class_i[:n_i[i]])
                test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
            # Because of rounding issues (as n_train and n_test are not
            # dividers of the number of elements per class), we may end
            # up here with less samples in train and test than asked for.
            if len(train) < n_train or len(test) < n_test:
                # We complete by affecting randomly the missing indexes
                missing_indices = np.where(bincount(train + test,
                                                    minlength=len(y)) == 0)[0]
                missing_indices = rng.permutation(missing_indices)
                train.extend(missing_indices[:(n_train - len(train))])
                # NOTE(review): when len(test) == n_test this slice is
                # [-0:], i.e. the *whole* remaining missing_indices array,
                # not an empty slice -- the test set can then receive extra
                # indices.  Verify whether this edge case can occur here.
                test.extend(missing_indices[-(n_test - len(test)):])
            train = rng.permutation(train)
            test = rng.permutation(test)
            yield train, test
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
NOTE This does not take into account the number of samples which is known
only at split
"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
Validation helper to check if the test/test sizes are meaningful wrt to the
size of the data (n_samples)
"""
if (test_size is not None and np.asarray(test_size).dtype.kind == 'i'
and test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and np.asarray(train_size).dtype.kind == 'i'
and train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, labels=None):
    """Return the number of splitting iterations (one per test fold).

    Parameters
    ----------
    X, y, labels : object
        Always ignored, exist for compatibility.

    Returns
    -------
    n_splits : int
        Number of folds, i.e. the number of distinct non-negative
        values in ``test_fold``.
    """
    return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
    """Adapter exposing the new ``split``/``get_n_splits`` interface on
    top of old-style cv objects and plain iterables of (train, test)
    index pairs."""

    def __init__(self, cv):
        self.cv = cv

    def get_n_splits(self, X=None, y=None, labels=None):
        """Return the number of splitting iterations.

        ``X``, ``y`` and ``labels`` are ignored; they exist only for
        API compatibility.
        """
        # Both iterables and old-cv objects support len
        return len(self.cv)

    def split(self, X=None, y=None, labels=None):
        """Yield (train, test) index arrays for each split.

        ``X``, ``y`` and ``labels`` are ignored; they exist only for
        API compatibility.
        """
        for train_index, test_index in self.cv:
            yield train_index, test_index
def check_cv(cv=3, y=None, classifier=False):
    """Input checker utility for building a cross-validator.

    Parameters
    ----------
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if classifier is True and ``y`` is binary
        or multiclass, :class:`StratifiedKFold` is used; otherwise
        :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    y : array-like, optional
        The target variable for supervised learning problems.
    classifier : boolean, optional, default False
        Whether the task is a classification task, in which case
        stratified KFold will be used.

    Returns
    -------
    checked_cv : a cross-validator instance.
        The return value is a cross-validator which generates the
        train/test splits via the ``split`` method.
    """
    if cv is None:
        cv = 3
    if isinstance(cv, numbers.Integral):
        if (classifier and (y is not None) and
                (type_of_target(y) in ('binary', 'multiclass'))):
            return StratifiedKFold(cv)
        else:
            return KFold(cv)
    if not hasattr(cv, 'split') or isinstance(cv, str):
        if not isinstance(cv, Iterable) or isinstance(cv, str):
            # BUG FIX: error message previously read "or and iterable".
            raise ValueError("Expected cv as an integer, cross-validation "
                             "object (from sklearn.model_selection) "
                             "or an iterable. Got %s." % cv)
        return _CVIterableWrapper(cv)
    return cv  # New style cv objects are passed without any modification
def train_test_split(*arrays, **options):
    """Split arrays or matrices into random train and test subsets.

    Convenience wrapper around input validation and
    ``next(ShuffleSplit().split(X, y))`` that splits (and optionally
    subsamples) the given data in one call.

    Parameters
    ----------
    *arrays : sequence of indexables with same length / shape[0]
        Lists, numpy arrays, scipy-sparse matrices or pandas dataframes.
    test_size : float, int, or None (default is None)
        Proportion (float in [0, 1]) or absolute number (int) of test
        samples. If None, complement of ``train_size``; if both are
        None, 0.25 is used.
    train_size : float, int, or None (default is None)
        Proportion (float in [0, 1]) or absolute number (int) of train
        samples. If None, complement of ``test_size``.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
    stratify : array-like or None (default is None)
        If not None, data is split in a stratified fashion, using this
        as the labels array.

    Returns
    -------
    splitting : list, length=2 * len(arrays)
        Train-test split of each input, preserving the input type.
    """
    if not arrays:
        raise ValueError("At least one array required as input")

    test_size = options.pop('test_size', None)
    train_size = options.pop('train_size', None)
    random_state = options.pop('random_state', None)
    stratify = options.pop('stratify', None)
    # Anything left over was an unknown keyword argument.
    if options:
        raise TypeError("Invalid parameters passed: %s" % str(options))

    if test_size is None and train_size is None:
        # Historical default: hold out a quarter of the samples.
        test_size = 0.25

    arrays = indexable(*arrays)

    CVClass = StratifiedShuffleSplit if stratify is not None else ShuffleSplit
    cv = CVClass(test_size=test_size,
                 train_size=train_size,
                 random_state=random_state)

    train, test = next(cv.split(X=arrays[0], y=stratify))
    return list(chain.from_iterable((safe_indexing(a, train),
                                     safe_indexing(a, test)) for a in arrays))

train_test_split.__test__ = False  # to avoid a pb with nosetests
def _safe_split(estimator, X, y, indices, train_indices=None):
    """Create subset of dataset and properly handle kernels."""
    kernel = getattr(estimator, 'kernel', None)
    if callable(kernel) and not isinstance(kernel, GPKernel):
        # cannot compute the kernel values with custom function
        raise ValueError("Cannot use a custom kernel function. "
                         "Precompute the kernel matrix instead.")

    pairwise = getattr(estimator, "_pairwise", False)
    if not hasattr(X, "shape"):
        if pairwise:
            raise ValueError("Precomputed kernels or affinity matrices have "
                             "to be passed as arrays or sparse matrices.")
        X_subset = [X[index] for index in indices]
    elif pairwise:
        # X is a precomputed square kernel matrix
        if X.shape[0] != X.shape[1]:
            raise ValueError("X should be a square kernel matrix")
        columns = indices if train_indices is None else train_indices
        X_subset = X[np.ix_(indices, columns)]
    else:
        X_subset = safe_indexing(X, indices)

    y_subset = safe_indexing(y, indices) if y is not None else None
    return X_subset, y_subset
def _build_repr(self):
    # Build an estimator-style repr ("ClassName(param=value, ...)") from the
    # constructor signature, skipping parameters whose access triggers a
    # DeprecationWarning.
    # XXX This is copied from BaseEstimator's get_params
    cls = self.__class__
    init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
    # Ignore varargs, kw and default values and pop self
    init_signature = signature(init)
    # Consider the constructor parameters excluding 'self'
    if init is object.__init__:
        args = []
    else:
        args = sorted([p.name for p in init_signature.parameters.values()
                       if p.name != 'self' and p.kind != p.VAR_KEYWORD])
    class_name = self.__class__.__name__
    params = dict()
    for key in args:
        # We need deprecation warnings to always be on in order to
        # catch deprecated param values.
        # This is set in utils/__init__.py but it gets overwritten
        # when running under python3 somehow.
        warnings.simplefilter("always", DeprecationWarning)
        try:
            with warnings.catch_warnings(record=True) as w:
                value = getattr(self, key, None)
            if len(w) and w[0].category == DeprecationWarning:
                # if the parameter is deprecated, don't show it
                continue
        finally:
            # Remove the filter installed by simplefilter() above so the
            # caller's warning configuration is restored.
            warnings.filters.pop(0)
        params[key] = value
    return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
|
from pupa.scrape import Scraper
from pupa.scrape.helpers import Legislator, Organization
import lxml.html
import datetime
import traceback
MEMBERLIST = 'https://chicago.legistar.com/People.aspx'
class LegistarScraper(Scraper):
    """Base scraper for Legistar-hosted council sites
    (e.g. chicago.legistar.com)."""

    # Legistar renders dates like "01/31/2013".
    date_format = '%m/%d/%Y'

    def lxmlize(self, url, payload=None):
        """POST ``payload`` to ``url`` and return the parsed lxml document
        with all links made absolute."""
        entry = self.urlopen(url, 'POST', payload)
        page = lxml.html.fromstring(entry)
        page.make_links_absolute(url)
        return page

    def pages(self, url, payload=None):
        """Yield every page of a paginated Legistar table, starting at ``url``."""
        page = self.lxmlize(url, payload)
        yield page

        next_page = page.xpath("//a[@class='rgCurrentPage']/following-sibling::a[1]")
        while next_page:
            # BUG FIX: xpath() returns a list; take the first match before
            # reading .attrib.
            event_target = next_page[0].attrib['href'].split("'")[1]

            # NOTE(review): ``br`` (a mechanize browser?) and ``urllib`` are
            # never defined/imported in this module, so pagination past the
            # first page is currently broken -- this needs to be ported to an
            # ASP.NET postback issued through self.urlopen.
            br.select_form('aspnetForm')
            data = self._data(br.form, event_target)
            del data['ctl00$ContentPlaceHolder1$gridPeople$ctl00$ctl02$ctl01$ctl01']
            data = urllib.urlencode(data)

            # BUG FIX: was the unbound name ``lxmlize``; the helper is an
            # instance method.
            page = self.lxmlize(url, payload)
            yield page

            next_page = page.xpath("//a[@class='rgCurrentPage']/following-sibling::a[1]")

    def councilMembers(self, follow_links=True):
        """Yield one parsed row dict per council member. When
        ``follow_links`` is true, fetch each member's detail page and attach
        a 'Photo' URL."""
        for page in self.pages(MEMBERLIST):
            table = page.xpath("//table[@id='ctl00_ContentPlaceHolder1_gridPeople_ctl00']")[0]
            for councilman, headers, row in self.parseDataTable(table):
                if follow_links and type(councilman['Person Name']) == dict:
                    detail_url = councilman['Person Name']['url']
                    councilman_details = self.lxmlize(detail_url)
                    # BUG FIX: search the whole detail document ("//img");
                    # "./img" only matched direct children of the root node.
                    img = councilman_details.xpath("//img[@id='ctl00_ContentPlaceHolder1_imgPhoto']")
                    if img:
                        # BUG FIX: xpath() returns a list and Element.get is
                        # a method call; was ``img.get['src']``.
                        councilman['Photo'] = img[0].get('src')
                yield councilman

    def _get_link_address(self, link_soup):
        """Return the URL targeted by a Legistar link element, or None."""
        # If the link doesn't start with a #, then it'll send the browser
        # somewhere, and we should use the href value directly.
        href = link_soup.get('href')
        if href is not None and not href.startswith('#'):
            return href
        # If it does start with a hash, then it causes some sort of action
        # and we should check the onclick handler.
        else:
            onclick = link_soup.get('onclick')
            if onclick is not None and onclick.startswith("radopen('"):
                return onclick.split("'")[1]
        # Otherwise, we don't know how to find the address.
        return None

    def parseDataTable(self, table):
        """
        Legistar uses the same kind of data table in a number of
        places. This will return a list of dictionaries using the
        table headers as keys.
        """
        # CONSISTENCY FIX: use relative XPath (".//") so only this table's
        # headers and rows are matched, not every <th>/<tr> in the whole
        # document (matches the later revision of this scraper).
        headers = table.xpath('.//th')
        rows = table.xpath(".//tr[starts-with(@id, 'ctl00_ContentPlaceHolder1_')]")

        keys = {}
        for index, header in enumerate(headers):
            # NOTE(review): replacing ' ' with ' ' is a no-op as written;
            # this was probably meant to normalise non-breaking spaces
            # (u'\xa0') -- confirm against the upstream source.
            keys[index] = header.text_content().replace(' ', ' ').strip()

        for row in rows:
            try:
                data = {}
                for index, field in enumerate(row.xpath("./td")):
                    key = keys[index]
                    value = field.text_content().replace(' ', ' ').strip()
                    try:
                        value = datetime.datetime.strptime(value, self.date_format)
                    except ValueError:
                        pass
                    # Is it a link?
                    address = None
                    link = field.find('a')
                    if link is not None:
                        address = self._get_link_address(link)
                    if address is not None:
                        value = {'label': value, 'url': address}
                    data[key] = value
                yield data, keys, row
            except Exception as e:
                # Single-argument prints work both as the Python 2 statement
                # and as the Python 3 function call.
                print('Problem parsing row:')
                print(row)
                print(traceback.format_exc())
                raise e
class PersonScraper(LegistarScraper):
    def get_people(self):
        """Yield a Legislator for every member on the Legistar people list."""
        for member in self.councilMembers():
            legislator = Legislator(member['Person Name']['label'],
                                    post_id=member['Ward/Office'])
            legislator.add_source(MEMBERLIST)
            yield legislator
committee memberships
from pupa.scrape import Scraper
from pupa.scrape.helpers import Legislator, Organization
import lxml.html
import datetime
import traceback
from collections import defaultdict
MEMBERLIST = 'https://chicago.legistar.com/People.aspx'
class LegistarScraper(Scraper):
    """Base scraper for Legistar-hosted council sites
    (e.g. chicago.legistar.com)."""

    # Legistar renders dates like "01/31/2013".
    date_format = '%m/%d/%Y'

    def lxmlize(self, url, payload=None):
        """POST ``payload`` to ``url`` and return the parsed lxml document
        with all links made absolute."""
        entry = self.urlopen(url, 'POST', payload)
        page = lxml.html.fromstring(entry)
        page.make_links_absolute(url)
        return page

    def pages(self, url, payload=None):
        """Yield every page of a paginated Legistar table, starting at ``url``."""
        page = self.lxmlize(url, payload)
        yield page

        next_page = page.xpath("//a[@class='rgCurrentPage']/following-sibling::a[1]")
        while next_page:
            # BUG FIX: xpath() returns a list; take the first match before
            # reading .attrib.
            event_target = next_page[0].attrib['href'].split("'")[1]

            # NOTE(review): ``br`` (a mechanize browser?) and ``urllib`` are
            # never defined/imported in this module, so pagination past the
            # first page is currently broken -- this needs to be ported to an
            # ASP.NET postback issued through self.urlopen.
            br.select_form('aspnetForm')
            data = self._data(br.form, event_target)
            del data['ctl00$ContentPlaceHolder1$gridPeople$ctl00$ctl02$ctl01$ctl01']
            data = urllib.urlencode(data)

            # BUG FIX: was the unbound name ``lxmlize``; the helper is an
            # instance method.
            page = self.lxmlize(url, payload)
            yield page

            next_page = page.xpath("//a[@class='rgCurrentPage']/following-sibling::a[1]")

    def councilMembers(self, follow_links=True):
        """Yield (member_row, committee_rows) pairs when ``follow_links`` is
        true, otherwise bare member rows."""
        for page in self.pages(MEMBERLIST):
            table = page.xpath("//table[@id='ctl00_ContentPlaceHolder1_gridPeople_ctl00']")[0]
            for councilman, headers, row in self.parseDataTable(table):
                if follow_links and type(councilman['Person Name']) == dict:
                    detail_url = councilman['Person Name']['url']
                    councilman_details = self.lxmlize(detail_url)
                    img = councilman_details.xpath("//img[@id='ctl00_ContentPlaceHolder1_imgPhoto']")
                    if img:
                        councilman['Photo'] = img[0].get('src')
                    # The member's committee assignments live in a second
                    # Legistar data table on the detail page.
                    committee_table = councilman_details.xpath("//table[@id='ctl00_ContentPlaceHolder1_gridDepartments_ctl00']")[0]
                    committees = self.parseDataTable(committee_table)
                    yield councilman, committees
                else:
                    yield councilman

    def _get_link_address(self, link_soup):
        """Return the URL targeted by a Legistar link element, or None."""
        # If the link doesn't start with a #, then it'll send the browser
        # somewhere, and we should use the href value directly.
        href = link_soup.get('href')
        if href is not None and not href.startswith('#'):
            return href
        # If it does start with a hash, then it causes some sort of action
        # and we should check the onclick handler.
        else:
            onclick = link_soup.get('onclick')
            if onclick is not None and onclick.startswith("radopen('"):
                return onclick.split("'")[1]
        # Otherwise, we don't know how to find the address.
        return None

    def parseDataTable(self, table):
        """
        Legistar uses the same kind of data table in a number of
        places. This will return a list of dictionaries using the
        table headers as keys.
        """
        headers = table.xpath('.//th')
        rows = table.xpath(".//tr[starts-with(@id, 'ctl00_ContentPlaceHolder1_')]")

        keys = {}
        for index, header in enumerate(headers):
            # NOTE(review): replacing ' ' with ' ' is a no-op as written;
            # this was probably meant to normalise non-breaking spaces
            # (u'\xa0') -- confirm against the upstream source.
            keys[index] = header.text_content().replace(' ', ' ').strip()

        for row in rows:
            try:
                # defaultdict so callers can probe optional columns and get
                # None instead of a KeyError.
                data = defaultdict(lambda: None)
                for index, field in enumerate(row.xpath("./td")):
                    key = keys[index]
                    value = field.text_content().replace(' ', ' ').strip()
                    try:
                        value = datetime.datetime.strptime(value, self.date_format)
                    except ValueError:
                        pass
                    # Is it a link?
                    address = None
                    link = field.find('a')
                    if link is not None:
                        address = self._get_link_address(link)
                    if address is not None:
                        value = {'label': value, 'url': address}
                    data[key] = value
                yield data, keys, row
            except Exception as e:
                # Single-argument prints work both as the Python 2 statement
                # and as the Python 3 function call.
                print('Problem parsing row:')
                print(row)
                print(traceback.format_exc())
                raise e
class PersonScraper(LegistarScraper):
    def get_people(self):
        """Yield a Legislator per council member with contact details,
        photo, homepage and committee memberships attached."""
        for councilman, committees in self.councilMembers():
            # Map the Legistar column name to the (type, note) pair expected
            # by the pupa contact_details structure.
            contact_types = {
                "City Hall Office": ("address", "City Hall Office"),
                "City Hall Phone": ("phone", "City Hall Phone"),
                "Ward Office Phone": ("phone", "Ward Office Phone"),
                "Ward Office Address": ("address", "Ward Office Address"),
                "Fax": ("fax", "Fax")
            }

            contacts = []
            for contact_type, (_type, note) in contact_types.items():
                # councilman is a defaultdict, so missing columns are None.
                if councilman[contact_type]:
                    contacts.append({"type": _type,
                                     "value": councilman[contact_type],
                                     "note": note})

            if councilman["E-mail"]:
                contacts.append({"type": "email",
                                 "value": councilman['E-mail']['label'],
                                 'note': 'E-mail'})

            p = Legislator(councilman['Person Name']['label'],
                           post_id=councilman['Ward/Office'],
                           image=councilman['Photo'],
                           contact_details=contacts)

            if councilman['Website']:
                p.add_link('homepage', councilman['Website']['url'])
            p.add_source(MEMBERLIST)

            for committee, _, _ in committees:
                # CLEANUP: removed the stray Python 2 debug statement
                # ``print committee`` that was left here.
                if (committee['Legislative Body']['label']
                        and committee['Legislative Body']['label'] not in ('City Council', 'Office of the Mayor')):
                    p.add_committee_membership(committee['Legislative Body']['label'],
                                               role=committee["Title"])
            yield p
|
from datetime import datetime
from tempfile import TemporaryFile
from zipfile import ZipFile
from enum import IntEnum
import hashlib
import os
from collections import namedtuple
from flask import Blueprint, render_template, request, flash, redirect, g, current_app, url_for, abort, make_response
from flask.ext.babel import _, lazy_gettext as l_
from redis.exceptions import ConnectionError
from werkzeug.exceptions import BadRequest
from skylines.database import db
from skylines.frontend.forms import UploadForm, UploadUpdateForm
from skylines.lib import files
from skylines.lib.util import pressure_alt_to_qnh_alt
from skylines.lib.decorators import login_required
from skylines.lib.md5 import file_md5
from skylines.lib.sql import query_to_sql
from skylines.lib.xcsoar_ import flight_path, analyse_flight
from skylines.model import User, Flight, IGCFile, Airspace
from skylines.model.airspace import get_airspace_infringements
from skylines.model.event import create_flight_notifications
from skylines.worker import tasks
from geoalchemy2.shape import from_shape
from sqlalchemy.sql import literal_column
from shapely.geometry import MultiLineString, LineString
# mapscript/pyproj are optional: when unavailable the airspace_image
# endpoint responds 404 (see the mapscript_available checks below).
try:
    import mapscript
    import pyproj
    mapscript_available = True
except ImportError:
    mapscript_available = False
import xcsoar
upload_blueprint = Blueprint('upload', 'skylines')
class UploadStatus(IntEnum):
    """Outcome of processing a single uploaded IGC file.

    The commented-out ``_(...)`` calls keep the corresponding message
    strings discoverable by the gettext extraction tools.
    """
    SUCCESS = 0
    DUPLICATE = 1  # _('Duplicate file')
    MISSING_DATE = 2  # _('Date missing in IGC file')
    PARSER_ERROR = 3  # _('Failed to parse file')
    NO_FLIGHT = 4  # _('No flight found in file')
    FLIGHT_IN_FUTURE = 5  # _('Date of flight in future')
# Per-file processing outcome rendered by the upload result template; the
# non-SUCCESS statuses leave most fields as None.
UploadResult = namedtuple(
    'UploadResult', ['name', 'flight', 'status', 'prefix', 'trace', 'airspace', 'cache_key', 'form'])
def iterate_files(name, f):
    """Yield ``(name, fileobj)`` pairs for an uploaded file.

    If ``f`` is a ZIP archive, yield one pair per non-empty member;
    otherwise yield ``f`` itself under its original ``name``.
    """
    # Local import so this fix needs no change to the module import block.
    from zipfile import BadZipfile

    try:
        z = ZipFile(f, 'r')
    except BadZipfile:
        # BUG FIX: was a bare ``except:`` which also swallowed unrelated
        # errors (I/O problems, KeyboardInterrupt, ...).
        # f is not a ZipFile:
        # reset the pointer to the top of the file
        # (the ZipFile constructor might have moved it!)
        f.seek(0)
        yield name, f
    else:
        # if f is a ZipFile
        for info in z.infolist():
            # skip directory entries and empty members
            if info.file_size > 0:
                yield info.filename, z.open(info.filename, 'r')
def iterate_upload_files(upload):
    # Normalise the various shapes a browser upload can take (bare string,
    # list of uploads, or a single file-like object) into (name, fileobj)
    # pairs.  NOTE: Python 2 only -- uses the ``unicode`` builtin.
    if isinstance(upload, unicode):
        # the Chromium browser sends an empty string if no file is selected
        if not upload:
            return

        # some Android versions send the IGC file as a string, not as
        # a file
        with TemporaryFile() as f:
            f.write(upload.encode('UTF-8'))
            f.seek(0)
            yield 'direct.igc', f

    elif isinstance(upload, list):
        # multi-file upload: recurse into each entry
        for x in upload:
            for name, f in iterate_upload_files(x):
                yield name, f

    else:
        # single file-like object; may itself be a ZIP archive
        for x in iterate_files(upload.filename, upload):
            yield x
def _encode_flight_path(fp, qnh):
    # Encode the flight path for the client-side barogram: QNH-corrected
    # altitude, time, engine noise level and ground elevation as xcsoar
    # delta-encoded arrays, plus the start/end timestamps.
    # Reduce to 1000 points maximum with equal spacing
    shortener = int(max(1, len(fp) / 1000))

    barogram_h = xcsoar.encode([pressure_alt_to_qnh_alt(fix.pressure_altitude, qnh) for fix in fp[::shortener]],
                               method="signed")
    barogram_t = xcsoar.encode([fix.seconds_of_day for fix in fp[::shortener]], method="signed")
    # 0 / -1000 act as sentinels for missing ENL / elevation samples
    enl = xcsoar.encode([fix.enl if fix.enl is not None else 0 for fix in fp[::shortener]], method="signed")
    elevations_h = xcsoar.encode([fix.elevation if fix.elevation is not None else -1000 for fix in fp[::shortener]], method="signed")

    return dict(barogram_h=barogram_h, barogram_t=barogram_t,
                enl=enl, elevations_h=elevations_h,
                igc_start_time=fp[0].datetime, igc_end_time=fp[-1].datetime)
@upload_blueprint.route('/', methods=('GET', 'POST'))
@login_required(l_("You have to login to upload flights."))
def index():
    """Upload view.

    Stage 1 re-validates the per-flight update forms posted back from the
    result page and publishes the valid ones; any other request shows or
    handles the initial file-selection form.
    """
    if request.values.get('stage', type=int) == 1:
        # Parse update form
        num_flights = request.values.get('num_flights', 0, type=int)

        results = []
        flight_id_list = []
        form_error = False

        for prefix in range(1, num_flights + 1):
            name = request.values.get('{}-name'.format(prefix))

            try:
                status = UploadStatus(request.values.get('{}-status'.format(prefix), type=int))
            except ValueError:
                raise BadRequest('Status unknown')

            flight, fp, form = check_update_form(prefix, status)

            if fp:
                trace = _encode_flight_path(fp, flight.qnh)
                infringements = get_airspace_infringements(fp, qnh=flight.qnh)
            else:
                trace = None
                infringements = {}

            cache_key = None
            # BUG FIX: ``airspace`` was only assigned in the infringement
            # branch below, so building the UploadResult raised NameError
            # for flights without any airspace infringement.
            airspace = None

            if form and not infringements:
                # remove airspace field from form if no airspace infringements found
                del form.airspace_usage
            elif form and infringements:
                # if airspace infringements found create cache_key from flight id and user id
                cache_key = hashlib.sha1(str(flight.id) + '_' + str(g.current_user.id)).hexdigest()
                airspace = db.session.query(Airspace) \
                    .filter(Airspace.id.in_(infringements.keys())) \
                    .all()

            results.append(UploadResult(name, flight, status, str(prefix), trace, airspace, cache_key, form))

            if form and form.validate_on_submit():
                _update_flight(flight.id, fp, form)
                flight_id_list.append(flight.id)
            elif form:
                form_error = True

        if form_error:
            # re-render the result page so the user can fix invalid forms
            return render_template(
                'upload/result.jinja', num_flights=num_flights, results=results, success=True)
        elif flight_id_list:
            flash(_('Your flight(s) have been successfully published.'))
            return redirect(url_for('flights.list', ids=','.join(str(x) for x in flight_id_list)))
        else:
            return redirect(url_for('flights.today'))

    else:
        # Create/parse file selection form
        form = UploadForm(pilot=g.current_user.id)
        if form.validate_on_submit():
            return index_post(form)

        return render_template('upload/form.jinja', form=form)
def index_post(form):
    # Handle the file-selection form: store each uploaded IGC file, analyse
    # it, and collect one UploadResult per file for the result template.
    user = g.current_user

    # pilot id 0 means "no pilot selected" in the form
    pilot_id = form.pilot.data if form.pilot.data != 0 else None
    pilot = pilot_id and User.get(int(pilot_id))
    pilot_id = pilot and pilot.id
    # the flight is filed under the pilot's club, falling back to the
    # uploading user's club
    club_id = (pilot and pilot.club_id) or user.club_id

    results = []
    success = False
    prefix = 0
    for name, f in iterate_upload_files(form.file.raw_data):
        prefix += 1
        filename = files.sanitise_filename(name)
        filename = files.add_file(filename, f)

        # check if the file already exists
        with files.open_file(filename) as f:
            md5 = file_md5(f)
            other = Flight.by_md5(md5)
            if other:
                files.delete_file(filename)
                results.append(UploadResult(name, other, UploadStatus.DUPLICATE, str(prefix), None, None, None, None))
                continue

        igc_file = IGCFile()
        igc_file.owner = user
        igc_file.filename = filename
        igc_file.md5 = md5
        igc_file.update_igc_headers()

        if igc_file.date_utc is None:
            files.delete_file(filename)
            results.append(UploadResult(name, None, UploadStatus.MISSING_DATE, str(prefix), None, None, None, None))
            continue

        flight = Flight()
        flight.pilot_id = pilot_id
        flight.pilot_name = form.pilot_name.data if form.pilot_name.data else None
        flight.club_id = club_id
        flight.igc_file = igc_file

        flight.model_id = igc_file.guess_model()

        # prefer the registration recorded in the IGC headers
        if igc_file.registration:
            flight.registration = igc_file.registration
        else:
            flight.registration = igc_file.guess_registration()

        flight.competition_id = igc_file.competition_id

        fp = flight_path(flight.igc_file, add_elevation=True, max_points=None)

        analyzed = False
        try:
            analyzed = analyse_flight(flight, fp=fp)
        except:
            # NOTE(review): deliberately broad best-effort catch -- a failed
            # analysis is reported as PARSER_ERROR below.
            current_app.logger.exception('analyse_flight() raised an exception')

        if not analyzed:
            files.delete_file(filename)
            results.append(UploadResult(name, None, UploadStatus.PARSER_ERROR, str(prefix), None, None, None, None))
            continue

        if not flight.takeoff_time or not flight.landing_time:
            files.delete_file(filename)
            results.append(UploadResult(name, None, UploadStatus.NO_FLIGHT, str(prefix), None, None, None, None))
            continue

        if flight.landing_time > datetime.now():
            files.delete_file(filename)
            results.append(UploadResult(name, None, UploadStatus.FLIGHT_IN_FUTURE, str(prefix), None, None, None, None))
            continue

        if not flight.update_flight_path():
            files.delete_file(filename)
            results.append(UploadResult(name, None, UploadStatus.NO_FLIGHT, str(prefix), None, None, None, None))
            continue

        # uploads start private; publishing happens in _update_flight()
        flight.privacy_level = Flight.PrivacyLevel.PRIVATE

        trace = _encode_flight_path(fp, qnh=flight.qnh)
        infringements = get_airspace_infringements(fp, qnh=flight.qnh)

        db.session.add(igc_file)
        db.session.add(flight)

        # flush data to make sure we don't get duplicate files from ZIP files
        db.session.flush()

        # Store data in cache for image creation
        cache_key = hashlib.sha1(str(flight.id) + '_' + str(user.id)).hexdigest()

        current_app.cache.set('upload_airspace_infringements_' + cache_key, infringements, timeout=15 * 60)
        current_app.cache.set('upload_airspace_flight_path_' + cache_key, fp, timeout=15 * 60)

        airspace = db.session.query(Airspace) \
            .filter(Airspace.id.in_(infringements.keys())) \
            .all()

        # create form after flushing the session, otherwise we wouldn't have a flight.id
        update_form = UploadUpdateForm(formdata=None, prefix=str(prefix), obj=flight)

        # remove airspace field from update_form if no airspace infringements found
        if not infringements:
            del update_form.airspace_usage

        # replace None in update_form.pilot_id and update_form.co_pilot_id with 0
        if not update_form.pilot_id.data:
            update_form.pilot_id.data = 0
        if not update_form.co_pilot_id.data:
            update_form.co_pilot_id.data = 0

        update_form.pilot_id.validate(update_form)

        results.append(UploadResult(name, flight, UploadStatus.SUCCESS, str(prefix), trace,
                                    airspace, cache_key, update_form))

        create_flight_notifications(flight)

        success = True

    db.session.commit()

    if success:
        flash(_('Please click "Publish Flight(s)" at the bottom to confirm our automatic analysis.'))

    return render_template(
        'upload/result.jinja', num_flights=prefix, results=results, success=success)
def check_update_form(prefix, status):
    # Re-bind the per-flight update form posted back from the result page.
    # Returns (flight, flight_path, form); (None, None, None) when the form
    # was not submitted for this prefix.
    form = UploadUpdateForm(prefix=str(prefix))

    if not form.id or not form.id.data:
        return None, None, None

    flight_id = form.id.data

    # Get flight from database and check if it is writable
    flight = Flight.get(flight_id)

    if not flight:
        abort(404)

    if status == UploadStatus.DUPLICATE:
        # duplicates carry no editable form or flight path
        return flight, None, None
    else:
        if not flight.is_writable(g.current_user):
            abort(403)

        fp = flight_path(flight.igc_file, add_elevation=True, max_points=None)
        form.populate_obj(flight)

        # replace None in form.pilot_id and form.co_pilot_id with 0
        if not form.pilot_id.data:
            form.pilot_id.data = 0
        if not form.co_pilot_id.data:
            form.co_pilot_id.data = 0

        # Force takeoff_time and landing_time to be within the igc file limits
        if form.takeoff_time.data < fp[0].datetime:
            form.takeoff_time.data = fp[0].datetime

        if form.landing_time.data > fp[-1].datetime:
            form.landing_time.data = fp[-1].datetime

        return flight, fp, form
def _update_flight(flight_id, fp, form):
    # Apply a validated UploadUpdateForm to the flight, publish it, and
    # queue the background re-analysis tasks.  Returns False if the flight
    # does not exist or is not writable by the current user.
    model_id = form.model_id.data
    registration = form.registration.data
    competition_id = form.competition_id.data
    takeoff_time = form.takeoff_time.data
    scoring_start_time = form.scoring_start_time.data
    scoring_end_time = form.scoring_end_time.data
    landing_time = form.landing_time.data
    pilot_id = form.pilot_id.data
    pilot_name = form.pilot_name.data
    co_pilot_id = form.co_pilot_id.data
    co_pilot_name = form.co_pilot_name.data

    # Get flight from database and check if it is writable
    flight = Flight.get(flight_id)

    if not flight or not flight.is_writable(g.current_user):
        return False

    # Parse model, registration and competition ID
    # (0 / empty / over-long values mean "not set")
    if model_id == 0:
        model_id = None

    if registration is not None:
        registration = registration.strip()
        if not 0 < len(registration) <= 32:
            registration = None

    if competition_id is not None:
        competition_id = competition_id.strip()
        if not 0 < len(competition_id) <= 5:
            competition_id = None

    if pilot_id == 0:
        pilot_id = None

    # Set new values
    if flight.pilot_id != pilot_id:
        flight.pilot_id = pilot_id
        # update club if pilot changed
        if pilot_id:
            flight.club_id = User.get(pilot_id).club_id

    flight.pilot_name = pilot_name if pilot_name else None
    flight.co_pilot_id = co_pilot_id if co_pilot_id != 0 else None
    flight.co_pilot_name = co_pilot_name if co_pilot_name else None
    flight.model_id = model_id
    flight.registration = registration
    flight.competition_id = competition_id
    flight.time_modified = datetime.utcnow()

    # Update times only if they are reasonable and have been changed...
    trigger_analysis = False

    if takeoff_time and scoring_start_time and scoring_end_time and landing_time \
            and takeoff_time <= scoring_start_time <= scoring_end_time <= landing_time \
            and (flight.takeoff_time != takeoff_time or
                 flight.scoring_start_time != scoring_start_time or
                 flight.scoring_end_time != scoring_end_time or
                 flight.landing_time != landing_time):
        flight.takeoff_time = takeoff_time
        flight.scoring_start_time = scoring_start_time
        flight.scoring_end_time = scoring_end_time
        flight.landing_time = landing_time
        trigger_analysis = True

    # publishing makes the flight visible to everybody
    flight.privacy_level = Flight.PrivacyLevel.PUBLIC

    db.session.commit()

    if trigger_analysis:
        # synchronous re-analysis with the already-loaded flight path
        analyse_flight(flight, fp=fp)

    # best-effort: queue background tasks, tolerate an unreachable broker
    try:
        tasks.analyse_flight.delay(flight.id)
        tasks.find_meetings.delay(flight.id)
    except ConnectionError:
        current_app.logger.info('Cannot connect to Redis server')

    return True
@upload_blueprint.route('/airspace/<string:cache_key>/<int:airspace_id>.png')
def airspace_image(cache_key, airspace_id):
    # Render a map image of the flight path with the infringed airspace
    # periods highlighted, using the data cached during upload.
    if not mapscript_available:
        abort(404)

    # get information from cache...
    infringements = current_app.cache.get('upload_airspace_infringements_' + cache_key)
    # NOTE(review): this local shadows the module-level ``flight_path``
    # import from skylines.lib.xcsoar_ within this function.
    flight_path = current_app.cache.get('upload_airspace_flight_path_' + cache_key)

    # abort if invalid cache key
    if not infringements \
            or not flight_path:
        abort(404)

    # Convert the coordinate into a list of tuples
    coordinates = [(c.location['longitude'], c.location['latitude']) for c in flight_path]

    # Create a shapely LineString object from the coordinates
    linestring = LineString(coordinates)

    # Save the new path as WKB
    locations = from_shape(linestring, srid=4326)

    highlight_locations = []
    # start from the inverted world extent so min/max below converge
    extent_epsg4326 = [180, 85.05112878, -180, -85.05112878]

    for period in infringements[airspace_id]:
        # Convert the coordinate into a list of tuples
        coordinates = [(c['location']['longitude'], c['location']['latitude']) for c in period]

        # Create a shapely LineString object from the coordinates
        if len(coordinates) == 1:
            # a LineString must contain at least two points...
            linestring = LineString([coordinates[0], coordinates[0]])
        else:
            linestring = LineString(coordinates)

        highlight_locations.append(linestring)

        # gather extent
        (minx, miny, maxx, maxy) = linestring.bounds

        extent_epsg4326[0] = min(extent_epsg4326[0], minx)
        extent_epsg4326[1] = min(extent_epsg4326[1], miny)
        extent_epsg4326[2] = max(extent_epsg4326[2], maxx)
        extent_epsg4326[3] = max(extent_epsg4326[3], maxy)

    # Save the new path as WKB
    highlight_multilinestring = from_shape(MultiLineString(highlight_locations), srid=4326)

    # increase extent by factor 1.05
    width = abs(extent_epsg4326[0] - extent_epsg4326[2])
    height = abs(extent_epsg4326[1] - extent_epsg4326[3])

    center_x = (extent_epsg4326[0] + extent_epsg4326[2]) / 2
    center_y = (extent_epsg4326[1] + extent_epsg4326[3]) / 2

    extent_epsg4326[0] = center_x - width / 2 * 1.05
    extent_epsg4326[1] = center_y - height / 2 * 1.05
    extent_epsg4326[2] = center_x + width / 2 * 1.05
    extent_epsg4326[3] = center_y + height / 2 * 1.05

    # minimum extent should be 0.3 deg
    width = abs(extent_epsg4326[0] - extent_epsg4326[2])
    height = abs(extent_epsg4326[1] - extent_epsg4326[3])

    if width < 0.3:
        extent_epsg4326[0] = center_x - 0.15
        extent_epsg4326[2] = center_x + 0.15

    if height < 0.3:
        extent_epsg4326[1] = center_y - 0.15
        extent_epsg4326[3] = center_y + 0.15

    # convert extent from EPSG4326 to EPSG3857 (web mercator, the basemap CRS)
    epsg4326 = pyproj.Proj(init='epsg:4326')
    epsg3857 = pyproj.Proj(init='epsg:3857')

    x1, y1 = pyproj.transform(epsg4326, epsg3857, extent_epsg4326[0], extent_epsg4326[1])
    x2, y2 = pyproj.transform(epsg4326, epsg3857, extent_epsg4326[2], extent_epsg4326[3])

    extent_epsg3857 = [x1, y1, x2, y2]

    # load basemap and set size + extent
    basemap_path = os.path.join(current_app.config.get('SKYLINES_MAPSERVER_PATH'), 'basemap.map')
    map_object = mapscript.mapObj(basemap_path)
    map_object.setSize(400, 400)
    map_object.setExtent(extent_epsg3857[0], extent_epsg3857[1], extent_epsg3857[2], extent_epsg3857[3])

    # enable airspace and airports layers
    num_layers = map_object.numlayers
    for i in range(num_layers):
        layer = map_object.getLayer(i)

        if layer.group == 'Airports':
            layer.status = mapscript.MS_ON

        if layer.group == 'Airspace':
            layer.status = mapscript.MS_ON

    # get flights layer
    flights_layer = map_object.getLayerByName('Flights')
    highlight_layer = map_object.getLayerByName('Flights_Highlight')

    # set sql query for blue flight
    one = literal_column('1 as flight_id')
    flight_query = db.session.query(locations.label('flight_geometry'), one)

    flights_layer.data = 'flight_geometry FROM (' + query_to_sql(flight_query) + ')' + \
                         ' AS foo USING UNIQUE flight_id USING SRID=4326'

    # set sql query for highlighted linestrings
    highlighted_query = db.session.query(highlight_multilinestring.label('flight_geometry'), one)

    highlight_layer.data = 'flight_geometry FROM (' + query_to_sql(highlighted_query) + ')' + \
                           ' AS foo USING UNIQUE flight_id USING SRID=4326'
    highlight_layer.status = mapscript.MS_ON

    # get osm layer and set WMS url
    osm_layer = map_object.getLayerByName('OSM')
    osm_layer.connection = current_app.config.get('SKYLINES_MAP_TILE_URL') + \
        '/service?'

    # draw map
    map_image = map_object.draw()

    # get image
    mapscript.msIO_installStdoutToBuffer()
    map_image.write()
    content = mapscript.msIO_getStdoutBufferBytes()

    # return to client
    resp = make_response(content)
    resp.headers['Content-type'] = map_image.format.mimetype
    return resp
views/upload: Add factory functions to UploadResult
from datetime import datetime
from tempfile import TemporaryFile
from zipfile import ZipFile
from enum import IntEnum
import hashlib
import os
from collections import namedtuple
from flask import Blueprint, render_template, request, flash, redirect, g, current_app, url_for, abort, make_response
from flask.ext.babel import _, lazy_gettext as l_
from redis.exceptions import ConnectionError
from werkzeug.exceptions import BadRequest
from skylines.database import db
from skylines.frontend.forms import UploadForm, UploadUpdateForm
from skylines.lib import files
from skylines.lib.util import pressure_alt_to_qnh_alt
from skylines.lib.decorators import login_required
from skylines.lib.md5 import file_md5
from skylines.lib.sql import query_to_sql
from skylines.lib.xcsoar_ import flight_path, analyse_flight
from skylines.model import User, Flight, IGCFile, Airspace
from skylines.model.airspace import get_airspace_infringements
from skylines.model.event import create_flight_notifications
from skylines.worker import tasks
from geoalchemy2.shape import from_shape
from sqlalchemy.sql import literal_column
from shapely.geometry import MultiLineString, LineString
try:
import mapscript
import pyproj
mapscript_available = True
except ImportError:
mapscript_available = False
import xcsoar
upload_blueprint = Blueprint('upload', 'skylines')
class UploadStatus(IntEnum):
    """Outcome of processing a single uploaded IGC file.

    The commented-out ``_('...')`` calls presumably keep the
    human-readable messages visible to gettext string extraction
    without translating anything at import time -- confirm against the
    translation tooling.
    """
    SUCCESS = 0
    DUPLICATE = 1  # _('Duplicate file')
    MISSING_DATE = 2  # _('Date missing in IGC file')
    PARSER_ERROR = 3  # _('Failed to parse file')
    NO_FLIGHT = 4  # _('No flight found in file')
    FLIGHT_IN_FUTURE = 5  # _('Date of flight in future')
class UploadResult(namedtuple('UploadResult', [
        'name', 'flight', 'status', 'prefix', 'trace', 'airspace', 'cache_key', 'form'])):
    """Per-file result record of an upload attempt.

    ``name`` is the original filename, ``status`` an :class:`UploadStatus`,
    ``prefix`` the string form-field prefix used on the result page. The
    remaining fields are only populated for successful uploads.
    """

    @classmethod
    def _failure(cls, name, status, prefix):
        # Shared constructor for all failure results: no flight object,
        # no trace/airspace/cache_key/form.
        return cls(name, None, status, prefix, None, None, None, None)

    @classmethod
    def for_duplicate(cls, name, other, prefix):
        # ``other`` is the already-existing Flight with the same MD5.
        return cls(name, other, UploadStatus.DUPLICATE, prefix, None, None, None, None)

    @classmethod
    def for_missing_date(cls, name, prefix):
        return cls._failure(name, UploadStatus.MISSING_DATE, prefix)

    @classmethod
    def for_parser_error(cls, name, prefix):
        return cls._failure(name, UploadStatus.PARSER_ERROR, prefix)

    @classmethod
    def for_no_flight(cls, name, prefix):
        return cls._failure(name, UploadStatus.NO_FLIGHT, prefix)

    @classmethod
    def for_future_flight(cls, name, prefix):
        return cls._failure(name, UploadStatus.FLIGHT_IN_FUTURE, prefix)
def iterate_files(name, f):
    """Yield ``(filename, file object)`` pairs for an uploaded file.

    If *f* is a ZIP archive, each non-empty member is yielded
    individually under its member name; otherwise *f* itself is yielded
    under *name*.
    """
    try:
        z = ZipFile(f, 'r')
    except Exception:
        # f is not a ZIP file. The ZipFile constructor may have moved
        # the file pointer, so rewind before handing the file on.
        # (was a bare ``except:``, which would also swallow
        # KeyboardInterrupt/SystemExit)
        f.seek(0)
        yield name, f
    else:
        for info in z.infolist():
            # skip directory entries and empty members
            if info.file_size > 0:
                yield info.filename, z.open(info.filename, 'r')
def iterate_upload_files(upload):
    """Yield ``(filename, file object)`` pairs for a form upload value.

    Accepts a unicode string (raw IGC content), a list of upload
    values, or a single file-like upload object.
    """
    if isinstance(upload, unicode):
        # the Chromium browser sends an empty string if no file is selected
        if not upload:
            return

        # some Android versions send the IGC file as a string instead
        # of a file; spool it into a temporary file
        with TemporaryFile() as tmp:
            tmp.write(upload.encode('UTF-8'))
            tmp.seek(0)
            yield 'direct.igc', tmp

    elif isinstance(upload, list):
        # flatten multi-file uploads recursively
        for element in upload:
            for pair in iterate_upload_files(element):
                yield pair

    else:
        for pair in iterate_files(upload.filename, upload):
            yield pair
def _encode_flight_path(fp, qnh):
    """Encode a flight path into xcsoar-compressed barogram arrays.

    Returns a dict with the encoded altitude/time/ENL/elevation series
    plus the start and end timestamps of the IGC trace.
    """
    # Reduce to at most ~1000 evenly spaced fixes.
    step = int(max(1, len(fp) / 1000))
    samples = fp[::step]

    heights = xcsoar.encode(
        [pressure_alt_to_qnh_alt(fix.pressure_altitude, qnh) for fix in samples],
        method="signed")
    times = xcsoar.encode(
        [fix.seconds_of_day for fix in samples], method="signed")
    engine_noise = xcsoar.encode(
        [fix.enl if fix.enl is not None else 0 for fix in samples],
        method="signed")
    # missing ground elevation is encoded as the sentinel -1000
    elevations = xcsoar.encode(
        [fix.elevation if fix.elevation is not None else -1000 for fix in samples],
        method="signed")

    return dict(barogram_h=heights, barogram_t=times,
                enl=engine_noise, elevations_h=elevations,
                igc_start_time=fp[0].datetime, igc_end_time=fp[-1].datetime)
@upload_blueprint.route('/', methods=('GET', 'POST'))
@login_required(l_("You have to login to upload flights."))
def index():
    """Upload wizard endpoint.

    With ``stage=1`` in the request, validates and saves the per-flight
    update forms rendered after the initial upload. Otherwise shows the
    file selection form and, on valid POST, delegates the actual upload
    to :func:`index_post`.
    """
    if request.values.get('stage', type=int) == 1:
        # Parse update form
        num_flights = request.values.get('num_flights', 0, type=int)

        results = []
        flight_id_list = []
        form_error = False

        # form fields are prefixed "<n>-", n starting at 1
        for prefix in range(1, num_flights + 1):
            name = request.values.get('{}-name'.format(prefix))

            try:
                # a missing or unknown status value raises ValueError here
                status = UploadStatus(request.values.get('{}-status'.format(prefix), type=int))
            except ValueError:
                raise BadRequest('Status unknown')

            flight, fp, form = check_update_form(prefix, status)

            if fp:
                trace = _encode_flight_path(fp, flight.qnh)
                infringements = get_airspace_infringements(fp, qnh=flight.qnh)
            else:
                trace = None
                infringements = {}

            cache_key = None
            if form and not infringements:
                # remove airspace field from form if no airspace infringements found
                del form.airspace_usage
            elif form and infringements:
                # if airspace infringements found create cache_key from flight id and user id
                cache_key = hashlib.sha1(str(flight.id) + '_' + str(g.current_user.id)).hexdigest()

            airspace = db.session.query(Airspace) \
                .filter(Airspace.id.in_(infringements.keys())) \
                .all()

            results.append(UploadResult(name, flight, status, str(prefix), trace, airspace, cache_key, form))

            if form and form.validate_on_submit():
                _update_flight(flight.id, fp, form)
                flight_id_list.append(flight.id)
            elif form:
                form_error = True

        if form_error:
            # NOTE(review): result page is re-rendered with success=True even
            # though a form failed validation -- confirm this is intended.
            return render_template(
                'upload/result.jinja', num_flights=num_flights, results=results, success=True)
        elif flight_id_list:
            flash(_('Your flight(s) have been successfully published.'))
            return redirect(url_for('flights.list', ids=','.join(str(x) for x in flight_id_list)))
        else:
            return redirect(url_for('flights.today'))

    else:
        # Create/parse file selection form
        form = UploadForm(pilot=g.current_user.id)

        if form.validate_on_submit():
            return index_post(form)

        return render_template('upload/form.jinja', form=form)
def index_post(form):
    """Handle the initial file upload.

    Saves each uploaded IGC file, analyses it, and renders the result
    page containing one prefixed :class:`UploadUpdateForm` per
    successfully parsed flight. Files failing any check are deleted and
    recorded via the corresponding :class:`UploadResult` factory.
    """
    user = g.current_user

    # pilot value 0 means "no pilot selected"
    pilot_id = form.pilot.data if form.pilot.data != 0 else None
    pilot = pilot_id and User.get(int(pilot_id))
    pilot_id = pilot and pilot.id

    club_id = (pilot and pilot.club_id) or user.club_id

    results = []
    success = False

    # prefix doubles as a 1-based file counter and the form-field prefix
    prefix = 0
    for name, f in iterate_upload_files(form.file.raw_data):
        prefix += 1
        filename = files.sanitise_filename(name)
        filename = files.add_file(filename, f)

        # check if the file already exists
        with files.open_file(filename) as f:
            md5 = file_md5(f)
            other = Flight.by_md5(md5)
            if other:
                files.delete_file(filename)
                results.append(UploadResult.for_duplicate(name, other, str(prefix)))
                continue

        igc_file = IGCFile()
        igc_file.owner = user
        igc_file.filename = filename
        igc_file.md5 = md5
        igc_file.update_igc_headers()

        if igc_file.date_utc is None:
            files.delete_file(filename)
            results.append(UploadResult.for_missing_date(name, str(prefix)))
            continue

        flight = Flight()
        flight.pilot_id = pilot_id
        flight.pilot_name = form.pilot_name.data if form.pilot_name.data else None
        flight.club_id = club_id
        flight.igc_file = igc_file

        flight.model_id = igc_file.guess_model()

        if igc_file.registration:
            flight.registration = igc_file.registration
        else:
            flight.registration = igc_file.guess_registration()

        flight.competition_id = igc_file.competition_id

        fp = flight_path(flight.igc_file, add_elevation=True, max_points=None)

        analyzed = False
        try:
            analyzed = analyse_flight(flight, fp=fp)
        except:
            # NOTE(review): bare except looks like deliberate best-effort
            # (any analysis failure becomes a PARSER_ERROR result)
            current_app.logger.exception('analyse_flight() raised an exception')

        if not analyzed:
            files.delete_file(filename)
            results.append(UploadResult.for_parser_error(name, str(prefix)))
            continue

        if not flight.takeoff_time or not flight.landing_time:
            files.delete_file(filename)
            results.append(UploadResult.for_no_flight(name, str(prefix)))
            continue

        if flight.landing_time > datetime.now():
            files.delete_file(filename)
            results.append(UploadResult.for_future_flight(name, str(prefix)))
            continue

        if not flight.update_flight_path():
            files.delete_file(filename)
            results.append(UploadResult.for_no_flight(name, str(prefix)))
            continue

        flight.privacy_level = Flight.PrivacyLevel.PRIVATE

        trace = _encode_flight_path(fp, qnh=flight.qnh)
        infringements = get_airspace_infringements(fp, qnh=flight.qnh)

        db.session.add(igc_file)
        db.session.add(flight)

        # flush data to make sure we don't get duplicate files from ZIP files
        db.session.flush()

        # Store data in cache for image creation
        cache_key = hashlib.sha1(str(flight.id) + '_' + str(user.id)).hexdigest()
        current_app.cache.set('upload_airspace_infringements_' + cache_key, infringements, timeout=15 * 60)
        current_app.cache.set('upload_airspace_flight_path_' + cache_key, fp, timeout=15 * 60)

        airspace = db.session.query(Airspace) \
            .filter(Airspace.id.in_(infringements.keys())) \
            .all()

        # create form after flushing the session, otherwise we wouldn't have a flight.id
        update_form = UploadUpdateForm(formdata=None, prefix=str(prefix), obj=flight)

        # remove airspace field from update_form if no airspace infringements found
        if not infringements:
            del update_form.airspace_usage

        # replace None in update_form.pilot_id and update_form.co_pilot_id with 0
        if not update_form.pilot_id.data:
            update_form.pilot_id.data = 0
        if not update_form.co_pilot_id.data:
            update_form.co_pilot_id.data = 0

        update_form.pilot_id.validate(update_form)

        results.append(UploadResult(name, flight, UploadStatus.SUCCESS, str(prefix), trace,
                                    airspace, cache_key, update_form))

        create_flight_notifications(flight)

        success = True

    db.session.commit()

    if success:
        flash(_('Please click "Publish Flight(s)" at the bottom to confirm our automatic analysis.'))

    return render_template(
        'upload/result.jinja', num_flights=prefix, results=results, success=success)
def check_update_form(prefix, status):
    """Load and sanitise the prefixed update form for one flight.

    Returns ``(flight, fp, form)``. Returns ``(None, None, None)`` when
    the form carries no flight id, and ``(flight, None, None)`` for
    duplicate uploads. Aborts with 404/403 for missing or non-writable
    flights.
    """
    form = UploadUpdateForm(prefix=str(prefix))

    if not form.id or not form.id.data:
        return None, None, None

    flight_id = form.id.data

    # Get flight from database and check if it is writable
    flight = Flight.get(flight_id)

    if not flight:
        abort(404)

    if status == UploadStatus.DUPLICATE:
        return flight, None, None
    else:
        if not flight.is_writable(g.current_user):
            abort(403)

        fp = flight_path(flight.igc_file, add_elevation=True, max_points=None)
        form.populate_obj(flight)

        # replace None in form.pilot_id and form.co_pilot_id with 0
        if not form.pilot_id.data:
            form.pilot_id.data = 0
        if not form.co_pilot_id.data:
            form.co_pilot_id.data = 0

        # Force takeoff_time and landing_time to be within the igc file limits
        if form.takeoff_time.data < fp[0].datetime:
            form.takeoff_time.data = fp[0].datetime

        if form.landing_time.data > fp[-1].datetime:
            form.landing_time.data = fp[-1].datetime

        return flight, fp, form
def _update_flight(flight_id, fp, form):
    """Apply a validated update form to a flight and publish it.

    Returns False when the flight does not exist or is not writable by
    the current user, True otherwise. Commits the session and triggers
    re-analysis when the time fields changed.
    """
    model_id = form.model_id.data
    registration = form.registration.data
    competition_id = form.competition_id.data
    takeoff_time = form.takeoff_time.data
    scoring_start_time = form.scoring_start_time.data
    scoring_end_time = form.scoring_end_time.data
    landing_time = form.landing_time.data
    pilot_id = form.pilot_id.data
    pilot_name = form.pilot_name.data
    co_pilot_id = form.co_pilot_id.data
    co_pilot_name = form.co_pilot_name.data

    # Get flight from database and check if it is writable
    flight = Flight.get(flight_id)
    if not flight or not flight.is_writable(g.current_user):
        return False

    # Parse model, registration and competition ID
    # (0 / empty / over-length values are normalised to None)
    if model_id == 0:
        model_id = None

    if registration is not None:
        registration = registration.strip()
        if not 0 < len(registration) <= 32:
            registration = None

    if competition_id is not None:
        competition_id = competition_id.strip()
        if not 0 < len(competition_id) <= 5:
            competition_id = None

    if pilot_id == 0:
        pilot_id = None

    # Set new values
    if flight.pilot_id != pilot_id:
        flight.pilot_id = pilot_id
        # update club if pilot changed
        if pilot_id:
            flight.club_id = User.get(pilot_id).club_id
    flight.pilot_name = pilot_name if pilot_name else None
    flight.co_pilot_id = co_pilot_id if co_pilot_id != 0 else None
    flight.co_pilot_name = co_pilot_name if co_pilot_name else None
    flight.model_id = model_id
    flight.registration = registration
    flight.competition_id = competition_id
    flight.time_modified = datetime.utcnow()

    # Update times only if they are reasonable and have been changed...
    trigger_analysis = False
    if takeoff_time and scoring_start_time and scoring_end_time and landing_time \
            and takeoff_time <= scoring_start_time <= scoring_end_time <= landing_time \
            and (flight.takeoff_time != takeoff_time or
                 flight.scoring_start_time != scoring_start_time or
                 flight.scoring_end_time != scoring_end_time or
                 flight.landing_time != landing_time):
        flight.takeoff_time = takeoff_time
        flight.scoring_start_time = scoring_start_time
        flight.scoring_end_time = scoring_end_time
        flight.landing_time = landing_time
        trigger_analysis = True

    # publishing always makes the flight public
    flight.privacy_level = Flight.PrivacyLevel.PUBLIC
    db.session.commit()

    if trigger_analysis:
        analyse_flight(flight, fp=fp)

    # best-effort: queue background tasks; a Redis outage is only logged
    try:
        tasks.analyse_flight.delay(flight.id)
        tasks.find_meetings.delay(flight.id)
    except ConnectionError:
        current_app.logger.info('Cannot connect to Redis server')

    return True
@upload_blueprint.route('/airspace/<string:cache_key>/<int:airspace_id>.png')
def airspace_image(cache_key, airspace_id):
    """Render a PNG of an airspace infringement via MapServer.

    Uses the flight path and infringement periods that the upload views
    stored in the cache under ``cache_key``. Aborts with 404 when
    mapscript is unavailable or the cache entries expired.
    """
    if not mapscript_available:
        abort(404)

    # get information from cache...
    infringements = current_app.cache.get('upload_airspace_infringements_' + cache_key)
    flight_path = current_app.cache.get('upload_airspace_flight_path_' + cache_key)

    # abort if invalid cache key
    if not infringements \
            or not flight_path:
        abort(404)

    # Convert the coordinate into a list of tuples
    coordinates = [(c.location['longitude'], c.location['latitude']) for c in flight_path]

    # Create a shapely LineString object from the coordinates
    linestring = LineString(coordinates)

    # Save the new path as WKB
    locations = from_shape(linestring, srid=4326)

    highlight_locations = []
    # start from an "inverted" world extent so the min/max tracking below
    # converges on the real bounding box
    extent_epsg4326 = [180, 85.05112878, -180, -85.05112878]

    for period in infringements[airspace_id]:
        # Convert the coordinate into a list of tuples
        coordinates = [(c['location']['longitude'], c['location']['latitude']) for c in period]

        # Create a shapely LineString object from the coordinates
        if len(coordinates) == 1:
            # a LineString must contain at least two points...
            linestring = LineString([coordinates[0], coordinates[0]])
        else:
            linestring = LineString(coordinates)

        highlight_locations.append(linestring)

        # gather extent
        (minx, miny, maxx, maxy) = linestring.bounds

        extent_epsg4326[0] = min(extent_epsg4326[0], minx)
        extent_epsg4326[1] = min(extent_epsg4326[1], miny)
        extent_epsg4326[2] = max(extent_epsg4326[2], maxx)
        extent_epsg4326[3] = max(extent_epsg4326[3], maxy)

    # Save the new path as WKB
    highlight_multilinestring = from_shape(MultiLineString(highlight_locations), srid=4326)

    # increase extent by factor 1.05
    width = abs(extent_epsg4326[0] - extent_epsg4326[2])
    height = abs(extent_epsg4326[1] - extent_epsg4326[3])

    center_x = (extent_epsg4326[0] + extent_epsg4326[2]) / 2
    center_y = (extent_epsg4326[1] + extent_epsg4326[3]) / 2

    extent_epsg4326[0] = center_x - width / 2 * 1.05
    extent_epsg4326[1] = center_y - height / 2 * 1.05
    extent_epsg4326[2] = center_x + width / 2 * 1.05
    extent_epsg4326[3] = center_y + height / 2 * 1.05

    # minimum extent should be 0.3 deg
    width = abs(extent_epsg4326[0] - extent_epsg4326[2])
    height = abs(extent_epsg4326[1] - extent_epsg4326[3])

    if width < 0.3:
        extent_epsg4326[0] = center_x - 0.15
        extent_epsg4326[2] = center_x + 0.15

    if height < 0.3:
        extent_epsg4326[1] = center_y - 0.15
        extent_epsg4326[3] = center_y + 0.15

    # convert extent from EPSG4326 to EPSG3857
    epsg4326 = pyproj.Proj(init='epsg:4326')
    epsg3857 = pyproj.Proj(init='epsg:3857')

    x1, y1 = pyproj.transform(epsg4326, epsg3857, extent_epsg4326[0], extent_epsg4326[1])
    x2, y2 = pyproj.transform(epsg4326, epsg3857, extent_epsg4326[2], extent_epsg4326[3])

    extent_epsg3857 = [x1, y1, x2, y2]

    # load basemap and set size + extent
    basemap_path = os.path.join(current_app.config.get('SKYLINES_MAPSERVER_PATH'), 'basemap.map')
    map_object = mapscript.mapObj(basemap_path)
    map_object.setSize(400, 400)
    map_object.setExtent(extent_epsg3857[0], extent_epsg3857[1], extent_epsg3857[2], extent_epsg3857[3])

    # enable airspace and airports layers
    num_layers = map_object.numlayers
    for i in range(num_layers):
        layer = map_object.getLayer(i)
        if layer.group == 'Airports':
            layer.status = mapscript.MS_ON
        if layer.group == 'Airspace':
            layer.status = mapscript.MS_ON

    # get flights layer
    flights_layer = map_object.getLayerByName('Flights')
    highlight_layer = map_object.getLayerByName('Flights_Highlight')

    # set sql query for blue flight
    one = literal_column('1 as flight_id')
    flight_query = db.session.query(locations.label('flight_geometry'), one)

    flights_layer.data = 'flight_geometry FROM (' + query_to_sql(flight_query) + ')' + \
        ' AS foo USING UNIQUE flight_id USING SRID=4326'

    # set sql query for highlighted linestrings
    highlighted_query = db.session.query(highlight_multilinestring.label('flight_geometry'), one)

    highlight_layer.data = 'flight_geometry FROM (' + query_to_sql(highlighted_query) + ')' + \
        ' AS foo USING UNIQUE flight_id USING SRID=4326'
    highlight_layer.status = mapscript.MS_ON

    # get osm layer and set WMS url
    osm_layer = map_object.getLayerByName('OSM')
    osm_layer.connection = current_app.config.get('SKYLINES_MAP_TILE_URL') + \
        '/service?'

    # draw map
    map_image = map_object.draw()

    # get image
    mapscript.msIO_installStdoutToBuffer()
    map_image.write()
    content = mapscript.msIO_getStdoutBufferBytes()

    # return to client
    resp = make_response(content)
    resp.headers['Content-type'] = map_image.format.mimetype
    return resp
|
# -*- coding: utf-8 -*-
import numpy as np
import xgboost as xgb
def evalerror(preds, dtrain):
    """Custom xgboost eval: fraction of misclassified margin predictions."""
    labels = dtrain.get_label()
    # preds are raw margins (pre-logistic), so the decision cutoff is 0
    predicted_positive = preds > 0.0
    wrong = float(sum(labels != predicted_positive))
    return 'error', wrong / len(labels)
def cv(feature_prefix, feature_name, params, num_round=1000, early_stopping_rounds=10, kfolder=10):
    """Run k-fold cross validation and collect each fold's eval result.

    Expects pre-split fold data under ``<feature_prefix>/Folder<i>/``.
    """
    fold_results = []
    for fold in xrange(kfolder):
        base = '%s/Folder%d/%s' % (feature_prefix, fold, feature_name)
        _, eval_result = train(base + '.train.xgboost.4rank.txt',
                               base + '.test.xgboost.4rank.txt',
                               params, num_round, early_stopping_rounds,
                               evaluate=True)
        fold_results.append(eval_result)
    return fold_results
def train(train_f, test_f, params, num_round, early_stopping_rounds, evaluate=False):
    """Train a ranking model on ``train_f``.

    With ``evaluate=True``, ``test_f`` is the watchlist validation set
    and its eval string is returned; otherwise a validation matrix is
    derived from the train path and the raw ``dtest`` DMatrix is
    returned for later prediction.

    Returns ``(bst, dtest_or_eval_string)``.
    """
    # .txt.group files hold the query-group sizes required for ranking
    train_group_f = train_f.replace('.txt', '.txt.group')
    dtrain = xgb.DMatrix(train_f)
    dtest = xgb.DMatrix(test_f)
    dtrain.set_group(np.loadtxt(train_group_f).astype(int))
    if evaluate:
        test_group_f = test_f.replace('.txt', '.txt.group')
        dtest.set_group(np.loadtxt(test_group_f).astype(int))
    else:
        dval = xgb.DMatrix(train_f.replace('train', 'test'))
        dval.set_group(np.loadtxt(train_group_f.replace('train', 'test')).astype(int))
    if evaluate:
        watchlist = [(dtrain, 'train'), (dtest, 'valid')]
    else:
        watchlist = [(dtrain, 'train'), (dval, 'valid')]
    bst = xgb.train(params, dtrain, num_round, watchlist, obj=None,
                    feval=None, early_stopping_rounds=early_stopping_rounds)
    # parses as `bst, (dtest if not evaluate else bst.eval(dtest))`
    return bst, dtest if not evaluate else bst.eval(dtest)
def normed_by_group(preds, groups):
    """Normalise predictions to [0, 1]; singleton groups are zeroed.

    NOTE(review): despite the name, scaling uses the *global* min/max
    over all predictions, not per-group extrema -- confirm intended.
    Modifies ``preds`` in place and also returns it.
    """
    min_v = np.min(preds)
    max_v = np.max(preds)
    print min_v, max_v
    for lines in groups:
        if len(lines) == 1:
            # a single candidate cannot be ranked against anything
            preds[lines[0]] = 0
            continue
        tmp = preds[lines]
        candidates = (tmp - min_v) / (max_v - min_v)
        for i, l in enumerate(lines):
            preds[l] = candidates[i]
    return preds
def submit(bst, dtest):
    """Predict on ``dtest`` and write a ``submit.csv`` ranking file."""
    # make prediction
    preds = bst.predict(dtest)
    print preds.shape
    # group row indices of the validation file by question id
    groups = {}
    with open('../data/0_raw/validate_nolabel.txt', 'r') as fp:
        for i, line in enumerate(fp):
            qid, uid = line.strip().split()
            if qid in groups:
                groups[qid].append(i)
            else:
                groups[qid] = [i]
    preds = normed_by_group(preds, groups.values())
    # re-read the validation file in the same order to pair rows
    # with their normalised scores
    with open('submit.csv', 'w') as fo:
        fo.write('qid,uid,label\n')
        with open('../data/0_raw/validate_nolabel.txt', 'r') as fp:
            for i, line in enumerate(fp):
                fo.write(line.strip().replace('\t', ',') +
                         ',' + str(preds[i]) + '\n')
def gradsearch(feature_name='stat'):
    """Exhaustive grid search over xgboost hyper-parameters.

    Every parameter combination and its cross-validation output is
    appended to ``gradsearch.<feature_name>.rs.txt``.
    """
    fo = open('gradsearch.%s.rs.txt' % feature_name, 'w')
    min_child_weights = [1, 2, 5]
    max_depths = [3, 4, 5]
    etas = [0.01, 0.05, 0.1]
    max_delta_steps = [0, 1, 5, 10]
    subsamples = [0.5, 0.7, 1]
    colsample_bytrees = [0.5, 0.7, 1]
    scale_pos_weights = [1, 5, 10]
    for m1 in min_child_weights:
        for m2 in max_depths:
            for eta in etas:
                for m3 in max_delta_steps:
                    for subsample in subsamples:
                        for colsample_bytree in colsample_bytrees:
                            for w in scale_pos_weights:
                                params = {}
                                params['min_child_weight'] = m1
                                params['max_depth'] = m2
                                params['eta'] = eta
                                params['max_delta_step'] = m3
                                params['subsample'] = subsample
                                params['colsample_bytree'] = colsample_bytree
                                params['scale_pos_weight'] = w
                                params['silent'] = True
                                params['objective'] = 'reg:logistic'
                                # params['objective'] = 'rank:pairwise'
                                # params['objective'] = 'rank:ndcg'
                                params['eval_metric'] = ['ndcg@5-', 'ndcg@10-']
                                evals = cv('../feature/feature', feature_name, params,
                                           num_round=1000, early_stopping_rounds=50, kfolder=10)
                                # print('%d %d %f %d %f %f %d' % (m1, m2, eta, m3, subsample, colsample_bytree, w))
                                # print('\n'.join(evals) + '\n\n')
                                # NOTE(review): no newline after the params line, so the
                                # first eval string runs on -- confirm against log parser.
                                fo.write('%d %d %f %d %f %f %d' % (
                                    m1, m2, eta, m3, subsample, colsample_bytree, w))
                                fo.write('\n'.join(evals) + '\n\n')
                                fo.flush()
    fo.close()
# script entry: run the grid search over the selected feature set
feature_prefix = '../feature/feature'
# feature_name = 'stat'
feature_name = 'merge.stat_tags'
gradsearch(feature_name=feature_name)

# scratch code for training a single model and producing a submission file:
# params = {'min_child_weight': 1, 'max_depth': 3, 'eta': 0.1,
#           'max_delta_step': 1, 'subsample': 0.7, 'colsample_bytree': 0.7}
# params['scale_pos_weight'] = 1
# params['silent'] = True
# params['objective'] = 'reg:logistic'
# # params['objective'] = 'rank:pairwise'
# # params['objective'] = 'rank:ndcg'
# params['eval_metric'] = ['ndcg@5-', 'ndcg@10-']
# train_f = feature_prefix + '/Folder1/' + feature_name + '.train.xgboost.4rank.txt'
# test_f = feature_prefix + '/' + feature_name + '.test.xgboost.txt'
# bst, dtest = train(train_f, test_f, params, 1000, 100, evaluate=False)
# submit(bst, dtest)
save the target evaluation for cv
# -*- coding: utf-8 -*-
import numpy as np
import xgboost as xgb
def evalerror(preds, dtrain):
    """Return ('error', misclassification rate) for margin predictions.

    preds are raw margins (before the logistic transformation), so a
    prediction counts as positive when it exceeds 0.
    """
    labels = dtrain.get_label()
    mistakes = float(sum(labels != (preds > 0.0)))
    return 'error', mistakes / len(labels)
def cv(feature_prefix, feature_name, params, num_round=1000, early_stopping_rounds=10, kfolder=10):
    """Run k-fold cross validation and collect each fold's eval string.

    Expects pre-split fold data under ``<feature_prefix>/Folder<i>/``.
    """
    vals = []
    for i in xrange(kfolder):
        train_f = feature_prefix + '/Folder%d/' % i + \
            feature_name + '.train.xgboost.4rank.txt'
        test_f = feature_prefix + '/Folder%d/' % i + \
            feature_name + '.test.xgboost.4rank.txt'
        bst, eresult = train(train_f, test_f, params,
                             num_round, early_stopping_rounds, evaluate=True)
        vals.append(eresult)
    return vals
def train(train_f, test_f, params, num_round, early_stopping_rounds, evaluate=False):
    """Train a ranking model on ``train_f``.

    With ``evaluate=True``, ``test_f`` is the watchlist validation set
    and its eval string is returned; otherwise a validation matrix is
    derived from the train path and the raw ``dtest`` DMatrix is
    returned for later prediction.

    Returns ``(bst, dtest_or_eval_string)``.
    """
    # .txt.group files hold the query-group sizes required for ranking
    train_group_f = train_f.replace('.txt', '.txt.group')
    dtrain = xgb.DMatrix(train_f)
    dtest = xgb.DMatrix(test_f)
    dtrain.set_group(np.loadtxt(train_group_f).astype(int))
    if evaluate:
        test_group_f = test_f.replace('.txt', '.txt.group')
        dtest.set_group(np.loadtxt(test_group_f).astype(int))
    else:
        dval = xgb.DMatrix(train_f.replace('train', 'test'))
        dval.set_group(np.loadtxt(train_group_f.replace('train', 'test')).astype(int))
    if evaluate:
        watchlist = [(dtrain, 'train'), (dtest, 'valid')]
    else:
        watchlist = [(dtrain, 'train'), (dval, 'valid')]
    bst = xgb.train(params, dtrain, num_round, watchlist, obj=None,
                    feval=None, early_stopping_rounds=early_stopping_rounds)
    # parses as `bst, (dtest if not evaluate else bst.eval(dtest))`
    return bst, dtest if not evaluate else bst.eval(dtest)
def normed_by_group(preds, groups):
    """Normalise predictions to [0, 1]; singleton groups are zeroed.

    NOTE(review): despite the name, scaling uses the *global* min/max
    over all predictions, not per-group extrema -- confirm intended.
    Modifies ``preds`` in place and also returns it.
    """
    min_v = np.min(preds)
    max_v = np.max(preds)
    print min_v, max_v
    for lines in groups:
        if len(lines) == 1:
            # a single candidate cannot be ranked against anything
            preds[lines[0]] = 0
            continue
        tmp = preds[lines]
        candidates = (tmp - min_v) / (max_v - min_v)
        for i, l in enumerate(lines):
            preds[l] = candidates[i]
    return preds
def submit(bst, dtest):
    """Predict on ``dtest`` and write a ``submit.csv`` ranking file."""
    # make prediction
    preds = bst.predict(dtest)
    print preds.shape
    # group row indices of the validation file by question id
    groups = {}
    with open('../data/0_raw/validate_nolabel.txt', 'r') as fp:
        for i, line in enumerate(fp):
            qid, uid = line.strip().split()
            if qid in groups:
                groups[qid].append(i)
            else:
                groups[qid] = [i]
    preds = normed_by_group(preds, groups.values())
    # re-read the validation file in the same order to pair rows
    # with their normalised scores
    with open('submit.csv', 'w') as fo:
        fo.write('qid,uid,label\n')
        with open('../data/0_raw/validate_nolabel.txt', 'r') as fp:
            for i, line in enumerate(fp):
                fo.write(line.strip().replace('\t', ',') +
                         ',' + str(preds[i]) + '\n')
def gradsearch(feature_name='stat'):
    """Grid search over xgboost hyper-parameters, tracking the best
    average (ndcg@5 + ndcg@10) / 2 across cross-validation folds.

    All combinations and their scores are logged to
    ``gradsearch.<feature_name>.rs.txt``; the best combination is
    appended at the end.
    """
    fo = open('gradsearch.%s.rs.txt' % feature_name, 'w')
    min_child_weights = [1, 2, 5]
    max_depths = [2, 3, 4, 5]
    etas = [0.01, 0.05, 0.1]
    max_delta_steps = [0, 1, 5, 10]
    subsamples = [0.5, 0.7, 1]
    colsample_bytrees = [0.5, 0.7, 1]
    scale_pos_weights = [1, 5, 10]
    # best_result layout once updated:
    # (score, m1, m2, eta, m3, subsample, colsample_bytree, w)
    best_result = (0, )
    for m1 in min_child_weights:
        for m2 in max_depths:
            for eta in etas:
                for m3 in max_delta_steps:
                    for subsample in subsamples:
                        for colsample_bytree in colsample_bytrees:
                            for w in scale_pos_weights:
                                params = {}
                                params['min_child_weight'] = m1
                                params['max_depth'] = m2
                                params['eta'] = eta
                                params['max_delta_step'] = m3
                                params['subsample'] = subsample
                                params['colsample_bytree'] = colsample_bytree
                                params['scale_pos_weight'] = w
                                params['silent'] = True
                                params['objective'] = 'reg:logistic'
                                # params['objective'] = 'rank:pairwise'
                                # params['objective'] = 'rank:ndcg'
                                params['eval_metric'] = ['ndcg@5-', 'ndcg@10-']
                                evals = cv('../feature/feature', feature_name, params,
                                           num_round=1000, early_stopping_rounds=50, kfolder=10)
                                # average the two ndcg metrics over all folds;
                                # assumes each eval string contains
                                # "eval-ndcg@5-:<v>" and "eval-ndcg@10-:<v>"
                                # -- TODO confirm against xgboost's eval format
                                metrics = 0.
                                for eva in evals:
                                    eva_tmp = eva.split('eval-ndcg@', 2)
                                    ndcg_at5 = eva_tmp[1].strip().replace('5-:', '')
                                    ndcg_at10 = eva_tmp[2].strip().replace('10-:', '')
                                    metrics += (float(ndcg_at5) + float(ndcg_at10)) / 2
                                metrics /= len(evals)
                                if metrics > best_result[0]:
                                    best_result = (metrics, m1, m2, eta, m3, subsample, colsample_bytree, w)
                                fo.write('%d %d %f %d %f %f %d\n' % (
                                    m1, m2, eta, m3, subsample, colsample_bytree, w))
                                fo.write('\n'.join(evals) + '\n')
                                fo.write('average (ndcg@5 + ndcg@10)/2 %f\n\n' % metrics)
                                fo.flush()
    # NOTE(review): if no combination ever scores above 0, best_result is
    # still the 1-tuple (0,) and this format call raises TypeError.
    fo.write('the best params and result is\nndcg@5 + ndcg@10)/2 = %f\nparams is %d %d %f %d %f %f %d\n' % best_result)
    fo.close()
# script entry: run the grid search over the selected feature set
feature_prefix = '../feature/feature'
# feature_name = 'stat'
feature_name = 'merge.stat_tags'
gradsearch(feature_name=feature_name)

# scratch code for training a single model and producing a submission file:
# params = {'min_child_weight': 1, 'max_depth': 3, 'eta': 0.1,
#           'max_delta_step': 1, 'subsample': 0.7, 'colsample_bytree': 0.7}
# params['scale_pos_weight'] = 1
# params['silent'] = True
# params['objective'] = 'reg:logistic'
# # params['objective'] = 'rank:pairwise'
# # params['objective'] = 'rank:ndcg'
# params['eval_metric'] = ['ndcg@5-', 'ndcg@10-']
# train_f = feature_prefix + '/Folder1/' + feature_name + '.train.xgboost.4rank.txt'
# test_f = feature_prefix + '/' + feature_name + '.test.xgboost.txt'
# bst, dtest = train(train_f, test_f, params, 1000, 100, evaluate=False)
# submit(bst, dtest)
|
import unittest
from celery import Celery
from django.conf import settings
from django.db import connection
class SmokeTests(unittest.TestCase):
    """Basic environment smoke tests: database and celery connectivity."""

    def setUp(self):
        pass

    def test_can_access_db(self):
        "access the database"
        cursor = connection.cursor()
        cursor.execute('SELECT 1')
        row = cursor.fetchone()
        self.assertEqual(1, row[0])

    def test_can_access_celery(self):
        "connect to SQS"
        if not getattr(settings, 'CELERY_ALWAYS_EAGER', False):
            app = Celery('cla_backend')
            app.config_from_object('django.conf:settings')
            # Fix: keep a reference to the connection so it can be
            # released; the original called app.connection().connect()
            # and then used an unbound name `conn` -> NameError.
            conn = app.connection()
            conn.connect()
            conn.release()
Fix smoke test
import unittest
from celery import Celery
from django.conf import settings
from django.db import connection
class SmokeTests(unittest.TestCase):
    """Basic environment smoke tests: database and celery connectivity."""

    def setUp(self):
        pass

    def test_can_access_db(self):
        "access the database"
        cursor = connection.cursor()
        cursor.execute('SELECT 1')
        row = cursor.fetchone()
        self.assertEqual(1, row[0])

    def test_can_access_celery(self):
        "connect to SQS"
        if not getattr(settings, 'CELERY_ALWAYS_EAGER', False):
            app = Celery('cla_backend')
            app.config_from_object('django.conf:settings')
            # Fix: the original line read `conn = app.connection()i`,
            # a stray trailing `i` that is a syntax error.
            conn = app.connection()
            conn.connect()
            conn.release()
|
#!/usr/bin/python
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# ---------------------------------IMPORTS-------------------------------------
import os
import shutil
import subprocess
import sys
import math
from config import rfam_local as conf
from scripts.validation import genome_search_validator as gsv
from utils import genome_search_utils as gsu
from utils import scan_utils as su
# -----------------------------------------------------------------------------
#SPLIT_SIZE = 5427083
SPLIT_SIZE = 2000000
SRCH_MEM = 6000
SCAN_MEM = 6000
#RFAMSEQ_SIZE = 451031.997884 # size of rfamseq13 in Mb
RFAMSEQ_SIZE = 742849.287494 #
CM_NO = 3016 # number of cms in Rfam.cm file
CPU_NO = 5
SGROUP_IDX = 65 # subgroup index - Ascii for A
LSF_GROUP = "/rfam_srch_mpi/%s"
RFAM_SRCH_GROUP = "/rfam_search"
SUB_DIRS = 26 # max 26 subdirs as the number of the alphabet
MAX_JOBS = 50
MAX_FILES = 5000
# filtered - add this as a command line option
CREATE_SUBGROUP = "bgadd -L %s %s"
SEARCH_MPI = ("bsub -q mpi-rh7 -M %s -R \"rusage[mem=%s,tmp=2000]\" -o %s -e %s -n %s -g %s -R \"span[hosts=1]\" "
"-f \"%s < %s\" "
"-f \"%s < %s\" "
"-f \"%s < %s\" "
"-f \"%s < %s\" "
"-Ep \"rm /tmp/%s.*\" "
"-a openmpi mpiexec -mca btl ^openib -np %s "
"%s -o %s --tblout %s --acc --cut_ga --rfam --notextw --nohmmonly -Z %s --mpi %s %s")
GROUP_AND_SRCH_MPI = ("bsub -q mpi-rh7 -M %s -R \"rusage[mem=%s]\" -o %s -e %s -n %s -g %s -R \"span[hosts=1]\" "
"-f %s < /tmp/%J.out "
"-f %s < /tmp/%J.inf "
"-f %s < /tmp/%J.err "
"-f %s < /tmp/%J.tbl "
"-a openmpi mpiexec -mca btl ^openib -np %s "
"%s -o %s --tblout %s --acc --cut_ga --rfam --notextw --nohmmonly -Z %s --mpi %s %s")
"""
# unfiltered
CMD_TEMPLATE_MPI = ("bsub -q mpi-rh7 -M %s -R \"rusage[mem=%s]\" -o %s -e %s -n %s -g %s -R \"span[hosts=1]\" "
"-a openmpi mpiexec -mca btl ^openib -np %s "
"%s -o %s --tblout %s --acc --notextw --nohmmonly -Z %s --mpi %s %s")
"""
# -------------------------------------------------------------------------
def calculate_required_memory():
    """Placeholder for a per-job memory estimate.

    Currently always returns 0; the real calculation is not
    implemented yet.

    :return: the estimated memory requirement (always 0 for now)
    """
    # TODO: derive an actual estimate from the input sizes
    return 0
# -------------------------------------------------------------------------
def genome_scan_from_sequence_directory(input_dir, dest_dir, tool="cmsearch", size=None):
    """
    Launch one Infernal LSF job per sequence file (or per chunk of a large
    sequence file) found in input_dir, using cmsearch by default unless
    defined otherwise. Result locations are appended to
    dest_dir/result_locations.txt.

    :param input_dir: directory containing .fa/.fasta/.fna sequence files
    :param dest_dir: directory where per-genome output directories are created
    :param tool: "cmsearch" (default); any other value selects cmscan
    :param size: database size in Mb for Infernal's -Z option; if None it is
        computed from the sequence file, otherwise RFAMSEQ_SIZE is used.
        NOTE(review): size is reset to None at the end of each iteration, so
        a caller-supplied value only affects the first file - confirm intent.
    :return: void
    """
    fp = open(os.path.join(dest_dir,"result_locations.txt"),'w')

    # initialize output space
    if not os.path.exists(dest_dir):
        os.mkdir(dest_dir, 0775)

    # create subdirs (one bucket per letter A..Z)
    i = 0
    while i < SUB_DIRS:
        subdir = os.path.join(dest_dir, chr(SGROUP_IDX + i))
        if not os.path.exists(subdir):
            os.mkdir(subdir)
            os.chmod(subdir, 0775)
        i += 1

    # get candidate sequence files
    seq_files = [x for x in os.listdir(input_dir)
                 if x.endswith('.fa') or x.endswith('.fasta') or x.endswith('.fna')]

    out_idx = 0
    for seq_file in seq_files:
        # 1. get size
        seq_file_loc = os.path.join(input_dir, seq_file)
        if size is None:
            # size * 2 as both strands are being searched and divided by 1M as the size needs to be in Mb
            size = (float(su.get_nt_count(seq_file_loc, type="dna")) * 2.0)/1000000.0
        else:
            size = RFAMSEQ_SIZE

        filename = seq_file.partition('.')[0]

        # bucket the output dirs by letter only when there are many inputs
        if len(seq_files) > MAX_FILES:
            gen_output_dir = os.path.join(os.path.join(dest_dir, chr(SGROUP_IDX + out_idx)),
                                          filename)
        else:
            gen_output_dir = os.path.join(dest_dir, filename)

        fp.write(gen_output_dir+'\n')

        if not os.path.exists(gen_output_dir):
            os.mkdir(gen_output_dir, 0775)

        gen_input_dir = ''
        nts = gsu.count_nucleotides_in_fasta(seq_file_loc)

        # check if large seq file and make sure we can split in more than 1 files
        if nts >= SPLIT_SIZE and math.ceil(nts/SPLIT_SIZE) > 1:
            gen_input_dir = os.path.join(input_dir, filename)

            # create a distinct directory for the genome
            if not os.path.exists(gen_input_dir):
                os.mkdir(gen_input_dir, 0775)

            shutil.move(seq_file_loc, os.path.join(gen_input_dir, os.path.basename(seq_file_loc)))

            # new input file location
            seq_file_loc = os.path.join(gen_input_dir, os.path.basename(seq_file_loc))

            # split sequence file into smalled chunks and store under destination directory
            gsu.split_seq_file(seq_file_loc, SPLIT_SIZE, dest_dir=gen_input_dir)

            # list all smaller files
            # NOTE(review): this condition is always True (any name fails at
            # least one of the two endswith tests, and "!= -1" compares a
            # bool), so nothing is actually filtered out - confirm whether
            # .fa/.ssi files were meant to be excluded.
            genome_chunks = [x for x in os.listdir(gen_input_dir) if not x.endswith(".fa") or not x.endswith(".ssi") != -1]

            group_idx = out_idx
            # group_idx = 0

            for genome_chunk in genome_chunks:
                # index all sequence files
                chunk_loc = os.path.join(gen_input_dir, genome_chunk)
                gsu.index_sequence_file(chunk_loc)

                chunk_name = genome_chunk

                # final result locations on shared storage
                lsf_out_file = os.path.join(gen_output_dir, chunk_name + ".out")
                lsf_err_file = os.path.join(gen_output_dir, chunk_name + ".err")
                inf_tbl_file = os.path.join(gen_output_dir, chunk_name + ".tbl")
                inf_out_file = os.path.join(gen_output_dir, chunk_name + ".inf")

                # scratch copies on the execution host's /tmp
                tmp_out_file = os.path.join("/tmp", chunk_name + ".out")
                tmp_err_file = os.path.join("/tmp", chunk_name + ".err")
                tmp_inf_file = os.path.join("/tmp", chunk_name + ".inf")
                tmp_tbl_file = os.path.join("/tmp", chunk_name + ".tbl")

                # NOTE(review): Infernal is pointed at inf_out_file/inf_tbl_file
                # directly, while the "-f dest < src" pairs copy back the
                # tmp_inf/tmp_tbl files that Infernal never writes - confirm
                # which destination is intended. Also -Ep cleans
                # /tmp/<filename>.* while the tmp files are named after
                # chunk_name.
                if tool == 'cmsearch':
                    cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, tmp_out_file, tmp_err_file,
                                        CPU_NO, LSF_GROUP % (chr(SGROUP_IDX + group_idx)),
                                        lsf_out_file, tmp_out_file,
                                        lsf_err_file, tmp_err_file,
                                        inf_out_file, tmp_inf_file,
                                        inf_tbl_file, tmp_tbl_file,
                                        filename,
                                        CPU_NO, conf.CMSEARCH, inf_out_file,
                                        inf_tbl_file, size,
                                        conf.CMFILE, chunk_loc)
                else:
                    cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, tmp_out_file, tmp_err_file,
                                        CPU_NO, LSF_GROUP % (chr(SGROUP_IDX + group_idx)),
                                        lsf_out_file, tmp_out_file,
                                        lsf_err_file, tmp_err_file,
                                        inf_out_file, tmp_inf_file,
                                        inf_tbl_file, tmp_tbl_file,
                                        filename,
                                        CPU_NO, conf.CMSCAN, inf_out_file,
                                        inf_tbl_file, size,
                                        conf.CMFILE, chunk_loc)

                subprocess.call(cmd, shell=True)

        # small enough genomes that don't need splitting
        else:
            group_idx = out_idx
            gsu.index_sequence_file(seq_file_loc)

            lsf_out_file = os.path.join(gen_output_dir, filename + ".out")
            lsf_err_file = os.path.join(gen_output_dir, filename + ".err")
            inf_tbl_file = os.path.join(gen_output_dir, filename + ".tbl")
            inf_out_file = os.path.join(gen_output_dir, filename + ".inf")

            tmp_out_file = os.path.join("/tmp", filename + ".out")
            tmp_err_file = os.path.join("/tmp", filename + ".err")
            tmp_inf_file = os.path.join("/tmp", filename + ".inf")
            tmp_tbl_file = os.path.join("/tmp", filename + ".tbl")

            cmd = ''
            if tool == 'cmsearch':
                cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, tmp_out_file, tmp_err_file,
                                    CPU_NO, LSF_GROUP % (chr(SGROUP_IDX + group_idx)),
                                    lsf_out_file, tmp_out_file,
                                    lsf_err_file, tmp_err_file,
                                    inf_out_file, tmp_inf_file,
                                    inf_tbl_file, tmp_tbl_file,
                                    filename,
                                    CPU_NO, conf.CMSEARCH, inf_out_file,
                                    inf_tbl_file, size,
                                    conf.CMFILE, seq_file_loc)
            else:
                cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, tmp_out_file, tmp_err_file,
                                    CPU_NO, LSF_GROUP % (chr(SGROUP_IDX + group_idx)),
                                    lsf_out_file, tmp_out_file,
                                    lsf_err_file, tmp_err_file,
                                    inf_out_file, tmp_inf_file,
                                    inf_tbl_file, tmp_tbl_file,
                                    filename,
                                    CPU_NO, conf.CMSCAN, inf_out_file,
                                    inf_tbl_file, size,
                                    conf.CMFILE, seq_file_loc)

            subprocess.call(cmd, shell=True)

        # rotate through the A..Z output buckets / job groups
        out_idx += 1
        if out_idx == SUB_DIRS:
            out_idx = 0

        # recompute -Z for the next sequence file
        size = None

    fp.close()
# ------------------------------------------------------------------------------------------------
def genome_scan_from_download_directory(project_dir, upid_file, tool="cmsearch"):
"""
Search all genomes from within their download directories
project_dir: The path to a project directory generated by genome_downloader
upid_file: A file of upids to launch the searches with
tool: Infernal's search method used for genome annotation (cmsearch, cmscan). Defaults to
cmsearch
return: void
"""
# load upids from file
upid_fp = open(upid_file, 'r')
upids = [x.strip() for x in upid_fp]
upid_fp.close()
for upid in upids:
# get updir location
subdir = os.path.join(project_dir, upid[-3:])
updir = os.path.join(subdir, upid)
# generate chunks - do this bit withing the search job in order to parallelize it.
# if sequence chunk directory exists then don't generate it again and use that to
# re-launch the searches
upid_fasta = os.path.join(updir, upid + '.fa')
seq_chunks_dir = os.path.join(updir, "search_chunks")
# check if the genome sequence file has already been split
if not os.path.exists(seq_chunks_dir):
os.mkdir(seq_chunks_dir)
os.chmod(seq_chunks_dir, 0777)
# check if we need to split the seq_file
if gsu.count_nucleotides_in_fasta(upid_fasta) >= SPLIT_SIZE:
# split sequence file into smalled chunks and store under destination directory
gsu.split_seq_file(upid_fasta, SPLIT_SIZE, dest_dir=seq_chunks_dir)
# For all inputs to be consistent, if the sequence file is small,
# copy it in the search_chunks directory
else:
# copy file
shutil.copyfile(upid_fasta, os.path.join(seq_chunks_dir, upid + '.fa'))
# index file
cmd = "%s --index %s" % (conf.ESL_SFETCH,
os.path.join(seq_chunks_dir, upid + '.fa'))
subprocess.call(cmd, shell=True)
# Create a search directory
search_output_dir = os.path.join(updir, "search_output")
if not os.path.exists(search_output_dir):
os.mkdir(search_output_dir)
os.chmod(search_output_dir, 0777)
# List all smaller files. Using list comprehension to filter out other contents
genome_chunks = [x for x in os.listdir(seq_chunks_dir) if not x.endswith('.ssi')]
for seq_file in genome_chunks:
cmd = ''
# index all sequence files
seq_file_loc = os.path.join(seq_chunks_dir, seq_file)
gsu.index_sequence_file(seq_file_loc)
chunk_name = seq_file
lsf_out_file = os.path.join(search_output_dir, chunk_name + ".out")
lsf_err_file = os.path.join(search_output_dir, chunk_name + ".err")
inf_tbl_file = os.path.join(search_output_dir, chunk_name + ".tbl")
inf_out_file = os.path.join(search_output_dir, chunk_name + ".inf")
if tool == 'cmsearch':
cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, lsf_out_file, lsf_err_file,
CPU_NO, RFAM_SRCH_GROUP,
CPU_NO, conf.CMSEARCH, inf_out_file,
inf_tbl_file, RFAMSEQ_SIZE,
conf.CMFILE, seq_file_loc)
else:
cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, lsf_out_file, lsf_err_file,
CPU_NO, RFAM_SRCH_GROUP,
CPU_NO, conf.CMSCAN, inf_out_file,
inf_tbl_file, RFAMSEQ_SIZE,
conf.CMFILE, seq_file_loc)
subprocess.call(cmd, shell=True)
# ------------------------------------------------------------------------------------------------
def single_genome_scan_from_download_directory(updir, upid, tool="cmsearch"):
    """
    Launch Infernal searches for a single genome from within its download
    directory (one LSF job per sequence chunk).

    updir: The path to a genome directory generated by genome_downloader
    upid: A valid upid of the genome to be searched
    tool: Infernal's search method used for genome annotation (cmsearch,
          cmscan). Defaults to cmsearch
    return: void
    """
    # generate chunks - do this bit withing the search job in order to parallelize it.
    # if sequence chunk directory exists then don't generate it again and use that to
    # re-launch the searches
    upid_fasta = os.path.join(updir, upid + '.fa')
    seq_chunks_dir = os.path.join(updir, "search_chunks")

    # check if the genome sequence file has already been split
    if not os.path.exists(seq_chunks_dir):
        os.mkdir(seq_chunks_dir)
        os.chmod(seq_chunks_dir, 0777)

        # check if we need to split the seq_file
        if gsu.count_nucleotides_in_fasta(upid_fasta) >= SPLIT_SIZE:
            # split sequence file into smalled chunks and store under destination directory
            gsu.split_seq_file(upid_fasta, SPLIT_SIZE, dest_dir=seq_chunks_dir)

            # now index the fasta files
            seq_files = os.listdir(seq_chunks_dir)
            for seq_file in seq_files:
                seq_file_loc = os.path.join(seq_chunks_dir, seq_file)
                cmd = "%s --index %s" % (conf.ESL_SFETCH, seq_file_loc)
                subprocess.call(cmd, shell=True)

        # For all inputs to be consistent, if the sequence file is small,
        # copy it in the search_chunks directory
        else:
            # copy file
            shutil.copyfile(upid_fasta, os.path.join(seq_chunks_dir, upid + '.fa'))
            # index file
            cmd = "%s --index %s" % (conf.ESL_SFETCH, os.path.join(seq_chunks_dir, upid + '.fa'))
            subprocess.call(cmd, shell=True)

    # Create a search directory
    search_output_dir = os.path.join(updir, "search_output")
    if not os.path.exists(search_output_dir):
        os.mkdir(search_output_dir)
        os.chmod(search_output_dir, 0777)

    # List all smaller files. Using list comprehension to filter out other contents
    genome_chunks = [x for x in os.listdir(seq_chunks_dir) if not x.endswith('.ssi')]

    # check and set search method selected
    search_method = None
    if tool == 'cmsearch':
        search_method = conf.CMSEARCH
    else:
        search_method = conf.CMSCAN

    for seq_file in genome_chunks:
        cmd = ''
        # index all sequence files
        seq_file_loc = os.path.join(seq_chunks_dir, seq_file)

        chunk_name = seq_file
        # final result locations on shared storage
        lsf_out_file = os.path.join(search_output_dir, chunk_name + ".out")
        lsf_err_file = os.path.join(search_output_dir, chunk_name + ".err")
        inf_tbl_file = os.path.join(search_output_dir, chunk_name + ".tbl")
        inf_out_file = os.path.join(search_output_dir, chunk_name + ".inf")
        # scratch copies on the execution host; bsub's -f options copy them
        # back when the job completes, -Ep removes them afterwards
        tmp_out_file = os.path.join("/tmp", chunk_name + ".out")
        tmp_err_file = os.path.join("/tmp", chunk_name + ".err")
        tmp_tbl_file = os.path.join("/tmp", chunk_name + ".tbl")
        tmp_inf_file = os.path.join("/tmp", chunk_name + ".inf")

        # fills all 22 SEARCH_MPI placeholders: bsub options, copy-back
        # pairs, cleanup basename, worker count and Infernal arguments
        cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, tmp_out_file, tmp_err_file,
                            CPU_NO, RFAM_SRCH_GROUP,
                            lsf_out_file, tmp_out_file,
                            lsf_err_file, tmp_err_file,
                            inf_tbl_file, tmp_tbl_file,
                            inf_out_file, tmp_inf_file, chunk_name,
                            CPU_NO, search_method, tmp_inf_file,
                            tmp_tbl_file, RFAMSEQ_SIZE,
                            conf.CMFILE, seq_file_loc)
        subprocess.call(cmd, shell=True)
# ------------------------------------------------------------------------------------------------
def restore_io_paths(input_dir, output_dir):
    """
    Pair the input and output locations of failed search jobs so they can
    be re-launched.

    ** Will need to simplify this

    input_dir: The path to the input directory as organized by genome_scanner
    output_dir: The path to the output directory of the run we want to restore

    return: A list of (input_path, output_path) tuples
    """
    # err_cases maps: output subdir -> genome -> [files whose jobs errored]
    err_cases = gsv.check_search_err_files(output_dir)

    io_pairs = []
    for subdir, genomes in err_cases.items():
        subdir_out = os.path.join(output_dir, subdir)
        for genome, err_files in genomes.items():
            genome_out = os.path.join(subdir_out, genome)
            genome_in = os.path.join(input_dir, genome)
            # pair each failed file's source with its result location;
            # assumes the input dir mirrors the genome layout
            for err_file in err_files:
                io_pairs.append((os.path.join(genome_in, err_file),
                                 os.path.join(genome_out, err_file)))

    return io_pairs
# -------------------------------------------------------------------------
def restore_jobs_with_multi_cms(cm_dir, input_dir, output_dir):
    """
    Restore search jobs by re-scanning the failed sequence files with the
    smaller per-family cm files.

    cm_dir: The path to the CM directory. If None, use default
    input_dir: The path to the input directory as organized by genome_scanner
    output_dir: The path to the output directory of the run we want to restore
    returns: void
    """
    io_path_list = restore_io_paths(input_dir, output_dir)

    # split cm files carry no extension
    cms = [x for x in os.listdir(cm_dir) if x.find('.') == -1]

    for cm_file in cms:
        cm_file_loc = os.path.join(cm_dir, cm_file)

        for err_case in io_path_list:
            seq_file = err_case[0]
            out_file = err_case[1]

            # cleanup old files from the crashed run
            for extension in (".out", ".err", ".tbl", ".inf"):
                if os.path.exists(out_file + extension):
                    os.remove(out_file + extension)

            # final result locations, one set per (sequence, cm) pair
            lsf_out_file = out_file + '_' + cm_file + ".out"
            lsf_err_file = out_file + '_' + cm_file + ".err"
            inf_tbl_file = out_file + '_' + cm_file + ".tbl"
            inf_out_file = out_file + '_' + cm_file + ".inf"

            # scratch names on the execution host's /tmp; bsub's -f options
            # copy them back and -Ep removes them (mirrors the launchers)
            chunk_name = os.path.basename(out_file) + '_' + cm_file
            tmp_out_file = os.path.join("/tmp", chunk_name + ".out")
            tmp_err_file = os.path.join("/tmp", chunk_name + ".err")
            tmp_tbl_file = os.path.join("/tmp", chunk_name + ".tbl")
            tmp_inf_file = os.path.join("/tmp", chunk_name + ".inf")

            # the subgroup letter is the subdir component of the output path
            lsf_subgroup = out_file.split('/')[-3]

            # BUG FIX: SEARCH_MPI has 22 "%s" placeholders but only 13
            # values were supplied here, so every re-submission raised
            # "not enough arguments for format string". The argument list
            # now fills all placeholders in template order.
            cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, tmp_out_file, tmp_err_file,
                                CPU_NO, LSF_GROUP % lsf_subgroup,
                                lsf_out_file, tmp_out_file,
                                lsf_err_file, tmp_err_file,
                                inf_tbl_file, tmp_tbl_file,
                                inf_out_file, tmp_inf_file, chunk_name,
                                CPU_NO, conf.CMSEARCH, tmp_inf_file,
                                tmp_tbl_file, RFAMSEQ_SIZE,
                                cm_file_loc, seq_file)
            subprocess.call(cmd, shell=True)
# -------------------------------------------------------------------------
def multi_cm_sequence_scan(cm_dir, sequence_dir, tool="cmsearch", seqdb_size=None, dest_dir=None):
    """
    This function treats each covariance model individually and launches
    a job for each sequence file found in sequence_dir.

    cm_dir: A directory with all CM models to scan
    sequence_dir: A directory with fasta sequence files to scan
    tool: "cmsearch" (default); any other value selects cmscan
    seqdb_size: database size in Mb for Infernal's -Z; defaults to RFAMSEQ_SIZE
    dest_dir: The path to the destination directory.
        NOTE(review): despite the None default, dest_dir is required - a None
        value crashes in os.path.join(dest_dir, rfam_acc) below.
    return: void
    """
    # list all covariance models and sequence files
    cms = [x for x in os.listdir(cm_dir)
           if x.endswith('.CM') or x.endswith('.cm')]
    seq_files = [x for x in os.listdir(sequence_dir)
                 if not x.endswith('.ssi')]

    # create the destination directory if necessary
    if dest_dir is not None:
        if not os.path.exists(dest_dir):
            os.mkdir(dest_dir)

    # sequence db size is None set to Rfamseq size
    if seqdb_size is None:
        seqdb_size = RFAMSEQ_SIZE

    search_method = ''
    if tool == 'cmsearch':
        search_method = conf.CMSEARCH
    else:
        search_method = conf.CMSCAN

    # now launch the searches
    for cm in cms:
        # create an individual result directory per model
        rfam_acc = cm.partition('.')[0]
        family_dir = os.path.join(dest_dir, rfam_acc)
        cm_path = os.path.join(cm_dir, cm)

        if not os.path.exists(family_dir):
            os.mkdir(family_dir)

        for seq_file in seq_files:
            seq_file_path = os.path.join(sequence_dir, seq_file)

            # strip the extension to name the result files
            # NOTE(review): a seq file without a "." leaves filename empty,
            # so its results land in "/tmp/.out" etc. and extension-less
            # files collide with each other - confirm inputs always carry
            # an extension.
            filename = ''
            if seq_file.find(".") != -1:
                filename = seq_file.partition('.')[0]

            # final result locations on shared storage
            lsf_out_file = os.path.join(family_dir, filename + ".out")
            lsf_err_file = os.path.join(family_dir, filename + ".err")
            inf_tbl_file = os.path.join(family_dir, filename + ".tbl")
            inf_out_file = os.path.join(family_dir, filename + ".inf")

            # scratch copies on the execution host's /tmp
            tmp_out_file = os.path.join("/tmp", filename + ".out")
            tmp_err_file = os.path.join("/tmp", filename + ".err")
            tmp_tbl_file = os.path.join("/tmp", filename + ".tbl")
            tmp_inf_file = os.path.join("/tmp", filename + ".inf")

            # fills all 22 SEARCH_MPI placeholders: bsub options, copy-back
            # pairs, -Ep cleanup basename, worker count, Infernal arguments
            cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, tmp_out_file, tmp_err_file,
                                CPU_NO, RFAM_SRCH_GROUP,
                                lsf_out_file, tmp_out_file,
                                lsf_err_file, tmp_err_file,
                                inf_tbl_file, tmp_tbl_file,
                                inf_out_file, tmp_inf_file, filename,
                                CPU_NO, search_method, tmp_inf_file,
                                tmp_tbl_file, seqdb_size,
                                cm_path, seq_file_path)
            subprocess.call(cmd, shell=True)
# -------------------------------------------------------------------------
# Entry point: dispatch on a mode flag that may appear anywhere in argv.
# NOTE(review): the positional arguments are read from argv[1..3], so the
# mode flag must be passed AFTER them on the command line, e.g.
# "genome_scanner.py CM_DIR INPUT_DIR DEST_DIR --restore" - confirm this is
# the documented calling convention.
if __name__ == '__main__':
    """
    TO DO:
    - update function that restores crashed searches
    - implement this as a luigi pipeline
    """
    # restore searches if --restore option is provided
    if '--restore' in sys.argv:
        cm_dir = sys.argv[1]  # a directory of split cms
        input_dir = sys.argv[2]
        dest_dir = sys.argv[3]
        restore_jobs_with_multi_cms(cm_dir, input_dir, dest_dir)

    # launch one job per (CM file, sequence file) pair
    elif '--multi' in sys.argv:
        cm_dir = sys.argv[1]
        sequence_dir = sys.argv[2]
        dest_dir = sys.argv[3]
        multi_cm_sequence_scan(cm_dir, sequence_dir, tool="cmsearch",
                               seqdb_size=None, dest_dir=dest_dir)

    # scan genomes laid out in a genome_downloader project directory
    elif '--project' in sys.argv:
        project_dir = sys.argv[1]
        upid_input = sys.argv[2]

        # a file that contains a list of upids
        if os.path.isfile(upid_input):
            upid_fp = open(upid_input, 'r')
            upids = [x.strip() for x in upid_fp]
            upid_fp.close()

            for upid in upids:
                # genomes are bucketed on the last 3 chars of the upid
                suffix = upid[-3:]
                updir = os.path.join(os.path.join(project_dir, suffix), upid)
                single_genome_scan_from_download_directory(updir, upid, tool="cmsearch")

        # single upid
        else:
            upid = upid_input
            suffix = upid_input[-3:]
            updir = os.path.join(os.path.join(project_dir, suffix), upid)
            single_genome_scan_from_download_directory(updir, upid, tool="cmsearch")

    # this option allows the user to launch batch genome search from within a single directory
    elif '--batch' in sys.argv:
        input_dir = sys.argv[1]
        dest_dir = sys.argv[2]
        genome_scan_from_sequence_directory(input_dir, dest_dir, tool="cmsearch", size=None)

    else:
        print "Wrong Input"
        # need to implement a help function
# Keep old output structure
#!/usr/bin/python
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# ---------------------------------IMPORTS-------------------------------------
import os
import shutil
import subprocess
import sys
import math
from config import rfam_local as conf
from scripts.validation import genome_search_validator as gsv
from utils import genome_search_utils as gsu
from utils import scan_utils as su
# -----------------------------------------------------------------------------
#SPLIT_SIZE = 5427083
SPLIT_SIZE = 2000000
SRCH_MEM = 6000
SCAN_MEM = 6000
#RFAMSEQ_SIZE = 451031.997884 # size of rfamseq13 in Mb
RFAMSEQ_SIZE = 742849.287494 #
CM_NO = 3016 # number of cms in Rfam.cm file
CPU_NO = 5
SGROUP_IDX = 65 # subgroup index - Ascii for A
LSF_GROUP = "/rfam_srch_mpi/%s"
RFAM_SRCH_GROUP = "/rfam_search"
SUB_DIRS = 26 # max 26 subdirs as the number of the alphabet
MAX_JOBS = 50
# filtered - add this as a command line option
CREATE_SUBGROUP = "bgadd -L %s %s"
SEARCH_MPI = ("bsub -q mpi-rh7 -M %s -R \"rusage[mem=%s,tmp=2000]\" -o %s -e %s -n %s -g %s -R \"span[hosts=1]\" "
"-f \"%s < %s\" "
"-f \"%s < %s\" "
"-f \"%s < %s\" "
"-f \"%s < %s\" "
"-Ep \"rm /tmp/%s.*\" "
"-a openmpi mpiexec -mca btl ^openib -np %s "
"%s -o %s --tblout %s --acc --cut_ga --rfam --notextw --nohmmonly -Z %s --mpi %s %s")
GROUP_AND_SRCH_MPI = ("bsub -q mpi-rh7 -M %s -R \"rusage[mem=%s]\" -o %s -e %s -n %s -g %s -R \"span[hosts=1]\" "
"-f %s < /tmp/%J.out "
"-f %s < /tmp/%J.inf "
"-f %s < /tmp/%J.err "
"-f %s < /tmp/%J.tbl "
"-a openmpi mpiexec -mca btl ^openib -np %s "
"%s -o %s --tblout %s --acc --cut_ga --rfam --notextw --nohmmonly -Z %s --mpi %s %s")
"""
# unfiltered
CMD_TEMPLATE_MPI = ("bsub -q mpi-rh7 -M %s -R \"rusage[mem=%s]\" -o %s -e %s -n %s -g %s -R \"span[hosts=1]\" "
"-a openmpi mpiexec -mca btl ^openib -np %s "
"%s -o %s --tblout %s --acc --notextw --nohmmonly -Z %s --mpi %s %s")
"""
# -------------------------------------------------------------------------
def calculate_required_memory():
    """Stub memory estimator for search jobs.

    The calculation has not been written yet; callers always receive 0.

    :return: estimated memory requirement, currently fixed at 0
    """
    estimate = 0
    return estimate
# -------------------------------------------------------------------------
def genome_scan_from_sequence_directory(input_dir, dest_dir, tool="cmsearch", size=None):
    """
    Launch one Infernal LSF job per sequence file (or per chunk of a large
    sequence file) found in input_dir, using cmsearch by default unless
    defined otherwise. Result locations are appended to
    dest_dir/result_locations.txt.

    :param input_dir: directory containing .fa/.fasta/.fna sequence files
    :param dest_dir: directory where per-genome output directories are created
    :param tool: "cmsearch" (default); any other value selects cmscan
    :param size: database size in Mb for Infernal's -Z option; if None it is
        computed from the sequence file, otherwise RFAMSEQ_SIZE is used.
        NOTE(review): size is reset to None at the end of each iteration, so
        a caller-supplied value only affects the first file - confirm intent.
    :return: void
    """
    fp = open(os.path.join(dest_dir,"result_locations.txt"),'w')

    # initialize output space
    if not os.path.exists(dest_dir):
        os.mkdir(dest_dir, 0775)

    # create subdirs (one bucket per letter A..Z)
    i = 0
    while i < SUB_DIRS:
        subdir = os.path.join(dest_dir, chr(SGROUP_IDX + i))
        if not os.path.exists(subdir):
            os.mkdir(subdir)
            os.chmod(subdir, 0775)
        i += 1

    # get candidate sequence files
    seq_files = [x for x in os.listdir(input_dir)
                 if x.endswith('.fa') or x.endswith('.fasta') or x.endswith('.fna')]

    out_idx = 0
    for seq_file in seq_files:
        # 1. get size
        seq_file_loc = os.path.join(input_dir, seq_file)
        if size is None:
            # size * 2 as both strands are being searched and divided by 1M as the size needs to be in Mb
            size = (float(su.get_nt_count(seq_file_loc, type="dna")) * 2.0)/1000000.0
        else:
            size = RFAMSEQ_SIZE

        filename = seq_file.partition('.')[0]

        # output dir lives in the current letter bucket
        gen_output_dir = os.path.join(os.path.join(dest_dir, chr(SGROUP_IDX + out_idx)),
                                      filename)

        fp.write(gen_output_dir+'\n')

        if not os.path.exists(gen_output_dir):
            os.mkdir(gen_output_dir, 0775)

        gen_input_dir = ''
        nts = gsu.count_nucleotides_in_fasta(seq_file_loc)

        # check if large seq file and make sure we can split in more than 1 files
        if nts >= SPLIT_SIZE and math.ceil(nts/SPLIT_SIZE) > 1:
            gen_input_dir = os.path.join(input_dir, filename)

            # create a distinct directory for the genome
            if not os.path.exists(gen_input_dir):
                os.mkdir(gen_input_dir, 0775)

            shutil.move(seq_file_loc, os.path.join(gen_input_dir, os.path.basename(seq_file_loc)))

            # new input file location
            seq_file_loc = os.path.join(gen_input_dir, os.path.basename(seq_file_loc))

            # split sequence file into smalled chunks and store under destination directory
            gsu.split_seq_file(seq_file_loc, SPLIT_SIZE, dest_dir=gen_input_dir)

            # list all smaller files
            # NOTE(review): this condition is always True (any name fails at
            # least one of the two endswith tests, and "!= -1" compares a
            # bool), so nothing is actually filtered out - confirm whether
            # .fa/.ssi files were meant to be excluded.
            genome_chunks = [x for x in os.listdir(gen_input_dir) if not x.endswith(".fa") or not x.endswith(".ssi") != -1]

            group_idx = out_idx
            # group_idx = 0

            for genome_chunk in genome_chunks:
                # index all sequence files
                chunk_loc = os.path.join(gen_input_dir, genome_chunk)
                gsu.index_sequence_file(chunk_loc)

                chunk_name = genome_chunk

                # final result locations on shared storage
                lsf_out_file = os.path.join(gen_output_dir, chunk_name + ".out")
                lsf_err_file = os.path.join(gen_output_dir, chunk_name + ".err")
                inf_tbl_file = os.path.join(gen_output_dir, chunk_name + ".tbl")
                inf_out_file = os.path.join(gen_output_dir, chunk_name + ".inf")

                # scratch copies on the execution host's /tmp
                tmp_out_file = os.path.join("/tmp", chunk_name + ".out")
                tmp_err_file = os.path.join("/tmp", chunk_name + ".err")
                tmp_inf_file = os.path.join("/tmp", chunk_name + ".inf")
                tmp_tbl_file = os.path.join("/tmp", chunk_name + ".tbl")

                # NOTE(review): Infernal is pointed at inf_out_file/inf_tbl_file
                # directly, while the "-f dest < src" pairs copy back the
                # tmp_inf/tmp_tbl files that Infernal never writes - confirm
                # which destination is intended. Also -Ep cleans
                # /tmp/<filename>.* while the tmp files are named after
                # chunk_name.
                if tool == 'cmsearch':
                    cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, tmp_out_file, tmp_err_file,
                                        CPU_NO, LSF_GROUP % (chr(SGROUP_IDX + group_idx)),
                                        lsf_out_file, tmp_out_file,
                                        lsf_err_file, tmp_err_file,
                                        inf_out_file, tmp_inf_file,
                                        inf_tbl_file, tmp_tbl_file,
                                        filename,
                                        CPU_NO, conf.CMSEARCH, inf_out_file,
                                        inf_tbl_file, size,
                                        conf.CMFILE, chunk_loc)
                else:
                    cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, tmp_out_file, tmp_err_file,
                                        CPU_NO, LSF_GROUP % (chr(SGROUP_IDX + group_idx)),
                                        lsf_out_file, tmp_out_file,
                                        lsf_err_file, tmp_err_file,
                                        inf_out_file, tmp_inf_file,
                                        inf_tbl_file, tmp_tbl_file,
                                        filename,
                                        CPU_NO, conf.CMSCAN, inf_out_file,
                                        inf_tbl_file, size,
                                        conf.CMFILE, chunk_loc)

                subprocess.call(cmd, shell=True)

        # small enough genomes that don't need splitting
        else:
            group_idx = out_idx
            gsu.index_sequence_file(seq_file_loc)

            lsf_out_file = os.path.join(gen_output_dir, filename + ".out")
            lsf_err_file = os.path.join(gen_output_dir, filename + ".err")
            inf_tbl_file = os.path.join(gen_output_dir, filename + ".tbl")
            inf_out_file = os.path.join(gen_output_dir, filename + ".inf")

            tmp_out_file = os.path.join("/tmp", filename + ".out")
            tmp_err_file = os.path.join("/tmp", filename + ".err")
            tmp_inf_file = os.path.join("/tmp", filename + ".inf")
            tmp_tbl_file = os.path.join("/tmp", filename + ".tbl")

            cmd = ''
            if tool == 'cmsearch':
                cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, tmp_out_file, tmp_err_file,
                                    CPU_NO, LSF_GROUP % (chr(SGROUP_IDX + group_idx)),
                                    lsf_out_file, tmp_out_file,
                                    lsf_err_file, tmp_err_file,
                                    inf_out_file, tmp_inf_file,
                                    inf_tbl_file, tmp_tbl_file,
                                    filename,
                                    CPU_NO, conf.CMSEARCH, inf_out_file,
                                    inf_tbl_file, size,
                                    conf.CMFILE, seq_file_loc)
            else:
                cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, tmp_out_file, tmp_err_file,
                                    CPU_NO, LSF_GROUP % (chr(SGROUP_IDX + group_idx)),
                                    lsf_out_file, tmp_out_file,
                                    lsf_err_file, tmp_err_file,
                                    inf_out_file, tmp_inf_file,
                                    inf_tbl_file, tmp_tbl_file,
                                    filename,
                                    CPU_NO, conf.CMSCAN, inf_out_file,
                                    inf_tbl_file, size,
                                    conf.CMFILE, seq_file_loc)

            subprocess.call(cmd, shell=True)

        # rotate through the A..Z output buckets / job groups
        out_idx += 1
        if out_idx == SUB_DIRS:
            out_idx = 0

        # recompute -Z for the next sequence file
        size = None

    fp.close()
# ------------------------------------------------------------------------------------------------
def genome_scan_from_download_directory(project_dir, upid_file, tool="cmsearch"):
"""
Search all genomes from within their download directories
project_dir: The path to a project directory generated by genome_downloader
upid_file: A file of upids to launch the searches with
tool: Infernal's search method used for genome annotation (cmsearch, cmscan). Defaults to
cmsearch
return: void
"""
# load upids from file
upid_fp = open(upid_file, 'r')
upids = [x.strip() for x in upid_fp]
upid_fp.close()
for upid in upids:
# get updir location
subdir = os.path.join(project_dir, upid[-3:])
updir = os.path.join(subdir, upid)
# generate chunks - do this bit withing the search job in order to parallelize it.
# if sequence chunk directory exists then don't generate it again and use that to
# re-launch the searches
upid_fasta = os.path.join(updir, upid + '.fa')
seq_chunks_dir = os.path.join(updir, "search_chunks")
# check if the genome sequence file has already been split
if not os.path.exists(seq_chunks_dir):
os.mkdir(seq_chunks_dir)
os.chmod(seq_chunks_dir, 0777)
# check if we need to split the seq_file
if gsu.count_nucleotides_in_fasta(upid_fasta) >= SPLIT_SIZE:
# split sequence file into smalled chunks and store under destination directory
gsu.split_seq_file(upid_fasta, SPLIT_SIZE, dest_dir=seq_chunks_dir)
# For all inputs to be consistent, if the sequence file is small,
# copy it in the search_chunks directory
else:
# copy file
shutil.copyfile(upid_fasta, os.path.join(seq_chunks_dir, upid + '.fa'))
# index file
cmd = "%s --index %s" % (conf.ESL_SFETCH,
os.path.join(seq_chunks_dir, upid + '.fa'))
subprocess.call(cmd, shell=True)
# Create a search directory
search_output_dir = os.path.join(updir, "search_output")
if not os.path.exists(search_output_dir):
os.mkdir(search_output_dir)
os.chmod(search_output_dir, 0777)
# List all smaller files. Using list comprehension to filter out other contents
genome_chunks = [x for x in os.listdir(seq_chunks_dir) if not x.endswith('.ssi')]
for seq_file in genome_chunks:
cmd = ''
# index all sequence files
seq_file_loc = os.path.join(seq_chunks_dir, seq_file)
gsu.index_sequence_file(seq_file_loc)
chunk_name = seq_file
lsf_out_file = os.path.join(search_output_dir, chunk_name + ".out")
lsf_err_file = os.path.join(search_output_dir, chunk_name + ".err")
inf_tbl_file = os.path.join(search_output_dir, chunk_name + ".tbl")
inf_out_file = os.path.join(search_output_dir, chunk_name + ".inf")
if tool == 'cmsearch':
cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, lsf_out_file, lsf_err_file,
CPU_NO, RFAM_SRCH_GROUP,
CPU_NO, conf.CMSEARCH, inf_out_file,
inf_tbl_file, RFAMSEQ_SIZE,
conf.CMFILE, seq_file_loc)
else:
cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, lsf_out_file, lsf_err_file,
CPU_NO, RFAM_SRCH_GROUP,
CPU_NO, conf.CMSCAN, inf_out_file,
inf_tbl_file, RFAMSEQ_SIZE,
conf.CMFILE, seq_file_loc)
subprocess.call(cmd, shell=True)
# ------------------------------------------------------------------------------------------------
def single_genome_scan_from_download_directory(updir, upid, tool="cmsearch"):
    """
    Launch Infernal searches for a single genome from within its download
    directory (one LSF job per sequence chunk).

    updir: The path to a genome directory generated by genome_downloader
    upid: A valid upid of the genome to be searched
    tool: Infernal's search method used for genome annotation (cmsearch,
          cmscan). Defaults to cmsearch
    return: void
    """
    # generate chunks - do this bit withing the search job in order to parallelize it.
    # if sequence chunk directory exists then don't generate it again and use that to
    # re-launch the searches
    upid_fasta = os.path.join(updir, upid + '.fa')
    seq_chunks_dir = os.path.join(updir, "search_chunks")

    # check if the genome sequence file has already been split
    if not os.path.exists(seq_chunks_dir):
        os.mkdir(seq_chunks_dir)
        os.chmod(seq_chunks_dir, 0777)

        # check if we need to split the seq_file
        if gsu.count_nucleotides_in_fasta(upid_fasta) >= SPLIT_SIZE:
            # split sequence file into smalled chunks and store under destination directory
            gsu.split_seq_file(upid_fasta, SPLIT_SIZE, dest_dir=seq_chunks_dir)

            # now index the fasta files
            seq_files = os.listdir(seq_chunks_dir)
            for seq_file in seq_files:
                seq_file_loc = os.path.join(seq_chunks_dir, seq_file)
                cmd = "%s --index %s" % (conf.ESL_SFETCH, seq_file_loc)
                subprocess.call(cmd, shell=True)

        # For all inputs to be consistent, if the sequence file is small,
        # copy it in the search_chunks directory
        else:
            # copy file
            shutil.copyfile(upid_fasta, os.path.join(seq_chunks_dir, upid + '.fa'))
            # index file
            cmd = "%s --index %s" % (conf.ESL_SFETCH, os.path.join(seq_chunks_dir, upid + '.fa'))
            subprocess.call(cmd, shell=True)

    # Create a search directory
    search_output_dir = os.path.join(updir, "search_output")
    if not os.path.exists(search_output_dir):
        os.mkdir(search_output_dir)
        os.chmod(search_output_dir, 0777)

    # List all smaller files. Using list comprehension to filter out other contents
    genome_chunks = [x for x in os.listdir(seq_chunks_dir) if not x.endswith('.ssi')]

    # check and set search method selected
    search_method = None
    if tool == 'cmsearch':
        search_method = conf.CMSEARCH
    else:
        search_method = conf.CMSCAN

    for seq_file in genome_chunks:
        cmd = ''
        # index all sequence files
        seq_file_loc = os.path.join(seq_chunks_dir, seq_file)

        chunk_name = seq_file
        # final result locations on shared storage
        lsf_out_file = os.path.join(search_output_dir, chunk_name + ".out")
        lsf_err_file = os.path.join(search_output_dir, chunk_name + ".err")
        inf_tbl_file = os.path.join(search_output_dir, chunk_name + ".tbl")
        inf_out_file = os.path.join(search_output_dir, chunk_name + ".inf")
        # scratch copies on the execution host; bsub's -f options copy them
        # back when the job completes, -Ep removes them afterwards
        tmp_out_file = os.path.join("/tmp", chunk_name + ".out")
        tmp_err_file = os.path.join("/tmp", chunk_name + ".err")
        tmp_tbl_file = os.path.join("/tmp", chunk_name + ".tbl")
        tmp_inf_file = os.path.join("/tmp", chunk_name + ".inf")

        # fills all 22 SEARCH_MPI placeholders: bsub options, copy-back
        # pairs, cleanup basename, worker count and Infernal arguments
        cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, tmp_out_file, tmp_err_file,
                            CPU_NO, RFAM_SRCH_GROUP,
                            lsf_out_file, tmp_out_file,
                            lsf_err_file, tmp_err_file,
                            inf_tbl_file, tmp_tbl_file,
                            inf_out_file, tmp_inf_file, chunk_name,
                            CPU_NO, search_method, tmp_inf_file,
                            tmp_tbl_file, RFAMSEQ_SIZE,
                            conf.CMFILE, seq_file_loc)
        subprocess.call(cmd, shell=True)
# ------------------------------------------------------------------------------------------------
def restore_io_paths(input_dir, output_dir):
    """
    Pair the input and output locations of failed search jobs so they can
    be re-launched.

    ** Will need to simplify this

    input_dir: The path to the input directory as organized by genome_scanner
    output_dir: The path to the output directory of the run we want to restore

    return: A list of (input_path, output_path) tuples
    """
    # err_cases maps: output subdir -> genome -> [files whose jobs errored]
    err_cases = gsv.check_search_err_files(output_dir)

    io_pairs = []
    for subdir, genomes in err_cases.items():
        subdir_out = os.path.join(output_dir, subdir)
        for genome, err_files in genomes.items():
            genome_out = os.path.join(subdir_out, genome)
            genome_in = os.path.join(input_dir, genome)
            # pair each failed file's source with its result location;
            # assumes the input dir mirrors the genome layout
            for err_file in err_files:
                io_pairs.append((os.path.join(genome_in, err_file),
                                 os.path.join(genome_out, err_file)))

    return io_pairs
# -------------------------------------------------------------------------
def restore_jobs_with_multi_cms(cm_dir, input_dir, output_dir):
    """
    Restore search jobs by scanning using smaller cm files
    cm_dir: The path to the CM directory. If None, use default
    input_dir: The path to the input directory as organized by genome_scanner
    output_dir: The path to the output directory of the run we want to restore
    returns: void
    """
    io_path_list = restore_io_paths(input_dir, output_dir)
    # cm files are the directory entries without an extension
    cms = [x for x in os.listdir(cm_dir) if x.find('.') == -1]
    for cm_file in cms:
        cm_file_loc = os.path.join(cm_dir, cm_file)
        for seq_file, out_file in io_path_list:
            # cleanup result files left over from the crashed run
            # (was four copy-pasted if/remove pairs)
            for ext in (".out", ".err", ".tbl", ".inf"):
                stale_file = out_file + ext
                if os.path.exists(stale_file):
                    os.remove(stale_file)
            lsf_out_file = out_file + '_' + cm_file + ".out"
            lsf_err_file = out_file + '_' + cm_file + ".err"
            inf_tbl_file = out_file + '_' + cm_file + ".tbl"
            inf_out_file = out_file + '_' + cm_file + ".inf"
            # NOTE(review): assumes out_file is at least three directories
            # deep - the LSF subgroup is the grandparent directory name
            lsf_subgroup = out_file.split('/')[-3]
            cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, lsf_out_file,
                                lsf_err_file, CPU_NO,
                                LSF_GROUP % lsf_subgroup,
                                CPU_NO, conf.CMSEARCH,
                                inf_out_file, inf_tbl_file,
                                RFAMSEQ_SIZE, cm_file_loc,
                                seq_file)
            subprocess.call(cmd, shell=True)
            # the trailing re-initialization of the four path variables was
            # dead code (reassigned at the top of every iteration) - removed
# -------------------------------------------------------------------------
def multi_cm_sequence_scan(cm_dir, sequence_dir, tool="cmsearch", seqdb_size=None, dest_dir=None):
    """
    This function treats each covariance model individually and launches
    a job for each sequence file found in sequence_dir.
    cm_dir: A directory with all CM models to scan
    sequence_dir: A directory with fasta sequence files to scan
    tool: "cmsearch" (default) selects conf.CMSEARCH, anything else conf.CMSCAN
    seqdb_size: effective database size for the search; defaults to RFAMSEQ_SIZE
    dest_dir: The path to the destination directory; created if missing.
        NOTE(review): a None dest_dir crashes in os.path.join below -
        confirm callers always provide it.
    return: void
    """
    # list all covariance models and sequence files
    cms = [x for x in os.listdir(cm_dir)
           if x.endswith(('.CM', '.cm'))]
    seq_files = [x for x in os.listdir(sequence_dir)
                 if not x.endswith('.ssi')]
    # create the destination directory if necessary
    if dest_dir is not None and not os.path.exists(dest_dir):
        os.mkdir(dest_dir)
    # sequence db size defaults to Rfamseq size
    if seqdb_size is None:
        seqdb_size = RFAMSEQ_SIZE
    search_method = conf.CMSEARCH if tool == 'cmsearch' else conf.CMSCAN
    # now launch the searches
    for cm in cms:
        # create an individual result directory per model
        rfam_acc = cm.partition('.')[0]
        family_dir = os.path.join(dest_dir, rfam_acc)
        cm_path = os.path.join(cm_dir, cm)
        if not os.path.exists(family_dir):
            os.mkdir(family_dir)
        for seq_file in seq_files:
            seq_file_path = os.path.join(sequence_dir, seq_file)
            # Strip the extension for output naming. Bug fix: fall back to
            # the full name for extension-less files, which previously all
            # got filename == "" and overwrote each other's result files.
            if seq_file.find(".") != -1:
                filename = seq_file.partition('.')[0]
            else:
                filename = seq_file
            lsf_out_file = os.path.join(family_dir, filename + ".out")
            lsf_err_file = os.path.join(family_dir, filename + ".err")
            inf_tbl_file = os.path.join(family_dir, filename + ".tbl")
            inf_out_file = os.path.join(family_dir, filename + ".inf")
            # scratch paths under /tmp, paired with the final locations
            # inside the SEARCH_MPI command template
            tmp_out_file = os.path.join("/tmp", filename + ".out")
            tmp_err_file = os.path.join("/tmp", filename + ".err")
            tmp_tbl_file = os.path.join("/tmp", filename + ".tbl")
            tmp_inf_file = os.path.join("/tmp", filename + ".inf")
            cmd = SEARCH_MPI % (SRCH_MEM, SRCH_MEM, tmp_out_file, tmp_err_file,
                                CPU_NO, RFAM_SRCH_GROUP,
                                lsf_out_file, tmp_out_file,
                                lsf_err_file, tmp_err_file,
                                inf_tbl_file, tmp_tbl_file,
                                inf_out_file, tmp_inf_file, filename,
                                CPU_NO, search_method, tmp_inf_file,
                                tmp_tbl_file, seqdb_size,
                                cm_path, seq_file_path)
            subprocess.call(cmd, shell=True)
# -------------------------------------------------------------------------
if __name__ == '__main__':
    """
    TO DO:
    - update function that restores crashed searches
    - implement this as a luigi pipeline
    """
    # NOTE(review): positional arguments are read from fixed sys.argv
    # indices, so the option flag must appear AFTER the paths on the
    # command line - confirm this is the intended usage
    # restore searches if --restore option is provided
    if '--restore' in sys.argv:
        cm_dir = sys.argv[1]  # a directory of split cms
        input_dir = sys.argv[2]
        dest_dir = sys.argv[3]
        restore_jobs_with_multi_cms(cm_dir, input_dir, dest_dir)
    elif '--multi':
        cm_dir = sys.argv[1]
        sequence_dir = sys.argv[2]
        dest_dir = sys.argv[3]
        multi_cm_sequence_scan(cm_dir, sequence_dir, tool="cmsearch",
                               seqdb_size=None, dest_dir=dest_dir)
    elif '--project' in sys.argv:
        project_dir = sys.argv[1]
        upid_input = sys.argv[2]
        # a file that contains a list of upids
        if os.path.isfile(upid_input):
            upid_fp = open(upid_input, 'r')
            upids = [x.strip() for x in upid_fp]
            upid_fp.close()
            for upid in upids:
                # genomes are sharded under a subdir named after the
                # last 3 characters of the upid
                suffix = upid[-3:]
                updir = os.path.join(os.path.join(project_dir, suffix), upid)
                single_genome_scan_from_download_directory(updir, upid, tool="cmsearch")
        # single upid
        else:
            upid = upid_input
            suffix = upid_input[-3:]
            updir = os.path.join(os.path.join(project_dir, suffix), upid)
            single_genome_scan_from_download_directory(updir, upid, tool="cmsearch")
    # this option allows the user to launch batch genome search from within a single directory
    elif '--batch' in sys.argv:
        input_dir = sys.argv[1]
        dest_dir = sys.argv[2]
        genome_scan_from_sequence_directory(input_dir, dest_dir, tool="cmsearch", size=None)
    else:
        print "Wrong Input"
        # need to implement a help function
|
def printNum(n):
    """Return str(n): each digit d maps to len(d repeated d times), i.e. d."""
    digits = []
    for ch in str(n):
        repeated = ch * int(ch)
        digits.append(str(len(repeated)))
    return "".join(digits)
# Demo (Python 2 print statements): each call echoes its argument's digits.
print printNum(2014)
print printNum(1135)
print printNum(2048)
And here is the same function condensed to a single line:
def printNum(n):
    """One-line variant: maps each digit d of n to len(d * d) == d."""
    return "".join(str(len(str(d) * int(d))) for d in str(n))
# Same demo calls against the one-line variant (Python 2 print statements).
print printNum(2014)
print printNum(1135)
print printNum(2048)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import template
from django.conf import settings
from django.template.base import Template
from django.template.loader import get_template
from django.template.loader_tags import do_include, BlockNode
import six
from copy import copy
register = template.Library()
@register.tag(name='includeblock')
def do_include_block(parser, token):
    """
    Works like the {% include <template_name> %} django templatetag,
    but additionally allows for definition of inline blocks that can be
    referenced in the included template.
    Usage:
    {% includeblock 'example/includable_template.html' <...> %}
    {% block myblock %}
    <p>An inner block. Reference this using {{ myblock }} in the included template!</p>
    {% endblock myblock %}
    <b>This content is never rendered because it appears outside inner blocks!</b>
    {% endincludeblock %}
    """
    # inherit behaviour from the ``include`` templatetag
    include_node = do_include(parser, token)
    # Save the block names the parser has registered so far and give it a
    # clean slate while parsing the inner nodelist. Without the reset, an
    # inner block reusing a name seen earlier (e.g. in a nested
    # includeblock) raises TemplateSyntaxError for a duplicate block name.
    try:
        loaded_blocks = copy(parser.__loaded_blocks)
    except AttributeError:
        # no {% block %} has been parsed yet in this template
        loaded_blocks = []
    parser.__loaded_blocks = []
    nodelist = parser.parse(('endincludeblock',))
    parser.__loaded_blocks = loaded_blocks
    parser.delete_first_token()
    return IncludeBlockNode(nodelist, include_node)
class IncludeBlockNode(template.Node):
    """ The {% includeblock <template> ... %} tag works just like an {% include ... %} templatetag,
    with the addition of allowing an inner block of content to be specified. The tag
    allows all <with> kwargs supplied to the tag, just like the {% include ... %} tag.
    We render all nodes found inside the {% includeblock <template> %} Tag.
    Nodes found inside a {% block %} block are rendered into a context variable with the name
    of their block. They can be used in the included <template> by accessing the variable
    with the name of that block.
    Nodes found outside {% block %} blocks are rendered together and output after the last node
    in the specified rendered <template>.
    """
    def __init__(self, nodelist, include_node):
        # nodelist: nodes parsed between {% includeblock %} / {% endincludeblock %}
        # include_node: the IncludeNode built by the stock ``do_include``
        self.nodelist = nodelist
        self.include_node = include_node

    def render(self, context):
        try:
            # from django.template.loader_tags.IncludeNode:
            # parse the included template
            # django <= 1.6 compatibility. stores FilterExpression in self.template_name or has
            # self.template as Template
            if not hasattr(self.include_node, 'template'):
                self.include_node.template = self.include_node.template_name
            if not isinstance(self.include_node.template, Template):
                # django >= 1.7
                self.include_node.template = self.include_node.template.resolve(context)
            if not callable(getattr(self.include_node.template, 'render', None)):
                # If not, we'll try get_template
                self.include_node.template = get_template(self.include_node.template)
            values = dict([(name, var.resolve(context)) for name, var
                           in six.iteritems(self.include_node.extra_context)])
            if self.include_node.isolated_context:
                context = context.new(values)
            else:
                context.update(values)
            # Render each named inner block into a context variable.
            # Bug fix: re-query the node list after every iteration instead of
            # iterating a snapshot - deleting a block also removes any blocks
            # nested inside it, and calling self.nodelist.index() on such an
            # already-removed nested block raised ValueError.
            block_list = self.nodelist.get_nodes_by_type(BlockNode)
            while block_list:
                block = block_list[0]
                values[block.name] = block.nodelist.render(context)
                del self.nodelist[self.nodelist.index(block)]
                block_list = self.nodelist.get_nodes_by_type(BlockNode)
            # render the included template
            output = self.include_node.template.render(context)
            if not self.include_node.isolated_context:
                context.pop()
            return output
        except Exception:
            # template errors only surface under TEMPLATE_DEBUG, matching
            # Django's silent-failure convention for tags (was a bare except)
            if settings.TEMPLATE_DEBUG:
                raise
            return ''
Fix: Nesting includeblock tags now works without errors, even for nested inline blocks with the same name
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import template
from django.conf import settings
from django.template.base import Template
from django.template.loader import get_template
from django.template.loader_tags import do_include, BlockNode
import six
from copy import copy
register = template.Library()
@register.tag(name='includeblock')
def do_include_block(parser, token):
    """
    Works like the {% include <template_name> %} django templatetag,
    but additionally allows for definition of inline blocks that can be
    referenced in the included template.
    Usage:
    {% includeblock 'example/includable_template.html' <...> %}
    {% block myblock %}
    <p>An inner block. Reference this using {{ myblock }} in the included template!</p>
    {% endblock myblock %}
    <b>This content is never rendered because it appears outside inner blocks!</b>
    {% endincludeblock %}
    """
    # inherit behaviour from the ``include`` templatetag
    include_node = do_include(parser, token)
    # we give the parser a new "context" of blocks encountered in the inner includeblock,
    # so duplicate blocks don't cause a TemplateSyntaxError
    try:
        loaded_blocks = copy(parser.__loaded_blocks)
    except AttributeError:
        # Bug fix: the attribute only exists once a {% block %} has been
        # parsed (django's own do_block guards it the same way), so an
        # includeblock appearing before any block raised AttributeError.
        loaded_blocks = []
    parser.__loaded_blocks = []
    nodelist = parser.parse(('endincludeblock',))
    parser.__loaded_blocks = loaded_blocks
    parser.delete_first_token()
    return IncludeBlockNode(nodelist, include_node)
class IncludeBlockNode(template.Node):
    """ The {% includeblock <template> ... %} tag works just like an {% include ... %} templatetag,
    with the addition of allowing an inner block of content to be specified. The tag
    allows all <with> kwargs supplied to the tag, just like the {% include ... %} tag.
    We render all nodes found inside the {% includeblock <template> %} Tag.
    Nodes found inside a {% block %} block are rendered into a context variable with the name
    of their block. They can be used in the included <template> by accessing the variable
    with the name of that block.
    Nodes found outside {% block %} blocks are rendered together and output after the last node
    in the specified rendered <template>.
    """
    def __init__(self, nodelist, include_node):
        # nodelist: nodes parsed between {% includeblock %} / {% endincludeblock %}
        # include_node: the IncludeNode built by the stock ``do_include``
        self.nodelist = nodelist
        self.include_node = include_node

    def render(self, context):
        try:
            # from django.template.loader_tags.IncludeNode:
            # parse the included template
            # django <= 1.6 compatibility. stores FilterExpression in self.template_name or has
            # self.template as Template
            if not hasattr(self.include_node, 'template'):
                self.include_node.template = self.include_node.template_name
            if not isinstance(self.include_node.template, Template):
                # django >= 1.7
                self.include_node.template = self.include_node.template.resolve(context)
            if not callable(getattr(self.include_node.template, 'render', None)):
                # If not, we'll try get_template
                self.include_node.template = get_template(self.include_node.template)
            values = dict([(name, var.resolve(context)) for name, var
                           in six.iteritems(self.include_node.extra_context)])
            if self.include_node.isolated_context:
                context = context.new(values)
            else:
                context.update(values)
            # render each named block in the inner block into a context variable
            # NOTE(review): mutating ``values`` below appears to rely on the
            # context holding a reference to the same dict pushed above -
            # confirm this holds across supported Django versions
            block_list = self.nodelist.get_nodes_by_type(BlockNode)
            while block_list:
                block = block_list[0]
                values[block.name] = block.nodelist.render(context)
                del self.nodelist[self.nodelist.index(block)]
                # we refresh the block list after each iteration, because if block B was contained in block A that we
                # just rendered, block B will have been removed from the nodelist after rendering
                block_list = self.nodelist.get_nodes_by_type(BlockNode)
            # render the included template
            output = self.include_node.template.render(context)
            if not self.include_node.isolated_context:
                context.pop()
            return output
        except:
            # deliberately broad: errors only surface under TEMPLATE_DEBUG,
            # matching Django's silent-failure convention for template tags
            if settings.TEMPLATE_DEBUG:
                raise
            return ''
|
'''Enqueue a collection's docuemnts for re-enriching.
'''
import sys
import argparse
from harvester.post_processing.couchdb_runner import CouchDBJobEnqueue
from harvester.config import parse_env
import harvester.post_processing.enrich_existing_couch_doc
def main(args):
    """Parse CLI args and enqueue every document of the given collection
    for re-enrichment with the supplied Akara enrichment chain."""
    parser = argparse.ArgumentParser(
        description='run an Akara enrichment chain on documents in a \
collection.')
    parser.add_argument('collection_id',
                        help='Registry id for the collection')
    parser.add_argument('enrichment', help='File of enrichment chain to run')
    args = parser.parse_args(args)
    print "CID:{}".format(args.collection_id)
    print "ENRICH FILE:{}".format(args.enrichment)
    # read the whole enrichment chain definition into memory
    with open(args.enrichment) as enrichfoo:
        enrichments = enrichfoo.read()
    enq = CouchDBJobEnqueue()
    # per-job timeout passed to the queue (units defined by CouchDBJobEnqueue
    # - presumably seconds; confirm against the runner)
    timeout = 10000
    enq.queue_collection(args.collection_id, timeout,
                         harvester.post_processing.enrich_existing_couch_doc.main,
                         enrichments
                         )
if __name__=='__main__':
main(sys.argv[1:])
Make the RQ queue configurable via an --rq_queue option.
'''Enqueue a collection's docuemnts for re-enriching.
'''
import sys
import argparse
from harvester.post_processing.couchdb_runner import CouchDBJobEnqueue
from harvester.config import parse_env
import harvester.post_processing.enrich_existing_couch_doc
def main(args):
    """Parse CLI args and enqueue every document of the given collection
    for re-enrichment, optionally on a non-default RQ queue."""
    parser = argparse.ArgumentParser(
        description='run an Akara enrichment chain on documents in a \
collection.')
    parser.add_argument('collection_id',
                        help='Registry id for the collection')
    parser.add_argument('enrichment', help='File of enrichment chain to run')
    parser.add_argument('--rq_queue',
                        help='Override queue for jobs, normal-stage is default')
    args = parser.parse_args(args)
    print "CID:{}".format(args.collection_id)
    print "ENRICH FILE:{}".format(args.enrichment)
    # read the whole enrichment chain definition into memory
    with open(args.enrichment) as enrichfoo:
        enrichments = enrichfoo.read()
    # default queue unless overridden on the command line
    Q = 'normal-stage'
    if args.rq_queue:
        Q = args.rq_queue
    enq = CouchDBJobEnqueue(Q)
    # per-job timeout passed to the queue (units defined by CouchDBJobEnqueue
    # - presumably seconds; confirm against the runner)
    timeout = 10000
    enq.queue_collection(args.collection_id, timeout,
                         harvester.post_processing.enrich_existing_couch_doc.main,
                         enrichments
                         )
if __name__=='__main__':
main(sys.argv[1:])
|
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from .models import (
Assessment,
Boundary,
BoundaryCategory,
BoundaryType,
Institution,
InstitutionCategory,
InstitutionManagement,
MoiType,
Programme,
Question,
Relations,
Staff,
Student,
StudentGroup,
StudentStudentGroupRelation
)
from rest_framework_bulk import (
BulkListSerializer,
BulkSerializerMixin
)
class BoundarySerializer(serializers.ModelSerializer):
class Meta:
model = Boundary
fields = (
'id', 'parent', 'name', 'boundary_category', 'boundary_type',
'active'
)
class BoundaryTypeSerializer(serializers.ModelSerializer):
class Meta:
model = BoundaryType
fields = ('id', 'name')
class BoundaryCategorySerializer(serializers.ModelSerializer):
class Meta:
model = BoundaryCategory
fields = ('id', 'name')
class InstitutionSerializer(serializers.ModelSerializer):
class Meta:
model = Institution
fields = (
'id', 'boundary', 'dise_code', 'name', 'cat', 'institution_gender',
'languages', 'mgmt', 'address', 'area', 'pincode', 'landmark',
'active'
)
class InstitutionCategorySerializer(serializers.ModelSerializer):
class Meta:
model = InstitutionCategory
class InstitutionManagementSerializer(serializers.ModelSerializer):
class Meta:
model = InstitutionManagement
class LanguageSerializer(serializers.ModelSerializer):
class Meta:
model = MoiType
class AssessmentSerializer(serializers.ModelSerializer):
class Meta:
model = Assessment
fields = (
'id', 'programme', 'name', 'start_date', 'end_date', 'query',
'active', 'double_entry', 'type',
)
class ProgrammeSerializer(serializers.ModelSerializer):
class Meta:
model = Programme
fields = (
'id', 'name', 'description', 'start_date', 'end_date',
'programme_institution_category', 'active'
)
class QuestionSerializer(serializers.ModelSerializer):
class Meta:
model = Question
class RelationsSerializer(serializers.ModelSerializer):
class Meta:
model = Relations
fields = (
'id','relation_type', 'first_name' ,'middle_name', 'last_name'
)
list_serializer_class=BulkListSerializer
# Added this so the relation id is propagated in PUTs to the student endpoint. Relation info is
# nested in student info.
extra_kwargs = {
"id": {
"read_only": False,
"required": False,
},
}
class StudentSerializer(BulkSerializerMixin, serializers.ModelSerializer):
    """Student serializer with writable nested relations (parents/guardians)."""
    relations = RelationsSerializer(many=True)  # was many='True' (a string)

    class Meta:
        model = Student
        fields = (
            'id', 'first_name', 'middle_name', 'last_name', 'uid', 'dob', 'gender',
            'mt', 'active', 'relations'
        )
        list_serializer_class = BulkListSerializer

    def create(self, validated_data):
        """Create a student plus its relations, and link it to the student
        group taken from the URL kwargs. Raises ValidationError when the
        group cannot be resolved."""
        studentgroup_id = self.context['view'].kwargs['parent_lookup_studentgroups']
        active = validated_data.get('active', 2)
        try:
            student_group = StudentGroup.objects.get(id=studentgroup_id)
        except Exception:
            # Bug fix: the error message referenced the misspelled name
            # "studengroup_id", so a bad group id raised NameError instead
            # of the intended ValidationError.
            raise ValidationError(studentgroup_id + " not found.")
        relations_data = validated_data.pop('relations')
        student = Student.objects.create(**validated_data)
        for item in relations_data:
            Relations.objects.create(student=student, **item)
        student.save()
        student_studentgroup_relation, created = StudentStudentGroupRelation.objects.get_or_create(
            student=student,
            student_group=student_group,
            active=active,
        )
        return student

    def update(self, instance, validated_data):
        """Update the student's name fields and its nested relations. A
        relation whose three name parts are all empty is deleted."""
        relations_data = validated_data.pop('relations')
        instance.first_name = validated_data.get('first_name', instance.first_name)
        instance.middle_name = validated_data.get('middle_name', instance.middle_name)
        instance.last_name = validated_data.get('last_name', instance.last_name)
        instance.save()
        for item in relations_data:
            # NOTE(review): assumes every nested relation carries an "id"
            # (RelationsSerializer marks id read_only=False) - confirm new
            # relations are never submitted through this path
            relation = Relations.objects.get(id=item['id'])
            relation.relation_type = item.get('relation_type')
            first_name = item.get('first_name')
            middle_name = item.get('middle_name')
            last_name = item.get('last_name')
            # if all the names are empty, delete the relation
            if not first_name and not middle_name and not last_name:
                relation.delete()
            else:
                relation.first_name = first_name
                relation.middle_name = middle_name
                relation.last_name = last_name
                relation.save()
        instance.save()
        return instance
class StudentGroupSerializer(serializers.ModelSerializer):
class Meta:
model = StudentGroup
fields = (
'id', 'institution', 'name', 'section', 'active', 'group_type'
)
class StudentStudentGroupSerializer(serializers.ModelSerializer):
class Meta:
model = StudentStudentGroupRelation
fields = (
'id','student','student_group','academic','active'
)
class StaffSerializer(serializers.ModelSerializer):
class Meta:
model = Staff
fields = (
'id','first_name', 'middle_name', 'last_name', 'institution',
'doj', 'gender', 'mt', 'qualification', 'active'
)
Removes query field from AssessmentSerializer
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from .models import (
Assessment,
Boundary,
BoundaryCategory,
BoundaryType,
Institution,
InstitutionCategory,
InstitutionManagement,
MoiType,
Programme,
Question,
Relations,
Staff,
Student,
StudentGroup,
StudentStudentGroupRelation
)
from rest_framework_bulk import (
BulkListSerializer,
BulkSerializerMixin
)
class BoundarySerializer(serializers.ModelSerializer):
class Meta:
model = Boundary
fields = (
'id', 'parent', 'name', 'boundary_category', 'boundary_type',
'active'
)
class BoundaryTypeSerializer(serializers.ModelSerializer):
class Meta:
model = BoundaryType
fields = ('id', 'name')
class BoundaryCategorySerializer(serializers.ModelSerializer):
class Meta:
model = BoundaryCategory
fields = ('id', 'name')
class InstitutionSerializer(serializers.ModelSerializer):
class Meta:
model = Institution
fields = (
'id', 'boundary', 'dise_code', 'name', 'cat', 'institution_gender',
'languages', 'mgmt', 'address', 'area', 'pincode', 'landmark',
'active'
)
class InstitutionCategorySerializer(serializers.ModelSerializer):
class Meta:
model = InstitutionCategory
class InstitutionManagementSerializer(serializers.ModelSerializer):
class Meta:
model = InstitutionManagement
class LanguageSerializer(serializers.ModelSerializer):
class Meta:
model = MoiType
class AssessmentSerializer(serializers.ModelSerializer):
class Meta:
model = Assessment
fields = (
'id', 'programme', 'name', 'start_date', 'end_date',
'active', 'double_entry', 'type',
)
class ProgrammeSerializer(serializers.ModelSerializer):
class Meta:
model = Programme
fields = (
'id', 'name', 'description', 'start_date', 'end_date',
'programme_institution_category', 'active'
)
class QuestionSerializer(serializers.ModelSerializer):
class Meta:
model = Question
class RelationsSerializer(serializers.ModelSerializer):
class Meta:
model = Relations
fields = (
'id','relation_type', 'first_name' ,'middle_name', 'last_name'
)
list_serializer_class=BulkListSerializer
# Added this so the relation id is propagated in PUTs to the student endpoint. Relation info is
# nested in student info.
extra_kwargs = {
"id": {
"read_only": False,
"required": False,
},
}
class StudentSerializer(BulkSerializerMixin, serializers.ModelSerializer):
    """Student serializer with writable nested relations (parents/guardians)."""
    relations = RelationsSerializer(many=True)  # was many='True' (a string)

    class Meta:
        model = Student
        fields = (
            'id', 'first_name', 'middle_name', 'last_name', 'uid', 'dob', 'gender',
            'mt', 'active', 'relations'
        )
        list_serializer_class = BulkListSerializer

    def create(self, validated_data):
        """Create a student plus its relations, and link it to the student
        group taken from the URL kwargs. Raises ValidationError when the
        group cannot be resolved."""
        studentgroup_id = self.context['view'].kwargs['parent_lookup_studentgroups']
        active = validated_data.get('active', 2)
        try:
            student_group = StudentGroup.objects.get(id=studentgroup_id)
        except Exception:
            # Bug fix: the error message referenced the misspelled name
            # "studengroup_id", so a bad group id raised NameError instead
            # of the intended ValidationError.
            raise ValidationError(studentgroup_id + " not found.")
        relations_data = validated_data.pop('relations')
        student = Student.objects.create(**validated_data)
        for item in relations_data:
            Relations.objects.create(student=student, **item)
        student.save()
        student_studentgroup_relation, created = StudentStudentGroupRelation.objects.get_or_create(
            student=student,
            student_group=student_group,
            active=active,
        )
        return student

    def update(self, instance, validated_data):
        """Update the student's name fields and its nested relations. A
        relation whose three name parts are all empty is deleted."""
        relations_data = validated_data.pop('relations')
        instance.first_name = validated_data.get('first_name', instance.first_name)
        instance.middle_name = validated_data.get('middle_name', instance.middle_name)
        instance.last_name = validated_data.get('last_name', instance.last_name)
        instance.save()
        for item in relations_data:
            # NOTE(review): assumes every nested relation carries an "id"
            # (RelationsSerializer marks id read_only=False) - confirm new
            # relations are never submitted through this path
            relation = Relations.objects.get(id=item['id'])
            relation.relation_type = item.get('relation_type')
            first_name = item.get('first_name')
            middle_name = item.get('middle_name')
            last_name = item.get('last_name')
            # if all the names are empty, delete the relation
            if not first_name and not middle_name and not last_name:
                relation.delete()
            else:
                relation.first_name = first_name
                relation.middle_name = middle_name
                relation.last_name = last_name
                relation.save()
        instance.save()
        return instance
class StudentGroupSerializer(serializers.ModelSerializer):
class Meta:
model = StudentGroup
fields = (
'id', 'institution', 'name', 'section', 'active', 'group_type'
)
class StudentStudentGroupSerializer(serializers.ModelSerializer):
class Meta:
model = StudentStudentGroupRelation
fields = (
'id','student','student_group','academic','active'
)
class StaffSerializer(serializers.ModelSerializer):
class Meta:
model = Staff
fields = (
'id','first_name', 'middle_name', 'last_name', 'institution',
'doj', 'gender', 'mt', 'qualification', 'active'
)
|
import single_robot_behavior
import behavior
from enum import Enum
import main
import evaluation
import constants
import role_assignment
import robocup
class Capture(single_robot_behavior.SingleRobotBehavior):
    """Single-robot behavior that approaches the ball and captures it.

    States:
      course_approach - drive to an intercept point near the ball
      fine_approach   - creep onto the ball with the dribbler spinning
      back_off        - retreat when the ball ends up in the opponent goal area
    """

    # tunable config values
    CourseApproachErrorThresh = 0.8
    CourseApproachDist = 0.4
    CourseApproachAvoidBall = 0.10
    DribbleSpeed = 100
    FineApproachSpeed = 0.2
    BackOffDistance = 0.4
    BackOffSpeed = 0.3
    InFrontOfBallCosOfAngleThreshold = 0.95

    class State(Enum):
        course_approach = 1
        fine_approach = 2
        back_off = 3

    def __init__(self):
        super().__init__(continuous=False)
        self.add_state(Capture.State.course_approach, behavior.Behavior.State.running)
        self.add_state(Capture.State.fine_approach, behavior.Behavior.State.running)
        self.add_state(Capture.State.back_off, behavior.Behavior.State.running)
        self.add_transition(behavior.Behavior.State.start,
                            Capture.State.course_approach,
                            lambda: True,
                            'immediately')
        self.add_transition(Capture.State.course_approach,
                            Capture.State.fine_approach,
                            lambda: (self.bot_in_front_of_ball() or self.bot_near_ball(Capture.CourseApproachDist)) and main.ball().valid and (not constants.Field.TheirGoalShape.contains_point(main.ball().pos) or self.robot.is_penalty_kicker),
                            'dist to ball < threshold')
        self.add_transition(Capture.State.fine_approach,
                            behavior.Behavior.State.completed,
                            lambda: self.robot.has_ball(),
                            'has ball')
        self.add_transition(Capture.State.fine_approach,
                            Capture.State.course_approach,
                            lambda: not (self.bot_in_front_of_ball() or self.bot_near_ball(Capture.CourseApproachDist)) and (not self.bot_near_ball(Capture.CourseApproachDist * 1.5) or not main.ball().pos),
                            'ball went into goal')
        self.add_transition(Capture.State.fine_approach,
                            Capture.State.back_off,
                            lambda: not self.robot.is_penalty_kicker and constants.Field.TheirGoalShape.contains_point(main.ball().pos),
                            'ball ran away')
        self.add_transition(Capture.State.back_off,
                            behavior.Behavior.State.start,
                            lambda: constants.Field.TheirGoalShape.contains_point(main.ball().pos) or self.bot_to_ball().mag() < Capture.BackOffDistance,
                            "backed away enough")
        self.lastApproachTarget = None
        # initialized here so the is_penalty getter cannot raise
        # AttributeError when read before the setter is used
        self._is_penalty = False
        # the "postChangeCount/pastChangeCount" counter was removed: it was
        # initialized under one name and used under another, and gated
        # dead-end logic in execute_course_approach

    def bot_to_ball(self):
        """Vector from this robot to the ball."""
        return main.ball().pos - self.robot.pos

    def bot_near_ball(self, distance):
        """True when the robot is within *distance* of the ball."""
        return (self.bot_to_ball().mag() < distance)

    def bot_in_front_of_ball(self):
        """True when the ball is rolling toward the robot and is predicted
        to stop before reaching it."""
        ball2bot = self.bot_to_ball() * -1
        return (ball2bot.normalized().dot(main.ball().vel) > Capture.InFrontOfBallCosOfAngleThreshold) and \
            ((ball2bot).mag() < (evaluation.ball.predict_stop(main.ball().pos, main.ball().vel) - main.ball().pos).mag())

    # normalized vector pointing from the ball to the point the robot should get to in course_approach
    def approach_vector(self):
        if main.ball().vel.mag() > 0.25 and self.robot.pos.dist_to(main.ball().pos) > 0.2:
            # ball's moving, get on the side it's moving towards
            return main.ball().vel.normalized()
        else:
            return (self.robot.pos - main.ball().pos).normalized()

    def find_intercept_point(self):
        """Sample along the approach vector for the first point the robot
        can reach before the ball does; falls back to the farthest sample."""
        approach_vec = self.approach_vector()
        # sample every 5 cm in the -approach_vector direction from the ball
        pos = None
        for i in range(50):
            dist = i * 0.05
            pos = main.ball().pos + approach_vec * dist
            # how long will it take the ball to get there
            ball_time = evaluation.ball.rev_predict(main.ball().vel, dist)
            robotDist = (pos - self.robot.pos).mag() * 0.6
            bot_time = robocup.get_trapezoidal_time(
                robotDist,
                robotDist,
                2.2,
                1,
                self.robot.vel.mag(),
                0)
            if bot_time < ball_time:
                break
        return pos

    def execute_running(self):
        # make sure teammates don't bump into us
        self.robot.shield_from_teammates(constants.Robot.Radius * 2.0)

    def on_enter_course_approach(self):
        # '=' (assignment), not '==': the original no-op comparison leaked
        # the previous run's approach target into the new approach
        self.lastApproachTarget = None

    def execute_course_approach(self):
        # don't hit the ball on accident
        self.robot.set_avoid_ball_radius(Capture.CourseApproachAvoidBall)
        pos = self.find_intercept_point()
        self.robot.face(main.ball().pos)
        # stick with the previous target while the fresh intercept point is
        # within 10 cm of it, so the motion command doesn't jitter
        if self.lastApproachTarget is not None and (pos - self.lastApproachTarget).mag() < 0.1:
            self.robot.move_to(self.lastApproachTarget)
        else:
            main.system_state().draw_circle(pos, constants.Ball.Radius, constants.Colors.White, "Capture")
            self.robot.move_to(pos)
            self.lastApproachTarget = pos

    def on_exit_course_approach(self):
        # '=' (assignment), not '==' - see on_enter_course_approach
        self.lastApproachTarget = None

    def execute_fine_approach(self):
        # NOTE(review): stored on the class (not the instance), exactly as
        # the original did - confirm nothing reads Capture.multiplier
        # before the first call
        Capture.multiplier = 1.5
        self.robot.disable_avoid_ball()
        self.robot.set_dribble_speed(Capture.DribbleSpeed)
        self.robot.face(main.ball().pos)
        bot2ball = (main.ball().pos - self.robot.pos).normalized()
        aproach = self.bot_to_ball() * Capture.multiplier + bot2ball * Capture.FineApproachSpeed / 4 + main.ball().vel
        # clamp the commanded velocity to unit magnitude
        if (aproach.mag() > 1):
            aproach = aproach.normalized() * 1
        self.robot.set_world_vel(aproach)

    def execute_back_off(self):
        self.robot.face(main.ball().pos)
        self.robot.set_world_vel(self.bot_to_ball().normalized() * -1 * Capture.BackOffSpeed)

    def role_requirements(self):
        reqs = super().role_requirements()
        reqs.require_kicking = True
        # try to be near the ball
        if main.ball().valid:
            reqs.destination_shape = main.ball().pos
        return reqs

    @property
    def is_penalty(self):
        return self._is_penalty

    @is_penalty.setter
    def is_penalty(self, value):
        self._is_penalty = value
Get rid of some commented out and unused code in capture
import single_robot_behavior
import behavior
from enum import Enum
import main
import evaluation
import constants
import role_assignment
import robocup
class Capture(single_robot_behavior.SingleRobotBehavior):
    """Single-robot behavior that drives to and captures the ball.

    States:
      course_approach -- drive to a computed intercept point while keeping
                         an avoidance radius around the ball
      fine_approach   -- dribble slowly onto the ball until we possess it
      back_off        -- retreat when the ball sits inside the opponents'
                         goal area (unless we are the penalty kicker)
    """

    # tunable config values
    CourseApproachErrorThresh = 0.8
    CourseApproachDist = 0.4
    CourseApproachAvoidBall = 0.10
    DribbleSpeed = 100
    FineApproachSpeed = 0.2
    BackOffDistance = 0.4
    BackOffSpeed = 0.3
    # cos(angle) threshold for deciding we already sit in the ball's path
    InFrontOfBallCosOfAngleThreshold = 0.95

    class State(Enum):
        course_approach = 1
        fine_approach = 2
        back_off = 3

    def __init__(self):
        super().__init__(continuous=False)

        self.add_state(Capture.State.course_approach,
                       behavior.Behavior.State.running)
        self.add_state(Capture.State.fine_approach,
                       behavior.Behavior.State.running)
        self.add_state(Capture.State.back_off,
                       behavior.Behavior.State.running)

        self.add_transition(behavior.Behavior.State.start,
                            Capture.State.course_approach,
                            lambda: True,
                            'immediately')
        self.add_transition(
            Capture.State.course_approach,
            Capture.State.fine_approach,
            lambda: (self.bot_in_front_of_ball() or self.bot_near_ball(Capture.CourseApproachDist)) and main.ball().valid and (not constants.Field.TheirGoalShape.contains_point(main.ball().pos) or self.robot.is_penalty_kicker),
            'dist to ball < threshold')
        self.add_transition(
            Capture.State.fine_approach,
            behavior.Behavior.State.completed,
            lambda: self.robot.has_ball(),
            'has ball')
        self.add_transition(
            Capture.State.fine_approach,
            Capture.State.course_approach,
            lambda: not (self.bot_in_front_of_ball() or self.bot_near_ball(Capture.CourseApproachDist)) and (not self.bot_near_ball(Capture.CourseApproachDist * 1.5) or not main.ball().pos),
            'ball went into goal')
        self.add_transition(
            Capture.State.fine_approach,
            Capture.State.back_off,
            lambda: not self.robot.is_penalty_kicker and constants.Field.TheirGoalShape.contains_point(main.ball().pos),
            'ball ran away')
        # NOTE(review): this fires while the ball is still inside the goal
        # OR the robot is still close to the ball, which reads inverted
        # relative to the 'backed away enough' label -- confirm intent.
        self.add_transition(
            Capture.State.back_off,
            behavior.Behavior.State.start,
            lambda: constants.Field.TheirGoalShape.contains_point(main.ball().pos) or self.bot_to_ball().mag() < Capture.BackOffDistance,
            "backed away enough")

        # Cached course_approach target so we don't thrash between nearby
        # intercept points every frame.
        self.lastApproachTarget = None
        # Default for the is_penalty property; previously the backing field
        # was unset until the setter ran, so reading it raised AttributeError.
        self._is_penalty = False

    def bot_to_ball(self):
        """Vector pointing from our robot to the ball."""
        return main.ball().pos - self.robot.pos

    def bot_near_ball(self, distance):
        """True when the robot is within `distance` of the ball."""
        return self.bot_to_ball().mag() < distance

    def bot_in_front_of_ball(self):
        """True when the robot sits in the ball's travel direction and is
        closer than the ball's predicted stopping point."""
        ball2bot = self.bot_to_ball() * -1
        in_path = (ball2bot.normalized().dot(main.ball().vel) >
                   Capture.InFrontOfBallCosOfAngleThreshold)
        before_stop = ball2bot.mag() < (
            evaluation.ball.predict_stop(main.ball().pos, main.ball().vel) -
            main.ball().pos).mag()
        return in_path and before_stop

    # normalized vector pointing from the ball to the point the robot
    # should get to in course_approach
    def approach_vector(self):
        if main.ball().vel.mag() > 0.25 and self.robot.pos.dist_to(main.ball().pos) > 0.2:
            # ball's moving, get on the side it's moving towards
            return main.ball().vel.normalized()
        else:
            return (self.robot.pos - main.ball().pos).normalized()

    def find_intercept_point(self):
        """Sample every 5 cm along -approach_vector from the ball and return
        the first point the robot can reach before the ball does (or the
        farthest sample if none qualifies)."""
        approach_vec = self.approach_vector()
        pos = None
        for i in range(50):
            dist = i * 0.05
            pos = main.ball().pos + approach_vec * dist
            # how long will it take the ball to get there
            ball_time = evaluation.ball.rev_predict(main.ball().vel, dist)
            robotDist = (pos - self.robot.pos).mag() * 0.6
            bot_time = robocup.get_trapezoidal_time(robotDist, robotDist, 2.2,
                                                    1, self.robot.vel.mag(), 0)
            if bot_time < ball_time:
                break
        return pos

    def execute_running(self):
        # make sure teammates don't bump into us
        self.robot.shield_from_teammates(constants.Robot.Radius * 2.0)

    def on_enter_course_approach(self):
        # BUG FIX: was `self.lastApproachTarget == None`, a no-op comparison;
        # the intent is to clear the cached target on (re)entry.
        self.lastApproachTarget = None

    def execute_course_approach(self):
        # don't hit the ball on accident
        self.robot.set_avoid_ball_radius(Capture.CourseApproachAvoidBall)
        pos = self.find_intercept_point()
        self.robot.face(main.ball().pos)
        # Reuse the previous target when the new one is within 10 cm, which
        # keeps the motion command stable from frame to frame.
        if (self.lastApproachTarget is not None
                and (pos - self.lastApproachTarget).mag() < 0.1):
            self.robot.move_to(self.lastApproachTarget)
        else:
            main.system_state().draw_circle(pos, constants.Ball.Radius,
                                            constants.Colors.White, "Capture")
            self.robot.move_to(pos)
            self.lastApproachTarget = pos

    def on_exit_course_approach(self):
        # BUG FIX: was `==` (no-op comparison); clear the cache when leaving.
        self.lastApproachTarget = None

    def execute_fine_approach(self):
        # NOTE(review): this rebinds a *class* attribute every frame; kept
        # as-is in case other code reads Capture.multiplier -- confirm.
        Capture.multiplier = 1.5
        self.robot.disable_avoid_ball()
        self.robot.set_dribble_speed(Capture.DribbleSpeed)
        self.robot.face(main.ball().pos)
        bot2ball = (main.ball().pos - self.robot.pos).normalized()
        aproach = (self.bot_to_ball() * Capture.multiplier
                   + bot2ball * Capture.FineApproachSpeed / 4
                   + main.ball().vel)
        # clamp the commanded velocity to 1 m/s
        if aproach.mag() > 1:
            aproach = aproach.normalized() * 1
        self.robot.set_world_vel(aproach)

    def execute_back_off(self):
        self.robot.face(main.ball().pos)
        self.robot.set_world_vel(
            self.bot_to_ball().normalized() * -1 * Capture.BackOffSpeed)

    def role_requirements(self):
        """Require a kicker-capable robot, ideally one already near the ball."""
        reqs = super().role_requirements()
        reqs.require_kicking = True
        # try to be near the ball
        if main.ball().valid:
            reqs.destination_shape = main.ball().pos
        return reqs

    @property
    def is_penalty(self):
        # presumably set by the play that owns this behavior -- TODO confirm
        return self._is_penalty

    @is_penalty.setter
    def is_penalty(self, value):
        self._is_penalty = value
|
Update task.py
|
from calendar import LocaleHTMLCalendar
from datetime import date, timedelta
from itertools import groupby
from django.utils.html import conditional_escape as esc
class WorkoutCalendar(LocaleHTMLCalendar):
    """HTML month calendar rendering workouts per day plus weekly totals."""

    # Template for per-week totals; copied per week, never mutated in place.
    sums = {
        'kcal_sum': 0,
        'distance_sum': 0,
        'duration_sum': timedelta(0),
    }

    def __init__(self, workouts, locale):
        """`workouts` must be ordered by date -- the groupby-based grouping
        below relies on that ordering."""
        super(WorkoutCalendar, self).__init__(locale=locale)
        self.workouts = self.group_by_day(workouts)
        self.workouts_by_week = self.group_by_week(workouts)
        self.week_sums = self.get_week_sums()
        # Index of the week row currently being rendered by formatweek().
        self.current_week = 0

    def formatday(self, day, weekday):
        """Return a <td> cell for `day`; day == 0 marks out-of-month padding."""
        if day == 0:
            return self.day_cell('noday', '&nbsp;')
        cssclass = self.cssclasses[weekday]
        if date.today() == date(self.year, self.month, day):
            # BUG FIX: CSS classes must be space separated; this previously
            # produced e.g. "montoday" instead of "mon today".
            cssclass += ' today'
        if day in self.workouts:
            cssclass += ' filled'
            body = ['<ul>']
            for workout in self.workouts[day]:
                body.append('<li>')
                body.append('<a href="%s">' % workout.get_absolute_url())
                body.append(esc(workout))
                body.append('</a>')
                if workout.route.distance:
                    body.append('<br>%s km' % workout.route.distance)
                body.append('<br>')
                body.append(esc(workout.kcal) + ' kcal')
                body.append('<br>')
                body.append(esc(workout.duration))
                body.append('</li>')
            body.append('</ul>')
            return self.day_cell(cssclass, '%d %s' % (day, ''.join(body)))
        return self.day_cell(cssclass, day)

    def get_week_sums(self):
        """Aggregate kcal / distance / duration per week.

        Keys are 0..n-1 in iteration order of workouts_by_week.
        NOTE(review): formatweek() indexes these by rendered-row number, so
        alignment assumes workouts start in the month's first week -- confirm.
        """
        week_sums = {}
        for i, workouts in enumerate(self.workouts_by_week.values()):
            w_sums = self.sums.copy()
            for workout in workouts:
                w_sums['kcal_sum'] += workout.kcal
                try:
                    w_sums['distance_sum'] += workout.route.distance
                except AttributeError:
                    pass  # some exercises don't have a distance
                try:
                    w_sums['duration_sum'] += workout.duration
                except TypeError:
                    pass  # the duration field is sometimes a decimal
            week_sums[i] = w_sums
        return week_sums

    def formatweek(self, theweek):
        """
        Return a complete week as a table row, with a trailing totals cell.
        """
        s = ''.join(self.formatday(d, wd) for (d, wd) in theweek)
        week = {'days': s}
        if self.current_week in self.week_sums:
            week.update(self.week_sums[self.current_week])
        else:
            week.update(self.sums)
        self.current_week += 1
        return '<tr>%(days)s<td> \
        <br>\
        Distance: %(distance_sum)s\
        <br>\
        Kcal: %(kcal_sum)s\
        <br>\
        Duration: %(duration_sum)s\
        </td></tr>' % week

    def formatmonth(self, year, month):
        """Remember year/month (formatday needs them) and delegate."""
        self.year, self.month = year, month
        return super(WorkoutCalendar, self).formatmonth(year, month)

    def group_by_week(self, workouts):
        """Group consecutive workouts by zero-padded '%W' week-number string."""
        field = lambda workout: workout.date.strftime('%W')
        return dict(
            [(week, list(items)) for week, items in groupby(workouts, field)]
        )

    def group_by_day(self, workouts):
        """Group consecutive workouts by day-of-month."""
        field = lambda workout: workout.date.day
        return dict(
            [(day, list(items)) for day, items in groupby(workouts, field)]
        )

    def day_cell(self, cssclass, body):
        """Wrap `body` in a <td> carrying `cssclass`."""
        return '<td class="%s">%s</td>' % (cssclass, body)
group_by_week: key workouts by integer week number instead of the zero-padded string returned by strftime('%W')
from calendar import LocaleHTMLCalendar
from datetime import date, timedelta
from itertools import groupby
from django.utils.html import conditional_escape as esc
class WorkoutCalendar(LocaleHTMLCalendar):
    """HTML month calendar rendering workouts per day plus weekly totals."""

    # Template for per-week totals; copied per week, never mutated in place.
    sums = {
        'kcal_sum': 0,
        'distance_sum': 0,
        'duration_sum': timedelta(0),
    }

    def __init__(self, workouts, locale):
        """`workouts` must be ordered by date -- the groupby-based grouping
        below relies on that ordering."""
        super(WorkoutCalendar, self).__init__(locale=locale)
        self.workouts = self.group_by_day(workouts)
        self.workouts_by_week = self.group_by_week(workouts)
        self.week_sums = self.get_week_sums()
        # Index of the week row currently being rendered by formatweek().
        self.current_week = 0

    def formatday(self, day, weekday):
        """Return a <td> cell for `day`; day == 0 marks out-of-month padding."""
        if day == 0:
            return self.day_cell('noday', '&nbsp;')
        cssclass = self.cssclasses[weekday]
        if date.today() == date(self.year, self.month, day):
            # BUG FIX: CSS classes must be space separated; this previously
            # produced e.g. "montoday" instead of "mon today".
            cssclass += ' today'
        if day in self.workouts:
            cssclass += ' filled'
            body = ['<ul>']
            for workout in self.workouts[day]:
                body.append('<li>')
                body.append('<a href="%s">' % workout.get_absolute_url())
                body.append(esc(workout))
                body.append('</a>')
                if workout.route.distance:
                    body.append('<br>%s km' % workout.route.distance)
                body.append('<br>')
                body.append(esc(workout.kcal) + ' kcal')
                body.append('<br>')
                body.append(esc(workout.duration))
                body.append('</li>')
            body.append('</ul>')
            return self.day_cell(cssclass, '%d %s' % (day, ''.join(body)))
        return self.day_cell(cssclass, day)

    def get_week_sums(self):
        """Aggregate kcal / distance / duration per week.

        Keys are 0..n-1 in iteration order of workouts_by_week.
        NOTE(review): formatweek() indexes these by rendered-row number, so
        alignment assumes workouts start in the month's first week -- confirm.
        """
        week_sums = {}
        for i, workouts in enumerate(self.workouts_by_week.values()):
            w_sums = self.sums.copy()
            for workout in workouts:
                w_sums['kcal_sum'] += workout.kcal
                try:
                    w_sums['distance_sum'] += workout.route.distance
                except AttributeError:
                    pass  # some exercises don't have a distance
                try:
                    w_sums['duration_sum'] += workout.duration
                except TypeError:
                    pass  # the duration field is sometimes a decimal
            week_sums[i] = w_sums
        return week_sums

    def formatweek(self, theweek):
        """
        Return a complete week as a table row, with a trailing totals cell.
        """
        s = ''.join(self.formatday(d, wd) for (d, wd) in theweek)
        week = {'days': s}
        if self.current_week in self.week_sums:
            week.update(self.week_sums[self.current_week])
        else:
            week.update(self.sums)
        self.current_week += 1
        return '<tr>%(days)s<td> \
        <br>\
        Distance: %(distance_sum)s\
        <br>\
        Kcal: %(kcal_sum)s\
        <br>\
        Duration: %(duration_sum)s\
        </td></tr>' % week

    def formatmonth(self, year, month):
        """Remember year/month (formatday needs them) and delegate."""
        self.year, self.month = year, month
        return super(WorkoutCalendar, self).formatmonth(year, month)

    def group_by_week(self, workouts):
        """Group consecutive workouts by integer '%W' week number.

        (Dropped the dead `+0` that followed the int() conversion.)
        """
        field = lambda workout: int(workout.date.strftime('%W'))
        return dict(
            [(week, list(items)) for week, items in groupby(workouts, field)]
        )

    def group_by_day(self, workouts):
        """Group consecutive workouts by day-of-month."""
        field = lambda workout: workout.date.day
        return dict(
            [(day, list(items)) for day, items in groupby(workouts, field)]
        )

    def day_cell(self, cssclass, body):
        """Wrap `body` in a <td> carrying `cssclass`."""
        return '<td class="%s">%s</td>' % (cssclass, body)
|
from __future__ import print_function
import sys
from collections import defaultdict
if __name__ == "__main__":
    # Tally how many reads match each lineage / sublineage label with a
    # score above the threshold, then print each tally normalized to a
    # fraction of its per-category total.
    # (The original's l_trip/s_trip flags only guarded commented-out
    # warnings, so they are omitted here -- no observable effect.)
    lin_match_d = defaultdict(int)
    sublin_match_d = defaultdict(int)
    match_threshold = 0.01

    for line in sys.stdin:
        tokens = line.strip().split()
        # Column 3 holds lineage "label:score" entries, column 4 the
        # sublineage ones; both ';'-separated with a possible trailing ';'.
        lineage_entries = tokens[3].strip().strip(";").split(";")
        sublineage_entries = tokens[4].strip().strip(";").split(";")
        for entry in lineage_entries:
            parts = entry.split(":")
            if float(parts[1]) > match_threshold:
                lin_match_d[parts[0]] += 1
        for entry in sublineage_entries:
            parts = entry.split(":")
            if float(parts[1]) > match_threshold:
                sublin_match_d[parts[0]] += 1

    l_total = sum(lin_match_d.values())
    s_total = sum(sublin_match_d.values())

    # Convert raw counts into per-category fractions.
    l_pct_d = defaultdict(float)
    for label, count in lin_match_d.items():
        l_pct_d[label] = float(count) / float(l_total)
    s_pct_d = defaultdict(float)
    for label, count in sublin_match_d.items():
        s_pct_d[label] = float(count) / float(s_total)

    print(l_pct_d)
    print(s_pct_d)
Update score_real_classification.py script after analyzing PAP data and twiddling parameters for performance
from __future__ import print_function
import sys
from collections import defaultdict
import pprint
def dict_to_string(d):
    """Serialize *d* as 'key:value' pairs joined by ';', highest value first.

    Sorting is done on the numeric values directly rather than re-parsing
    the formatted "key:value" strings, so keys that themselves contain ':'
    no longer crash or missort the output.  Ties keep the dict's iteration
    order (sorted() is stable).
    """
    ordered = sorted(d.items(), key=lambda kv: float(kv[1]), reverse=True)
    return ";".join(str(k) + ":" + str(v) for k, v in ordered)
if __name__ == "__main__":
    # Classify each input read into at most one lineage and sublineage,
    # then print normalized fractions plus low-read-count QC flags.
    lin_match_d = defaultdict(int)
    sublin_match_d = defaultdict(int)
    match_threshold = 0.005

    for line in sys.stdin:
        tokens = line.strip().split()
        read_len = int(tokens[2].strip().split("/")[1])
        hpv_match = int(tokens[2].strip().split("/")[0])
        ## We get some reads that look like ION Torrent barf - toss those:
        if read_len < 50 or hpv_match < 15:
            continue
        lin_toks = tokens[3].strip().strip(";").split(";")
        lin_kmer_counts = [int(i) for i in tokens[5].strip().strip(";").split(";")]
        sublin_toks = tokens[4].strip().strip(";").split(";")
        sublin_kmer_counts = [int(i) for i in tokens[6].strip().strip(";").split(";")]

        # Pick the first lineage clearing the kmer-count bar; stop scanning
        # if a second candidate appears (the first call is kept).
        l_trip = False
        l_match = ""
        for i in range(0, len(lin_toks)):
            t = lin_toks[i].split(":")
            if lin_kmer_counts[i] > 5:
                if l_trip:
                    break
                l_trip = True
                l_match = t[0]

        # Same scheme for sublineages, with an additional score threshold.
        s_trip = False
        s_match = ""
        for i in range(0, len(sublin_toks)):
            t = sublin_toks[i].split(":")
            if sublin_kmer_counts[i] > 2 and float(t[1]) > match_threshold:
                if s_trip:
                    break
                s_trip = True
                s_match = t[0]

        # BUG FIX: these comparisons used `is not` (object identity) on
        # strings, which is unreliable and raises SyntaxWarning on modern
        # Python; use value comparison.  The sublineage's first character
        # is expected to name its lineage.
        if l_match != "" and s_match != "" and l_match != s_match[0]:
            old = ""
            if lin_kmer_counts[0] > 10 and sublin_kmer_counts[1] > 2 and lin_toks[0].split(":")[0] == sublin_toks[1].split(":")[0][0]:
                # Strong lineage signal: swap to the runner-up sublineage
                # that agrees with the lineage call.
                old = s_match
                s_match = sublin_toks[1].split(":")[0]
                # NOTE(review): `old` is emitted twice across these two
                # writes -- confirm the intended message format.
                sys.stderr.write("Lin / Sublin mistmatch: " + l_match + " " + old)
                sys.stderr.write(" " + old + "->" + s_match + "\n")
            else:
                s_match = ""

        if l_match != "":
            lin_match_d[l_match] += 1
        if s_match != "":
            sublin_match_d[s_match] += 1

    l_total = sum(lin_match_d.values())
    s_total = sum(sublin_match_d.values())

    # Normalize counts to fractions of the respective totals.
    l_pct_d = defaultdict(float)
    for i in lin_match_d:
        l_pct_d[i] = float(lin_match_d[i]) / float(l_total)
    s_pct_d = defaultdict(float)
    for i in sublin_match_d:
        s_pct_d[i] = float(sublin_match_d[i]) / float(s_total)

    # QC flags so downstream tooling can spot weakly supported samples.
    if l_total < 1000:
        low_read_lins = "WARN:low_lineage_counts:" + str(l_total)
    else:
        low_read_lins = "INFO:low_lineage_counts:" + str(l_total)
    if s_total < 1000:
        low_read_sublins = "WARN:low_sublineage_counts:" + str(s_total)
    else:
        low_read_sublins = "INFO:low_sublineage_counts:" + str(s_total)

    print(dict_to_string(l_pct_d), dict_to_string(s_pct_d),
          dict_to_string(sublin_match_d), low_read_lins, low_read_sublins)
|
import paths
from collections import deque
from timeit import default_timer as timer
import os, signal
from syncless import coio
from syncless.util import Queue
from socketless.messenger import Messenger
from utils.testcase import TestCase
from utils.channel_echoserver import launch_echoserver
class TestMessenger(TestCase):
def testResilience(self):
try:
token = id(self)
q = Queue()
port = 6000
host = ('localhost', port)
p = launch_echoserver(port)
coio.sleep(0.5)
messenger = Messenger(host, 0.1)
messenger.send('1', token, q)
assert q.popleft() == ('1', token)
p.kill()
messenger.send('2', token, q)
coio.sleep(0.5)
messenger.send('3', token, q)
assert q.popleft() == (None, token)
assert q.popleft() == (None, token)
p = launch_echoserver(port)
coio.sleep(0.5)
messenger.send('4', token, q)
assert q.popleft() == ('4', token)
messenger.close()
coio.sleep(0.5)
finally:
p.kill()
def testPerformance(self):
token = id(self)
message_length = 1024
N = 10000
batch_size = 100
q = Queue()
l = 0
port = 6001
host = ('localhost', port)
p = launch_echoserver(port)
bytecount = 0
try:
sent_messages = deque()
coio.sleep(1)
messenger = Messenger(host)
message_buffer = ''.join('%d' % (i % 10) for i in xrange(N+message_length*2))
i = 0
start_time = timer()
for i in xrange(N):
if message_length > 4096:
message = buffer(message_buffer, i, message_length)
else:
message = message_buffer[i:i+message_length]
bytecount += len(message)
messenger.send(message, token, q)
sent_messages.append((message, token))
l += 1
if l % batch_size == 0:
for j in xrange(batch_size):
rm, rt = q.popleft()
sm, st = sent_messages.popleft()
if type(sm) is buffer:
rm = buffer(rm)
if rm != sm:
print 'i: ', i
assert False
end_time = timer()
elapsed_time = end_time - start_time
print 'Transmitted %d messages with a size of %d bytes' % (N, message_length)
print 'Transmission time (with validation): %fs' % elapsed_time
print '%.2f requests+replies/s, %.2f MB/s' % (float(N*2) / elapsed_time, (float(bytecount*2) / 2**20) / elapsed_time)
messenger.close()
finally:
os.kill(p.pid, signal.SIGKILL)
if __name__ == '__main__':
    # Discover and run the TestMessenger cases when invoked directly.
    import unittest
    unittest.main()
replaced call to Popen.kill() with os.kill() for python 2.5 compatibility
import paths
from collections import deque
from timeit import default_timer as timer
import os, signal
from syncless import coio
from syncless.util import Queue
from socketless.messenger import Messenger
from utils.testcase import TestCase
from utils.channel_echoserver import launch_echoserver
class TestMessenger(TestCase):
def testResilience(self):
try:
token = id(self)
q = Queue()
port = 6000
host = ('localhost', port)
p = launch_echoserver(port)
coio.sleep(0.5)
messenger = Messenger(host, 0.1)
messenger.send('1', token, q)
assert q.popleft() == ('1', token)
p.kill()
messenger.send('2', token, q)
coio.sleep(0.5)
messenger.send('3', token, q)
assert q.popleft() == (None, token)
assert q.popleft() == (None, token)
p = launch_echoserver(port)
coio.sleep(0.5)
messenger.send('4', token, q)
assert q.popleft() == ('4', token)
messenger.close()
coio.sleep(0.5)
finally:
os.kill(p.pid, signal.SIGKILL)
def testPerformance(self):
token = id(self)
message_length = 1024
N = 10000
batch_size = 100
q = Queue()
l = 0
port = 6001
host = ('localhost', port)
p = launch_echoserver(port)
bytecount = 0
try:
sent_messages = deque()
coio.sleep(1)
messenger = Messenger(host)
message_buffer = ''.join('%d' % (i % 10) for i in xrange(N+message_length*2))
i = 0
start_time = timer()
for i in xrange(N):
if message_length > 4096:
message = buffer(message_buffer, i, message_length)
else:
message = message_buffer[i:i+message_length]
bytecount += len(message)
messenger.send(message, token, q)
sent_messages.append((message, token))
l += 1
if l % batch_size == 0:
for j in xrange(batch_size):
rm, rt = q.popleft()
sm, st = sent_messages.popleft()
if type(sm) is buffer:
rm = buffer(rm)
if rm != sm:
print 'i: ', i
assert False
end_time = timer()
elapsed_time = end_time - start_time
print 'Transmitted %d messages with a size of %d bytes' % (N, message_length)
print 'Transmission time (with validation): %fs' % elapsed_time
print '%.2f requests+replies/s, %.2f MB/s' % (float(N*2) / elapsed_time, (float(bytecount*2) / 2**20) / elapsed_time)
messenger.close()
finally:
os.kill(p.pid, signal.SIGKILL)
if __name__ == '__main__':
    # Discover and run the TestMessenger cases when invoked directly.
    import unittest
    unittest.main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.